diff --git a/arch/x86/kernel/local.c b/arch/x86/kernel/local.c
index 014b4a93..a88700d0 100644
--- a/arch/x86/kernel/local.c
+++ b/arch/x86/kernel/local.c
@@ -11,7 +11,7 @@ struct x86_cpu_local_variables *locals;
 void init_processors_local(int max_id)
 {
     /* Is contiguous allocating adequate?? */
-    locals = ihk_mc_alloc_pages(max_id, 0);
+    locals = ihk_mc_alloc_pages(max_id, IHK_MC_AP_CRITICAL);
     memset(locals, 0, PAGE_SIZE * max_id);
 
     kprintf("locals = %p\n", locals);
diff --git a/arch/x86/kernel/memory.c b/arch/x86/kernel/memory.c
index 03239e6b..5d5fd6a8 100644
--- a/arch/x86/kernel/memory.c
+++ b/arch/x86/kernel/memory.c
@@ -133,7 +133,7 @@ static unsigned long setup_l3(struct page_table *pt,
             pt->entry[i] = 0;
             continue;
         }
 
-        pt_phys = setup_l2(arch_alloc_page(0), phys, start, end);
+        pt_phys = setup_l2(arch_alloc_page(IHK_MC_AP_CRITICAL), phys, start, end);
         pt->entry[i] = pt_phys | PFL3_KERN_ATTR;
     }
@@ -157,7 +157,7 @@ static void init_normal_area(struct page_table *pt)
     for (phys = (map_start & ~(PTL4_SIZE - 1)); phys < map_end;
          phys += PTL4_SIZE) {
-        pt_phys = setup_l3(arch_alloc_page(0), phys,
+        pt_phys = setup_l3(arch_alloc_page(IHK_MC_AP_CRITICAL), phys,
                            map_start, map_end);
 
         pt->entry[ident_index++] = pt_phys | PFL4_KERN_ATTR;
@@ -165,11 +165,12 @@ static void init_normal_area(struct page_table *pt)
     }
 }
 
-static struct page_table *__alloc_new_pt(void)
+static struct page_table *__alloc_new_pt(enum ihk_mc_ap_flag ap_flag)
 {
-    struct page_table *newpt = arch_alloc_page(0);
+    struct page_table *newpt = arch_alloc_page(ap_flag);
 
-    memset(newpt, 0, sizeof(struct page_table));
+    if(newpt)
+        memset(newpt, 0, sizeof(struct page_table));
 
     return newpt;
 }
@@ -224,6 +225,7 @@ void set_pte(pte_t *ppte, unsigned long phys, int attr)
 }
 
+#if 0
 /*
  * get_pte()
  *
  * Descends the page table (allocates pages for page tables if needed)
@@ -231,7 +233,7 @@ void set_pte(pte_t *ppte, unsigned long phys, int attr)
  * and returns a pointer to the PTE corresponding to the
  * virtual address.
 */
-pte_t *get_pte(struct page_table *pt, void *virt, int attr)
+pte_t *get_pte(struct page_table *pt, void *virt, int attr, enum ihk_mc_ap_flag ap_flag)
 {
     int l4idx, l3idx, l2idx, l1idx;
     unsigned long v = (unsigned long)virt;
@@ -247,7 +249,8 @@ pte_t *get_pte(struct page_table *pt, void *virt, int attr)
     if (pt->entry[l4idx] & PFL4_PRESENT) {
         pt = phys_to_virt(pt->entry[l4idx] & PAGE_MASK);
     } else {
-        newpt = __alloc_new_pt();
+        if((newpt = __alloc_new_pt(ap_flag)) == NULL)
+            return NULL;
         pt->entry[l4idx] = virt_to_phys(newpt) | attr_to_l4attr(attr);
         pt = newpt;
     }
@@ -255,7 +258,8 @@ pte_t *get_pte(struct page_table *pt, void *virt, int attr)
     if (pt->entry[l3idx] & PFL3_PRESENT) {
         pt = phys_to_virt(pt->entry[l3idx] & PAGE_MASK);
     } else {
-        newpt = __alloc_new_pt();
+        if((newpt = __alloc_new_pt(ap_flag)) == NULL)
+            return NULL;
         pt->entry[l3idx] = virt_to_phys(newpt) | attr_to_l3attr(attr);
         pt = newpt;
     }
@@ -279,7 +283,8 @@ pte_t *get_pte(struct page_table *pt, void *virt, int attr)
     if (pt->entry[l2idx] & PFL2_PRESENT) {
         pt = phys_to_virt(pt->entry[l2idx] & PAGE_MASK);
     } else {
-        newpt = __alloc_new_pt();
+        if((newpt = __alloc_new_pt(ap_flag)) == NULL)
+            return NULL;
         pt->entry[l2idx] = virt_to_phys(newpt) | attr_to_l2attr(attr)
             | PFL2_PRESENT;
         pt = newpt;
@@ -287,6 +292,7 @@ pte_t *get_pte(struct page_table *pt, void *virt, int attr)
 
     return &(pt->entry[l1idx]);
 }
+#endif
 
 static int __set_pt_page(struct page_table *pt, void *virt, unsigned long phys,
                          int attr)
@@ -294,6 +300,9 @@ static int __set_pt_page(struct page_table *pt, void *virt, unsigned long phys,
     int l4idx, l3idx, l2idx, l1idx;
     unsigned long v = (unsigned long)virt;
     struct page_table *newpt;
+    enum ihk_mc_ap_flag ap_flag;
+
+    ap_flag = attr & PTATTR_USER? IHK_MC_AP_NOWAIT: IHK_MC_AP_CRITICAL;
 
     if (!pt) {
         pt = init_pt;
@@ -310,7 +319,8 @@ static int __set_pt_page(struct page_table *pt, void *virt, unsigned long phys,
     if (pt->entry[l4idx] & PFL4_PRESENT) {
         pt = phys_to_virt(pt->entry[l4idx] & PAGE_MASK);
     } else {
-        newpt = __alloc_new_pt();
+        if((newpt = __alloc_new_pt(ap_flag)) == NULL)
+            return -ENOMEM;
         pt->entry[l4idx] = virt_to_phys(newpt) | attr_to_l4attr(attr);
         pt = newpt;
     }
@@ -318,7 +328,8 @@ static int __set_pt_page(struct page_table *pt, void *virt, unsigned long phys,
     if (pt->entry[l3idx] & PFL3_PRESENT) {
         pt = phys_to_virt(pt->entry[l3idx] & PAGE_MASK);
     } else {
-        newpt = __alloc_new_pt();
+        if((newpt = __alloc_new_pt(ap_flag)) == NULL)
+            return -ENOMEM;
         pt->entry[l3idx] = virt_to_phys(newpt) | attr_to_l3attr(attr);
         pt = newpt;
     }
@@ -340,7 +351,8 @@ static int __set_pt_page(struct page_table *pt, void *virt, unsigned long phys,
     if (pt->entry[l2idx] & PFL2_PRESENT) {
         pt = phys_to_virt(pt->entry[l2idx] & PAGE_MASK);
     } else {
-        newpt = __alloc_new_pt();
+        if((newpt = __alloc_new_pt(ap_flag)) == NULL)
+            return -ENOMEM;
         pt->entry[l2idx] = virt_to_phys(newpt) | attr_to_l2attr(attr)
             | PFL2_PRESENT;
         pt = newpt;
@@ -524,7 +536,7 @@ int ihk_mc_pt_prepare_map(page_table_t p, void *virt, unsigned long size,
     if (pt->entry[l4idx] & PFL4_PRESENT) {
         return 0;
     } else {
-        newpt = __alloc_new_pt();
+        newpt = __alloc_new_pt(IHK_MC_AP_CRITICAL);
         if (!newpt) {
             ret = -ENOMEM;
         } else {
@@ -545,9 +557,12 @@ int ihk_mc_pt_prepare_map(page_table_t p, void *virt, unsigned long size,
     return ret;
 }
 
-struct page_table *ihk_mc_pt_create(void)
+struct page_table *ihk_mc_pt_create(enum ihk_mc_ap_flag ap_flag)
 {
-    struct page_table *pt = ihk_mc_alloc_pages(1, 0);
+    struct page_table *pt = ihk_mc_alloc_pages(1, ap_flag);
+
+    if(pt == NULL)
+        return NULL;
 
     memset(pt->entry, 0, PAGE_SIZE);
 
     /* Copy the kernel space */
@@ -633,7 +648,9 @@ void *map_fixed_area(unsigned long phys, unsigned long size, int uncachable)
     kprintf("map_fixed: %lx => %p (%d pages)\n", paligned, v, npages);
 
     for (i = 0; i < npages; i++) {
-        __set_pt_page(init_pt, (void *)fixed_virt, paligned, flag);
+        if(__set_pt_page(init_pt, (void *)fixed_virt, paligned, flag)){
+            return NULL;
+        }
 
         fixed_virt += PAGE_SIZE;
         paligned += PAGE_SIZE;
@@ -651,7 +668,7 @@ void init_low_area(struct page_table *pt)
 
 void init_page_table(void)
 {
-    init_pt = arch_alloc_page(0);
+    init_pt = arch_alloc_page(IHK_MC_AP_CRITICAL);
 
     memset(init_pt, 0, sizeof(PAGE_SIZE));
diff --git a/arch/x86/kernel/mikc.c b/arch/x86/kernel/mikc.c
index ab06dd79..546e9719 100644
--- a/arch/x86/kernel/mikc.c
+++ b/arch/x86/kernel/mikc.c
@@ -17,8 +17,8 @@ int ihk_mc_ikc_init_first_local(struct ihk_ikc_channel_desc *channel,
     memset(channel, 0, sizeof(struct ihk_ikc_channel_desc));
 
     /* Place both sides in this side */
-    rq = arch_alloc_page(0);
-    wq = arch_alloc_page(0);
+    rq = arch_alloc_page(IHK_MC_AP_CRITICAL);
+    wq = arch_alloc_page(IHK_MC_AP_CRITICAL);
 
     ihk_ikc_init_queue(rq, 0, 0, PAGE_SIZE, MASTER_IKCQ_PKTSIZE);
     ihk_ikc_init_queue(wq, 0, 0, PAGE_SIZE, MASTER_IKCQ_PKTSIZE);
diff --git a/executer/kernel/control.c b/executer/kernel/control.c
index c68015ad..e84ea277 100644
--- a/executer/kernel/control.c
+++ b/executer/kernel/control.c
@@ -230,6 +230,7 @@ int mcexec_wait_syscall(ihk_os_t os, struct syscall_wait_desc *__user req)
     unsigned long s, w, d;
 #endif
 
+//printk("mcexec_wait_syscall swd=%p req=%p size=%d\n", &swd, req, sizeof(swd.cpu));
     if (copy_from_user(&swd, req, sizeof(swd.cpu))) {
         return -EFAULT;
     }
diff --git a/executer/kernel/mcctrl.h b/executer/kernel/mcctrl.h
index 0708f354..36f1a162 100644
--- a/executer/kernel/mcctrl.h
+++ b/executer/kernel/mcctrl.h
@@ -9,6 +9,7 @@
 
 #define SCD_MSG_PREPARE_PROCESS         0x1
 #define SCD_MSG_PREPARE_PROCESS_ACKED   0x2
+#define SCD_MSG_PREPARE_PROCESS_NACKED  0x7
 #define SCD_MSG_SCHEDULE_PROCESS        0x3
 
 #define SCD_MSG_INIT_CHANNEL            0x5
diff --git a/kernel/cls.c b/kernel/cls.c
index 9b84569c..afb81ac7 100644
--- a/kernel/cls.c
+++ b/kernel/cls.c
@@ -19,7 +19,7 @@ void cpu_local_var_init(void)
     z = sizeof(struct cpu_local_var) * num_processors;
     z = (z + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-    clv = allocate_pages(z, 0);
+    clv = allocate_pages(z, IHK_MC_AP_CRITICAL);
     memset(clv, 0, z * PAGE_SIZE);
 }
diff --git a/kernel/host.c b/kernel/host.c
index 4e080bb0..57ebccaf 100644
--- a/kernel/host.c
+++ b/kernel/host.c
@@ -33,7 +33,7 @@ void check_mapping_for_proc(struct process *proc, unsigned long addr)
 /*
  * Communication with host
  */
-static void process_msg_prepare_process(unsigned long rphys)
+static int process_msg_prepare_process(unsigned long rphys)
 {
     unsigned long phys, sz, s, e, up;
     struct program_load_desc *p, *pn;
@@ -46,23 +46,36 @@ static void process_msg_prepare_process(unsigned long rphys)
     int argc, envc, args_envs_npages;
     char **env;
     int range_npages;
+    void *up_v;
 
     sz = sizeof(struct program_load_desc)
         + sizeof(struct program_image_section) * 16;
     npages = ((rphys + sz - 1) >> PAGE_SHIFT) - (rphys >> PAGE_SHIFT) + 1;
 
     phys = ihk_mc_map_memory(NULL, rphys, sz);
-    p = ihk_mc_map_virtual(phys, npages, PTATTR_WRITABLE);
+    if((p = ihk_mc_map_virtual(phys, npages, PTATTR_WRITABLE)) == NULL){
+        ihk_mc_unmap_memory(NULL, phys, sz);
+        return -ENOMEM;
+    }
 
     n = p->num_sections;
     dkprintf("# of sections: %d\n", n);
 
-    pn = ihk_mc_allocate(sizeof(struct program_load_desc)
-                         + sizeof(struct program_image_section) * n, 0);
+    if((pn = ihk_mc_allocate(sizeof(struct program_load_desc)
+             + sizeof(struct program_image_section) * n, IHK_MC_AP_NOWAIT)) == NULL){
+        ihk_mc_unmap_virtual(p, npages, 0);
+        ihk_mc_unmap_memory(NULL, phys, sz);
+        return -ENOMEM;
+    }
     memcpy_long(pn, p, sizeof(struct program_load_desc)
                 + sizeof(struct program_image_section) * n);
 
-    proc = create_process(p->entry);
+    if((proc = create_process(p->entry)) == NULL){
+        ihk_mc_free(pn);
+        ihk_mc_unmap_virtual(p, npages, 1);
+        ihk_mc_unmap_memory(NULL, phys, sz);
+        return -ENOMEM;
+    }
     proc->pid = 1024;
 
     proc->vm->region.user_start = pn->user_start;
     proc->vm->region.user_end = pn->user_end;
@@ -79,8 +92,14 @@ static void process_msg_prepare_process(unsigned long rphys)
 #if 0
         if (range_npages <= 256) {
 #endif
-            up = virt_to_phys(ihk_mc_alloc_pages(range_npages, 0));
-            add_process_memory_range(proc, s, e, up, 0);
+            if((up_v = ihk_mc_alloc_pages(range_npages, IHK_MC_AP_NOWAIT)) == NULL){
+                goto err;
+            }
+            up = virt_to_phys(up_v);
+            if(add_process_memory_range(proc, s, e, up, VR_NONE) != 0){
+                ihk_mc_free_pages(up_v, range_npages);
+                goto err;
+            }
 
             {
                 void *_virt = (void *)s;
@@ -110,7 +129,7 @@ static void process_msg_prepare_process(unsigned long rphys)
         }
         else {
             up = 0;
-            if (add_process_large_range(proc, s, e, 0, &up)) {
+            if (add_process_large_range(proc, s, e, VR_NONE, &up)) {
                 kprintf("ERROR: not enough memory\n");
                 while (1) cpu_halt();
             }
@@ -165,30 +184,39 @@ static void process_msg_prepare_process(unsigned long rphys)
     /* Map system call stuffs */
     addr = proc->vm->region.map_start - PAGE_SIZE * SCD_RESERVED_COUNT;
     e = addr + PAGE_SIZE * DOORBELL_PAGE_COUNT;
-    add_process_memory_range(proc, addr, e,
+    if(add_process_memory_range(proc, addr, e,
                              cpu_local_var(scp).doorbell_pa,
-                             VR_REMOTE | VR_RESERVED);
+                             VR_REMOTE | VR_RESERVED) != 0){
+        goto err;
+    }
     addr = e;
     e = addr + PAGE_SIZE * REQUEST_PAGE_COUNT;
-    add_process_memory_range(proc, addr, e,
+    if(add_process_memory_range(proc, addr, e,
                              cpu_local_var(scp).request_pa,
-                             VR_REMOTE | VR_RESERVED);
+                             VR_REMOTE | VR_RESERVED) != 0){
+        goto err;
+    }
     addr = e;
     e = addr + PAGE_SIZE * RESPONSE_PAGE_COUNT;
-    add_process_memory_range(proc, addr, e,
+    if(add_process_memory_range(proc, addr, e,
                              cpu_local_var(scp).response_pa,
-                             VR_RESERVED);
+                             VR_RESERVED) != 0){
+        goto err;
+    }
 
     /* Map, copy and update args and envs */
     addr = e;
     e = addr + PAGE_SIZE * ARGENV_PAGE_COUNT;
-    args_envs = ihk_mc_alloc_pages(ARGENV_PAGE_COUNT, 0);
+    if((args_envs = ihk_mc_alloc_pages(ARGENV_PAGE_COUNT, IHK_MC_AP_NOWAIT)) == NULL){
+        goto err;
+    }
     args_envs_p = virt_to_phys(args_envs);
 
-    add_process_memory_range(proc, addr, e,
-                             args_envs_p,
-                             VR_RESERVED);
+    if(add_process_memory_range(proc, addr, e, args_envs_p, VR_RESERVED) != 0){
+        ihk_mc_free_pages(args_envs, ARGENV_PAGE_COUNT);
+        goto err;
+    }
 
     dkprintf("args_envs mapping\n");
@@ -199,8 +227,10 @@ static void process_msg_prepare_process(unsigned long rphys)
     dkprintf("args_envs_npages: %d\n", args_envs_npages);
     args_envs_rp = ihk_mc_map_memory(NULL, (unsigned long)p->args, p->args_len);
     dkprintf("args_envs_rp: 0x%lX\n", args_envs_rp);
-    args_envs_r = (char *)ihk_mc_map_virtual(args_envs_rp, args_envs_npages,
-                                             PTATTR_WRITABLE);
+    if((args_envs_r = (char *)ihk_mc_map_virtual(args_envs_rp, args_envs_npages,
+                                             PTATTR_WRITABLE)) == NULL){
+        goto err;
+    }
     dkprintf("args_envs_r: 0x%lX\n", args_envs_r);
 
     dkprintf("args copy, nr: %d\n", *((int*)args_envs_r));
@@ -217,8 +247,10 @@ static void process_msg_prepare_process(unsigned long rphys)
     dkprintf("args_envs_npages: %d\n",
              args_envs_npages);
     args_envs_rp = ihk_mc_map_memory(NULL, (unsigned long)p->envs, p->envs_len);
     dkprintf("args_envs_rp: 0x%lX\n", args_envs_rp);
-    args_envs_r = (char *)ihk_mc_map_virtual(args_envs_rp, args_envs_npages,
-                                             PTATTR_WRITABLE);
+    if((args_envs_r = (char *)ihk_mc_map_virtual(args_envs_rp, args_envs_npages,
+                                             PTATTR_WRITABLE)) == NULL){
+        goto err;
+    }
     dkprintf("args_envs_r: 0x%lX\n", args_envs_r);
 
     dkprintf("envs copy, nr: %d\n", *((int*)args_envs_r));
@@ -257,7 +289,9 @@ static void process_msg_prepare_process(unsigned long rphys)
     p->rprocess = (unsigned long)proc;
     p->rpgtable = virt_to_phys(proc->vm->page_table);
 
-    init_process_stack(proc, pn, argc, argv, envc, env);
+    if(init_process_stack(proc, pn, argc, argv, envc, env) != 0){
+        goto err;
+    }
 
     dkprintf("new process : %p [%d] / table : %p\n", proc, proc->pid,
              proc->vm->page_table);
@@ -266,6 +300,15 @@ static void process_msg_prepare_process(unsigned long rphys)
 
     ihk_mc_unmap_virtual(p, npages, 1);
     ihk_mc_unmap_memory(NULL, phys, sz);
+    return 0;
+err:
+    ihk_mc_free(pn);
+    ihk_mc_unmap_virtual(p, npages, 1);
+    ihk_mc_unmap_memory(NULL, phys, sz);
+    free_process_memory(proc);
+    // TODO: call page tables free by nakamura
+    destroy_process(proc);
+    return -ENOMEM;
 }
 
 static void process_msg_init(struct ikc_scd_init_param *pcp)
@@ -290,23 +333,32 @@ static void process_msg_init_acked(unsigned long pphys)
     lparam->request_rpa = param->request_page;
     lparam->request_pa = ihk_mc_map_memory(NULL, param->request_page,
                                            REQUEST_PAGE_COUNT * PAGE_SIZE);
-    lparam->request_va = ihk_mc_map_virtual(lparam->request_pa,
+    if((lparam->request_va = ihk_mc_map_virtual(lparam->request_pa,
                                             REQUEST_PAGE_COUNT,
-                                            PTATTR_WRITABLE);
+                                            PTATTR_WRITABLE)) == NULL){
+        // TODO:
+        panic("ENOMEM");
+    }
 
     lparam->doorbell_rpa = param->doorbell_page;
     lparam->doorbell_pa = ihk_mc_map_memory(NULL, param->doorbell_page,
                                             DOORBELL_PAGE_COUNT * PAGE_SIZE);
-    lparam->doorbell_va = ihk_mc_map_virtual(lparam->doorbell_pa,
+    if((lparam->doorbell_va = ihk_mc_map_virtual(lparam->doorbell_pa,
                                              DOORBELL_PAGE_COUNT,
-                                             PTATTR_WRITABLE);
+                                             PTATTR_WRITABLE)) == NULL){
+        // TODO:
+        panic("ENOMEM");
+    }
 
     lparam->post_rpa = param->post_page;
     lparam->post_pa = ihk_mc_map_memory(NULL, param->post_page, PAGE_SIZE);
-    lparam->post_va = ihk_mc_map_virtual(lparam->post_pa, 1,
-                                         PTATTR_WRITABLE);
+    if((lparam->post_va = ihk_mc_map_virtual(lparam->post_pa, 1,
+                                         PTATTR_WRITABLE)) == NULL){
+        // TODO:
+        panic("ENOMEM");
+    }
 
     lparam->post_fin = 1;
@@ -340,9 +392,10 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
         return 0;
 
     case SCD_MSG_PREPARE_PROCESS:
-        process_msg_prepare_process(packet->arg);
-
-        pckt.msg = SCD_MSG_PREPARE_PROCESS_ACKED;
+        if(process_msg_prepare_process(packet->arg) == 0)
+            pckt.msg = SCD_MSG_PREPARE_PROCESS_ACKED;
+        else
+            pckt.msg = SCD_MSG_PREPARE_PROCESS_NACKED;
         pckt.ref = packet->ref;
         pckt.arg = packet->arg;
         syscall_channel_send(c, &pckt);
diff --git a/kernel/include/process.h b/kernel/include/process.h
index f57ff567..681993d8 100644
--- a/kernel/include/process.h
+++ b/kernel/include/process.h
@@ -7,6 +7,7 @@
 #include 
 #include 
 
+#define VR_NONE        0x0
 #define VR_STACK       0x1
 #define VR_RESERVED    0x2
 #define VR_IO_NOCACHE  0x100
@@ -96,13 +97,16 @@ void free_process_memory(struct process *proc);
 int add_process_memory_range(struct process *process, unsigned long start,
                              unsigned long end, unsigned long phys,
                              unsigned long flag);
+#if 0
 int add_process_large_range(struct process *process,
                             unsigned long start, unsigned long end,
-                            unsigned long flag, unsigned long *phys);
+                            unsigned long flag, unsigned long *phys,
+                            enum ihk_mc_ap_flag ap_flag);
+#endif
 int remove_process_region(struct process *proc, unsigned long start,
                           unsigned long end);
 
 struct program_load_desc;
-void init_process_stack(struct process *process, struct program_load_desc *pn,
+int init_process_stack(struct process *process, struct program_load_desc *pn,
                         int argc, char **argv, int envc, char **env);
 
 unsigned long extend_process_region(struct process *proc,
diff --git a/kernel/include/syscall.h b/kernel/include/syscall.h
index a63e2b2f..051a9dcf 100644
--- a/kernel/include/syscall.h
+++ b/kernel/include/syscall.h
@@ -14,6 +14,7 @@
 
 #define SCD_MSG_PREPARE_PROCESS         0x1
 #define SCD_MSG_PREPARE_PROCESS_ACKED   0x2
+#define SCD_MSG_PREPARE_PROCESS_NACKED  0x7
 #define SCD_MSG_SCHEDULE_PROCESS        0x3
 
 #define SCD_MSG_INIT_CHANNEL            0x5
diff --git a/kernel/listeners.c b/kernel/listeners.c
index 2b057df4..551b8f36 100644
--- a/kernel/listeners.c
+++ b/kernel/listeners.c
@@ -48,8 +48,12 @@ static int test_packet_handler(struct ihk_ikc_channel_desc *c,
         a = (unsigned long)packet->param1 << 12;
 
         pp = ihk_mc_map_memory(NULL, a, 4 * 1024 * 1024);
-        v = ihk_mc_map_virtual(pp, 4 * 1024,
-                               PTATTR_UNCACHABLE);
+        if((v = ihk_mc_map_virtual(pp, 4 * 1024,
+                               PTATTR_UNCACHABLE)) == NULL){
+            ihk_mc_unmap_memory(NULL, pp, 4 * 1024 * 1024);
+            kprintf("Test msg : Not enough space\n");
+            return 0;
+        }
 
         testmem(v, 4 * 1024 * 1024);
diff --git a/kernel/mem.c b/kernel/mem.c
index e4faca3f..c5e0bde4 100644
--- a/kernel/mem.c
+++ b/kernel/mem.c
@@ -44,10 +44,14 @@ static void reserve_pages(unsigned long start, unsigned long end, int type)
 
 void *allocate_pages(int npages, enum ihk_mc_ap_flag flag)
 {
-    unsigned long pa = ihk_pagealloc_alloc(pa_allocator, npages);
-    /* all_pagealloc_alloc returns zero when error occured,
-       and callee (in mcos/kernel/process.c) so propagate it */
-    return pa ? phys_to_virt(pa) : 0;
+    unsigned long pa = ihk_pagealloc_alloc(pa_allocator, npages);
+    /* all_pagealloc_alloc returns zero when error occured,
+       and callee (in mcos/kernel/process.c) so propagate it */
+    if(pa)
+        return phys_to_virt(pa);
+    if(flag != IHK_MC_AP_NOWAIT)
+        panic("Not enough space\n");
+    return NULL;
 }
 
 void free_pages(void *va, int npages)
@@ -194,8 +198,15 @@ void *ihk_mc_map_virtual(unsigned long phys, int npages,
         return NULL;
     }
     for (i = 0; i < npages; i++) {
-        ihk_mc_pt_set_page(NULL, (char *)p + (i << PAGE_SHIFT),
-                           phys + (i << PAGE_SHIFT), attr);
+        if(ihk_mc_pt_set_page(NULL, (char *)p + (i << PAGE_SHIFT),
+                           phys + (i << PAGE_SHIFT), attr) != 0){
+            int j;
+            for(j = 0; j < i; j++){
+                ihk_mc_pt_clear_page(NULL, (char *)p + (j << PAGE_SHIFT));
+            }
+            ihk_pagealloc_free(vmap_allocator, virt_to_phys(p), npages);
+            return NULL;
+        }
     }
     return (char *)p + offset;
 }
@@ -308,7 +319,9 @@ void *kmalloc(int size, enum ihk_mc_ap_flag flag)
         req_page = ((u + 1) * sizeof(*h) + PAGE_SIZE - 1)
             >> PAGE_SHIFT;
-        h = allocate_pages(req_page, 0);
+        h = allocate_pages(req_page, flag);
+        if(h == NULL)
+            return NULL;
         prev->next = h;
         h->size = (req_page * PAGE_SIZE) / sizeof(*h) - 2;
         /* Guard entry */
diff --git a/kernel/mikc.c b/kernel/mikc.c
index b85b943c..4d8de6c5 100644
--- a/kernel/mikc.c
+++ b/kernel/mikc.c
@@ -12,7 +12,8 @@ static int arch_master_channel_packet_handler(struct ihk_ikc_channel_desc *,
 void ikc_master_init(void)
 {
     mchannel = kmalloc(sizeof(struct ihk_ikc_channel_desc) +
-                       sizeof(struct ihk_ikc_master_packet), 0);
+                       sizeof(struct ihk_ikc_master_packet),
+                       IHK_MC_AP_CRITICAL);
 
     ihk_mc_ikc_init_first(mchannel, arch_master_channel_packet_handler);
 }
diff --git a/kernel/process.c b/kernel/process.c
index 2b334d6a..4c7187c8 100644
--- a/kernel/process.c
+++ b/kernel/process.c
@@ -23,28 +23,34 @@ extern long do_arch_prctl(unsigned long code, unsigned long address);
 
-void init_process_vm(struct process_vm *vm)
+static int init_process_vm(struct process_vm *vm)
 {
     int i;
+    void *pt = ihk_mc_pt_create(IHK_MC_AP_NOWAIT);
+
+    if(pt == NULL)
+        return -ENOMEM;
 
     ihk_mc_spinlock_init(&vm->memory_range_lock);
     ihk_mc_spinlock_init(&vm->page_table_lock);
 
     ihk_atomic_set(&vm->refcount, 1);
     INIT_LIST_HEAD(&vm->vm_range_list);
-    vm->page_table = ihk_mc_pt_create();
+    vm->page_table = pt;
+
     /* Initialize futex queues */
     for (i = 0; i < (1 << FUTEX_HASHBITS); ++i)
         futex_queue_init(&vm->futex_queues[i]);
+    return 0;
 }
 
 struct process *create_process(unsigned long user_pc)
 {
     struct process *proc;
 
-    proc = ihk_mc_alloc_pages(KERNEL_STACK_NR_PAGES, 0);
+    proc = ihk_mc_alloc_pages(KERNEL_STACK_NR_PAGES, IHK_MC_AP_NOWAIT);
 
     if (!proc)
         return NULL;
@@ -56,7 +62,10 @@ struct process *create_process(unsigned long user_pc)
 
     proc->vm = (struct process_vm *)(proc + 1);
 
-    init_process_vm(proc->vm);
+    if(init_process_vm(proc->vm) != 0){
+        ihk_mc_free_pages(proc, KERNEL_STACK_NR_PAGES);
+        return NULL;
+    }
 
     ihk_mc_spinlock_init(&proc->spin_sleep_lock);
     proc->spin_sleep = 0;
@@ -69,7 +78,9 @@ struct process *clone_process(struct process *org, unsigned long pc,
 {
     struct process *proc;
 
-    proc = ihk_mc_alloc_pages(KERNEL_STACK_NR_PAGES, 0);
+    if((proc = ihk_mc_alloc_pages(KERNEL_STACK_NR_PAGES, IHK_MC_AP_NOWAIT)) == NULL){
+        return NULL;
+    }
 
     memset(proc, 0, KERNEL_STACK_NR_PAGES);
@@ -91,12 +102,12 @@ struct process *clone_process(struct process *org, unsigned long pc,
 extern void __host_update_process_range(struct process *process,
                                         struct vm_range *range);
 
-void update_process_page_table(struct process *process, struct vm_range *range,
-    enum ihk_mc_pt_attribute flag)
+static int update_process_page_table(struct process *process,
+        struct vm_range *range, enum ihk_mc_pt_attribute flag)
 {
     unsigned long p, pa = range->phys;
-    unsigned long flags = ihk_mc_spinlock_lock(&process->vm->page_table_lock);
+    unsigned long flags = ihk_mc_spinlock_lock(&process->vm->page_table_lock);
 
     p = range->start;
     while (p < range->end) {
 #ifdef USE_LARGE_PAGES
@@ -119,8 +130,11 @@ void update_process_page_table(struct process *process, struct vm_range *range,
         }
         else {
 #endif
-        ihk_mc_pt_set_page(process->vm->page_table, (void *)p,
-                           pa, PTATTR_WRITABLE | PTATTR_USER | flag);
+        if(ihk_mc_pt_set_page(process->vm->page_table, (void *)p,
+                           pa, PTATTR_WRITABLE | PTATTR_USER | flag) != 0){
+            ihk_mc_spinlock_unlock(&process->vm->page_table_lock, flags);
+            return -ENOMEM;
+        }
 
         pa += PAGE_SIZE;
         p += PAGE_SIZE;
@@ -128,9 +142,11 @@ void update_process_page_table(struct process *process, struct vm_range *range,
         }
 #endif
     }
-    ihk_mc_spinlock_unlock(&process->vm->page_table_lock, flags);
+    ihk_mc_spinlock_unlock(&process->vm->page_table_lock, flags);
+    return 0;
 }
 
+#if 0
 int add_process_large_range(struct process *process,
                             unsigned long start, unsigned long end,
                             unsigned long flag, unsigned long *phys)
@@ -148,7 +164,7 @@ int add_process_large_range(struct process *process,
         return -EINVAL;
     }
 
-    range = kmalloc(sizeof(struct vm_range), 0);
+    range = kmalloc(sizeof(struct vm_range), ap_flag);
     if (!range) {
         return -ENOMEM;
     }
@@ -164,7 +180,7 @@ int add_process_large_range(struct process *process,
          npages_allocated += 64) {
         struct vm_range sub_range;
 
-        virt = ihk_mc_alloc_pages(64, 0);
+        virt = ihk_mc_alloc_pages(64, IHK_MC_AP_NOWAIT);
         if (!virt) {
             return -ENOMEM;
         }
@@ -194,13 +210,14 @@ int add_process_large_range(struct process *process,
     list_add_tail(&range->list, &process->vm->vm_range_list);
     return 0;
 }
-
+#endif
 int add_process_memory_range(struct process *process,
                              unsigned long start, unsigned long end,
                              unsigned long phys, unsigned long flag)
 {
     struct vm_range *range;
+    int rc;
 
     if ((start < process->vm->region.user_start) ||
         (process->vm->region.user_end < end)) {
@@ -210,7 +227,7 @@ int add_process_memory_range(struct process *process,
         return -EINVAL;
     }
 
-    range = kmalloc(sizeof(struct vm_range), 0);
+    range = kmalloc(sizeof(struct vm_range), IHK_MC_AP_NOWAIT);
     if (!range) {
         return -ENOMEM;
     }
@@ -225,11 +242,15 @@ int add_process_memory_range(struct process *process,
              range->end - range->start, range->end - range->start);
 
     if (flag & VR_REMOTE) {
-        update_process_page_table(process, range, IHK_PTA_REMOTE);
+        rc = update_process_page_table(process, range, IHK_PTA_REMOTE);
     } else if (flag & VR_IO_NOCACHE) {
-        update_process_page_table(process, range, PTATTR_UNCACHABLE);
+        rc = update_process_page_table(process, range, PTATTR_UNCACHABLE);
    } else {
-        update_process_page_table(process, range, 0);
+        rc = update_process_page_table(process, range, 0);
+    }
+    if(rc != 0){
+        kfree(range);
+        return rc;
     }
 
 #if 0 // disable __host_update_process_range() in add_process_memory_range(), because it has no effect on the actual mapping on the MICs side.
@@ -249,21 +270,28 @@ int add_process_memory_range(struct process *process,
 
-void init_process_stack(struct process *process, struct program_load_desc *pn,
+int init_process_stack(struct process *process, struct program_load_desc *pn,
                         int argc, char **argv, int envc, char **env)
 {
     int s_ind = 0;
     int arg_ind;
     unsigned long size = USER_STACK_NR_PAGES * PAGE_SIZE;
-    char *stack = ihk_mc_alloc_pages(USER_STACK_NR_PAGES, 0);
+    char *stack = ihk_mc_alloc_pages(USER_STACK_NR_PAGES, IHK_MC_AP_NOWAIT);
     unsigned long *p = (unsigned long *)(stack + size);
     unsigned long end = process->vm->region.user_end;
     unsigned long start = end - size;
+    int rc;
+
+    if(stack == NULL)
+        return -ENOMEM;
 
     memset(stack, 0, size);
 
-    add_process_memory_range(process, start, end, virt_to_phys(stack), VR_STACK);
+    if((rc = add_process_memory_range(process, start, end, virt_to_phys(stack), VR_STACK)) != 0){
+        ihk_mc_free_pages(stack, USER_STACK_NR_PAGES);
+        return rc;
+    }
 
     s_ind = -1;
     p[s_ind--] = 0;     /* AT_NULL */
@@ -292,6 +320,7 @@ void init_process_stack(struct process *process, struct program_load_desc *pn,
                        end + sizeof(unsigned long) * s_ind);
     process->vm->region.stack_end = end;
     process->vm->region.stack_start = start;
+    return 0;
 }
 
@@ -301,6 +330,7 @@ unsigned long extend_process_region(struct process *proc,
 {
     unsigned long aligned_end, aligned_new_end;
     void *p;
+    int rc;
 
     if (!address || address < start || address >= USER_END) {
         return end;
@@ -324,9 +354,15 @@ unsigned long extend_process_region(struct process *proc,
         aligned_end = (aligned_end + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;
         /* Fill in the gap between old_aligned_end and aligned_end
          * with regular pages */
-        p = allocate_pages((aligned_end - old_aligned_end) >> PAGE_SHIFT, 0);
-        add_process_memory_range(proc, old_aligned_end, aligned_end,
-                                 virt_to_phys(p), 0);
+        if((p = allocate_pages((aligned_end - old_aligned_end) >> PAGE_SHIFT,
+                IHK_MC_AP_NOWAIT)) == NULL){
+            return end;
+        }
+        if((rc = add_process_memory_range(proc, old_aligned_end,
+                aligned_end, virt_to_phys(p), VR_NONE)) != 0){
+            free_pages(p, (aligned_end - old_aligned_end) >> PAGE_SHIFT);
+            return end;
+        }
 
         dkprintf("filled in gap for LARGE_PAGE_SIZE aligned start: 0x%lX -> 0x%lX\n",
                  old_aligned_end, aligned_end);
@@ -337,8 +373,10 @@ unsigned long extend_process_region(struct process *proc,
                            (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;
         address = aligned_new_end;
 
-        p = allocate_pages((aligned_new_end - aligned_end + LARGE_PAGE_SIZE)
-                           >> PAGE_SHIFT, 0);
+        if((p = allocate_pages((aligned_new_end - aligned_end + LARGE_PAGE_SIZE) >> PAGE_SHIFT,
+                IHK_MC_AP_NOWAIT)) == NULL){
+            return end;
+        }
 
         p_aligned = ((unsigned long)p + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;
@@ -346,8 +384,12 @@ unsigned long extend_process_region(struct process *proc,
             free_pages(p, (p_aligned - (unsigned long)p) >> PAGE_SHIFT);
         }
 
-        add_process_memory_range(proc, aligned_end, aligned_new_end,
-                                 virt_to_phys((void *)p_aligned), flag);
+        if((rc = add_process_memory_range(proc, aligned_end,
+                aligned_new_end, virt_to_phys((void *)p_aligned),
+                flag)) != 0){
+            free_pages(p, (aligned_new_end - aligned_end + LARGE_PAGE_SIZE) >> PAGE_SHIFT);
+            return end;
+        }
 
         dkprintf("largePTE area: 0x%lX - 0x%lX (s: %lu) -> 0x%lX - \n",
                  aligned_end, aligned_new_end,
@@ -358,14 +400,17 @@ unsigned long extend_process_region(struct process *proc,
     }
 #endif
 
-    p = allocate_pages((aligned_new_end - aligned_end) >> PAGE_SHIFT, 0);
+    p = allocate_pages((aligned_new_end - aligned_end) >> PAGE_SHIFT, IHK_MC_AP_NOWAIT);
 
     if (!p) {
         return end;
     }
 
-    add_process_memory_range(proc, aligned_end, aligned_new_end,
-                             virt_to_phys(p), flag);
+    if((rc = add_process_memory_range(proc, aligned_end, aligned_new_end,
+                             virt_to_phys(p), flag)) != 0){
+        free_pages(p, (aligned_new_end - aligned_end) >> PAGE_SHIFT);
+        return end;
+    }
 
     return address;
 }
diff --git a/kernel/syscall.c b/kernel/syscall.c
index 693ceac2..78173ac1 100644
--- a/kernel/syscall.c
+++ b/kernel/syscall.c
@@ -370,7 +370,8 @@ SYSCALL_DECLARE(exit_group)
 SYSCALL_DECLARE(mmap)
 {
     struct vm_regions *region = &cpu_local_var(current)->vm->region;
-    unsigned long lockr;
+    unsigned long lockr;
+    void *va;
 
     kprintf("syscall.c,mmap,addr=%lx,len=%lx,prot=%lx,flags=%x,fd=%x,offset=%lx\n",
             ihk_mc_syscall_arg0(ctx), ihk_mc_syscall_arg1(ctx),
@@ -415,15 +416,20 @@ SYSCALL_DECLARE(mmap)
         }
         e = (e + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;
 
-        p = (unsigned long)ihk_mc_alloc_pages(
-            (e - s + 2 * LARGE_PAGE_SIZE) >> PAGE_SHIFT, 0);
+        if((p = (unsigned long)ihk_mc_alloc_pages(
+            (e - s + 2 * LARGE_PAGE_SIZE) >> PAGE_SHIFT, IHK_MC_AP_NOWAIT)) == NULL){
+            return -ENOMEM;
+        }
         p_aligned = (p + LARGE_PAGE_SIZE + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;
 
         // add range, mapping
-        add_process_memory_range(cpu_local_var(current), s_orig, e,
-            virt_to_phys((void *)(p_aligned - head_space)), 0);
+        if(add_process_memory_range(cpu_local_var(current), s_orig, e,
+            virt_to_phys((void *)(p_aligned - head_space)), VR_NONE) != 0){
+            ihk_mc_free_pages(p, range_npages);
+            return -ENOMEM;
+        }
 
         dkprintf("largePTE area: 0x%lX - 0x%lX (s: %lu) -> 0x%lX -\n",
                  s_orig, e, (e - s_orig),
@@ -432,10 +438,16 @@ SYSCALL_DECLARE(mmap)
     else {
 #endif
     // allocate physical address
-    pa = virt_to_phys(ihk_mc_alloc_pages(range_npages, 0));
+    if((va = ihk_mc_alloc_pages(range_npages, IHK_MC_AP_NOWAIT)) == NULL){
+        return -ENOMEM;
+    }
+    pa = virt_to_phys(va);
 
     // add page_table, add memory-range
-    add_process_memory_range(cpu_local_var(current), s, e, pa, 0);
+    if(add_process_memory_range(cpu_local_var(current), s, e, pa, VR_NONE) != 0){
+        ihk_mc_free_pages(va, range_npages);
+        return -ENOMEM;
+    }
 
     dkprintf("syscall.c,pa allocated=%lx\n", pa);
 
 #ifdef USE_LARGE_PAGES
diff --git a/lib/include/ihk/mm.h b/lib/include/ihk/mm.h
index 354641d4..c82b2f3f 100644
--- a/lib/include/ihk/mm.h
+++ b/lib/include/ihk/mm.h
@@ -22,6 +22,9 @@ enum ihk_mc_ma_type {
 
 enum ihk_mc_ap_flag {
     IHK_MC_AP_FLAG,
+    IHK_MC_AP_CRITICAL, /* panic on no memory space */
+    IHK_MC_AP_NOWAIT,   /* error return on no memory space */
+    IHK_MC_AP_WAIT      /* wait on no memory space */
 };
 
 enum ihk_mc_pt_prepare_flag {
@@ -91,7 +94,7 @@ int ihk_mc_pt_clear_page(page_table_t pt, void *virt);
 int ihk_mc_pt_prepare_map(page_table_t pt, void *virt, unsigned long size,
                           enum ihk_mc_pt_prepare_flag);
 
-struct page_table *ihk_mc_pt_create(void);
+struct page_table *ihk_mc_pt_create(enum ihk_mc_ap_flag ap_flag);
 void ihk_mc_load_page_table(struct page_table *pt);
 int ihk_mc_pt_virt_to_phys(struct page_table *pt, void *virt,
                            unsigned long *phys);
diff --git a/lib/page_alloc.c b/lib/page_alloc.c
index 3a40aa8f..b931ffa1 100644
--- a/lib/page_alloc.c
+++ b/lib/page_alloc.c
@@ -44,7 +44,7 @@ void *__ihk_pagealloc_init(unsigned long start, unsigned long size,
         desc = initial;
         *pdescsize = descsize;
     } else {
-        desc = (void *)allocate_pages(descsize, 0);
+        desc = (void *)allocate_pages(descsize, IHK_MC_AP_CRITICAL);
     }
     if (!desc) {
         kprintf("IHK: failed to allocate page-allocator-desc "\
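
Note on the new flags: the enum added to lib/include/ihk/mm.h encodes the out-of-memory policy at each allocation site. IHK_MC_AP_CRITICAL panics inside allocate_pages(), IHK_MC_AP_NOWAIT returns NULL and leaves recovery to the caller, and IHK_MC_AP_WAIT is reserved for a blocking allocator. The sketch below illustrates the intended calling convention; example_alloc_scratch() is a hypothetical caller used only for illustration and is not part of this patch.

    /* Illustrative sketch, not part of the patch: how a caller is expected
     * to use the allocation flags introduced above. */
    static int example_alloc_scratch(int npages, void **out)
    {
        /* IHK_MC_AP_NOWAIT: failure is reported, the caller must unwind. */
        void *buf = ihk_mc_alloc_pages(npages, IHK_MC_AP_NOWAIT);

        if (buf == NULL)
            return -ENOMEM;

        *out = buf;
        return 0;
    }

    /* Boot-time paths pass IHK_MC_AP_CRITICAL instead; allocate_pages()
     * panics on failure, so those call sites may omit the NULL check. */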