brk(): larger allocation units internally
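This change makes sys_brk() back the heap in larger units than the application asks for: region->brk_end tracks the end of what has actually been mapped, while the new region->brk_end_reported tracks the end the application requested and is told about, and extend_process_region() is now called with the target rounded up to a multiple of five large pages. The user-space sketch below illustrates that bookkeeping only; the struct, the do_brk() and extend() helpers and the 2 MiB large-page size are assumptions for illustration, not code from this commit.

/*
 * Minimal sketch (not kernel code): how brk_end and brk_end_reported
 * interact when the heap is grown in large-page-sized chunks.
 */
#include <stdio.h>

#define LARGE_PAGE_SIZE (2UL * 1024 * 1024)   /* assumed 2 MiB large page */
#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE - 1))

struct region {
	unsigned long brk_start, brk_end, brk_end_reported;
};

/* stand-in for extend_process_region(): pretend the mapping always succeeds */
static unsigned long extend(unsigned long end, unsigned long new_end)
{
	(void)end;
	return new_end;
}

static unsigned long do_brk(struct region *r, unsigned long address)
{
	/* don't shrink; this also covers the glibc brk(0) trick */
	if (address < r->brk_end_reported)
		return r->brk_end_reported;

	/* still inside the already mapped slack: just update what we report */
	if (address < r->brk_end) {
		r->brk_end_reported = address;
		return r->brk_end_reported;
	}

	/* need more memory: map at least 5 large pages worth at once */
	r->brk_end = extend(r->brk_end,
			(address + (5 * LARGE_PAGE_SIZE) - 1) & LARGE_PAGE_MASK);

	if (r->brk_end >= address)
		r->brk_end_reported = address;

	return r->brk_end >= address ? r->brk_end_reported : r->brk_end;
}

int main(void)
{
	struct region r = { 0x600000, 0x600000, 0x600000 };

	printf("brk -> %lx\n", do_brk(&r, 0x600010)); /* extends the mapping */
	printf("brk -> %lx\n", do_brk(&r, 0x700000)); /* served from the slack */
	printf("mapped end: %lx, reported end: %lx\n",
	       r.brk_end, r.brk_end_reported);
	return 0;
}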
@@ -198,7 +198,8 @@ int prepare_process_ranges_args_envs(struct thread *thread,
 		pn->at_entry += aout_base;
 	}
 
-	vm->region.brk_start = vm->region.brk_end = vm->region.data_end;
+	vm->region.brk_start = vm->region.brk_end =
+		vm->region.brk_end_reported = vm->region.data_end;
 
 	/* Map, copy and update args and envs */
 	flags = VR_PROT_READ | VR_PROT_WRITE;
@@ -382,7 +382,7 @@ struct vm_regions {
 	unsigned long vm_start, vm_end;
 	unsigned long text_start, text_end;
 	unsigned long data_start, data_end;
-	unsigned long brk_start, brk_end;
+	unsigned long brk_start, brk_end, brk_end_reported;
 	unsigned long map_start, map_end;
 	unsigned long stack_start, stack_end;
 	unsigned long user_start, user_end;
@@ -1940,8 +1940,8 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
 
 
 unsigned long extend_process_region(struct process_vm *vm,
-	unsigned long start, unsigned long end,
-	unsigned long address, unsigned long flag)
+		unsigned long start, unsigned long end,
+		unsigned long address, unsigned long flag)
 {
 	unsigned long aligned_end, aligned_new_end;
 	void *p;
@@ -1952,93 +1952,32 @@ unsigned long extend_process_region(struct process_vm *vm,
 	}
 
 	aligned_end = ((end + PAGE_SIZE - 1) & PAGE_MASK);
 
 	if (aligned_end >= address) {
 		return address;
 	}
 
 	aligned_new_end = (address + PAGE_SIZE - 1) & PAGE_MASK;
 
-#ifdef USE_LARGE_PAGES
-	if (aligned_new_end - aligned_end >= LARGE_PAGE_SIZE) {
-		if(flag & VR_DEMAND_PAGING){panic("demand paging for large page is not available!");}
-		unsigned long p_aligned;
-		unsigned long old_aligned_end = aligned_end;
+	if (flag & VR_DEMAND_PAGING) {
+		// demand paging no need to allocate page now
+		kprintf("%s: demand paging, no physical memory allocated\n",
+			__FUNCTION__);
+		p = 0;
+	}
+	else {
+		p = ihk_mc_alloc_pages((aligned_new_end - aligned_end) >> PAGE_SHIFT,
+				IHK_MC_AP_NOWAIT);
 
-		if ((aligned_end & (LARGE_PAGE_SIZE - 1)) != 0) {
-
-			aligned_end = (aligned_end + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;
-			/* Fill in the gap between old_aligned_end and aligned_end
-			 * with regular pages */
-			if((p = ihk_mc_alloc_pages((aligned_end - old_aligned_end) >> PAGE_SHIFT,
-					IHK_MC_AP_NOWAIT)) == NULL){
-				return end;
-			}
-			if((rc = add_process_memory_range(vm, old_aligned_end,
-					aligned_end, virt_to_phys(p), flag,
-					LARGE_PAGE_SHIFT)) != 0){
-				ihk_mc_free_pages(p, (aligned_end - old_aligned_end) >> PAGE_SHIFT);
-				return end;
-			}
-
-			dkprintf("filled in gap for LARGE_PAGE_SIZE aligned start: 0x%lX -> 0x%lX\n",
-				old_aligned_end, aligned_end);
-		}
-
-		/* Add large region for the actual mapping */
-		aligned_new_end = (aligned_new_end + (aligned_end - old_aligned_end) +
-			(LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;
-		address = aligned_new_end;
-
-		if((p = ihk_mc_alloc_pages((aligned_new_end - aligned_end + LARGE_PAGE_SIZE) >> PAGE_SHIFT,
-				IHK_MC_AP_NOWAIT)) == NULL){
+		if (!p) {
 			return end;
 		}
-
-		p_aligned = ((unsigned long)p + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;
-
-		if (p_aligned > (unsigned long)p) {
-			ihk_mc_free_pages(p, (p_aligned - (unsigned long)p) >> PAGE_SHIFT);
-		}
-		ihk_mc_free_pages(
-			(void *)(p_aligned + aligned_new_end - aligned_end),
-			(LARGE_PAGE_SIZE - (p_aligned - (unsigned long)p)) >> PAGE_SHIFT);
-
-		if((rc = add_process_memory_range(vm, aligned_end,
-				aligned_new_end, virt_to_phys((void *)p_aligned),
-				flag, LARGE_PAGE_SHIFT)) != 0){
-			ihk_mc_free_pages(p, (aligned_new_end - aligned_end + LARGE_PAGE_SIZE) >> PAGE_SHIFT);
-			return end;
-		}
-
-		dkprintf("largePTE area: 0x%lX - 0x%lX (s: %lu) -> 0x%lX - \n",
-			aligned_end, aligned_new_end,
-			(aligned_new_end - aligned_end),
-			virt_to_phys((void *)p_aligned));
-
-		return address;
-	}
-#endif
-	if(flag & VR_DEMAND_PAGING){
-		// demand paging no need to allocate page now
-		kprintf("demand page do not allocate page\n");
-		p=0;
-	}else{
-
-		p = ihk_mc_alloc_pages((aligned_new_end - aligned_end) >> PAGE_SHIFT, IHK_MC_AP_NOWAIT);
-
-		if (!p) {
-			return end;
-		}
 	}
-	if((rc = add_process_memory_range(vm, aligned_end, aligned_new_end,
-				(p==0?0:virt_to_phys(p)), flag, NULL, 0,
-				PAGE_SHIFT)) != 0){
+	if ((rc = add_process_memory_range(vm, aligned_end, aligned_new_end,
+				(p == 0 ? 0 : virt_to_phys(p)), flag, NULL, 0,
+				PAGE_SHIFT)) != 0) {
		ihk_mc_free_pages(p, (aligned_new_end - aligned_end) >> PAGE_SHIFT);
 		return end;
 	}
 
-	return address;
+	/* NOTE: returned value is not the user requested new end! */
+	return aligned_new_end;
 }
 
 // Original version retained because dcfa (src/mccmd/client/ibmic/main.c) calls this
@@ -1595,35 +1595,51 @@ SYSCALL_DECLARE(brk)
 	struct vm_regions *region = &cpu_local_var(current)->vm->region;
 	unsigned long r;
 	unsigned long vrflag;
+	long diff = address - region->brk_end;
 
-	dkprintf("SC(%d)[sys_brk] brk_start=%lx,end=%lx\n",
-		ihk_mc_get_processor_id(), region->brk_start, region->brk_end);
+	dkprintf("%s: len: %ld, address: %lx\n",
+		__FUNCTION__, diff, address);
 
 	flush_nfo_tlb();
 
-	/* brk change fail, including glibc trick brk(0) to obtain current brk */
-	if(address < region->brk_start) {
-		r = region->brk_end;
+	/* Don't shrink, including glibc trick brk(0) to obtain current brk */
+	if (address < region->brk_end_reported) {
+		r = region->brk_end_reported;
 		goto out;
 	}
 
-	/* brk change fail, because we don't shrink memory region */
-	if(address < region->brk_end) {
-		r = region->brk_end;
+	/* If inside allocated area, simply report and update reported value */
+	if (address < region->brk_end) {
+		region->brk_end_reported = address;
+		r = region->brk_end_reported;
 		goto out;
 	}
 
-	/* try to extend memory region */
+	/* We need to extend, do it by at least n large page size */
 	vrflag = VR_PROT_READ | VR_PROT_WRITE;
 	vrflag |= VRFLAG_PROT_TO_MAXPROT(vrflag);
 	ihk_mc_spinlock_lock_noirq(&cpu_local_var(current)->vm->memory_range_lock);
 	region->brk_end = extend_process_region(cpu_local_var(current)->vm,
-		region->brk_start, region->brk_end, address, vrflag);
+		region->brk_start,
+		region->brk_end,
+		(address + (5 * LARGE_PAGE_SIZE) - 1) & LARGE_PAGE_MASK,
+		vrflag);
 	ihk_mc_spinlock_unlock_noirq(&cpu_local_var(current)->vm->memory_range_lock);
-	dkprintf("SC(%d)[sys_brk] brk_end set to %lx\n",
-		ihk_mc_get_processor_id(), region->brk_end);
 
-	r = region->brk_end;
+	/* Did we succeed with extending? */
+	if (region->brk_end >= address) {
+		region->brk_end_reported = address;
+		r = region->brk_end_reported;
+	}
+	else {
+		r = region->brk_end;
+	}
 
+	dkprintf("%s: len: %ld, brk_end_reported: 0x%lx, brk_end: 0x%lx\n",
+		__FUNCTION__,
+		diff,
+		region->brk_end_reported,
+		region->brk_end);
+
 out:
 	return r;