use large page PTEs for allocations bigger than large page size, enforce alignment (USE_LARGE_PAGES)

Author:    Balazs Gerofi <bgerofi@riken.jp>
Date:      2012-11-03 15:43:07 +09:00
Committer: Masamichi Takagi <m-takagi@ab.jp.nec.com>
parent 337fe4b20d
commit a21fe11b00
3 changed files with 139 additions and 12 deletions
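The alignment arithmetic this commit leans on throughout is the usual power-of-two round-down test and round-up, applied to both virtual and physical addresses. A minimal standalone sketch of that arithmetic, assuming 4 KiB base pages and 2 MiB large pages; the constants here are local stand-ins for the kernel's macros:

#include <stdio.h>

/* Stand-ins for the kernel's constants, assuming 4 KiB base pages
 * and 2 MiB large pages. */
#define PAGE_SIZE        0x1000UL
#define LARGE_PAGE_SIZE  0x200000UL
#define LARGE_PAGE_MASK  (~(LARGE_PAGE_SIZE - 1))

/* An address can be covered by a large PTE only if it sits on a
 * large page boundary. */
static int large_page_aligned(unsigned long addr)
{
	return (addr & (LARGE_PAGE_SIZE - 1)) == 0;
}

/* Round up to the next large page boundary; this is the
 * (x + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK pattern used below. */
static unsigned long large_page_align_up(unsigned long addr)
{
	return (addr + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;
}

int main(void)
{
	unsigned long addr = 0x1234000UL;

	printf("0x%lX aligned? %d, rounds up to 0x%lX\n",
	       addr, large_page_aligned(addr), large_page_align_up(addr));
	/* prints: 0x1234000 aligned? 0, rounds up to 0x1400000 */
	return 0;
}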


@@ -20,6 +20,7 @@
#define PS_NORMAL (PS_INTERRUPTIBLE | PS_UNINTERRUPTIBLE)
#define USE_LARGE_PAGES
struct vm_range {
	struct list_head list;


@@ -96,12 +96,34 @@ void update_process_page_table(struct process *process, struct vm_range *range,
	unsigned long flags = aal_mc_spinlock_lock(&process->vm->page_table_lock);

	p = range->start;
	while (p < range->end) {
#ifdef USE_LARGE_PAGES
		/* Use a large PTE if both the virtual and the physical address
		 * are large page aligned and at least LARGE_PAGE_SIZE of the
		 * range is left */
		if ((p & (LARGE_PAGE_SIZE - 1)) == 0 &&
		    (pa & (LARGE_PAGE_SIZE - 1)) == 0 &&
		    (range->end - p) >= LARGE_PAGE_SIZE) {

			if (aal_mc_pt_set_large_page(process->vm->page_table, (void *)p,
			        pa, PTATTR_WRITABLE | PTATTR_USER | flag) != 0) {
				kprintf("ERROR: setting large page for 0x%lX -> 0x%lX\n", p, pa);
				panic("");
			}

			dkprintf("large page set for 0x%lX -> 0x%lX\n", p, pa);

			pa += LARGE_PAGE_SIZE;
			p += LARGE_PAGE_SIZE;
		}
		else {
#endif
			aal_mc_pt_set_page(process->vm->page_table, (void *)p,
			        pa, PTATTR_WRITABLE | PTATTR_USER | flag);

			pa += PAGE_SIZE;
			p += PAGE_SIZE;
#ifdef USE_LARGE_PAGES
		}
#endif
	}

	aal_mc_spinlock_unlock(&process->vm->page_table_lock, flags);
}
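A quick worked example of what the round-up in the next hunk produces, again assuming 4 KiB base pages and 2 MiB large pages; the starting address is hypothetical:

#include <stdio.h>

#define PAGE_SHIFT       12
#define LARGE_PAGE_SIZE  0x200000UL
#define LARGE_PAGE_MASK  (~(LARGE_PAGE_SIZE - 1))

int main(void)
{
	/* a region end that is page aligned but not large page aligned */
	unsigned long aligned_end = 0x603000UL;
	unsigned long old_aligned_end = aligned_end;

	/* round up to the next large page boundary, as the hunk below does */
	aligned_end = (aligned_end + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;

	/* the gap is mapped with regular 4 KiB PTEs; the large page
	 * mapping only starts at the rounded-up boundary */
	printf("gap: 0x%lX - 0x%lX, %lu regular pages\n",
	       old_aligned_end, aligned_end,
	       (aligned_end - old_aligned_end) >> PAGE_SHIFT);
	/* prints: gap: 0x603000 - 0x800000, 509 regular pages */
	return 0;
}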
@@ -275,6 +297,50 @@ unsigned long extend_process_region(struct process *proc,
	aligned_new_end = (address + PAGE_SIZE - 1) & PAGE_MASK;

#ifdef USE_LARGE_PAGES
	if (aligned_new_end - aligned_end >= LARGE_PAGE_SIZE) {
		unsigned long p_aligned;

		if ((aligned_end & (LARGE_PAGE_SIZE - 1)) != 0) {
			unsigned long old_aligned_end = aligned_end;

			aligned_end = (aligned_end + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;

			/* Fill in the gap between old_aligned_end and aligned_end
			 * with regular pages */
			p = allocate_pages((aligned_end - old_aligned_end) >> PAGE_SHIFT, 0);
			add_process_memory_range(proc, old_aligned_end, aligned_end,
			                         virt_to_phys(p), 0);

			dkprintf("filled in gap for LARGE_PAGE_SIZE aligned start: 0x%lX -> 0x%lX\n",
			         old_aligned_end, aligned_end);
		}

		/* Add large region for the actual mapping */
		aligned_new_end = (aligned_new_end + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;
		address = aligned_new_end;

		p = allocate_pages((aligned_new_end - aligned_end + LARGE_PAGE_SIZE)
		                   >> PAGE_SHIFT, 0);
		p_aligned = ((unsigned long)p + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;

		if (p_aligned > (unsigned long)p) {
			free_pages(p, (p_aligned - (unsigned long)p) >> PAGE_SHIFT);
		}

		add_process_memory_range(proc, aligned_end, aligned_new_end,
		                         virt_to_phys((void *)p_aligned), 0);

		dkprintf("largePTE area: 0x%lX - 0x%lX (s: %lu) -> 0x%lX -\n",
		         aligned_end, aligned_new_end,
		         (aligned_new_end - aligned_end),
		         virt_to_phys((void *)p_aligned));

		return address;
	}
#endif
	p = allocate_pages((aligned_new_end - aligned_end) >> PAGE_SHIFT, 0);
	if (!p) {

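Both extend_process_region() above and the mmap path in the next file need a physically contiguous block that starts on a large page boundary, while the page allocator only guarantees base page alignment. Both sites use the same trick: over-allocate by one large page, round the start up, and give back the unaligned head. A user-space sketch of the idea; malloc and free_pages_head are hypothetical stand-ins for the kernel allocator, so the head trim is only marked, not actually performed:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT       12
#define LARGE_PAGE_SIZE  0x200000UL
#define LARGE_PAGE_MASK  (~(LARGE_PAGE_SIZE - 1))

/* Hypothetical stand-ins for the kernel's page allocator. */
static void *alloc_pages(size_t npages) { return malloc(npages << PAGE_SHIFT); }
static void free_pages_head(unsigned long p, size_t npages)
{
	/* a real page allocator could take these pages back; malloc
	 * cannot free a partial block, so this is a no-op marker */
	(void)p; (void)npages;
}

/* Mirror of the p/p_aligned logic in the commit: allocate size plus
 * one extra large page, align the start up, trim the head. */
static unsigned long alloc_large_aligned(size_t size)
{
	unsigned long p = (unsigned long)alloc_pages((size + LARGE_PAGE_SIZE) >> PAGE_SHIFT);
	unsigned long p_aligned = (p + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;

	if (p_aligned > p)
		free_pages_head(p, (p_aligned - p) >> PAGE_SHIFT);

	return p_aligned;
}

int main(void)
{
	unsigned long block = alloc_large_aligned(4 * LARGE_PAGE_SIZE);
	printf("block at 0x%lX, 2 MiB aligned: %d\n",
	       block, (block & (LARGE_PAGE_SIZE - 1)) == 0);
	return 0;
}

Note that the commit frees only the unaligned head; the unused tail past the aligned block stays allocated, so up to one large page of physical memory can remain tied up per allocation.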

@@ -365,11 +365,62 @@ SYSCALL_DECLARE(mmap)
	int r = aal_mc_pt_virt_to_phys(cpu_local_var(current)->vm->page_table, (void *)s, &pa);

	// the va range does not overlap with an existing mmap
	if (r != 0) {
#ifdef USE_LARGE_PAGES
		// use large pages if the mapping is big enough
		if (e - s >= LARGE_PAGE_SIZE) {
			unsigned long old_s = s;
			unsigned long p;
			unsigned long p_aligned;

			// fill in the gap with regular pages up to the first
			// large page aligned address
			if ((s & (LARGE_PAGE_SIZE - 1)) != 0) {
				s = (s + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;

				// allocate physical memory
				pa = virt_to_phys(aal_mc_alloc_pages(
				         (s - old_s) >> PAGE_SHIFT, 0));

				// add to the page table and register the memory range
				add_process_memory_range(cpu_local_var(current),
				                         old_s, s, pa, 0);

				dkprintf("filled in gap for LARGE_PAGE_SIZE aligned start: 0x%lX -> 0x%lX\n",
				         old_s, s);
			}

			e = (e + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;
			p = (unsigned long)aal_mc_alloc_pages((e - s + LARGE_PAGE_SIZE)
			                                      >> PAGE_SHIFT, 0);
			p_aligned = (p + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;

			// free the unneeded, unaligned head of the allocation
			if (p_aligned > p) {
				free_pages((void *)p, (p_aligned - (unsigned long)p)
				           >> PAGE_SHIFT);
			}

			// add the range and the mapping
			add_process_memory_range(cpu_local_var(current), s, e,
			                         virt_to_phys((void *)p_aligned), 0);

			dkprintf("largePTE area: 0x%lX - 0x%lX (s: %lu) -> 0x%lX -\n",
			         s, e, (e - s), virt_to_phys((void *)p_aligned));
		}
		else {
#endif
			// allocate physical memory
			pa = virt_to_phys(aal_mc_alloc_pages(range_npages, 0));

			// add to the page table and register the memory range
			add_process_memory_range(cpu_local_var(current), s, e, pa, 0);

			dkprintf("syscall.c,pa allocated=%lx\n", pa);
#ifdef USE_LARGE_PAGES
		}
#endif
	} else {
		kprintf("syscall.c,pa found=%lx\n", pa);
		// we need to clear the range to avoid BSS contamination, even when reusing a physical memory range
@@ -403,9 +454,18 @@ SYSCALL_DECLARE(mmap)
	        region->map_end,
	        s + len);
	aal_mc_spinlock_unlock(&cpu_local_var(current)->vm->memory_range_lock, flags);
dkprintf("syscall.c,mmap,map_end=%lx,s+len=%lx\n", region->map_end, s+len);
#ifdef USE_LARGE_PAGES
if (region->map_end >= s + len) {
/* NOTE: extend_process_region() might have large page aligned */
return region->map_end - len;
}
#else
if (region->map_end == s + len) return s;
#endif
else {
return -EINVAL;
}
	} else if ((aal_mc_syscall_arg3(ctx) & 0x02) == 0x02) {
		// #define MAP_PRIVATE 0x02