delete old page fault handlers

NAKAMURA Gou
2014-05-07 22:10:21 +09:00
parent 71d1359d53
commit e5aad641ac


@@ -984,285 +984,6 @@ out:
	return error;
}
#if 0
static int page_fault_process_memory_range(struct process_vm *vm,
		struct vm_range *range, uintptr_t fault_addr, uint64_t reason)
{
	int error;
	int npages;
	void *virt = NULL;
	void *ptepgaddr;
	size_t ptepgsize;
	int ptep2align;
	void *pgaddr;
	size_t pgsize;
	int p2align;
	uintptr_t phys;
	enum ihk_mc_pt_attribute attr;
	pte_t *ptep;
	off_t off;
	struct page *page = NULL;
	dkprintf("[%d]page_fault_process_memory_range(%p,%lx-%lx %lx,%lx)\n",
			ihk_mc_get_processor_id(), vm, range->start,
			range->end, range->flag, fault_addr);
	ihk_mc_spinlock_lock_noirq(&vm->page_table_lock);
	/* (1) check PTE */
	ptep = ihk_mc_pt_lookup_pte(vm->page_table, (void *)fault_addr,
			&ptepgaddr, &ptepgsize, &ptep2align);
	if (ptep && !pte_is_null(ptep)) {
		if (!pte_is_present(ptep)) {
			error = -EFAULT;
			kprintf("[%d]page_fault_process_memory_range"
					"(%p,%lx-%lx %lx,%lx):"
					"disabled page. %d\n",
					ihk_mc_get_processor_id(), vm,
					range->start, range->end,
					range->flag, fault_addr, error);
			goto out;
		}
		error = 0;
		kprintf("[%d]page_fault_process_memory_range"
				"(%p,%lx-%lx %lx,%lx):already mapped. %d\n",
				ihk_mc_get_processor_id(), vm, range->start,
				range->end, range->flag, fault_addr, error);
		goto out;
	}
	/* (2) select page size */
#ifdef USE_LARGE_PAGES
	if (!ptep) {
		/* get largest page size */
		error = arch_get_smaller_page_size(NULL, -1, &ptepgsize, &ptep2align);
		if (error) {
			kprintf("[%d]page_fault_process_memory_range"
					"(%p,%lx-%lx %lx,%lx):"
					"get pgsize failed. %d\n",
					ihk_mc_get_processor_id(), vm,
					range->start, range->end,
					range->flag, fault_addr, error);
			goto out;
		}
	}
#else
	if (!ptep || (ptepgsize != PAGE_SIZE)) {
		ptep = NULL;
		ptepgsize = PAGE_SIZE;
		ptep2align = PAGE_P2ALIGN;
	}
#endif
	pgsize = ptepgsize;
	p2align = ptep2align;
	/* (3) get physical page */
	for (;;) {
		pgaddr = (void *)(fault_addr & ~(pgsize - 1));
		if ((range->start <= (uintptr_t)pgaddr)
				&& (((uintptr_t)pgaddr + pgsize) <= range->end)) {
			npages = pgsize / PAGE_SIZE;
			if (range->memobj) {
				off = range->objoff + ((uintptr_t)pgaddr - range->start);
				error = memobj_get_page(range->memobj, off, p2align, &phys);
				if (!error) {
					page = phys_to_page(phys);
					break;
				}
				else if (error == -ERESTART) {
					goto out;
				}
				else if (error != -ENOMEM) {
					kprintf("[%d]page_fault_process_memory_range"
							"(%p,%lx-%lx %lx,%lx):"
							"get page failed. %d\n",
							ihk_mc_get_processor_id(), vm,
							range->start, range->end,
							range->flag, fault_addr, error);
					goto out;
				}
			}
			else {
				virt = ihk_mc_alloc_aligned_pages(npages,
						p2align, IHK_MC_AP_NOWAIT);
				if (virt) {
					phys = virt_to_phys(virt);
					page_map(phys_to_page(phys));
					memset(virt, 0, pgsize);
					break;
				}
			}
		}
		/* (4) if failed, select smaller page size, and retry */
		ptep = NULL;
		error = arch_get_smaller_page_size(
				vm, pgsize, &pgsize, &p2align);
		if (error) {
			kprintf("[%d]page_fault_process_memory_range"
					"(%p,%lx-%lx %lx,%lx):"
					"get pgsize failed. %d\n",
					ihk_mc_get_processor_id(), vm,
					range->start, range->end,
					range->flag, fault_addr, error);
			goto out;
		}
	}
	/* (5) mapping */
	attr = arch_vrflag_to_ptattr(range->flag, reason, ptep);
	if (range->memobj && (range->flag & VR_PRIVATE) && (range->flag & VR_PROT_WRITE)) {
		/* for copy-on-write */
		attr &= ~PTATTR_WRITABLE;
	}
	if (ptep) {
		error = ihk_mc_pt_set_pte(vm->page_table, ptep, pgsize, phys, attr);
		if (error) {
			kprintf("[%d]page_fault_process_memory_range"
					"(%p,%lx-%lx %lx,%lx):"
					"set pte failed. %d\n",
					ihk_mc_get_processor_id(), vm,
					range->start, range->end,
					range->flag, fault_addr, error);
			goto out;
		}
	}
	else {
		error = ihk_mc_pt_set_range(vm->page_table, pgaddr,
				pgaddr+pgsize, phys, attr);
		if (error) {
			kprintf("[%d]page_fault_process_memory_range"
					"(%p,%lx-%lx %lx,%lx):"
					"set range failed. %d\n",
					ihk_mc_get_processor_id(), vm,
					range->start, range->end,
					range->flag, fault_addr, error);
			goto out;
		}
	}
	virt = NULL; /* avoid ihk_mc_free_pages() */
	page = NULL; /* avoid page_unmap() */
	error = 0;
out:
	ihk_mc_spinlock_unlock_noirq(&vm->page_table_lock);
	if (page) {
		int need_free;
		memobj_lock(range->memobj);
		need_free = page_unmap(page);
		memobj_unlock(range->memobj);
		if (need_free) {
			ihk_mc_free_pages(phys_to_virt(phys), npages);
		}
	}
	if (virt != NULL) {
		page_unmap(phys_to_page(phys));
		ihk_mc_free_pages(virt, npages);
	}
	dkprintf("[%d]page_fault_process_memory_range(%p,%lx-%lx %lx,%lx): %d\n",
			ihk_mc_get_processor_id(), vm, range->start,
			range->end, range->flag, fault_addr, error);
	return error;
}
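
The heart of the removed handler is steps (3) and (4) above: start from the largest page size the architecture offers and fall back to a smaller size whenever the allocation or memobj lookup fails. The following is a minimal user-space sketch of that fallback pattern only; alloc_aligned()/posix_memalign() and the hard-coded size list merely stand in for ihk_mc_alloc_aligned_pages() and arch_get_smaller_page_size(), so this is an illustration of the retry loop, not the kernel code.

#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for ihk_mc_alloc_aligned_pages(): returns NULL on failure. */
static void *alloc_aligned(size_t size, size_t align)
{
	void *p = NULL;

	if (posix_memalign(&p, align, size) != 0)
		return NULL;
	return p;
}

int main(void)
{
	/* Candidate page sizes, largest first; the values are illustrative. */
	static const size_t sizes[] = { 2UL << 20, 64UL << 10, 4UL << 10 };
	void *page = NULL;
	size_t chosen = 0;
	size_t i;

	/* (3) try to get a page at the current size ... */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		page = alloc_aligned(sizes[i], sizes[i]);
		if (page) {
			chosen = sizes[i];
			break;
		}
		/* (4) ... and if that fails, retry with a smaller size. */
	}
	if (!page)
		return 1;
	memset(page, 0, chosen);	/* anonymous pages start zero-filled */
	printf("mapped a %zu-byte page at %p\n", chosen, page);
	free(page);
	return 0;
}
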
static int protection_fault_process_memory_range(struct process_vm *vm, struct vm_range *range, uintptr_t fault_addr, uint64_t reason)
{
	int error;
	pte_t *ptep;
	void *pgaddr;
	size_t pgsize;
	int pgp2align;
	int npages;
	uintptr_t oldpa;
	void *oldkva;
	void *newkva;
	uintptr_t newpa;
	struct page *oldpage;
	enum ihk_mc_pt_attribute attr;
	dkprintf("protection_fault_process_memory_range(%p,%lx-%lx %lx,%lx)\n",
			vm, range->start, range->end, range->flag, fault_addr);
	if (!range->memobj) {
		error = -EFAULT;
		kprintf("protection_fault_process_memory_range"
				"(%p,%lx-%lx %lx,%lx):no memobj. %d\n",
				vm, range->start, range->end, range->flag,
				fault_addr, error);
		goto out;
	}
	ihk_mc_spinlock_lock_noirq(&vm->page_table_lock);
	ptep = ihk_mc_pt_lookup_pte(vm->page_table, (void *)fault_addr, &pgaddr, &pgsize, &pgp2align);
	if (!ptep || !pte_is_present(ptep)) {
		error = 0;
		kprintf("protection_fault_process_memory_range"
				"(%p,%lx-%lx %lx,%lx):page not found. %d\n",
				vm, range->start, range->end, range->flag,
				fault_addr, error);
		flush_tlb();
		goto out;
	}
	if (pgsize != PAGE_SIZE) {
		panic("protection_fault_process_memory_range:NYI:cow large page");
	}
	npages = 1 << pgp2align;
	oldpa = pte_get_phys(ptep);
	oldkva = phys_to_virt(oldpa);
	oldpage = phys_to_page(oldpa);
	if (oldpage) {
		newpa = memobj_copy_page(range->memobj, oldpa, pgp2align);
		newkva = phys_to_virt(newpa);
	}
	else {
		newkva = ihk_mc_alloc_aligned_pages(npages, pgp2align,
				IHK_MC_AP_NOWAIT);
		if (!newkva) {
			error = -ENOMEM;
			kprintf("protection_fault_process_memory_range"
					"(%p,%lx-%lx %lx,%lx):"
					"alloc page failed. %d\n",
					vm, range->start, range->end,
					range->flag, fault_addr, error);
			goto out;
		}
		memcpy(newkva, oldkva, pgsize);
		newpa = virt_to_phys(newkva);
		page_map(phys_to_page(newpa));
	}
	attr = arch_vrflag_to_ptattr(range->flag, reason, ptep);
	error = ihk_mc_pt_set_pte(vm->page_table, ptep, pgsize, newpa, attr);
	if (error) {
		kprintf("protection_fault_process_memory_range"
				"(%p,%lx-%lx %lx,%lx):set pte failed. %d\n",
				vm, range->start, range->end, range->flag,
				fault_addr, error);
		panic("protection_fault_process_memory_range:ihk_mc_pt_set_pte failed.");
		page_unmap(phys_to_page(newpa));
		ihk_mc_free_pages(newkva, npages);
		goto out;
	}
	flush_tlb_single(fault_addr);
	error = 0;
out:
	ihk_mc_spinlock_unlock_noirq(&vm->page_table_lock);
	dkprintf("protection_fault_process_memory_range"
			"(%p,%lx-%lx %lx,%lx): %d\n",
			vm, range->start, range->end, range->flag,
			fault_addr, error);
	return error;
}
#endif
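
For reference, the removed protection_fault_process_memory_range() above breaks copy-on-write: it obtains a private copy of the faulting page (from the memobj or a fresh allocation), copies the shared contents, and re-points the PTE at the copy with write permission. Below is a minimal sketch of that break-COW step, assuming a hypothetical remap_pte() in place of ihk_mc_pt_set_pte() and plain aligned_alloc() in place of the kernel page allocator; it shows the pattern only, not the kernel code.

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Toy stand-in for a virtual mapping: one page and its write permission. */
struct mapping {
	void *page;	/* currently mapped (possibly shared) page */
	int writable;	/* whether the "PTE" allows writes */
};

/* Hypothetical helper standing in for ihk_mc_pt_set_pte(): install the
 * private copy and grant write access. */
static void remap_pte(struct mapping *m, void *new_page)
{
	m->page = new_page;
	m->writable = 1;
}

/* Break copy-on-write for one page, as the removed handler does. */
int cow_break(struct mapping *m)
{
	void *copy = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!copy)
		return -1;			/* the kernel code returns -ENOMEM here */
	memcpy(copy, m->page, PAGE_SIZE);	/* duplicate the shared page */
	remap_pte(m, copy);			/* map the private copy writable */
	return 0;
}
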
static int page_fault_process_memory_range(struct process_vm *vm, struct vm_range *range, uintptr_t fault_addr, uint64_t reason)
{
	int error;
@@ -1419,67 +1140,6 @@ static int do_page_fault_process(struct process *proc, void *fault_addr0, uint64
		goto out;
	}
#if 0
	if (reason & PF_PROT) {
		error = protection_fault_process_memory_range(vm, range, fault_addr, reason);
		if (error) {
			kprintf("[%d]do_page_fault_process(%p,%lx,%lx):"
					"protection range failed. %d\n",
					ihk_mc_get_processor_id(), proc,
					fault_addr0, reason, error);
			goto out;
		}
	}
	else if (reason & PF_POPULATE) {
		pte_t *ptep;
		void *ptepgaddr;
		size_t ptepgsize;
		int ptep2align;
		ihk_mc_spinlock_lock_noirq(&vm->page_table_lock);
		ptep = ihk_mc_pt_lookup_pte(vm->page_table, fault_addr0,
				&ptepgaddr, &ptepgsize, &ptep2align);
		ihk_mc_spinlock_unlock_noirq(&vm->page_table_lock);
		if (!ptep || pte_is_null(ptep)) {
			error = page_fault_process_memory_range(vm, range, fault_addr, reason);
			if (error == -ERESTART) {
				goto out;
			}
			else if (error) {
				kprintf("[%d]do_page_fault_process(%p,%lx,%lx):"
						"fault range failed. %d\n",
						ihk_mc_get_processor_id(), proc,
						fault_addr0, reason, error);
				goto out;
			}
		}
		else if (!pte_is_writable(ptep) && (range->flag & VR_PROT_WRITE)) {
			error = protection_fault_process_memory_range(vm, range, fault_addr, reason);
			if (error) {
				kprintf("[%d]do_page_fault_process(%p,%lx,%lx):"
						"protection range failed. %d\n",
						ihk_mc_get_processor_id(), proc,
						fault_addr0, reason, error);
				goto out;
			}
		}
	}
	else {
		error = page_fault_process_memory_range(vm, range, fault_addr, reason);
		if (error == -ERESTART) {
			goto out;
		}
		if (error) {
			kprintf("[%d]do_page_fault_process(%p,%lx,%lx):"
					"fault range failed. %d\n",
					ihk_mc_get_processor_id(), proc,
					fault_addr0, reason, error);
			goto out;
		}
	}
#endif
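
The #if 0 block above dispatched on the fault reason before the unified call below took over: a protection fault goes to the COW path, a populate request maps the page only if no PTE exists yet (or breaks COW early for writable ranges), and anything else takes the ordinary demand-paging path. The condensed sketch below shows only that dispatch shape; the PF_* values and the helper stubs are illustrative, not the kernel definitions.

#include <stdint.h>
#include <stdbool.h>

/* Illustrative flag values, not the kernel's PF_* definitions. */
#define PF_PROT		0x1ULL
#define PF_POPULATE	0x2ULL

/* Stubs standing in for the two removed handlers and the PTE queries. */
static int handle_protection_fault(uintptr_t addr) { (void)addr; return 0; }
static int handle_missing_page(uintptr_t addr) { (void)addr; return 0; }
static bool pte_exists(uintptr_t addr) { (void)addr; return false; }
static bool pte_writable(uintptr_t addr) { (void)addr; return false; }
static bool range_writable(void) { return true; }

int dispatch_fault(uintptr_t addr, uint64_t reason)
{
	if (reason & PF_PROT) {
		/* write hit a present but read-only page: break COW */
		return handle_protection_fault(addr);
	}
	if (reason & PF_POPULATE) {
		if (!pte_exists(addr)) {
			/* pre-populate only pages that are not mapped yet */
			return handle_missing_page(addr);
		}
		if (!pte_writable(addr) && range_writable()) {
			/* writable range still mapped read-only: break COW now */
			return handle_protection_fault(addr);
		}
		return 0;
	}
	/* ordinary demand-paging fault */
	return handle_missing_page(addr);
}
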
	error = page_fault_process_memory_range(vm, range, fault_addr, reason);
	if (error == -ERESTART) {
		goto out;