Revert "xpmem: Support large page attachment"

This reverts commit a8696d811d.

Conflicts:
	kernel/include/process.h
	kernel/syscall.c
	kernel/xpmem.c

Change-Id: I726e74450f6228d3fc78fc62dda15b2067732a53
Masamichi Takagi committed 2020-06-16 10:42:53 +09:00
parent 29d27b7c8d
commit 6332903f0d
35 changed files with 104 additions and 2394 deletions
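
For orientation, the interface change being undone is easiest to see in the prototypes. As reconstructed from the hunks below, the revert drops the void *private_data argument (the XPMEM attachment) and the vrf0 flag word that the large-page attachment support had threaded through the mapping path, restoring:

int add_process_memory_range(struct process_vm *vm,
		unsigned long start, unsigned long end,
		unsigned long phys, unsigned long flag,
		struct memobj *memobj, off_t offset,
		int pgshift, struct vm_range **rp);
intptr_t do_mmap(uintptr_t addr0, size_t len0, int prot, int flags, int fd,
		off_t off0);

Instead of passing the attachment into do_mmap(), xpmem_attach() now looks up the newly mapped vm_range after the fact and sets vmr->private_data itself, as shown in the kernel/xpmem.c hunks below.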

View File

@@ -142,7 +142,7 @@ int prepare_process_ranges_args_envs(struct thread *thread,
 		if (add_process_memory_range(vm, s, e, NOPHYS, flags, NULL, 0,
 					pn->sections[i].len > LARGE_PAGE_SIZE ?
 					LARGE_PAGE_SHIFT : PAGE_SHIFT,
-					NULL, &range) != 0) {
+					&range) != 0) {
 			kprintf("ERROR: adding memory range for ELF section %i\n", i);
 			goto err;
 		}
@@ -284,7 +284,7 @@ int prepare_process_ranges_args_envs(struct thread *thread,
 	dkprintf("%s: args_envs: %d pages\n",
 		__func__, argenv_page_count);
 	if(add_process_memory_range(vm, addr, e, args_envs_p,
-			flags, NULL, 0, PAGE_SHIFT, NULL, NULL) != 0){
+			flags, NULL, 0, PAGE_SHIFT, NULL) != 0){
 		ihk_mc_free_pages_user(args_envs, argenv_page_count);
 		kprintf("ERROR: adding memory range for args/envs\n");
 		goto err;

View File

@@ -55,7 +55,6 @@
 #define VR_MEMTYPE_MASK 0x0f000000
 #define VR_PAGEOUT 0x10000000
 #define VR_DONTDUMP 0x20000000
-#define VR_XPMEM 0x40000000
 #define VR_WIPEONFORK 0x80000000
 #define PROT_TO_VR_FLAG(prot) (((unsigned long)(prot) << 16) & VR_PROT_MASK)
@@ -805,7 +804,7 @@ int add_process_memory_range(struct process_vm *vm,
 		unsigned long start, unsigned long end,
 		unsigned long phys, unsigned long flag,
 		struct memobj *memobj, off_t offset,
-		int pgshift, void *private_data, struct vm_range **rp);
+		int pgshift, struct vm_range **rp);
 int remove_process_memory_range(struct process_vm *vm, unsigned long start,
 		unsigned long end, int *ro_freedp);
 int split_process_memory_range(struct process_vm *vm,

View File

@@ -508,7 +508,7 @@ enum set_cputime_mode {
 void set_cputime(enum set_cputime_mode mode);
 int do_munmap(void *addr, size_t len, int holding_memory_range_lock);
 intptr_t do_mmap(uintptr_t addr0, size_t len0, int prot, int flags, int fd,
-		off_t off0, const int vrf0, void *private_data);
+		off_t off0);
 void clear_host_pte(uintptr_t addr, size_t len, int holding_memory_range_lock);
 typedef int32_t key_t;
 int do_shmget(key_t key, size_t size, int shmflg);

View File

@@ -27,20 +27,6 @@ int xpmem_remove_process_memory_range(struct process_vm *vm,
 	struct vm_range *vmr);
 int xpmem_fault_process_memory_range(struct process_vm *vm,
 	struct vm_range *vmr, unsigned long vaddr, uint64_t reason);
-int xpmem_update_process_page_table(struct process_vm *vm,
-	struct vm_range *vmr);
-struct xpmem_attachment {
-	mcs_rwlock_lock_t at_lock;	/* att lock */
-	unsigned long vaddr;		/* starting address of seg attached */
-	unsigned long at_vaddr;		/* address where seg is attached */
-	size_t at_size;			/* size of seg attachment */
-	struct vm_range *at_vmr;	/* vm_range where seg is attachment */
-	int flags;			/* att attributes and state */
-	ihk_atomic_t refcnt;		/* references to att */
-	struct xpmem_access_permit *ap;	/* associated access permit */
-	struct list_head att_list;	/* atts linked to access permit */
-	struct process_vm *vm;		/* process_vm attached to */
-};
 #endif /* _XPMEM_H */

View File

@@ -177,6 +177,19 @@ struct xpmem_access_permit {
 	struct list_head ap_hashlist;	/* access permit hash list */
 };
+struct xpmem_attachment {
+	mcs_rwlock_lock_t at_lock;	/* att lock */
+	unsigned long vaddr;		/* starting address of seg attached */
+	unsigned long at_vaddr;		/* address where seg is attached */
+	size_t at_size;			/* size of seg attachment */
+	struct vm_range *at_vmr;	/* vm_range where seg is attachment */
+	volatile int flags;		/* att attributes and state */
+	ihk_atomic_t refcnt;		/* references to att */
+	struct xpmem_access_permit *ap;	/* associated access permit */
+	struct list_head att_list;	/* atts linked to access permit */
+	struct process_vm *vm;		/* process_vm attached to */
+};
 struct xpmem_partition {
 	ihk_atomic_t n_opened;		/* # of /dev/xpmem opened */
 	struct xpmem_hashlist tg_hashtable[]; /* locks + tg hash lists */
@@ -318,7 +331,6 @@ static void xpmem_ap_deref(struct xpmem_access_permit *ap);
 static void xpmem_att_deref(struct xpmem_attachment *att);
 static int xpmem_validate_access(struct xpmem_access_permit *, off_t, size_t,
 				int, unsigned long *);
-static int is_remote_vm(struct process_vm *vm);
 /*
  * Inlines that mark an internal driver structure as being destroyable or not.

View File

@@ -1316,7 +1316,7 @@ int add_process_memory_range(struct process_vm *vm,
 		unsigned long start, unsigned long end,
 		unsigned long phys, unsigned long flag,
 		struct memobj *memobj, off_t offset,
-		int pgshift, void *private_data, struct vm_range **rp)
+		int pgshift, struct vm_range **rp)
 {
 	dkprintf("%s: start=%lx,end=%lx,phys=%lx,flag=%lx\n", __FUNCTION__, start, end, phys, flag);
 	struct vm_range *range;
@@ -1344,7 +1344,7 @@
 	range->memobj = memobj;
 	range->objoff = offset;
 	range->pgshift = pgshift;
-	range->private_data = private_data;
+	range->private_data = NULL;
 	rc = 0;
 	if (phys == NOPHYS) {
@@ -1356,10 +1356,6 @@
 	else if (flag & VR_IO_NOCACHE) {
 		rc = update_process_page_table(vm, range, phys, PTATTR_UNCACHABLE);
 	}
-	else if (flag & VR_XPMEM) {
-		range->memobj->flags |= MF_XPMEM;
-		rc = xpmem_update_process_page_table(vm, range);
-	}
 	else if (flag & VR_DEMAND_PAGING) {
 		dkprintf("%s: range: 0x%lx - 0x%lx is demand paging\n",
 			__FUNCTION__, range->start, range->end);
@@ -1387,8 +1383,7 @@
 	}
 	/* Clear content! */
-	if (phys != NOPHYS
-			&& !(flag & (VR_REMOTE | VR_DEMAND_PAGING | VR_XPMEM))
+	if (phys != NOPHYS && !(flag & (VR_REMOTE | VR_DEMAND_PAGING))
 			&& ((flag & VR_PROT_MASK) != VR_PROT_NONE)) {
 #if 1
 		memset((void *)phys_to_virt(phys), 0, end - start);
@@ -2406,8 +2401,7 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
 	vrflag |= VR_MAXPROT_READ | VR_MAXPROT_WRITE | VR_MAXPROT_EXEC;
 #define NOPHYS ((uintptr_t)-1)
 	if ((rc = add_process_memory_range(thread->vm, start, end, NOPHYS,
-					vrflag, NULL, 0, USER_STACK_PAGE_SHIFT,
-					NULL, &range)) != 0) {
+					vrflag, NULL, 0, USER_STACK_PAGE_SHIFT, &range)) != 0) {
 		ihk_mc_free_pages_user(stack, minsz >> PAGE_SHIFT);
 		kprintf("%s: error addding process memory range: %d\n", rc);
 		return rc;
@@ -2571,7 +2565,7 @@ unsigned long extend_process_region(struct process_vm *vm,
 	if ((rc = add_process_memory_range(vm, end_allocated, new_end_allocated,
 				(p == 0 ? 0 : virt_to_phys(p)), flag, NULL, 0,
-				align_shift, NULL, NULL)) != 0) {
+				align_shift, NULL)) != 0) {
 		ihk_mc_free_pages_user(p, (new_end_allocated - end_allocated) >> PAGE_SHIFT);
 		return end_allocated;
 	}

View File

@@ -1598,12 +1598,6 @@ static int search_free_space(size_t len, int pgshift, uintptr_t *addrp)
 	/* try given addr first */
 	addr = *addrp;
 	if (addr != 0) {
-		if ((region->user_end <= addr)
-				|| ((region->user_end - len) < addr)) {
-			error = -ENOMEM;
-			goto out;
-		}
 		range = lookup_process_memory_range(thread->vm, addr, addr+len);
 		if (range == NULL)
 			goto out;
@@ -1639,8 +1633,7 @@ out:
 intptr_t
 do_mmap(const uintptr_t addr0, const size_t len0, const int prot,
-	const int flags, const int fd, const off_t off0,
-	const int vrf0, void *private_data)
+	const int flags, const int fd, const off_t off0)
 {
 	struct thread *thread = cpu_local_var(current);
 	struct vm_regions *region = &thread->vm->region;
@@ -1702,8 +1695,7 @@ do_mmap(const uintptr_t addr0, const size_t len0, const int prot,
 		}
 		p2align = pgshift - PAGE_SHIFT;
 	}
-	else if ((((flags & MAP_PRIVATE) && (flags & MAP_ANONYMOUS))
-			|| (vrf0 & VR_XPMEM))
+	else if ((flags & MAP_PRIVATE) && (flags & MAP_ANONYMOUS)
 			&& !proc->thp_disable) {
 		pgshift = 0; /* transparent huge page */
 		p2align = PAGE_P2ALIGN;
@@ -1734,29 +1726,7 @@
 	}
 	else if (flags & MAP_ANONYMOUS) {
 		/* Obtain mapping address */
-		if (vrf0 && VR_XPMEM) {
-			/* Fit address format to segment area */
-			struct xpmem_attachment *att;
-			uintptr_t prev_addr;
-			att = (struct xpmem_attachment *)private_data;
-			addr = att->vaddr;
-			while (!error) {
-				prev_addr = addr;
-				error = search_free_space(len,
-						PAGE_SHIFT + p2align, &addr);
-				if (prev_addr == addr) {
-					break;
-				}
-				addr = prev_addr +
-					(1UL << (PAGE_SHIFT + p2align));
-			}
-		}
-		else {
-			error = search_free_space(len,
-					PAGE_SHIFT + p2align, &addr);
-		}
+		error = search_free_space(len, PAGE_SHIFT + p2align, &addr);
 		if (error) {
 			ekprintf("do_mmap:search_free_space(%lx,%lx,%d) failed. %d\n",
 				len, region->map_end, p2align, error);
@@ -1766,13 +1736,12 @@
 	/* do the map */
 	vrflags = VR_NONE;
-	vrflags |= vrf0;
 	vrflags |= PROT_TO_VR_FLAG(prot);
 	vrflags |= (flags & MAP_PRIVATE)? VR_PRIVATE: 0;
 	vrflags |= (flags & MAP_LOCKED)? VR_LOCKED: 0;
 	vrflags |= VR_DEMAND_PAGING;
-	if (flags & MAP_ANONYMOUS && !anon_on_demand) {
-		if (flags & MAP_PRIVATE || vrflags & VR_XPMEM) {
+	if (flags & MAP_ANONYMOUS) {
+		if (!anon_on_demand && (flags & MAP_PRIVATE)) {
 			vrflags &= ~VR_DEMAND_PAGING;
 		}
 	}
@@ -1910,7 +1879,6 @@
 	}
 	/* Prepopulated ANONYMOUS mapping */
 	else if (!(vrflags & VR_DEMAND_PAGING)
-			&& !(flags & MAP_SHARED)
 			&& ((vrflags & VR_PROT_MASK) != VR_PROT_NONE)) {
 		npages = len >> PAGE_SHIFT;
 		/* Small allocations mostly benefit from closest RAM,
@@ -1989,7 +1957,7 @@
 	vrflags |= VRFLAG_PROT_TO_MAXPROT(PROT_TO_VR_FLAG(maxprot));
 	error = add_process_memory_range(thread->vm, addr, addr+len, phys,
-			vrflags, memobj, off, pgshift, private_data, &range);
+			vrflags, memobj, off, pgshift, &range);
 	if (error) {
 		kprintf("%s: add_process_memory_range failed for 0x%lx:%lu"
 			" flags: %lx, vrflags: %lx, pgshift: %d, error: %d\n",
@@ -4432,7 +4400,7 @@ perf_mmap(struct mckfd *sfd, ihk_mc_user_context_t *ctx)
 	flags |= MAP_ANONYMOUS;
 	prot |= PROT_WRITE;
-	rc = do_mmap(addr0, len0, prot, flags, fd, off0, 0, NULL);
+	rc = do_mmap(addr0, len0, prot, flags, fd, off0);
 	// setup perf_event_mmap_page
 	page = (struct perf_event_mmap_page *)rc;
@@ -5901,7 +5869,7 @@ SYSCALL_DECLARE(shmat)
 	}
 	error = add_process_memory_range(vm, addr, addr+len, -1,
-			vrflags, &obj->memobj, 0, obj->pgshift, NULL, NULL);
+			vrflags, &obj->memobj, 0, obj->pgshift, NULL);
 	if (error) {
 		if (!(prot & PROT_WRITE)) {
 			(void)set_host_vma(addr, len, PROT_READ | PROT_WRITE | PROT_EXEC, 1/* holding memory_range_lock */);
@@ -8698,7 +8666,7 @@ SYSCALL_DECLARE(mremap)
 	error = add_process_memory_range(thread->vm, newstart, newend, -1,
 			range->flag, range->memobj,
 			range->objoff + (oldstart - range->start),
-			0, NULL, NULL);
+			range->pgshift, NULL);
 	if (error) {
 		ekprintf("sys_mremap(%#lx,%#lx,%#lx,%#x,%#lx):"
 			"add failed. %d\n",

View File

@@ -423,11 +423,6 @@ static int xpmem_make(
 	struct xpmem_thread_group *seg_tg;
 	struct xpmem_segment *seg;
 	struct mcs_rwlock_node_irqsave lock;
-	struct process_vm *vm = cpu_local_var(current)->vm;
-	int ret;
-	pte_t *seg_pte = NULL;
-	size_t pgsize = 0, seg_size = 0;
-	unsigned long pf_addr;
 	XPMEM_DEBUG("call: vaddr=0x%lx, size=0x%lx, permit_type=%d, "
 		"permit_value=0%04lo",
@@ -459,27 +454,6 @@
 		return -EINVAL;
 	}
-	/* Page-in segment area */
-	pf_addr = vaddr;
-	while (pf_addr < vaddr + size) {
-		ret = page_fault_process_vm(vm, (void *)pf_addr,
-				PF_POPULATE | PF_WRITE | PF_USER);
-		if (ret) {
-			xpmem_tg_deref(seg_tg);
-			return -ENOENT;
-		}
-		seg_pte = xpmem_vaddr_to_pte(vm, pf_addr, &pgsize);
-		if (!seg_pte || pte_is_null(seg_pte)) {
-			xpmem_tg_deref(seg_tg);
-			return -ENOENT;
-		}
-		pf_addr += pgsize;
-		seg_size += pgsize;
-	}
-	if (seg_size > size) {
-		size = seg_size;
-	}
 	segid = xpmem_make_segid(seg_tg);
 	if (segid < 0) {
 		xpmem_tg_deref(seg_tg);
@@ -1037,6 +1011,7 @@ static int xpmem_attach(
 	struct xpmem_segment *seg;
 	struct xpmem_attachment *att;
 	struct mcs_rwlock_node_irqsave at_lock;
+	struct vm_range *vmr;
 	struct process_vm *vm = cpu_local_var(current)->vm;
 	XPMEM_DEBUG("call: apid=0x%lx, offset=0x%lx, size=0x%lx, vaddr=0x%lx, "
@@ -1151,15 +1126,37 @@
 	XPMEM_DEBUG("do_mmap(): vaddr=0x%lx, size=0x%lx, prot_flags=0x%lx, "
 		"flags=0x%lx, fd=%d, offset=0x%lx",
 		vaddr, size, prot_flags, flags, mckfd->fd, offset);
-	/* The new range is associated with shmobj because of
+	/* The new range uses on-demand paging and is associated with shmobj because of
 	   MAP_ANONYMOUS && !MAP_PRIVATE && MAP_SHARED */
-	at_vaddr = do_mmap(vaddr, size, prot_flags, flags, mckfd->fd,
-			offset, VR_XPMEM, att);
+	at_vaddr = do_mmap(vaddr, size, prot_flags, flags, mckfd->fd, offset);
 	if (IS_ERR((void *)(uintptr_t)at_vaddr)) {
 		ret = at_vaddr;
 		goto out_2;
 	}
 	XPMEM_DEBUG("at_vaddr=0x%lx", at_vaddr);
+	att->at_vaddr = at_vaddr;
+	ihk_rwspinlock_read_lock_noirq(&vm->memory_range_lock);
+	vmr = lookup_process_memory_range(vm, at_vaddr, at_vaddr + 1);
+	/* To identify pages of XPMEM attachment for rusage accounting */
+	if(vmr->memobj) {
+		vmr->memobj->flags |= MF_XPMEM;
+	} else {
+		ekprintf("%s: vmr->memobj equals to NULL\n", __FUNCTION__);
+	}
+	ihk_rwspinlock_read_unlock_noirq(&vm->memory_range_lock);
+	if (!vmr) {
+		ret = -ENOENT;
+		goto out_2;
+	}
+	vmr->private_data = att;
+	att->at_vmr = vmr;
 	*at_vaddr_p = at_vaddr + offset_in_page(att->vaddr);
@@ -1185,6 +1182,7 @@ out_1:
 	return ret;
 }
 static int xpmem_detach(
 	unsigned long at_vaddr)
 {
@@ -1880,117 +1878,6 @@ out_1:
 	return ret;
 }
-int xpmem_update_process_page_table(
-	struct process_vm *vm, struct vm_range *vmr)
-{
-	int ret = 0;
-	unsigned long seg_vaddr = 0;
-	unsigned long vaddr = vmr->start;
-	pte_t *pte = NULL;
-	pte_t *seg_pte = NULL;
-	struct xpmem_thread_group *ap_tg;
-	struct xpmem_thread_group *seg_tg;
-	struct xpmem_access_permit *ap;
-	struct xpmem_attachment *att;
-	struct xpmem_segment *seg;
-	size_t seg_pgsize;
-	size_t pgsize;
-	XPMEM_DEBUG("call: vmr=0x%p", vmr);
-	att = (struct xpmem_attachment *)vmr->private_data;
-	if (att == NULL) {
-		return -EFAULT;
-	}
-	xpmem_att_ref(att);
-	ap = att->ap;
-	xpmem_ap_ref(ap);
-	ap_tg = ap->tg;
-	xpmem_tg_ref(ap_tg);
-	if ((ap->flags & XPMEM_FLAG_DESTROYING) ||
-		(ap_tg->flags & XPMEM_FLAG_DESTROYING)) {
-		ret = -EFAULT;
-		goto out_1;
-	}
-	DBUG_ON(cpu_local_var(current)->proc->pid != ap_tg->tgid);
-	DBUG_ON(ap->mode != XPMEM_RDWR);
-	seg = ap->seg;
-	xpmem_seg_ref(seg);
-	seg_tg = seg->tg;
-	xpmem_tg_ref(seg_tg);
-	if ((seg->flags & XPMEM_FLAG_DESTROYING) ||
-		(seg_tg->flags & XPMEM_FLAG_DESTROYING)) {
-		ret = -ENOENT;
-		goto out_2;
-	}
-	att->at_vaddr = vmr->start;
-	att->at_vmr = vmr;
-	if ((att->flags & XPMEM_FLAG_DESTROYING) ||
-		(ap_tg->flags & XPMEM_FLAG_DESTROYING) ||
-		(seg_tg->flags & XPMEM_FLAG_DESTROYING)) {
-		goto out_2;
-	}
-	seg_vaddr = (att->vaddr & PAGE_MASK) + (vaddr - att->at_vaddr);
-	XPMEM_DEBUG("vaddr=%lx, seg_vaddr=%lx", vaddr, seg_vaddr);
-	while (vaddr < vmr->end) {
-		ret = xpmem_ensure_valid_page(seg, seg_vaddr);
-		if (ret != 0) {
-			goto out_2;
-		}
-		seg_pte = xpmem_vaddr_to_pte(seg_tg->vm, seg_vaddr,
-				&seg_pgsize);
-		if (seg_pte && !pte_is_null(seg_pte)) {
-			pte = xpmem_vaddr_to_pte(cpu_local_var(current)->vm,
-					vaddr, &pgsize);
-			if (pte && !pte_is_null(pte)) {
-				if (*seg_pte != *pte) {
-					ret = -EFAULT;
-					ekprintf("%s: ERROR: pte mismatch: "
-						"0x%lx != 0x%lx\n",
-						__func__, *seg_pte, *pte);
-				}
-				ihk_atomic_dec(&seg->tg->n_pinned);
-				goto out_2;
-			}
-			ret = xpmem_remap_pte(vm, vmr, vaddr,
-					0, seg, seg_vaddr);
-			if (ret) {
-				ekprintf("%s: ERROR: xpmem_remap_pte() failed %d\n",
-					__func__, ret);
-			}
-		}
-		flush_tlb_single(vaddr);
-		att->flags |= XPMEM_FLAG_VALIDPTEs;
-		seg_vaddr += seg_pgsize;
-		vaddr += seg_pgsize;
-	}
-out_2:
-	xpmem_tg_deref(seg_tg);
-	xpmem_seg_deref(seg);
-out_1:
-	xpmem_att_deref(att);
-	xpmem_ap_deref(ap);
-	xpmem_tg_deref(ap_tg);
-	XPMEM_DEBUG("return: ret=%d", ret);
-	return ret;
-}
 static int xpmem_remap_pte(
 	struct process_vm *vm,
@@ -2018,16 +1905,12 @@ static int xpmem_remap_pte(
 		"seg_vaddr=0x%lx",
 		vmr, vaddr, reason, seg->segid, seg_vaddr);
-	if (is_remote_vm(seg_tg->vm)) {
-		ihk_rwspinlock_read_lock_noirq(&seg_tg->vm->memory_range_lock);
-	}
+	ihk_rwspinlock_read_lock_noirq(&seg_tg->vm->memory_range_lock);
 	seg_vmr = lookup_process_memory_range(seg_tg->vm, seg_vaddr,
 			seg_vaddr + 1);
-	if (is_remote_vm(seg_tg->vm)) {
-		ihk_rwspinlock_read_unlock_noirq(&seg_tg->vm->memory_range_lock);
-	}
+	ihk_rwspinlock_read_unlock_noirq(&seg_tg->vm->memory_range_lock);
 	if (!seg_vmr) {
 		ret = -EFAULT;
@@ -2062,27 +1945,28 @@
 	att_attr = arch_vrflag_to_ptattr(vmr->flag, reason, att_pte);
 	XPMEM_DEBUG("att_attr=0x%lx", att_attr);
-	if (att_pte && !pgsize_is_contiguous(seg_pgsize)) {
-		ret = ihk_mc_pt_set_pte(vm->address_space->page_table, att_pte,
-				seg_pgsize, seg_phys, att_attr);
+	if (att_pte) {
+		ret = ihk_mc_pt_set_pte(vm->address_space->page_table, att_pte,
+			att_pgsize, seg_phys, att_attr);
 		if (ret) {
 			ret = -EFAULT;
-			ekprintf("%s: ERROR: ihk_mc_pt_set_pte() failed %d\n",
-				__func__, ret);
+			ekprintf("%s: ERROR: ihk_mc_pt_set_pte() failed %d\n",
+				__FUNCTION__, ret);
 			goto out;
 		}
+		// memory_stat_rss_add() is called by the process hosting the memory area
 	}
 	else {
-		ret = ihk_mc_pt_set_range(vm->address_space->page_table, vm,
-				att_pgaddr, att_pgaddr + seg_pgsize,
-				seg_phys, att_attr,
-				pgsize_to_pgshift(seg_pgsize), vmr, 1);
+		ret = ihk_mc_pt_set_range(vm->address_space->page_table, vm,
+			att_pgaddr, att_pgaddr + att_pgsize, seg_phys, att_attr,
+			vmr->pgshift, vmr, 0);
 		if (ret) {
 			ret = -EFAULT;
 			ekprintf("%s: ERROR: ihk_mc_pt_set_range() failed %d\n",
-				__func__, ret);
+				__FUNCTION__, ret);
 			goto out;
 		}
+		// memory_stat_rss_add() is called by the process hosting the memory area
 	}
 out:
@@ -2142,7 +2026,8 @@ static pte_t * xpmem_vaddr_to_pte(
 	}
 out:
-	return pte;
+	return pte;
 }
@@ -2152,35 +2037,37 @@ static int xpmem_pin_page(
 	struct process_vm *src_vm,
 	unsigned long vaddr)
 {
-	int ret = 0;
+	int ret;
 	struct vm_range *range;
 	XPMEM_DEBUG("call: tgid=%d, vaddr=0x%lx", tg->tgid, vaddr);
-	if (is_remote_vm(src_vm)) {
-		ihk_rwspinlock_read_lock_noirq(&src_vm->memory_range_lock);
-	}
+	ihk_rwspinlock_read_lock_noirq(&src_vm->memory_range_lock);
 	range = lookup_process_memory_range(src_vm, vaddr, vaddr + 1);
+	ihk_rwspinlock_read_unlock_noirq(&src_vm->memory_range_lock);
 	if (!range || range->start > vaddr) {
-		ret = -ENOENT;
-		goto out;
+		return -ENOENT;
 	}
 	if (xpmem_is_private_data(range)) {
-		ret = -ENOENT;
-		goto out;
+		return -ENOENT;
 	}
-	ihk_atomic_inc(&tg->n_pinned);
-out:
-	if (is_remote_vm(src_vm)) {
-		ihk_rwspinlock_read_unlock_noirq(&src_vm->memory_range_lock);
-	}
+	ret = page_fault_process_vm(src_vm, (void *)vaddr,
+			PF_POPULATE | PF_WRITE | PF_USER);
+	if (!ret) {
+		ihk_atomic_inc(&tg->n_pinned);
+	}
+	else {
+		return -ENOENT;
+	}
 	XPMEM_DEBUG("return: ret=%d", ret);
-	return ret;
+	return ret;
 }
@@ -2190,24 +2077,30 @@ static void xpmem_unpin_pages(
 	unsigned long vaddr,
 	size_t size)
 {
+	int n_pgs = (((offset_in_page(vaddr) + (size)) + (PAGE_SIZE - 1)) >>
+			PAGE_SHIFT);
 	int n_pgs_unpinned = 0;
 	size_t vsize = 0;
-	unsigned long end = vaddr + size;
 	pte_t *pte = NULL;
 	XPMEM_DEBUG("call: segid=0x%lx, vaddr=0x%lx, size=0x%lx",
 		seg->segid, vaddr, size);
+	XPMEM_DEBUG("n_pgs=%d", n_pgs);
 	vaddr &= PAGE_MASK;
-	while (vaddr < end) {
+	while (n_pgs > 0) {
 		pte = xpmem_vaddr_to_pte(vm, vaddr, &vsize);
 		if (pte && !pte_is_null(pte)) {
 			n_pgs_unpinned++;
-			vaddr += vsize;
+			vaddr += PAGE_SIZE;
+			n_pgs--;
 		}
 		else {
-			vaddr = ((vaddr + vsize) & (~(vsize - 1)));
+			vsize = ((vaddr + vsize) & (~(vsize - 1)));
+			n_pgs -= (vsize - vaddr) / PAGE_SIZE;
+			vaddr = vsize;
 		}
 	}
@@ -2409,15 +2302,3 @@ static int xpmem_validate_access(
 	return 0;
 }
-static int is_remote_vm(struct process_vm *vm)
-{
-	int ret = 0;
-	if (cpu_local_var(current)->proc->vm != vm) {
-		/* vm is not mine */
-		ret = 1;
-	}
-	return ret;
-}