introduction of mcctrl_per_proc_data to keep track of remote page tables on a per-process basis
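The core of the change is a list of per-process entries hanging off mcctrl_usrdata, replacing the single OS-wide usrdata->rpgtable field. As a rough, self-contained sketch of the lookup pattern (note: this uses the generic kernel spinlock calls where the real code uses the ihk_ikc_spinlock wrappers, and the helper name mcctrl_find_per_proc_data is hypothetical; the actual code open-codes the loop in rus_vm_fault()):

/*
 * Minimal sketch of the per-process lookup introduced by this commit.
 * Struct layout mirrors the diff; the helper itself is illustrative only.
 */
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct mcctrl_per_proc_data {
	struct list_head list;
	int pid;
	unsigned long rpgtable; /* remote page table, per process */
};

/* Find the entry registered for the calling process (current->tgid). */
static struct mcctrl_per_proc_data *
mcctrl_find_per_proc_data(struct list_head *per_proc_list, spinlock_t *lock)
{
	struct mcctrl_per_proc_data *ppd = NULL, *ppd_iter;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry(ppd_iter, per_proc_list, list) {
		if (ppd_iter->pid == current->tgid) {
			ppd = ppd_iter;
			break;
		}
	}
	spin_unlock_irqrestore(lock, flags);

	return ppd;
}

With such an entry in hand, the page fault handler passes ppd->rpgtable to translate_rva_to_rpa() instead of the former OS-wide usrdata->rpgtable, as the diff below shows.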
@@ -53,6 +53,8 @@ static long mcexec_prepare_image(ihk_os_t os,
 	void *args, *envs;
 	long ret = 0;
 	struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
+	unsigned long flags;
+	struct mcctrl_per_proc_data *ppd = NULL;
 
 	if (copy_from_user(&desc, udesc,
 	                   sizeof(struct program_load_desc))) {
@@ -117,7 +119,23 @@ static long mcexec_prepare_image(ihk_os_t os,
 		goto free_out;
 	}
 
-	usrdata->rpgtable = pdesc->rpgtable;
+	ppd = kmalloc(sizeof(*ppd), GFP_ATOMIC);
+	if (!ppd) {
+		printk("ERROR: allocating per process data\n");
+		ret = -ENOMEM;
+		goto free_out;
+	}
+
+	ppd->pid = pdesc->pid;
+	ppd->rpgtable = pdesc->rpgtable;
+
+	flags = ihk_ikc_spinlock_lock(&usrdata->per_proc_list_lock);
+	list_add_tail(&ppd->list, &usrdata->per_proc_list);
+	ihk_ikc_spinlock_unlock(&usrdata->per_proc_list_lock, flags);
+
+	printk("pid %d, rpgtable: 0x%lx added\n",
+		ppd->pid, ppd->rpgtable);
+
 	if (copy_to_user(udesc, pdesc, sizeof(struct program_load_desc) +
 	                 sizeof(struct program_image_section) * desc.num_sections)) {
 		ret = -EFAULT;
@@ -302,6 +302,9 @@ int prepare_ikc_channels(ihk_os_t os)
 	memcpy(&usrdata->listen_param2, &listen_param2, sizeof listen_param2);
 	ihk_ikc_listen_port(os, &usrdata->listen_param2);
 
+	INIT_LIST_HEAD(&usrdata->per_proc_list);
+	spin_lock_init(&usrdata->per_proc_list_lock);
+
 	error = init_peer_channel_registry(usrdata);
 	if (error) {
 		return error;
@@ -106,6 +106,12 @@ struct mcctrl_channel {
 	ihk_spinlock_t wq_list_lock;
 };
 
+struct mcctrl_per_proc_data {
+	struct list_head list;
+	int pid;
+	unsigned long rpgtable; /* per process, not per OS */
+};
+
 struct mcctrl_usrdata {
 	struct ihk_ikc_listen_param listen_param;
 	struct ihk_ikc_listen_param listen_param2;
@@ -120,7 +126,9 @@ struct mcctrl_usrdata {
 	int mcctrl_dma_abort;
 	unsigned long last_thread_exec;
 	wait_queue_head_t wq_prepare;
-	unsigned long rpgtable; /* per process, not per OS */
 
+	struct list_head per_proc_list;
+	ihk_spinlock_t per_proc_list_lock;
+
 	void **keys;
 };
@@ -175,6 +175,7 @@ int translate_rva_to_rpa(ihk_os_t os, unsigned long rpt, unsigned long rva,
 		ihk_device_unmap_virtual(ihk_os_to_dev(os), pt, PAGE_SIZE);
 		ihk_device_unmap_memory(ihk_os_to_dev(os), phys, PAGE_SIZE);
 		error = -EFAULT;
+		printk("ERROR: remote PTE is not present for 0x%lx (rpt: %lx) ?\n", rva, rpt);
 		goto out;
 	}
 
@@ -345,16 +346,38 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 #if USE_VM_INSERT_PFN
 	size_t pix;
 #endif
+	struct mcctrl_per_proc_data *ppd, *ppd_iter;
+	unsigned long flags;
 
 	dprintk("mcctrl:page fault:flags %#x pgoff %#lx va %p page %p\n",
 			vmf->flags, vmf->pgoff, vmf->virtual_address, vmf->page);
 
+	ppd = NULL;
+	flags = ihk_ikc_spinlock_lock(&usrdata->per_proc_list_lock);
+
+	list_for_each_entry(ppd_iter, &usrdata->per_proc_list, list) {
+		if (ppd_iter->pid == current->tgid) {
+			ppd = ppd_iter;
+			break;
+		}
+	}
+	ihk_ikc_spinlock_unlock(&usrdata->per_proc_list_lock, flags);
+
+	if (!ppd) {
+		printk("ERROR: no per process data for pid %d\n", current->tgid);
+		return VM_FAULT_SIGBUS;
+	}
+
 	for (try = 1; ; ++try) {
-		error = translate_rva_to_rpa(usrdata->os, usrdata->rpgtable,
+		error = translate_rva_to_rpa(usrdata->os, ppd->rpgtable,
 				(unsigned long)vmf->virtual_address,
 				&rpa, &pgsize);
 #define NTRIES 2
 		if (!error || (try >= NTRIES)) {
 			if (error) {
 				printk("translate_rva_to_rpa: error\n");
 			}
 
 			break;
 		}
@@ -370,7 +393,7 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		}
 	}
 	if (error) {
-		printk("mcctrl:page fault:flags %#x pgoff %#lx va %p page %p\n",
+		printk("mcctrl:page fault error:flags %#x pgoff %#lx va %p page %p\n",
 				vmf->flags, vmf->pgoff, vmf->virtual_address, vmf->page);
 		return VM_FAULT_SIGBUS;
 	}
@@ -392,7 +415,7 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 #endif
 	ihk_device_unmap_memory(dev, phys, pgsize);
 	if (error) {
-		printk("mcctrl:page fault:flags %#x pgoff %#lx va %p page %p\n",
+		printk("mcctrl:page fault:remap error:flags %#x pgoff %#lx va %p page %p\n",
 				vmf->flags, vmf->pgoff, vmf->virtual_address, vmf->page);
 		return VM_FAULT_SIGBUS;
 	}
@@ -1150,7 +1173,25 @@ int __do_in_kernel_syscall(ihk_os_t os, struct mcctrl_channel *c, struct syscall
 	case __NR_munmap:
 		/* Set new remote page table if not zero */
 		if (sc->args[2]) {
-			usrdata->rpgtable = sc->args[2];
+			unsigned long flags;
+			struct mcctrl_per_proc_data *ppd = NULL;
+
+			ppd = kmalloc(sizeof(*ppd), GFP_ATOMIC);
+			if (!ppd) {
+				printk("ERROR: allocating per process data\n");
+				error = -ENOMEM;
+				goto out;
+			}
+
+			ppd->pid = current->tgid;
+			ppd->rpgtable = sc->args[2];
+
+			flags = ihk_ikc_spinlock_lock(&usrdata->per_proc_list_lock);
+			list_add_tail(&ppd->list, &usrdata->per_proc_list);
+			ihk_ikc_spinlock_unlock(&usrdata->per_proc_list_lock, flags);
+
+			printk("pid: %d, rpgtable: 0x%lx added\n",
+				ppd->pid, ppd->rpgtable);
 		}
 
 		clear_pte_range(sc->args[0], sc->args[1]);