use per-process wait queues for system call request processing in mcexec so that multiple processes can share the per-core system call channels
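A rough picture of the change before the diff: every mcctrl channel now carries a list of per-process wait queue heads (struct wait_queue_head_list_node, keyed by pid) protected by a spinlock; a waiting mcexec process registers its own entry and sleeps on it, and the IKC packet handler wakes only the entry whose pid matches the incoming request. The sketch below illustrates that pattern with standard Linux primitives and is not the mcctrl code itself: demo_channel, demo_channel_init, get_wqhln, wait_for_request and deliver_request are made-up stand-ins, while the real code works on struct mcctrl_channel, takes the lock through the IHK wrappers ihk_ikc_spinlock_lock/unlock, and allocates with GFP_KERNEL in a retry loop.

/*
 * Minimal sketch of the per-process wait queue scheme this commit
 * introduces (illustrative only; see the diff below for the real code).
 * Each channel keeps a list of wait queue heads keyed by pid, so that
 * several processes can block on the same per-core syscall channel and
 * only the process targeted by an incoming request is woken.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct wait_queue_head_list_node {
	struct list_head list;
	wait_queue_head_t wq_syscall;
	int pid;
};

/* Stand-in for struct mcctrl_channel; only the fields used here. */
struct demo_channel {
	int req;
	struct list_head wq_list;
	spinlock_t wq_list_lock;	/* the real code uses ihk_spinlock_t */
};

static void demo_channel_init(struct demo_channel *c)
{
	c->req = 0;
	INIT_LIST_HEAD(&c->wq_list);
	spin_lock_init(&c->wq_list_lock);
}

/* Find the wait queue head registered for pid, or create and link one. */
static struct wait_queue_head_list_node *
get_wqhln(struct demo_channel *c, int pid)
{
	struct wait_queue_head_list_node *wqhln = NULL, *it;
	unsigned long flags;

	spin_lock_irqsave(&c->wq_list_lock, flags);
	list_for_each_entry(it, &c->wq_list, list) {
		if (it->pid == pid) {
			wqhln = it;
			break;
		}
	}
	if (!wqhln) {
		/* GFP_ATOMIC because this sketch allocates under the spinlock;
		 * the patch itself uses GFP_KERNEL in a retry loop. */
		wqhln = kmalloc(sizeof(*wqhln), GFP_ATOMIC);
		if (wqhln) {
			wqhln->pid = pid;
			init_waitqueue_head(&wqhln->wq_syscall);
			list_add_tail(&wqhln->list, &c->wq_list);
		}
	}
	spin_unlock_irqrestore(&c->wq_list_lock, flags);
	return wqhln;
}

/* Wait side (mcexec ioctl path): block only on this process's queue. */
static int wait_for_request(struct demo_channel *c, int pid)
{
	struct wait_queue_head_list_node *wqhln = get_wqhln(c, pid);

	if (!wqhln)
		return -ENOMEM;
	return wait_event_interruptible(wqhln->wq_syscall, c->req);
}

/* Request side (IKC packet handler): wake only the matching pid. */
static void deliver_request(struct demo_channel *c, int pid)
{
	struct wait_queue_head_list_node *wqhln = get_wqhln(c, pid);

	if (!wqhln)
		return;
	c->req = 1;
	wake_up(&wqhln->wq_syscall);
}

Keying the wakeup on pid is what lets several mcexec processes share one per-core channel without stealing each other's requests; the actual patch additionally unlinks and frees the node once the wait completes, which is omitted above for brevity.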
@@ -93,6 +93,7 @@ struct syscall_request {
 struct syscall_wait_desc {
 	unsigned long cpu;
 	struct syscall_request sr;
+	int pid;
 };
 
 struct syscall_load_desc {
@@ -267,10 +267,37 @@ static long mcexec_get_cpu(ihk_os_t os)
 	return info->n_cpus;
 }
 
-int mcexec_syscall(struct mcctrl_channel *c, unsigned long arg)
+int mcexec_syscall(struct mcctrl_channel *c, int pid, unsigned long arg)
 {
+	struct wait_queue_head_list_node *wqhln = NULL;
+	struct wait_queue_head_list_node *wqhln_iter;
+	unsigned long flags;
+
+	/* Look up per-process wait queue head with pid */
+	flags = ihk_ikc_spinlock_lock(&c->wq_list_lock);
+	list_for_each_entry(wqhln_iter, &c->wq_list, list) {
+		if (wqhln_iter->pid == pid) {
+			wqhln = wqhln_iter;
+			break;
+		}
+	}
+
+	if (!wqhln) {
+retry_alloc:
+		wqhln = kmalloc(sizeof(*wqhln), GFP_KERNEL);
+		if (!wqhln) {
+			printk("WARNING: coudln't alloc wait queue head, retrying..\n");
+			goto retry_alloc;
+		}
+
+		wqhln->pid = pid;
+		init_waitqueue_head(&wqhln->wq_syscall);
+		list_add_tail(&wqhln->list, &c->wq_list);
+	}
+	ihk_ikc_spinlock_unlock(&c->wq_list_lock, flags);
+
 	c->req = 1;
-	wake_up(&c->wq_syscall);
+	wake_up(&wqhln->wq_syscall);
 
 	return 0;
 }
@@ -287,16 +314,21 @@ int mcexec_wait_syscall(ihk_os_t os, struct syscall_wait_desc *__user req)
 	struct syscall_wait_desc swd;
 	struct mcctrl_channel *c;
 	struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
+	struct wait_queue_head_list_node *wqhln;
+	struct wait_queue_head_list_node *wqhln_iter;
+	int ret = 0;
+	unsigned long irqflags;
 #ifndef DO_USER_MODE
 	unsigned long s, w, d;
 #endif
 
 	//printk("mcexec_wait_syscall swd=%p req=%p size=%d\n", &swd, req, sizeof(swd.cpu));
-	if (copy_from_user(&swd, req, sizeof(swd.cpu))) {
+	if (copy_from_user(&swd, req, sizeof(swd))) {
 		return -EFAULT;
 	}
 
-	if(swd.cpu >= usrdata->num_channels)return -EINVAL;
+	if (swd.cpu >= usrdata->num_channels)
+		return -EINVAL;
 
 	c = get_peer_channel(usrdata, current);
 	if (c) {
@@ -308,9 +340,43 @@ if(swd.cpu >= usrdata->num_channels)return -EINVAL;
 
 #ifdef DO_USER_MODE
 retry:
-	if (wait_event_interruptible(c->wq_syscall, c->req)) {
+	/* Prepare per-process wait queue head */
+retry_alloc:
+	wqhln = kmalloc(sizeof(*wqhln), GFP_KERNEL);
+	if (!wqhln) {
+		printk("WARNING: coudln't alloc wait queue head, retrying..\n");
+		goto retry_alloc;
+	}
+
+	wqhln->pid = swd.pid;
+	init_waitqueue_head(&wqhln->wq_syscall);
+
+	irqflags = ihk_ikc_spinlock_lock(&c->wq_list_lock);
+	/* First see if there is one wait queue already */
+	list_for_each_entry(wqhln_iter, &c->wq_list, list) {
+		if (wqhln_iter->pid == current->tgid) {
+			kfree(wqhln);
+			wqhln = wqhln_iter;
+			list_del(&wqhln->list);
+			printk("DEBUG: wait queue head was already available in syscall wait\n");
+			break;
+		}
+	}
+	list_add_tail(&wqhln->list, &c->wq_list);
+	ihk_ikc_spinlock_unlock(&c->wq_list_lock, irqflags);
+
+	ret = wait_event_interruptible(wqhln->wq_syscall, c->req);
+
+	/* Remove per-process wait queue head */
+	irqflags = ihk_ikc_spinlock_lock(&c->wq_list_lock);
+	list_del(&wqhln->list);
+	ihk_ikc_spinlock_unlock(&c->wq_list_lock, irqflags);
+	kfree(wqhln);
+
+	if (ret) {
 		return -EINTR;
 	}
+
 	c->req = 0;
 #if 1
 	mb();
@@ -40,7 +40,7 @@
 
 void mcexec_prepare_ack(ihk_os_t os, unsigned long arg, int err);
 static void mcctrl_ikc_init(ihk_os_t os, int cpu, unsigned long rphys, struct ihk_ikc_channel_desc *c);
-int mcexec_syscall(struct mcctrl_channel *c, unsigned long arg);
+int mcexec_syscall(struct mcctrl_channel *c, int pid, unsigned long arg);
 
 static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
		void *__packet, void *__os)
@@ -62,7 +62,7 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
 		break;
 
 	case SCD_MSG_SYSCALL_ONESIDE:
-		mcexec_syscall(usrdata->channels + pisp->ref, pisp->arg);
+		mcexec_syscall(usrdata->channels + pisp->ref, pisp->pid, pisp->arg);
 		break;
 	}
 
@@ -219,7 +219,9 @@ static int connect_handler(struct ihk_ikc_channel_info *param)
 		return 1;
 	}
 	param->packet_handler = syscall_packet_handler;
-	init_waitqueue_head(&usrdata->channels[cpu].wq_syscall);
+
+	INIT_LIST_HEAD(&usrdata->channels[cpu].wq_list);
+	spin_lock_init(&usrdata->channels[cpu].wq_list_lock);
 
 	usrdata->channels[cpu].c = c;
 	kprintf("syscall: MC CPU %d connected. c=%p\n", cpu, c);
@@ -238,7 +240,9 @@ static int connect_handler2(struct ihk_ikc_channel_info *param)
 		cpu = usrdata->num_channels - 1;
 
 	param->packet_handler = syscall_packet_handler;
-	init_waitqueue_head(&usrdata->channels[cpu].wq_syscall);
+
+	INIT_LIST_HEAD(&usrdata->channels[cpu].wq_list);
+	spin_lock_init(&usrdata->channels[cpu].wq_list_lock);
 
 	usrdata->channels[cpu].c = c;
 	kprintf("syscall: MC CPU %d connected. c=%p\n", cpu, c);
@@ -56,6 +56,7 @@
 struct ikc_scd_packet {
 	int msg;
 	int ref;
+	int pid;
 	int err;
 	unsigned long arg;
 };
@@ -88,6 +89,12 @@ struct syscall_params {
 	unsigned long *doorbell_va;
 };
 
+struct wait_queue_head_list_node {
+	struct list_head list;
+	wait_queue_head_t wq_syscall;
+	int pid;
+};
+
 struct mcctrl_channel {
 	struct ihk_ikc_channel_desc *c;
 	struct syscall_params param;
@@ -95,7 +102,8 @@ struct mcctrl_channel {
 	void *dma_buf;
 
 	int req;
-	wait_queue_head_t wq_syscall;
+	struct list_head wq_list;
+	ihk_spinlock_t wq_list_lock;
 };
 
 struct mcctrl_usrdata {
@@ -57,7 +57,7 @@
 static long pager_call(ihk_os_t os, struct syscall_request *req);
 
 #ifdef SC_DEBUG
-//static struct ihk_dma_request last_request;
+static struct ihk_dma_request last_request;
 
 static void print_dma_lastreq(void)
 {
@@ -215,7 +215,7 @@ static int remote_page_fault(struct mcctrl_usrdata *usrdata, void *fault_addr, u
 	struct syscall_request *req;
 	struct syscall_response *resp;
 	int error;
 
 	dprintk("remote_page_fault(%p,%p,%llx)\n", usrdata, fault_addr, reason);
 
 	channel = get_peer_channel(usrdata, current);
@@ -241,8 +241,43 @@ static int remote_page_fault(struct mcctrl_usrdata *usrdata, void *fault_addr, u
 	resp->status = STATUS_PAGE_FAULT;
 
 	for (;;) {
+		struct wait_queue_head_list_node *wqhln;
+		struct wait_queue_head_list_node *wqhln_iter;
+		unsigned long irqflags;
+
+retry_alloc:
+		wqhln = kmalloc(sizeof(*wqhln), GFP_KERNEL);
+		if (!wqhln) {
+			printk("WARNING: coudln't alloc wait queue head, retrying..\n");
+			goto retry_alloc;
+		}
+
+		/* Prepare per-process wait queue head */
+		wqhln->pid = current->tgid;
+		init_waitqueue_head(&wqhln->wq_syscall);
+
+		irqflags = ihk_ikc_spinlock_lock(&channel->wq_list_lock);
+		/* First see if there is a wait queue already */
+		list_for_each_entry(wqhln_iter, &channel->wq_list, list) {
+			if (wqhln_iter->pid == current->tgid) {
+				kfree(wqhln);
+				wqhln = wqhln_iter;
+				list_del(&wqhln->list);
+				break;
+			}
+		}
+		list_add_tail(&wqhln->list, &channel->wq_list);
+		ihk_ikc_spinlock_unlock(&channel->wq_list_lock, irqflags);
+
 		/* wait for response */
-		error = wait_event_interruptible(channel->wq_syscall, channel->req);
+		error = wait_event_interruptible(wqhln->wq_syscall, channel->req);
+
+		/* Remove per-process wait queue head */
+		irqflags = ihk_ikc_spinlock_lock(&channel->wq_list_lock);
+		list_del(&wqhln->list);
+		ihk_ikc_spinlock_unlock(&channel->wq_list_lock, irqflags);
+		kfree(wqhln);
+
 		if (error) {
 			printk("remote_page_fault:interrupted. %d\n", error);
 			goto out;
@@ -664,7 +664,7 @@ int main(int argc, char **argv)
 	argv[optind + 2] = NULL;
 	argc -= (optind - 1);
 
-	printf("target_core: %d, device: %s, command: ", target_core, dev);
+	__dprintf("target_core: %d, device: %s, command: ", target_core, dev);
 	for (i = 1; i < argc; ++i) {
 		printf("%s ", argv[i]);
 	}
@@ -966,6 +966,7 @@ int main_loop(int fd, int cpu, pthread_mutex_t *lock)
 	struct timeval tv;
 
 	w.cpu = cpu;
+	w.pid = getpid();
 
 	while (((ret = ioctl(fd, MCEXEC_UP_WAIT_SYSCALL, (unsigned long)&w)) == 0) || (ret == -1 && errno == EINTR)) {
 
@@ -87,6 +87,7 @@ struct user_desc {
 struct ikc_scd_packet {
 	int msg;
 	int ref;
+	int pid;
 	int err;
 	unsigned long arg;
 };
@@ -116,6 +116,9 @@ static void send_syscall(struct syscall_request *req, int cpu)
 
 		scp = &get_cpu_local_var(0)->scp2;
 		syscall_channel = get_cpu_local_var(0)->syscall_channel2;
+
+		/* XXX: is this really going to work if multiple processes
+		 * exit/receive signals at the same time?? */
 		cpu = num_processors;
 	}
 	else{
@@ -144,8 +147,9 @@ static void send_syscall(struct syscall_request *req, int cpu)
 #ifdef SYSCALL_BY_IKC
 	packet.msg = SCD_MSG_SYSCALL_ONESIDE;
 	packet.ref = cpu;
-	packet.arg = scp->request_rpa;
+	packet.pid = cpu_local_var(current)->pid;
+	packet.arg = scp->request_rpa;
+	dkprintf("send syscall, nr: %d, pid: %d\n", req->number, packet.pid);
 	ihk_ikc_send(syscall_channel, &packet, 0);
 #endif
 }
@@ -186,6 +190,8 @@ long do_syscall(struct syscall_request *req, ihk_mc_user_context_t *ctx, int cpu
 	}
 
 	if (res->status == STATUS_PAGE_FAULT) {
+		dkprintf("STATUS_PAGE_FAULT in syscall, pid: %d\n",
+			cpu_local_var(current)->pid);
 		error = page_fault_process(get_cpu_local_var(cpu)->current,
				(void *)res->fault_address,
				res->fault_reason);