Some variable definitions are gathered into fork_tree_node from struct process.

- remove the copies defined in both structs (keep them in fork_tree_node): pid, pgid, status
- move to fork_tree_node: tid
- make a dummy fork_tree_node for idle_process (field-layout sketch below).
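The change is easiest to see as a field-layout sketch. The following is a minimal, illustrative reconstruction from the hunks below; the _before/_after suffixes are added only for the sketch (the real structs are simply fork_tree_node and process), and members not touched by this commit are elided.

/*
 * Sketch of the layout change, reconstructed from the hunks below.
 * Suffixes and the placeholder comments are illustrative only.
 */

struct process;                         /* forward declaration */

/* Before: identifiers lived in struct process and were partially
 * duplicated in struct fork_tree_node. */
struct fork_tree_node_before {
        struct process *owner;
        int pid;                        /* duplicate of process.pid */
        int pgid;                       /* duplicate of process.pgid */
        int status;                     /* duplicate of process.status */
        int exit_status;
        struct fork_tree_node_before *parent;
        /* lock, children list, ... */
};

struct process_before {
        int pid;                        /* removed by this commit */
        int status;                     /* removed */
        int tid;                        /* moved into fork_tree_node */
        int pgid;                       /* removed */
        int cpu_id;
        struct fork_tree_node_before *ftn;
        /* vm, sigmask, ... */
};

/* After: fork_tree_node is the single home for pid/tid/pgid/status,
 * and every user reaches them through proc->ftn. */
struct fork_tree_node_after {
        struct process *owner;
        int pid;
        int tid;                        /* moved here from struct process */
        int pgid;
        int status;
        int exit_status;
        struct fork_tree_node_after *parent;
        /* lock, children list, ... */
};

struct process_after {
        int cpu_id;
        struct fork_tree_node_after *ftn;   /* proc->pid becomes proc->ftn->pid, etc. */
        /* vm, sigmask, ... */
};

Accordingly, call sites switch from proc->pid, proc->tid, proc->pgid and proc->status to the proc->ftn equivalents throughout the diffs below, and sched_init() now points the per-CPU idle process at a zeroed dummy node (cpu_local_var(idle_ftn)) so those dereferences remain valid for the idle task as well.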
Tomoki Shirasawa
2014-10-29 16:54:09 +09:00
parent 658ff759ef
commit 3fe7e39607
11 changed files with 102 additions and 126 deletions


@ -175,8 +175,8 @@ void fill_prpsinfo(struct note *head, struct process *proc, void *regs)
memcpy(name, "CORE", sizeof("CORE"));
prpsinfo = (struct elf_prpsinfo64 *)(name + align32(sizeof("CORE")));
prpsinfo->pr_state = proc->status;
prpsinfo->pr_pid = proc->pid;
prpsinfo->pr_state = proc->ftn->status;
prpsinfo->pr_pid = proc->ftn->pid;
/*
We leave most of the fields unfilled.


@ -172,7 +172,7 @@ do_setpgid(int pid, int pgid)
unsigned long irqstate;
if(pid == 0)
pid = proc->pid;
pid = proc->ftn->pid;
if(pgid == 0)
pgid = pid;
@ -180,15 +180,10 @@ do_setpgid(int pid, int pgid)
v = get_cpu_local_var(i);
irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
list_for_each_entry(p, &(v->runq), sched_list){
if(p->pid <= 0)
if(p->ftn->pid <= 0)
continue;
if(p->pid == pid){
p->pgid = pgid;
/* Update pgid in fork_tree because it's used in wait4 */
ihk_mc_spinlock_lock_noirq(&p->ftn->lock);
if(p->ftn->pid == pid){
p->ftn->pgid = pgid;
ihk_mc_spinlock_unlock_noirq(&p->ftn->lock);
}
}
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
@ -293,7 +288,7 @@ do_signal(unsigned long rc, void *regs0, struct process *proc, struct sig_pendin
struct fork_tree_node *ftn = proc->ftn;
for(w = pending->sigmask.__val[0], sig = 0; w; sig++, w >>= 1);
dkprintf("do_signal,pid=%d,sig=%d\n", proc->pid, sig);
dkprintf("do_signal,pid=%d,sig=%d\n", proc->ftn->pid, sig);
if(regs == NULL){ /* call from syscall */
asm("movq %%gs:132, %0" : "=r" (regs));
@ -383,7 +378,7 @@ do_signal(unsigned long rc, void *regs0, struct process *proc, struct sig_pendin
dkprintf("do_signal,SIGSTOP,sleeping\n");
/* Sleep */
proc->status = PS_STOPPED;
proc->ftn->status = PS_STOPPED;
schedule();
dkprintf("SIGSTOP(): woken up\n");
break;
@ -404,7 +399,6 @@ do_signal(unsigned long rc, void *regs0, struct process *proc, struct sig_pendin
/* Sleep */
dkprintf("do_signal,SIGTRAP,sleeping\n");
proc->status = PS_TRACED;
schedule();
dkprintf("SIGTRAP(): woken up\n");
@ -449,7 +443,7 @@ static int ptrace_report_signal(struct process *proc, struct x86_regs *regs, str
__sigset_t w;
long rc;
dkprintf("ptrace_report_signal,pid=%d\n", proc->pid);
dkprintf("ptrace_report_signal,pid=%d\n", proc->ftn->pid);
/* Save reason why stopped and process state for wait to reap */
for (w = pending->sigmask.__val[0], sig = 0; w; sig++, w >>= 1);
@ -467,9 +461,9 @@ static int ptrace_report_signal(struct process *proc, struct x86_regs *regs, str
memset(&info, '\0', sizeof info);
info.si_signo = SIGCHLD;
info.si_code = CLD_TRAPPED;
info._sifields._sigchld.si_pid = proc->pid;
info._sifields._sigchld.si_pid = proc->ftn->pid;
info._sifields._sigchld.si_status = proc->ftn->exit_status;
rc = do_kill(proc->ftn->parent->owner->pid, -1, SIGCHLD, &info);
rc = do_kill(proc->ftn->parent->pid, -1, SIGCHLD, &info);
if (rc < 0) {
kprintf("ptrace_report_signal,do_kill failed\n");
}
@ -483,7 +477,6 @@ static int ptrace_report_signal(struct process *proc, struct x86_regs *regs, str
peekuser(proc, regs);
dkprintf("ptrace_report_signal,sleeping\n");
/* Sleep */
proc->status = PS_TRACED;
schedule();
dkprintf("ptrace_report_signal,wake up\n");
@ -506,7 +499,7 @@ check_signal(unsigned long rc, void *regs0)
if(clv == NULL)
return;
proc = cpu_local_var(current);
if(proc == NULL || proc->pid == 0)
if(proc == NULL || proc->ftn->pid == 0)
return;
if(regs != NULL && (regs->rsp & 0x8000000000000000)) {
@ -588,9 +581,9 @@ do_kill(int pid, int tid, int sig, siginfo_t *info)
int sendme = 0;
if(pid == 0){
if(proc == NULL || proc->pid <= 0)
if(proc == NULL || proc->ftn->pid <= 0)
return -ESRCH;
pgid = proc->pgid;
pgid = proc->ftn->pgid;
}
pids = kmalloc(sizeof(int) * num_processors, IHK_MC_AP_NOWAIT);
if(!pids)
@ -599,20 +592,20 @@ do_kill(int pid, int tid, int sig, siginfo_t *info)
v = get_cpu_local_var(i);
irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
list_for_each_entry(p, &(v->runq), sched_list){
if(p->pid <= 0)
if(p->ftn->pid <= 0)
continue;
if(proc && p->pid == proc->pid){
if(proc && p->ftn->pid == proc->ftn->pid){
sendme = 1;
continue;
}
if(pgid == 1 || p->pgid == pgid){
if(pgid == 1 || p->ftn->pgid == pgid){
int j;
for(j = 0; j < n; j++)
if(pids[j] == p->pid)
if(pids[j] == p->ftn->pid)
break;
if(j == n){
pids[n] = p->pid;
pids[n] = p->ftn->pid;
n++;
}
}
@ -622,7 +615,7 @@ do_kill(int pid, int tid, int sig, siginfo_t *info)
for(i = 0; i < n; i++)
rc = do_kill(pids[i], -1, sig, info);
if(sendme)
rc = do_kill(proc->pid, -1, sig, info);
rc = do_kill(proc->ftn->pid, -1, sig, info);
kfree(pids);
return rc;
@ -638,8 +631,8 @@ do_kill(int pid, int tid, int sig, siginfo_t *info)
found = 0;
ihk_mc_spinlock_lock_noirq(&(v->runq_lock));
list_for_each_entry(p, &(v->runq), sched_list){
if(p->pid == pid){
if(p->tid == pid || tproc == NULL){
if(p->ftn->pid == pid){
if(p->ftn->tid == pid || tproc == NULL){
if(!(mask & p->sigmask.__val[0])){
tproc = p;
if(!found && savelock) {
@ -659,7 +652,7 @@ do_kill(int pid, int tid, int sig, siginfo_t *info)
}
}
if(!(mask & p->sigmask.__val[0])){
if(p->tid == pid || tproc == NULL){
if(p->ftn->tid == pid || tproc == NULL){
}
}
@ -680,8 +673,8 @@ do_kill(int pid, int tid, int sig, siginfo_t *info)
found = 0;
ihk_mc_spinlock_lock_noirq(&(v->runq_lock));
list_for_each_entry(p, &(v->runq), sched_list){
if(p->pid > 0 &&
p->tid == tid){
if(p->ftn->pid > 0 &&
p->ftn->tid == tid){
savelock = &(v->runq_lock);
found = 1;
tproc = p;
@ -698,8 +691,8 @@ do_kill(int pid, int tid, int sig, siginfo_t *info)
found = 0;
ihk_mc_spinlock_lock_noirq(&(v->runq_lock));
list_for_each_entry(p, &(v->runq), sched_list){
if(p->pid == pid &&
p->tid == tid){
if(p->ftn->pid == pid &&
p->ftn->tid == tid){
savelock = &(v->runq_lock);
found = 1;
tproc = p;
@ -775,7 +768,7 @@ do_kill(int pid, int tid, int sig, siginfo_t *info)
default:
if(proc != tproc){
dkprintf("do_kill,ipi,pid=%d,cpu_id=%d\n",
tproc->pid, tproc->cpu_id);
tproc->ftn->pid, tproc->cpu_id);
ihk_mc_interrupt_cpu(get_x86_cpu_local_variable(tproc->cpu_id)->apic_id, 0xd0);
}
break;
@ -829,7 +822,7 @@ set_signal(int sig, void *regs0, siginfo_t *info)
struct x86_regs *regs = regs0;
struct process *proc = cpu_local_var(current);
if(proc == NULL || proc->pid == 0)
if(proc == NULL || proc->ftn->pid == 0)
return;
if((__sigmask(sig) & proc->sigmask.__val[0]) ||
@ -838,5 +831,5 @@ set_signal(int sig, void *regs0, siginfo_t *info)
terminate(0, sig | 0x80, (ihk_mc_user_context_t *)regs->rsp);
}
else
do_kill(proc->pid, proc->tid, sig, info);
do_kill(proc->ftn->pid, proc->ftn->tid, sig, info);
}


@ -658,7 +658,7 @@ static uint64_t futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q
* queue_me() calls spin_unlock() upon completion, both serializing
* access to the hash list and forcing another memory barrier.
*/
xchg4(&(cpu_local_var(current)->status), PS_INTERRUPTIBLE);
xchg4(&(cpu_local_var(current)->ftn->status), PS_INTERRUPTIBLE);
queue_me(q, hb);
if (!plist_node_empty(&q->list)) {
@ -674,7 +674,7 @@ static uint64_t futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q
}
/* This does not need to be serialized */
cpu_local_var(current)->status = PS_RUNNING;
cpu_local_var(current)->ftn->status = PS_RUNNING;
return time_remain;
}


@ -361,9 +361,8 @@ static int process_msg_prepare_process(unsigned long rphys)
ihk_mc_unmap_memory(NULL, phys, sz);
return -ENOMEM;
}
proc->pid = pn->pid;
proc->pgid = pn->pgid;
proc->ftn->pid = pn->pid;
proc->ftn->pgid = pn->pgid;
proc->vm->region.user_start = pn->user_start;
proc->vm->region.user_end = pn->user_end;
proc->vm->region.map_start = (USER_END / 3) & LARGE_PAGE_MASK;


@ -41,6 +41,7 @@ struct cpu_local_var {
ihk_spinlock_t free_list_lock;
struct process idle;
struct fork_tree_node idle_ftn;
struct process_vm idle_vm;
ihk_spinlock_t runq_lock;


@ -265,6 +265,7 @@ struct fork_tree_node {
struct process *owner;
int pid;
int tid;
int pgid;
struct fork_tree_node *parent;
@ -306,8 +307,6 @@ void hold_fork_tree_node(struct fork_tree_node *ftn);
void release_fork_tree_node(struct fork_tree_node *ftn);
struct process {
int pid;
int status;
int cpu_id;
ihk_atomic_t refcount;
@ -327,7 +326,6 @@ struct process {
unsigned long tlsblock_base, tlsblock_limit;
} thread;
int tid;
volatile int sigevent;
sigset_t sigmask;
stack_t sigstack;
@ -344,7 +342,6 @@ struct process {
cpu_set_t cpu_set;
unsigned long saved_auxv[AUXV_LEN];
int pgid; /* process group id */
struct user *userp;
};


@ -199,7 +199,7 @@ void coredump(struct process *proc, void *regs)
request.args[1] = virt_to_phys(coretable);
/* no data for now */
ret = do_syscall(&request, proc->uctx,
proc->cpu_id, proc->pid);
proc->cpu_id, proc->ftn->pid);
if (ret == 0) {
kprintf("dumped core.\n");
} else {


@ -214,7 +214,7 @@ struct process *clone_process(struct process *org, unsigned long pc,
int termsig = clone_flags & 0xff;
if (termsig < 0 || _NSIG < termsig) {
return -EINVAL;
return (void *)-EINVAL;
}
if ((proc = ihk_mc_alloc_pages(KERNEL_STACK_NR_PAGES,
@ -343,7 +343,7 @@ int ptrace_traceme(void){
int error = 0;
struct process *proc = cpu_local_var(current);
struct fork_tree_node *child, *next;
dkprintf("ptrace_traceme,pid=%d,proc->ftn->parent=%p\n", proc->pid, proc->ftn->parent);
dkprintf("ptrace_traceme,pid=%d,proc->ftn->parent=%p\n", proc->ftn->pid, proc->ftn->parent);
if (proc->ftn->parent == NULL) {
error = -EPERM;
@ -1752,7 +1752,7 @@ out:
void hold_process(struct process *proc)
{
if (proc->status & (PS_ZOMBIE | PS_EXITED)) {
if (proc->ftn->status & (PS_ZOMBIE | PS_EXITED)) {
panic("hold_process: already exited process");
}
@ -1765,7 +1765,7 @@ void destroy_process(struct process *proc)
struct sig_pending *pending;
struct sig_pending *next;
delete_proc_procfs_files(proc->pid);
delete_proc_procfs_files(proc->ftn->pid);
if (proc->vm) {
cpu_clear(proc->cpu_id, &proc->vm->cpu_set, &proc->vm->cpu_set_lock);
@ -1851,7 +1851,7 @@ static void idle(void)
s = ihk_mc_spinlock_lock(&v->runq_lock);
list_for_each_entry(p, &v->runq, sched_list) {
if (p->status == PS_RUNNING) {
if (p->ftn->status == PS_RUNNING) {
v->status = CPU_STATUS_RUNNING;
break;
}
@ -1873,12 +1873,14 @@ void sched_init(void)
memset(idle_process, 0, sizeof(struct process));
memset(&cpu_local_var(idle_vm), 0, sizeof(struct process_vm));
memset(&cpu_local_var(idle_ftn), 0, sizeof(struct fork_tree_node));
idle_process->vm = &cpu_local_var(idle_vm);
idle_process->ftn = &cpu_local_var(idle_ftn);
ihk_mc_init_context(&idle_process->ctx, NULL, idle);
idle_process->pid = 0;
idle_process->tid = ihk_mc_get_processor_id();
idle_process->ftn->pid = 0;
idle_process->ftn->tid = ihk_mc_get_processor_id();
INIT_LIST_HEAD(&cpu_local_var(runq));
cpu_local_var(runq_len) = 0;
@ -1992,7 +1994,7 @@ redo:
--v->runq_len;
/* Round-robin if not exited yet */
if (!(prev->status & (PS_ZOMBIE | PS_EXITED))) {
if (!(prev->ftn->status & (PS_ZOMBIE | PS_EXITED))) {
list_add_tail(&prev->sched_list, &(v->runq));
++v->runq_len;
}
@ -2003,7 +2005,7 @@ redo:
} else {
/* Pick a new running process */
list_for_each_entry_safe(proc, tmp, &(v->runq), sched_list) {
if (proc->status == PS_RUNNING) {
if (proc->ftn->status == PS_RUNNING) {
next = proc;
break;
}
@ -2043,7 +2045,7 @@ redo:
last = ihk_mc_switch_context(NULL, &next->ctx, prev);
}
if ((last != NULL) && (last->status & (PS_ZOMBIE | PS_EXITED))) {
if ((last != NULL) && (last->ftn->status & (PS_ZOMBIE | PS_EXITED))) {
free_process_memory(last);
release_process(last);
}
@ -2077,7 +2079,7 @@ int sched_wakeup_process(struct process *proc, int valid_states)
struct cpu_local_var *v = get_cpu_local_var(proc->cpu_id);
dkprintf("sched_wakeup_process,proc->pid=%d,valid_states=%08x,proc->status=%08x,proc->cpu_id=%d,my cpu_id=%d\n",
proc->pid, valid_states, proc->status, proc->cpu_id, ihk_mc_get_processor_id());
proc->ftn->pid, valid_states, proc->ftn->status, proc->cpu_id, ihk_mc_get_processor_id());
irqstate = ihk_mc_spinlock_lock(&(proc->spin_sleep_lock));
if (proc->spin_sleep) {
@ -2096,8 +2098,8 @@ int sched_wakeup_process(struct process *proc, int valid_states)
irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
if (proc->status & valid_states) {
xchg4((int *)(&proc->status), PS_RUNNING);
if (proc->ftn->status & valid_states) {
xchg4((int *)(&proc->ftn->status), PS_RUNNING);
status = 0;
}
else {
@ -2166,7 +2168,7 @@ void __runq_add_proc(struct process *proc, int cpu_id)
list_add_tail(&proc->sched_list, &v->runq);
++v->runq_len;
proc->cpu_id = cpu_id;
proc->status = PS_RUNNING;
proc->ftn->status = PS_RUNNING;
get_cpu_local_var(cpu_id)->status = CPU_STATUS_RUNNING;
dkprintf("runq_add_proc(): tid %d added to CPU[%d]'s runq\n",
@ -2182,7 +2184,7 @@ void runq_add_proc(struct process *proc, int cpu_id)
__runq_add_proc(proc, cpu_id);
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
create_proc_procfs_files(proc->pid, cpu_id);
create_proc_procfs_files(proc->ftn->pid, cpu_id);
/* Kick scheduler */
if (cpu_id != ihk_mc_get_processor_id())
@ -2219,8 +2221,8 @@ findthread_and_lock(int pid, int tid, ihk_spinlock_t **savelock, unsigned long *
*savelock = &(v->runq_lock);
*irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
list_for_each_entry(p, &(v->runq), sched_list){
if(p->pid == pid &&
(tid == -1 || p->tid == tid)){
if(p->ftn->pid == pid &&
(tid == -1 || p->ftn->tid == tid)){
return p;
}
}


@ -265,7 +265,7 @@ void process_procfs_request(unsigned long rarg)
*/
ret = sscanf(p, "%d/", &pid);
if (ret == 1) {
if (pid != cpu_local_var(current)->pid) {
if (pid != cpu_local_var(current)->ftn->pid) {
/* We are not located in the proper cpu for some reason. */
dprintf("mismatched pid. We are %d, but requested pid is %d.\n",


@ -162,7 +162,7 @@ static void send_syscall(struct syscall_request *req, int cpu, int pid)
#ifdef SYSCALL_BY_IKC
packet.msg = SCD_MSG_SYSCALL_ONESIDE;
packet.ref = cpu;
packet.pid = pid ? pid : cpu_local_var(current)->pid;
packet.pid = pid ? pid : cpu_local_var(current)->ftn->pid;
packet.arg = scp->request_rpa;
dkprintf("send syscall, nr: %d, pid: %d\n", req->number, packet.pid);
@ -218,7 +218,7 @@ long do_syscall(struct syscall_request *req, ihk_mc_user_context_t *ctx,
if (res->status == STATUS_PAGE_FAULT) {
dkprintf("STATUS_PAGE_FAULT in syscall, pid: %d\n",
cpu_local_var(current)->pid);
cpu_local_var(current)->ftn->pid);
error = page_fault_process(get_cpu_local_var(cpu)->current,
(void *)res->fault_address,
res->fault_reason|PF_POPULATE);
@ -323,7 +323,7 @@ static int wait_zombie(struct process *proc, struct fork_tree_node *child, int *
static int wait_stopped(struct process *proc, struct fork_tree_node *child, int *status, int options)
{
dkprintf("wait_stopped,proc->pid=%d,child->pid=%d,options=%08x\n",
proc->pid, child->pid, options);
proc->ftn->pid, child->pid, options);
int ret;
/* Copy exit_status created in do_signal */
@ -381,14 +381,14 @@ SYSCALL_DECLARE(wait4)
struct process *proc = cpu_local_var(current);
struct fork_tree_node *child_iter, *next;
int pid = (int)ihk_mc_syscall_arg0(ctx);
int pgid = proc->pgid;
int pgid = proc->ftn->pgid;
int *status = (int *)ihk_mc_syscall_arg1(ctx);
int options = (int)ihk_mc_syscall_arg2(ctx);
int ret;
struct waitq_entry waitpid_wqe;
int empty = 1;
dkprintf("wait4,proc->pid=%d,pid=%d\n", proc->pid, pid);
dkprintf("wait4,proc->pid=%d,pid=%d\n", proc->ftn->pid, pid);
if (options & ~(WNOHANG | WUNTRACED | WCONTINUED | __WCLONE)) {
dkprintf("wait4: unexpected options(%x).\n", options);
ret = -EINVAL;
@ -535,7 +535,7 @@ terminate(int rc, int sig, ihk_mc_user_context_t *ctx)
struct process *parent_owner;
int error;
dkprintf("terminate,pid=%d\n", proc->pid);
dkprintf("terminate,pid=%d\n", proc->ftn->pid);
request.number = __NR_exit_group;
request.args[0] = ((rc & 0x00ff) << 8) | (sig & 0xff);
@ -567,7 +567,6 @@ terminate(int rc, int sig, ihk_mc_user_context_t *ctx)
if (ftn->parent) {
int parent_owner_pid;
ihk_mc_spinlock_lock_noirq(&ftn->lock);
ftn->pid = proc->pid;
ftn->exit_status = ((rc & 0x00ff) << 8) | (sig & 0xff);
ftn->status = PS_ZOMBIE;
ihk_mc_spinlock_unlock_noirq(&ftn->lock);
@ -579,7 +578,7 @@ terminate(int rc, int sig, ihk_mc_user_context_t *ctx)
/* Signal parent if still attached */
ihk_mc_spinlock_lock_noirq(&ftn->parent->lock);
parent_owner = ftn->parent->owner;
parent_owner_pid = parent_owner ? ftn->parent->owner->pid : 0;
parent_owner_pid = parent_owner ? ftn->parent->pid : 0;
ihk_mc_spinlock_unlock_noirq(&ftn->parent->lock);
if (parent_owner && (ftn->termsig != 0)) {
struct siginfo info;
@ -587,11 +586,11 @@ terminate(int rc, int sig, ihk_mc_user_context_t *ctx)
memset(&info, '\0', sizeof info);
info.si_signo = SIGCHLD;
info.si_code = sig? ((sig & 0x80)? CLD_DUMPED: CLD_KILLED): CLD_EXITED;
info._sifields._sigchld.si_pid = proc->pid;
info._sifields._sigchld.si_pid = proc->ftn->pid;
info._sifields._sigchld.si_status = ((rc & 0x00ff) << 8) | (sig & 0xff);
dkprintf("terminate,kill %d,target pid=%d\n",
ftn->termsig, parent_owner_pid);
error = do_kill(ftn->parent->owner->pid, -1, SIGCHLD, &info);
error = do_kill(ftn->parent->pid, -1, SIGCHLD, &info);
/*
sigchld_parent(ftn->parent->owner, 0);
*/
@ -606,8 +605,6 @@ terminate(int rc, int sig, ihk_mc_user_context_t *ctx)
ihk_mc_spinlock_unlock_noirq(&ftn->lock);
}
release_fork_tree_node(ftn);
proc->status = PS_EXITED;
release_process(proc);
schedule();
@ -636,7 +633,7 @@ SYSCALL_DECLARE(exit_group)
SYSCALL_HEADER;
#endif
dkprintf("sys_exit_group,pid=%d\n", cpu_local_var(current)->pid);
dkprintf("sys_exit_group,pid=%d\n", cpu_local_var(current)->ftn->pid);
terminate((int)ihk_mc_syscall_arg0(ctx), 0, ctx);
#if 0
struct process *proc = cpu_local_var(current);
@ -1305,7 +1302,7 @@ out:
SYSCALL_DECLARE(getpid)
{
return cpu_local_var(current)->pid;
return cpu_local_var(current)->ftn->pid;
}
void
@ -1315,16 +1312,16 @@ settid(struct process *proc, int mode, int newcpuid, int oldcpuid)
unsigned long rc;
ihk_mc_syscall_arg0(&ctx) = mode;
ihk_mc_syscall_arg1(&ctx) = proc->pid;
ihk_mc_syscall_arg1(&ctx) = proc->ftn->pid;
ihk_mc_syscall_arg2(&ctx) = newcpuid;
ihk_mc_syscall_arg3(&ctx) = oldcpuid;
rc = syscall_generic_forwarding(__NR_gettid, &ctx);
proc->tid = rc;
proc->ftn->tid = rc;
}
SYSCALL_DECLARE(gettid)
{
return cpu_local_var(current)->tid;
return cpu_local_var(current)->ftn->tid;
}
long do_arch_prctl(unsigned long code, unsigned long address)
@ -1403,9 +1400,9 @@ static int ptrace_report_exec(struct process *proc)
memset(&info, '\0', sizeof info);
info.si_signo = SIGCHLD;
info.si_code = CLD_TRAPPED;
info._sifields._sigchld.si_pid = proc->pid;
info._sifields._sigchld.si_pid = proc->ftn->pid;
info._sifields._sigchld.si_status = proc->ftn->exit_status;
rc = do_kill(proc->ftn->parent->owner->pid, -1, SIGCHLD, &info);
rc = do_kill(proc->ftn->parent->pid, -1, SIGCHLD, &info);
if(rc < 0) {
dkprintf("ptrace_report_exec,do_kill failed\n");
}
@ -1419,7 +1416,6 @@ static int ptrace_report_exec(struct process *proc)
peekuser(proc, NULL);
/* Sleep */
dkprintf("ptrace_report_exec,sleeping\n");
proc->status = PS_TRACED;
schedule();
dkprintf("ptrace_report_exec,woken up\n");
@ -1590,19 +1586,19 @@ unsigned long do_fork(int clone_flags, unsigned long newsp,
return -ENOMEM;
}
new->pgid = cpu_local_var(current)->pgid;
new->ftn->pgid = cpu_local_var(current)->ftn->pgid;
cpu_set(cpuid, &new->vm->cpu_set, &new->vm->cpu_set_lock);
if (clone_flags & CLONE_VM) {
new->pid = cpu_local_var(current)->pid;
new->ftn->pid = cpu_local_var(current)->ftn->pid;
settid(new, 1, cpuid, -1);
}
/* fork() a new process on the host */
else {
request1.number = __NR_fork;
new->pid = do_syscall(&request1, &ctx1, ihk_mc_get_processor_id(), 0);
if (new->pid == -1) {
new->ftn->pid = do_syscall(&request1, &ctx1, ihk_mc_get_processor_id(), 0);
if (new->ftn->pid == -1) {
kprintf("ERROR: forking host process\n");
/* TODO: clean-up new */
@ -1621,24 +1617,21 @@ unsigned long do_fork(int clone_flags, unsigned long newsp,
new->vm->region.user_start;
/* 3rd parameter denotes new rpgtable of host process */
request1.args[2] = virt_to_phys(new->vm->page_table);
request1.args[3] = new->pid;
request1.args[3] = new->ftn->pid;
dkprintf("fork(): requesting PTE clear and rpgtable (0x%lx) update\n",
request1.args[2]);
if (do_syscall(&request1, &ctx1, ihk_mc_get_processor_id(), new->pid)) {
if (do_syscall(&request1, &ctx1, ihk_mc_get_processor_id(), new->ftn->pid)) {
kprintf("ERROR: clearing PTEs in host process\n");
}
}
new->ftn->pid = new->pid;
new->ftn->pgid = new->pgid;
if (clone_flags & CLONE_PARENT_SETTID) {
dkprintf("clone_flags & CLONE_PARENT_SETTID: 0x%lX\n",
parent_tidptr);
*(int*)parent_tidptr = new->pid;
*(int*)parent_tidptr = new->ftn->pid;
}
if (clone_flags & CLONE_CHILD_CLEARTID) {
@ -1659,7 +1652,7 @@ unsigned long do_fork(int clone_flags, unsigned long newsp,
return -EFAULT;
}
*((int*)phys_to_virt(phys)) = new->tid;
*((int*)phys_to_virt(phys)) = new->ftn->tid;
}
if (clone_flags & CLONE_SETTLS) {
@ -1675,10 +1668,10 @@ unsigned long do_fork(int clone_flags, unsigned long newsp,
ihk_mc_syscall_ret(new->uctx) = 0;
dkprintf("clone: kicking scheduler!,cpuid=%d pid=%d tid=%d\n", cpuid, new->pid, new->tid);
dkprintf("clone: kicking scheduler!,cpuid=%d pid=%d tid=%d\n", cpuid, new->ftn->pid, new->ftn->tid);
runq_add_proc(new, cpuid);
return new->tid;
return new->ftn->tid;
}
SYSCALL_DECLARE(vfork)
@ -1699,7 +1692,7 @@ SYSCALL_DECLARE(set_tid_address)
cpu_local_var(current)->thread.clear_child_tid =
(int*)ihk_mc_syscall_arg0(ctx);
return cpu_local_var(current)->pid;
return cpu_local_var(current)->ftn->pid;
}
SYSCALL_DECLARE(kill)
@ -1713,7 +1706,7 @@ SYSCALL_DECLARE(kill)
memset(&info, '\0', sizeof info);
info.si_signo = sig;
info.si_code = SI_USER;
info._sifields._kill.si_pid = proc->pid;
info._sifields._kill.si_pid = proc->ftn->pid;
dkprintf("sys_kill,enter,pid=%d,sig=%d\n", pid, sig);
error = do_kill(pid, -1, sig, &info);
@ -1733,7 +1726,7 @@ SYSCALL_DECLARE(tgkill)
memset(&info, '\0', sizeof info);
info.si_signo = sig;
info.si_code = SI_TKILL;
info._sifields._kill.si_pid = proc->pid;
info._sifields._kill.si_pid = proc->ftn->pid;
if(tid <= 0)
return -EINVAL;
@ -2229,7 +2222,7 @@ SYSCALL_DECLARE(futex)
SYSCALL_DECLARE(exit)
{
struct process *proc = cpu_local_var(current);
dkprintf("sys_exit,pid=%d\n", proc->pid);
dkprintf("sys_exit,pid=%d\n", proc->ftn->pid);
#ifdef DCFA_KMOD
do_mod_exit((int)ihk_mc_syscall_arg0(ctx));
@ -2250,7 +2243,7 @@ SYSCALL_DECLARE(exit)
FUTEX_WAKE, 1, 0, NULL, 0, 0);
}
proc->status = PS_ZOMBIE;
proc->ftn->status = PS_ZOMBIE;
release_fork_tree_node(proc->ftn);
release_process(proc);
@ -2310,19 +2303,6 @@ static int ptrace_wakeup_sig(int pid, long request, long data) {
}
ihk_mc_spinlock_unlock(savelock, irqstate);
error = sched_wakeup_process(child, PS_TRACED | PS_STOPPED);
if (error < 0) {
goto out;
}
ihk_mc_spinlock_lock_noirq(&child->ftn->lock);
child->ftn->exit_status = data;
if (child->ftn->status & PS_TRACED) {
xchg4((int *)(&child->ftn->status), PS_RUNNING);
}
ihk_mc_spinlock_unlock_noirq(&child->ftn->lock);
if (data > 64 || data < 0) {
error = -EINVAL;
goto out;
@ -2338,7 +2318,7 @@ static int ptrace_wakeup_sig(int pid, long request, long data) {
}
break;
case PTRACE_CONT:
if(data != 0) {
if(data != 0 && data != SIGSTOP) {
struct process *proc;
/* TODO: Tracing process replace the original
@ -2347,7 +2327,7 @@ static int ptrace_wakeup_sig(int pid, long request, long data) {
memset(&info, '\0', sizeof info);
info.si_signo = data;
info.si_code = SI_USER;
info._sifields._kill.si_pid = proc->pid;
info._sifields._kill.si_pid = proc->ftn->pid;
error = do_kill(pid, -1, data, &info);
if (error < 0) {
goto out;
@ -2358,6 +2338,10 @@ static int ptrace_wakeup_sig(int pid, long request, long data) {
break;
}
error = sched_wakeup_process(child, PS_TRACED | PS_STOPPED);
if (error < 0) {
goto out;
}
out:
return error;
}
@ -2374,7 +2358,7 @@ static long ptrace_pokeuser(int pid, long addr, long data)
child = findthread_and_lock(pid, -1, &savelock, &irqstate);
if (!child)
return -ESRCH;
if(child->status == PS_TRACED){
if(child->ftn->status == PS_TRACED){
memcpy((char *)child->userp + addr, &data, 8);
rc = 0;
}
@ -2396,7 +2380,7 @@ static long ptrace_peekuser(int pid, long addr, long data)
child = findthread_and_lock(pid, -1, &savelock, &irqstate);
if (!child)
return -ESRCH;
if(child->status == PS_TRACED){
if(child->ftn->status == PS_TRACED){
if(copy_to_user(child, p, (char *)child->userp + addr, 8))
rc = -EFAULT;
else
@ -2418,7 +2402,7 @@ static long ptrace_getregs(int pid, long data)
child = findthread_and_lock(pid, -1, &savelock, &irqstate);
if (!child)
return -ESRCH;
if(child->status == PS_TRACED){
if(child->ftn->status == PS_TRACED){
if(copy_to_user(child, regs, child->userp, sizeof(struct user_regs_struct)))
rc = -EFAULT;
else
@ -2588,12 +2572,12 @@ SYSCALL_DECLARE(sched_setaffinity)
CPU_SET(cpu_id, &cpu_set);
if(tid == 0)
tid = cpu_local_var(current)->tid;
tid = cpu_local_var(current)->ftn->tid;
for (cpu_id = 0; cpu_id < num_processors; cpu_id++) {
irqstate = ihk_mc_spinlock_lock(&get_cpu_local_var(cpu_id)->runq_lock);
list_for_each_entry(thread, &get_cpu_local_var(cpu_id)->runq, sched_list)
if (thread->pid && thread->tid == tid)
if (thread->ftn->pid && thread->ftn->tid == tid)
goto found; /* without unlocking runq_lock */
ihk_mc_spinlock_unlock(&get_cpu_local_var(cpu_id)->runq_lock, irqstate);
}
@ -2635,13 +2619,13 @@ SYSCALL_DECLARE(sched_getaffinity)
len = MIN2(len, sizeof(k_cpu_set));
if(tid == 0)
tid = cpu_local_var(current)->tid;
tid = cpu_local_var(current)->ftn->tid;
for (i = 0; i < num_processors && !found; i++) {
struct process *thread;
irqstate = ihk_mc_spinlock_lock(&get_cpu_local_var(i)->runq_lock);
list_for_each_entry(thread, &get_cpu_local_var(i)->runq, sched_list) {
if (thread->pid && thread->tid == tid) {
if (thread->ftn->pid && thread->ftn->tid == tid) {
found = 1;
memcpy(&k_cpu_set, &thread->cpu_set, sizeof(k_cpu_set));
break;


@ -89,14 +89,14 @@ waitq_prepare_to_wait(waitq_t *waitq, waitq_entry_t *entry, int state)
ihk_mc_spinlock_lock_noirq(&waitq->lock);
if (list_empty(&entry->link))
list_add(&entry->link, &waitq->waitq);
cpu_local_var(current)->status = state;
cpu_local_var(current)->ftn->status = state;
ihk_mc_spinlock_unlock_noirq(&waitq->lock);
}
void
waitq_finish_wait(waitq_t *waitq, waitq_entry_t *entry)
{
cpu_local_var(current)->status = PS_RUNNING;
cpu_local_var(current)->ftn->status = PS_RUNNING;
waitq_remove_entry(waitq, entry);
}