diff --git a/kernel/include/process.h b/kernel/include/process.h
index 9d220785..3ee91ed2 100644
--- a/kernel/include/process.h
+++ b/kernel/include/process.h
@@ -520,6 +520,7 @@ struct process {
 	unsigned long saved_auxv[AUXV_LEN];
 	char *saved_cmdline;
 	long saved_cmdline_len;
+	cpu_set_t cpu_set;
 
 	/* Store ptrace flags.
 	 * The lower 8 bits are PTRACE_O_xxx of the PTRACE_SETOPTIONS request.
diff --git a/kernel/process.c b/kernel/process.c
index 49db5664..da81111d 100644
--- a/kernel/process.c
+++ b/kernel/process.c
@@ -281,6 +281,7 @@ struct thread *create_thread(unsigned long user_pc,
 			dkprintf("%s: pid: %d, CPU: %d\n",
 				__FUNCTION__, proc->pid, cpu);
 			CPU_SET(cpu, &thread->cpu_set);
+			CPU_SET(cpu, &proc->cpu_set);
 			cpu_set_empty = 0;
 		}
 
@@ -292,6 +293,7 @@ struct thread *create_thread(unsigned long user_pc,
 		infop = ihk_mc_get_cpu_info();
 		for (i = 0; i < infop->ncpus; ++i) {
 			CPU_SET(i, &thread->cpu_set);
+			CPU_SET(i, &proc->cpu_set);
 		}
 	}
 
diff --git a/kernel/syscall.c b/kernel/syscall.c
index e7b908f8..47e893c7 100644
--- a/kernel/syscall.c
+++ b/kernel/syscall.c
@@ -6188,26 +6188,12 @@ SYSCALL_DECLARE(sched_setaffinity)
 	len = MIN2(len, sizeof(k_cpu_set));
 
 	if (copy_from_user(&k_cpu_set, u_cpu_set, len)) {
-		dkprintf("%s: error: copy_from_user failed for %p:%d\n", __FUNCTION__, u_cpu_set, len);
+		dkprintf("%s: error: copy_from_user failed for %p:%d\n",
+			__FUNCTION__, u_cpu_set, len);
 		return -EFAULT;
 	}
 
-
-	// XXX: We should build something like cpu_available_mask in advance
-	CPU_ZERO(&cpu_set);
-	for (cpu_id = 0; cpu_id < num_processors; cpu_id++) {
-		if (CPU_ISSET(cpu_id, &k_cpu_set)) {
-			CPU_SET(cpu_id, &cpu_set);
-			dkprintf("sched_setaffinity(): tid %d: setting target core %d\n",
-				cpu_local_var(current)->tid, cpu_id);
-			empty_set = 0;
-		}
-	}
-	/* Empty target set? */
-	if (empty_set) {
-		return -EINVAL;
-	}
-
+	/* Find thread */
 	if (tid == 0) {
 		tid = cpu_local_var(current)->tid;
 		thread = cpu_local_var(current);
@@ -6219,21 +6205,45 @@ SYSCALL_DECLARE(sched_setaffinity)
 		struct thread *mythread = cpu_local_var(current);
 
 		thread = find_thread(0, tid, &lock);
-		if(!thread)
+
+		if (!thread)
 			return -ESRCH;
-		if(mythread->proc->euid != 0 &&
-		   mythread->proc->euid != thread->proc->ruid &&
-		   mythread->proc->euid != thread->proc->euid){
+
+		if (mythread->proc->euid != 0 &&
+		    mythread->proc->euid != thread->proc->ruid &&
+		    mythread->proc->euid != thread->proc->euid) {
 			thread_unlock(thread, &lock);
 			return -EPERM;
 		}
+
 		hold_thread(thread);
 		thread_unlock(thread, &lock);
 		cpu_id = thread->cpu_id;
 	}
 
+	/* Only allow cores that are also in process' cpu_set */
+	CPU_ZERO(&cpu_set);
+	for (cpu_id = 0; cpu_id < num_processors; cpu_id++) {
+		if (CPU_ISSET(cpu_id, &k_cpu_set) &&
+			CPU_ISSET(cpu_id, &thread->proc->cpu_set)) {
+			CPU_SET(cpu_id, &cpu_set);
+			dkprintf("sched_setaffinity(): tid %d: setting target core %d\n",
+				cpu_local_var(current)->tid, cpu_id);
+			empty_set = 0;
+		}
+	}
+
+	/* Empty target set? */
+	if (empty_set) {
+		release_thread(thread);
+		return -EINVAL;
+	}
+
+	/* Update new affinity mask */
 	memcpy(&thread->cpu_set, &cpu_set, sizeof(cpu_set));
 
+	/* Current core not part of new mask? */
+	cpu_id = thread->cpu_id;
 	if (!CPU_ISSET(cpu_id, &thread->cpu_set)) {
 		dkprintf("sched_setaffinity(): tid %d sched_request_migrate: %d\n",
 			cpu_local_var(current)->tid, cpu_id);
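
For reference, a minimal userspace sketch of the behaviour the kernel/syscall.c change aims for: the requested mask is intersected with the process' cpu_set, so cores the process was never given should not show up in the effective affinity, and only a request with no overlap at all should fail with -EINVAL. This program is not part of the patch; it assumes Linux-compatible sched_setaffinity()/sched_getaffinity() semantics when run on McKernel.

/*
 * Hypothetical check, not part of the patch above: request a superset of
 * the inherited mask and verify no core outside the process' cpu_set is
 * granted.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t inherited, requested, effective;
	int i;

	/* The inherited affinity reflects the thread's cpu_set. */
	if (sched_getaffinity(0, sizeof(inherited), &inherited)) {
		perror("sched_getaffinity");
		return 1;
	}

	/* Ask for every representable core, including ones never granted. */
	CPU_ZERO(&requested);
	for (i = 0; i < CPU_SETSIZE; i++)
		CPU_SET(i, &requested);

	/* With the patch, extra cores are filtered against proc->cpu_set;
	 * only a request with no overlap should fail with EINVAL. */
	if (sched_setaffinity(0, sizeof(requested), &requested)) {
		perror("sched_setaffinity");
		return 1;
	}

	if (sched_getaffinity(0, sizeof(effective), &effective)) {
		perror("sched_getaffinity");
		return 1;
	}

	/* The effective mask must stay within the inherited one. */
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (CPU_ISSET(i, &effective) && !CPU_ISSET(i, &inherited)) {
			fprintf(stderr, "core %d leaked outside the process set\n", i);
			return 1;
		}
	}

	printf("affinity stayed within the process' cpu_set\n");
	return 0;
}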