Process table traversal must be performed with interrupts disabled (under interrupt inhibition)

Add process-table lookup helpers (findthread_and_lock / process_unlock)
This commit is contained in:
Tomoki Shirasawa
2014-08-25 13:24:06 +09:00
parent d2537e0963
commit bc8b441358
4 changed files with 113 additions and 37 deletions

View File

@ -277,4 +277,7 @@ void check_need_resched(void);
void cpu_set(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock);
void cpu_clear(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock);
struct process *findthread_and_lock(int pid, int tid, void *savelock, unsigned long *irqstate);
void process_unlock(void *savelock, unsigned long irqstate);
#endif

View File

@ -1754,21 +1754,21 @@ void sched_init(void)
#endif
}
static void double_rq_lock(struct cpu_local_var *v1, struct cpu_local_var *v2)
static void double_rq_lock(struct cpu_local_var *v1, struct cpu_local_var *v2, unsigned long *irqstate)
{
if (v1 < v2) {
ihk_mc_spinlock_lock_noirq(&v1->runq_lock);
*irqstate = ihk_mc_spinlock_lock(&v1->runq_lock);
ihk_mc_spinlock_lock_noirq(&v2->runq_lock);
} else {
ihk_mc_spinlock_lock_noirq(&v2->runq_lock);
*irqstate = ihk_mc_spinlock_lock(&v2->runq_lock);
ihk_mc_spinlock_lock_noirq(&v1->runq_lock);
}
}
static void double_rq_unlock(struct cpu_local_var *v1, struct cpu_local_var *v2)
static void double_rq_unlock(struct cpu_local_var *v1, struct cpu_local_var *v2, unsigned long irqstate)
{
ihk_mc_spinlock_unlock_noirq(&v1->runq_lock);
ihk_mc_spinlock_unlock_noirq(&v2->runq_lock);
ihk_mc_spinlock_unlock(&v2->runq_lock, irqstate);
}
struct migrate_request {
@ -1782,6 +1782,7 @@ static void do_migrate(void)
int cur_cpu_id = ihk_mc_get_processor_id();
struct cpu_local_var *cur_v = get_cpu_local_var(cur_cpu_id);
struct migrate_request *req, *tmp;
unsigned long irqstate = 0;
ihk_mc_spinlock_lock_noirq(&cur_v->migq_lock);
list_for_each_entry_safe(req, tmp, &cur_v->migq, list) {
@ -1805,7 +1806,7 @@ static void do_migrate(void)
/* 2. migrate thread */
v = get_cpu_local_var(cpu_id);
double_rq_lock(cur_v, v);
double_rq_lock(cur_v, v, &irqstate);
list_del(&req->proc->sched_list);
cur_v->runq_len -= 1;
old_cpu_id = req->proc->cpu_id;
@ -1821,7 +1822,7 @@ static void do_migrate(void)
if (v->runq_len == 1)
ihk_mc_interrupt_cpu(get_x86_cpu_local_variable(cpu_id)->apic_id, 0xd1);
double_rq_unlock(cur_v, v);
double_rq_unlock(cur_v, v, irqstate);
ack:
waitq_wakeup(&req->wq);
@ -2057,3 +2058,31 @@ void runq_del_proc(struct process *proc, int cpu_id)
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
}
/*
 * Scan every CPU's run queue for the thread matching (pid, tid).
 *
 * On success the matching struct process is returned with that CPU's
 * runq_lock STILL HELD and interrupts disabled; the lock pointer is
 * stored through savelock and the saved flags through *irqstate so the
 * caller can release them with process_unlock().  Returns NULL when no
 * such thread is queued on any CPU (no lock is held in that case,
 * though *savelock / *irqstate may contain stale values from the last
 * CPU probed).
 */
struct process *
findthread_and_lock(int pid, int tid, void *savelock, unsigned long *irqstate)
{
	extern int num_processors;
	int cpu;

	for (cpu = 0; cpu < num_processors; ++cpu) {
		struct cpu_local_var *clv = get_cpu_local_var(cpu);
		struct process *proc;

		/* Record which lock we are about to hold for the caller. */
		*(ihk_spinlock_t **)savelock = &clv->runq_lock;
		*irqstate = ihk_mc_spinlock_lock(&clv->runq_lock);
		list_for_each_entry(proc, &clv->runq, sched_list) {
			if (proc->pid == pid && proc->tid == tid)
				return proc;	/* runq_lock intentionally kept held */
		}
		ihk_mc_spinlock_unlock(&clv->runq_lock, *irqstate);
	}
	return NULL;
}
/*
 * Release the run queue lock acquired by findthread_and_lock() and
 * restore the interrupt state saved at lock time.
 *
 * savelock: the ihk_spinlock_t pointer that findthread_and_lock()
 *           stored through its savelock argument.
 * irqstate: the flags findthread_and_lock() returned via *irqstate.
 *
 * NOTE(review): findthread_and_lock() writes the lock pointer *through*
 * savelock, while this function casts savelock itself to the lock —
 * callers must therefore pass the stored pointer value here, not the
 * address it was stored at; verify at call sites.
 */
void
process_unlock(void *savelock, unsigned long irqstate)
{
ihk_mc_spinlock_unlock((ihk_spinlock_t *)savelock, irqstate);
}

View File

@ -1992,6 +1992,7 @@ SYSCALL_DECLARE(sched_setaffinity)
cpu_set_t k_cpu_set, cpu_set;
struct process *thread;
int cpu_id;
unsigned long irqstate;
if (sizeof(k_cpu_set) > len) {
kprintf("%s:%d\n Too small buffer.", __FILE__, __LINE__);
@ -2012,11 +2013,11 @@ SYSCALL_DECLARE(sched_setaffinity)
CPU_SET(cpu_id, &cpu_set);
for (cpu_id = 0; cpu_id < num_processors; cpu_id++) {
ihk_mc_spinlock_lock_noirq(&get_cpu_local_var(cpu_id)->runq_lock);
irqstate = ihk_mc_spinlock_lock(&get_cpu_local_var(cpu_id)->runq_lock);
list_for_each_entry(thread, &get_cpu_local_var(cpu_id)->runq, sched_list)
if (thread->pid && thread->tid == tid)
goto found; /* without unlocking runq_lock */
ihk_mc_spinlock_unlock_noirq(&get_cpu_local_var(cpu_id)->runq_lock);
ihk_mc_spinlock_unlock(&get_cpu_local_var(cpu_id)->runq_lock, irqstate);
}
kprintf("%s:%d Thread not found.\n", __FILE__, __LINE__);
return -ESRCH;
@ -2026,12 +2027,12 @@ found:
if (!CPU_ISSET(cpu_id, &thread->cpu_set)) {
hold_process(thread);
ihk_mc_spinlock_unlock_noirq(&get_cpu_local_var(cpu_id)->runq_lock);
ihk_mc_spinlock_unlock(&get_cpu_local_var(cpu_id)->runq_lock, irqstate);
sched_request_migrate(cpu_id, thread);
release_process(thread);
return 0;
} else {
ihk_mc_spinlock_unlock_noirq(&get_cpu_local_var(cpu_id)->runq_lock);
ihk_mc_spinlock_unlock(&get_cpu_local_var(cpu_id)->runq_lock, irqstate);
return 0;
}
}
@ -2046,6 +2047,7 @@ SYSCALL_DECLARE(sched_getaffinity)
int ret;
int found = 0;
int i;
unsigned long irqstate;
if (sizeof(k_cpu_set) > len) {
kprintf("%s:%d Too small buffer.\n", __FILE__, __LINE__);
@ -2056,7 +2058,7 @@ SYSCALL_DECLARE(sched_getaffinity)
extern int num_processors;
for (i = 0; i < num_processors && !found; i++) {
struct process *thread;
ihk_mc_spinlock_lock_noirq(&get_cpu_local_var(i)->runq_lock);
irqstate = ihk_mc_spinlock_lock(&get_cpu_local_var(i)->runq_lock);
list_for_each_entry(thread, &get_cpu_local_var(i)->runq, sched_list) {
if (thread->pid && thread->tid == tid) {
found = 1;
@ -2064,7 +2066,7 @@ SYSCALL_DECLARE(sched_getaffinity)
break;
}
}
ihk_mc_spinlock_unlock_noirq(&get_cpu_local_var(i)->runq_lock);
ihk_mc_spinlock_unlock(&get_cpu_local_var(i)->runq_lock, irqstate);
}
if (!found) {
kprintf("%s:%d Thread not found.\n", __FILE__, __LINE__);