fix thread migration code (i.e., sched_setaffinity())
- moved migration code into idle() process and updated schedule() to detect when a thread has moved to another CPU in order to avoid doing housekeeping on behalf of the original one
- start CPU head from core 0
- keeps track of nested interrupts
@@ -612,9 +612,8 @@ void handle_interrupt(int vector, struct x86_user_context *regs)
 	struct ihk_mc_interrupt_handler *h;
 	struct cpu_local_var *v = get_this_cpu_local_var();
 
-	v->in_interrupt = 1;
-
 	lapic_ack();
+	++v->in_interrupt;
 
 	dkprintf("CPU[%d] got interrupt, vector: %d, RIP: 0x%lX\n",
 		ihk_mc_get_processor_id(), vector, regs->gpr.rip);
@@ -684,7 +683,7 @@ void handle_interrupt(int vector, struct x86_user_context *regs)
 	check_signal(0, regs, 0);
 	check_need_resched();
 
-	v->in_interrupt = 0;
+	--v->in_interrupt;
 }
 
 void gpe_handler(struct x86_user_context *regs)
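Switching in_interrupt from a plain flag to an increment/decrement pair matters once interrupts nest: with a boolean, returning from an inner interrupt would clear the flag while the outer handler is still running, so tests like the (now removed) !v->in_interrupt check in schedule() could misfire. A minimal user-space sketch of the counting pattern (the names mirror the kernel's, but the code below is only an illustration):

	#include <assert.h>
	#include <stdio.h>

	struct cpu_local_var { int in_interrupt; };
	static struct cpu_local_var v;

	static void interrupt_enter(void) { ++v.in_interrupt; }
	static void interrupt_exit(void)  { --v.in_interrupt; }

	static void inner_handler(void)
	{
		interrupt_enter();
		/* ... inner work ... */
		interrupt_exit();
	}

	static void outer_handler(void)
	{
		interrupt_enter();
		inner_handler();        /* a nested interrupt arrives here */
		assert(v.in_interrupt); /* counter still non-zero: we know we are in interrupt context */
		interrupt_exit();
	}

	int main(void)
	{
		outer_handler();
		printf("nesting balanced, in_interrupt = %d\n", v.in_interrupt); /* prints 0 */
		return 0;
	}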
@@ -79,7 +79,7 @@ Core with BSP HW ID 226 boots next and is given SW-ID of 226.
 Core with BSP HW ID 227 boots next and is given SW-ID of 227.
 */
 static ihk_spinlock_t cpuid_head_lock = 0;
-static int cpuid_head = 1;
+static int cpuid_head = 0;
 
 /* archtecture-depended syscall handlers */
 int obtain_clone_cpuid() {
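The body of obtain_clone_cpuid() is not part of this diff; the change only moves the starting point of cpuid_head from core 1 to core 0, which suggests a round-robin scan over the cores when placing a newly cloned thread. A rough user-space sketch of that kind of selection, assuming a simple wrap-around scan (NUM_PROCESSORS and the omitted cpuid_head_lock locking are stand-ins, not the actual IHK code):

	#include <stdio.h>

	#define NUM_PROCESSORS 8            /* stand-in for the real core count */

	static int cpuid_head = 0;          /* after this commit: start handing out core 0 first */

	/* pick the next CPU for a cloned thread, wrapping around the core list */
	static int obtain_clone_cpuid_sketch(void)
	{
		/* in the kernel this would run under cpuid_head_lock */
		int cpu = cpuid_head;
		cpuid_head = (cpuid_head + 1) % NUM_PROCESSORS;
		return cpu;
	}

	int main(void)
	{
		for (int i = 0; i < 10; ++i)
			printf("clone %d -> CPU %d\n", i, obtain_clone_cpuid_sketch());
		return 0;
	}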
@@ -2042,6 +2042,8 @@ void cpu_clear(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock)
 	ihk_mc_spinlock_unlock(lock, flags);
 }
 
+static void do_migrate(void);
+
 static void idle(void)
 {
 	struct cpu_local_var *v = get_this_cpu_local_var();
@@ -2053,6 +2055,13 @@ static void idle(void)
 	while (1) {
 		schedule();
 		cpu_disable_interrupt();
+
+		/* See if we need to migrate a process somewhere */
+		if (v->flags & CPU_FLAG_NEED_MIGRATE) {
+			v->flags &= ~CPU_FLAG_NEED_MIGRATE;
+			do_migrate();
+		}
+
 		/*
 		 * XXX: KLUDGE: It is desirable to be resolved in schedule().
 		 *
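The requesting side of this handshake (e.g., the sched_setaffinity() path) is not shown here; the idea is that some other context raises CPU_FLAG_NEED_MIGRATE and the idle loop above then performs do_migrate() outside of interrupt context. A simplified sketch of that flag handshake, with invented names for everything that is not in the diff:

	#include <stdio.h>

	#define CPU_FLAG_NEED_MIGRATE 0x1          /* the value is illustrative */

	struct cpu_local_var { unsigned long flags; };
	static struct cpu_local_var cpu_local[4];

	/* requester side: mark the target CPU so its idle loop runs the migration */
	static void request_migrate(int cpu)
	{
		cpu_local[cpu].flags |= CPU_FLAG_NEED_MIGRATE;
		/* the real kernel would also have to wake that core, e.g. via an IPI */
	}

	/* idle-loop side: the check this commit adds to idle() */
	static void idle_poll(int cpu)
	{
		struct cpu_local_var *v = &cpu_local[cpu];

		if (v->flags & CPU_FLAG_NEED_MIGRATE) {
			v->flags &= ~CPU_FLAG_NEED_MIGRATE;
			printf("CPU %d: do_migrate()\n", cpu);
		}
	}

	int main(void)
	{
		request_migrate(0);
		idle_poll(0);   /* performs the pending migration */
		idle_poll(0);   /* nothing left to do */
		return 0;
	}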
@@ -2205,14 +2214,15 @@ ack:
 
 void schedule(void)
 {
-	struct cpu_local_var *v = get_this_cpu_local_var();
+	struct cpu_local_var *v;
 	struct process *next, *prev, *proc, *tmp = NULL;
 	int switch_ctx = 0;
 	unsigned long irqstate;
 	struct process *last;
 
 redo:
-	irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
+	irqstate = ihk_mc_spinlock_lock(&(get_this_cpu_local_var()->runq_lock));
+	v = get_this_cpu_local_var();
 
 	next = NULL;
 	prev = v->current;
@@ -2284,7 +2294,13 @@ redo:
 		last = ihk_mc_switch_context(NULL, &next->ctx, prev);
 	}
 
-	if ((last != NULL) && (last->ftn->status & (PS_ZOMBIE | PS_EXITED))) {
+	/* Have we migrated to another core meanwhile? */
+	if (v != get_this_cpu_local_var()) {
+		dkprintf("migrated, skipping freeing last\n");
+		goto redo;
+	}
+
+	if ((last != NULL) && (last->ftn) && (last->ftn->status & (PS_ZOMBIE | PS_EXITED))) {
 		free_process_memory(last);
 		release_process(last);
 	}
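The v != get_this_cpu_local_var() test relies on v having been read before ihk_mc_switch_context(): if the thread is resumed on a different core, every pointer to per-CPU state captured before blocking is stale, and the function jumps back to redo to pick up the correct run queue. The same idiom in a stripped-down form (the per-CPU lookup is simulated here):

	#include <stdio.h>

	struct cpu_local_var { int dummy; };
	static struct cpu_local_var cpu_local[2];
	static int current_cpu;                 /* simulated "which core is this thread on" */

	static struct cpu_local_var *get_this_cpu_local_var_sketch(void)
	{
		return &cpu_local[current_cpu];
	}

	int main(void)
	{
		struct cpu_local_var *v = get_this_cpu_local_var_sketch();

		current_cpu = 1;                /* pretend we blocked and woke up on another core */

		if (v != get_this_cpu_local_var_sketch())
			printf("migrated: cached per-CPU pointer is stale, redo the housekeeping\n");
		return 0;
	}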
@@ -2292,12 +2308,6 @@ redo:
 	else {
 		ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
 	}
-
-	if (v->flags & CPU_FLAG_NEED_MIGRATE && !v->in_interrupt) {
-		v->flags &= ~CPU_FLAG_NEED_MIGRATE;
-		do_migrate();
-		goto redo;
-	}
 }
 
 void