From 67f5a1d4e063b2ee965b58983e4e7dbfcfb004f0 Mon Sep 17 00:00:00 2001
From: "Shiratori, Takehiro"
Date: Thu, 5 Mar 2020 15:51:28 +0900
Subject: [PATCH] migrate-cpu: Prevent migration target from calling
 schedule() twice

Symptom: A thread could call schedule() twice.

Cause:
(1) The migrator raises the rescheduling flag
(2) The thread calls check_need_resched() for a reason other than the
    migrate IPI, e.g., response to system call offload. It finds that
    the flag is set and tries to call schedule().
(3) The thread is interrupted by the migrate IPI, finds that the flag
    is set, and calls schedule() in the interrupt context.
(4) The thread resumes execution and calls schedule()

Solution:
(1) Reset the rescheduling flag when checking it and it's set
(2) Set it again if it's decided not to call schedule()

Change-Id: I5376662d0b02ca4ebb29b42732e347f3b82d766d
Refs: #1400
---
 kernel/process.c | 20 +++++++++++++++++---
 kernel/syscall.c | 13 +++++++++++++
 2 files changed, 30 insertions(+), 3 deletions(-)

diff --git a/kernel/process.c b/kernel/process.c
index bd51d2bf..6b6fd492 100644
--- a/kernel/process.c
+++ b/kernel/process.c
@@ -3381,6 +3381,7 @@ void spin_sleep_or_schedule(void)
 	v = get_this_cpu_local_var();
 	if (v->flags & CPU_FLAG_NEED_RESCHED ||
 	    v->runq_len > 1) {
+		v->flags &= ~CPU_FLAG_NEED_RESCHED;
 		do_schedule = 1;
 	}
 
@@ -3406,6 +3407,11 @@ void spin_sleep_or_schedule(void)
 	}
 
 	if (woken) {
+		if (do_schedule) {
+			irqstate = ihk_mc_spinlock_lock(&v->runq_lock);
+			v->flags |= CPU_FLAG_NEED_RESCHED;
+			ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
+		}
 		return;
 	}
 
@@ -3432,6 +3438,16 @@ void schedule(void)
 	if (cpu_local_var(no_preempt)) {
 		kprintf("%s: WARNING can't schedule() while no preemption, cnt: %d\n",
 			__FUNCTION__, cpu_local_var(no_preempt));
+
+		irqstate = cpu_disable_interrupt_save();
+		ihk_mc_spinlock_lock_noirq(
+			&(get_this_cpu_local_var()->runq_lock));
+		v = get_this_cpu_local_var();
+
+		v->flags |= CPU_FLAG_NEED_RESCHED;
+
+		ihk_mc_spinlock_unlock_noirq(&v->runq_lock);
+		cpu_restore_interrupt(irqstate);
 		return;
 	}
 
@@ -3444,8 +3460,6 @@ void schedule(void)
 	prev = v->current;
 	prevpid = v->prevpid;
 
-	v->flags &= ~CPU_FLAG_NEED_RESCHED;
-
 	/* All runnable processes are on the runqueue */
 	if (prev && prev != &cpu_local_var(idle)) {
 		list_del(&prev->sched_list);
@@ -3576,7 +3590,6 @@ void schedule(void)
 		/* Have we migrated to another core meanwhile? */
 		if (v != get_this_cpu_local_var()) {
 			v = get_this_cpu_local_var();
-			v->flags &= ~CPU_FLAG_NEED_RESCHED;
 		}
 	}
 	else {
@@ -3610,6 +3623,7 @@ void check_need_resched(void)
 		ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
 		return;
 	}
+	v->flags &= ~CPU_FLAG_NEED_RESCHED;
 	ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
 	schedule();
 }
diff --git a/kernel/syscall.c b/kernel/syscall.c
index 6167ccdc..fa341445 100644
--- a/kernel/syscall.c
+++ b/kernel/syscall.c
@@ -259,6 +259,7 @@ long do_syscall(struct syscall_request *req, int cpu)
 	if (v->flags & CPU_FLAG_NEED_RESCHED ||
 	    v->runq_len > 1 ||
 	    req->number == __NR_sched_setaffinity) {
+		v->flags &= ~CPU_FLAG_NEED_RESCHED;
 		do_schedule = 1;
 	}
 
@@ -285,6 +286,16 @@ long do_syscall(struct syscall_request *req, int cpu)
 			schedule();
 			waitq_finish_wait(&thread->scd_wq, &scd_wq_entry);
 		}
+		else {
+			if (do_schedule) {
+				runq_irqstate =
+					ihk_mc_spinlock_lock(
+						&v->runq_lock);
+				v->flags |= CPU_FLAG_NEED_RESCHED;
+				ihk_mc_spinlock_unlock(
+					&v->runq_lock, runq_irqstate);
+			}
+		}
 		cpu_restore_interrupt(flags);
 	}
 
@@ -4969,6 +4980,7 @@ do_sigsuspend(struct thread *thread, const sigset_t *set)
 
 		v = get_this_cpu_local_var();
 		if (v->flags & CPU_FLAG_NEED_RESCHED) {
+			v->flags &= ~CPU_FLAG_NEED_RESCHED;
 			do_schedule = 1;
 		}
 
@@ -7934,6 +7946,7 @@ SYSCALL_DECLARE(sched_yield)
 	runq_irqstate = ihk_mc_spinlock_lock(&v->runq_lock);
 	if (v->flags & CPU_FLAG_NEED_RESCHED ||
 	    v->runq_len > 1) {
+		v->flags &= ~CPU_FLAG_NEED_RESCHED;
 		do_schedule = 1;
 	}
 