Prevent one CPU from getting chosen by concurrent forks

The same CPU could be chosen by multiple concurrent forks because CPU
selection and runq addition are not done atomically. This fix makes the
two steps atomic.

Change-Id: Ib6b75ad655789385d13207e0a47fa4717dec854a
Author: Masamichi Takagi
Date:   2018-09-04 09:33:10 +09:00
parent 82914c6a2e
commit 0b0b7b03d7
4 changed files with 26 additions and 10 deletions
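The race in the pre-patch code: two forks can run the selection loop back to back, both see the same CPU as having the shortest run queue (the first fork's choice is invisible until its thread is actually enqueued), and both pick it. Below is a minimal userspace sketch of the counter-based fix; struct fake_cpu, pick_cpu_reserving() and enqueue_on() are illustrative stand-ins, not the kernel's actual types.

#include <pthread.h>

/* Illustrative stand-ins for the kernel's per-CPU data. */
struct fake_cpu {
	int runq_len;		/* threads already on the run queue */
	int runq_reserved;	/* threads a fork has promised to enqueue */
};

static pthread_mutex_t reservation_lock = PTHREAD_MUTEX_INITIALIZER;

/* Select and reserve in one critical section: a second fork running
 * before the first thread is enqueued still sees the reservation. */
int pick_cpu_reserving(struct fake_cpu *cpus, int n)
{
	int best = -1, best_len = -1;
	int i;

	pthread_mutex_lock(&reservation_lock);
	for (i = 0; i < n; i++) {
		int load = cpus[i].runq_len + cpus[i].runq_reserved;

		if (best_len == -1 || load < best_len) {
			best_len = load;
			best = i;
		}
	}
	if (best != -1)
		cpus[best].runq_reserved++;	/* visible to the next fork */
	pthread_mutex_unlock(&reservation_lock);
	return best;
}

/* The later enqueue converts the reservation into a real queue entry. */
void enqueue_on(struct fake_cpu *c)
{
	pthread_mutex_lock(&reservation_lock);
	c->runq_len++;
	c->runq_reserved--;
	pthread_mutex_unlock(&reservation_lock);
}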

@@ -93,21 +93,23 @@ extern int num_processors;
 int obtain_clone_cpuid(cpu_set_t *cpu_set) {
 	int min_queue_len = -1;
 	int cpu, min_cpu = -1;
+	unsigned long irqstate;
 
+	irqstate = ihk_mc_spinlock_lock(&runq_reservation_lock);
 	/* Find the first allowed core with the shortest run queue */
 	for (cpu = 0; cpu < num_processors; ++cpu) {
 		struct cpu_local_var *v;
-		unsigned long irqstate;
 
 		if (!CPU_ISSET(cpu, cpu_set)) continue;
 
 		v = get_cpu_local_var(cpu);
-		irqstate = ihk_mc_spinlock_lock(&v->runq_lock);
-		if (min_queue_len == -1 || v->runq_len < min_queue_len) {
-			min_queue_len = v->runq_len;
+		ihk_mc_spinlock_lock_noirq(&v->runq_lock);
+		dkprintf("%s: cpu=%d,runq_len=%d,runq_reserved=%d\n", __FUNCTION__, cpu, v->runq_len, v->runq_reserved);
+		if (min_queue_len == -1 || v->runq_len + v->runq_reserved < min_queue_len) {
+			min_queue_len = v->runq_len + v->runq_reserved;
 			min_cpu = cpu;
 		}
-		ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
+		ihk_mc_spinlock_unlock_noirq(&v->runq_lock);
 
 		if (min_queue_len == 0)
 			break;
@@ -116,7 +118,9 @@ int obtain_clone_cpuid(cpu_set_t *cpu_set) {
 	if (min_cpu != -1) {
 		if (get_cpu_local_var(min_cpu)->status != CPU_STATUS_RESERVED)
 			get_cpu_local_var(min_cpu)->status = CPU_STATUS_RESERVED;
+		__sync_fetch_and_add(&get_cpu_local_var(min_cpu)->runq_reserved, 1);
 	}
+	ihk_mc_spinlock_unlock(&runq_reservation_lock, irqstate);
 
 	return min_cpu;
 }
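Note the lock ordering the patch establishes: the global runq_reservation_lock is always taken first with interrupts saved (ihk_mc_spinlock_lock), and each per-CPU runq_lock is nested inside it with the _noirq variants, since interrupts are already disabled. The same order is used in release_cpuid() and runq_add_thread() below, which is what keeps competing forks from deadlocking against each other.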

@@ -74,6 +74,7 @@ struct cpu_local_var {
 	struct thread *current;
 	struct list_head runq;
 	size_t runq_len;
+	size_t runq_reserved; /* Number of threads which are about to be added to runq */
 
 	struct ihk_ikc_channel_desc *ikc2linux;

@@ -275,6 +275,7 @@ extern struct list_head resource_set_list;
 extern mcs_rwlock_lock_t resource_set_lock;
 extern int idle_halt;
 extern int allow_oversubscribe;
+extern ihk_spinlock_t runq_reservation_lock; /* To serialize runq reservations for competing fork()s */
 
 struct process_hash {
 	struct list_head list[HASH_SIZE];

@@ -88,6 +88,7 @@ extern void procfs_delete_thread(struct thread *);
 struct list_head resource_set_list;
 mcs_rwlock_lock_t resource_set_lock;
+ihk_spinlock_t runq_reservation_lock;
 int idle_halt = 0;
 int allow_oversubscribe = 0;
@@ -3268,8 +3269,15 @@ void schedule(void)
 void
 release_cpuid(int cpuid)
 {
-	if (!get_cpu_local_var(cpuid)->runq_len)
-		get_cpu_local_var(cpuid)->status = CPU_STATUS_IDLE;
+	unsigned long irqstate;
+	struct cpu_local_var *v = get_cpu_local_var(cpuid);
+
+	irqstate = ihk_mc_spinlock_lock(&runq_reservation_lock);
+	ihk_mc_spinlock_lock_noirq(&(v->runq_lock));
+	if (!v->runq_len)
+		v->status = CPU_STATUS_IDLE;
+	__sync_fetch_and_sub(&v->runq_reserved, 1);
+	ihk_mc_spinlock_unlock_noirq(&(v->runq_lock));
+	ihk_mc_spinlock_unlock(&runq_reservation_lock, irqstate);
 }
 
 void check_need_resched(void)
@@ -3438,10 +3446,12 @@ void runq_add_thread(struct thread *thread, int cpu_id)
 {
 	struct cpu_local_var *v = get_cpu_local_var(cpu_id);
 	unsigned long irqstate;
 
-	irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
+	irqstate = ihk_mc_spinlock_lock(&runq_reservation_lock);
+	ihk_mc_spinlock_lock_noirq(&(v->runq_lock));
 	__runq_add_thread(thread, cpu_id);
-	ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
+	__sync_fetch_and_sub(&v->runq_reserved, 1);
+	ihk_mc_spinlock_unlock_noirq(&(v->runq_lock));
+	ihk_mc_spinlock_unlock(&runq_reservation_lock, irqstate);
 
 	procfs_create_thread(thread);
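
Taken together, the three functions form a reserve/commit/cancel protocol around runq_reserved. A hedged sketch of how a clone path might drive it; clone_thread_sketch() and setup_thread() are hypothetical, only obtain_clone_cpuid(), runq_add_thread() and release_cpuid() come from the patch:

/* Hypothetical caller illustrating the reserve/commit/cancel protocol. */
int clone_thread_sketch(struct thread *new, cpu_set_t *cpu_set)
{
	/* Reserve: picks the least-loaded allowed CPU and bumps its
	 * runq_reserved under runq_reservation_lock. */
	int cpuid = obtain_clone_cpuid(cpu_set);

	if (cpuid == -1)
		return -1;	/* no allowed CPU available */

	if (setup_thread(new, cpuid) != 0) {	/* hypothetical setup step */
		release_cpuid(cpuid);	/* cancel: drops the reservation */
		return -1;
	}

	runq_add_thread(new, cpuid);	/* commit: enqueue, drop reservation */
	return 0;
}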