support for backlog

Change-Id: Id8f503234e7afaa284e6b97dc264eb3a2af145c7
Author: Tomoki Shirasawa
Date:   2019-11-15 15:49:18 +09:00
Committed by: Masamichi Takagi
Parent: e069694c12
Commit: 37605740a4

5 changed files with 71 additions and 1 deletion

File 1 of 5:

@@ -111,6 +111,8 @@ static void timer_handler(void *priv)
 		/* set timer re-enable for periodic */
 		arch_timer_reg_write(ARCH_TIMER_REG_TVAL, clocks);
 		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+
+		do_backlog();
 	}
 }

File 2 of 5:

@@ -952,6 +952,8 @@ void handle_interrupt(int vector, struct x86_user_context *regs)
 		v->flags |= CPU_FLAG_NEED_RESCHED;
 		ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
 		dkprintf("timer[%lu]: CPU_FLAG_NEED_RESCHED \n", rdtsc());
+
+		do_backlog();
 	}
 	else if (vector == LOCAL_PERF_VECTOR) {
 		struct siginfo info;

File 3 of 5:

@@ -17,6 +17,7 @@
 #include <ihk/lock.h>
 #include <ihk/mm.h>
 #include <ihk/page_alloc.h>
+#include <kmalloc.h>
 #include <cls.h>
 #include <page.h>
 #include <rusage_private.h>
@@ -42,6 +43,7 @@ void cpu_local_var_init(void)
 		clv[i].monitor = monitor->cpu + i;
 		clv[i].rusage = rusage.cpu + i;
 		INIT_LIST_HEAD(&clv[i].smp_func_req_list);
+		INIT_LIST_HEAD(&clv[i].backlog_list);
 #ifdef ENABLE_PER_CPU_ALLOC_CACHE
 		clv[i].free_chunks.rb_node = NULL;
 #endif
@@ -67,3 +69,54 @@ void preempt_disable(void)
 	if (cpu_local_var_initialized)
 		++cpu_local_var(no_preempt);
 }
+
+int add_backlog(int (*func)(void *arg), void *arg)
+{
+	struct backlog *bl;
+	struct cpu_local_var *v = get_this_cpu_local_var();
+	unsigned long irqstate;
+
+	if (!(bl = kmalloc(sizeof(struct backlog), IHK_MC_AP_NOWAIT))) {
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&bl->list);
+	bl->func = func;
+	bl->arg = arg;
+	irqstate = ihk_mc_spinlock_lock(&v->backlog_lock);
+	list_add_tail(&bl->list, &v->backlog_list);
+	ihk_mc_spinlock_unlock(&v->backlog_lock, irqstate);
+	irqstate = ihk_mc_spinlock_lock(&v->runq_lock);
+	v->flags |= CPU_FLAG_NEED_RESCHED;
+	ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
+	set_timer(0);
+	return 0;
+}
+
+void do_backlog(void)
+{
+	unsigned long irqstate;
+	struct list_head list;
+	struct cpu_local_var *v = get_this_cpu_local_var();
+	struct backlog *bl;
+	struct backlog *next;
+
+	INIT_LIST_HEAD(&list);
+	irqstate = ihk_mc_spinlock_lock(&v->backlog_lock);
+	list_for_each_entry_safe(bl, next, &v->backlog_list, list) {
+		list_del(&bl->list);
+		list_add_tail(&bl->list, &list);
+	}
+	ihk_mc_spinlock_unlock(&v->backlog_lock, irqstate);
+
+	list_for_each_entry_safe(bl, next, &list, list) {
+		list_del(&bl->list);
+		if (bl->func(bl->arg)) {
+			irqstate = ihk_mc_spinlock_lock(&v->backlog_lock);
+			list_add_tail(&bl->list, &v->backlog_list);
+			ihk_mc_spinlock_unlock(&v->backlog_lock, irqstate);
+		}
+		else {
+			kfree(bl);
+		}
+	}
+}

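A note on the drain loop in do_backlog() above (editorial, not part of the commit): entries are first spliced onto a stack-local list under backlog_lock, and the handlers only run after the lock has been dropped. Restated as a minimal sketch using only names from the patch:

	/* collect all queued entries privately... */
	INIT_LIST_HEAD(&list);
	irqstate = ihk_mc_spinlock_lock(&v->backlog_lock);
	list_for_each_entry_safe(bl, next, &v->backlog_list, list) {
		list_del(&bl->list);
		list_add_tail(&bl->list, &list);
	}
	ihk_mc_spinlock_unlock(&v->backlog_lock, irqstate);
	/* ...then run bl->func(bl->arg) with backlog_lock released, so a
	 * handler may itself call add_backlog() without deadlocking */

Because failed handlers are re-queued onto v->backlog_list rather than the private list, each pass visits every entry at most once; re-queued work waits for a later invocation.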
File 4 of 5:

@@ -59,6 +59,12 @@ struct smp_func_call_request {
 	struct list_head list;
 };
+
+struct backlog {
+	struct list_head list;
+	int (*func)(void *arg);
+	void *arg;
+};
 
 struct cpu_local_var {
 	/* malloc */
 	struct list_head free_list;
@@ -103,6 +109,9 @@ struct cpu_local_var {
 	struct process_vm *on_fork_vm;
+	ihk_spinlock_t backlog_lock;
+	struct list_head backlog_list;
+
 	/* UTI */
 	void *uti_futex_resp;
 #ifdef ENABLE_PER_CPU_ALLOC_CACHE
@@ -123,4 +132,7 @@ static struct cpu_local_var *get_this_cpu_local_var(void)
 #define cpu_local_var_with_override(name, clv_override) (clv_override ? clv_override->name : get_this_cpu_local_var()->name)
+
+int add_backlog(int (*func)(void *arg), void *arg);
+void do_backlog(void);
 #endif

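For illustration, a minimal usage sketch of the API declared above (not from the commit; the handler, the tx_ring type, and its helpers are hypothetical, and the header name cls.h is assumed from the includes in this patch):

	#include <cls.h>	/* add_backlog(), do_backlog() -- assumed location */

	/* Hypothetical handler: return 0 when done (do_backlog() then
	 * kfree()s the entry), nonzero to be re-queued and retried. */
	static int drain_tx_ring(void *arg)
	{
		struct tx_ring *ring = arg;	/* hypothetical type */

		if (tx_ring_busy(ring))		/* hypothetical predicate */
			return 1;	/* retry on a later timer tick */
		tx_ring_release(ring);		/* hypothetical cleanup */
		return 0;
	}

	/* From code running on the target CPU (add_backlog() queues onto
	 * the current CPU's backlog_list): */
	if (add_backlog(drain_tx_ring, ring) == -ENOMEM) {
		/* no memory for the queue entry; handle the work inline */
	}

Note that add_backlog() itself sets CPU_FLAG_NEED_RESCHED and calls set_timer(0), so the caller does not need to arm anything.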
File 5 of 5:

@@ -3301,7 +3301,8 @@ void set_timer(int runq_locked)
 	}
 
 	/* Toggle timesharing if CPU core is oversubscribed */
-	if (num_running > 1 || v->current->itimer_enabled) {
+	if (num_running > 1 || v->current->itimer_enabled ||
+	    !list_empty(&v->backlog_list)) {
 		if (!cpu_local_var(timer_enabled)) {
 			lapic_timer_enable(/*10000000*/1000000);
 			cpu_local_var(timer_enabled) = 1;
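
Taken together, the five files form one retry loop; summarized as an editorial comment sketch (a reading of the patch, not text from it):

	/*
	 * add_backlog(func, arg)   queues an entry on this CPU's backlog_list,
	 *                          sets CPU_FLAG_NEED_RESCHED, and arms the
	 *                          timer via set_timer(0)
	 * timer interrupt          timer_handler() (ARM timer path) and
	 *                          handle_interrupt() (x86 LAPIC path) call
	 *                          do_backlog()
	 * do_backlog()             runs each handler once; a nonzero return
	 *                          re-queues the entry, zero frees it
	 * set_timer()              keeps the timer enabled while backlog_list
	 *                          is non-empty, so re-queued work is retried
	 *                          on subsequent ticks
	 */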