Compare commits
165 Commits
| Author | SHA1 | Date |
|---|---|---|
| | bf5ac7afc8 | |
| | bc423255d9 | |
| | 6714161c25 | |
| | 992a292c08 | |
| | 64c2e437c6 | |
| | dd9675d65e | |
| | 51ed8dce06 | |
| | 01f5e46865 | |
| | 38961fca78 | |
| | 2d7890731e | |
| | 7d181fccd9 | |
| | bd75e80df2 | |
| | 035e7913d8 | |
| | 7d38c7c147 | |
| | a801bcc591 | |
| | d7b8e7f4f4 | |
| | 6afea4af48 | |
| | 6415dcfdcc | |
| | 0f58e9e77d | |
| | 72e3f5ee50 | |
| | 8d57ad9bc4 | |
| | 35b36c2d33 | |
| | 632611d78c | |
| | d48d44d365 | |
| | 4c0f401424 | |
| | 06f824c829 | |
| | 7a606baad4 | |
| | 4c6c66555e | |
| | 8426cf589a | |
| | da7421e8ee | |
| | 209748d913 | |
| | f81722c63b | |
| | 2189c55d99 | |
| | 201a7e2595 | |
| | 5cdd194856 | |
| | 0061adadfb | |
| | 67843151d3 | |
| | 083cf3fcc9 | |
| | 4236323661 | |
| | 5a9bee55c9 | |
| | 6e23b07b20 | |
| | e64bd49d9e | |
| | 72b8f99d3b | |
| | 090937a5a3 | |
| | 2082acdf0d | |
| | a8f11634e6 | |
| | 4f9865cc8f | |
| | 07efb3ab9a | |
| | 2afc9d37d1 | |
| | fa6f20a3c4 | |
| | 52bc052e1a | |
| | f84415c310 | |
| | 1a853e07d7 | |
| | 07b0954610 | |
| | 1f006b2381 | |
| | 4dfd806aa7 | |
| | c6e3185246 | |
| | d9e6ff235d | |
| | b03f69783a | |
| | ab915f3331 | |
| | 7773c4aef6 | |
| | 58e531eb58 | |
| | 9beef7d901 | |
| | 0733592eb5 | |
| | 4d0e0728f4 | |
| | 66fad4c7a4 | |
| | 5758dba7cf | |
| | 1ca16b9693 | |
| | d29922c820 | |
| | 46b48ac59b | |
| | 446ef0465b | |
| | 200fe9aec4 | |
| | fedba28a93 | |
| | b527503937 | |
| | 6bdafbd33b | |
| | 12e7ed644f | |
| | edf059888d | |
| | a66fb96cd9 | |
| | dd2ef89997 | |
| | ba7edf1981 | |
| | a669fc5125 | |
| | c0cabc2d83 | |
| | e306b1e838 | |
| | 0c3b705f98 | |
| | 9f55263528 | |
| | 74c5f61fd5 | |
| | cadb66e5c1 | |
| | 9b5ccb5a33 | |
| | c5079898c2 | |
| | 746b459e7f | |
| | 4c42086154 | |
| | 56ee0787c9 | |
| | e901d42fb6 | |
| | 29ab087fa2 | |
| | 105d373765 | |
| | 0dd2fad33b | |
| | e554f4e2f9 | |
| | a256280118 | |
| | d75be7228b | |
| | 923dc4aa11 | |
| | e3e0f6a174 | |
| | dd6f721e03 | |
| | 9c25d47d9b | |
| | 5a4148aaaf | |
| | 32c8f6192d | |
| | e2f424846c | |
| | 989af7e045 | |
| | 721cee05a2 | |
| | 86aa76e088 | |
| | ab113658f1 | |
| | 2d72042021 | |
| | 610463ff39 | |
| | dfb0a37305 | |
| | 26b9484bae | |
| | b4aecfd43c | |
| | bf036f19f7 | |
| | 182202523e | |
| | afb7cb3a1e | |
| | fdbdcbd0ee | |
| | a18fd1f45c | |
| | d8170e292c | |
| | fee5234c54 | |
| | 6309095fd2 | |
| | b005adc103 | |
| | 21373338cc | |
| | 39352cd364 | |
| | 84025cc9cb | |
| | 04cbfbb025 | |
| | ba58054c9d | |
| | 7fd55dc83f | |
| | d66af42f7b | |
| | 4b964b8e0d | |
| | 65dc3440cb | |
| | fbd9086ce5 | |
| | c2b1d8e3ef | |
| | e2d59e2cb9 | |
| | 3de0f5ea19 | |
| | 373e9ea63c | |
| | 8daffa939e | |
| | eaa4d35fab | |
| | a968c935b5 | |
| | e01f6dd6ea | |
| | a07d802cbe | |
| | 1e442cce10 | |
| | 3f870b69a6 | |
| | 0fef80cb19 | |
| | 9992fe0d72 | |
| | 2d19ed9391 | |
| | 2f2f04d5a1 | |
| | 1541b26086 | |
| | e6c4d7731d | |
| | 94b527e027 | |
| | 8c9b207557 | |
| | dacb05844b | |
| | c3ec5d20ca | |
| | 92a40f92dd | |
| | 45bddf3caa | |
| | b7671fedd3 | |
| | c38d536aaa | |
| | 4ee0c05e08 | |
| | f2ab0193e5 | |
| | ef910fdf0e | |
| | b97a8c5138 | |
| | 034d10b185 | |
| | 3fe2257929 | |
@@ -30,6 +30,7 @@
#include <cls.h>
#include <prctl.h>
#include <page.h>
#include <kmalloc.h>

#define LAPIC_ID 0x020
#define LAPIC_TIMER 0x320
@@ -42,8 +43,6 @@
#define LAPIC_ICR0 0x300
#define LAPIC_ICR2 0x310
#define LAPIC_ESR 0x280
#define LOCAL_TIMER_VECTOR 0xef
#define LOCAL_PERF_VECTOR 0xf0

#define APIC_INT_LEVELTRIG 0x08000
#define APIC_INT_ASSERT 0x04000
@@ -80,6 +79,7 @@ static void (*lapic_icr_write)(unsigned int h, unsigned int l);
static void (*lapic_wait_icr_idle)(void);
void (*x86_issue_ipi)(unsigned int apicid, unsigned int low);
int running_on_kvm(void);
static void smp_func_call_handler(void);

void init_processors_local(int max_id);
void assign_processor_id(void);
@@ -971,6 +971,9 @@ void handle_interrupt(int vector, struct x86_user_context *regs)

tlb_flush_handler(vector);
}
else if (vector == LOCAL_SMP_FUNC_CALL_VECTOR) {
smp_func_call_handler();
}
else if (vector == 133) {
show_context_stack((uintptr_t *)regs->gpr.rbp);
}
@@ -1101,13 +1104,12 @@ unhandled_page_fault(struct thread *thread, void *fault_addr, void *regs)

kprintf_unlock(irqflags);

if (!(error & PF_USER)) {
panic("panic: kernel mode PF");
}

/* TODO */
ihk_mc_debug_show_interrupt_context(regs);

if (!(error & PF_USER)) {
panic("panic: kernel mode PF");
}

//dkprintf("now dump a core file\n");
//coredump(proc, regs);
@@ -1813,4 +1815,178 @@ int running_on_kvm(void) {
return 0;
}

void
mod_nmi_ctx(void *nmi_ctx, void (*func)())
{
unsigned long *l = nmi_ctx;
int i;
unsigned long flags;

asm volatile("pushf; pop %0" : "=r"(flags) : : "memory", "cc");
for (i = 0; i < 22; i++)
l[i] = l[i + 5];
l[i++] = (unsigned long)func; // return address
l[i++] = 0x20; // KERNEL CS
l[i++] = flags & ~RFLAGS_IF; // rflags (disable interrupt)
l[i++] = (unsigned long)(l + 27); // old rsp
l[i++] = 0x28; // KERNEL DS
}

int arch_cpu_read_write_register(
struct ihk_os_cpu_register *desc,
enum mcctrl_os_cpu_operation op)
{
if (op == MCCTRL_OS_CPU_READ_REGISTER) {
desc->val = rdmsr(desc->addr);
}
else if (op == MCCTRL_OS_CPU_WRITE_REGISTER) {
wrmsr(desc->addr, desc->val);
}
else {
return -1;
}

return 0;
}

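The new `arch_cpu_read_write_register()` helper dispatches MSR reads and writes through one descriptor. A minimal caller sketch, hypothetical and not part of this changeset (the MSR address `0xE8`, IA32_APERF, is just an example):

```c
/* Hypothetical caller (assumed kernel context; not from this diff):
 * read one MSR through the new helper and log it. */
struct ihk_os_cpu_register reg = { .addr = 0xE8 /* IA32_APERF, example */ };

if (arch_cpu_read_write_register(&reg, MCCTRL_OS_CPU_READ_REGISTER) == 0) {
	kprintf("MSR 0x%lx = 0x%lx\n", reg.addr, reg.val);
}
```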
/*
 * Generic remote CPU function invocation facility.
 */
static void smp_func_call_handler(void)
{
int irq_flags;
struct smp_func_call_request *req;
int reqs_left;

reiterate:
req = NULL;
reqs_left = 0;

irq_flags = ihk_mc_spinlock_lock(
&cpu_local_var(smp_func_req_lock));

/* Take requests one-by-one */
if (!list_empty(&cpu_local_var(smp_func_req_list))) {
req = list_first_entry(&cpu_local_var(smp_func_req_list),
struct smp_func_call_request, list);
list_del(&req->list);

reqs_left = !list_empty(&cpu_local_var(smp_func_req_list));
}

ihk_mc_spinlock_unlock(&cpu_local_var(smp_func_req_lock),
irq_flags);

if (req) {
req->ret = req->sfcd->func(req->cpu_index,
req->sfcd->nr_cpus, req->sfcd->arg);
ihk_atomic_dec(&req->sfcd->cpus_left);
}

if (reqs_left)
goto reiterate;
}

int smp_call_func(cpu_set_t *__cpu_set, smp_func_t __func, void *__arg)
{
int cpu, nr_cpus = 0;
int cpu_index = 0;
int this_cpu_index = 0;
struct smp_func_call_data sfcd;
struct smp_func_call_request *reqs;
int ret = 0;
int call_on_this_cpu = 0;
cpu_set_t cpu_set;

/* Sanity checks */
if (!__cpu_set || !__func) {
return -EINVAL;
}

/* Make sure it won't change in between */
cpu_set = *__cpu_set;

for_each_set_bit(cpu, (unsigned long *)&cpu_set,
sizeof(cpu_set) * BITS_PER_BYTE) {

if (cpu == ihk_mc_get_processor_id()) {
call_on_this_cpu = 1;
}
++nr_cpus;
}

if (!nr_cpus) {
return -EINVAL;
}

reqs = kmalloc(sizeof(*reqs) * nr_cpus, IHK_MC_AP_NOWAIT);
if (!reqs) {
ret = -ENOMEM;
goto free_out;
}

sfcd.nr_cpus = nr_cpus;
sfcd.func = __func;
sfcd.arg = __arg;
ihk_atomic_set(&sfcd.cpus_left,
call_on_this_cpu ? nr_cpus - 1 : nr_cpus);

/* Add requests and send IPIs */
cpu_index = 0;
for_each_set_bit(cpu, (unsigned long *)&cpu_set,
sizeof(cpu_set) * BITS_PER_BYTE) {
unsigned long irq_flags;

reqs[cpu_index].cpu_index = cpu_index;
reqs[cpu_index].ret = 0;

if (cpu == ihk_mc_get_processor_id()) {
this_cpu_index = cpu_index;
++cpu_index;
continue;
}

reqs[cpu_index].sfcd = &sfcd;

irq_flags =
ihk_mc_spinlock_lock(&get_cpu_local_var(cpu)->smp_func_req_lock);
list_add_tail(&reqs[cpu_index].list,
&get_cpu_local_var(cpu)->smp_func_req_list);
ihk_mc_spinlock_unlock(&get_cpu_local_var(cpu)->smp_func_req_lock,
irq_flags);

ihk_mc_interrupt_cpu(
get_x86_cpu_local_variable(cpu)->apic_id,
LOCAL_SMP_FUNC_CALL_VECTOR);

++cpu_index;
}

/* Is this CPU involved? */
if (call_on_this_cpu) {
reqs[this_cpu_index].ret =
__func(this_cpu_index, nr_cpus, __arg);
}

/* Wait for the rest of the CPUs */
while (ihk_atomic_read(&sfcd.cpus_left) > 0) {
cpu_pause();
}

/* Check return values, if error, report the first non-zero */
for (cpu_index = 0; cpu_index < nr_cpus; ++cpu_index) {
if (reqs[cpu_index].ret != 0) {
ret = reqs[cpu_index].ret;
goto free_out;
}
}

ret = 0;

free_out:
kfree(reqs);

return ret;
}

/*** end of file ***/

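The facility above queues one request per target CPU, kicks each with `LOCAL_SMP_FUNC_CALL_VECTOR`, and collects per-CPU return codes. A hedged usage sketch, assuming the callback signature `int (*)(int cpu_index, int nr_cpus, void *arg)` implied by `smp_func_call_handler()`; the function and lock names below are illustrative only:

```c
/* Hypothetical callback: runs once on every CPU in the set.
 * A non-zero return value is reported back by smp_call_func(). */
static int flush_one_cpu(int cpu_index, int nr_cpus, void *arg)
{
	/* per-CPU work goes here */
	return 0;
}

/* Hypothetical caller: invoke the callback on all CPUs in 'cpus'
 * and propagate the first non-zero return value. */
static int flush_all(cpu_set_t *cpus)
{
	return smp_call_func(cpus, flush_one_cpu, NULL);
}
```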
@@ -182,7 +182,6 @@ void fill_prpsinfo(struct note *head, struct thread *thread, void *regs)
/*
We leave most of the fields unfilled.

char pr_state;
char pr_sname;
char pr_zomb;
char pr_nice;

@@ -208,6 +208,8 @@ static void mcs_lock_unlock(struct mcs_lock_node *lock,
}

#define SPINLOCK_IN_MCS_RWLOCK

// reader/writer lock
typedef struct mcs_rwlock_node {
ihk_atomic_t count; // num of readers (use only common reader)
@@ -224,21 +226,31 @@ typedef struct mcs_rwlock_node {
} __attribute__((aligned(64))) mcs_rwlock_node_t;

typedef struct mcs_rwlock_node_irqsave {
#ifndef SPINLOCK_IN_MCS_RWLOCK
struct mcs_rwlock_node node;
#endif
unsigned long irqsave;
} __attribute__((aligned(64))) mcs_rwlock_node_irqsave_t;

typedef struct mcs_rwlock_lock {
#ifdef SPINLOCK_IN_MCS_RWLOCK
ihk_spinlock_t slock;
#else
struct mcs_rwlock_node reader; /* common reader lock */
struct mcs_rwlock_node *node; /* base */
#endif
} __attribute__((aligned(64))) mcs_rwlock_lock_t;

static void
mcs_rwlock_init(struct mcs_rwlock_lock *lock)
{
#ifdef SPINLOCK_IN_MCS_RWLOCK
ihk_mc_spinlock_init(&lock->slock);
#else
ihk_atomic_set(&lock->reader.count, 0);
lock->reader.type = MCS_RWLOCK_TYPE_COMMON_READER;
lock->node = NULL;
#endif
}

#ifdef DEBUG_MCS_RWLOCK
@@ -253,6 +265,9 @@ __kprintf("[%d] ret mcs_rwlock_writer_lock_noirq\n", ihk_mc_get_processor_id());
static void
__mcs_rwlock_writer_lock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node *node)
{
#ifdef SPINLOCK_IN_MCS_RWLOCK
ihk_mc_spinlock_lock_noirq(&lock->slock);
#else
struct mcs_rwlock_node *pred;

preempt_disable();
@@ -270,8 +285,10 @@ __mcs_rwlock_writer_lock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock_n
cpu_pause();
}
}
#endif
}

#ifndef SPINLOCK_IN_MCS_RWLOCK
static void
mcs_rwlock_unlock_readers(struct mcs_rwlock_lock *lock)
{
@@ -328,6 +345,7 @@ mcs_rwlock_unlock_readers(struct mcs_rwlock_lock *lock)

f->locked = MCS_RWLOCK_UNLOCKED;
}
#endif

#ifdef DEBUG_MCS_RWLOCK
#define mcs_rwlock_writer_unlock_noirq(l, n) { \
@@ -341,6 +359,9 @@ __kprintf("[%d] ret mcs_rwlock_writer_unlock_noirq\n", ihk_mc_get_processor_id()
static void
__mcs_rwlock_writer_unlock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node *node)
{
#ifdef SPINLOCK_IN_MCS_RWLOCK
ihk_mc_spinlock_unlock_noirq(&lock->slock);
#else
if (node->next == NULL) {
struct mcs_rwlock_node *old = (struct mcs_rwlock_node *)
atomic_cmpxchg8((unsigned long *)&lock->node,
@@ -365,6 +386,7 @@ __mcs_rwlock_writer_unlock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock

out:
preempt_enable();
#endif
}

#ifdef DEBUG_MCS_RWLOCK
@@ -397,6 +419,9 @@ atomic_inc_ifnot0(ihk_atomic_t *v)
static void
__mcs_rwlock_reader_lock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node *node)
{
#ifdef SPINLOCK_IN_MCS_RWLOCK
ihk_mc_spinlock_lock_noirq(&lock->slock);
#else
struct mcs_rwlock_node *pred;

preempt_disable();
@@ -445,6 +470,7 @@ __mcs_rwlock_reader_lock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock_n
}
out:
return;
#endif
}

#ifdef DEBUG_MCS_RWLOCK
@@ -459,6 +485,9 @@ __kprintf("[%d] ret mcs_rwlock_reader_unlock_noirq\n", ihk_mc_get_processor_id()
static void
__mcs_rwlock_reader_unlock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node *node)
{
#ifdef SPINLOCK_IN_MCS_RWLOCK
ihk_mc_spinlock_unlock_noirq(&lock->slock);
#else
if(ihk_atomic_dec_return(&lock->reader.count))
goto out;

@@ -488,6 +517,7 @@ __mcs_rwlock_reader_unlock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock

out:
preempt_enable();
#endif
}

#ifdef DEBUG_MCS_RWLOCK
@@ -502,8 +532,12 @@ __kprintf("[%d] ret mcs_rwlock_writer_lock\n", ihk_mc_get_processor_id()); \
static void
__mcs_rwlock_writer_lock(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node_irqsave *node)
{
#ifdef SPINLOCK_IN_MCS_RWLOCK
node->irqsave = ihk_mc_spinlock_lock(&lock->slock);
#else
node->irqsave = cpu_disable_interrupt_save();
__mcs_rwlock_writer_lock_noirq(lock, &node->node);
#endif
}

#ifdef DEBUG_MCS_RWLOCK
@@ -518,8 +552,12 @@ __kprintf("[%d] ret mcs_rwlock_writer_unlock\n", ihk_mc_get_processor_id()); \
static void
__mcs_rwlock_writer_unlock(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node_irqsave *node)
{
#ifdef SPINLOCK_IN_MCS_RWLOCK
ihk_mc_spinlock_unlock(&lock->slock, node->irqsave);
#else
__mcs_rwlock_writer_unlock_noirq(lock, &node->node);
cpu_restore_interrupt(node->irqsave);
#endif
}

#ifdef DEBUG_MCS_RWLOCK
@@ -534,8 +572,12 @@ __kprintf("[%d] ret mcs_rwlock_reader_lock\n", ihk_mc_get_processor_id()); \
static void
__mcs_rwlock_reader_lock(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node_irqsave *node)
{
#ifdef SPINLOCK_IN_MCS_RWLOCK
node->irqsave = ihk_mc_spinlock_lock(&lock->slock);
#else
node->irqsave = cpu_disable_interrupt_save();
__mcs_rwlock_reader_lock_noirq(lock, &node->node);
#endif
}

#ifdef DEBUG_MCS_RWLOCK
@@ -550,8 +592,12 @@ __kprintf("[%d] ret mcs_rwlock_reader_unlock\n", ihk_mc_get_processor_id()); \
static void
__mcs_rwlock_reader_unlock(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node_irqsave *node)
{
#ifdef SPINLOCK_IN_MCS_RWLOCK
ihk_mc_spinlock_unlock(&lock->slock, node->irqsave);
#else
__mcs_rwlock_reader_unlock_noirq(lock, &node->node);
cpu_restore_interrupt(node->irqsave);
#endif
}

#endif

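The `SPINLOCK_IN_MCS_RWLOCK` switch added above lets the rwlock fall back to a plain IHK spinlock while keeping the MCS-based reader/writer path compilable. A hedged usage sketch of the irqsave variants (calling the `__`-prefixed implementations directly; in the real header these are presumably reached through the `mcs_rwlock_*` debug wrappers):

```c
/* Sketch only: the node lives on the caller's stack and carries the
 * saved IRQ state (plus the MCS queue node when the spinlock fallback
 * is not compiled in). */
static mcs_rwlock_lock_t example_lock;	/* mcs_rwlock_init() at setup time */

static void reader_section(void)
{
	struct mcs_rwlock_node_irqsave node;

	__mcs_rwlock_reader_lock(&example_lock, &node);
	/* read-side critical section, interrupts disabled */
	__mcs_rwlock_reader_unlock(&example_lock, &node);
}
```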
@@ -204,6 +204,11 @@ static inline int pte_is_fileoff(pte_t *ptep, size_t pgsize)
}
}

static inline void pte_update_phys(pte_t *ptep, unsigned long phys)
{
*ptep = (*ptep & ~PT_PHYSMASK) | (phys & PT_PHYSMASK);
}

static inline uintptr_t pte_get_phys(pte_t *ptep)
{
return (*ptep & PT_PHYSMASK);

@@ -20,4 +20,23 @@ static inline void *__inline_memcpy(void *to, const void *from, size_t n)
return to;
}

#define ARCH_FAST_MEMSET

static inline void *__inline_memset(void *s, unsigned long c, size_t count)
{
int d0, d1;
asm volatile("rep ; stosl\n\t"
"testb $2,%b3\n\t"
"je 1f\n\t"
"stosw\n"
"1:\ttestb $1,%b3\n\t"
"je 2f\n\t"
"stosb\n"
"2:"
: "=&c" (d0), "=&D" (d1)
: "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
: "memory");
return s;
}

#endif

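One subtlety of the `rep stosl` fast path above: it stores four bytes of `c` per iteration (then fixes up the 2- and 1-byte tails), so for a non-zero fill the caller is expected to pass the byte pattern replicated across the word. A small sketch under that assumption:

```c
/* Sketch: zeroing works directly; a 0x01 fill needs the byte
 * replicated across the 32-bit store pattern. */
char buf[256];

__inline_memset(buf, 0, sizeof(buf));		/* all zero bytes */
__inline_memset(buf, 0x01010101UL, sizeof(buf));	/* all 0x01 bytes */
```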
@@ -215,4 +215,25 @@ static inline unsigned long atomic_cmpxchg4(unsigned int *addr,
return oldval;
}

static inline void ihk_atomic_add_long(long i, long *v) {
asm volatile("lock addq %1,%0"
: "+m" (*v)
: "ir" (i));
}
static inline void ihk_atomic_add_ulong(long i, unsigned long *v) {
asm volatile("lock addq %1,%0"
: "+m" (*v)
: "ir" (i));
}

static inline unsigned long ihk_atomic_add_long_return(long i, long *v) {
long __i;

__i = i;
asm volatile("lock xaddq %0, %1"
: "+r" (i), "+m" (*v)
: : "memory");
return i + __i;
}

#endif

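For reference, `lock xadd` exchanges the register with `*v` while adding, so after the asm statement `i` holds the old value of `*v`; returning `i + __i` therefore yields the new value, in the style of Linux's `atomic_add_return()`:

```c
/* Behavior sketch of ihk_atomic_add_long_return(): */
long counter = 10;
long now = ihk_atomic_add_long_return(5, &counter);
/* counter == 15, now == 15 (the post-add value) */
```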
@@ -15,6 +15,9 @@

#include <ikc/ihk.h>

#define IKC_PORT_IKC2MCKERNEL 501
#define IKC_PORT_IKC2LINUX 503

/* manycore side */
int ihk_mc_ikc_init_first(struct ihk_ikc_channel_desc *,
ihk_ikc_ph_t handler);

@@ -215,6 +215,7 @@ struct x86_sregs {
 * bit 4 == 1: fault was an instruction fetch
 *
 * internal use:
 * bit 29 == 1: Make PF map text modified by ptrace_poketext()
 * bit 30 == 1: don't use COW page to resolve page fault.
 */
enum x86_pf_error_code {

@@ -150,8 +150,11 @@ SYSCALL_HANDLED(602, pmc_start)
SYSCALL_HANDLED(603, pmc_stop)
SYSCALL_HANDLED(604, pmc_reset)
SYSCALL_HANDLED(700, get_cpu_id)
#ifdef TRACK_SYSCALLS
SYSCALL_HANDLED(__NR_track_syscalls, track_syscalls)
#endif // TRACK_SYSCALLS
#ifdef PROFILE_ENABLE
SYSCALL_HANDLED(__NR_profile, profile)
#endif // PROFILE_ENABLE
SYSCALL_HANDLED(730, util_migrate_inter_kernel)
SYSCALL_HANDLED(731, util_indicate_clone)
SYSCALL_HANDLED(732, get_system)

/**** End of File ****/

@@ -130,11 +130,40 @@ general_protection_exception:
addq $8, %rsp
iretq

.global __freeze
__freeze:
PUSH_ALL_REGS
callq freeze
POP_ALL_REGS
iretq

.globl nmi
nmi:
#define PANICED 232
#define PANIC_REGS 240
movq %rax,%gs:PANIC_REGS+0x00
movq %rsp,%gs:PANIC_REGS+0x08

movl nmi_mode(%rip),%eax
cmp $1,%rax
je 1f
cmp $2,%rax
jne 3f
1:
cld
movq %gs:PANIC_REGS+0x00,%rax
PUSH_ALL_REGS
subq $40, %rsp
movq %rsp,%gs:PANIC_REGS+0x10
movq %rsp, %rdi
call freeze_thaw
cmpq $0, %rax
jnz 2f
addq $40, %rsp
2:
POP_ALL_REGS
iretq
3:
movq %rbx,%gs:PANIC_REGS+0x08
movq %rcx,%gs:PANIC_REGS+0x10
movq %rdx,%gs:PANIC_REGS+0x18
@@ -210,6 +239,7 @@ enter_user_mode:
movq $0, %rdi
movq %rsp, %rsi
call check_signal
call utilthr_migrate
movq $0, %rdi
call set_cputime
POP_ALL_REGS

@@ -1075,11 +1075,29 @@ int visit_pte_range(page_table_t pt, void *start0, void *end0, int pgshift,

struct clear_range_args {
int free_physical;
uint8_t padding[4];
struct memobj *memobj;
struct process_vm *vm;
unsigned long *addr;
int nr_addr;
int max_nr_addr;
};

static void remote_flush_tlb_add_addr(struct clear_range_args *args,
unsigned long addr)
{
if (args->nr_addr < args->max_nr_addr) {
args->addr[args->nr_addr] = addr;
++args->nr_addr;
return;
}

remote_flush_tlb_array_cpumask(args->vm, args->addr, args->nr_addr,
ihk_mc_get_processor_id());

args->addr[0] = addr;
args->nr_addr = 1;
}

static int clear_range_l1(void *args0, pte_t *ptep, uint64_t base,
uint64_t start, uint64_t end)
{
@@ -1093,7 +1111,7 @@ static int clear_range_l1(void *args0, pte_t *ptep, uint64_t base,
}

old = xchg(ptep, PTE_NULL);
remote_flush_tlb_cpumask(args->vm, base, ihk_mc_get_processor_id());
remote_flush_tlb_add_addr(args, base);

page = NULL;
if (!pte_is_fileoff(&old, PTL1_SIZE)) {
@@ -1101,14 +1119,14 @@
page = phys_to_page(phys);
}

if (page && page_is_in_memobj(page) && (old & PFL1_DIRTY) &&
if (page && page_is_in_memobj(page) && (old & PFL1_DIRTY) && (args->memobj) &&
!(args->memobj->flags & MF_ZEROFILL)) {
memobj_flush_page(args->memobj, phys, PTL1_SIZE);
}

if (!(old & PFL1_FILEOFF) && args->free_physical) {
if (!page || (page && page_unmap(page))) {
ihk_mc_free_pages(phys_to_virt(phys), 1);
ihk_mc_free_pages_user(phys_to_virt(phys), 1);
dkprintf("%s: freeing regular page at 0x%lx\n", __FUNCTION__, base);
}
args->vm->currss -= PTL1_SIZE;
@@ -1142,8 +1160,7 @@ static int clear_range_l2(void *args0, pte_t *ptep, uint64_t base,

if (*ptep & PFL2_SIZE) {
old = xchg(ptep, PTE_NULL);
remote_flush_tlb_cpumask(args->vm, base,
ihk_mc_get_processor_id());
remote_flush_tlb_add_addr(args, base);

page = NULL;
if (!pte_is_fileoff(&old, PTL2_SIZE)) {
@@ -1157,7 +1174,8 @@ static int clear_range_l2(void *args0, pte_t *ptep, uint64_t base,

if (!(old & PFL2_FILEOFF) && args->free_physical) {
if (!page || (page && page_unmap(page))) {
ihk_mc_free_pages(phys_to_virt(phys), PTL2_SIZE/PTL1_SIZE);
ihk_mc_free_pages_user(phys_to_virt(phys),
PTL2_SIZE/PTL1_SIZE);
dkprintf("%s: freeing large page at 0x%lx\n", __FUNCTION__, base);
}
args->vm->currss -= PTL2_SIZE;
@@ -1174,8 +1192,7 @@ static int clear_range_l2(void *args0, pte_t *ptep, uint64_t base,

if ((start <= base) && ((base + PTL2_SIZE) <= end)) {
*ptep = PTE_NULL;
remote_flush_tlb_cpumask(args->vm, base,
ihk_mc_get_processor_id());
remote_flush_tlb_add_addr(args, base);
ihk_mc_free_pages(pt, 1);
}

@@ -1207,8 +1224,7 @@ static int clear_range_l3(void *args0, pte_t *ptep, uint64_t base,

if (*ptep & PFL3_SIZE) {
old = xchg(ptep, PTE_NULL);
remote_flush_tlb_cpumask(args->vm, base,
ihk_mc_get_processor_id());
remote_flush_tlb_add_addr(args, base);

page = NULL;
if (!pte_is_fileoff(&old, PTL3_SIZE)) {
@@ -1222,7 +1238,8 @@ static int clear_range_l3(void *args0, pte_t *ptep, uint64_t base,

if (!(old & PFL3_FILEOFF) && args->free_physical) {
if (!page || (page && page_unmap(page))) {
ihk_mc_free_pages(phys_to_virt(phys), PTL3_SIZE/PTL1_SIZE);
ihk_mc_free_pages_user(phys_to_virt(phys),
PTL3_SIZE/PTL1_SIZE);
}
args->vm->currss -= PTL3_SIZE;
}
@@ -1238,8 +1255,7 @@ static int clear_range_l3(void *args0, pte_t *ptep, uint64_t base,

if (use_1gb_page && (start <= base) && ((base + PTL3_SIZE) <= end)) {
*ptep = PTE_NULL;
remote_flush_tlb_cpumask(args->vm, base,
ihk_mc_get_processor_id());
remote_flush_tlb_add_addr(args, base);
ihk_mc_free_pages(pt, 1);
}

@@ -1259,8 +1275,10 @@ static int clear_range_l4(void *args0, pte_t *ptep, uint64_t base,
return walk_pte_l3(pt, base, start, end, &clear_range_l3, args0);
}

static int clear_range(struct page_table *pt, struct process_vm *vm,
uintptr_t start, uintptr_t end, int free_physical,
#define TLB_INVALID_ARRAY_PAGES (4)

static int clear_range(struct page_table *pt, struct process_vm *vm,
uintptr_t start, uintptr_t end, int free_physical,
struct memobj *memobj)
{
int error;
@@ -1275,14 +1293,35 @@ static int clear_range(struct page_table *pt, struct process_vm *vm,
return -EINVAL;
}

/* TODO: embed this in tlb_flush_entry? */
args.addr = (unsigned long *)ihk_mc_alloc_pages(
TLB_INVALID_ARRAY_PAGES, IHK_MC_AP_CRITICAL);
if (!args.addr) {
ekprintf("%s: error: allocating address array\n", __FUNCTION__);
return -ENOMEM;
}
args.nr_addr = 0;
args.max_nr_addr = (TLB_INVALID_ARRAY_PAGES * PAGE_SIZE /
sizeof(uint64_t));

args.free_physical = free_physical;
if (memobj && (memobj->flags & MF_DEV_FILE)) {
args.free_physical = 0;
}
if (memobj && ((memobj->flags & MF_PREMAP))) {
args.free_physical = 0;
}
args.memobj = memobj;
args.vm = vm;

error = walk_pte_l4(pt, 0, start, end, &clear_range_l4, &args);
if (args.nr_addr) {
remote_flush_tlb_array_cpumask(vm, args.addr, args.nr_addr,
ihk_mc_get_processor_id());
}

ihk_mc_free_pages(args.addr, TLB_INVALID_ARRAY_PAGES);

return error;
}

@@ -2063,7 +2102,8 @@ void *map_fixed_area(unsigned long phys, unsigned long size, int uncachable)
attr |= PTATTR_UNCACHABLE;
}

kprintf("map_fixed: %lx => %p (%d pages)\n", paligned, v, npages);
kprintf("map_fixed: phys: 0x%lx => 0x%lx (%d pages)\n",
paligned, v, npages);

for (i = 0; i < npages; i++) {
if(__set_pt_page(init_pt, (void *)fixed_virt, paligned, attr)){
@@ -2166,26 +2206,18 @@ int copy_from_user(void *dst, const void *src, size_t siz)
int strlen_user(const char *s)
{
struct process_vm *vm = cpu_local_var(current)->vm;
struct vm_range *range;
unsigned long pgstart;
int maxlen;
const char *head = s;
int err;

maxlen = 4096 - (((unsigned long)s) & 0x0000000000000fffUL);
pgstart = ((unsigned long)s) & 0xfffffffffffff000UL;
if(!pgstart || pgstart >= MAP_KERNEL_START)
return -EFAULT;
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
for(;;){
range = lookup_process_memory_range(vm, pgstart, pgstart+1);
if(range == NULL){
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
return -EFAULT;
}
if((range->flag & VR_PROT_MASK) == VR_PROT_NONE){
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
return -EFAULT;
}
if ((err = verify_process_vm(vm, s, 1)))
return err;
while(*s && maxlen > 0){
s++;
maxlen--;
@@ -2195,14 +2227,12 @@ int strlen_user(const char *s)
maxlen = 4096;
pgstart += 4096;
}
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
return s - head;
}

int strcpy_from_user(char *dst, const char *src)
{
struct process_vm *vm = cpu_local_var(current)->vm;
struct vm_range *range;
unsigned long pgstart;
int maxlen;
int err = 0;
@@ -2211,17 +2241,9 @@ int strcpy_from_user(char *dst, const char *src)
pgstart = ((unsigned long)src) & 0xfffffffffffff000UL;
if(!pgstart || pgstart >= MAP_KERNEL_START)
return -EFAULT;
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
for(;;){
range = lookup_process_memory_range(vm, pgstart, pgstart + 1);
if(range == NULL){
err = -EFAULT;
break;
}
if((range->flag & VR_PROT_MASK) == VR_PROT_NONE){
err = -EFAULT;
break;
}
if ((err = verify_process_vm(vm, src, 1)))
return err;
while(*src && maxlen > 0){
*(dst++) = *(src++);
maxlen--;
@@ -2233,7 +2255,6 @@ int strcpy_from_user(char *dst, const char *src)
maxlen = 4096;
pgstart += 4096;
}
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
return err;
}

@@ -2261,6 +2282,37 @@ int getint_user(int *dest, const int *p)
return 0;
}

int verify_process_vm(struct process_vm *vm,
const void *usrc, size_t size)
{
const uintptr_t ustart = (uintptr_t)usrc;
const uintptr_t uend = ustart + size;
uint64_t reason;
uintptr_t addr;
int error = 0;

if ((ustart < vm->region.user_start)
|| (vm->region.user_end <= ustart)
|| ((vm->region.user_end - ustart) < size)) {
kprintf("%s: error: out of user range\n", __FUNCTION__);
return -EFAULT;
}

reason = PF_USER; /* page not present */
for (addr = ustart & PAGE_MASK; addr < uend; addr += PAGE_SIZE) {
if (!addr)
return -EINVAL;

error = page_fault_process_vm(vm, (void *)addr, reason);
if (error) {
kprintf("%s: error: PF for %p failed\n", __FUNCTION__, addr);
return error;
}
}

return error;
}

int read_process_vm(struct process_vm *vm, void *kdst, const void *usrc, size_t siz)
{
const uintptr_t ustart = (uintptr_t)usrc;

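`verify_process_vm()` validates a user range by walking it page by page and faulting each page in, which is what lets `strlen_user()` and `strcpy_from_user()` above drop their manual `vm_range` scans. A caller sketch (assumed pattern, not taken from this changeset):

```c
/* Hypothetical helper: fault the user range in first, then copy. */
int copy_in(struct process_vm *vm, void *kdst, const void *usrc, size_t siz)
{
	int error = verify_process_vm(vm, usrc, siz);

	if (error)
		return error;
	return read_process_vm(vm, kdst, usrc, siz);
}
```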
@@ -17,8 +17,26 @@
extern unsigned int *x86_march_perfmap;
extern int running_on_kvm(void);

//#define PERFCTR_DEBUG
#ifdef PERFCTR_DEBUG
#define dkprintf(...) do { kprintf(__VA_ARGS__); } while (0)
#define ekprintf(...) do { kprintf(__VA_ARGS__); } while (0)
#else
#define dkprintf(...) do { } while (0)
#define ekprintf(...) do { kprintf(__VA_ARGS__); } while (0)
#endif

#define X86_CR4_PCE 0x00000100

#define PERFCTR_CHKANDJUMP(cond, msg, err) \
do { \
if(cond) { \
ekprintf("%s,"msg"\n", __FUNCTION__); \
ret = err; \
goto fn_fail; \
} \
} while(0)

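`PERFCTR_CHKANDJUMP` is a check-log-and-bail idiom: it expects a function-local `ret` and `fn_exit`/`fn_fail` labels, as used by `ihk_mc_perfctr_start()` below. A minimal sketch of the shape such a function takes (hypothetical function name; kernel context assumed):

```c
/* Hypothetical example of the fn_exit/fn_fail pattern: */
int example_op(unsigned long mask)
{
	int ret = 0;

	PERFCTR_CHKANDJUMP(mask == 0, "empty mask", -EINVAL);
	/* ... do the actual work ... */
fn_exit:
	return ret;
fn_fail:
	goto fn_exit;
}
```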
int perf_counters_discovered = 0;
int X86_IA32_NUM_PERF_COUNTERS = 0;
unsigned long X86_IA32_PERF_COUNTERS_MASK = 0;
@@ -203,9 +221,12 @@ extern void x86_march_perfctr_start(unsigned long counter_mask);

int ihk_mc_perfctr_start(unsigned long counter_mask)
{
int ret = 0;
unsigned long value = 0;
unsigned long mask = X86_IA32_PERF_COUNTERS_MASK | X86_IA32_FIXED_PERF_COUNTERS_MASK;

PERFCTR_CHKANDJUMP(counter_mask & ~mask, "counter_mask out of range", -EINVAL);

#ifdef HAVE_MARCH_PERFCTR_START
x86_march_perfctr_start(counter_mask);
#endif
@@ -213,15 +234,20 @@ int ihk_mc_perfctr_start(unsigned long counter_mask)
value = rdmsr(MSR_PERF_GLOBAL_CTRL);
value |= counter_mask;
wrmsr(MSR_PERF_GLOBAL_CTRL, value);

return 0;
fn_exit:
return ret;
fn_fail:
goto fn_exit;
}

int ihk_mc_perfctr_stop(unsigned long counter_mask)
{
int ret = 0;
unsigned long value;
unsigned long mask = X86_IA32_PERF_COUNTERS_MASK | X86_IA32_FIXED_PERF_COUNTERS_MASK;

PERFCTR_CHKANDJUMP(counter_mask & ~mask, "counter_mask out of range", -EINVAL);

counter_mask &= mask;
value = rdmsr(MSR_PERF_GLOBAL_CTRL);
value &= ~counter_mask;
@@ -244,8 +270,10 @@ int ihk_mc_perfctr_stop(unsigned long counter_mask)
value &= ~(0xf << 8);
wrmsr(MSR_PERF_FIXED_CTRL, value);
}

return 0;
fn_exit:
return ret;
fn_fail:
goto fn_exit;
}

// init for fixed counter

@@ -259,7 +259,7 @@ SYSCALL_DECLARE(rt_sigreturn)

extern struct cpu_local_var *clv;
extern unsigned long do_kill(struct thread *thread, int pid, int tid, int sig, struct siginfo *info, int ptracecont);
extern void interrupt_syscall(int pid, int tid);
extern void interrupt_syscall(struct thread *, int sig);
extern int num_processors;

#define RFLAGS_MASK (RFLAGS_CF | RFLAGS_PF | RFLAGS_AF | RFLAGS_ZF | \
@@ -1179,7 +1179,8 @@ done:
if(pid != -1 && tthread->proc->pid != pid){
continue;
}
if(tthread->tid == tid){
if (tthread->tid == tid &&
tthread->status != PS_EXITED) {
found = 1;
break;
}
@@ -1229,6 +1230,12 @@ done:
return 0;
}

if (tthread->thread_offloaded) {
interrupt_syscall(tthread, sig);
release_thread(tthread);
return 0;
}

doint = 0;

mcs_rwlock_writer_lock_noirq(savelock, &mcs_rw_node);
@@ -1274,8 +1281,6 @@ done:
cpu_restore_interrupt(irqstate);

if (doint && !(mask & tthread->sigmask.__val[0])) {
int tid = tthread->tid;
int pid = tproc->pid;
int status = tthread->status;

if (thread != tthread) {
@@ -1285,7 +1290,7 @@ done:
}

if(!tthread->proc->nohost)
interrupt_syscall(pid, tid);
interrupt_syscall(tthread, 0);

if (status != PS_RUNNING) {
if(sig == SIGKILL){
@@ -1297,6 +1302,9 @@ done:
sched_wakeup_thread(tthread, PS_STOPPED);
tthread->proc->status = PS_RUNNING;
}
else {
sched_wakeup_thread(tthread, PS_INTERRUPTIBLE);
}
}
}
release_thread(tthread);
@@ -1543,7 +1551,7 @@ static int vdso_get_vdso_info(void)
{
int error;
struct ikc_scd_packet packet;
struct ihk_ikc_channel_desc *ch = cpu_local_var(syscall_channel);
struct ihk_ikc_channel_desc *ch = cpu_local_var(ikc2linux);

dkprintf("vdso_get_vdso_info()\n");
memset(&vdso, '\0', sizeof vdso);
@@ -1822,4 +1830,61 @@ out:
return error;
} /* arch_map_vdso() */

void
save_uctx(void *uctx, struct x86_user_context *regs)
{
struct trans_uctx {
volatile int cond;
int fregsize;

unsigned long rax;
unsigned long rbx;
unsigned long rcx;
unsigned long rdx;
unsigned long rsi;
unsigned long rdi;
unsigned long rbp;
unsigned long r8;
unsigned long r9;
unsigned long r10;
unsigned long r11;
unsigned long r12;
unsigned long r13;
unsigned long r14;
unsigned long r15;
unsigned long rflags;
unsigned long rip;
unsigned long rsp;
unsigned long fs;
} *ctx = uctx;

if (!regs) {
asm ("movq %%gs:(%1),%0" : "=r"(regs) :
"r"(offsetof(struct x86_cpu_local_variables, tss.rsp0)));
regs--;
}

ctx->cond = 0;
ctx->rax = regs->gpr.rax;
ctx->rbx = regs->gpr.rbx;
ctx->rcx = regs->gpr.rcx;
ctx->rdx = regs->gpr.rdx;
ctx->rsi = regs->gpr.rsi;
ctx->rdi = regs->gpr.rdi;
ctx->rbp = regs->gpr.rbp;
ctx->r8 = regs->gpr.r8;
ctx->r9 = regs->gpr.r9;
ctx->r10 = regs->gpr.r10;
ctx->r11 = regs->gpr.r11;
ctx->r12 = regs->gpr.r12;
ctx->r13 = regs->gpr.r13;
ctx->r14 = regs->gpr.r14;
ctx->r15 = regs->gpr.r15;
ctx->rflags = regs->gpr.rflags;
ctx->rsp = regs->gpr.rsp;
ctx->rip = regs->gpr.rip;
ihk_mc_arch_get_special_register(IHK_ASR_X86_FS, &ctx->fs);
ctx->fregsize = 0;
}

/*** End of File ***/

@@ -17,6 +17,7 @@
 * make sure that this code is position-independent.
 */

#include <cls.h>
#include <syscall.h>
#include <ihk/atomic.h>
#include <arch/cpu.h>

@@ -22,6 +22,7 @@ ENABLE_MCOVERLAYFS="@ENABLE_MCOVERLAYFS@"

mem="512M@0"
cpus=""
ikc_map=""

if [ "${BASH_VERSINFO[0]}" -lt 4 ]; then
echo "You need at least bash-4.0 to run this script." >&2
@@ -40,8 +41,9 @@ else
fi

turbo=""
ihk_irq=""

while getopts :ti:k:c:m:o:f: OPT
while getopts :ti:k:c:m:o:f:r:q: OPT
do
case ${OPT} in
f) facility=${OPTARG}
@@ -78,6 +80,10 @@ do
;;
m) mem=${OPTARG}
;;
r) ikc_map=${OPTARG}
;;
q) ihk_irq=${OPTARG}
;;
t) turbo="turbo"
;;
*) echo "invalid option -${OPT}" >&2
@@ -174,11 +180,6 @@ error_exit() {
fi
fi
;&
aslr_disabled)
if [ -f /tmp/mckernel_randomize_va_space ]; then
cat /tmp/mckernel_randomize_va_space > /proc/sys/kernel/randomize_va_space
fi
;&
initial)
# Nothing more to revert
;;
@@ -230,12 +231,6 @@ if [ "$cpus" == "" ]; then
fi
fi

# Disable address space layout randomization
if [ -f /proc/sys/kernel/randomize_va_space ] && [ "`cat /proc/sys/kernel/randomize_va_space`" != "0" ]; then
cat /proc/sys/kernel/randomize_va_space > /tmp/mckernel_randomize_va_space
echo "0" > /proc/sys/kernel/randomize_va_space
fi

# Remove mcoverlay if loaded
if [ "$enable_mcoverlay" == "yes" ]; then
if grep mcoverlay /proc/modules &>/dev/null; then
@@ -246,7 +241,7 @@ if [ "$enable_mcoverlay" == "yes" ]; then
if [ -e /tmp/mcos ]; then rm -rf /tmp/mcos; fi
if ! rmmod mcoverlay 2>/dev/null; then
echo "error: removing mcoverlay" >&2
error_exit "aslr_disabled"
exit 1
fi
fi
fi
@@ -256,23 +251,33 @@ if [ "${irqbalance_used}" == "yes" ]; then
systemctl stop irqbalance_mck.service 2>/dev/null
if ! systemctl stop irqbalance.service 2>/dev/null ; then
echo "error: stopping irqbalance" >&2
error_exit "aslr_disabled"
exit 1
fi;
fi

# Start mcklogd. Note that McKernel blocks when kmsg buffer is full
# with '-k 1' until mcklogd unblocks it so starting mcklogd must precede
# booting McKernel
if [ ${LOGMODE} -ne 0 ]; then
# Stop mcklogd which has survived McKernel shutdown because
# mcstop+release.sh is not used
pkill mcklogd
SBINDIR=${SBINDIR} ${SBINDIR}/mcklogd -i ${INTERVAL} -f ${facility}
if ! etcdir=@ETCDIR@ perl -e 'use File::Copy qw(copy); $etcdir=$ENV{'etcdir'}; @files = grep { -f } glob "/proc/irq/*/smp_affinity"; foreach $file (@files) { $rel = substr($file, 1); $dir=substr($rel, 0, length($rel)-length("/smp_affinity")); if(0) { print "cp $file $etcdir/$rel\n";} if(system("mkdir -p $etcdir/$dir")){ exit 1;} if(!copy($file,"$etcdir/$rel")){ exit 1;} }'; then
echo "error: saving /proc/irq/*/smp_affinity" >&2
error_exit "mcos_sys_mounted"
fi;

# Prevent /proc/irq/*/smp_affinity from getting zero after offlining
# McKernel CPUs by using the following algorithm.
# if (smp_affinity & mck_cores) {
#     smp_affinity = (mck_cores ^ -1);
# }
ncpus=`lscpu | grep -E '^CPU\(s\):' | awk '{print $2}'`
smp_affinity_mask=`echo $cpus | ncpus=$ncpus perl -e 'while(<>){@tokens = split /,/;foreach $token (@tokens) {@nums = split /-/,$token; for($num = $nums[0]; $num <= $nums[$#nums]; $num++) {$ndx=int($num/32); $mask[$ndx] |= (1<<($num % 32))}}} $nint32s = int(($ENV{'ncpus'}+31)/32); for($j = $nint32s - 1; $j >= 0; $j--) { if($j != $nint32s - 1){print ",";} $nblks = ($j != $nint32s - 1) ? 8 : ($ENV{'ncpus'} % 32 != 0) ? int((($ENV{'ncpus'} + 3) % 32) / 4) : 8; for($i = $nblks - 1;$i >= 0;$i--){ printf("%01x",($mask[$j] >> ($i*4)) & 0xf);}}'`
# echo cpus=$cpus ncpus=$ncpus smp_affinity_mask=$smp_affinity_mask

if ! ncpus=$ncpus smp_affinity_mask=$smp_affinity_mask perl -e '@dirs = grep { -d } glob "/proc/irq/*"; foreach $dir (@dirs) { $hit = 0; $affinity_str = `cat $dir/smp_affinity`; chomp $affinity_str; @int32strs = split /,/, $affinity_str; @int32strs_mask=split /,/, $ENV{'smp_affinity_mask'}; for($i=0;$i <= $#int32strs_mask; $i++) { $int32strs_inv[$i] = sprintf("%08x",hex($int32strs_mask[$i])^0xffffffff); if($i == 0) { $len = int((($ENV{'ncpus'}%32)+3)/4); if($len != 0) { $int32strs_inv[$i] = substr($int32strs_inv[$i], -$len, $len); } } } $inv = join(",", @int32strs_inv); $nint32s = int(($ENV{'ncpus'}+31)/32); for($j = $nint32s - 1; $j >= 0; $j--) { if(hex($int32strs[$nint32s - 1 - $j]) & hex($int32strs_mask[$nint32s - 1 - $j])) { $hit = 1; }} if($hit == 1) { $cmd = "echo $inv > $dir/smp_affinity 2>/dev/null"; system $cmd;}}'; then
echo "error: modifying /proc/irq/*/smp_affinity" >&2
error_exit "mcos_sys_mounted"
fi

fi

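A stand-alone C illustration of the affinity-rewrite algorithm from the comment above (not part of the script; the masks are made-up examples): if an IRQ's `smp_affinity` intersects the set of CPUs handed to McKernel, it is rewritten to the complement of that set.

```c
#include <stdio.h>

int main(void)
{
	unsigned int mck_cores = 0x0000fff0;	/* example: CPUs 4-15 go to McKernel */
	unsigned int smp_affinity = 0x000000ff;	/* example IRQ affinity */

	/* if (smp_affinity & mck_cores) smp_affinity = (mck_cores ^ -1); */
	if (smp_affinity & mck_cores)
		smp_affinity = ~mck_cores;

	printf("new affinity: %08x\n", smp_affinity);	/* prints ffff000f */
	return 0;
}
```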
# Load IHK if not loaded
if ! grep -E 'ihk\s' /proc/modules &>/dev/null; then
if ! insmod ${KMODDIR}/ihk.ko 2>/dev/null; then
if ! taskset -c 0 insmod ${KMODDIR}/ihk.ko 2>/dev/null; then
echo "error: loading ihk" >&2
error_exit "irqbalance_stopped"
fi
@@ -291,24 +296,33 @@ sync

# Load IHK-SMP if not loaded and reserve CPUs and memory
if ! grep ihk_smp_x86 /proc/modules &>/dev/null; then
ihk_irq=""
for i in `seq 64 255`; do
if [ ! -d /proc/irq/$i ] && [ "`cat /proc/interrupts | grep ":" | awk '{print $1}' | grep -o '[0-9]*' | grep -e '^$i$'`" == "" ]; then
ihk_irq=$i
break
fi
done
if [ "$ihk_irq" == "" ]; then
echo "error: no IRQ available" >&2
error_exit "ihk_loaded"
for i in `seq 64 255`; do
if [ ! -d /proc/irq/$i ] && [ "`cat /proc/interrupts | grep ":" | awk '{print $1}' | grep -o '[0-9]*' | grep -e '^$i$'`" == "" ]; then
ihk_irq=$i
break
fi
done
if [ "$ihk_irq" == "" ]; then
echo "error: no IRQ available" >&2
error_exit "ihk_loaded"
fi
fi
if ! insmod ${KMODDIR}/ihk-smp-x86.ko ihk_start_irq=$ihk_irq ihk_ikc_irq_core=$ihk_ikc_irq_core 2>/dev/null; then
if ! taskset -c 0 insmod ${KMODDIR}/ihk-smp-x86.ko ihk_start_irq=$ihk_irq ihk_ikc_irq_core=$ihk_ikc_irq_core 2>/dev/null; then
echo "error: loading ihk-smp-x86" >&2
error_exit "ihk_loaded"
fi

# Free MCDRAM (special case for OFP SNC-4 mode)
# Offline-reonline RAM (special case for OFP SNC-4 mode)
if [ "`hostname | grep "c[0-9][0-9][0-9][0-9].ofp"`" != "" ] && [ "`cat /sys/devices/system/node/online`" == "0-7" ]; then
for i in 0 1 2 3; do
find /sys/devices/system/node/node$i/memory*/ -name "online" | while read f; do
echo 0 > $f 2>&1 > /dev/null;
done
find /sys/devices/system/node/node$i/memory*/ -name "online" | while read f; do
echo 1 > $f 2>&1 > /dev/null;
done
done
for i in 4 5 6 7; do
find /sys/devices/system/node/node$i/memory*/ -name "online" | while read f; do
echo 0 > $f 2>&1 > /dev/null;
@@ -331,7 +345,7 @@ fi

# Load mcctrl if not loaded
if ! grep mcctrl /proc/modules &>/dev/null; then
if ! insmod ${KMODDIR}/mcctrl.ko 2>/dev/null; then
if ! taskset -c 0 insmod ${KMODDIR}/mcctrl.ko 2>/dev/null; then
echo "error: inserting mcctrl.ko" >&2
error_exit "cpus_reserved"
fi
@@ -360,6 +374,14 @@ if ! ${SBINDIR}/ihkosctl 0 assign cpu ${cpus}; then
error_exit "os_created"
fi

if [ "$ikc_map" != "" ]; then
# Specify IKC map
if ! ${SBINDIR}/ihkosctl 0 ikc_map ${ikc_map}; then
echo "error: assign CPUs" >&2
error_exit "os_created"
fi
fi

# Assign memory
if ! ${SBINDIR}/ihkosctl 0 assign mem ${mem}; then
echo "error: assign memory" >&2
@@ -405,7 +427,7 @@ if [ "$enable_mcoverlay" == "yes" ]; then
echo "error: mount /tmp/mcos/linux_proc" >&2
error_exit "tmp_mcos_mounted"
fi
if ! insmod ${KMODDIR}/mcoverlay.ko 2>/dev/null; then
if ! taskset -c 0 insmod ${KMODDIR}/mcoverlay.ko 2>/dev/null; then
echo "error: inserting mcoverlay.ko" >&2
error_exit "linux_proc_bind_mounted"
fi
@@ -492,19 +514,6 @@ fi

# Start irqbalance with CPUs and IRQ for McKernel banned
if [ "${irqbalance_used}" == "yes" ]; then
if ! etcdir=@ETCDIR@ perl -e 'use File::Copy qw(copy); $etcdir=$ENV{'etcdir'}; @files = grep { -f } glob "/proc/irq/*/smp_affinity"; foreach $file (@files) { $rel = substr($file, 1); $dir=substr($rel, 0, length($rel)-length("/smp_affinity")); if(0) { print "cp $file $etcdir/$rel\n";} if(system("mkdir -p $etcdir/$dir")){ exit 1;} if(!copy($file,"$etcdir/$rel")){ exit 1;} }'; then
echo "error: saving /proc/irq/*/smp_affinity" >&2
error_exit "mcos_sys_mounted"
fi;

ncpus=`lscpu | grep -E '^CPU\(s\):' | awk '{print $2}'`
smp_affinity_mask=`echo $cpus | ncpus=$ncpus perl -e 'while(<>){@tokens = split /,/;foreach $token (@tokens) {@nums = split /-/,$token; for($num = $nums[0]; $num <= $nums[$#nums]; $num++) {$ndx=int($num/32); $mask[$ndx] |= (1<<($num % 32))}}} $nint32s = int(($ENV{'ncpus'}+31)/32); for($j = $nint32s - 1; $j >= 0; $j--) { if($j != $nint32s - 1){print ",";} $nblks = $j == $nint32s - 1 ? int(($ENV{'ncpus'} % 32)/4) : 8; for($i = $nblks - 1;$i >= 0;$i--){ printf("%01x",($mask[$j] >> ($i*4)) & 0xf);}}'`

if ! ncpus=$ncpus smp_affinity_mask=$smp_affinity_mask perl -e '@dirs = grep { -d } glob "/proc/irq/*"; foreach $dir (@dirs) { $hit = 0; $affinity_str = `cat $dir/smp_affinity`; chomp $affinity_str; @int32strs = split /,/, $affinity_str; @int32strs_mask=split /,/, $ENV{'smp_affinity_mask'}; for($i=0;$i <= $#int32strs_mask; $i++) { $int32strs_inv[$i] = sprintf("%08x",hex($int32strs_mask[$i])^0xffffffff); if($i == 0) { $len = int((($ENV{'ncpus'}%32)+3)/4); $int32strs_inv[$i] = substr($int32strs_inv[$i], -$len, $len); } } $inv = join(",", @int32strs_inv); $nint32s = int(($ENV{'ncpus'}+31)/32); for($j = $nint32s - 1; $j >= 0; $j--) { if(hex($int32strs[$nint32s - 1 - $j]) & hex($int32strs_mask[$nint32s - 1 - $j])) { $hit = 1; }} if($hit == 1) { $cmd = "echo $inv > $dir/smp_affinity 2>/dev/null"; system $cmd;}}'; then
echo "error: modifying /proc/irq/*/smp_affinity" >&2
error_exit "mcos_sys_mounted"
fi

banirq=`cat /proc/interrupts| perl -e 'while(<>) { if(/^\s*(\d+).*IHK\-SMP\s*$/) {print $1;}}'`

sed "s/%mask%/$smp_affinity_mask/g" $ETCDIR/irqbalance_mck.in | sed "s/%banirq%/$banirq/g" > /tmp/irqbalance_mck
@@ -518,6 +527,16 @@ if [ "${irqbalance_used}" == "yes" ]; then
echo "error: starting irqbalance_mck" >&2
error_exit "mcos_sys_mounted"
fi
# echo cpus=$cpus mask=$smp_affinity_mask banirq=$banirq
# echo cpus=$cpus ncpus=$ncpus banirq=$banirq
fi

# Start mcklogd. Note that McKernel blocks when kmsg buffer is full
# with '-k 1' until mcklogd unblocks it so starting mcklogd must precede
# booting McKernel
if [ ${LOGMODE} -ne 0 ]; then
# Stop mcklogd which has survived McKernel shutdown because
# mcstop+release.sh is not used
pkill mcklogd
SBINDIR=${SBINDIR} ${SBINDIR}/mcklogd -i ${INTERVAL} -f ${facility}
fi

@@ -16,10 +16,27 @@ KERNDIR="@KERNDIR@"

mem=""
cpus=""
irqbalance_used=""

# No SMP module? Exit.
if ! grep ihk_smp_x86 /proc/modules &>/dev/null; then exit 0; fi

# Stop mcklogd
while pgrep "mcklogd" > /dev/null 2>&1;
do
pkill -9 mcklogd
done

if [ "`systemctl status irqbalance_mck.service 2> /dev/null |grep -E 'Active: active'`" != "" ]; then
irqbalance_used="yes"
if ! systemctl stop irqbalance_mck.service 2>/dev/null; then
echo "warning: failed to stop irqbalance_mck" >&2
fi
if ! systemctl disable irqbalance_mck.service >/dev/null 2>/dev/null; then
echo "warning: failed to disable irqbalance_mck" >&2
fi
fi

# Destroy all LWK instances
if ls /dev/mcos* 1>/dev/null 2>&1; then
for i in /dev/mcos*; do
@@ -94,17 +111,8 @@ if grep -E 'ihk\s' /proc/modules &>/dev/null; then
fi
fi

# Stop mcklogd
pkill mcklogd

# Start irqbalance with the original settings
if [ "`systemctl status irqbalance_mck.service 2> /dev/null |grep -E 'Active: active'`" != "" ]; then
if ! systemctl stop irqbalance_mck.service 2>/dev/null; then
echo "warning: failed to stop irqbalance_mck" >&2
fi
if ! systemctl disable irqbalance_mck.service >/dev/null 2>/dev/null; then
echo "warning: failed to disable irqbalance_mck" >&2
fi
if [ "${irqbalance_used}" != "" ]; then
if ! etcdir=@ETCDIR@ perl -e '$etcdir=$ENV{'etcdir'}; @files = grep { -f } glob "$etcdir/proc/irq/*/smp_affinity"; foreach $file (@files) { $dest = substr($file, length($etcdir)); if(0) {print "cp $file $dest\n";} system("cp $file $dest 2>/dev/null"); }'; then
echo "warning: failed to restore /proc/irq/*/smp_affinity" >&2
fi
@@ -113,10 +121,5 @@ if [ "`systemctl status irqbalance_mck.service 2> /dev/null |grep -E 'Active: ac
fi
fi

# Re-enable ASLR
if [ -f /tmp/mckernel_randomize_va_space ]; then
cat /tmp/mckernel_randomize_va_space > /proc/sys/kernel/randomize_va_space
fi

# Set back default swappiness
echo 60 > /proc/sys/vm/swappiness

@@ -1,4 +1,4 @@
/* executer/config.h.in. Generated from configure.ac by autoheader. */
/* config.h.in. Generated from configure.ac by autoheader. */

/* whether mcoverlayfs is enabled */
#undef ENABLE_MCOVERLAYFS
@@ -6,6 +6,9 @@
/* whether memdump feature is enabled */
#undef ENABLE_MEMDUMP

/* whether rusage is enabled */
#undef ENABLE_RUSAGE

/* Define to 1 if you have the <inttypes.h> header file. */
#undef HAVE_INTTYPES_H

@@ -72,6 +75,9 @@
/* Define to address of kernel symbol zap_page_range, or 0 if exported */
#undef MCCTRL_KSYM_zap_page_range

/* McKernel specific libraries */
#undef MCKERNEL_LIBDIR

/* Define to the address where bug reports for this package should be sent. */
#undef PACKAGE_BUGREPORT

@@ -90,5 +96,8 @@
/* Define to the version of this package. */
#undef PACKAGE_VERSION

/* Path of bind-mount source directory */
#undef ROOTFSDIR

/* Define to 1 if you have the ANSI C header files. */
#undef STDC_HEADERS
configure.ac (57 changes)
@@ -17,6 +17,13 @@ DCFA_RELEASE_DATE=DCFA_RELEASE_DATE_m4

AC_PREFIX_DEFAULT([/opt/ppos])

AC_CHECK_HEADER([numa.h],[numa_header_found=yes])
AS_IF([test "x$numa_header_found" != "xyes"],
[AC_MSG_ERROR([Unable to find numa.h header file, missing numactl-devel?])])
AC_CHECK_LIB([numa],[numa_run_on_node],[numa_lib_found=yes])
AS_IF([test "x$numa_lib_found" != "xyes"],
[AC_MSG_ERROR([Unable to find NUMA library, missing numactl-devel?])])

AC_ARG_WITH([kernelsrc],
AC_HELP_STRING(
[--with-kernelsrc=path],[Path to 'kernel src', default is /lib/modules/uname_r/build]),
@@ -48,6 +55,23 @@ AC_ARG_ENABLE([mcoverlayfs],
[ENABLE_MCOVERLAYFS=$enableval],
[ENABLE_MCOVERLAYFS=yes])

AC_ARG_ENABLE([rusage],
AC_HELP_STRING([--enable-rusage],
[enable rusage implementation]),
[ENABLE_RUSAGE=$enableval],
[ENABLE_RUSAGE=yes])

AC_ARG_WITH([uname_r],
AC_HELP_STRING(
[--with-uname_r=uname_r],[Value of '`uname -r`' on the target platform, default is local value]),
[WITH_UNAME_R=$withval],[WITH_UNAME_R=yes])

case "X$WITH_UNAME_R" in
Xyes | Xno | X)
WITH_UNAME_R='`uname -r`'
;;
esac

case "X$WITH_KERNELSRC" in
Xyes | Xno | X)
WITH_KERNELSRC='/lib/modules/`uname -r`/build'
@@ -64,6 +88,7 @@ if test "X$WITH_TARGET" = Xyes -o "X$WITH_TARGET" = Xno; then
fi

test "x$prefix" = xNONE && prefix="$ac_default_prefix"
AC_DEFINE_UNQUOTED(ROOTFSDIR,"$prefix/rootfs",[Path of bind-mount source directory])

case $WITH_TARGET in
attached-mic|builtin-x86|smp-x86)
@@ -147,6 +172,9 @@ case $WITH_TARGET in
if test "X$SBINDIR" = X; then
SBINDIR="$prefix/sbin"
fi
if test "X$MCKERNEL_LIBDIR" = X; then
MCKERNEL_LIBDIR="$prefix/lib"
fi
if test "X$ETCDIR" = X; then
ETCDIR="$prefix/etc"
fi
@@ -163,6 +191,7 @@ case $WITH_TARGET in
esac

KDIR="$WITH_KERNELSRC"
UNAME_R="$WITH_UNAME_R"
TARGET="$WITH_TARGET"

MCCTRL_LINUX_SYMTAB=""
@@ -276,19 +305,44 @@ else
AC_MSG_NOTICE([mcoverlayfs is disabled])
fi

case $ENABLE_RUSAGE in
yes|no)
;;
default)
ENABLE_RUSAGE=yes
;;
*)
AC_MSG_ERROR([unknown rusage argument: $ENABLE_RUSAGE])
;;
esac

if test "x$ENABLE_RUSAGE" = "xyes" ; then
AC_MSG_NOTICE([rusage is enabled])
AC_DEFINE([ENABLE_RUSAGE],[1],[whether rusage is enabled])
else
AC_MSG_NOTICE([rusage is disabled])
fi

if test "x$MCKERNEL_LIBDIR" != "x" ; then
AC_DEFINE_UNQUOTED(MCKERNEL_LIBDIR,"$MCKERNEL_LIBDIR",[McKernel specific libraries])
fi

AC_SUBST(CC)
AC_SUBST(XCC)
AC_SUBST(ARCH)
AC_SUBST(KDIR)
AC_SUBST(UNAME_R)
AC_SUBST(TARGET)
AC_SUBST(BINDIR)
AC_SUBST(SBINDIR)
AC_SUBST(MCKERNEL_LIBDIR)
AC_SUBST(ETCDIR)
AC_SUBST(KMODDIR)
AC_SUBST(KERNDIR)
AC_SUBST(MANDIR)
AC_SUBST(CFLAGS)
AC_SUBST(ENABLE_MCOVERLAYFS)
AC_SUBST(ENABLE_RUSAGE)

AC_SUBST(IHK_VERSION)
AC_SUBST(MCKERNEL_VERSION)
@@ -298,10 +352,11 @@ AC_SUBST(MCKERNEL_RELEASE_DATE)
AC_SUBST(DCFA_RESEASE_DATE)
AC_SUBST(uncomment_if_ENABLE_MEMDUMP)

AC_CONFIG_HEADERS([executer/config.h])
AC_CONFIG_HEADERS([config.h])
AC_CONFIG_FILES([
Makefile
executer/user/Makefile
executer/user/arch/x86_64/Makefile
executer/kernel/mcctrl/Makefile
executer/kernel/mcctrl/arch/x86_64/Makefile
executer/kernel/mcoverlayfs/Makefile

@@ -43,6 +43,7 @@
#define MCEXEC_UP_GET_CREDV 0x30a0290b
#define MCEXEC_UP_GET_NODES 0x30a0290c
#define MCEXEC_UP_GET_CPUSET 0x30a0290d
#define MCEXEC_UP_CREATE_PPD 0x30a0290e

#define MCEXEC_UP_PREPARE_DMA 0x30a02910
#define MCEXEC_UP_FREE_DMA 0x30a02911
@@ -54,6 +55,16 @@
#define MCEXEC_UP_SYS_UMOUNT 0x30a02915
#define MCEXEC_UP_SYS_UNSHARE 0x30a02916

#define MCEXEC_UP_UTIL_THREAD1 0x30a02920
#define MCEXEC_UP_UTIL_THREAD2 0x30a02921
#define MCEXEC_UP_SIG_THREAD 0x30a02922
#define MCEXEC_UP_SYSCALL_THREAD 0x30a02924
#define MCEXEC_UP_TERMINATE_THREAD 0x30a02925
#define MCEXEC_UP_GET_NUM_POOL_THREADS 0x30a02926

#define MCEXEC_UP_COPY_FROM_MCK 0x30a03000
#define MCEXEC_UP_COPY_TO_MCK 0x30a03001

#define MCEXEC_UP_DEBUG_LOG 0x40000000

#define MCEXEC_UP_TRANSFER_TO_REMOTE 0
@@ -86,12 +97,20 @@ struct get_cpu_set_arg {
size_t cpu_set_size; // Size in bytes
int *target_core;
int *mcexec_linux_numa; // NUMA domain to bind mcexec to
void *mcexec_cpu_set;
size_t mcexec_cpu_set_size; // Size in bytes
int *ikc_mapped;
};

#define PLD_CPU_SET_MAX_CPUS 1024
typedef unsigned long __cpu_set_unit;
#define PLD_CPU_SET_SIZE (PLD_CPU_SET_MAX_CPUS / (8 * sizeof(__cpu_set_unit)))
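/* For reference: with an 8-byte unsigned long this works out to
 * 1024 / (8 * 8) = 16 words for the CPU bitmap (assumes an LP64 target). */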

#define MPOL_NO_HEAP 0x01
#define MPOL_NO_STACK 0x02
#define MPOL_NO_BSS 0x04
#define MPOL_SHM_PREMAP 0x08

struct program_load_desc {
int num_sections;
int status;
@@ -120,8 +139,13 @@ struct program_load_desc {
unsigned long envs_len;
struct rlimit rlimit[MCK_RLIM_MAX];
unsigned long interp_align;
unsigned long mpol_flags;
unsigned long mpol_threshold;
unsigned long heap_extension;
int nr_processes;
char shell_path[SHELL_PATH_MAX_LEN];
__cpu_set_unit cpu_set[PLD_CPU_SET_SIZE];
int profile;
struct program_image_section sections[0];
};

@@ -220,4 +244,34 @@ struct sys_unshare_desc {
unsigned long unshare_flags;
};

enum perf_ctrl_type {
PERF_CTRL_SET,
PERF_CTRL_GET,
PERF_CTRL_ENABLE,
PERF_CTRL_DISABLE,
};

struct perf_ctrl_desc {
enum perf_ctrl_type ctrl_type;
int status;
union {
/* for SET, GET */
struct {
unsigned int target_cntr;
unsigned long config;
unsigned long read_value;
unsigned disabled :1,
pinned :1,
exclude_user :1,
exclude_kernel :1,
exclude_hv :1,
exclude_idle :1;
};

/* for START, STOP */
struct {
unsigned long target_cntr_mask;
};
};
};
#endif

@@ -1,5 +1,5 @@
#include <linux/version.h>
#include "../../config.h"
#include "../../../config.h"
#include "../../mcctrl.h"

#ifdef MCCTRL_KSYM_vdso_image_64
@@ -64,7 +9,9 @@ reserve_user_space(struct mcctrl_usrdata *usrdata, unsigned long *startp, unsign
unsigned long start = 0L;
unsigned long end;

mutex_lock(&usrdata->reserve_lock);
if (mutex_lock_killable(&usrdata->reserve_lock) < 0) {
return -1;
}

#define DESIRED_USER_END 0x800000000000
#define GAP_FOR_MCEXEC 0x008000000000UL
@@ -194,3 +196,65 @@ out:
ihk_device_unmap_memory(dev, vdso_pa, sizeof(*vdso));
return;
} /* get_vdso_info() */

void *
get_user_sp(void)
{
unsigned long usp;

asm volatile("movq %%gs:0xaf80, %0" : "=r" (usp));
return (void *)usp;
}

void
set_user_sp(void *usp)
{
asm volatile("movq %0, %%gs:0xaf80" :: "r" (usp));
}

struct trans_uctx {
volatile int cond;
int fregsize;

unsigned long rax;
unsigned long rbx;
unsigned long rcx;
unsigned long rdx;
unsigned long rsi;
unsigned long rdi;
unsigned long rbp;
unsigned long r8;
unsigned long r9;
unsigned long r10;
unsigned long r11;
unsigned long r12;
unsigned long r13;
unsigned long r14;
unsigned long r15;
unsigned long rflags;
unsigned long rip;
unsigned long rsp;
unsigned long fs;
};

void
restore_fs(unsigned long fs)
{
wrmsrl(MSR_FS_BASE, fs);
}

void
save_fs_ctx(void *ctx)
{
struct trans_uctx *tctx = ctx;

rdmsrl(MSR_FS_BASE, tctx->fs);
}

unsigned long
get_fs_ctx(void *ctx)
{
struct trans_uctx *tctx = ctx;

return tctx->fs;
}

File diff suppressed because it is too large
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include "mcctrl.h"
#include <ihk/ihk_host_user.h>

#define OS_MAX_MINOR 64

@@ -45,6 +46,12 @@ extern void rus_page_hash_put_pages(void);
extern void binfmt_mcexec_init(void);
extern void binfmt_mcexec_exit(void);

extern int mcctrl_os_read_cpu_register(ihk_os_t os, int cpu,
struct ihk_os_cpu_register *desc);
extern int mcctrl_os_write_cpu_register(ihk_os_t os, int cpu,
struct ihk_os_cpu_register *desc);
extern int mcctrl_get_request_os_cpu(ihk_os_t os, int *cpu);

static long mcctrl_ioctl(ihk_os_t os, unsigned int request, void *priv,
unsigned long arg, struct file *file)
{
@@ -62,6 +69,7 @@ static struct ihk_os_user_call_handler mcctrl_uchs[] = {
{ .request = MCEXEC_UP_GET_CPU, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_GET_NODES, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_GET_CPUSET, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_CREATE_PPD, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_STRNCPY_FROM_USER, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_NEW_PROCESS, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_PREPARE_DMA, .func = mcctrl_ioctl },
@@ -73,7 +81,27 @@ static struct ihk_os_user_call_handler mcctrl_uchs[] = {
{ .request = MCEXEC_UP_SYS_MOUNT, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_SYS_UMOUNT, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_SYS_UNSHARE, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_UTIL_THREAD1, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_UTIL_THREAD2, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_SIG_THREAD, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_SYSCALL_THREAD, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_TERMINATE_THREAD, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_GET_NUM_POOL_THREADS, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_DEBUG_LOG, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_COPY_FROM_MCK, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_COPY_TO_MCK, .func = mcctrl_ioctl },
{ .request = IHK_OS_AUX_PERF_NUM, .func = mcctrl_ioctl },
{ .request = IHK_OS_AUX_PERF_SET, .func = mcctrl_ioctl },
{ .request = IHK_OS_AUX_PERF_GET, .func = mcctrl_ioctl },
{ .request = IHK_OS_AUX_PERF_ENABLE, .func = mcctrl_ioctl },
{ .request = IHK_OS_AUX_PERF_DISABLE, .func = mcctrl_ioctl },
{ .request = IHK_OS_AUX_PERF_DESTROY, .func = mcctrl_ioctl },
};

static struct ihk_os_kernel_call_handler mcctrl_kernel_handlers = {
.get_request_cpu = mcctrl_get_request_os_cpu,
.read_cpu_register = mcctrl_os_read_cpu_register,
.write_cpu_register = mcctrl_os_write_cpu_register,
};

static struct ihk_os_user_call mcctrl_uc_proto = {
@@ -110,12 +138,16 @@ int mcctrl_os_boot_notifier(int os_index)

memcpy(mcctrl_uc + os_index, &mcctrl_uc_proto, sizeof mcctrl_uc_proto);

rc = ihk_os_set_kernel_call_handlers(os[os_index], &mcctrl_kernel_handlers);
if (rc < 0) {
printk("mcctrl: error: setting kernel callbacks for OS %d\n", os_index);
goto error_cleanup_channels;
}

rc = ihk_os_register_user_call_handlers(os[os_index], mcctrl_uc + os_index);
if (rc < 0) {
destroy_ikc_channels(os[os_index]);
printk("mcctrl: error: registering callbacks for OS %d\n", os_index);

goto error_cleanup_channels;
goto error_clear_kernel_handlers;
}

procfs_init(os_index);
@@ -123,6 +155,8 @@ int mcctrl_os_boot_notifier(int os_index)

return 0;

error_clear_kernel_handlers:
ihk_os_clear_kernel_call_handlers(os[os_index]);
error_cleanup_channels:
destroy_ikc_channels(os[os_index]);

@@ -136,6 +170,7 @@ int mcctrl_os_shutdown_notifier(int os_index)
sysfsm_cleanup(os[os_index]);
free_topology_info(os[os_index]);
ihk_os_unregister_user_call_handlers(os[os_index], mcctrl_uc + os_index);
ihk_os_clear_kernel_call_handlers(os[os_index]);
destroy_ikc_channels(os[os_index]);
procfs_exit(os_index);
}

@@ -53,6 +53,10 @@ void mcexec_prepare_ack(ihk_os_t os, unsigned long arg, int err);
static void mcctrl_ikc_init(ihk_os_t os, int cpu, unsigned long rphys, struct ihk_ikc_channel_desc *c);
int mcexec_syscall(struct mcctrl_usrdata *ud, struct ikc_scd_packet *packet);
void sig_done(unsigned long arg, int err);
void mcctrl_perf_ack(ihk_os_t os, struct ikc_scd_packet *packet);
void mcctrl_os_read_write_cpu_response(ihk_os_t os,
struct ikc_scd_packet *pisp);
void mcctrl_eventfd(ihk_os_t os, struct ikc_scd_packet *pisp);

/* XXX: this runs in atomic context! */
static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
@@ -109,6 +113,18 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
get_vdso_info(__os, pisp->arg);
break;

case SCD_MSG_PERF_ACK:
mcctrl_perf_ack(__os, pisp);
break;

case SCD_MSG_CPU_RW_REG_RESP:
mcctrl_os_read_write_cpu_response(__os, pisp);
break;

case SCD_MSG_EVENTFD:
mcctrl_eventfd(__os, pisp);
break;

default:
printk(KERN_ERR "mcctrl:syscall_packet_handler:"
"unknown message (%d.%d.%d.%d.%d.%#lx)\n",
@@ -122,11 +138,22 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
* mcexec_ret_syscall(), for the rest, free it here.
*/
if (msg != SCD_MSG_SYSCALL_ONESIDE) {
ihk_ikc_release_packet((struct ihk_ikc_free_packet *)__packet, c);
ihk_ikc_release_packet((struct ihk_ikc_free_packet *)__packet,
(usrdata->ikc2linux[smp_processor_id()] ?
usrdata->ikc2linux[smp_processor_id()] :
usrdata->ikc2linux[0]));
}
return 0;
}

static int dummy_packet_handler(struct ihk_ikc_channel_desc *c,
void *__packet, void *__os)
{
kprintf("%s: WARNING: packet received\n", __FUNCTION__);
ihk_ikc_release_packet((struct ihk_ikc_free_packet *)__packet, c);
return 0;
}

int mcctrl_ikc_send(ihk_os_t os, int cpu, struct ikc_scd_packet *pisp)
{
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
@@ -196,57 +223,62 @@ static void mcctrl_ikc_init(ihk_os_t os, int cpu, unsigned long rphys, struct ih
ihk_ikc_send(pmc->c, &packet, 0);
}

static int connect_handler(struct ihk_ikc_channel_info *param)
static int connect_handler_ikc2linux(struct ihk_ikc_channel_info *param)
{
struct ihk_ikc_channel_desc *c;
int cpu;
ihk_os_t os = param->channel->remote_os;
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
int linux_cpu;

c = param->channel;
cpu = c->send.queue->read_cpu;
linux_cpu = c->send.queue->write_cpu;
if (linux_cpu > nr_cpu_ids) {
kprintf("%s: invalid Linux CPU id %d\n",
__FUNCTION__, linux_cpu);
return -1;
}
dkprintf("%s: Linux CPU: %d\n", __FUNCTION__, linux_cpu);

if (cpu < 0 || cpu >= usrdata->num_channels) {
kprintf("Invalid connect source processor: %d\n", cpu);
param->packet_handler = syscall_packet_handler;
usrdata->ikc2linux[linux_cpu] = c;

return 0;
}

static int connect_handler_ikc2mckernel(struct ihk_ikc_channel_info *param)
{
struct ihk_ikc_channel_desc *c;
int mck_cpu;
ihk_os_t os = param->channel->remote_os;
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);

c = param->channel;
mck_cpu = c->send.queue->read_cpu;

if (mck_cpu < 0 || mck_cpu >= usrdata->num_channels) {
kprintf("Invalid connect source processor: %d\n", mck_cpu);
return 1;
}
param->packet_handler = syscall_packet_handler;

usrdata->channels[cpu].c = c;
dkprintf("syscall: MC CPU %d connected. c=%p\n", cpu, c);
param->packet_handler = dummy_packet_handler;

usrdata->channels[mck_cpu].c = c;

return 0;
}

static int connect_handler2(struct ihk_ikc_channel_info *param)
{
struct ihk_ikc_channel_desc *c;
int cpu;
ihk_os_t os = param->channel->remote_os;
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);

c = param->channel;
cpu = usrdata->num_channels - 1;

param->packet_handler = syscall_packet_handler;

usrdata->channels[cpu].c = c;
dkprintf("syscall: MC CPU %d connected. c=%p\n", cpu, c);

return 0;
}

static struct ihk_ikc_listen_param listen_param = {
.port = 501,
.handler = connect_handler,
static struct ihk_ikc_listen_param lp_ikc2linux = {
.port = 503,
.ikc_direction = IHK_IKC_DIRECTION_RECV,
.handler = connect_handler_ikc2linux,
.pkt_size = sizeof(struct ikc_scd_packet),
.queue_size = PAGE_SIZE * 4,
.magic = 0x1129,
};

static struct ihk_ikc_listen_param listen_param2 = {
.port = 502,
.handler = connect_handler2,
static struct ihk_ikc_listen_param lp_ikc2mckernel = {
.port = 501,
.ikc_direction = IHK_IKC_DIRECTION_SEND,
.handler = connect_handler_ikc2mckernel,
.pkt_size = sizeof(struct ikc_scd_packet),
.queue_size = PAGE_SIZE * 4,
.magic = 0x1329,
@@ -256,38 +288,57 @@ int prepare_ikc_channels(ihk_os_t os)
{
struct mcctrl_usrdata *usrdata;
int i;
int ret = 0;

usrdata = kzalloc(sizeof(struct mcctrl_usrdata), GFP_KERNEL);
if (!usrdata) {
printk("%s: error: allocating mcctrl_usrdata\n", __FUNCTION__);
ret = -ENOMEM;
goto error;
}

usrdata->cpu_info = ihk_os_get_cpu_info(os);
usrdata->mem_info = ihk_os_get_memory_info(os);

if (!usrdata->cpu_info || !usrdata->mem_info) {
printk("Error: cannot obtain OS CPU and memory information.\n");
return -EINVAL;
printk("%s: cannot obtain OS CPU and memory information.\n",
__FUNCTION__);
ret = -EINVAL;
goto error;
}

if (usrdata->cpu_info->n_cpus < 1) {
printk("Error: # of cpu is invalid.\n");
return -EINVAL;
printk("%s: Error: # of cpu is invalid.\n", __FUNCTION__);
ret = -EINVAL;
goto error;
}

usrdata->num_channels = usrdata->cpu_info->n_cpus + 1;
usrdata->num_channels = usrdata->cpu_info->n_cpus;
usrdata->channels = kzalloc(sizeof(struct mcctrl_channel) *
usrdata->num_channels,
GFP_KERNEL);

if (!usrdata->channels) {
printk("Error: cannot allocate channels.\n");
return -ENOMEM;
ret = -ENOMEM;
goto error;
}

usrdata->ikc2linux = kzalloc(sizeof(struct ihk_ikc_channel_desc *) *
nr_cpu_ids, GFP_KERNEL);

if (!usrdata->ikc2linux) {
printk("Error: cannot allocate ikc2linux channels.\n");
ret = -ENOMEM;
goto error;
}

usrdata->os = os;
ihk_host_os_set_usrdata(os, usrdata);
memcpy(&usrdata->listen_param, &listen_param, sizeof listen_param);
ihk_ikc_listen_port(os, &usrdata->listen_param);
memcpy(&usrdata->listen_param2, &listen_param2, sizeof listen_param2);
ihk_ikc_listen_port(os, &usrdata->listen_param2);

ihk_ikc_listen_port(os, &lp_ikc2linux);
ihk_ikc_listen_port(os, &lp_ikc2mckernel);

init_waitqueue_head(&usrdata->wq_procfs);
mutex_init(&usrdata->reserve_lock);

@@ -300,9 +351,19 @@ int prepare_ikc_channels(ihk_os_t os)
INIT_LIST_HEAD(&usrdata->node_topology_list);

mutex_init(&usrdata->part_exec.lock);
INIT_LIST_HEAD(&usrdata->part_exec.pli_list);
usrdata->part_exec.nr_processes = -1;

return 0;

error:
if (usrdata) {
if (usrdata->channels) kfree(usrdata->channels);
if (usrdata->ikc2linux) kfree(usrdata->ikc2linux);
kfree(usrdata);
}

return ret;
}

void __destroy_ikc_channel(ihk_os_t os, struct mcctrl_channel *pmc)
@@ -324,12 +385,23 @@ void destroy_ikc_channels(ihk_os_t os)

for (i = 0; i < usrdata->num_channels; i++) {
if (usrdata->channels[i].c) {
// ihk_ikc_disconnect(usrdata->channels[i].c);
ihk_ikc_free_channel(usrdata->channels[i].c);
__destroy_ikc_channel(os, usrdata->channels + i);
ihk_ikc_destroy_channel(usrdata->channels[i].c);
}
}

for (i = 0; i < nr_cpu_ids; i++) {
if (usrdata->ikc2linux[i]) {
ihk_ikc_destroy_channel(usrdata->ikc2linux[i]);
}
}

kfree(usrdata->channels);
kfree(usrdata->ikc2linux);
kfree(usrdata);
}

void
mcctrl_eventfd(ihk_os_t os, struct ikc_scd_packet *pisp)
{
ihk_os_eventfd(os, 0);
}

@@ -92,6 +92,14 @@
#define SCD_MSG_PROCFS_TID_CREATE 0x44
#define SCD_MSG_PROCFS_TID_DELETE 0x45

#define SCD_MSG_EVENTFD 0x46

#define SCD_MSG_PERF_CTRL 0x50
#define SCD_MSG_PERF_ACK 0x51

#define SCD_MSG_CPU_RW_REG 0x52
#define SCD_MSG_CPU_RW_REG_RESP 0x53

#define DMA_PIN_SHIFT 21

#define DO_USER_MODE
@@ -103,6 +111,12 @@ struct coretable {
unsigned long addr;
};

enum mcctrl_os_cpu_operation {
MCCTRL_OS_CPU_READ_REGISTER,
MCCTRL_OS_CPU_WRITE_REGISTER,
MCCTRL_OS_CPU_MAX_OP
};

struct ikc_scd_packet {
int msg;
int err;
@@ -128,6 +142,13 @@ struct ikc_scd_packet {
struct {
int ttid;
};

/* SCD_MSG_CPU_RW_REG */
struct {
struct ihk_os_cpu_register desc;
enum mcctrl_os_cpu_operation op;
void *resp;
};
};
char padding[12];
};
@@ -192,9 +213,10 @@ struct mcctrl_per_proc_data {
int pid;
unsigned long rpgtable; /* per process, not per OS */

struct list_head wq_list;
struct list_head wq_req_list;
struct list_head wq_list_exact;
struct list_head wq_list; /* All these requests come from mcexec */
struct list_head wq_req_list; /* These requests come from IKC IRQ handler (can be processed by any threads) */
struct list_head wq_list_exact; /* These requests come from IKC IRQ handler targeting a particular thread */

ihk_spinlock_t wq_list_lock;
wait_queue_head_t wq_prepare;
wait_queue_head_t wq_procfs;
@@ -204,6 +226,9 @@ struct mcctrl_per_proc_data {
cpumask_t cpu_set;
int ikc_target_cpu;
atomic_t refcount;

struct list_head devobj_pager_list;
struct semaphore devobj_pager_lock;
};

struct sysfsm_req {
@@ -260,11 +285,19 @@ struct node_topology {
struct list_head chain;
};

struct process_list_item {
int ready;
struct task_struct *task;
struct list_head list;
wait_queue_head_t pli_wq;
};

struct mcctrl_part_exec {
struct mutex lock;
int nr_processes;
int nr_processes_left;
cpumask_t cpus_used;
struct list_head pli_list;
};

#define CPU_LONGS (((NR_CPUS) + (BITS_PER_LONG) - 1) / (BITS_PER_LONG))
@@ -278,7 +311,10 @@ struct mcctrl_usrdata {
struct ihk_ikc_listen_param listen_param2;
ihk_os_t os;
int num_channels;
/* Channels used for sending messages to LWK */
struct mcctrl_channel *channels;
/* Channels used for receiving messages from LWK */
struct ihk_ikc_channel_desc **ikc2linux;
int remaining_job;
int base_cpu;
int job_pos;
@@ -298,6 +334,7 @@ struct mcctrl_usrdata {
struct list_head cpu_topology_list;
struct list_head node_topology_list;
struct mcctrl_part_exec part_exec;
int perf_event_num;
};

struct mcctrl_signal {
@@ -315,6 +352,9 @@ int mcctrl_ikc_is_valid_thread(ihk_os_t os, int cpu);
ihk_os_t osnum_to_os(int n);

/* syscall.c */
void pager_add_process(void);
void pager_remove_process(struct mcctrl_per_proc_data *ppd);

int __do_in_kernel_syscall(ihk_os_t os, struct ikc_scd_packet *packet);
int mcctrl_add_per_proc_data(struct mcctrl_usrdata *ud, int pid,
struct mcctrl_per_proc_data *ppd);
@@ -401,4 +441,14 @@ struct get_cpu_mapping_req {
wait_queue_head_t wq;
};

struct ihk_perf_event_attr {
unsigned long config;
unsigned disabled:1;
unsigned pinned:1;
unsigned exclude_user:1;
unsigned exclude_kernel:1;
unsigned exclude_hv:1;
unsigned exclude_idle:1;
};

#endif

@@ -640,11 +640,11 @@ retry_wait:
*/
if (pid > 0) {
retw = wait_event_interruptible_timeout(ppd->wq_procfs,
r->status != 0, HZ);
r->status != 0, 5 * HZ);
}
else {
retw = wait_event_interruptible_timeout(udp->wq_procfs,
r->status != 0, HZ);
r->status != 0, 5 * HZ);
}

/* Timeout? */

@@ -45,7 +45,7 @@
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/io.h>
#include "../../config.h"
#include "../../../config.h"
#include "mcctrl.h"
#include <linux/version.h>

@@ -278,9 +278,178 @@ static int __notify_syscall_requester(ihk_os_t os, struct ikc_scd_packet *packet
return ret;
}

long syscall_backward(struct mcctrl_usrdata *usrdata, int num,
unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4,
unsigned long arg5, unsigned long arg6,
unsigned long *ret)
{
struct ikc_scd_packet *packet;
struct syscall_request *req;
struct syscall_response *resp;
unsigned long syscall_ret;
struct wait_queue_head_list_node *wqhln;
unsigned long irqflags;
struct mcctrl_per_proc_data *ppd;
unsigned long phys;
struct syscall_request _request[2];
struct syscall_request *request;

if (((unsigned long)_request ^ (unsigned long)(_request + 1)) &
~(PAGE_SIZE - 1))
request = _request + 1;
else
request = _request;
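/* Presumably the XOR above tests whether _request[] straddles a page
 * boundary: differing address bits above the page offset mean
 * _request[0] crosses into the next page, while _request[1] starts on
 * that next page and fits within it, so virt_to_phys() of the chosen
 * copy is valid for the whole structure. */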
request->number = num;
request->args[0] = arg1;
request->args[1] = arg2;
request->args[2] = arg3;
request->args[3] = arg4;
request->args[4] = arg5;
request->args[5] = arg6;

/* Look up per-process structure */
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));

if (!ppd) {
kprintf("%s: ERROR: no per-process structure for PID %d??\n",
__FUNCTION__, task_tgid_vnr(current));
return -EINVAL;
}

packet = (struct ikc_scd_packet *)mcctrl_get_per_thread_data(ppd, current);
if (!packet) {
syscall_ret = -ENOENT;
printk("%s: no packet registered for TID %d\n",
__FUNCTION__, task_pid_vnr(current));
goto out_put_ppd;
}

req = &packet->req;

/* Map response structure */
phys = ihk_device_map_memory(ihk_os_to_dev(usrdata->os),
packet->resp_pa, sizeof(*resp));
resp = ihk_device_map_virtual(ihk_os_to_dev(usrdata->os),
phys, sizeof(*resp), NULL, 0);

retry_alloc:
wqhln = kmalloc(sizeof(*wqhln), GFP_ATOMIC);
if (!wqhln) {
printk("WARNING: couldn't alloc wait queue head, retrying..\n");
goto retry_alloc;
}

/* Prepare per-thread wait queue head */
wqhln->task = current;
/* Save the TID explicitly, because mcexec_syscall(), where the request
 * will be matched, is in IRQ context and can't call task_pid_vnr() */
wqhln->rtid = task_pid_vnr(current);
wqhln->req = 0;
init_waitqueue_head(&wqhln->wq_syscall);

irqflags = ihk_ikc_spinlock_lock(&ppd->wq_list_lock);
/* Add to exact list */
list_add_tail(&wqhln->list, &ppd->wq_list_exact);
ihk_ikc_spinlock_unlock(&ppd->wq_list_lock, irqflags);

resp->stid = task_pid_vnr(current);
resp->fault_address = virt_to_phys(request);

#define STATUS_IN_PROGRESS 0
#define STATUS_SYSCALL 4
req->valid = 0;

if (__notify_syscall_requester(usrdata->os, packet, resp) < 0) {
printk("%s: WARNING: failed to notify PID %d\n",
__FUNCTION__, packet->pid);
}

mb();
resp->status = STATUS_SYSCALL;

dprintk("%s: tid: %d, syscall: %d SLEEPING\n",
__FUNCTION__, task_pid_vnr(current), num);
/* wait for response */
syscall_ret = wait_event_interruptible(wqhln->wq_syscall, wqhln->req);

/* Remove per-thread wait queue head */
irqflags = ihk_ikc_spinlock_lock(&ppd->wq_list_lock);
list_del(&wqhln->list);
ihk_ikc_spinlock_unlock(&ppd->wq_list_lock, irqflags);

dprintk("%s: tid: %d, syscall: %d WOKEN UP\n",
__FUNCTION__, task_pid_vnr(current), num);

if (syscall_ret) {
kfree(wqhln);
goto out;
}
else {
unsigned long phys2;
struct syscall_response *resp2;

/* Update packet reference */
packet = wqhln->packet;
req = &packet->req;
phys2 = ihk_device_map_memory(ihk_os_to_dev(usrdata->os),
packet->resp_pa, sizeof(*resp));
resp2 = ihk_device_map_virtual(ihk_os_to_dev(usrdata->os),
phys2, sizeof(*resp), NULL, 0);

if (resp != resp2) {
resp = resp2;
phys = phys2;
printk("%s: updated new remote PA for resp\n", __FUNCTION__);
}
}

if (!req->valid) {
printk("%s: not valid\n", __FUNCTION__);
}
req->valid = 0;

/* check result */
if (req->number != __NR_mmap) {
printk("%s: unexpected response. %lx %lx\n",
__FUNCTION__, req->number, req->args[0]);
syscall_ret = -EIO;
goto out;
}
#define PAGER_REQ_RESUME 0x0101
else if (req->args[0] != PAGER_REQ_RESUME) {
resp->ret = pager_call(usrdata->os, (void *)req);

if (__notify_syscall_requester(usrdata->os, packet, resp) < 0) {
printk("%s: WARNING: failed to notify PID %d\n",
__FUNCTION__, packet->pid);
}

mb();
}
else {
*ret = req->args[1];
}

kfree(wqhln);
syscall_ret = 0;
out:
ihk_device_unmap_virtual(ihk_os_to_dev(usrdata->os), resp, sizeof(*resp));
ihk_device_unmap_memory(ihk_os_to_dev(usrdata->os), phys, sizeof(*resp));

out_put_ppd:
dprintk("%s: tid: %d, syscall: %d, syscall_ret: %lu\n",
__FUNCTION__, task_pid_vnr(current), num, syscall_ret);

mcctrl_put_per_proc_data(ppd);
return syscall_ret;
}

static int remote_page_fault(struct mcctrl_usrdata *usrdata, void *fault_addr, uint64_t reason)
{
struct ikc_scd_packet *packet;
struct ikc_scd_packet *free_packet = NULL;
struct syscall_request *req;
struct syscall_response *resp;
int error;
@@ -316,6 +485,12 @@ static int remote_page_fault(struct mcctrl_usrdata *usrdata, void *fault_addr, u
packet->resp_pa, sizeof(*resp));
resp = ihk_device_map_virtual(ihk_os_to_dev(usrdata->os),
phys, sizeof(*resp), NULL, 0);
if (!resp) {
printk("%s: ERROR: invalid response structure address\n",
__FUNCTION__);
error = -EINVAL;
goto out;
}

retry_alloc:
wqhln = kmalloc(sizeof(*wqhln), GFP_ATOMIC);
@@ -377,6 +552,7 @@ retry_alloc:
else {
/* Update packet reference */
packet = wqhln->packet;
free_packet = packet;
req = &packet->req;
{
unsigned long phys2;
@@ -434,6 +610,12 @@ retry_alloc:
kfree(wqhln);
error = 0;
out:
/* Release remote page-fault response packet */
ihk_ikc_release_packet((struct ihk_ikc_free_packet *)free_packet,
(usrdata->ikc2linux[smp_processor_id()] ?
usrdata->ikc2linux[smp_processor_id()] :
usrdata->ikc2linux[0]));

ihk_device_unmap_virtual(ihk_os_to_dev(usrdata->os), resp, sizeof(*resp));
ihk_device_unmap_memory(ihk_os_to_dev(usrdata->os), phys, sizeof(*resp));

@@ -579,11 +761,12 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
size_t pix;
#endif
struct mcctrl_per_proc_data *ppd;
struct ikc_scd_packet *packet;
int ret = 0;

dprintk("mcctrl:page fault:flags %#x pgoff %#lx va %p page %p\n",
vmf->flags, vmf->pgoff, vmf->virtual_address, vmf->page);

/* Look up per-process structure */
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
if (!ppd) {
@@ -593,9 +776,19 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!ppd) {
kprintf("%s: ERROR: no per-process structure for PID %d??\n",
__FUNCTION__, task_tgid_vnr(current));
printk("mcctrl:page fault:flags %#x pgoff %#lx va %p page %p\n",
vmf->flags, vmf->pgoff, vmf->virtual_address, vmf->page);
return -EINVAL;
}

packet = (struct ikc_scd_packet *)mcctrl_get_per_thread_data(ppd, current);
if (!packet) {
error = -ENOENT;
printk("%s: no packet registered for TID %d\n",
__FUNCTION__, task_pid_vnr(current));
goto put_and_out;
}

for (try = 1; ; ++try) {
error = translate_rva_to_rpa(usrdata->os, ppd->rpgtable,
(unsigned long)vmf->virtual_address,
@@ -603,7 +796,10 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#define NTRIES 2
if (!error || (try >= NTRIES)) {
if (error) {
printk("translate_rva_to_rpa: error\n");
printk("%s: error translating 0x%p "
"(req: TID: %u, syscall: %lu)\n",
__FUNCTION__, vmf->virtual_address,
packet->req.rtid, packet->req.number);
}

break;
@@ -616,13 +812,14 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
error = remote_page_fault(usrdata, vmf->virtual_address, reason);
if (error) {
printk("forward_page_fault failed. %d\n", error);
printk("%s: error forwarding PF for 0x%p "
"(req: TID: %d, syscall: %lu)\n",
__FUNCTION__, vmf->virtual_address,
packet->req.rtid, packet->req.number);
break;
}
}
if (error) {
printk("mcctrl:page fault error:flags %#x pgoff %#lx va %p page %p\n",
vmf->flags, vmf->pgoff, vmf->virtual_address, vmf->page);
ret = VM_FAULT_SIGBUS;
goto put_and_out;
}
@@ -636,16 +833,30 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
for (pix = 0; pix < (pgsize / PAGE_SIZE); ++pix) {
struct page *page;

/* LWK may hold large page based mappings that align rva outside
 * Linux' VMA, make sure we don't try to map to those pages */
if (rva + (pix * PAGE_SIZE) < vma->vm_start) {
continue;
}

if (pfn_valid(pfn+pix)) {
page = pfn_to_page(pfn+pix);

if ((error = rus_page_hash_insert(page)) < 0) {
printk("rus_vm_fault: error hashing page??\n");
printk("%s: error adding page to RUS hash for 0x%p "
"(req: TID: %d, syscall: %lu)\n",
__FUNCTION__, vmf->virtual_address,
packet->req.rtid, packet->req.number);
}

error = vm_insert_page(vma, rva+(pix*PAGE_SIZE), page);
if (error) {
printk("vm_insert_page: %d\n", error);
printk("%s: error inserting mapping for 0x%p "
"(req: TID: %d, syscall: %lu) error: %d, "
"vm_start: 0x%lx, vm_end: 0x%lx\n",
__FUNCTION__, vmf->virtual_address,
packet->req.rtid, packet->req.number, error,
vma->vm_start, vma->vm_end);
}
}
else
@@ -659,8 +870,10 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#endif
ihk_device_unmap_memory(dev, phys, pgsize);
if (error) {
printk("mcctrl:page fault:remap error:flags %#x pgoff %#lx va %p page %p\n",
vmf->flags, vmf->pgoff, vmf->virtual_address, vmf->page);
printk("%s: remote PF failed for 0x%p, pgoff: %lu "
"(req: TID: %d, syscall: %lu)\n",
__FUNCTION__, vmf->virtual_address, vmf->pgoff,
packet->req.rtid, packet->req.number);
ret = VM_FAULT_SIGBUS;
goto put_and_out;
}
@@ -716,11 +929,11 @@ reserve_user_space_common(struct mcctrl_usrdata *usrdata, unsigned long start, u
original = override_creds(promoted);

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
start = vm_mmap_pgoff(file, start, end,
PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, 0);
start = vm_mmap_pgoff(file, start, end, PROT_READ|PROT_WRITE|PROT_EXEC,
MAP_FIXED|MAP_SHARED, 0);
#else
start = vm_mmap(file, start, end,
PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, 0);
start = vm_mmap(file, start, end, PROT_READ|PROT_WRITE|PROT_EXEC,
MAP_FIXED|MAP_SHARED, 0);
#endif

revert_creds(original);
@@ -754,6 +967,81 @@ struct pager {
static DEFINE_SEMAPHORE(pager_sem);
static struct list_head pager_list = LIST_HEAD_INIT(pager_list);

int pager_nr_processes = 0;

void pager_add_process(void)
{
int error;
error = down_interruptible(&pager_sem);
if (error) {
return;
}

++pager_nr_processes;

up(&pager_sem);
}

void pager_remove_process(struct mcctrl_per_proc_data *ppd)
{
int error;
struct pager *pager_next, *pager;

if (in_atomic() || in_interrupt()) {
printk("%s: WARNING: shouldn't be called in IRQ context..\n",
__FUNCTION__);
return;
}

/* Clean up device file mappings of this process */
error = down_interruptible(&ppd->devobj_pager_lock);
if (error) {
return;
}

list_for_each_entry_safe(pager, pager_next,
&ppd->devobj_pager_list, list) {

dprintk("%s: devobj pager 0x%lx removed\n", __FUNCTION__, pager);
list_del(&pager->list);
kfree(pager);
}
up(&ppd->devobj_pager_lock);

/* Clean up global pagers for regular file mappings if this
 * was the last process */
error = down_interruptible(&pager_sem);
if (error) {
return;
}

--pager_nr_processes;
if (pager_nr_processes > 0) {
goto out;
}

list_for_each_entry_safe(pager, pager_next, &pager_list, list) {
list_del(&pager->list);

if (pager->rofile) {
fput(pager->rofile);
}

if (pager->rwfile) {
fput(pager->rwfile);
}

dprintk("%s: pager 0x%lx removed\n", __FUNCTION__, pager);
kfree(pager);
}

/* Flush page hash as well */
rus_page_hash_put_pages();

out:
up(&pager_sem);
}

struct pager_create_result {
uintptr_t handle;
int maxprot;
@@ -770,6 +1058,7 @@ enum {
MF_ZEROFILL = 0x0010,
MF_REG_FILE = 0x1000,
MF_DEV_FILE = 0x2000,
MF_PREMAP = 0x8000,
MF_END
};

@@ -863,9 +1152,8 @@ static int pager_req_create(ihk_os_t os, int fd, uintptr_t result_pa)
fullpath = d_path(&file->f_path, pathbuf, PATH_MAX);
if (!IS_ERR(fullpath)) {
if (!strncmp("/dev/shm/Intel_MPI", fullpath, 18)) {
//mf_flags = (MF_PREFETCH | MF_ZEROFILL);
mf_flags = (MF_ZEROFILL);
dprintk("%s: filename: %s, zerofill\n",
mf_flags = (MF_PREMAP | MF_ZEROFILL);
dprintk("%s: filename: %s, premap & zerofill\n",
__FUNCTION__, fullpath);
}
else if (strstr(fullpath, "libmpi") != NULL) {
@@ -906,6 +1194,13 @@ found:

phys = ihk_device_map_memory(dev, result_pa, sizeof(*resp));
resp = ihk_device_map_virtual(dev, phys, sizeof(*resp), NULL, 0);
if (!resp) {
printk("%s: ERROR: invalid response structure address\n",
__FUNCTION__);
error = -EINVAL;
goto out;
}

resp->handle = (uintptr_t)pager;
resp->maxprot = maxprot;
resp->flags = mf_flags;
@@ -1012,6 +1307,13 @@ static int pager_req_read(ihk_os_t os, uintptr_t handle, off_t off, size_t size,

phys = ihk_device_map_memory(dev, rpa, size);
buf = ihk_device_map_virtual(dev, phys, size, NULL, 0);
if (!buf) {
printk("%s: ERROR: invalid buffer address\n",
__FUNCTION__);
ss = -EINVAL;
goto out;
}

fs = get_fs();
set_fs(KERNEL_DS);
pos = off;
@@ -1094,6 +1396,13 @@ static int pager_req_write(ihk_os_t os, uintptr_t handle, off_t off, size_t size

phys = ihk_device_map_memory(dev, rpa, size);
buf = ihk_device_map_virtual(dev, phys, size, NULL, 0);
if (!buf) {
printk("%s: ERROR: invalid buffer address\n",
__FUNCTION__);
ss = -EINVAL;
goto out;
}

fs = get_fs();
set_fs(KERNEL_DS);
pos = off;
@@ -1140,8 +1449,18 @@ static int pager_req_map(ihk_os_t os, int fd, size_t len, off_t off,
struct pager *pager = NULL;
struct pager_map_result *resp;
uintptr_t phys;
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct mcctrl_per_proc_data *ppd = NULL;

dprintk("pager_req_map(%p,%d,%lx,%lx,%lx)\n", os, fd, len, off, result_rpa);

ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
if (unlikely(!ppd)) {
kprintf("%s: ERROR: no per-process structure for PID %d??\n",
__FUNCTION__, task_tgid_vnr(current));
return -1;
}

pager = kzalloc(sizeof(*pager), GFP_ATOMIC);
if (!pager) {
error = -ENOMEM;
@@ -1202,13 +1521,29 @@ static int pager_req_map(ihk_os_t os, int fd, size_t len, off_t off,

phys = ihk_device_map_memory(dev, result_rpa, sizeof(*resp));
resp = ihk_device_map_virtual(dev, phys, sizeof(*resp), NULL, 0);
if (!resp) {
printk("%s: ERROR: invalid response structure address\n",
__FUNCTION__);
error = -EINVAL;
goto out;
}

resp->handle = (uintptr_t)pager;
resp->maxprot = maxprot;
ihk_device_unmap_virtual(dev, resp, sizeof(*resp));
ihk_device_unmap_memory(dev, phys, sizeof(*resp));

error = down_interruptible(&ppd->devobj_pager_lock);
if (error) {
error = -EINTR;
goto out;
}

list_add_tail(&pager->list, &ppd->devobj_pager_list);
up(&ppd->devobj_pager_lock);

pager = 0;
error = 0;
pager = 0; /* pager should be in list? */

out:
if (file) {
@@ -1217,6 +1552,7 @@ out:
if (pager) {
kfree(pager);
}
mcctrl_put_per_proc_data(ppd);
dprintk("pager_req_map(%p,%d,%lx,%lx,%lx): %d\n", os, fd, len, off, result_rpa, error);
return error;
}
@@ -1307,6 +1643,13 @@ out_release:

phys = ihk_device_map_memory(dev, ppfn_rpa, sizeof(*ppfn));
ppfn = ihk_device_map_virtual(dev, phys, sizeof(*ppfn), NULL, 0);
if (!ppfn) {
printk("%s: ERROR: invalid PFN address\n",
__FUNCTION__);
error = -EINVAL;
goto out;
}

*ppfn = pfn;
ihk_device_unmap_virtual(dev, ppfn, sizeof(*ppfn));
ihk_device_unmap_memory(dev, phys, sizeof(*ppfn));
@@ -1317,13 +1660,10 @@ out:
return error;
}

static int pager_req_unmap(ihk_os_t os, uintptr_t handle)
static int __pager_unmap(struct pager *pager)
{
struct pager * const pager = (void *)handle;
int error;

dprintk("pager_req_unmap(%p,%lx)\n", os, handle);

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
down_write(&current->mm->mmap_sem);
error = do_munmap(current->mm, pager->map_uaddr, pager->map_len);
@@ -1333,12 +1673,42 @@ static int pager_req_unmap(ihk_os_t os, uintptr_t handle)
#endif

if (error) {
printk("pager_req_unmap(%p,%lx):do_munmap failed. %d\n", os, handle, error);
/* through */
printk("%s: WARNING: munmap failed for pager 0x%lx: %d\n",
__FUNCTION__, (uintptr_t)pager, error);
}

return error;
}

static int pager_req_unmap(ihk_os_t os, uintptr_t handle)
{
struct pager * const pager = (void *)handle;
int error;
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct mcctrl_per_proc_data *ppd = NULL;

dprintk("pager_req_unmap(%p,%lx)\n", os, handle);

ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
if (unlikely(!ppd)) {
kprintf("%s: ERROR: no per-process structure for PID %d??\n",
__FUNCTION__, task_tgid_vnr(current));
return -1;
}

error = down_interruptible(&ppd->devobj_pager_lock);
if (error) {
error = -EINTR;
goto out;
}

list_del(&pager->list);
up(&ppd->devobj_pager_lock);

error = __pager_unmap(pager);
kfree(pager);
dprintk("pager_req_unmap(%p,%lx): %d\n", os, handle, error);

out:
return error;
}

@@ -1405,6 +1775,12 @@ void __return_syscall(ihk_os_t os, struct ikc_scd_packet *packet,
res = ihk_device_map_virtual(ihk_os_to_dev(os),
phys, sizeof(*res), NULL, 0);

if (!res) {
printk("%s: ERROR: invalid response structure address\n",
__FUNCTION__);
return;
}

/* Map response structure and notify offloading thread */
res->ret = ret;
res->stid = stid;

@@ -278,8 +278,10 @@ release_i(struct sysfsm_node *np)

sdp = np->sdp;

if (np->server_ops && np->server_ops->release) {
(*np->server_ops->release)(np->server_ops, np);
if (np->type != SNT_DIR) {
if (np->server_ops && np->server_ops->release) {
(*np->server_ops->release)(np->server_ops, np);
}
}
kfree(np->name);
kfree(np);
@@ -719,8 +721,6 @@ unlink_i(struct sysfsm_node *np)
else if (np->type == SNT_DIR) {
if (np->parent != np) {
kobject_del(&np->kobj);
error = 0;
goto out;
}
}
else if (np->type == SNT_LINK) {

@@ -14,7 +14,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#include "../../config.h"
#include "../../../config.h"
#include "mcctrl.h"
#include "sysfs_msg.h"

@@ -1,6 +1,6 @@
ENABLE_MCOVERLAYFS=@ENABLE_MCOVERLAYFS@
RELEASE=@UNAME_R@

RELEASE=$(shell uname -r)
MAJOR=$(shell echo ${RELEASE} | sed -e 's/^\([0-9]*\).*/\1/')
MINOR=$(shell echo ${RELEASE} | sed -e 's/^[0-9]*.\([0-9]*\).*/\1/')
PATCH=$(shell echo ${RELEASE} | sed -e 's/^[0-9]*.[0-9]*.\([0-9]*\).*/\1/')
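# Worked example of the parsing above (illustrative): for
# RELEASE=3.10.0-327.36.1.el7 the three sed expressions yield
# MAJOR=3, MINOR=10, PATCH=0, i.e. LINUX_VERSION_CODE 3<<16|10<<8|0 = 199168.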
@@ -9,6 +9,7 @@ RHEL_RELEASE_TMP=$(shell echo ${RELEASE} | sed -e 's/^[0-9]*.[0-9]*.[0-9]*-\([0-
RHEL_RELEASE=$(shell if [ "${RELEASE}" == "${RHEL_RELEASE_TMP}" ]; then echo ""; else echo ${RHEL_RELEASE_TMP}; fi)
BUILD_MODULE_TMP=$(shell if [ "${RHEL_RELEASE}" == "" ]; then echo "org"; else echo "rhel"; fi)
BUILD_MODULE=none
#$(info "LINUX_VERSION_CODE: ${LINUX_VERSION_CODE}, RHEL_RELEASE: ${RHEL_RELEASE}")
ifeq ($(ENABLE_MCOVERLAYFS),yes)
ifeq ($(BUILD_MODULE_TMP),org)
ifeq ($(BUILD_MODULE),none)
@@ -20,7 +21,7 @@ endif
endif
ifeq ($(BUILD_MODULE_TMP),rhel)
ifeq ($(BUILD_MODULE),none)
BUILD_MODULE=$(shell if [ ${LINUX_VERSION_CODE} -eq 199168 -a ${RHEL_RELEASE} -eq 327 ]; then echo "linux-3.10.0-327.36.1.el7"; else echo "none"; fi)
BUILD_MODULE=$(shell if [ ${LINUX_VERSION_CODE} -eq 199168 -a ${RHEL_RELEASE} -ge 327 -a ${RHEL_RELEASE} -le 514 ]; then echo "linux-3.10.0-327.36.1.el7"; else echo "none"; fi)
endif
endif
endif

@@ -420,8 +420,8 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
dentry, dentry->d_inode->i_ino);
OVL_DEBUG("sysfs: realpath.dentry=%pd4, i_ino=%lu\n",
realpath.dentry, realpath.dentry->d_inode->i_ino);
if (!dentry->d_inode->i_private) {
dentry->d_inode->i_private = dentry->d_fsdata;
if (!ovl_find_d_fsdata(dentry)) {
ovl_add_d_fsdata(dentry);
dentry->d_fsdata = realpath.dentry->d_fsdata;
}
}

@@ -43,6 +43,12 @@ enum ovl_opt_bit {
#define OVL_OPT_NOCOPYUPW(opt) ((opt) & __OVL_OPT_NOCOPYUPW)
#define OVL_OPT_NOFSCHECK(opt) ((opt) & __OVL_OPT_NOFSCHECK)

struct ovl_d_fsdata {
struct list_head list;
struct dentry *d;
struct ovl_entry *oe;
};

static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
{
int err = vfs_rmdir(dir, dentry);
@@ -149,6 +155,8 @@ static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry)

unsigned ovl_get_config_opt(struct dentry *dentry);
void ovl_reset_ovl_entry(struct ovl_entry **oe, struct dentry *dentry);
struct ovl_entry *ovl_find_d_fsdata(struct dentry *dentry);
int ovl_add_d_fsdata(struct dentry *dentry);
enum ovl_path_type ovl_path_type(struct dentry *dentry);
u64 ovl_dentry_version_get(struct dentry *dentry);
void ovl_dentry_version_inc(struct dentry *dentry);

@@ -45,6 +45,7 @@ struct ovl_fs {
long lower_namelen;
/* pathnames of lower and upper dirs, for show_options */
struct ovl_config config;
struct list_head d_fsdata_list;
};

struct ovl_dir_cache;
@@ -76,15 +77,76 @@ unsigned ovl_get_config_opt(struct dentry *dentry)
void ovl_reset_ovl_entry(struct ovl_entry **oe, struct dentry *dentry)
{
unsigned opt = ovl_get_config_opt(dentry);
struct ovl_entry *d_fsdata;

if (OVL_OPT_NOFSCHECK(opt)) {
if (dentry->d_inode && dentry->d_inode->i_private &&
!S_ISDIR(dentry->d_inode->i_mode)) {
*oe = dentry->d_inode->i_private;
if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
return;
}

d_fsdata = ovl_find_d_fsdata(dentry);
if (d_fsdata) {
OVL_DEBUG("reset: dentry=%pd4, 0x%p, oe=0x%p\n",
dentry, dentry, d_fsdata);
*oe = d_fsdata;
}
}
}

struct ovl_entry *ovl_find_d_fsdata(struct dentry *dentry)
{
struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
struct ovl_d_fsdata *d_fsdata;

list_for_each_entry(d_fsdata, &ofs->d_fsdata_list, list) {
if (dentry == d_fsdata->d) {
OVL_DEBUG("exist: dentry=%pd4, 0x%p, oe=0x%p\n",
d_fsdata->d, d_fsdata->d, d_fsdata->oe);
return d_fsdata->oe;
}
}

return NULL;
}

int ovl_add_d_fsdata(struct dentry *dentry)
{
struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
struct ovl_d_fsdata *d_fsdata;

d_fsdata = kzalloc(sizeof(struct ovl_d_fsdata), GFP_KERNEL);
if (!d_fsdata) {
return -1;
}

d_fsdata->d = dentry;
d_fsdata->oe = dentry->d_fsdata;

list_add(&d_fsdata->list, &ofs->d_fsdata_list);

OVL_DEBUG("add: dentry=%pd4, 0x%p, oe=0x%p\n",
d_fsdata->d, d_fsdata->d, d_fsdata->oe);

return 0;
}

static int ovl_clear_d_fsdata(struct ovl_fs *ofs)
{
struct ovl_d_fsdata *d_fsdata;
struct ovl_d_fsdata *d_fsdata_next;

list_for_each_entry_safe(d_fsdata, d_fsdata_next, &ofs->d_fsdata_list,
list) {
OVL_DEBUG("delete: dentry=%pd4, 0x%p\n",
d_fsdata->d, d_fsdata->d);
list_del(&d_fsdata->list);

kfree(d_fsdata);
}

return 0;
}

static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe)
{
return oe->numlower ? oe->lowerstack[0].dentry : NULL;
@@ -658,6 +720,8 @@ static void ovl_put_super(struct super_block *sb)
struct ovl_fs *ufs = sb->s_fs_info;
unsigned i;

ovl_clear_d_fsdata(ufs);

dput(ufs->workdir);
mntput(ufs->upper_mnt);
for (i = 0; i < ufs->numlower; i++)
@@ -1049,6 +1113,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
if (!ufs)
goto out;

INIT_LIST_HEAD(&ufs->d_fsdata_list);
err = ovl_parse_opt((char *) data, &ufs->config);
if (err)
goto out_free_config;

@@ -1,28 +1,43 @@
CC=@CC@
BINDIR=@BINDIR@
prefix=@prefix@
exec_prefix=@exec_prefix@
LIBDIR=@libdir@
MCKERNEL_LIBDIR=@MCKERNEL_LIBDIR@
KDIR ?= @KDIR@
CFLAGS=-Wall -O -I.
CFLAGS=-Wall -O -I. -I$(VPATH)/arch/${ARCH}
VPATH=@abs_srcdir@
TARGET=mcexec
TARGET=mcexec libsched_yield
@uncomment_if_ENABLE_MEMDUMP@TARGET+=eclair
LIBS=@LIBS@
ARCH=@ARCH@
IHKDIR ?= $(VPATH)/../../../ihk/linux/include/

all: $(TARGET)

mcexec: mcexec.c
$(CC) -I${KDIR} $(CFLAGS) $(EXTRA_CFLAGS) -fPIE -pie -lrt -lnuma -pthread -o $@ $^ $(EXTRA_OBJS)
mcexec: mcexec.c libmcexec.a
$(CC) -I${KDIR} $(CFLAGS) $(EXTRA_CFLAGS) -DLIBDIR=\"$(LIBDIR)\" -fPIE -pie -L. -lmcexec -lrt -lnuma -pthread -o $@ $^ $(EXTRA_OBJS)

eclair: eclair.c
$(CC) $(CFLAGS) -I${IHKDIR} -o $@ $^ $(LIBS)

clean:
libsched_yield: libsched_yield.c
$(CC) -shared -fPIC -Wl,-soname,sched_yield.so.1 -o libsched_yield.so.1.0.0 $^ -lc -ldl

libmcexec.a::
(cd arch/${ARCH}; make)

clean::
(cd arch/${ARCH}; make clean)
$(RM) $(TARGET) *.o

.PHONY: all clean install

install:
install::
(cd arch/${ARCH}; make install)
mkdir -p -m 755 $(BINDIR)
install -m 755 mcexec $(BINDIR)
mkdir -p -m 755 $(MCKERNEL_LIBDIR)
install -m 755 libsched_yield.so.1.0.0 $(MCKERNEL_LIBDIR)
@uncomment_if_ENABLE_MEMDUMP@install -m 755 eclair $(BINDIR)

23
executer/user/arch/x86_64/Makefile.in
Normal file
@@ -0,0 +1,23 @@
CC=@CC@
AR=ar
BINDIR=@BINDIR@
KDIR ?= @KDIR@
CFLAGS=-Wall -O -I.
VPATH=@abs_srcdir@
TARGET=../../libmcexec.a
LIBS=@LIBS@

all: $(TARGET)

../../libmcexec.a: archdep.o
$(AR) cr ../../libmcexec.a archdep.o

archdep.o: archdep.S
$(CC) -c -I${KDIR} $(CFLAGS) $(EXTRA_CFLAGS) -fPIE -pie -pthread $<

clean:
$(RM) $(TARGET) *.o

.PHONY: all clean install

install:

113
executer/user/arch/x86_64/arch_args.h
Normal file
@@ -0,0 +1,113 @@
#ifndef ARCH_ARGS_H
#define ARCH_ARGS_H

typedef struct user_regs_struct syscall_args;

static inline int
get_syscall_args(int pid, syscall_args *args)
{
return ptrace(PTRACE_GETREGS, pid, NULL, args);
}

static inline int
set_syscall_args(int pid, syscall_args *args)
{
return ptrace(PTRACE_SETREGS, pid, NULL, args);
}

static inline unsigned long
get_syscall_number(syscall_args *args)
{
return args->orig_rax;
}

static inline unsigned long
get_syscall_return(syscall_args *args)
{
return args->rax;
}

static inline unsigned long
get_syscall_arg1(syscall_args *args)
{
return args->rdi;
}

static inline unsigned long
get_syscall_arg2(syscall_args *args)
{
return args->rsi;
}

static inline unsigned long
get_syscall_arg3(syscall_args *args)
{
return args->rdx;
}

static inline unsigned long
get_syscall_arg4(syscall_args *args)
{
return args->r10;
}

static inline unsigned long
get_syscall_arg5(syscall_args *args)
{
return args->r8;
}

static inline unsigned long
get_syscall_arg6(syscall_args *args)
{
return args->r9;
}

static inline void
set_syscall_number(syscall_args *args, unsigned long value)
{
args->orig_rax = value;
}

static inline void
set_syscall_return(syscall_args *args, unsigned long value)
{
args->rax = value;
}

static inline void
set_syscall_arg1(syscall_args *args, unsigned long value)
{
args->rdi = value;
}

static inline void
set_syscall_arg2(syscall_args *args, unsigned long value)
{
args->rsi = value;
}

static inline void
set_syscall_arg3(syscall_args *args, unsigned long value)
{
args->rdx = value;
}

static inline void
set_syscall_arg4(syscall_args *args, unsigned long value)
{
args->r10 = value;
}

static inline void
set_syscall_arg5(syscall_args *args, unsigned long value)
{
args->r8 = value;
}

static inline void
set_syscall_arg6(syscall_args *args, unsigned long value)
{
args->r9 = value;
}
#endif
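The accessors above encode the x86-64 syscall ABI: the number lives in orig_rax and the arguments in rdi, rsi, rdx, r10, r8, r9. A minimal usage sketch (illustrative only; dump_syscall and the stopped-child setup are assumptions, not part of this commit):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include "arch_args.h"

/* Print the syscall a ptrace-stopped child is about to make.
 * Assumes the caller already attached and waited for a syscall stop. */
static void dump_syscall(int pid)
{
	syscall_args regs;

	if (get_syscall_args(pid, &regs) == 0) {
		printf("pid %d: syscall %lu (arg1=%#lx, arg2=%#lx)\n",
		       pid, get_syscall_number(&regs),
		       get_syscall_arg1(&regs), get_syscall_arg2(&regs));
	}
}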
149
executer/user/arch/x86_64/archdep.S
Normal file
@ -0,0 +1,149 @@
|
||||
/*
|
||||
arg: rdi, rsi, rdx, rcx, r8, r9
|
||||
ret: rax
|
||||
|
||||
rax syscall number
|
||||
syscall: (rax:num) rdi rsi rdx r10 r8 r9 (rcx:ret addr)
|
||||
fd, cmd, param
|
||||
rdi: fd
|
||||
rsi: cmd
|
||||
rdx: param
|
||||
rcx: save area
|
||||
r8: new thread context
|
||||
*/
|
||||
|
||||
.global switch_ctx
|
||||
switch_ctx:
|
||||
movq $0,0x00(%rcx)
|
||||
movq %rax,0x8(%rcx)
|
||||
movq %rbx,0x10(%rcx)
|
||||
movq %rcx,0x18(%rcx)
|
||||
movq %rdx,0x20(%rcx)
|
||||
movq %rsi,0x28(%rcx)
|
||||
movq %rdi,0x30(%rcx)
|
||||
movq %rbp,0x38(%rcx)
|
||||
movq %r8,0x40(%rcx)
|
||||
movq %r9,0x48(%rcx)
|
||||
movq %r10,0x50(%rcx)
|
||||
movq %r11,0x58(%rcx)
|
||||
movq %r12,0x60(%rcx)
|
||||
movq %r13,0x68(%rcx)
|
||||
movq %r14,0x70(%rcx)
|
||||
movq %r15,0x78(%rcx)
|
||||
pushfq
|
||||
popq %rax
|
||||
movq %rax,0x80(%rcx)
|
||||
movq 0x00(%rsp),%rax
|
||||
movq %rax,0x88(%rcx)
|
||||
movq %rsp,0x90(%rcx)
|
||||
movq %rcx,%r10
|
||||
|
||||
pushq %rcx
|
||||
pushq %r8
|
||||
pushq %rax
|
||||
|
||||
mov $0x10,%eax /* ioctl */
|
||||
syscall
|
||||
3:
|
||||
|
||||
popq %r8
|
||||
popq %r8
|
||||
popq %rcx
|
||||
|
||||
movq %r10,%rcx
|
||||
cmp $0xfffffffffffff001,%eax
|
||||
	jae 1f

	test %eax,%eax
	jnz 2f

	pushq %rax
	movq $158,%rax /* arch_prctl */
	movq $0x1002,%rdi /* ARCH_SET_FS */
	movq 0x98(%r8),%rsi
	syscall
	popq %rax

	movq 0x10(%r8),%rbx
	movq 0x18(%r8),%rcx
	movq 0x20(%r8),%rdx
	movq 0x28(%r8),%rsi
	movq 0x30(%r8),%rdi
	movq 0x38(%r8),%rbp
	movq 0x48(%r8),%r9
	movq 0x50(%r8),%r10
	movq 0x58(%r8),%r11
	movq 0x60(%r8),%r12
	movq 0x68(%r8),%r13
	movq 0x70(%r8),%r14
	movq 0x78(%r8),%r15
	movq 0x80(%r8),%rax
	pushq %rax
	popfq
	movq 0x90(%r8),%rsp
	// movq 0x8(%r8),%rax /* for interrupts */
	movq 0x40(%r8),%r8

	movq $0,%rax /* ioctl return */

	pushq %rcx
	retq
1:
	mov $0xffffffffffffffff,%rax
2:
	pushq %rax
	movq $158,%rax /* arch_prctl */
	movq $0x1002,%rdi /* ARCH_SET_FS */
	movq 0x98(%rcx),%rsi
	syscall
	popq %rax

	movq 0x10(%rcx),%rbx
	movq 0x28(%rcx),%rsi
	movq 0x30(%rcx),%rdi
	movq 0x38(%rcx),%rbp
	movq 0x40(%rcx),%r8
	movq 0x48(%rcx),%r9
	movq 0x50(%rcx),%r10
	movq 0x58(%rcx),%r11
	movq 0x60(%rcx),%r12
	movq 0x68(%rcx),%r13
	movq 0x70(%rcx),%r14
	movq 0x78(%rcx),%r15
	movq 0x80(%rcx),%rdx
	pushq %rdx
	popfq
	movq 0x20(%rcx),%rdx
	movq 0x18(%rcx),%rcx
	retq

/*
	arg: rdi, rsi, rdx, rcx, r8, r9
	ret: rax
	unsigned long
	compare_and_swap(unsigned long *addr, unsigned long old, unsigned long new);
	rdi: addr
	rsi: old
	rdx: new
	RET: old value
*/
.global compare_and_swap
compare_and_swap:
	movq %rsi,%rax
	lock
	cmpxchgq %rdx,0(%rdi)
	retq

/*
	unsigned int
	compare_and_swap_int(unsigned int *addr, unsigned int old, unsigned int new);
	ret: old value
*/
.global compare_and_swap_int
compare_and_swap_int:
	movl %esi,%eax
	lock
	cmpxchgl %edx,0(%rdi)
	retq
3 executer/user/archdep.h Normal file
@@ -0,0 +1,3 @@
extern int switch_ctx(int fd, unsigned long cmd, void **param, void *lctx, void *rctx);
extern unsigned long compare_and_swap(unsigned long *addr, unsigned long old, unsigned long new);
extern unsigned int compare_and_swap_int(unsigned int *addr, unsigned int old, unsigned int new);
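Per the cmpxchg implementations above, both CAS helpers return the value that was observed at *addr, so a caller detects a successful swap by comparing the return value against old. A minimal sketch of how user-level code might build on compare_and_swap_int; the spinlock itself is illustrative and not part of the source:

/* Sketch: test-and-set lock on top of the CAS helper declared above. */
extern unsigned int compare_and_swap_int(unsigned int *addr,
		unsigned int old, unsigned int new);

static void spin_lock(unsigned int *lock)
{
	/* CAS returns the observed value; seeing 0 means we took the lock. */
	while (compare_and_swap_int(lock, 0, 1) != 0)
		;
}

static void spin_unlock(unsigned int *lock)
{
	__sync_synchronize();	/* make the critical section visible first */
	*lock = 0;
}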
@@ -578,14 +578,14 @@ static int setup_dump(char *fname) {
		return 1;
	}

	kernel_base = mem_chunks->kernel_base;

	dumpscn = bfd_get_section_by_name(dumpbfd, "physmem");
	if (!dumpscn) {
		bfd_perror("bfd_get_section_by_name");
		return 1;
	}

	kernel_base = dumpscn->vma + 0x200000;

	return 0;
} /* setup_dump() */
27 executer/user/libsched_yield.c Normal file
@@ -0,0 +1,27 @@
#define _GNU_SOURCE
#include <dlfcn.h>
#include <sys/time.h>
#include <sched.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#undef sched_yield

typedef int (*int_void_fn)(void);

static int_void_fn orig_sched_yield = 0;

int sched_yield(void)
{
#if 0
	if (!orig_sched_yield) {
		orig_sched_yield = (int_void_fn)dlsym(RTLD_NEXT, "sched_yield");
	}

	printf("sched_yield() called\n");
#endif

	return 0;
}
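Since this file redefines the libc symbol, building it as a shared object and preloading it (for instance via the dynamic loader's LD_PRELOAD mechanism) turns every sched_yield() call in an application into an immediate no-op. The code kept under #if 0 shows how the real libc implementation could still be looked up through dlsym(RTLD_NEXT, ...) if chaining were ever needed.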
File diff suppressed because it is too large
@@ -3,10 +3,11 @@ SRC=$(VPATH)
IHKDIR=$(IHKBASE)/$(TARGETDIR)
OBJS = init.o mem.o debug.o mikc.o listeners.o ap.o syscall.o cls.o host.o
OBJS += process.o copy.o waitq.o futex.o timer.o plist.o fileobj.o shmobj.o
OBJS += zeroobj.o procfs.o devobj.o sysfs.o xpmem.o
OBJS += zeroobj.o procfs.o devobj.o sysfs.o xpmem.o profile.o freeze.o
OBJS += rbtree.o
DEPSRCS=$(wildcard $(SRC)/*.c)

CFLAGS += -I$(SRC)/include -D__KERNEL__ -g -fno-omit-frame-pointer -fno-inline -fno-inline-small-functions
CFLAGS += -I$(SRC)/include -I@abs_builddir@/../ -I@abs_builddir@/include -D__KERNEL__ -g -fno-omit-frame-pointer -fno-inline -fno-inline-small-functions
LDFLAGS += -e arch_start
IHKOBJ = ihk/ihk.o
19 kernel/ap.c
@@ -25,15 +25,18 @@
#include <init.h>
#include <march.h>
#include <cls.h>
#include <time.h>
#include <syscall.h>
#include <rusage.h>

//#define DEBUG_PRINT_AP

#ifdef DEBUG_PRINT_AP
#define dkprintf(...) kprintf(__VA_ARGS__)
#define ekprintf(...) kprintf(__VA_ARGS__)
#define dkprintf(...) do { kprintf(__VA_ARGS__); } while (0)
#define ekprintf(...) do { kprintf(__VA_ARGS__); } while (0)
#else
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#define ekprintf(...) kprintf(__VA_ARGS__)
#define dkprintf(...) do { } while (0)
#define ekprintf(...) do { kprintf(__VA_ARGS__); } while (0)
#endif

int num_processors = 1;
@@ -56,9 +59,13 @@ static void ap_wait(void)

	if (find_command_line("hidos")) {
		mcs_lock_node_t mcs_node;

		int ikc_cpu = ihk_mc_get_ikc_cpu(ihk_mc_get_processor_id());
		if(ikc_cpu < 0) {
			ekprintf("%s,ihk_mc_get_ikc_cpu failed\n", __FUNCTION__);
		}
		mcs_lock_lock_noirq(&ap_syscall_semaphore, &mcs_node);
		init_host_syscall_channel();
		init_host_ikc2mckernel();
		init_host_ikc2linux(ikc_cpu);
		mcs_lock_unlock_noirq(&ap_syscall_semaphore, &mcs_node);
	}
@@ -19,6 +19,7 @@
#include <ihk/page_alloc.h>
#include <cls.h>
#include <page.h>
#include <rusage.h>

extern int num_processors;

@@ -28,12 +29,19 @@ int cpu_local_var_initialized = 0;
void cpu_local_var_init(void)
{
	int z;
	int i;

	z = sizeof(struct cpu_local_var) * num_processors;
	z = (z + PAGE_SIZE - 1) >> PAGE_SHIFT;

	clv = ihk_mc_alloc_pages(z, IHK_MC_AP_CRITICAL);
	memset(clv, 0, z * PAGE_SIZE);

	for (i = 0; i < num_processors; i++) {
		clv[i].monitor = monitor->cpu + i;
		INIT_LIST_HEAD(&clv[i].smp_func_req_list);
	}

	cpu_local_var_initialized = 1;
}
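A note on the sizing arithmetic above: z = (z + PAGE_SIZE - 1) >> PAGE_SHIFT is the usual round-up conversion from a byte count to whole pages. For example, with 4 KiB pages a 9000-byte array gives (9000 + 4095) >> 12 = 3 pages, and the subsequent memset of z * PAGE_SIZE bytes therefore clears exactly the allocated area.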
@@ -231,6 +231,9 @@ static int devobj_get_page(struct memobj *memobj, off_t off, int p2align, uintpt

	memobj_lock(&obj->memobj);
	pfn = obj->pfn_table[ix];
#ifdef PROFILE_ENABLE
	profile_event_add(PROFILE_page_fault_dev_file, PAGE_SIZE);
#endif // PROFILE_ENABLE
	if (!(pfn & PFN_VALID)) {
		memobj_unlock(&obj->memobj);
128 kernel/fileobj.c
@@ -29,8 +29,7 @@
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#define ekprintf(...) kprintf(__VA_ARGS__)

mcs_rwlock_lock_t fileobj_list_lock =
	{{{0}, MCS_RWLOCK_TYPE_COMMON_READER, 0, 0, 0, NULL}, NULL};
mcs_rwlock_lock_t fileobj_list_lock;
static LIST_HEAD(fileobj_list);

#define FILEOBJ_PAGE_HASH_SHIFT 9
@@ -231,6 +230,54 @@ int fileobj_create(int fd, struct memobj **objp, int *maxprotp)
	if (to_memobj(obj)->flags & MF_PREFETCH) {
		to_memobj(obj)->status = MEMOBJ_TO_BE_PREFETCHED;
	}

	/* XXX: KNL specific optimization for OFP runs */
	if ((to_memobj(obj)->flags & MF_PREMAP) &&
			(to_memobj(obj)->flags & MF_ZEROFILL)) {
		struct memobj *mo = to_memobj(obj);
		int nr_pages = (result.size + (PAGE_SIZE - 1))
			>> PAGE_SHIFT;
		int j = 0;
		int node = ihk_mc_get_nr_numa_nodes() / 2;
		dkprintf("%s: MF_PREMAP, start node: %d\n",
			__FUNCTION__, node);

		mo->pages = kmalloc(nr_pages * sizeof(void *), IHK_MC_AP_NOWAIT);
		if (!mo->pages) {
			kprintf("%s: WARNING: failed to allocate pages\n",
				__FUNCTION__);
			goto error_cleanup;
		}

		mo->nr_pages = nr_pages;
		memset(mo->pages, 0, nr_pages * sizeof(*mo->pages));

		if (cpu_local_var(current)->proc->mpol_flags & MPOL_SHM_PREMAP) {
			/* Get the actual pages NUMA interleaved */
			for (j = 0; j < nr_pages; ++j) {
				mo->pages[j] = ihk_mc_alloc_aligned_pages_node_user(1,
					PAGE_P2ALIGN, IHK_MC_AP_NOWAIT, node);
				if (!mo->pages[j]) {
					kprintf("%s: ERROR: allocating pages[%d]\n",
						__FUNCTION__, j);
					goto error_cleanup;
				}

				memset(mo->pages[j], 0, PAGE_SIZE);

				++node;
				if (node == ihk_mc_get_nr_numa_nodes()) {
					node = ihk_mc_get_nr_numa_nodes() / 2;
				}
			}
			dkprintf("%s: allocated %d pages interleaved\n",
				__FUNCTION__, nr_pages);
		}
error_cleanup:
		/* TODO: cleanup allocated portion */
		;
	}

	newobj = NULL;
	dkprintf("%s: new obj 0x%lx cref: %d, %s\n",
		__FUNCTION__,
@@ -319,12 +366,14 @@ static void fileobj_release(struct memobj *memobj)
		page_va = phys_to_virt(page_to_phys(page));

		if (ihk_atomic_read(&page->count) != 1) {
			kprintf("%s: WARNING: page count for phys 0x%lx is invalid\n",
				__FUNCTION__, page->phys);
			kprintf("%s: WARNING: page count %d for phys 0x%lx is invalid, flags: 0x%lx\n",
				__FUNCTION__,
				ihk_atomic_read(&page->count),
				page->phys,
				to_memobj(free_obj)->flags);
		}

		if (page_unmap(page)) {
			ihk_mc_free_pages(page_va, 1);
		else if (page_unmap(page)) {
			ihk_mc_free_pages_user(page_va, 1);
		}
#if 0
		count = ihk_atomic_sub_return(1, &page->count);
@@ -345,6 +394,19 @@ static void fileobj_release(struct memobj *memobj)
		page->mode = PM_NONE;
#endif
	}

	/* Pre-mapped? */
	if (to_memobj(free_obj)->flags & MF_PREMAP) {
		int i;

		for (i = 0; i < to_memobj(free_obj)->nr_pages; ++i) {
			if (to_memobj(free_obj)->pages[i])
				ihk_mc_free_pages_user(to_memobj(free_obj)->pages[i], 1);
		}

		kfree(to_memobj(free_obj)->pages);
	}

	obj_list_remove(free_obj);
	mcs_rwlock_writer_unlock_noirq(&fileobj_list_lock, &node);
	kfree(free_obj);
@@ -414,6 +476,9 @@ static void fileobj_do_pageio(void *args0)
		if (to_memobj(obj)->flags & MF_ZEROFILL) {
			void *virt = phys_to_virt(page_to_phys(page));
			memset(virt, 0, PAGE_SIZE);
#ifdef PROFILE_ENABLE
			profile_event_add(PROFILE_page_fault_file_clr, PAGE_SIZE);
#endif // PROFILE_ENABLE
		}
		else {
			page->mode = PM_PAGEIO;
@@ -484,6 +549,46 @@ static int fileobj_get_page(struct memobj *memobj, off_t off,
		return -ENOMEM;
	}

#ifdef PROFILE_ENABLE
	profile_event_add(PROFILE_page_fault_file, PAGE_SIZE);
#endif // PROFILE_ENABLE

	if (memobj->flags & MF_PREMAP) {
		int page_ind = off >> PAGE_SHIFT;

		if (!memobj->pages[page_ind]) {
			virt = ihk_mc_alloc_pages_user(1, IHK_MC_AP_NOWAIT | IHK_MC_AP_USER);

			if (!virt) {
				error = -ENOMEM;
				kprintf("fileobj_get_page(%p,%lx,%x,%p):"
					"alloc failed. %d\n",
					obj, off, p2align, physp,
					error);
				goto out_nolock;
			}

			/* Update the array but see if someone did it already and use
			 * that if so */
			if (!__sync_bool_compare_and_swap(&memobj->pages[page_ind],
						NULL, virt)) {
				ihk_mc_free_pages_user(virt, 1);
			}
			else {
				dkprintf("%s: MF_ZEROFILL: off: %lu -> 0x%lx allocated\n",
					__FUNCTION__, off, virt_to_phys(virt));
			}
		}

		virt = memobj->pages[page_ind];
		error = 0;
		*physp = virt_to_phys(virt);
		dkprintf("%s: MF_ZEROFILL: off: %lu -> 0x%lx resolved\n",
			__FUNCTION__, off, virt_to_phys(virt));
		virt = NULL;
		goto out_nolock;
	}

	mcs_rwlock_writer_lock_noirq(&obj->page_hash_locks[hash],
		&mcs_node);
	page = __fileobj_page_hash_lookup(obj, hash, off);
@@ -501,7 +606,7 @@ static int fileobj_get_page(struct memobj *memobj, off_t off,
	if (!page) {
		npages = 1 << p2align;

		virt = ihk_mc_alloc_pages(npages, IHK_MC_AP_NOWAIT |
		virt = ihk_mc_alloc_pages_user(npages, IHK_MC_AP_NOWAIT |
			(to_memobj(obj)->flags & MF_ZEROFILL) ? IHK_MC_AP_USER : 0);

		if (!virt) {
@@ -559,8 +664,9 @@ static int fileobj_get_page(struct memobj *memobj, off_t off,
out:
	mcs_rwlock_writer_unlock_noirq(&obj->page_hash_locks[hash],
		&mcs_node);
out_nolock:
	if (virt) {
		ihk_mc_free_pages(virt, npages);
		ihk_mc_free_pages_user(virt, npages);
	}
	if (args) {
		kfree(args);
@@ -582,6 +688,10 @@ static int fileobj_flush_page(struct memobj *memobj, uintptr_t phys,
		return 0;
	}

	if (memobj->flags & MF_HOST_RELEASED) {
		return 0;
	}

	page = phys_to_page(phys);
	if (!page) {
		kprintf("%s: warning: tried to flush non-existing page for phys addr: 0x%lx\n",
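The MF_PREMAP fault path above resolves races between CPUs faulting on the same offset by letting each contender allocate a page and publish it with a single compare-and-swap; the losers simply free their copy. The pattern in isolation (a sketch with illustrative names, not code from the source; error handling omitted):

/* Lock-free "first writer wins" publication of a page pointer. */
void *publish_page(void **slot, void *(*alloc_page)(void),
		void (*free_page)(void *))
{
	if (*slot == NULL) {
		void *fresh = alloc_page();

		/* Install fresh only if the slot is still empty. */
		if (!__sync_bool_compare_and_swap(slot, NULL, fresh)) {
			/* Somebody else won the race; drop our copy. */
			free_page(fresh);
		}
	}
	return *slot;	/* always the single winning page */
}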
55 kernel/freeze.c Normal file
@@ -0,0 +1,55 @@
#include <kmsg.h>
#include <string.h>
#include <ihk/cpu.h>
#include <ihk/debug.h>
#include <cls.h>
#include <rusage.h>

extern int nmi_mode;
extern void mod_nmi_ctx(void *, void(*)());
extern void lapic_ack();
extern void __freeze();

void
freeze()
{
	struct ihk_os_cpu_monitor *monitor = cpu_local_var(monitor);

	monitor->status_bak = monitor->status;
	monitor->status = IHK_OS_MONITOR_KERNEL_FROZEN;
	while (monitor->status == IHK_OS_MONITOR_KERNEL_FROZEN)
		cpu_halt();
	monitor->status = monitor->status_bak;
}

long
freeze_thaw(void *nmi_ctx)
{
	struct ihk_os_cpu_monitor *monitor = cpu_local_var(monitor);

	if (nmi_mode == 1) {
		if (monitor->status != IHK_OS_MONITOR_KERNEL_FROZEN) {
#if 1
			mod_nmi_ctx(nmi_ctx, __freeze);
			return 1;
#else
			unsigned long flags;

			flags = cpu_disable_interrupt_save();
			monitor->status_bak = monitor->status;
			monitor->status = IHK_OS_MONITOR_KERNEL_FROZEN;
			lapic_ack();
			while (monitor->status == IHK_OS_MONITOR_KERNEL_FROZEN)
				cpu_halt();
			monitor->status = monitor->status_bak;
			cpu_restore_interrupt(flags);
#endif
		}
	}
	else if(nmi_mode == 2) {
		if (monitor->status == IHK_OS_MONITOR_KERNEL_FROZEN) {
			monitor->status = IHK_OS_MONITOR_KERNEL_THAW;
		}
	}
	return 0;
}
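Reading the two entry points together: with nmi_mode == 1 an NMI requests a freeze, and freeze_thaw() arranges it by rewriting the interrupted context (mod_nmi_ctx) so the CPU resumes in __freeze()/freeze() and parks in cpu_halt() until the monitor status changes; with nmi_mode == 2 the NMI performs the thaw by flipping the status to IHK_OS_MONITOR_KERNEL_THAW, which breaks the wait loop. The disabled #else branch would instead freeze synchronously inside the NMI handler.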
@@ -788,6 +788,15 @@ static int futex_wait(uint32_t __user *uaddr, int fshared,
	if (!bitset)
		return -EINVAL;

#ifdef PROFILE_ENABLE
	if (cpu_local_var(current)->profile &&
			cpu_local_var(current)->profile_start_ts) {
		cpu_local_var(current)->profile_elapsed_ts +=
			(rdtsc() - cpu_local_var(current)->profile_start_ts);
		cpu_local_var(current)->profile_start_ts = 0;
	}
#endif

	q.bitset = bitset;
	q.requeue_pi_key = NULL;

@@ -822,6 +831,11 @@ retry:
out_put_key:
	put_futex_key(fshared, &q.key);
out:
#ifdef PROFILE_ENABLE
	if (cpu_local_var(current)->profile) {
		cpu_local_var(current)->profile_start_ts = rdtsc();
	}
#endif
	return ret;
}
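The two PROFILE_ENABLE hunks bracket the blocking part of futex_wait(): elapsed TSC is accumulated and the running timestamp cleared before the thread may sleep, and the timestamp is restarted on every exit path, so time spent blocked is excluded from the thread's profile. The pause/resume idiom on its own, with illustrative helper names (the struct thread fields come from the process.h hunk below):

/* Pause/resume TSC-based accounting around a blocking region (sketch). */
static inline void profile_pause(struct thread *t)
{
	if (t->profile && t->profile_start_ts) {
		t->profile_elapsed_ts += rdtsc() - t->profile_start_ts;
		t->profile_start_ts = 0;	/* mark the clock as stopped */
	}
}

static inline void profile_resume(struct thread *t)
{
	if (t->profile)
		t->profile_start_ts = rdtsc();
}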
283 kernel/host.c
@@ -23,14 +23,15 @@
#include <ihk/debug.h>
#include <ihk/ikc.h>
#include <ikc/master.h>
#include <syscall.h>
#include <cls.h>
#include <syscall.h>
#include <process.h>
#include <page.h>
#include <mman.h>
#include <init.h>
#include <kmalloc.h>
#include <sysfs.h>
#include <ihk/perfctr.h>

//#define DEBUG_PRINT_HOST

@@ -40,6 +41,9 @@
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#endif

/* Linux channel table, indexed by Linux CPU id */
static struct ihk_ikc_channel_desc **ikc2linuxs = NULL;

void check_mapping_for_proc(struct thread *thread, unsigned long addr)
{
	unsigned long __phys;
@@ -87,11 +91,15 @@ int prepare_process_ranges_args_envs(struct thread *thread,
	struct address_space *as = vm->address_space;
	long aout_base;
	int error;
	struct vm_range *range;
	unsigned long ap_flags;
	enum ihk_mc_pt_attribute ptattr;

	n = p->num_sections;

	aout_base = (pn->reloc)? vm->region.map_end: 0;
	for (i = 0; i < n; i++) {
		ap_flags = 0;
		if (pn->sections[i].interp && (interp_nbase == (uintptr_t)-1)) {
			interp_obase = pn->sections[i].vaddr;
			interp_obase -= (interp_obase % pn->interp_align);
@@ -112,48 +120,51 @@ int prepare_process_ranges_args_envs(struct thread *thread,
		s = (pn->sections[i].vaddr) & PAGE_MASK;
		e = (pn->sections[i].vaddr + pn->sections[i].len
			+ PAGE_SIZE - 1) & PAGE_MASK;
		range_npages = (e - s) >> PAGE_SHIFT;
		range_npages = ((pn->sections[i].vaddr - s) +
			pn->sections[i].filesz + PAGE_SIZE - 1) >> PAGE_SHIFT;
		flags = VR_NONE;
		flags |= PROT_TO_VR_FLAG(pn->sections[i].prot);
		flags |= VRFLAG_PROT_TO_MAXPROT(flags);
		flags |= VR_DEMAND_PAGING;

		if ((up_v = ihk_mc_alloc_pages(range_npages, IHK_MC_AP_NOWAIT))
				== NULL) {
			kprintf("ERROR: alloc pages for ELF section %i\n", i);
			goto err;
		}

		up = virt_to_phys(up_v);
		if (add_process_memory_range(vm, s, e, up, flags, NULL, 0,
				PAGE_SHIFT, NULL) != 0) {
			ihk_mc_free_pages(up_v, range_npages);
		/* Non-TEXT sections that are large respect user allocation policy
		 * unless user explicitly requests otherwise */
		if (i >= 1 && pn->sections[i].len >= pn->mpol_threshold &&
				!(pn->mpol_flags & MPOL_NO_BSS)) {
			dkprintf("%s: section: %d size: %d pages -> IHK_MC_AP_USER\n",
				__FUNCTION__, i, range_npages);
			ap_flags = IHK_MC_AP_USER;
			flags |= VR_AP_USER;
		}

		if (add_process_memory_range(vm, s, e, NOPHYS, flags, NULL, 0,
				pn->sections[i].len > LARGE_PAGE_SIZE ?
				LARGE_PAGE_SHIFT : PAGE_SHIFT,
				&range) != 0) {
			kprintf("ERROR: adding memory range for ELF section %i\n", i);
			goto err;
		}

		{
			void *_virt = (void *)s;
			unsigned long _phys;
			if (ihk_mc_pt_virt_to_phys(as->page_table,
					_virt, &_phys)) {
				kprintf("ERROR: no mapping for 0x%lX\n", _virt);
			}
			for (_virt = (void *)s + PAGE_SIZE;
					(unsigned long)_virt < e; _virt += PAGE_SIZE) {
				unsigned long __phys;
				if (ihk_mc_pt_virt_to_phys(as->page_table,
						_virt, &__phys)) {
					kprintf("ERROR: no mapping for 0x%lX\n", _virt);
					panic("mapping");
				}
				if (__phys != _phys + PAGE_SIZE) {
kprintf("0x%lX + PAGE_SIZE is not physically contigous, from 0x%lX to 0x%lX\n", _virt - PAGE_SIZE, _phys, __phys);
					panic("mondai");
				}
		if ((up_v = ihk_mc_alloc_pages_user(range_npages,
				IHK_MC_AP_NOWAIT | ap_flags)) == NULL) {
			kprintf("ERROR: alloc pages for ELF section %i\n", i);
			goto err;
		}

				_phys = __phys;
			}
dkprintf("0x%lX -> 0x%lX is physically contigous\n", s, e);
		up = virt_to_phys(up_v);

		ptattr = arch_vrflag_to_ptattr(range->flag, PF_POPULATE, NULL);
		error = ihk_mc_pt_set_range(vm->address_space->page_table, vm,
			(void *)range->start,
			(void *)range->start + (range_npages * PAGE_SIZE),
			up, ptattr,
			range->pgshift);

		if (error) {
			kprintf("%s: ihk_mc_pt_set_range failed. %d\n",
				__FUNCTION__, error);
			ihk_mc_free_pages_user(up_v, range_npages);
			goto err;
		}

		p->sections[i].remote_pa = up;
@@ -198,7 +209,43 @@ int prepare_process_ranges_args_envs(struct thread *thread,
		pn->at_entry += aout_base;
	}

	vm->region.brk_start = vm->region.brk_end = vm->region.data_end;
	vm->region.brk_start = vm->region.brk_end =
		(vm->region.data_end + LARGE_PAGE_SIZE - 1) & LARGE_PAGE_MASK;

#if 0
	{
		void *heap;

		dkprintf("%s: requested heap size: %lu\n",
			__FUNCTION__, proc->heap_extension);
		heap = ihk_mc_alloc_aligned_pages(proc->heap_extension >> PAGE_SHIFT,
			LARGE_PAGE_P2ALIGN, IHK_MC_AP_NOWAIT |
			(!(proc->mpol_flags & MPOL_NO_HEAP) ? IHK_MC_AP_USER : 0));

		if (!heap) {
			kprintf("%s: error: allocating heap\n", __FUNCTION__);
			goto err;
		}

		flags = VR_PROT_READ | VR_PROT_WRITE;
		flags |= VRFLAG_PROT_TO_MAXPROT(flags);
		if (add_process_memory_range(vm, vm->region.brk_start,
				vm->region.brk_start + proc->heap_extension,
				virt_to_phys(heap),
				flags, NULL, 0, LARGE_PAGE_P2ALIGN, NULL) != 0) {
			ihk_mc_free_pages(heap, proc->heap_extension >> PAGE_SHIFT);
			kprintf("%s: error: adding memory range for heap\n", __FUNCTION__);
			goto err;
		}

		vm->region.brk_end_allocated = vm->region.brk_end +
			proc->heap_extension;
		dkprintf("%s: heap @ 0x%lx:%lu\n",
			__FUNCTION__, vm->region.brk_start, proc->heap_extension);
	}
#else
	vm->region.brk_end_allocated = vm->region.brk_end;
#endif

	/* Map, copy and update args and envs */
	flags = VR_PROT_READ | VR_PROT_WRITE;
@@ -206,7 +253,8 @@ int prepare_process_ranges_args_envs(struct thread *thread,
	addr = vm->region.map_start - PAGE_SIZE * SCD_RESERVED_COUNT;
	e = addr + PAGE_SIZE * ARGENV_PAGE_COUNT;

	if((args_envs = ihk_mc_alloc_pages(ARGENV_PAGE_COUNT, IHK_MC_AP_NOWAIT)) == NULL){
	if((args_envs = ihk_mc_alloc_pages_user(ARGENV_PAGE_COUNT,
			IHK_MC_AP_NOWAIT)) == NULL){
		kprintf("ERROR: allocating pages for args/envs\n");
		goto err;
	}
@@ -214,7 +262,7 @@ int prepare_process_ranges_args_envs(struct thread *thread,

	if(add_process_memory_range(vm, addr, e, args_envs_p,
			flags, NULL, 0, PAGE_SHIFT, NULL) != 0){
		ihk_mc_free_pages(args_envs, ARGENV_PAGE_COUNT);
		ihk_mc_free_pages_user(args_envs, ARGENV_PAGE_COUNT);
		kprintf("ERROR: adding memory range for args/envs\n");
		goto err;
	}
@@ -416,6 +464,14 @@ static int process_msg_prepare_process(unsigned long rphys)
	proc->sgid = pn->cred[6];
	proc->fsgid = pn->cred[7];
	proc->termsig = SIGCHLD;
	proc->mpol_flags = pn->mpol_flags;
	proc->mpol_threshold = pn->mpol_threshold;
	proc->nr_processes = pn->nr_processes;
	proc->heap_extension = pn->heap_extension;
#ifdef PROFILE_ENABLE
	proc->profile = pn->profile;
	thread->profile = pn->profile;
#endif

	vm->region.user_start = pn->user_start;
	vm->region.user_end = pn->user_end;
@@ -474,6 +530,7 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
{
	struct ikc_scd_packet *packet = __packet;
	struct ikc_scd_packet pckt;
	struct ihk_ikc_channel_desc *resp_channel = cpu_local_var(ikc2linux);
	int rc;
	struct mcs_rwlock_node_irqsave lock;
	struct thread *thread;
@@ -488,6 +545,8 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
	unsigned long pp;
	int cpuid;
	int ret = 0;
	struct perf_ctrl_desc *pcd;
	unsigned int mode = 0;

	switch (packet->msg) {
	case SCD_MSG_INIT_CHANNEL_ACKED:
@@ -507,7 +566,7 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
		}
		pckt.ref = packet->ref;
		pckt.arg = packet->arg;
		syscall_channel_send(c, &pckt);
		syscall_channel_send(resp_channel, &pckt);

		ret = 0;
		break;
@@ -564,10 +623,10 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
		pckt.err = 0;
		pckt.ref = packet->ref;
		pckt.arg = packet->arg;
		syscall_channel_send(c, &pckt);
		syscall_channel_send(resp_channel, &pckt);

		rc = do_kill(NULL, info.pid, info.tid, info.sig, &info.info, 0);
		kprintf("SCD_MSG_SEND_SIGNAL: do_kill(pid=%d, tid=%d, sig=%d)=%d\n", info.pid, info.tid, info.sig, rc);
		dkprintf("SCD_MSG_SEND_SIGNAL: do_kill(pid=%d, tid=%d, sig=%d)=%d\n", info.pid, info.tid, info.sig, rc);
		ret = 0;
		break;

@@ -597,6 +656,61 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
		ret = 0;
		break;

	case SCD_MSG_PERF_CTRL:
		pp = ihk_mc_map_memory(NULL, packet->arg, sizeof(struct perf_ctrl_desc));
		pcd = (struct perf_ctrl_desc *)ihk_mc_map_virtual(pp, 1, PTATTR_WRITABLE | PTATTR_ACTIVE);

		switch (pcd->ctrl_type) {
		case PERF_CTRL_SET:
			if (!pcd->exclude_kernel) {
				mode |= PERFCTR_KERNEL_MODE;
			}
			if (!pcd->exclude_user) {
				mode |= PERFCTR_USER_MODE;
			}
			ihk_mc_perfctr_init_raw(pcd->target_cntr, pcd->config, mode);
			ihk_mc_perfctr_stop(1 << pcd->target_cntr);
			ihk_mc_perfctr_reset(pcd->target_cntr);
			break;

		case PERF_CTRL_ENABLE:
			ihk_mc_perfctr_start(pcd->target_cntr_mask);
			break;

		case PERF_CTRL_DISABLE:
			ihk_mc_perfctr_stop(pcd->target_cntr_mask);
			break;

		case PERF_CTRL_GET:
			pcd->read_value = ihk_mc_perfctr_read(pcd->target_cntr);
			break;

		default:
			kprintf("%s: SCD_MSG_PERF_CTRL unexpected ctrl_type\n", __FUNCTION__);
		}

		ihk_mc_unmap_virtual(pcd, 1, 0);
		ihk_mc_unmap_memory(NULL, pp, sizeof(struct perf_ctrl_desc));

		pckt.msg = SCD_MSG_PERF_ACK;
		pckt.err = 0;
		pckt.arg = packet->arg;
		ihk_ikc_send(resp_channel, &pckt, 0);

		ret = 0;
		break;

	case SCD_MSG_CPU_RW_REG:

		pckt.msg = SCD_MSG_CPU_RW_REG_RESP;
		memcpy(&pckt.desc, &packet->desc,
			sizeof(struct ihk_os_cpu_register));
		pckt.resp = packet->resp;
		pckt.err = arch_cpu_read_write_register(&pckt.desc, packet->op);

		ihk_ikc_send(resp_channel, &pckt, 0);
		break;

	default:
kprintf("syscall_pakcet_handler:unknown message "
			"(%d.%d.%d.%d.%d.%#lx)\n",
@@ -611,54 +725,77 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
	return ret;
}

void init_host_syscall_channel(void)
static int dummy_packet_handler(struct ihk_ikc_channel_desc *c,
	void *__packet, void *__os)
{
	struct ihk_ikc_connect_param param;
	struct ikc_scd_packet pckt;

	param.port = 501;
	param.pkt_size = sizeof(struct ikc_scd_packet);
	param.queue_size = PAGE_SIZE * 4;
	param.magic = 0x1129;
	param.handler = syscall_packet_handler;

	dkprintf("(syscall) Trying to connect host ...");
	while (ihk_ikc_connect(NULL, &param) != 0) {
		dkprintf(".");
		ihk_mc_delay_us(1000 * 1000);
	}
	dkprintf("connected.\n");

	get_this_cpu_local_var()->syscall_channel = param.channel;

	pckt.msg = SCD_MSG_INIT_CHANNEL;
	pckt.ref = ihk_mc_get_processor_id();
	pckt.arg = virt_to_phys(&cpu_local_var(iip));
	syscall_channel_send(param.channel, &pckt);
	struct ikc_scd_packet *packet = __packet;
	ihk_ikc_release_packet((struct ihk_ikc_free_packet *)packet, c);
	return 0;
}

void init_host_syscall_channel2(void)
void init_host_ikc2linux(int linux_cpu)
{
	struct ihk_ikc_connect_param param;
	struct ikc_scd_packet pckt;
	struct ihk_ikc_channel_desc *c;

	param.port = 502;
	/* Main thread allocates channel pointer table */
	if (!ikc2linuxs) {
		ikc2linuxs = kmalloc(sizeof(*ikc2linuxs) *
			ihk_mc_get_nr_linux_cores(), IHK_MC_AP_NOWAIT);
		if (!ikc2linuxs) {
			kprintf("%s: error: allocating Linux channels\n", __FUNCTION__);
			panic("");
		}

		memset(ikc2linuxs, 0, sizeof(*ikc2linuxs) *
			ihk_mc_get_nr_linux_cores());
	}

	c = ikc2linuxs[linux_cpu];

	if (!c) {
		param.port = 503;
		param.intr_cpu = linux_cpu;
		param.pkt_size = sizeof(struct ikc_scd_packet);
		param.queue_size = 2 * num_processors * sizeof(struct ikc_scd_packet);
		if (param.queue_size < PAGE_SIZE * 4) {
			param.queue_size = PAGE_SIZE * 4;
		}
		param.magic = 0x1129;
		param.handler = dummy_packet_handler;

		dkprintf("(ikc2linux) Trying to connect host ...");
		while (ihk_ikc_connect(NULL, &param) != 0) {
			dkprintf(".");
			ihk_mc_delay_us(1000 * 1000);
		}
		dkprintf("connected.\n");

		ikc2linuxs[linux_cpu] = param.channel;
		c = param.channel;
	}

	get_this_cpu_local_var()->ikc2linux = c;
}

void init_host_ikc2mckernel(void)
{
	struct ihk_ikc_connect_param param;

	param.port = 501;
	param.intr_cpu = -1;
	param.pkt_size = sizeof(struct ikc_scd_packet);
	param.queue_size = PAGE_SIZE * 4;
	param.magic = 0x1329;
	param.handler = syscall_packet_handler;

	dkprintf("(syscall) Trying to connect host ...");
	dkprintf("(ikc2mckernel) Trying to connect host ...");
	while (ihk_ikc_connect(NULL, &param) != 0) {
		dkprintf(".");
		ihk_mc_delay_us(1000 * 1000);
	}
	dkprintf("connected.\n");

	get_this_cpu_local_var()->syscall_channel2 = param.channel;

	pckt.msg = SCD_MSG_INIT_CHANNEL;
	pckt.ref = ihk_mc_get_processor_id();
	pckt.arg = virt_to_phys(&cpu_local_var(iip2));
	syscall_channel_send(param.channel, &pckt);
	ihk_ikc_set_regular_channel(NULL, param.channel, ihk_ikc_get_processor_id());
}
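The net effect of these changes is a split of the old syscall channel pair into two kinds of channels: init_host_ikc2mckernel() keeps port 501 with syscall_packet_handler() for traffic directed at McKernel, while init_host_ikc2linux() lazily opens one port-503 channel per Linux CPU, caches it in ikc2linuxs[], and installs it as the per-CPU ikc2linux response channel; its handler is a dummy that merely releases incoming packets, since that direction only carries replies.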
@@ -38,6 +38,26 @@ extern ihk_spinlock_t cpu_status_lock;
#define CPU_FLAG_NEED_RESCHED 0x1U
#define CPU_FLAG_NEED_MIGRATE 0x2U

typedef int (*smp_func_t)(int cpu_index, int nr_cpus, void *arg);
int smp_call_func(cpu_set_t *__cpu_set, smp_func_t __func, void *__arg);

struct smp_func_call_data {
	/* XXX: Sync MCS lock to avoid contention on counter */
	// mcs_lock_node_t lock;
	int nr_cpus;
	ihk_atomic_t cpus_left;

	smp_func_t func;
	void *arg;
};

struct smp_func_call_request {
	struct smp_func_call_data *sfcd;
	int cpu_index;
	int ret;
	struct list_head list;
};

struct cpu_local_var {
	/* malloc */
	struct list_head free_list;
@@ -55,11 +75,8 @@ struct cpu_local_var {
	struct list_head runq;
	size_t runq_len;

	struct ihk_ikc_channel_desc *syscall_channel;
	struct ikc_scd_init_param iip;
	struct ihk_ikc_channel_desc *ikc2linux;

	struct ihk_ikc_channel_desc *syscall_channel2;
	struct ikc_scd_init_param iip2;
	struct resource_set *resource_set;

	int status;
@@ -75,6 +92,10 @@ struct cpu_local_var {
	int no_preempt;
	int timer_enabled;
	int kmalloc_initialized;
	struct ihk_os_cpu_monitor *monitor;

	ihk_spinlock_t smp_func_req_lock;
	struct list_head smp_func_req_list;
} __attribute__((aligned(64)));
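smp_call_func() and the two request structures describe a mailbox-style cross-call: the caller fills a smp_func_call_data, one smp_func_call_request per target CPU is queued on that CPU's smp_func_req_list, and cpus_left is drained as each callback finishes. A sketch of the caller side under those assumed semantics (the callback, the chosen CPUs, and the CPU_ZERO/CPU_SET macros from affinity.h are illustrative):

static int flush_one(int cpu_index, int nr_cpus, void *arg)
{
	/* Runs on each selected CPU; arg is the pointer passed below. */
	kprintf("cross-call on cpu_index %d of %d\n", cpu_index, nr_cpus);
	return 0;
}

static void flush_some_cpus(void)
{
	cpu_set_t cpus;
	int value = 42;

	CPU_ZERO(&cpus);
	CPU_SET(0, &cpus);
	CPU_SET(1, &cpus);	/* illustrative target CPUs */

	smp_call_func(&cpus, flush_one, &value);
}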
@@ -24,11 +24,15 @@ extern void cpu_local_var_init(void);
extern void kmalloc_init(void);
extern void ap_start(void);
extern void ihk_mc_dma_init(void);
extern void init_host_syscall_channel(void);
extern void init_host_syscall_channel2(void);
extern void init_host_ikc2linux(int linux_cpu);
extern void init_host_ikc2mckernel(void);
//extern void set_ikc2linux_to_local(int linux_cpu);
extern void sched_init(void);
extern void pc_ap_init(void);
extern void cpu_sysfs_setup(void);
extern void numa_sysfs_setup(void);
extern void rusage_sysfs_setup(void);
extern void status_sysfs_setup(void);

extern char *find_command_line(char *name);
@@ -36,6 +36,7 @@ enum {
	MF_ZEROFILL = 0x0010,
	MF_REG_FILE = 0x1000,
	MF_DEV_FILE = 0x2000,
	MF_PREMAP = 0x8000,
	MF_HOST_RELEASED = 0x80000000,
	MF_END
};
@@ -49,6 +50,10 @@ struct memobj {
	uint32_t status;
	size_t size;
	ihk_spinlock_t lock;

	/* For pre-mapped memobjects */
	void **pages;
	int nr_pages;
};

typedef void memobj_release_func_t(struct memobj *obj);
@@ -23,10 +23,12 @@
#include <affinity.h>
#include <syscall.h>
#include <bitops.h>
#include <profile.h>

#define VR_NONE 0x0
#define VR_STACK 0x1
#define VR_RESERVED 0x2
#define VR_AP_USER 0x4
#define VR_IO_NOCACHE 0x100
#define VR_REMOTE 0x200
#define VR_WRITE_COMBINED 0x400
@@ -229,6 +231,10 @@ enum mpol_rebind_step {
#define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */
#define MPOL_F_MORON (1 << 4) /* Migrate On pte_numa Reference On Node */

#define SPAWN_TO_LOCAL 0
#define SPAWN_TO_REMOTE 1
#define SPAWNING_TO_REMOTE 1001

#include <waitq.h>
#include <futex.h>

@@ -242,27 +248,6 @@ struct process_vm;
struct vm_regions;
struct vm_range;

//#define TRACK_SYSCALLS

#ifdef TRACK_SYSCALLS
#define TRACK_SYSCALLS_MAX 300
#define __NR_track_syscalls 701

#define TRACK_SYSCALLS_CLEAR 0x01
#define TRACK_SYSCALLS_ON 0x02
#define TRACK_SYSCALLS_OFF 0x04
#define TRACK_SYSCALLS_PRINT 0x08
#define TRACK_SYSCALLS_PRINT_PROC 0x10

void track_syscalls_print_thread_stats(struct thread *thread);
void track_syscalls_print_proc_stats(struct process *proc);
void track_syscalls_accumulate_counters(struct thread *thread,
	struct process *proc);
void track_syscalls_alloc_counters(struct thread *thread);
void track_syscalls_dealloc_thread_counters(struct thread *thread);
void track_syscalls_dealloc_proc_counters(struct process *proc);
#endif // TRACK_SYSCALLS


#define HASH_SIZE 73

@@ -389,6 +374,7 @@ struct vm_range {
	off_t objoff;
	int pgshift; /* page size. 0 means THP */
	int padding;
	void *private_data;
};

struct vm_range_numa_policy {
@@ -402,7 +388,7 @@ struct vm_regions {
	unsigned long vm_start, vm_end;
	unsigned long text_start, text_end;
	unsigned long data_start, data_end;
	unsigned long brk_start, brk_end;
	unsigned long brk_start, brk_end, brk_end_allocated;
	unsigned long map_start, map_end;
	unsigned long stack_start, stack_end;
	unsigned long user_start, user_end;
@@ -518,6 +504,7 @@ struct process {
	unsigned long saved_auxv[AUXV_LEN];
	char *saved_cmdline;
	long saved_cmdline_len;
	cpu_set_t cpu_set;

	/* Store ptrace flags.
	 * The lower 8 bits are PTRACE_O_xxx of the PTRACE_SETOPTIONS request.
@@ -551,6 +538,10 @@ struct process {

	long maxrss;
	long maxrss_children;
	/* Memory policy flags and memory specific options */
	unsigned long mpol_flags;
	size_t mpol_threshold;
	unsigned long heap_extension;

	// perf_event
	int perf_status;
@@ -559,13 +550,13 @@ struct process {
#define PP_COUNT 2
#define PP_STOP 3
	struct mc_perf_event *monitoring_event;
#ifdef TRACK_SYSCALLS
	mcs_lock_node_t st_lock;
	uint64_t *syscall_times;
	uint32_t *syscall_cnts;
	uint64_t *offload_times;
	uint32_t *offload_cnts;
#endif // TRACK_SYSCALLS
#ifdef PROFILE_ENABLE
	int profile;
	mcs_lock_node_t profile_lock;
	struct profile_event *profile_events;
	unsigned long profile_elapsed_ts;
#endif // PROFILE_ENABLE
	int nr_processes; /* For partitioned execution */
};

void hold_thread(struct thread *ftn);
@@ -638,13 +629,12 @@ struct thread {
	fp_regs_struct *fp_regs;
	int in_syscall_offload;

#ifdef TRACK_SYSCALLS
	int track_syscalls;
	uint64_t *syscall_times;
	uint32_t *syscall_cnts;
	uint64_t *offload_times;
	uint32_t *offload_cnts;
#endif // TRACK_SYSCALLS
#ifdef PROFILE_ENABLE
	int profile;
	struct profile_event *profile_events;
	unsigned long profile_start_ts;
	unsigned long profile_elapsed_ts;
#endif // PROFILE_ENABLE

	// signal
	struct sig_common *sigcommon;
@@ -664,9 +654,14 @@ struct thread {
	struct sig_pending *ptrace_sendsig;

	// cpu time
/*
	struct timespec stime;
	struct timespec utime;
	struct timespec btime;
*/
	unsigned long system_tsc;
	unsigned long user_tsc;
	unsigned long base_tsc;
	int times_update;
	int in_kernel;

@@ -679,6 +674,11 @@ struct thread {

	/* Syscall offload wait queue head */
	struct waitq scd_wq;

	int thread_offloaded;
	int mod_clone;
	struct uti_attr *mod_clone_arg;
	int parent_cpuid;
};

#define VM_RANGE_CACHE_SIZE 4
@@ -780,8 +780,8 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
	int argc, char **argv,
	int envc, char **env);
unsigned long extend_process_region(struct process_vm *vm,
	unsigned long start, unsigned long end,
	unsigned long address, unsigned long flag);
	unsigned long end_allocated,
	unsigned long address, unsigned long flag);
extern enum ihk_mc_pt_attribute arch_vrflag_to_ptattr(unsigned long flag, uint64_t fault, pte_t *ptep);
enum ihk_mc_pt_attribute common_vrflag_to_ptattr(unsigned long flag, uint64_t fault, pte_t *ptep);
66 kernel/include/profile.h Normal file
@@ -0,0 +1,66 @@
#ifndef __PROCESS_PROFILE_H_
#define __PROCESS_PROFILE_H_

/* Uncomment this to enable profiling */
#define PROFILE_ENABLE

#ifdef PROFILE_ENABLE
#define PROFILE_SYSCALL_MAX 300
#define PROFILE_OFFLOAD_MAX (PROFILE_SYSCALL_MAX << 1)
#define PROFILE_EVENT_MIN PROFILE_OFFLOAD_MAX
#define __NR_profile 701

#define PROF_JOB 0x40000000
#define PROF_PROC 0x80000000
#define PROF_CLEAR 0x01
#define PROF_ON 0x02
#define PROF_OFF 0x04
#define PROF_PRINT 0x08

struct profile_event {
	uint32_t cnt;
	uint64_t tsc;
};

/*
 * The layout of profile events is as follows:
 * [0,PROFILE_SYSCALL_MAX) - syscalls
 * [PROFILE_SYSCALL_MAX,PROFILE_OFFLOAD_MAX) - syscall offloads
 * [PROFILE_OFFLOAD_MAX,PROFILE_EVENT_MAX) - general events
 *
 * XXX: Make sure to fill in prof_event_names in profile.c
 * for each added profiled event.
 */
enum profile_event_type {
	PROFILE_tlb_invalidate = PROFILE_EVENT_MIN,
	PROFILE_page_fault,
	PROFILE_page_fault_anon_clr,
	PROFILE_page_fault_file,
	PROFILE_page_fault_dev_file,
	PROFILE_page_fault_file_clr,
	PROFILE_mpol_alloc_missed,
	PROFILE_mmap_anon_contig_phys,
	PROFILE_mmap_anon_no_contig_phys,
	PROFILE_mmap_regular_file,
	PROFILE_mmap_device_file,
	PROFILE_EVENT_MAX /* Should be the last event type */
};

struct thread;
struct process;

enum profile_event_type profile_syscall2offload(enum profile_event_type sc);
void profile_event_add(enum profile_event_type type, uint64_t tsc);
void profile_print_thread_stats(struct thread *thread);
void profile_print_proc_stats(struct process *proc);
void profile_print_job_stats(struct process *proc);
void profile_accumulate_events(struct thread *thread, struct process *proc);
int profile_accumulate_and_print_job_events(struct process *proc);
int profile_alloc_events(struct thread *thread);
void profile_dealloc_thread_events(struct thread *thread);
void profile_dealloc_proc_events(struct process *proc);
#endif // PROFILE_ENABLE


#endif // __PROCESS_PROFILE_H_
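Given the layout comment above, instrumenting a new code path takes two steps: add an enumerator before PROFILE_EVENT_MAX (plus its name string in profile.c), then call profile_event_add() at the site, with either a byte count (as the page-fault sites in fileobj.c and devobj.c do) or a TSC delta as the second argument. For a latency-style event, the call site might look like this (sketch; PROFILE_my_event and do_expensive_work are hypothetical):

#ifdef PROFILE_ENABLE
	{
		uint64_t start = rdtsc();

		do_expensive_work();	/* the region being measured */
		profile_event_add(PROFILE_my_event, rdtsc() - start);
	}
#endif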
109 kernel/include/rbtree.h Normal file
@@ -0,0 +1,109 @@
/*
  Red Black Trees
  (C) 1999  Andrea Arcangeli <andrea@suse.de>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

  linux/include/linux/rbtree.h

  To use rbtrees you'll have to implement your own insert and search cores.
  This will avoid us to use callbacks and to drop drammatically performances.
  I know it's not the cleaner way, but in C (not in C++) to get
  performances and genericity...

  See Documentation/rbtree.txt for documentation and samples.
*/

#ifndef _LINUX_RBTREE_H
#define _LINUX_RBTREE_H

#include <ihk/types.h>
#include <lwk/compiler.h>
#include <lwk/stddef.h>

struct rb_node {
	unsigned long __rb_parent_color;
	struct rb_node *rb_right;
	struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
/* The alignment might seem pointless, but allegedly CRIS needs it */

struct rb_root {
	struct rb_node *rb_node;
};


#define rb_parent(r)	((struct rb_node *)((r)->__rb_parent_color & ~3))

#define RB_ROOT	(struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)

#define RB_EMPTY_ROOT(root)	((root)->rb_node == NULL)

/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) \
	((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node) \
	((node)->__rb_parent_color = (unsigned long)(node))


extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);


/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);

/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);

/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
			    struct rb_root *root);

static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
				struct rb_node ** rb_link)
{
	node->__rb_parent_color = (unsigned long)parent;
	node->rb_left = node->rb_right = NULL;

	*rb_link = node;
}

#define rb_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? rb_entry(____ptr, type, member) : NULL; \
	})

/**
 * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
 * given type safe against removal of rb_node entry
 *
 * @pos:	the 'type *' to use as a loop cursor.
 * @n:		another 'type *' to use as temporary storage
 * @root:	'rb_root *' of the rbtree.
 * @field:	the name of the rb_node field within 'type'.
 */
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
	for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
	     pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
			typeof(*pos), field); 1; }); \
	     pos = n)

#endif /* _LINUX_RBTREE_H */
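As the header comment says, callers supply their own search and insert cores. The canonical shape of both, keyed on a single field (a sketch following the usage pattern this header expects; mytype and the key field are illustrative):

struct mytype {
	struct rb_node node;
	unsigned long key;
};

struct mytype *my_search(struct rb_root *root, unsigned long key)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct mytype *data = rb_entry(n, struct mytype, node);

		if (key < data->key)
			n = n->rb_left;
		else if (key > data->key)
			n = n->rb_right;
		else
			return data;
	}
	return NULL;
}

int my_insert(struct rb_root *root, struct mytype *data)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct mytype *this = rb_entry(*new, struct mytype, node);

		parent = *new;
		if (data->key < this->key)
			new = &(*new)->rb_left;
		else if (data->key > this->key)
			new = &(*new)->rb_right;
		else
			return -1;	/* duplicate key */
	}

	/* Link in the new node, then rebalance. */
	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
	return 0;
}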
231 kernel/include/rbtree_augmented.h Normal file
@@ -0,0 +1,231 @@
/*
  Red Black Trees
  (C) 1999  Andrea Arcangeli <andrea@suse.de>
  (C) 2002  David Woodhouse <dwmw2@infradead.org>
  (C) 2012  Michel Lespinasse <walken@google.com>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

  linux/include/linux/rbtree_augmented.h
*/

#ifndef _LINUX_RBTREE_AUGMENTED_H
#define _LINUX_RBTREE_AUGMENTED_H

#include <rbtree.h>

/*
 * Please note - only struct rb_augment_callbacks and the prototypes for
 * rb_insert_augmented() and rb_erase_augmented() are intended to be public.
 * The rest are implementation details you are not expected to depend on.
 *
 * See Documentation/rbtree.txt for documentation and samples.
 */

struct rb_augment_callbacks {
	void (*propagate)(struct rb_node *node, struct rb_node *stop);
	void (*copy)(struct rb_node *old, struct rb_node *new);
	void (*rotate)(struct rb_node *old, struct rb_node *new);
};

extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
	void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
		    const struct rb_augment_callbacks *augment)
{
	__rb_insert_augmented(node, root, augment->rotate);
}

#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield,	\
			     rbtype, rbaugmented, rbcompute)		\
static inline void							\
rbname ## _propagate(struct rb_node *rb, struct rb_node *stop)		\
{									\
	while (rb != stop) {						\
		rbstruct *node = rb_entry(rb, rbstruct, rbfield);	\
		rbtype augmented = rbcompute(node);			\
		if (node->rbaugmented == augmented)			\
			break;						\
		node->rbaugmented = augmented;				\
		rb = rb_parent(&node->rbfield);				\
	}								\
}									\
static inline void							\
rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new)		\
{									\
	rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);		\
	rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);		\
	new->rbaugmented = old->rbaugmented;				\
}									\
static void								\
rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new)	\
{									\
	rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);		\
	rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);		\
	new->rbaugmented = old->rbaugmented;				\
	old->rbaugmented = rbcompute(old);				\
}									\
rbstatic const struct rb_augment_callbacks rbname = {			\
	rbname ## _propagate, rbname ## _copy, rbname ## _rotate	\
};


#define RB_RED		0
#define RB_BLACK	1

#define __rb_parent(pc)    ((struct rb_node *)(pc & ~3))

#define __rb_color(pc)     ((pc) & 1)
#define __rb_is_black(pc)  __rb_color(pc)
#define __rb_is_red(pc)    (!__rb_color(pc))
#define rb_color(rb)       __rb_color((rb)->__rb_parent_color)
#define rb_is_red(rb)      __rb_is_red((rb)->__rb_parent_color)
#define rb_is_black(rb)    __rb_is_black((rb)->__rb_parent_color)

static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
{
	rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
}

static inline void rb_set_parent_color(struct rb_node *rb,
				       struct rb_node *p, int color)
{
	rb->__rb_parent_color = (unsigned long)p | color;
}

static inline void
__rb_change_child(struct rb_node *old, struct rb_node *new,
		  struct rb_node *parent, struct rb_root *root)
{
	if (parent) {
		if (parent->rb_left == old)
			parent->rb_left = new;
		else
			parent->rb_right = new;
	} else
		root->rb_node = new;
}

extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
	void (*augment_rotate)(struct rb_node *old, struct rb_node *new));

static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
		     const struct rb_augment_callbacks *augment)
{
	struct rb_node *child = node->rb_right, *tmp = node->rb_left;
	struct rb_node *parent, *rebalance;
	unsigned long pc;

	if (!tmp) {
		/*
		 * Case 1: node to erase has no more than 1 child (easy!)
		 *
		 * Note that if there is one child it must be red due to 5)
		 * and node must be black due to 4). We adjust colors locally
		 * so as to bypass __rb_erase_color() later on.
		 */
		pc = node->__rb_parent_color;
		parent = __rb_parent(pc);
		__rb_change_child(node, child, parent, root);
		if (child) {
			child->__rb_parent_color = pc;
			rebalance = NULL;
		} else
			rebalance = __rb_is_black(pc) ? parent : NULL;
		tmp = parent;
	} else if (!child) {
		/* Still case 1, but this time the child is node->rb_left */
		tmp->__rb_parent_color = pc = node->__rb_parent_color;
		parent = __rb_parent(pc);
		__rb_change_child(node, tmp, parent, root);
		rebalance = NULL;
		tmp = parent;
	} else {
		struct rb_node *successor = child, *child2;
		tmp = child->rb_left;
		if (!tmp) {
			/*
			 * Case 2: node's successor is its right child
			 *
			 *    (n)          (s)
			 *    / \          / \
			 *  (x) (s)  ->  (x) (c)
			 *        \
			 *        (c)
			 */
			parent = successor;
			child2 = successor->rb_right;
			augment->copy(node, successor);
		} else {
			/*
			 * Case 3: node's successor is leftmost under
			 * node's right child subtree
			 *
			 *    (n)          (s)
			 *    / \          / \
			 *  (x) (y)  ->  (x) (y)
			 *      /            /
			 *    (p)          (p)
			 *    /            /
			 *  (s)          (c)
			 *    \
			 *    (c)
			 */
			do {
				parent = successor;
				successor = tmp;
				tmp = tmp->rb_left;
			} while (tmp);
			parent->rb_left = child2 = successor->rb_right;
			successor->rb_right = child;
			rb_set_parent(child, successor);
			augment->copy(node, successor);
			augment->propagate(parent, successor);
		}

		successor->rb_left = tmp = node->rb_left;
		rb_set_parent(tmp, successor);

		pc = node->__rb_parent_color;
		tmp = __rb_parent(pc);
		__rb_change_child(node, successor, tmp, root);
		if (child2) {
			successor->__rb_parent_color = pc;
			rb_set_parent_color(child2, parent, RB_BLACK);
			rebalance = NULL;
		} else {
			unsigned long pc2 = successor->__rb_parent_color;
			successor->__rb_parent_color = pc;
			rebalance = __rb_is_black(pc2) ? parent : NULL;
		}
		tmp = successor;
	}

	augment->propagate(tmp, NULL);
	return rebalance;
}

static __always_inline void
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
		   const struct rb_augment_callbacks *augment)
{
	struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
	if (rebalance)
		__rb_erase_color(rebalance, root, augment->rotate);
}

#endif /* _LINUX_RBTREE_AUGMENTED_H */
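RB_DECLARE_CALLBACKS generates the three callbacks from a single per-node recompute function. A condensed, interval-tree-style example under that contract (mynode and the field names are illustrative): each node caches the maximum 'last' value of its subtree so range queries can prune whole branches.

struct mynode {
	struct rb_node rb;
	unsigned long start, last;	/* the interval itself */
	unsigned long subtree_last;	/* max of 'last' over this subtree */
};

static unsigned long compute_subtree_last(struct mynode *n)
{
	unsigned long max = n->last, tmp;

	if (n->rb.rb_left) {
		tmp = rb_entry(n->rb.rb_left, struct mynode, rb)->subtree_last;
		if (tmp > max)
			max = tmp;
	}
	if (n->rb.rb_right) {
		tmp = rb_entry(n->rb.rb_right, struct mynode, rb)->subtree_last;
		if (tmp > max)
			max = tmp;
	}
	return max;
}

RB_DECLARE_CALLBACKS(static, mynode_cb, struct mynode, rb,
		     unsigned long, subtree_last, compute_subtree_last)

/* After rb_link_node(), insert and erase go through the augmented
 * entry points so the cached maxima stay consistent:
 *   rb_insert_augmented(&node->rb, &root, &mynode_cb);
 *   rb_erase_augmented(&node->rb, &root, &mynode_cb);
 */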
197 kernel/include/rusage.h Normal file
@@ -0,0 +1,197 @@
#ifndef __RUSAGE_H
#define __RUSAGE_H

#include <config.h>
#include <ihk/rusage.h>

#ifdef ENABLE_RUSAGE
#define RUSAGE_MEM_LIMIT (2 * 1024 * 1024) // 2MB

extern void eventfd();

static inline void
rusage_total_memory_add(unsigned long size)
{
	monitor->rusage_total_memory += size;
}

static inline void
rusage_rss_add(unsigned long size)
{
	unsigned long newval;
	unsigned long oldval;
	unsigned long retval;

	newval = __sync_add_and_fetch(&monitor->rusage_rss_current, size);
	oldval = monitor->rusage_rss_max;
	while (newval > oldval) {
		retval = __sync_val_compare_and_swap(&monitor->rusage_rss_max,
				oldval, newval);
		if (retval == oldval) {
			break;
		}
		oldval = retval;
	}
}

static inline void
rusage_rss_sub(unsigned long size)
{
	__sync_sub_and_fetch(&monitor->rusage_rss_current, size);
}

static inline void
rusage_kmem_add(unsigned long size)
{
	unsigned long newval;
	unsigned long oldval;
	unsigned long retval;

	newval = __sync_add_and_fetch(&monitor->rusage_kmem_usage, size);
	oldval = monitor->rusage_kmem_max_usage;
	while (newval > oldval) {
		retval = __sync_val_compare_and_swap(
				&monitor->rusage_kmem_max_usage,
				oldval, newval);
		if (retval == oldval) {
			break;
		}
		oldval = retval;
	}
}

static inline void
rusage_kmem_sub(unsigned long size)
{
	__sync_sub_and_fetch(&monitor->rusage_kmem_usage, size);
}

static inline void
rusage_numa_add(int numa_id, unsigned long size)
{
	__sync_add_and_fetch(monitor->rusage_numa_stat + numa_id, size);
	rusage_rss_add(size);
}

static inline void
rusage_numa_sub(int numa_id, unsigned long size)
{
	rusage_rss_sub(size);
	__sync_sub_and_fetch(monitor->rusage_numa_stat + numa_id, size);
}

static inline void
rusage_page_add(int numa_id, unsigned long pages, int is_user)
{
	unsigned long size = pages * PAGE_SIZE;
	unsigned long newval;
	unsigned long oldval;
	unsigned long retval;

	if (is_user)
		rusage_numa_add(numa_id, size);
	else
		rusage_kmem_add(size);

	newval = __sync_add_and_fetch(&monitor->rusage_total_memory_usage, size);
	oldval = monitor->rusage_total_memory_max_usage;
	while (newval > oldval) {
		retval = __sync_val_compare_and_swap(&monitor->rusage_total_memory_max_usage,
				oldval, newval);
		if (retval == oldval) {
			if (monitor->rusage_total_memory - newval <
					RUSAGE_MEM_LIMIT) {
				eventfd();
			}
			break;
		}
		oldval = retval;
	}
}

static inline void
rusage_page_sub(int numa_id, unsigned long pages, int is_user)
{
	unsigned long size = pages * PAGE_SIZE;

	__sync_sub_and_fetch(&monitor->rusage_total_memory_usage, size);

	if (is_user)
		rusage_numa_sub(numa_id, size);
	else
		rusage_kmem_sub(size);
}

static inline void
rusage_num_threads_inc()
{
	unsigned long newval;
	unsigned long oldval;
	unsigned long retval;

	newval = __sync_add_and_fetch(&monitor->rusage_num_threads, 1);
	oldval = monitor->rusage_max_num_threads;
	while (newval > oldval) {
		retval = __sync_val_compare_and_swap(&monitor->
				rusage_max_num_threads,
				oldval, newval);
		if (retval == oldval) {
			break;
		}
		oldval = retval;
	}
}

static inline void
rusage_num_threads_dec()
{
	__sync_sub_and_fetch(&monitor->rusage_num_threads, 1);
}
#else
static inline void
rusage_total_memory_add(unsigned long size)
{
}

static inline void
rusage_rss_add(unsigned long size)
{
}

static inline void
rusage_rss_sub(unsigned long size)
{
}

static inline void
rusage_numa_add(int numa_id, unsigned long size)
{
}

static inline void
rusage_numa_sub(int numa_id, unsigned long size)
{
}

static inline void
rusage_page_add(int numa_id, unsigned long size, int is_user)
{
}

static inline void
rusage_page_sub(int numa_id, unsigned long size, int is_user)
{
}

static inline void
rusage_num_threads_inc()
{
}

static inline void
rusage_num_threads_dec()
{
}
#endif // ENABLE_RUSAGE

#endif
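Every *_add helper above that tracks a maximum uses the same lock-free high-water-mark idiom: bump the current counter atomically, then try to CAS the recorded maximum upward, retrying only while our value is still larger than what another thread published. Pulled out as a standalone sketch (update_max is illustrative, not a function in this header):

/* Raise *maxp to newval unless a concurrent updater already went higher. */
static inline void update_max(unsigned long *maxp, unsigned long newval)
{
	unsigned long oldval = *maxp;

	while (newval > oldval) {
		unsigned long retval =
			__sync_val_compare_and_swap(maxp, oldval, newval);

		if (retval == oldval)
			break;		/* our value is the new maximum */
		oldval = retval;	/* lost the race; recheck and retry */
	}
}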
@ -73,6 +73,13 @@
/* #define SCD_MSG_SYSFS_RESP_CLEANUP 0x43 */
#define SCD_MSG_PROCFS_TID_CREATE 0x44
#define SCD_MSG_PROCFS_TID_DELETE 0x45
#define SCD_MSG_EVENTFD 0x46

#define SCD_MSG_PERF_CTRL 0x50
#define SCD_MSG_PERF_ACK 0x51

#define SCD_MSG_CPU_RW_REG 0x52
#define SCD_MSG_CPU_RW_REG_RESP 0x53

/* Cloning flags. */
# define CSIGNAL 0x000000ff /* Signal mask to be sent at exit. */
@ -153,6 +160,11 @@ struct program_image_section {
typedef unsigned long __cpu_set_unit;
#define PLD_CPU_SET_SIZE (PLD_CPU_SET_MAX_CPUS / (8 * sizeof(__cpu_set_unit)))

#define MPOL_NO_HEAP 0x01
#define MPOL_NO_STACK 0x02
#define MPOL_NO_BSS 0x04
#define MPOL_SHM_PREMAP 0x08

struct program_load_desc {
	int num_sections;
	int status;
@ -181,8 +193,13 @@ struct program_load_desc {
	unsigned long envs_len;
	struct rlimit rlimit[MCK_RLIM_MAX];
	unsigned long interp_align;
	unsigned long mpol_flags;
	unsigned long mpol_threshold;
	unsigned long heap_extension;
	int nr_processes;
	char shell_path[SHELL_PATH_MAX_LEN];
	__cpu_set_unit cpu_set[PLD_CPU_SET_SIZE];
	int profile;
	struct program_image_section sections[0];
};

@ -206,6 +223,18 @@ struct syscall_request {
	unsigned long args[6];
};

struct ihk_os_cpu_register {
	unsigned long addr;
	unsigned long val;
	unsigned long addr_ext;
};

enum mcctrl_os_cpu_operation {
	MCCTRL_OS_CPU_READ_REGISTER,
	MCCTRL_OS_CPU_WRITE_REGISTER,
	MCCTRL_OS_CPU_MAX_OP
};

struct ikc_scd_packet {
	int msg;
	int err;
@ -231,6 +260,13 @@ struct ikc_scd_packet {
		struct {
			int ttid;
		};

		/* SCD_MSG_CPU_RW_REG */
		struct {
			struct ihk_os_cpu_register desc;
			enum mcctrl_os_cpu_operation op;
			void *resp;
		};
	};
	char padding[12];
};
@ -374,6 +410,34 @@ struct tod_data_s {
};
extern struct tod_data_s tod_data; /* residing in arch-dependent file */

static inline void tsc_to_ts(unsigned long tsc, struct timespec *ts)
{
	time_t sec_delta;
	long ns_delta;

	sec_delta = tsc / tod_data.clocks_per_sec;
	ns_delta = NS_PER_SEC * (tsc % tod_data.clocks_per_sec)
		/ tod_data.clocks_per_sec;
	/* calc. of ns_delta overflows if clocks_per_sec exceeds 18.44 GHz */

	ts->tv_sec = sec_delta;
	ts->tv_nsec = ns_delta;
	if (ts->tv_nsec >= NS_PER_SEC) {
		ts->tv_nsec -= NS_PER_SEC;
		++ts->tv_sec;
	}
}

static inline unsigned long timeval_to_jiffy(const struct timeval *ats)
{
	return ats->tv_sec * 100 + ats->tv_usec / 10000;
}

static inline unsigned long timespec_to_jiffy(const struct timespec *ats)
{
	return ats->tv_sec * 100 + ats->tv_nsec / 10000000;
}
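tsc_to_ts() splits a TSC delta into whole seconds plus a nanosecond remainder, and the *_to_jiffy() helpers assume a 100 Hz tick, so one jiffy is 10 ms (10,000 us or 10,000,000 ns). As a worked example, a timespec of {tv_sec = 2, tv_nsec = 370000000} yields 2 * 100 + 370000000 / 10000000 = 237 jiffies.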
void reset_cputime();
void set_cputime(int mode);
int do_munmap(void *addr, size_t len);
@ -385,6 +449,8 @@ int do_shmget(key_t key, size_t size, int shmflg);
struct process_vm;
int arch_map_vdso(struct process_vm *vm); /* arch dependent */
int arch_setup_vdso(void);
int arch_cpu_read_write_register(struct ihk_os_cpu_register *desc,
		enum mcctrl_os_cpu_operation op);

#define VDSO_MAXPAGES 2
struct vdso {
@ -421,4 +487,64 @@ struct get_cpu_mapping_req {
#endif
};

enum perf_ctrl_type {
	PERF_CTRL_SET,
	PERF_CTRL_GET,
	PERF_CTRL_ENABLE,
	PERF_CTRL_DISABLE,
};

struct perf_ctrl_desc {
	enum perf_ctrl_type ctrl_type;
	int status;
	union {
		/* for SET, GET */
		struct {
			unsigned int target_cntr;
			unsigned long config;
			unsigned long read_value;
			unsigned disabled :1,
				 pinned :1,
				 exclude_user :1,
				 exclude_kernel :1,
				 exclude_hv :1,
				 exclude_idle :1;
		};

		/* for START, STOP */
		struct {
			unsigned long target_cntr_mask;
		};
	};
};

#define UTI_FLAG_NUMA_SET (1ULL<<1) /* Indicates NUMA_SET is specified */

#define UTI_FLAG_SAME_NUMA_DOMAIN (1ULL<<2)
#define UTI_FLAG_DIFFERENT_NUMA_DOMAIN (1ULL<<3)

#define UTI_FLAG_SAME_L1 (1ULL<<4)
#define UTI_FLAG_SAME_L2 (1ULL<<5)
#define UTI_FLAG_SAME_L3 (1ULL<<6)

#define UTI_FLAG_DIFFERENT_L1 (1ULL<<7)
#define UTI_FLAG_DIFFERENT_L2 (1ULL<<8)
#define UTI_FLAG_DIFFERENT_L3 (1ULL<<9)

#define UTI_FLAG_EXCLUSIVE_CPU (1ULL<<10)
#define UTI_FLAG_CPU_INTENSIVE (1ULL<<11)
#define UTI_FLAG_HIGH_PRIORITY (1ULL<<12)
#define UTI_FLAG_NON_COOPERATIVE (1ULL<<13)

/* Linux default value is used */
#define UTI_MAX_NUMA_DOMAINS (1024)

typedef struct uti_attr {
	/* UTI_CPU_SET environment variable is used to denote the preferred
	   location of utility thread */
	uint64_t numa_set[(UTI_MAX_NUMA_DOMAINS + sizeof(uint64_t) * 8 - 1) /
			  (sizeof(uint64_t) * 8)];
	uint64_t flags; /* Representing location and behavior hints by bitmap */
} uti_attr_t;

#endif
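uti_attr.numa_set is a bitmap sized by ceiling division: (1024 + 63) / 64 = 16 uint64_t words. A small illustrative helper, not part of the header, to request a domain:

/* Illustrative only: mark NUMA domain 'd' as preferred in a uti_attr. */
static inline void uti_attr_set_numa_domain(uti_attr_t *attr, int d)
{
	attr->numa_set[d / 64] |= 1ULL << (d % 64);
	attr->flags |= UTI_FLAG_NUMA_SET;
}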
@ -11,11 +11,16 @@
#ifndef _XPMEM_H
#define _XPMEM_H

#include <process.h>
#include <ihk/context.h>

#define XPMEM_DEV_PATH "/dev/xpmem"

extern int xpmem_open(ihk_mc_user_context_t *ctx);
extern int xpmem_remove_process_memory_range(struct process_vm *vm,
		struct vm_range *vmr);
extern int xpmem_fault_process_memory_range(struct process_vm *vm,
		struct vm_range *vmr, unsigned long vaddr, uint64_t reason);

#endif /* _XPMEM_H */

@ -160,7 +160,7 @@ static inline int xpmem_ap_hashtable_index(xpmem_apid_t apid)

	index = ((xpmem_id_t *)&apid)->xpmem_id.uniq % XPMEM_AP_HASHTABLE_SIZE;

	XPMEM_DEBUG("return: apid=%lu, index=%d", apid, index);
	XPMEM_DEBUG("return: apid=0x%lx, index=%d", apid, index);

	return index;
}
@ -174,22 +174,20 @@ struct xpmem_thread_group {
	uid_t uid; /* tg's uid */
	gid_t gid; /* tg's gid */
	volatile int flags; /* tg attributes and state */
	ihk_atomic_t uniq_segid;
	ihk_atomic_t uniq_apid;
	mcs_rwlock_lock_t seg_list_lock;
	ihk_atomic_t uniq_segid; /* segid uniq */
	ihk_atomic_t uniq_apid; /* apid uniq */
	mcs_rwlock_lock_t seg_list_lock; /* tg's list of segs lock */
	struct list_head seg_list; /* tg's list of segs */
	ihk_atomic_t refcnt; /* references to tg */
	ihk_atomic_t n_pinned; /* #of pages pinned by this tg */
	struct list_head tg_hashlist; /* tg hash list */
	struct thread *group_leader; /* thread group leader */
	struct process_vm *vm; /* tg's mm */
	ihk_atomic_t n_recall_PFNs; /* #of recall of PFNs in progress */
	struct process_vm *vm; /* tg's process_vm */
	struct xpmem_hashlist ap_hashtable[]; /* locks + ap hash lists */
};

struct xpmem_segment {
	ihk_spinlock_t lock; /* seg lock */
	mcs_rwlock_lock_t seg_lock; /* seg sema */
	xpmem_segid_t segid; /* unique segid */
	unsigned long vaddr; /* starting address */
	size_t size; /* size of seg */
@ -216,18 +214,16 @@ struct xpmem_access_permit {
};

struct xpmem_attachment {
	mcs_rwlock_lock_t at_lock; /* att lock for serialization */
	struct mcs_rwlock_node_irqsave at_irqsave; /* att lock for serialization */
	mcs_rwlock_lock_t at_lock; /* att lock */
	unsigned long vaddr; /* starting address of seg attached */
	unsigned long at_vaddr; /* address where seg is attached */
	size_t at_size; /* size of seg attachment */
	struct vm_range *at_vma; /* vma where seg is attachment */
	struct vm_range *at_vmr; /* vm_range where seg is attachment */
	volatile int flags; /* att attributes and state */
	ihk_atomic_t refcnt; /* references to att */
	struct xpmem_access_permit *ap; /* associated access permit */
	struct list_head att_list; /* atts linked to access permit */
	struct process_vm *vm; /* mm struct attached to */
	mcs_rwlock_lock_t invalidate_lock; /* to serialize page table invalidates */
	struct process_vm *vm; /* process_vm attached to */
};

struct xpmem_partition {
@ -249,8 +245,10 @@ struct xpmem_perm {
#define XPMEM_PERM_IRUSR 00400
#define XPMEM_PERM_IWUSR 00200

extern struct xpmem_partition *xpmem_my_part;

static int xpmem_ioctl(struct mckfd *mckfd, ihk_mc_user_context_t *ctx);
static int xpmem_close( struct mckfd *mckfd, ihk_mc_user_context_t *ctx);
static int xpmem_close(struct mckfd *mckfd, ihk_mc_user_context_t *ctx);

static int xpmem_init(void);
static void xpmem_exit(void);
@ -263,10 +261,47 @@ static xpmem_segid_t xpmem_make_segid(struct xpmem_thread_group *);
static int xpmem_remove(xpmem_segid_t);
static void xpmem_remove_seg(struct xpmem_thread_group *,
		struct xpmem_segment *);
static void xpmem_remove_segs_of_tg(struct xpmem_thread_group *seg_tg);

static int xpmem_get(xpmem_segid_t, int, int, void *, xpmem_apid_t *);
static int xpmem_check_permit_mode(int, struct xpmem_segment *);
static int xpmem_perms(struct xpmem_perm *, short);
static xpmem_apid_t xpmem_make_apid(struct xpmem_thread_group *);

static int xpmem_release(xpmem_apid_t);
static void xpmem_release_ap(struct xpmem_thread_group *,
		struct xpmem_access_permit *);
static void xpmem_release_aps_of_tg(struct xpmem_thread_group *ap_tg);

static int xpmem_attach(struct mckfd *, xpmem_apid_t, off_t, size_t,
		unsigned long, int, int, unsigned long *);

static int xpmem_detach(unsigned long);
static int xpmem_vm_munmap(struct process_vm *vm, void *addr, size_t len);
static int xpmem_remove_process_range(struct process_vm *vm,
		unsigned long start, unsigned long end, int *ro_freedp);
static int xpmem_free_process_memory_range(struct process_vm *vm,
		struct vm_range *range);
static void xpmem_detach_att(struct xpmem_access_permit *,
		struct xpmem_attachment *);
static void xpmem_clear_PTEs(struct xpmem_segment *);
static void xpmem_clear_PTEs_range(struct xpmem_segment *, unsigned long,
		unsigned long);
static void xpmem_clear_PTEs_of_ap(struct xpmem_access_permit *, unsigned long,
		unsigned long);
static void xpmem_clear_PTEs_of_att(struct xpmem_attachment *, unsigned long,
		unsigned long);

extern struct xpmem_partition *xpmem_my_part;
static int xpmem_remap_pte(struct process_vm *, struct vm_range *,
		unsigned long, uint64_t, struct xpmem_segment *, unsigned long);

static int xpmem_ensure_valid_page(struct xpmem_segment *, unsigned long);
static pte_t * xpmem_vaddr_to_pte(struct process_vm *, unsigned long,
		size_t *pgsize);
static int xpmem_pin_page(struct xpmem_thread_group *, struct thread *,
		struct process_vm *, unsigned long);
static void xpmem_unpin_pages(struct xpmem_segment *, struct process_vm *,
		unsigned long, size_t);

static struct xpmem_thread_group * __xpmem_tg_ref_by_tgid_nolock_internal(
		pid_t, int, int);
@ -317,10 +352,17 @@ static inline struct xpmem_thread_group *__xpmem_tg_ref_by_tgid_nolock(
#define xpmem_tg_ref_by_tgid_all_nolock(t) __xpmem_tg_ref_by_tgid_nolock(t, 1)

static struct xpmem_thread_group * xpmem_tg_ref_by_segid(xpmem_segid_t);
static struct xpmem_thread_group * xpmem_tg_ref_by_apid(xpmem_apid_t);
static void xpmem_tg_deref(struct xpmem_thread_group *);
static struct xpmem_segment *xpmem_seg_ref_by_segid(struct xpmem_thread_group *,
		xpmem_segid_t);
static void xpmem_seg_deref(struct xpmem_segment *);
static struct xpmem_access_permit * xpmem_ap_ref_by_apid(
		struct xpmem_thread_group *, xpmem_apid_t);
static void xpmem_ap_deref(struct xpmem_access_permit *);
static void xpmem_att_deref(struct xpmem_attachment *);
static int xpmem_validate_access(struct xpmem_access_permit *, off_t, size_t,
		int, unsigned long *);

/*
 * Inlines that mark an internal driver structure as being destroyable or not.
@ -363,6 +405,42 @@ static inline void xpmem_seg_destroyable(
	XPMEM_DEBUG("return: ");
}

static inline void xpmem_ap_not_destroyable(
	struct xpmem_access_permit *ap)
{
	ihk_atomic_set(&ap->refcnt, 1);

	XPMEM_DEBUG("return: ap->refcnt=%d", ap->refcnt);
}

static inline void xpmem_ap_destroyable(
	struct xpmem_access_permit *ap)
{
	XPMEM_DEBUG("call: ");

	xpmem_ap_deref(ap);

	XPMEM_DEBUG("return: ");
}

static inline void xpmem_att_not_destroyable(
	struct xpmem_attachment *att)
{
	ihk_atomic_set(&att->refcnt, 1);

	XPMEM_DEBUG("return: att->refcnt=%d", att->refcnt);
}

static inline void xpmem_att_destroyable(
	struct xpmem_attachment *att)
{
	XPMEM_DEBUG("call: ");

	xpmem_att_deref(att);

	XPMEM_DEBUG("return: ");
}

/*
 * Inlines that increment the refcnt for the specified structure.
 */
@ -384,5 +462,29 @@ static inline void xpmem_seg_ref(
	XPMEM_DEBUG("return: seg->refcnt=%d", seg->refcnt);
}

static inline void xpmem_ap_ref(
	struct xpmem_access_permit *ap)
{
	DBUG_ON(ihk_atomic_read(&ap->refcnt) <= 0);
	ihk_atomic_inc(&ap->refcnt);

	XPMEM_DEBUG("return: ap->refcnt=%d", ap->refcnt);
}

static inline void xpmem_att_ref(
	struct xpmem_attachment *att)
{
	DBUG_ON(ihk_atomic_read(&att->refcnt) <= 0);
	ihk_atomic_inc(&att->refcnt);

	XPMEM_DEBUG("return: att->refcnt=%d", att->refcnt);
}

static inline int xpmem_is_private_data(
	struct vm_range *vmr)
{
	return (vmr->private_data != NULL);
}

#endif /* _XPMEM_PRIVATE_H */
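The destroyable/not-destroyable inlines encode a reference-counting convention: creation leaves the object with refcnt = 1 held by its creator, xpmem_*_ref()/xpmem_*_deref() bracket each use, and xpmem_*_destroyable() releases the creator's reference so the object is reclaimed once the final deref lands. A hedged sketch of the intended lifecycle (alloc_access_permit() is a hypothetical constructor, not in this file):

	struct xpmem_access_permit *ap = alloc_access_permit();

	xpmem_ap_not_destroyable(ap);	/* refcnt = 1: creator's reference */

	xpmem_ap_ref(ap);		/* a user pins the object... */
	/* ... uses it ... */
	xpmem_ap_deref(ap);		/* ... and releases it */

	xpmem_ap_destroyable(ap);	/* drop creator's ref; freed at zero */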
@ -31,6 +31,7 @@
#include <cls.h>
#include <syscall.h>
#include <sysfs.h>
#include <rusage.h>

//#define IOCTL_FUNC_EXTENSION
#ifdef IOCTL_FUNC_EXTENSION
@ -40,17 +41,21 @@
//#define DEBUG_PRINT_INIT

#ifdef DEBUG_PRINT_INIT
#define dkprintf kprintf
#define dkprintf(...) do { kprintf(__VA_ARGS__); } while (0)
#define ekprintf(...) do { kprintf(__VA_ARGS__); } while (0)
#else
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#define dkprintf(...) do { } while (0)
#define ekprintf(...) do { kprintf(__VA_ARGS__); } while (0)
#endif

int osnum = 0;

extern struct ihk_kmsg_buf kmsg_buf;

extern unsigned long ihk_mc_get_ns_per_tsc(void);
extern long syscall(int, ihk_mc_user_context_t *);

struct ihk_os_monitor *monitor;

static void handler_init(void)
{
	ihk_mc_set_syscall_handler(syscall);
@ -239,6 +244,34 @@ static void time_init(void)
	return;
}

static void monitor_init()
{
	int z;
	unsigned long phys;

	z = sizeof(struct ihk_os_monitor) +
		sizeof(struct ihk_os_cpu_monitor) * num_processors;
	z = (z + PAGE_SIZE -1) >> PAGE_SHIFT;
	monitor = ihk_mc_alloc_pages(z, IHK_MC_AP_CRITICAL);
	memset(monitor, 0, z * PAGE_SIZE);
	monitor->num_processors = num_processors;
	monitor->num_numa_nodes = ihk_mc_get_nr_numa_nodes();
	monitor->ns_per_tsc = ihk_mc_get_ns_per_tsc();
	phys = virt_to_phys(monitor);
	ihk_set_monitor(phys, sizeof(struct ihk_os_monitor) +
			sizeof(struct ihk_os_cpu_monitor) * num_processors);
}

int nmi_mode;

static void nmi_init()
{
	unsigned long phys;

	phys = virt_to_phys(&nmi_mode);
	ihk_set_nmi_mode_addr(phys);
}

static void rest_init(void)
{
	handler_init();
@ -250,7 +283,9 @@ static void rest_init(void)
	//pc_test();

	ap_init();
	monitor_init();
	cpu_local_var_init();
	nmi_init();
	time_init();
	kmalloc_init();

@ -320,6 +355,7 @@ static void setup_remote_snooping_samples(void)
static void populate_sysfs(void)
{
	cpu_sysfs_setup();
	numa_sysfs_setup();
	//setup_remote_snooping_samples();
} /* populate_sysfs() */

@ -336,8 +372,12 @@ static void post_init(void)
	}

	if (find_command_line("hidos")) {
		init_host_syscall_channel();
		init_host_syscall_channel2();
		int ikc_cpu = ihk_mc_get_ikc_cpu(ihk_mc_get_processor_id());
		if(ikc_cpu < 0) {
			ekprintf("%s,ihk_mc_get_ikc_cpu failed\n", __FUNCTION__);
		}
		init_host_ikc2mckernel();
		init_host_ikc2linux(ikc_cpu);
	}

	arch_setup_vdso();
@ -369,7 +409,6 @@ int main(void)
	kmsg_init(mode);

	kputs("IHK/McKernel started.\n");

	ihk_set_kmsg(virt_to_phys(&kmsg_buf), IHK_KMSG_SIZE);
	arch_init();
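monitor_init() sizes the shared monitor area as one header plus one per-CPU record, then rounds the byte count up to whole pages: z = (z + PAGE_SIZE - 1) >> PAGE_SHIFT is the usual ceiling division. For example, 9,000 bytes becomes (9000 + 4095) >> 12 = 3 pages of 4 KiB.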
378
kernel/mem.c
@ -37,6 +37,11 @@
#include <cpulocal.h>
#include <init.h>
#include <cas.h>
#include <rusage.h>
#include <syscall.h>
#include <profile.h>
#include <limits.h>
#include <sysfs.h>

//#define DEBUG_PRINT_MEM

@ -69,9 +74,9 @@ static void *___kmalloc(int size, ihk_mc_ap_flag flag);
static void ___kfree(void *ptr);

static void *___ihk_mc_alloc_aligned_pages_node(int npages,
		int p2align, ihk_mc_ap_flag flag, int node);
static void *___ihk_mc_alloc_pages(int npages, ihk_mc_ap_flag flag);
static void ___ihk_mc_free_pages(void *p, int npages);
		int p2align, ihk_mc_ap_flag flag, int node, int is_user);
static void *___ihk_mc_alloc_pages(int npages, ihk_mc_ap_flag flag, int is_user);
static void ___ihk_mc_free_pages(void *p, int npages, int is_user);

/*
 * Page allocator tracking routines
@ -152,14 +157,15 @@ struct pagealloc_track_entry *__pagealloc_track_find_entry(

/* Top level routines called from macros */
void *_ihk_mc_alloc_aligned_pages_node(int npages, int p2align,
	ihk_mc_ap_flag flag, int node, char *file, int line)
	ihk_mc_ap_flag flag, int node, int is_user,
	char *file, int line)
{
	unsigned long irqflags;
	struct pagealloc_track_entry *entry;
	struct pagealloc_track_addr_entry *addr_entry;
	int hash, addr_hash;
	void *r = ___ihk_mc_alloc_aligned_pages_node(npages,
			p2align, flag, node);
			p2align, flag, node, is_user);

	if (!memdebug || !pagealloc_track_initialized)
		return r;
@ -231,7 +237,8 @@ out:
	return r;
}

void _ihk_mc_free_pages(void *ptr, int npages, char *file, int line)
void _ihk_mc_free_pages(void *ptr, int npages, int is_user,
	char *file, int line)
{
	unsigned long irqflags;
	struct pagealloc_track_entry *entry;
@ -255,6 +262,15 @@ void _ihk_mc_free_pages(void *ptr, int npages, char *file, int line)
	}

	if (addr_entry) {
		if (npages > addr_entry->npages) {
			kprintf("%s: ERROR: trying to deallocate %d pages"
					" for a %d pages allocation at %s:%d\n",
					__FUNCTION__,
					npages, addr_entry->npages,
					file, line);
			panic("invalid deallocation");
		}

		if (addr_entry->npages > npages) {
			addr_entry->addr += (npages * PAGE_SIZE);
			addr_entry->npages -= npages;
@ -297,9 +313,9 @@ void _ihk_mc_free_pages(void *ptr, int npages, char *file, int line)

	/* Still not? Invalid deallocation */
	if (!addr_entry) {
		kprintf("%s: ERROR: invalid deallocation @ %s:%d\n",
				__FUNCTION__, file, line);
		panic("invalid deallocation");
		kprintf("%s: ERROR: invalid deallocation for addr: 0x%lx @ %s:%d\n",
				__FUNCTION__, ptr, file, line);
		panic("panic: invalid deallocation");
	}

	dkprintf("%s: found covering addr_entry: 0x%lx:%d\n", __FUNCTION__,
@ -393,7 +409,7 @@ void _ihk_mc_free_pages(void *ptr, int npages, char *file, int line)
	___kfree(entry);

out:
	___ihk_mc_free_pages(ptr, npages);
	___ihk_mc_free_pages(ptr, npages, is_user);
}

void pagealloc_memcheck(void)
@ -445,23 +461,24 @@ void pagealloc_memcheck(void)

/* Actual allocation routines */
static void *___ihk_mc_alloc_aligned_pages_node(int npages, int p2align,
		ihk_mc_ap_flag flag, int node)
		ihk_mc_ap_flag flag, int node, int is_user)
{
	if (pa_ops)
		return pa_ops->alloc_page(npages, p2align, flag, node);
		return pa_ops->alloc_page(npages, p2align, flag, node, is_user);
	else
		return early_alloc_pages(npages);
}

static void *___ihk_mc_alloc_pages(int npages, ihk_mc_ap_flag flag)
static void *___ihk_mc_alloc_pages(int npages, ihk_mc_ap_flag flag,
	int is_user)
{
	return ___ihk_mc_alloc_aligned_pages_node(npages, PAGE_P2ALIGN, flag, -1);
	return ___ihk_mc_alloc_aligned_pages_node(npages, PAGE_P2ALIGN, flag, -1, is_user);
}

static void ___ihk_mc_free_pages(void *p, int npages)
static void ___ihk_mc_free_pages(void *p, int npages, int is_user)
{
	if (pa_ops)
		pa_ops->free_page(p, npages);
		pa_ops->free_page(p, npages, is_user);
}

void ihk_mc_set_page_allocator(struct ihk_mc_pa_ops *ops)
@ -491,11 +508,14 @@ static void reserve_pages(struct ihk_page_allocator_desc *pa_allocator,

extern int cpu_local_var_initialized;
static void *mckernel_allocate_aligned_pages_node(int npages, int p2align,
		ihk_mc_ap_flag flag, int pref_node)
		ihk_mc_ap_flag flag, int pref_node, int is_user)
{
	unsigned long pa = 0;
	int i, node;
#ifndef IHK_RBTREE_ALLOCATOR
	struct ihk_page_allocator_desc *pa_allocator;
#endif
	int numa_id;

	/* Not yet initialized or idle process */
	if (!cpu_local_var_initialized ||
@ -503,8 +523,9 @@ static void *mckernel_allocate_aligned_pages_node(int npages, int p2align,
		!cpu_local_var(current)->vm)
		goto distance_based;

	/* User requested policy? */
	if (!(flag & IHK_MC_AP_USER)) {
	/* No explicitly requested NUMA or user policy? */
	if ((pref_node == -1) && (!(flag & IHK_MC_AP_USER) ||
		cpu_local_var(current)->vm->numa_mem_policy == MPOL_DEFAULT)) {
		goto distance_based;
	}

@ -512,6 +533,39 @@
	if (!memory_nodes[node].nodes_by_distance)
		goto order_based;

	/* Explicit valid node? */
	if (pref_node > -1 && pref_node < ihk_mc_get_nr_numa_nodes()) {
#ifdef IHK_RBTREE_ALLOCATOR
		{
			pa = ihk_numa_alloc_pages(&memory_nodes[pref_node], npages, p2align);
#else
		list_for_each_entry(pa_allocator,
				&memory_nodes[pref_node].allocators, list) {
			pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);
#endif

			if (pa) {
				dkprintf("%s: explicit (node: %d) CPU @ node %d allocated "
						"%d pages from node %d\n",
						__FUNCTION__,
						pref_node,
						ihk_mc_get_numa_id(),
						npages, node);

				rusage_page_add(pref_node, npages, is_user);

				return phys_to_virt(pa);
			}
			else {
#ifdef PROFILE_ENABLE
				//profile_event_add(PROFILE_numa_alloc_missed, npages * 4096);
#endif
				dkprintf("%s: couldn't fulfill explicit NUMA request for %d pages\n",
						__FUNCTION__, npages);
			}
		}
	}

	switch (cpu_local_var(current)->vm->numa_mem_policy) {
		case MPOL_BIND:
		case MPOL_PREFERRED:
@ -526,10 +580,16 @@
				continue;
			}

			numa_id = memory_nodes[node].nodes_by_distance[i].id;
#ifdef IHK_RBTREE_ALLOCATOR
			{
				pa = ihk_numa_alloc_pages(&memory_nodes[memory_nodes[node].
						nodes_by_distance[i].id], npages, p2align);
#else
			list_for_each_entry(pa_allocator,
					&memory_nodes[memory_nodes[node].
					nodes_by_distance[i].id].allocators, list) {
					&memory_nodes[numa_id].allocators, list) {
				pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);
#endif

				if (pa) {
					dkprintf("%s: policy: CPU @ node %d allocated "
@ -537,6 +597,10 @@
							__FUNCTION__,
							ihk_mc_get_numa_id(),
							npages, node);

					rusage_page_add(numa_id, npages,
							is_user);

					break;
				}
			}
@ -557,6 +621,9 @@
		return phys_to_virt(pa);
	}
	else {
#ifdef PROFILE_ENABLE
		profile_event_add(PROFILE_mpol_alloc_missed, npages * 4096);
#endif
		dkprintf("%s: couldn't fulfill user policy for %d pages\n",
				__FUNCTION__, npages);
	}
@ -569,11 +636,17 @@ distance_based:
		goto order_based;

	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
		numa_id = memory_nodes[node].nodes_by_distance[i].id;

#ifdef IHK_RBTREE_ALLOCATOR
		{
			pa = ihk_numa_alloc_pages(&memory_nodes[memory_nodes[node].
					nodes_by_distance[i].id], npages, p2align);
#else
		list_for_each_entry(pa_allocator,
				&memory_nodes[memory_nodes[node].
				nodes_by_distance[i].id].allocators, list) {
				&memory_nodes[numa_id].allocators, list) {
			pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);
#endif

			if (pa) {
				dkprintf("%s: distance: CPU @ node %d allocated "
@ -582,6 +655,7 @@ distance_based:
						ihk_mc_get_numa_id(),
						npages,
						memory_nodes[node].nodes_by_distance[i].id);
				rusage_page_add(numa_id, npages, is_user);
				break;
			}
		}
@ -597,13 +671,20 @@ order_based:

	/* Fall back to regular order */
	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {

		numa_id = (node + i) % ihk_mc_get_nr_numa_nodes();
#ifdef IHK_RBTREE_ALLOCATOR
		{
			pa = ihk_numa_alloc_pages(&memory_nodes[(node + i) %
					ihk_mc_get_nr_numa_nodes()], npages, p2align);
#else
		list_for_each_entry(pa_allocator,
				&memory_nodes[(node + i) %
				ihk_mc_get_nr_numa_nodes()].allocators, list) {
				&memory_nodes[numa_id].allocators, list) {
			pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);

			if (pa) break;
#endif
			if (pa) {
				rusage_page_add(numa_id, npages, is_user);
				break;
			}
		}

		if (pa) break;
@ -618,15 +699,32 @ order_based:
	return NULL;
}
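mckernel_allocate_aligned_pages_node() above tries allocators in a fixed order. A condensed sketch of the control flow (names simplified; the authoritative logic is the function itself):

/*
 * 1. pref_node valid           -> try only that node
 * 2. MPOL_BIND/MPOL_PREFERRED  -> walk nodes_by_distance under the policy
 * distance_based:
 * 3. walk nodes_by_distance from the calling CPU's node
 * order_based:
 * 4. try (node + i) % nr_numa_nodes for i = 0..nr-1
 *
 * Every path that returns pages calls rusage_page_add(numa_id, npages,
 * is_user) first, so the rusage.h accounting always matches the node the
 * pages actually came from.
 */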
static void __mckernel_free_pages_in_allocator(void *va, int npages)
static void __mckernel_free_pages_in_allocator(void *va, int npages,
	int is_user)
{
	int i;
	unsigned long pa_start = virt_to_phys(va);
	unsigned long pa_end = pa_start + (npages * PAGE_SIZE);

#ifdef IHK_RBTREE_ALLOCATOR
	for (i = 0; i < ihk_mc_get_nr_memory_chunks(); ++i) {
		unsigned long start, end;
		int numa_id;

		ihk_mc_get_memory_chunk(i, &start, &end, &numa_id);
		if (start > pa_start || end < pa_end) {
			continue;
		}

		ihk_numa_free_pages(&memory_nodes[numa_id], pa_start, npages);
		rusage_page_sub(numa_id, npages, is_user);
		break;
	}
#else
	struct ihk_page_allocator_desc *pa_allocator;

	/* Find corresponding memory allocator */
	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
		struct ihk_page_allocator_desc *pa_allocator;

		list_for_each_entry(pa_allocator,
				&memory_nodes[i].allocators, list) {
@ -634,14 +732,16 @@
			if (pa_start >= pa_allocator->start &&
					pa_end <= pa_allocator->end) {
				ihk_pagealloc_free(pa_allocator, pa_start, npages);
				rusage_page_sub(i, npages, is_user);
				return;
			}
		}
	}
#endif
}

static void mckernel_free_pages(void *va, int npages)
static void mckernel_free_pages(void *va, int npages, int is_user)
{
	struct list_head *pendings = &cpu_local_var(pending_free_pages);
	struct page *page;
@ -660,7 +760,7 @@
		}
	}

	__mckernel_free_pages_in_allocator(va, npages);
	__mckernel_free_pages_in_allocator(va, npages, is_user);
}

void begin_free_pages_pending(void) {
@ -690,7 +790,7 @@ void finish_free_pages_pending(void)
		page->mode = PM_NONE;
		list_del(&page->list);
		__mckernel_free_pages_in_allocator(phys_to_virt(page_to_phys(page)),
				page->offset);
				page->offset, IHK_MC_PG_USER);
	}

	pendings->next = pendings->prev = NULL;
@ -711,6 +811,9 @@ static void query_free_mem_interrupt_handler(void *priv)

	/* Iterate memory allocators */
	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
#ifdef IHK_RBTREE_ALLOCATOR
		pages += memory_nodes[i].nr_free_pages;
#else
		struct ihk_page_allocator_desc *pa_allocator;

		list_for_each_entry(pa_allocator,
@ -720,6 +823,7 @@ static void query_free_mem_interrupt_handler(void *priv)
				pa_allocator->start, pa_allocator->end, __pages);
			pages += __pages;
		}
#endif
	}

	kprintf("McKernel free pages in total: %d\n", pages);
@ -781,119 +885,149 @@ void coredump(struct thread *thread, void *regs)
	freecore(&coretable);
}
void remote_flush_tlb_cpumask(struct process_vm *vm,
		unsigned long addr, int cpu_id)
{
	unsigned long __addr = addr;
	return remote_flush_tlb_array_cpumask(vm, &__addr, 1, cpu_id);
}

void remote_flush_tlb_array_cpumask(struct process_vm *vm,
		unsigned long *addr,
		int nr_addr,
		int cpu_id)
{
	unsigned long cpu;
	int flush_ind;
	struct tlb_flush_entry *flush_entry;
	cpu_set_t _cpu_set;

	if (addr) {
		flush_ind = (addr >> PAGE_SHIFT) % IHK_TLB_FLUSH_IRQ_VECTOR_SIZE;
	if (addr[0]) {
		flush_ind = (addr[0] >> PAGE_SHIFT) % IHK_TLB_FLUSH_IRQ_VECTOR_SIZE;
	}
	/* Zero address denotes full TLB flush */
	else {
		/* Random.. */
		flush_ind = (rdtsc()) % IHK_TLB_FLUSH_IRQ_VECTOR_SIZE;
	}

	flush_entry = &tlb_flush_vector[flush_ind];

	/* Take a copy of the cpu set so that we don't hold the lock
	 * all the way while interrupting other cores */
	ihk_mc_spinlock_lock_noirq(&vm->address_space->cpu_set_lock);
	memcpy(&_cpu_set, &vm->address_space->cpu_set, sizeof(cpu_set_t));
	ihk_mc_spinlock_unlock_noirq(&vm->address_space->cpu_set_lock);

	dkprintf("trying to acquire flush_entry->lock flush_ind: %d\n", flush_ind);

	ihk_mc_spinlock_lock_noirq(&flush_entry->lock);

	flush_entry->vm = vm;
	flush_entry->addr = addr;
	flush_entry->nr_addr = nr_addr;
	ihk_atomic_set(&flush_entry->pending, 0);

	dkprintf("lock acquired, iterating cpu mask.. flush_ind: %d\n", flush_ind);

	/* Loop through CPUs in this address space and interrupt them for
	 * TLB flush on the specified address */
	for_each_set_bit(cpu, (const unsigned long*)&_cpu_set.__bits, CPU_SETSIZE) {

		if (ihk_mc_get_processor_id() == cpu)
			continue;

		ihk_atomic_inc(&flush_entry->pending);
		dkprintf("remote_flush_tlb_cpumask: flush_ind: %d, addr: 0x%lX, interrupting cpu: %d\n",
				flush_ind, addr, cpu);

		ihk_mc_interrupt_cpu(get_x86_cpu_local_variable(cpu)->apic_id,
				flush_ind + IHK_TLB_FLUSH_IRQ_VECTOR_START);
	}

#ifdef DEBUG_IC_TLB
	{
		unsigned long tsc;
		tsc = rdtsc() + 12884901888; /* 1.2GHz =>10 sec */
#endif
		if (flush_entry->addr) {
			flush_tlb_single(flush_entry->addr & PAGE_MASK);
		if (flush_entry->addr[0]) {
			int i;

			for (i = 0; i < flush_entry->nr_addr; ++i) {
				flush_tlb_single(flush_entry->addr[i] & PAGE_MASK);
			}
		}
		/* Zero address denotes full TLB flush */
		else {
			flush_tlb();
		}

		/* Flush on this core */
		flush_tlb_single(addr & PAGE_MASK);
		/* Wait for all cores */
		while (ihk_atomic_read(&flush_entry->pending) != 0) {
			cpu_pause();

#ifdef DEBUG_IC_TLB
			if (rdtsc() > tsc) {
				kprintf("waited 10 secs for remote TLB!! -> panic_all()\n");
				panic_all_cores("waited 10 secs for remote TLB!!\n");
			}
#endif
		}
#ifdef DEBUG_IC_TLB
	}
#endif

	ihk_mc_spinlock_unlock_noirq(&flush_entry->lock);
}

void tlb_flush_handler(int vector)
{
#ifdef PROFILE_ENABLE
	unsigned long t_s = rdtsc();
#endif // PROFILE_ENABLE
	int flags = cpu_disable_interrupt_save();

	struct tlb_flush_entry *flush_entry = &tlb_flush_vector[vector -
		IHK_TLB_FLUSH_IRQ_VECTOR_START];

	dkprintf("decreasing pending cnt for %d\n",
			vector - IHK_TLB_FLUSH_IRQ_VECTOR_START);

	/* Decrease counter */
	ihk_atomic_dec(&flush_entry->pending);
	if (flush_entry->addr[0]) {
		int i;

		dkprintf("flushing TLB for addr: 0x%lX\n", flush_entry->addr);

	if (flush_entry->addr) {
		flush_tlb_single(flush_entry->addr & PAGE_MASK);
		for (i = 0; i < flush_entry->nr_addr; ++i) {
			flush_tlb_single(flush_entry->addr[i] & PAGE_MASK);
			dkprintf("flushing TLB for addr: 0x%lX\n", flush_entry->addr[i]);
		}
	}
	/* Zero address denotes full TLB flush */
	else {
		flush_tlb();
	}

	/* Decrease counter */
	dkprintf("decreasing pending cnt for %d\n",
			vector - IHK_TLB_FLUSH_IRQ_VECTOR_START);
	ihk_atomic_dec(&flush_entry->pending);

	cpu_restore_interrupt(flags);
#ifdef PROFILE_ENABLE
	{
		unsigned long t_e = rdtsc();
		profile_event_add(PROFILE_tlb_invalidate, (t_e - t_s));
		if (cpu_local_var(current)->profile)
			cpu_local_var(current)->profile_elapsed_ts +=
				(t_e - t_s);
	}
#endif // PROFILE_ENABLE
}
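The refactor above replaces the single-address remote flush with an array interface, so a caller can batch several invalidations into one IPI round instead of issuing one round per page. A hypothetical caller (va0..va2 are assumed page-aligned user addresses; a zero first element would request a full TLB flush on each target core):

	unsigned long addrs[3] = { va0, va1, va2 };

	remote_flush_tlb_array_cpumask(vm, addrs, 3, ihk_mc_get_processor_id());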
static void page_fault_handler(void *fault_addr, uint64_t reason, void *regs)
{
	struct thread *thread = cpu_local_var(current);
	int error;
#ifdef PROFILE_ENABLE
	uint64_t t_s;
	t_s = rdtsc();
#endif // PROFILE_ENABLE

	set_cputime(interrupt_from_user(regs)? 1: 2);
	dkprintf("%s: addr: %p, reason: %lx, regs: %p\n",
@ -956,6 +1090,9 @@ out:
			__FUNCTION__, fault_addr, reason, regs, error);
	check_need_resched();
	set_cputime(0);
#ifdef PROFILE_ENABLE
	profile_event_add(PROFILE_page_fault, (rdtsc() - t_s));
#endif // PROFILE_ENABLE
	return;
}

@ -1031,26 +1168,71 @@ static void numa_init(void)
		memory_nodes[i].type = type;
		INIT_LIST_HEAD(&memory_nodes[i].allocators);
		memory_nodes[i].nodes_by_distance = 0;

		kprintf("NUMA: %d, Linux NUMA: %d, type: %d\n",
				i, linux_numa_id, type);
#ifdef IHK_RBTREE_ALLOCATOR
		memory_nodes[i].free_chunks.rb_node = 0;
		mcs_lock_init(&memory_nodes[i].lock);
		memory_nodes[i].min_addr = 0xFFFFFFFFFFFFFFFF;
		memory_nodes[i].max_addr = 0;
		memory_nodes[i].nr_pages = 0;
		memory_nodes[i].nr_free_pages = 0;
#endif
	}

	for (j = 0; j < ihk_mc_get_nr_memory_chunks(); ++j) {
		unsigned long start, end;
		int numa_id;
#ifndef IHK_RBTREE_ALLOCATOR
		struct ihk_page_allocator_desc *allocator;
#endif

		ihk_mc_get_memory_chunk(j, &start, &end, &numa_id);

		if (virt_to_phys(get_last_early_heap()) >= start &&
				virt_to_phys(get_last_early_heap()) < end) {
			dkprintf("%s: start from 0x%lx\n",
					__FUNCTION__, virt_to_phys(get_last_early_heap()));
			start = virt_to_phys(get_last_early_heap());
		}

#ifdef IHK_RBTREE_ALLOCATOR
		ihk_numa_add_free_pages(&memory_nodes[numa_id], start, end - start);
#else
		allocator = page_allocator_init(start, end);
		list_add_tail(&allocator->list, &memory_nodes[numa_id].allocators);
#endif

		kprintf("Physical memory: 0x%lx - 0x%lx, %lu bytes, %d pages available @ NUMA: %d\n",
#ifdef IHK_RBTREE_ALLOCATOR
		dkprintf("Physical memory: 0x%lx - 0x%lx, %lu bytes, %d pages available @ NUMA: %d\n",
				start, end,
				end - start,
				(end - start) >> PAGE_SHIFT,
				numa_id);
#else
		dkprintf("Physical memory: 0x%lx - 0x%lx, %lu bytes, %d pages available @ NUMA: %d\n",
				start, end,
				ihk_pagealloc_count(allocator) * PAGE_SIZE,
				ihk_pagealloc_count(allocator),
				numa_id);
#endif
#ifdef IHK_RBTREE_ALLOCATOR
		rusage_total_memory_add(end - start);
#else
		rusage_total_memory_add(ihk_pagealloc_count(allocator) *
				PAGE_SIZE);
#endif
	}

	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
#ifdef IHK_RBTREE_ALLOCATOR
		kprintf("NUMA: %d, Linux NUMA: %d, type: %d, "
				"available bytes: %lu, pages: %d\n",
				i, memory_nodes[i].linux_numa_id, memory_nodes[i].type,
				memory_nodes[i].nr_free_pages * PAGE_SIZE,
				memory_nodes[i].nr_free_pages);
#else
		kprintf("NUMA: %d, Linux NUMA: %d, type: %d\n",
				i, memory_nodes[i].linux_numa_id, memory_nodes[i].type);
#endif
	}
}

@ -1120,6 +1302,46 @@ static void numa_distances_init()
	}
}

static ssize_t numa_sysfs_show_meminfo(struct sysfs_ops *ops,
		void *instance, void *buf, size_t size)
{
	struct ihk_mc_numa_node *node =
		(struct ihk_mc_numa_node *)instance;
	char *sbuf = (char *)buf;
	int len = 0;

#ifdef IHK_RBTREE_ALLOCATOR
	len += snprintf(&sbuf[len], size - len, "Node %d MemTotal:%15d kB\n",
			node->id, node->nr_pages << 2);
	len += snprintf(&sbuf[len], size - len, "Node %d MemFree:%16d kB\n",
			node->id, node->nr_free_pages << 2);
	len += snprintf(&sbuf[len], size - len, "Node %d MemUsed:%16d kB\n",
			node->id, (node->nr_pages - node->nr_free_pages) << 2);
#endif

	return len;
}

struct sysfs_ops numa_sysfs_meminfo = {
	.show = &numa_sysfs_show_meminfo,
};

void numa_sysfs_setup(void) {
	int i;
	int error;
	char path[PATH_MAX];

	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
		sprintf(path, "/sys/devices/system/node/node%d/meminfo", i);

		error = sysfs_createf(&numa_sysfs_meminfo, &memory_nodes[i],
				0444, path);
		if (error) {
			kprintf("%s: ERROR: creating %s\n", __FUNCTION__, path);
		}
	}
}
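In numa_sysfs_show_meminfo() the << 2 shifts convert a count of 4 KiB pages into kB (one page = 4 kB), matching the unit convention of Linux /proc/meminfo; for example, 1,024 pages prints as 4096 kB.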
#define PHYS_PAGE_HASH_SHIFT (10)
#define PHYS_PAGE_HASH_SIZE (1 << PHYS_PAGE_HASH_SHIFT)
#define PHYS_PAGE_HASH_MASK (PHYS_PAGE_HASH_SIZE - 1)
@ -1580,6 +1802,10 @@ void _kfree(void *ptr, char *file, int line)
	struct kmalloc_track_addr_entry *addr_entry_iter, *addr_entry = NULL;
	int hash;

	if (!ptr) {
		return;
	}

	if (!memdebug) {
		goto out;
	}
@ -1600,7 +1826,8 @@ void _kfree(void *ptr, char *file, int line)
	ihk_mc_spinlock_unlock(&kmalloc_addr_hash_locks[hash], irqflags);

	if (!addr_entry) {
		kprintf("%s: ERROR: kfree()ing invalid pointer\n", __FUNCTION__);
		kprintf("%s: ERROR: kfree()ing invalid pointer at %s:%d\n",
				__FUNCTION__, file, line);
		panic("panic");
	}

@ -1828,7 +2055,7 @@ split_and_return:
	npages = (size + sizeof(struct kmalloc_header) + (PAGE_SIZE - 1))
		>> PAGE_SHIFT;
	/* Use low-level page allocator to avoid tracking */
	chunk = ___ihk_mc_alloc_pages(npages, flag);
	chunk = ___ihk_mc_alloc_pages(npages, flag, IHK_MC_PG_KERNEL);

	if (!chunk) {
		cpu_restore_interrupt(kmalloc_irq_flags);
@ -1844,9 +2071,14 @@ split_and_return:

static void ___kfree(void *ptr)
{
	struct kmalloc_header *chunk =
		(struct kmalloc_header*)(ptr - sizeof(struct kmalloc_header));
	unsigned long kmalloc_irq_flags = cpu_disable_interrupt_save();
	struct kmalloc_header *chunk;
	unsigned long kmalloc_irq_flags;

	if (!ptr)
		return;

	chunk = (struct kmalloc_header*)(ptr - sizeof(struct kmalloc_header));
	kmalloc_irq_flags = cpu_disable_interrupt_save();

	/* Sanity check */
	if (chunk->front_magic != 0x5c5c5c5c || chunk->end_magic != 0x6d6d6d6d) {
452
kernel/process.c
@ -31,6 +31,9 @@
#include <auxvec.h>
#include <timer.h>
#include <mman.h>
#include <xpmem.h>
#include <rusage.h>
#include <xpmem.h>

//#define DEBUG_PRINT_PROCESS

@ -88,6 +91,8 @@ init_process(struct process *proc, struct process *parent)
		proc->egid = parent->egid;
		proc->sgid = parent->sgid;
		proc->fsgid = parent->fsgid;
		proc->mpol_flags = parent->mpol_flags;
		proc->mpol_threshold = parent->mpol_threshold;
		memcpy(proc->rlimit, parent->rlimit,
				sizeof(struct rlimit) * MCK_RLIM_MAX);
	}
@ -101,12 +106,9 @@ init_process(struct process *proc, struct process *parent)
	waitq_init(&proc->waitpid_q);
	ihk_atomic_set(&proc->refcount, 2);
	proc->monitoring_event = NULL;
#ifdef TRACK_SYSCALLS
	mcs_lock_init(&proc->st_lock);
	proc->syscall_times = NULL;
	proc->syscall_cnts = NULL;
	proc->offload_times = NULL;
	proc->offload_cnts = NULL;
#ifdef PROFILE_ENABLE
	mcs_lock_init(&proc->profile_lock);
	proc->profile_events = NULL;
#endif
}

@ -275,6 +277,7 @@ struct thread *create_thread(unsigned long user_pc,
			dkprintf("%s: pid: %d, CPU: %d\n",
					__FUNCTION__, proc->pid, cpu);
			CPU_SET(cpu, &thread->cpu_set);
			CPU_SET(cpu, &proc->cpu_set);
			cpu_set_empty = 0;
		}

@ -286,6 +289,7 @@ struct thread *create_thread(unsigned long user_pc,
		infop = ihk_mc_get_cpu_info();
		for (i = 0; i < infop->ncpus; ++i) {
			CPU_SET(i, &thread->cpu_set);
			CPU_SET(i, &proc->cpu_set);
		}
	}

@ -391,6 +395,9 @@ clone_thread(struct thread *org, unsigned long pc, unsigned long sp,
		goto err_free_proc;
	memset(proc, '\0', sizeof(struct process));
	init_process(proc, org->proc);
#ifdef PROFILE_ENABLE
	proc->profile = org->proc->profile;
#endif

	proc->termsig = termsig;
	asp = create_address_space(cpu_local_var(resource_set), 1);
@ -475,8 +482,9 @@ clone_thread(struct thread *org, unsigned long pc, unsigned long sp,

	ihk_mc_spinlock_init(&thread->spin_sleep_lock);
	thread->spin_sleep = 0;
#ifdef TRACK_SYSCALLS
	thread->track_syscalls = org->track_syscalls;

#ifdef PROFILE_ENABLE
	thread->profile = org->profile | proc->profile;
#endif

	return thread;
@ -566,7 +574,8 @@ static int copy_user_pte(void *arg0, page_table_t src_pt, pte_t *src_ptep, void
	dkprintf("copy_user_pte(): page size: %d\n", pgsize);

	npages = pgsize / PAGE_SIZE;
	virt = ihk_mc_alloc_aligned_pages(npages, pgalign, IHK_MC_AP_NOWAIT);
	virt = ihk_mc_alloc_aligned_pages_user(npages, pgalign,
			IHK_MC_AP_NOWAIT);
	if (!virt) {
		kprintf("ERROR: copy_user_pte() allocating new page\n");
		error = -ENOMEM;
@ -637,6 +646,7 @@ static int copy_user_ranges(struct process_vm *vm, struct process_vm *orgvm)
		range->memobj = src_range->memobj;
		range->objoff = src_range->objoff;
		range->pgshift = src_range->pgshift;
		range->private_data = src_range->private_data;
		if (range->memobj) {
			memobj_ref(range->memobj);
		}
@ -734,6 +744,7 @@ int split_process_memory_range(struct process_vm *vm, struct vm_range *range,
	newrange->end = range->end;
	newrange->flag = range->flag;
	newrange->pgshift = range->pgshift;
	newrange->private_data = range->private_data;

	if (range->memobj) {
		memobj_ref(range->memobj);
@ -953,6 +964,10 @@ int remove_process_memory_range(struct process_vm *vm,
			ro_freed = 1;
		}

		if (freerange->private_data) {
			xpmem_remove_process_memory_range(vm, freerange);
		}

		error = free_process_memory_range(vm, freerange);
		if (error) {
			ekprintf("remove_process_memory_range(%p,%lx,%lx):"
@ -1027,6 +1042,58 @@ enum ihk_mc_pt_attribute common_vrflag_to_ptattr(unsigned long flag, uint64_t fa
	return attr;
}

/* Parallel memset implementation on top of general
 * SMP function call facility */
struct memset_smp_req {
	unsigned long phys;
	size_t len;
	int val;
};

int memset_smp_handler(int cpu_index, int nr_cpus, void *arg)
{
	struct memset_smp_req *req =
		(struct memset_smp_req *)arg;
	size_t len = req->len / nr_cpus;

	if (!len) {
		/* First core clears all */
		if (!cpu_index) {
			memset((void *)phys_to_virt(req->phys), req->val, req->len);
		}
	}
	else {
		/* Divide and clear */
		unsigned long p_s = req->phys + (cpu_index * len);
		unsigned long p_e = p_s + len;
		if (cpu_index == nr_cpus - 1) {
			p_e = req->phys + req->len;
		}

		memset((void *)phys_to_virt(p_s), req->val, p_e - p_s);
		dkprintf("%s: cpu_index: %d, nr_cpus: %d, phys: 0x%lx, "
				"len: %lu, p_s: 0x%lx, p_e: 0x%lx\n",
				__FUNCTION__, cpu_index, nr_cpus,
				req->phys, req->len,
				p_s, p_e);
	}

	return 0;
}

void *memset_smp(cpu_set_t *cpu_set, void *s, int c, size_t n)
{
	struct memset_smp_req req = {
		.phys = virt_to_phys(s),
		.len = n,
		.val = c,
	};

	smp_call_func(cpu_set, memset_smp_handler, &req);
	return NULL;
}
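memset_smp() spreads one large clear across every core in the given set; the handler divides the physical range evenly and lets the last core absorb the remainder. This is why the #if 1 block in add_process_memory_range() below keeps the plain memset() enabled and retains the SMP variant only as a disabled alternative for ranges of 1 MiB and more. Hypothetical usage (virt and len are assumed to describe a kernel-virtual range backed by contiguous physical pages):

	/* Zero 'len' bytes at 'virt' using all CPUs in the current
	 * thread's cpu_set. */
	memset_smp(&cpu_local_var(current)->cpu_set, virt, 0, len);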
int add_process_memory_range(struct process_vm *vm,
|
||||
unsigned long start, unsigned long end,
|
||||
unsigned long phys, unsigned long flag,
|
||||
@ -1058,6 +1125,7 @@ int add_process_memory_range(struct process_vm *vm,
|
||||
range->memobj = memobj;
|
||||
range->objoff = offset;
|
||||
range->pgshift = pgshift;
|
||||
range->private_data = NULL;
|
||||
|
||||
rc = 0;
|
||||
if (phys == NOPHYS) {
|
||||
@ -1090,9 +1158,19 @@ int add_process_memory_range(struct process_vm *vm,
|
||||
insert_vm_range_list(vm, range);
|
||||
|
||||
/* Clear content! */
|
||||
if (!(flag & (VR_REMOTE | VR_DEMAND_PAGING))
|
||||
if (phys != NOPHYS && !(flag & (VR_REMOTE | VR_DEMAND_PAGING))
|
||||
&& ((flag & VR_PROT_MASK) != VR_PROT_NONE)) {
|
||||
memset((void*)phys_to_virt(phys), 0, end - start);
|
||||
#if 1
|
||||
memset((void*)phys_to_virt(phys), 0, end - start);
|
||||
#else
|
||||
if (end - start < (1024*1024)) {
|
||||
memset((void*)phys_to_virt(phys), 0, end - start);
|
||||
}
|
||||
else {
|
||||
memset_smp(&cpu_local_var(current)->cpu_set,
|
||||
(void *)phys_to_virt(phys), 0, end - start);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Return range object if requested */
|
||||
@ -1315,7 +1393,7 @@ static int remap_one_page(void *arg0, page_table_t pt, pte_t *ptep,
|
||||
|
||||
page = phys_to_page(phys);
|
||||
if (page && page_unmap(page)) {
|
||||
ihk_mc_free_pages(phys_to_virt(phys), pgsize/PAGE_SIZE);
|
||||
ihk_mc_free_pages_user(phys_to_virt(phys), pgsize/PAGE_SIZE);
|
||||
}
|
||||
|
||||
error = 0;
|
||||
@ -1600,7 +1678,9 @@ static int page_fault_process_memory_range(struct process_vm *vm, struct vm_rang
|
||||
|
||||
retry:
|
||||
npages = pgsize / PAGE_SIZE;
|
||||
	virt = ihk_mc_alloc_aligned_pages(npages, p2align, IHK_MC_AP_NOWAIT);
	virt = ihk_mc_alloc_aligned_pages_user(npages, p2align,
			IHK_MC_AP_NOWAIT |
			(range->flag & VR_AP_USER) ? IHK_MC_AP_USER : 0);
	if (!virt && !range->pgshift && (pgsize != PAGE_SIZE)) {
		error = arch_get_smaller_page_size(NULL, pgsize, &pgsize, &p2align);
		if (error) {
@@ -1618,6 +1698,9 @@ retry:
	}
	dkprintf("%s: clearing 0x%lx:%lu\n",
		__FUNCTION__, pgaddr, pgsize);
#ifdef PROFILE_ENABLE
	profile_event_add(PROFILE_page_fault_anon_clr, pgsize);
#endif // PROFILE_ENABLE
	memset(virt, 0, pgsize);
	phys = virt_to_phys(virt);
	if (phys_to_page(phys)) {
@@ -1633,7 +1716,7 @@ retry:

	attr = arch_vrflag_to_ptattr(range->flag | memobj_flag, reason, ptep);

	/*****/
	/* Copy on write */
	if (((range->flag & VR_PRIVATE) ||
	     ((reason & PF_PATCH) && !(range->flag & VR_PROT_WRITE)))
	    && ((!page && phys == NOPHYS) || (page &&
@@ -1648,7 +1731,8 @@ retry:
		size_t npages;

		npages = pgsize / PAGE_SIZE;
		virt = ihk_mc_alloc_aligned_pages(npages, p2align, IHK_MC_AP_NOWAIT);
		virt = ihk_mc_alloc_aligned_pages_user(npages, p2align,
				IHK_MC_AP_NOWAIT);
		if (!virt) {
			error = -ENOMEM;
			kprintf("page_fault_process_memory_range(%p,%lx-%lx %lx,%lx,%lx):cannot allocate copy page. %d\n", vm, range->start, range->end, range->flag, fault_addr, reason, error);
@@ -1767,13 +1851,19 @@ static int do_page_fault_process_vm(struct process_vm *vm, void *fault_addr0, ui
	}

	/*
	 * XXX: quick fix
	 * Corrupt data was read by the following sequence.
	 * 1) a process did mmap(MAP_PRIVATE|MAP_ANONYMOUS)
	 * 2) the process fetched the contents of a page of (1)'s mapping.
	 * 3) the process wrote the contents of the page of (1)'s mapping.
	 * 4) the process changed the contents of the page of (1)'s mapping.
	 * 5) the process read something in the page of (1)'s mapping.
	 * Fix for #284
	 * Symptom: read() writes data onto the zero page by the following sequence.
	 * (1) A process performs mmap(MAP_PRIVATE|MAP_ANONYMOUS)
	 * (2) The process loads data from the VM range to cause a PF
	 *     to make the PTE point to the zero page.
	 * (3) The process performs write() using the VM range as the source
	 *     to cause a PF on the Linux side to make the PTE point to the zero page.
	 *     Note that we can't make the PTE read-only because [mckernel] pseudo
	 *     file covering the range is created with O_RDWR.
	 * (4) The process stores data to the VM range to cause another PF to perform
	 *     copy-on-write.
	 * (5) The process performs read() using the VM range as the destination.
	 *     However, no PF and hence copy-on-write occurs because of (3).
	 *
	 * In the case of the above sequence,
	 * copy-on-write pages was mapped at (2). And their physical pages
@@ -1793,7 +1883,12 @@ static int do_page_fault_process_vm(struct process_vm *vm, void *fault_addr0, ui
		}
	}

	error = page_fault_process_memory_range(vm, range, fault_addr, reason);
	if (!range->private_data) {
		error = page_fault_process_memory_range(vm, range, fault_addr, reason);
	}
	else {
		error = xpmem_fault_process_memory_range(vm, range, fault_addr, reason);
	}
	if (error == -ERESTART) {
		goto out;
	}
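The five-step sequence in the comment above maps one-to-one onto user-space calls. A minimal repro sketch of that access pattern (the file name and sizes are arbitrary; this illustrates the #284 symptom described in the comment, it is not code from the commit):

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);	/* (1) */
	int fd = open("scratch.bin", O_RDWR | O_CREAT | O_TRUNC, 0600);
	char c = buf[0];			/* (2) load: PTE -> zero page */

	write(fd, buf, len);			/* (3) write() with the range as source */
	memset(buf, 0xaa, len);			/* (4) store: triggers copy-on-write */
	lseek(fd, 0, SEEK_SET);
	read(fd, buf, len);			/* (5) read() with the range as destination */

	/* After the fix, buf must hold the file contents (zeros) rather
	 * than having the read() land on the shared zero page. */
	printf("%d %d\n", c, buf[0]);
	close(fd);
	return 0;
}
```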
@@ -1851,48 +1946,68 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
	unsigned long minsz;
	unsigned long at_rand;
	struct process *proc = thread->proc;
	unsigned long __flag;
	unsigned long ap_flag;

	/* create stack range */
	end = STACK_TOP(&thread->vm->region);
	minsz = PAGE_SIZE;
	size = proc->rlimit[MCK_RLIMIT_STACK].rlim_cur & PAGE_MASK;
	/* Create stack range */
	end = STACK_TOP(&thread->vm->region) & LARGE_PAGE_MASK;
	minsz = (proc->rlimit[MCK_RLIMIT_STACK].rlim_cur
			+ LARGE_PAGE_SIZE - 1) & LARGE_PAGE_MASK;
	size = (proc->rlimit[MCK_RLIMIT_STACK].rlim_max
			+ LARGE_PAGE_SIZE - 1) & LARGE_PAGE_MASK;
	dkprintf("%s: rlim_max: %lu, rlim_cur: %lu\n",
		__FUNCTION__,
		proc->rlimit[MCK_RLIMIT_STACK].rlim_max,
		proc->rlimit[MCK_RLIMIT_STACK].rlim_cur);
	if (size > (USER_END / 2)) {
		size = USER_END / 2;
	}
	else if (size < minsz) {
		size = minsz;
	}
	start = end - size;
	start = (end - size) & LARGE_PAGE_MASK;

	/* Apply user allocation policy to stacks */
	/* TODO: make threshold kernel or mcexec argument */
	ap_flag = (size >= proc->mpol_threshold &&
		!(proc->mpol_flags & MPOL_NO_STACK)) ? IHK_MC_AP_USER : 0;
	dkprintf("%s: max size: %lu, mapped size: %lu %s\n",
		__FUNCTION__, size, minsz,
		ap_flag ? "(IHK_MC_AP_USER)" : "");

	stack = ihk_mc_alloc_aligned_pages_user(minsz >> PAGE_SHIFT,
			LARGE_PAGE_P2ALIGN, IHK_MC_AP_NOWAIT | ap_flag);

	if (!stack) {
		kprintf("%s: error: couldn't allocate initial stack\n",
			__FUNCTION__);
		return -ENOMEM;
	}

	memset(stack, 0, minsz);

	vrflag = VR_STACK | VR_DEMAND_PAGING;
	vrflag |= ((ap_flag & IHK_MC_AP_USER) ? VR_AP_USER : 0);
	vrflag |= PROT_TO_VR_FLAG(pn->stack_prot);
	vrflag |= VR_MAXPROT_READ | VR_MAXPROT_WRITE | VR_MAXPROT_EXEC;
#define NOPHYS ((uintptr_t)-1)
	if ((rc = add_process_memory_range(thread->vm, start, end, NOPHYS,
				vrflag, NULL, 0, PAGE_SHIFT, NULL)) != 0) {
				vrflag, NULL, 0, LARGE_PAGE_SHIFT, NULL)) != 0) {
		ihk_mc_free_pages_user(stack, minsz >> PAGE_SHIFT);
		return rc;
	}

	__flag = (size >= 16777216) ? IHK_MC_AP_USER : 0;
	/* map physical pages for initial stack frame */
	stack = ihk_mc_alloc_pages(minsz >> PAGE_SHIFT,
			IHK_MC_AP_NOWAIT | __flag);

	if (!stack) {
		return -ENOMEM;
	}
	memset(stack, 0, minsz);
	/* Map physical pages for initial stack frame */
	error = ihk_mc_pt_set_range(thread->vm->address_space->page_table,
			thread->vm, (void *)(end-minsz),
			(void *)end, virt_to_phys(stack),
			arch_vrflag_to_ptattr(vrflag, PF_POPULATE,
				NULL), 0);
			thread->vm, (void *)(end - minsz),
			(void *)end, virt_to_phys(stack),
			arch_vrflag_to_ptattr(vrflag, PF_POPULATE, NULL),
			LARGE_PAGE_SHIFT);

	if (error) {
		kprintf("init_process_stack:"
			"set range %lx-%lx %lx failed. %d\n",
			(end-minsz), end, stack, error);
		ihk_mc_free_pages(stack, minsz >> PAGE_SHIFT);
		ihk_mc_free_pages_user(stack, minsz >> PAGE_SHIFT);
		return error;
	}

@@ -1951,111 +2066,57 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
		end + sizeof(unsigned long) * s_ind);
	thread->vm->region.stack_end = end;
	thread->vm->region.stack_start = start;

	return 0;
}


unsigned long extend_process_region(struct process_vm *vm,
		unsigned long start, unsigned long end,
		unsigned long address, unsigned long flag)
		unsigned long end_allocated,
		unsigned long address, unsigned long flag)
{
	unsigned long aligned_end, aligned_new_end;
	unsigned long new_end_allocated;
	void *p;
	int rc;

	if (!address || address < start || address >= USER_END) {
		return end;
	size_t align_size = vm->proc->heap_extension > PAGE_SIZE ?
		LARGE_PAGE_SIZE : PAGE_SIZE;
	unsigned long align_mask = vm->proc->heap_extension > PAGE_SIZE ?
		LARGE_PAGE_MASK : PAGE_MASK;
	unsigned long align_p2align = vm->proc->heap_extension > PAGE_SHIFT ?
		LARGE_PAGE_P2ALIGN : PAGE_P2ALIGN;

	new_end_allocated = (address + (PAGE_SIZE - 1)) & PAGE_MASK;
	if ((new_end_allocated - end_allocated) < vm->proc->heap_extension) {
		new_end_allocated = (end_allocated + vm->proc->heap_extension +
				(align_size - 1)) & align_mask;
	}

	aligned_end = ((end + PAGE_SIZE - 1) & PAGE_MASK);
	if (flag & VR_DEMAND_PAGING) {
		p = 0;
	}
	else {
		p = ihk_mc_alloc_aligned_pages_user(
			(new_end_allocated - end_allocated) >> PAGE_SHIFT,
			align_p2align, IHK_MC_AP_NOWAIT |
			(!(vm->proc->mpol_flags & MPOL_NO_HEAP) ? IHK_MC_AP_USER : 0));

	if (aligned_end >= address) {
		return address;
		if (!p) {
			return end_allocated;
		}
	}

	aligned_new_end = (address + PAGE_SIZE - 1) & PAGE_MASK;

#ifdef USE_LARGE_PAGES
	if (aligned_new_end - aligned_end >= LARGE_PAGE_SIZE) {
		if(flag & VR_DEMAND_PAGING){panic("demand paging for large page is not available!");}
		unsigned long p_aligned;
		unsigned long old_aligned_end = aligned_end;

		if ((aligned_end & (LARGE_PAGE_SIZE - 1)) != 0) {

			aligned_end = (aligned_end + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;
			/* Fill in the gap between old_aligned_end and aligned_end
			 * with regular pages */
			if((p = ihk_mc_alloc_pages((aligned_end - old_aligned_end) >> PAGE_SHIFT,
					IHK_MC_AP_NOWAIT)) == NULL){
				return end;
			}
			if((rc = add_process_memory_range(vm, old_aligned_end,
					aligned_end, virt_to_phys(p), flag,
					LARGE_PAGE_SHIFT, NULL)) != 0){
				ihk_mc_free_pages(p, (aligned_end - old_aligned_end) >> PAGE_SHIFT);
				return end;
			}

			dkprintf("filled in gap for LARGE_PAGE_SIZE aligned start: 0x%lX -> 0x%lX\n",
				old_aligned_end, aligned_end);
		}

		/* Add large region for the actual mapping */
		aligned_new_end = (aligned_new_end + (aligned_end - old_aligned_end) +
				(LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;
		address = aligned_new_end;

		if((p = ihk_mc_alloc_pages((aligned_new_end - aligned_end + LARGE_PAGE_SIZE) >> PAGE_SHIFT,
				IHK_MC_AP_NOWAIT)) == NULL){
			return end;
		}

		p_aligned = ((unsigned long)p + (LARGE_PAGE_SIZE - 1)) & LARGE_PAGE_MASK;

		if (p_aligned > (unsigned long)p) {
			ihk_mc_free_pages(p, (p_aligned - (unsigned long)p) >> PAGE_SHIFT);
		}
		ihk_mc_free_pages(
			(void *)(p_aligned + aligned_new_end - aligned_end),
			(LARGE_PAGE_SIZE - (p_aligned - (unsigned long)p)) >> PAGE_SHIFT);

		if((rc = add_process_memory_range(vm, aligned_end,
				aligned_new_end, virt_to_phys((void *)p_aligned),
				flag, LARGE_PAGE_SHIFT, NULL)) != 0){
			ihk_mc_free_pages(p, (aligned_new_end - aligned_end + LARGE_PAGE_SIZE) >> PAGE_SHIFT);
			return end;
		}

		dkprintf("largePTE area: 0x%lX - 0x%lX (s: %lu) -> 0x%lX - \n",
			aligned_end, aligned_new_end,
			(aligned_new_end - aligned_end),
			virt_to_phys((void *)p_aligned));

		return address;
	}
#endif
	if(flag & VR_DEMAND_PAGING){
		// demand paging no need to allocate page now
		kprintf("demand page do not allocate page\n");
		p=0;
	}else{

		p = ihk_mc_alloc_pages((aligned_new_end - aligned_end) >> PAGE_SHIFT,
				IHK_MC_AP_NOWAIT | IHK_MC_AP_USER);

		if (!p) {
			return end;
		}
	}
	if ((rc = add_process_memory_range(vm, aligned_end, aligned_new_end,
	if ((rc = add_process_memory_range(vm, end_allocated, new_end_allocated,
				(p == 0 ? 0 : virt_to_phys(p)), flag, NULL, 0,
				PAGE_SHIFT, NULL)) != 0) {
		ihk_mc_free_pages(p, (aligned_new_end - aligned_end) >> PAGE_SHIFT);
		return end;
				align_p2align, NULL)) != 0) {
		ihk_mc_free_pages_user(p, (new_end_allocated - end_allocated) >> PAGE_SHIFT);
		return end_allocated;
	}

	return address;
	dkprintf("%s: new_end_allocated: 0x%lu, align_size: %lu, align_mask: %lx\n",
		__FUNCTION__, new_end_allocated, align_size, align_mask);

	return new_end_allocated;
}

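The new heap-extension path above rounds twice: the requested address up to a page boundary, and, when the increment is smaller than `heap_extension`, the whole allocation up to the alignment unit. A standalone sketch of that arithmetic (plain user-space C; the 4 KiB / 2 MiB constants and the sample values are assumptions for illustration, mirroring the kernel macros):

```c
#include <stdio.h>

#define PAGE_SIZE        (1UL << 12)              /* assumed 4 KiB */
#define PAGE_MASK        (~(PAGE_SIZE - 1))
#define LARGE_PAGE_SIZE  (1UL << 21)              /* assumed 2 MiB */
#define LARGE_PAGE_MASK  (~(LARGE_PAGE_SIZE - 1))

/* Mirrors the computation in extend_process_region(): round the requested
 * address up to a page; if the resulting step is smaller than
 * heap_extension, grow by heap_extension rounded to the alignment unit. */
static unsigned long new_end(unsigned long end_allocated,
			     unsigned long address,
			     unsigned long heap_extension)
{
	unsigned long align_size = heap_extension > PAGE_SIZE ?
		LARGE_PAGE_SIZE : PAGE_SIZE;
	unsigned long align_mask = heap_extension > PAGE_SIZE ?
		LARGE_PAGE_MASK : PAGE_MASK;
	unsigned long n = (address + (PAGE_SIZE - 1)) & PAGE_MASK;

	if ((n - end_allocated) < heap_extension)
		n = (end_allocated + heap_extension +
		     (align_size - 1)) & align_mask;
	return n;
}

int main(void)
{
	/* brk() asks for 64 KiB more, but heap_extension is 2 MiB,
	 * so the heap grows by a whole large page: prints 0x800000. */
	printf("%#lx\n", new_end(0x600000, 0x610000, LARGE_PAGE_SIZE));
	return 0;
}
```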
// Original version retained because dcfa (src/mccmd/client/ibmic/main.c) calls this
@@ -2165,10 +2226,17 @@ release_process(struct process *proc)
	}

	if (proc->tids) kfree(proc->tids);
#ifdef TRACK_SYSCALLS
	track_syscalls_print_proc_stats(proc);
	track_syscalls_dealloc_proc_counters(proc);
#endif // TRACK_SYSCALLS
#ifdef PROFILE_ENABLE
	if (proc->profile) {
		if (proc->nr_processes) {
			profile_accumulate_and_print_job_events(proc);
		}
		else {
			profile_print_proc_stats(proc);
		}
	}
	profile_dealloc_proc_events(proc);
#endif // PROFILE_ENABLE
	kfree(proc);
}

@@ -2209,6 +2277,19 @@ release_process_vm(struct process_vm *vm)
		return;
	}

	{
		long irqstate;
		struct mckfd *fdp;

		irqstate = ihk_mc_spinlock_lock(&proc->mckfd_lock);
		for (fdp = proc->mckfd; fdp; fdp = fdp->next) {
			if (fdp->close_cb) {
				fdp->close_cb(fdp, NULL);
			}
		}
		ihk_mc_spinlock_unlock(&proc->mckfd_lock, irqstate);
	}

	if(vm->free_cb)
		vm->free_cb(vm, vm->opt);

@@ -2342,28 +2423,32 @@ void destroy_thread(struct thread *thread)
void release_thread(struct thread *thread)
{
	struct process_vm *vm;
	struct mcs_rwlock_node lock;
	struct mcs_rwlock_node_irqsave lock;
	struct timespec ats;

	if (!ihk_atomic_dec_and_test(&thread->refcount)) {
		return;
	}

	mcs_rwlock_writer_lock_noirq(&thread->proc->update_lock, &lock);
	ts_add(&thread->proc->stime, &thread->stime);
	ts_add(&thread->proc->utime, &thread->utime);
	mcs_rwlock_writer_unlock_noirq(&thread->proc->update_lock, &lock);
	mcs_rwlock_writer_lock(&thread->proc->update_lock, &lock);
	tsc_to_ts(thread->system_tsc, &ats);
	ts_add(&thread->proc->stime, &ats);
	tsc_to_ts(thread->user_tsc, &ats);
	ts_add(&thread->proc->utime, &ats);
	mcs_rwlock_writer_unlock(&thread->proc->update_lock, &lock);

	vm = thread->vm;

#ifdef TRACK_SYSCALLS
	track_syscalls_accumulate_counters(thread, thread->proc);
	//track_syscalls_print_thread_stats(thread);
	track_syscalls_dealloc_thread_counters(thread);
#endif // TRACK_SYSCALLS
#ifdef PROFILE_ENABLE
	profile_accumulate_events(thread, thread->proc);
	//profile_print_thread_stats(thread);
	profile_dealloc_thread_events(thread);
#endif // PROFILE_ENABLE
	procfs_delete_thread(thread);
	destroy_thread(thread);

	release_process_vm(vm);
	rusage_num_threads_dec();
}

void cpu_set(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock)
@@ -2398,6 +2483,7 @@ static void do_migrate(void);
static void idle(void)
{
	struct cpu_local_var *v = get_this_cpu_local_var();
	struct ihk_os_cpu_monitor *monitor = v->monitor;

	/* Release runq_lock before starting the idle loop.
	 * See comments at release_runq_lock().
@@ -2458,8 +2544,11 @@ static void idle(void)
			v->status == CPU_STATUS_RESERVED) {
			/* No work to do? Consolidate the kmalloc free list */
			kmalloc_consolidate_free_list();
			monitor->status = IHK_OS_MONITOR_IDLE;
			cpu_local_var(current)->status = PS_INTERRUPTIBLE;
			cpu_safe_halt();
			monitor->status = IHK_OS_MONITOR_KERNEL;
			monitor->counter++;
			cpu_local_var(current)->status = PS_RUNNING;
		}
		else {
@@ -2627,6 +2716,8 @@ static void do_migrate(void)
	int cpu_id;
	int old_cpu_id;
	struct cpu_local_var *v;
	struct thread *thread;
	int clear_old_cpu = 1;

	/* 0. check if migration is necessary */
	list_del(&req->list);
@@ -2651,11 +2742,28 @@ static void do_migrate(void)
	req->thread->cpu_id = cpu_id;
	list_add_tail(&req->thread->sched_list, &v->runq);
	v->runq_len += 1;

	/* update cpu_set of the VM for remote TLB invalidation */
	cpu_clear_and_set(old_cpu_id, cpu_id,
		&req->thread->vm->address_space->cpu_set,
		&req->thread->vm->address_space->cpu_set_lock);

	/* Find out whether there is another thread of the same process
	 * on the source CPU */
	list_for_each_entry(thread, &(cur_v->runq), sched_list) {
		if (thread->vm && thread->vm == req->thread->vm) {
			clear_old_cpu = 0;
			break;
		}
	}

	/* Update cpu_set of the VM for remote TLB invalidation */
	if (clear_old_cpu) {
		cpu_clear_and_set(old_cpu_id, cpu_id,
			&req->thread->vm->address_space->cpu_set,
			&req->thread->vm->address_space->cpu_set_lock);
	}
	else {
		cpu_set(cpu_id,
			&req->thread->vm->address_space->cpu_set,
			&req->thread->vm->address_space->cpu_set_lock);

	}

	dkprintf("%s: migrated TID %d from CPU %d to CPU %d\n",
		__FUNCTION__, req->thread->tid, old_cpu_id, cpu_id);
@@ -2789,11 +2897,16 @@ redo:
	} else {
		/* Pick a new running process or one that has a pending signal */
		list_for_each_entry_safe(thread, tmp, &(v->runq), sched_list) {
			if (thread->status == PS_RUNNING ||
				(thread->status == PS_INTERRUPTIBLE && hassigpending(thread))) {
			if (thread->status == PS_RUNNING &&
				thread->mod_clone == SPAWNING_TO_REMOTE){
				next = thread;
				break;
			}
			if (thread->status == PS_RUNNING ||
				(thread->status == PS_INTERRUPTIBLE && hassigpending(thread))) {
				if(!next)
					next = thread;
			}
		}

		/* No process? Run idle.. */
@@ -2853,6 +2966,19 @@ redo:
			perf_start(next->proc->monitoring_event);
		}
	}

#ifdef PROFILE_ENABLE
	if (prev->profile && prev->profile_start_ts != 0) {
		prev->profile_elapsed_ts +=
			(rdtsc() - prev->profile_start_ts);
		prev->profile_start_ts = 0;
	}

	if (next->profile && next->profile_start_ts == 0) {
		next->profile_start_ts = rdtsc();
	}
#endif

	if (prev) {
		last = ihk_mc_switch_context(&prev->ctx, &next->ctx, prev);
	}
@@ -3051,6 +3177,8 @@ void runq_add_thread(struct thread *thread, int cpu_id)

	procfs_create_thread(thread);

	rusage_num_threads_inc();

	/* Kick scheduler */
	if (cpu_id != ihk_mc_get_processor_id())
		ihk_mc_interrupt_cpu(
@@ -3156,49 +3284,59 @@ debug_log(unsigned long arg)
	struct resource_set *rset = cpu_local_var(resource_set);
	struct process_hash *phash = rset->process_hash;
	struct thread_hash *thash = rset->thread_hash;
	struct process *pid1 = rset->pid1;
	int found = 0;

	switch(arg){
	case 1:
		for(i = 0; i < HASH_SIZE; i++){
			__mcs_rwlock_reader_lock(&phash->lock[i], &lock);
			list_for_each_entry(p, &phash->list[i], hash_list){
				if (p == pid1)
					continue;
				found++;
				kprintf("pid=%d ppid=%d status=%d\n",
					p->pid, p->ppid_parent->pid, p->status);
			}
			__mcs_rwlock_reader_unlock(&phash->lock[i], &lock);
		}
		kprintf("%d processes are found.\n", found);
		break;
	case 2:
		for(i = 0; i < HASH_SIZE; i++){
			__mcs_rwlock_reader_lock(&thash->lock[i], &lock);
			list_for_each_entry(t, &thash->list[i], hash_list){
				found++;
				kprintf("cpu=%d pid=%d tid=%d status=%d offload=%d\n",
					t->cpu_id, t->proc->pid, t->tid,
					t->status, t->in_syscall_offload);
			}
			__mcs_rwlock_reader_unlock(&thash->lock[i], &lock);
		}
		kprintf("%d threads are found.\n", found);
		break;
	case 3:
		for(i = 0; i < HASH_SIZE; i++){
			if(phash->lock[i].node)
				kprintf("phash[i] is locked\n");
			list_for_each_entry(p, &phash->list[i], hash_list){
				if (p == pid1)
					continue;
				found++;
				kprintf("pid=%d ppid=%d status=%d\n",
					p->pid, p->ppid_parent->pid, p->status);
			}
		}
		kprintf("%d processes are found.\n", found);
		break;
	case 4:
		for(i = 0; i < HASH_SIZE; i++){
			if(thash->lock[i].node)
				kprintf("thash[i] is locked\n");
			list_for_each_entry(t, &thash->list[i], hash_list){
				found++;
				kprintf("cpu=%d pid=%d tid=%d status=%d\n",
					t->cpu_id, t->proc->pid, t->tid,
					t->status);
			}
		}
		kprintf("%d threads are found.\n", found);
		break;
	}
}

@@ -17,8 +17,8 @@
#include <ihk/debug.h>
#include <ihk/ikc.h>
#include <ikc/master.h>
#include <syscall.h>
#include <cls.h>
#include <syscall.h>
#include <kmalloc.h>
#include <process.h>
#include <page.h>
@@ -47,7 +47,7 @@ procfs_thread_ctl(struct thread *thread, int msg)
	struct ihk_ikc_channel_desc *syscall_channel;
	struct ikc_scd_packet packet;

	syscall_channel = cpu_local_var(syscall_channel);
	syscall_channel = cpu_local_var(ikc2linux);
	memset(&packet, '\0', sizeof packet);
	packet.arg = thread->tid;
	packet.msg = msg;
@@ -96,7 +96,7 @@ void process_procfs_request(struct ikc_scd_packet *rpacket)

	dprintf("process_procfs_request: invoked.\n");

	syscall_channel = get_cpu_local_var(0)->syscall_channel;
	syscall_channel = get_cpu_local_var(0)->ikc2linux;

	dprintf("rarg: %x\n", rarg);
	parg = ihk_mc_map_memory(NULL, rarg, sizeof(struct procfs_read));

589 kernel/profile.c (new file)
@@ -0,0 +1,589 @@
/**
 * \file profile.c
 *  License details are found in the file LICENSE.
 *
 * \brief
 *  Profiler code for various process statistics
 * \author Balazs Gerofi <bgerofi@riken.jp>
 *	Copyright (C) 2017 RIKEN AICS
 */

/*
 * HISTORY:
 */

#include <types.h>
#include <kmsg.h>
#include <ihk/cpu.h>
#include <cpulocal.h>
#include <ihk/mm.h>
#include <ihk/debug.h>
#include <ihk/ikc.h>
#include <errno.h>
#include <cls.h>
#include <syscall.h>
#include <page.h>
#include <ihk/lock.h>
#include <ctype.h>
#include <waitq.h>
#include <rlimit.h>
#include <affinity.h>
#include <time.h>
#include <ihk/perfctr.h>
#include <mman.h>
#include <kmalloc.h>
#include <memobj.h>
#include <shm.h>
#include <prio.h>
#include <arch/cpu.h>
#include <limits.h>
#include <march.h>
#include <process.h>

extern char *syscall_name[];

#ifdef PROFILE_ENABLE

//#define DEBUG_PRINT_PROFILE

#ifdef DEBUG_PRINT_PROFILE
#define dkprintf(...) kprintf(__VA_ARGS__)
#define ekprintf(...) kprintf(__VA_ARGS__)
#else
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#define ekprintf(...) kprintf(__VA_ARGS__)
#endif


char *profile_event_names[] =
{
	"remote_tlb_invalidate",
	"page_fault",
	"page_fault_anon_clr_mem",
	"page_fault_file",
	"page_fault_dev_file",
	"page_fault_file_clr_mem",
	"mpol_alloc_missed",
	"mmap_anon_contig_phys",
	"mmap_anon_no_contig_phys",
	"mmap_regular_file",
	"mmap_device_file",
	""
};

mcs_lock_node_t job_profile_lock = {0, NULL};
struct profile_event *job_profile_events = NULL;
int job_nr_processes = -1;
int job_nr_processes_left = -1;
unsigned long job_elapsed_ts;


enum profile_event_type profile_syscall2offload(enum profile_event_type sc)
{
	return (PROFILE_SYSCALL_MAX + sc);
}

void profile_event_add(enum profile_event_type type, uint64_t tsc)
{
	struct profile_event *event = NULL;
	if (!cpu_local_var(current)->profile)
		return;

	if (!cpu_local_var(current)->profile_events) {
		if (profile_alloc_events(cpu_local_var(current)) < 0)
			return;
	}

	if (type < PROFILE_EVENT_MAX) {
		event = &cpu_local_var(current)->profile_events[type];
	}
	else {
		kprintf("%s: WARNING: unknown event type %d\n",
			__FUNCTION__, type);
		return;
	}

	++event->cnt;
	event->tsc += tsc;
}

void profile_print_thread_stats(struct thread *thread)
{
	int i;
	unsigned long flags;

	if (!thread->profile_events)
		return;

	/* Not yet accumulated period? */
	if (thread->profile_start_ts) {
		thread->profile_elapsed_ts += (rdtsc() - thread->profile_start_ts);
	}

	flags = kprintf_lock();

	__kprintf("TID: %4d elapsed cycles (excluding idle): %luk\n",
		thread->tid,
		thread->profile_elapsed_ts / 1000);

	for (i = 0; i < PROFILE_SYSCALL_MAX; ++i) {
		if (!thread->profile_events[i].cnt &&
				!thread->profile_events[i + PROFILE_SYSCALL_MAX].cnt)
			continue;

		__kprintf("TID: %4d (%3d,%20s): %6u %6luk offl: %6u %6luk (%2d.%2d%%)\n",
			thread->tid,
			i,
			syscall_name[i],
			thread->profile_events[i].cnt,
			(thread->profile_events[i].tsc /
			 (thread->profile_events[i].cnt ?
			  thread->profile_events[i].cnt : 1))
			/ 1000,
			thread->profile_events[i + PROFILE_SYSCALL_MAX].cnt,
			(thread->profile_events[i + PROFILE_SYSCALL_MAX].tsc /
			 (thread->profile_events[i + PROFILE_SYSCALL_MAX].cnt ?
			  thread->profile_events[i + PROFILE_SYSCALL_MAX].cnt : 1))
			/ 1000,
			(thread->profile_events[i].tsc ?
			 thread->profile_events[i].tsc * 100
			 / thread->profile_elapsed_ts : 0),
			(thread->profile_events[i].tsc ?
			 (thread->profile_events[i].tsc * 10000
			  / thread->profile_elapsed_ts) % 100 : 0)
			);
	}

	for (i = PROFILE_EVENT_MIN; i < PROFILE_EVENT_MAX; ++i) {

		if (!thread->profile_events[i].cnt)
			continue;

		__kprintf("TID: %4d (%24s): %6u %6luk \n",
			thread->tid,
			profile_event_names[i - PROFILE_EVENT_MIN],
			thread->profile_events[i].cnt,
			(thread->profile_events[i].tsc /
			 (thread->profile_events[i].cnt ?
			  thread->profile_events[i].cnt : 1))
			/ 1000,
			(thread->profile_events[i].tsc ?
			 thread->profile_events[i].tsc * 100
			 / thread->profile_elapsed_ts : 0),
			(thread->profile_events[i].tsc ?
			 (thread->profile_events[i].tsc * 10000
			  / thread->profile_elapsed_ts) % 100 : 0)
			);
	}


	kprintf_unlock(flags);
}

void profile_print_proc_stats(struct process *proc)
{
	int i;
	unsigned long flags;

	if (!proc->profile_events || !proc->profile_elapsed_ts)
		return;

	flags = kprintf_lock();
	__kprintf("PID: %4d elapsed cycles for all threads (excluding idle): %luk\n",
		proc->pid,
		proc->profile_elapsed_ts / 1000);

	for (i = 0; i < PROFILE_SYSCALL_MAX; ++i) {
		if (!proc->profile_events[i].cnt &&
				!proc->profile_events[i + PROFILE_SYSCALL_MAX].cnt)
			continue;

		__kprintf("PID: %4d (%3d,%20s): %6u %6luk offl: %6u %6luk (%2d.%2d%%)\n",
			proc->pid,
			i,
			syscall_name[i],
			proc->profile_events[i].cnt,
			(proc->profile_events[i].tsc /
			 (proc->profile_events[i].cnt ?
			  proc->profile_events[i].cnt : 1))
			/ 1000,
			proc->profile_events[i + PROFILE_SYSCALL_MAX].cnt,
			(proc->profile_events[i + PROFILE_SYSCALL_MAX].tsc /
			 (proc->profile_events[i + PROFILE_SYSCALL_MAX].cnt ?
			  proc->profile_events[i + PROFILE_SYSCALL_MAX].cnt : 1))
			/ 1000,
			(proc->profile_events[i].tsc ?
			 proc->profile_events[i].tsc * 100
			 / proc->profile_elapsed_ts : 0),
			(proc->profile_events[i].tsc ?
			 (proc->profile_events[i].tsc * 10000
			  / proc->profile_elapsed_ts) % 100 : 0)
			);
	}

	for (i = PROFILE_EVENT_MIN; i < PROFILE_EVENT_MAX; ++i) {

		if (!proc->profile_events[i].cnt)
			continue;

		__kprintf("PID: %4d (%24s): %6u %6luk \n",
			proc->pid,
			profile_event_names[i - PROFILE_EVENT_MIN],
			proc->profile_events[i].cnt,
			(proc->profile_events[i].tsc /
			 (proc->profile_events[i].cnt ?
			  proc->profile_events[i].cnt : 1))
			/ 1000,
			(proc->profile_events[i].tsc &&
			 proc->profile_elapsed_ts ?
			 proc->profile_events[i].tsc * 100
			 / proc->profile_elapsed_ts : 0),
			(proc->profile_events[i].tsc &&
			 proc->profile_elapsed_ts ?
			 (proc->profile_events[i].tsc * 10000
			  / proc->profile_elapsed_ts) % 100 : 0)
			);
	}

	kprintf_unlock(flags);
}

int profile_accumulate_and_print_job_events(struct process *proc)
{
	int i;
	unsigned long flags;
	struct mcs_lock_node mcs_node;

	mcs_lock_lock(&job_profile_lock, &mcs_node);

	/* First process? */
	if (job_nr_processes == -1) {
		job_nr_processes = proc->nr_processes;
		job_nr_processes_left = proc->nr_processes;
		job_elapsed_ts = 0;
	}

	--job_nr_processes_left;

	/* Allocate event counters */
	if (!job_profile_events) {

		job_profile_events = kmalloc(sizeof(*job_profile_events) *
				PROFILE_EVENT_MAX, IHK_MC_AP_NOWAIT);

		if (!job_profile_events) {
			kprintf("%s: ERROR: allocating job profile counters\n",
				__FUNCTION__);
			return -ENOMEM;
		}

		memset(job_profile_events, 0,
				sizeof(*job_profile_events) * PROFILE_EVENT_MAX);
	}

	/* Accumulate process */
	for (i = 0; i < PROFILE_EVENT_MAX; ++i) {
		if (!proc->profile_events[i].tsc)
			continue;

		job_profile_events[i].tsc += proc->profile_events[i].tsc;
		job_profile_events[i].cnt += proc->profile_events[i].cnt;
		proc->profile_events[i].tsc = 0;
		proc->profile_events[i].cnt = 0;
	}

	job_elapsed_ts += proc->profile_elapsed_ts;

	/* Last process? */
	if (job_nr_processes_left == 0) {
		flags = kprintf_lock();
		__kprintf("JOB: (%2d) elapsed cycles for all threads (excluding idle): %luk\n",
			job_nr_processes,
			job_elapsed_ts / 1000);

		for (i = 0; i < PROFILE_SYSCALL_MAX; ++i) {
			if (!job_profile_events[i].cnt &&
					!job_profile_events[i + PROFILE_SYSCALL_MAX].cnt)
				continue;

			__kprintf("JOB: (%2d) (%3d,%20s): %6u %6luk offl: %6u %6luk (%2d.%2d%%)\n",
				job_nr_processes,
				i,
				syscall_name[i],
				job_profile_events[i].cnt,
				(job_profile_events[i].tsc /
				 (job_profile_events[i].cnt ?
				  job_profile_events[i].cnt : 1))
				/ 1000,
				job_profile_events[i + PROFILE_SYSCALL_MAX].cnt,
				(job_profile_events[i + PROFILE_SYSCALL_MAX].tsc /
				 (job_profile_events[i + PROFILE_SYSCALL_MAX].cnt ?
				  job_profile_events[i + PROFILE_SYSCALL_MAX].cnt : 1))
				/ 1000,
				(job_profile_events[i].tsc ?
				 job_profile_events[i].tsc * 100
				 / job_elapsed_ts : 0),
				(job_profile_events[i].tsc ?
				 (job_profile_events[i].tsc * 10000
				  / job_elapsed_ts) % 100 : 0)
				);

			job_profile_events[i].tsc = 0;
			job_profile_events[i].cnt = 0;
			job_profile_events[i + PROFILE_SYSCALL_MAX].tsc = 0;
			job_profile_events[i + PROFILE_SYSCALL_MAX].cnt = 0;
		}

		for (i = PROFILE_EVENT_MIN; i < PROFILE_EVENT_MAX; ++i) {

			if (!job_profile_events[i].cnt)
				continue;

			__kprintf("JOB: (%2d) (%24s): %6u %6luk \n",
				job_nr_processes,
				profile_event_names[i - PROFILE_EVENT_MIN],
				job_profile_events[i].cnt,
				(job_profile_events[i].tsc /
				 (job_profile_events[i].cnt ?
				  job_profile_events[i].cnt : 1))
				/ 1000);

			job_profile_events[i].tsc = 0;
			job_profile_events[i].cnt = 0;
		}

		kprintf_unlock(flags);

		/* Reset job process indicators */
		job_nr_processes = -1;
		job_nr_processes_left = -1;
		job_elapsed_ts = 0;
	}

	mcs_lock_unlock(&job_profile_lock, &mcs_node);

	return 0;
}

void profile_accumulate_events(struct thread *thread,
		struct process *proc)
{
	int i;
	struct mcs_lock_node mcs_node;

	if (!thread->profile_events || !proc->profile_events) return;

	mcs_lock_lock(&proc->profile_lock, &mcs_node);

	for (i = 0; i < PROFILE_EVENT_MAX; ++i) {
		proc->profile_events[i].tsc += thread->profile_events[i].tsc;
		proc->profile_events[i].cnt += thread->profile_events[i].cnt;
		thread->profile_events[i].tsc = 0;
		thread->profile_events[i].cnt = 0;
	}

	proc->profile_elapsed_ts += thread->profile_elapsed_ts;
	if (thread->profile_start_ts) {
		proc->profile_elapsed_ts +=
			(rdtsc() - thread->profile_start_ts);
	}

	mcs_lock_unlock(&proc->profile_lock, &mcs_node);
}

int profile_alloc_events(struct thread *thread)
{
	struct process *proc = thread->proc;
	struct mcs_lock_node mcs_node;

	if (!thread->profile_events) {
		thread->profile_events = kmalloc(sizeof(*thread->profile_events) *
				PROFILE_EVENT_MAX, IHK_MC_AP_NOWAIT);

		if (!thread->profile_events) {
			kprintf("%s: ERROR: allocating thread private profile counters\n",
				__FUNCTION__);
			return -ENOMEM;
		}

		memset(thread->profile_events, 0,
				sizeof(*thread->profile_events) * PROFILE_EVENT_MAX);
	}

	mcs_lock_lock(&proc->profile_lock, &mcs_node);
	if (!proc->profile_events) {
		proc->profile_events = kmalloc(sizeof(*proc->profile_events) *
				PROFILE_EVENT_MAX, IHK_MC_AP_NOWAIT);

		if (!proc->profile_events) {
			kprintf("%s: ERROR: allocating proc private profile counters\n",
				__FUNCTION__);
			mcs_lock_unlock(&proc->profile_lock, &mcs_node);
			return -ENOMEM;
		}

		memset(proc->profile_events, 0,
				sizeof(*thread->profile_events) * PROFILE_EVENT_MAX);

	}
	mcs_lock_unlock(&proc->profile_lock, &mcs_node);

	return 0;
}

void profile_dealloc_thread_events(struct thread *thread)
{
	kfree(thread->profile_events);
}

void profile_dealloc_proc_events(struct process *proc)
{
	kfree(proc->profile_events);
}

void static profile_clear_process(struct process *proc)
{
	proc->profile_elapsed_ts = 0;
	if (!proc->profile_events) return;

	memset(proc->profile_events, 0,
			sizeof(*proc->profile_events) * PROFILE_EVENT_MAX);
}

void static profile_clear_thread(struct thread *thread)
{
	thread->profile_start_ts = 0;
	thread->profile_elapsed_ts = 0;
	if (!thread->profile_events) return;

	memset(thread->profile_events, 0,
			sizeof(*thread->profile_events) * PROFILE_EVENT_MAX);
}

int do_profile(int flag)
{
	struct thread *thread = cpu_local_var(current);
	struct process *proc = thread->proc;
	unsigned long now_ts = rdtsc();

	/* Job level? */
	if (flag & PROF_JOB) {
		dkprintf("%s: JOB %d, flag: 0x%lx\n",
			__FUNCTION__, proc->nr_processes, flag);
		if (flag & PROF_PRINT) {
			struct mcs_rwlock_node lock;
			struct thread *_thread;

			/* Accumulate events from all threads to process level */
			mcs_rwlock_reader_lock_noirq(&proc->threads_lock, &lock);
			list_for_each_entry(_thread, &proc->threads_list,
					siblings_list) {
				profile_accumulate_events(_thread, proc);
			}
			mcs_rwlock_reader_unlock_noirq(&proc->threads_lock, &lock);

			/* Accumulate events to job level */
			return profile_accumulate_and_print_job_events(proc);
		}
	}
	/* Process level? */
	else if (flag & PROF_PROC) {
		struct mcs_rwlock_node lock;
		struct thread *_thread;

		dkprintf("%s: PID %d, flag: 0x%lx\n",
			__FUNCTION__, proc->pid, flag);
		/* Accumulate events from all threads */
		mcs_rwlock_reader_lock_noirq(&proc->threads_lock, &lock);

		list_for_each_entry(_thread, &proc->threads_list,
				siblings_list) {
			if (flag & PROF_PRINT) {
				profile_accumulate_events(_thread, proc);
			}

			if (flag & PROF_CLEAR) {
				profile_clear_thread(_thread);
			}

			if (flag & PROF_ON) {
				_thread->profile = 1;
			}
			else if (flag & PROF_OFF) {
				if (_thread->profile) {
					_thread->profile = 0;
					if (_thread->profile_start_ts) {
						_thread->profile_elapsed_ts +=
							(now_ts - _thread->profile_start_ts);
					}
					_thread->profile_start_ts = 0;
				}
			}
		}

		mcs_rwlock_reader_unlock_noirq(&proc->threads_lock, &lock);

		if (flag & PROF_PRINT) {
			profile_print_proc_stats(proc);
		}

		if (flag & PROF_CLEAR) {
			profile_clear_process(proc);
		}

		/* Make sure future threads profile as well */
		if (flag & PROF_ON) {
			if (!proc->profile) {
				proc->profile = 1;
			}
		}
		else if (flag & PROF_OFF) {
			proc->profile = 0;
		}
	}
	/* Thread level */
	else {
		dkprintf("%s: TID %d, flag: 0x%lx\n",
			__FUNCTION__, thread->tid, flag);
		if (flag & PROF_PRINT) {
			profile_print_thread_stats(thread);
		}

		if (flag & PROF_CLEAR) {
			profile_clear_thread(thread);
			/* If profiling, reset start and elapsed */
			if (thread->profile) {
				thread->profile_start_ts = 0;
				thread->profile_elapsed_ts = 0;
			}
		}

		if (flag & PROF_ON) {
			if (!thread->profile) {
				thread->profile = 1;
				thread->profile_start_ts = 0;
			}
		}
		else if (flag & PROF_OFF) {
			if (thread->profile) {
				thread->profile = 0;
				if (thread->profile_start_ts) {
					thread->profile_elapsed_ts +=
						(now_ts - thread->profile_start_ts);
				}
				thread->profile_start_ts = 0;
			}
		}
	}

	return 0;
}

SYSCALL_DECLARE(profile)
{
	int flag = (int)ihk_mc_syscall_arg0(ctx);
	return do_profile(flag);
}

#endif // PROFILE_ENABLE
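`do_profile()` above is driven entirely by its flag word: PROF_ON and PROF_OFF toggle collection, PROF_PRINT dumps the accumulated counters, PROF_CLEAR resets them, and PROF_PROC or PROF_JOB widen the scope from the calling thread to the whole process or job. A hedged user-space sketch of driving the syscall (the syscall number and flag values below are placeholders; only the flag names come from the code above, the real definitions live in McKernel's headers):

```c
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Assumed values for illustration only. */
#define __NR_profile	601	/* hypothetical syscall number */
#define PROF_ON		0x01
#define PROF_OFF	0x02
#define PROF_PRINT	0x04
#define PROF_CLEAR	0x08
#define PROF_PROC	0x10

int main(void)
{
	volatile unsigned long i, sum = 0;

	/* Clear counters and start profiling every thread of this process. */
	syscall(__NR_profile, PROF_PROC | PROF_CLEAR | PROF_ON);

	for (i = 0; i < 1000000; i++)
		sum += i;	/* stand-in workload being measured */

	/* Stop, fold per-thread counters into the process, and print the
	 * per-syscall/offload statistics to the kernel message buffer. */
	syscall(__NR_profile, PROF_PROC | PROF_OFF | PROF_PRINT);

	printf("%lu\n", sum);
	return 0;
}
```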
561 kernel/rbtree.c (new file)
@@ -0,0 +1,561 @@
/*
  Red Black Trees
  (C) 1999  Andrea Arcangeli <andrea@suse.de>
  (C) 2002  David Woodhouse <dwmw2@infradead.org>
  (C) 2012  Michel Lespinasse <walken@google.com>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

  linux/lib/rbtree.c
*/

#include <rbtree_augmented.h>

#define EXPORT_SYMBOL(x)

/*
 * red-black trees properties:  http://en.wikipedia.org/wiki/Rbtree
 *
 *  1) A node is either red or black
 *  2) The root is black
 *  3) All leaves (NULL) are black
 *  4) Both children of every red node are black
 *  5) Every simple path from root to leaves contains the same number
 *     of black nodes.
 *
 *  4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
 *  consecutive red nodes in a path and every red node is therefore followed by
 *  a black. So if B is the number of black nodes on every simple path (as per
 *  5), then the longest possible path due to 4 is 2B.
 *
 *  We shall indicate color with case, where black nodes are uppercase and red
 *  nodes will be lowercase. Unknown color nodes shall be drawn as red within
 *  parentheses and have some accompanying text comment.
 */

static inline void rb_set_black(struct rb_node *rb)
{
	rb->__rb_parent_color |= RB_BLACK;
}

static inline struct rb_node *rb_red_parent(struct rb_node *red)
{
	return (struct rb_node *)red->__rb_parent_color;
}

/*
 * Helper function for rotations:
 * - old's parent and color get assigned to new
 * - old gets assigned new as a parent and 'color' as a color.
 */
static inline void
__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
			struct rb_root *root, int color)
{
	struct rb_node *parent = rb_parent(old);
	new->__rb_parent_color = old->__rb_parent_color;
	rb_set_parent_color(old, new, color);
	__rb_change_child(old, new, parent, root);
}

static __always_inline void
__rb_insert(struct rb_node *node, struct rb_root *root,
	    void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
	struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;

	while (true) {
		/*
		 * Loop invariant: node is red
		 *
		 * If there is a black parent, we are done.
		 * Otherwise, take some corrective action as we don't
		 * want a red root or two consecutive red nodes.
		 */
		if (!parent) {
			rb_set_parent_color(node, NULL, RB_BLACK);
			break;
		} else if (rb_is_black(parent))
			break;

		gparent = rb_red_parent(parent);

		tmp = gparent->rb_right;
		if (parent != tmp) {	/* parent == gparent->rb_left */
			if (tmp && rb_is_red(tmp)) {
				/*
				 * Case 1 - color flips
				 *
				 *       G            g
				 *      / \          / \
				 *     p   u  -->   P   U
				 *    /            /
				 *   n            N
				 *
				 * However, since g's parent might be red, and
				 * 4) does not allow this, we need to recurse
				 * at g.
				 */
				rb_set_parent_color(tmp, gparent, RB_BLACK);
				rb_set_parent_color(parent, gparent, RB_BLACK);
				node = gparent;
				parent = rb_parent(node);
				rb_set_parent_color(node, parent, RB_RED);
				continue;
			}

			tmp = parent->rb_right;
			if (node == tmp) {
				/*
				 * Case 2 - left rotate at parent
				 *
				 *      G             G
				 *     / \           / \
				 *    p   U  -->    n   U
				 *     \           /
				 *      n         p
				 *
				 * This still leaves us in violation of 4), the
				 * continuation into Case 3 will fix that.
				 */
				parent->rb_right = tmp = node->rb_left;
				node->rb_left = parent;
				if (tmp)
					rb_set_parent_color(tmp, parent,
							    RB_BLACK);
				rb_set_parent_color(parent, node, RB_RED);
				augment_rotate(parent, node);
				parent = node;
				tmp = node->rb_right;
			}

			/*
			 * Case 3 - right rotate at gparent
			 *
			 *        G           P
			 *       / \         / \
			 *      p   U  -->  n   g
			 *     /                 \
			 *    n                   U
			 */
			gparent->rb_left = tmp;	/* == parent->rb_right */
			parent->rb_right = gparent;
			if (tmp)
				rb_set_parent_color(tmp, gparent, RB_BLACK);
			__rb_rotate_set_parents(gparent, parent, root, RB_RED);
			augment_rotate(gparent, parent);
			break;
		} else {
			tmp = gparent->rb_left;
			if (tmp && rb_is_red(tmp)) {
				/* Case 1 - color flips */
				rb_set_parent_color(tmp, gparent, RB_BLACK);
				rb_set_parent_color(parent, gparent, RB_BLACK);
				node = gparent;
				parent = rb_parent(node);
				rb_set_parent_color(node, parent, RB_RED);
				continue;
			}

			tmp = parent->rb_left;
			if (node == tmp) {
				/* Case 2 - right rotate at parent */
				parent->rb_left = tmp = node->rb_right;
				node->rb_right = parent;
				if (tmp)
					rb_set_parent_color(tmp, parent,
							    RB_BLACK);
				rb_set_parent_color(parent, node, RB_RED);
				augment_rotate(parent, node);
				parent = node;
				tmp = node->rb_left;
			}

			/* Case 3 - left rotate at gparent */
			gparent->rb_right = tmp;	/* == parent->rb_left */
			parent->rb_left = gparent;
			if (tmp)
				rb_set_parent_color(tmp, gparent, RB_BLACK);
			__rb_rotate_set_parents(gparent, parent, root, RB_RED);
			augment_rotate(gparent, parent);
			break;
		}
	}
}

/*
 * Inline version for rb_erase() use - we want to be able to inline
 * and eliminate the dummy_rotate callback there
 */
static __always_inline void
____rb_erase_color(struct rb_node *parent, struct rb_root *root,
	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
	struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;

	while (true) {
		/*
		 * Loop invariants:
		 * - node is black (or NULL on first iteration)
		 * - node is not the root (parent is not NULL)
		 * - All leaf paths going through parent and node have a
		 *   black node count that is 1 lower than other leaf paths.
		 */
		sibling = parent->rb_right;
		if (node != sibling) {	/* node == parent->rb_left */
			if (rb_is_red(sibling)) {
				/*
				 * Case 1 - left rotate at parent
				 *
				 *     P               S
				 *    / \             / \
				 *   N   s    -->    p   Sr
				 *      / \         / \
				 *     Sl  Sr      N   Sl
				 */
				parent->rb_right = tmp1 = sibling->rb_left;
				sibling->rb_left = parent;
				rb_set_parent_color(tmp1, parent, RB_BLACK);
				__rb_rotate_set_parents(parent, sibling, root,
							RB_RED);
				augment_rotate(parent, sibling);
				sibling = tmp1;
			}
			tmp1 = sibling->rb_right;
			if (!tmp1 || rb_is_black(tmp1)) {
				tmp2 = sibling->rb_left;
				if (!tmp2 || rb_is_black(tmp2)) {
					/*
					 * Case 2 - sibling color flip
					 * (p could be either color here)
					 *
					 *    (p)           (p)
					 *    / \           / \
					 *   N   S    -->  N   s
					 *      / \           / \
					 *     Sl  Sr        Sl  Sr
					 *
					 * This leaves us violating 5) which
					 * can be fixed by flipping p to black
					 * if it was red, or by recursing at p.
					 * p is red when coming from Case 1.
					 */
					rb_set_parent_color(sibling, parent,
							    RB_RED);
					if (rb_is_red(parent))
						rb_set_black(parent);
					else {
						node = parent;
						parent = rb_parent(node);
						if (parent)
							continue;
					}
					break;
				}
				/*
				 * Case 3 - right rotate at sibling
				 * (p could be either color here)
				 *
				 *   (p)           (p)
				 *   / \           / \
				 *  N   S    -->  N   Sl
				 *     / \             \
				 *    sl  Sr            s
				 *                       \
				 *                        Sr
				 */
				sibling->rb_left = tmp1 = tmp2->rb_right;
				tmp2->rb_right = sibling;
				parent->rb_right = tmp2;
				if (tmp1)
					rb_set_parent_color(tmp1, sibling,
							    RB_BLACK);
				augment_rotate(sibling, tmp2);
				tmp1 = sibling;
				sibling = tmp2;
			}
			/*
			 * Case 4 - left rotate at parent + color flips
			 * (p and sl could be either color here.
			 *  After rotation, p becomes black, s acquires
			 *  p's color, and sl keeps its color)
			 *
			 *      (p)             (s)
			 *      / \             / \
			 *     N   S    -->    P   Sr
			 *        / \         / \
			 *      (sl) sr      N  (sl)
			 */
			parent->rb_right = tmp2 = sibling->rb_left;
			sibling->rb_left = parent;
			rb_set_parent_color(tmp1, sibling, RB_BLACK);
			if (tmp2)
				rb_set_parent(tmp2, parent);
			__rb_rotate_set_parents(parent, sibling, root,
						RB_BLACK);
			augment_rotate(parent, sibling);
			break;
		} else {
			sibling = parent->rb_left;
			if (rb_is_red(sibling)) {
				/* Case 1 - right rotate at parent */
				parent->rb_left = tmp1 = sibling->rb_right;
				sibling->rb_right = parent;
				rb_set_parent_color(tmp1, parent, RB_BLACK);
				__rb_rotate_set_parents(parent, sibling, root,
							RB_RED);
				augment_rotate(parent, sibling);
				sibling = tmp1;
			}
			tmp1 = sibling->rb_left;
			if (!tmp1 || rb_is_black(tmp1)) {
				tmp2 = sibling->rb_right;
				if (!tmp2 || rb_is_black(tmp2)) {
					/* Case 2 - sibling color flip */
					rb_set_parent_color(sibling, parent,
							    RB_RED);
					if (rb_is_red(parent))
						rb_set_black(parent);
					else {
						node = parent;
						parent = rb_parent(node);
						if (parent)
							continue;
					}
					break;
				}
				/* Case 3 - right rotate at sibling */
				sibling->rb_right = tmp1 = tmp2->rb_left;
				tmp2->rb_left = sibling;
				parent->rb_left = tmp2;
				if (tmp1)
					rb_set_parent_color(tmp1, sibling,
							    RB_BLACK);
				augment_rotate(sibling, tmp2);
				tmp1 = sibling;
				sibling = tmp2;
			}
			/* Case 4 - left rotate at parent + color flips */
			parent->rb_left = tmp2 = sibling->rb_right;
			sibling->rb_right = parent;
			rb_set_parent_color(tmp1, sibling, RB_BLACK);
			if (tmp2)
				rb_set_parent(tmp2, parent);
			__rb_rotate_set_parents(parent, sibling, root,
						RB_BLACK);
			augment_rotate(parent, sibling);
			break;
		}
	}
}

/* Non-inline version for rb_erase_augmented() use */
void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
	____rb_erase_color(parent, root, augment_rotate);
}
EXPORT_SYMBOL(__rb_erase_color);

/*
 * Non-augmented rbtree manipulation functions.
 *
 * We use dummy augmented callbacks here, and have the compiler optimize them
 * out of the rb_insert_color() and rb_erase() function definitions.
 */

static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}

static const struct rb_augment_callbacks dummy_callbacks = {
	dummy_propagate, dummy_copy, dummy_rotate
};

void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
	__rb_insert(node, root, dummy_rotate);
}
EXPORT_SYMBOL(rb_insert_color);

void rb_erase(struct rb_node *node, struct rb_root *root)
{
	struct rb_node *rebalance;
	rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
	if (rebalance)
		____rb_erase_color(rebalance, root, dummy_rotate);
}
EXPORT_SYMBOL(rb_erase);

/*
 * Augmented rbtree manipulation functions.
 *
 * This instantiates the same __always_inline functions as in the non-augmented
 * case, but this time with user-defined callbacks.
 */

void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
	__rb_insert(node, root, augment_rotate);
}
EXPORT_SYMBOL(__rb_insert_augmented);

/*
 * This function returns the first node (in sort order) of the tree.
 */
struct rb_node *rb_first(const struct rb_root *root)
{
	struct rb_node *n;

	n = root->rb_node;
	if (!n)
		return NULL;
	while (n->rb_left)
		n = n->rb_left;
	return n;
}
EXPORT_SYMBOL(rb_first);

struct rb_node *rb_last(const struct rb_root *root)
{
	struct rb_node *n;

	n = root->rb_node;
	if (!n)
		return NULL;
	while (n->rb_right)
		n = n->rb_right;
	return n;
}
EXPORT_SYMBOL(rb_last);

struct rb_node *rb_next(const struct rb_node *node)
{
	struct rb_node *parent;

	if (RB_EMPTY_NODE(node))
		return NULL;

	/*
	 * If we have a right-hand child, go down and then left as far
	 * as we can.
	 */
	if (node->rb_right) {
		node = node->rb_right;
		while (node->rb_left)
			node=node->rb_left;
		return (struct rb_node *)node;
	}

	/*
	 * No right-hand children. Everything down and left is smaller than us,
	 * so any 'next' node must be in the general direction of our parent.
	 * Go up the tree; any time the ancestor is a right-hand child of its
	 * parent, keep going up. First time it's a left-hand child of its
	 * parent, said parent is our 'next' node.
	 */
	while ((parent = rb_parent(node)) && node == parent->rb_right)
		node = parent;

	return parent;
}
EXPORT_SYMBOL(rb_next);

struct rb_node *rb_prev(const struct rb_node *node)
{
	struct rb_node *parent;

	if (RB_EMPTY_NODE(node))
		return NULL;

	/*
	 * If we have a left-hand child, go down and then right as far
	 * as we can.
	 */
	if (node->rb_left) {
		node = node->rb_left;
		while (node->rb_right)
			node=node->rb_right;
		return (struct rb_node *)node;
	}

	/*
	 * No left-hand children. Go up till we find an ancestor which
	 * is a right-hand child of its parent.
	 */
	while ((parent = rb_parent(node)) && node == parent->rb_left)
		node = parent;

	return parent;
}
EXPORT_SYMBOL(rb_prev);

void rb_replace_node(struct rb_node *victim, struct rb_node *new,
		     struct rb_root *root)
{
	struct rb_node *parent = rb_parent(victim);

	/* Set the surrounding nodes to point to the replacement */
	__rb_change_child(victim, new, parent, root);
	if (victim->rb_left)
		rb_set_parent(victim->rb_left, new);
	if (victim->rb_right)
		rb_set_parent(victim->rb_right, new);

	/* Copy the pointers/colour from the victim to the replacement */
	*new = *victim;
}
EXPORT_SYMBOL(rb_replace_node);

static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
{
	for (;;) {
		if (node->rb_left)
			node = node->rb_left;
		else if (node->rb_right)
			node = node->rb_right;
		else
			return (struct rb_node *)node;
	}
}

struct rb_node *rb_next_postorder(const struct rb_node *node)
{
	const struct rb_node *parent;
	if (!node)
		return NULL;
	parent = rb_parent(node);

	/* If we're sitting on node, we've already seen our children */
	if (parent && node == parent->rb_left && parent->rb_right) {
		/* If we are the parent's left node, go to the parent's right
		 * node then all the way down to the left */
		return rb_left_deepest_node(parent->rb_right);
	} else
		/* Otherwise we are the parent's right node, and the parent
		 * should be next */
		return (struct rb_node *)parent;
}
EXPORT_SYMBOL(rb_next_postorder);

struct rb_node *rb_first_postorder(const struct rb_root *root)
{
	if (!root->rb_node)
		return NULL;

	return rb_left_deepest_node(root->rb_node);
}
EXPORT_SYMBOL(rb_first_postorder);
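Callers use this file through the usual Linux rbtree pattern: walk down to the insertion slot, link the node there, then let `rb_insert_color()` rebalance. A short usage sketch, assuming the accompanying `rbtree.h` provides `RB_ROOT`, `rb_link_node()`, and `rb_entry()` as in the Linux original (the `mynode` type is illustrative):

```c
#include <stddef.h>
#include <rbtree.h>	/* assumed header matching this rbtree.c */

struct mynode {
	unsigned long key;
	struct rb_node rb;
};

/* Canonical insertion: find the NULL link for the key, link the new
 * node as red, then have rb_insert_color() restore the invariants. */
static void mynode_insert(struct rb_root *root, struct mynode *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct mynode *cur = rb_entry(*link, struct mynode, rb);

		parent = *link;
		link = (new->key < cur->key) ?
			&(*link)->rb_left : &(*link)->rb_right;
	}
	rb_link_node(&new->rb, parent, link);
	rb_insert_color(&new->rb, root);
}

/* rb_first()/rb_next() then yield the keys in ascending order. */
static unsigned long mynode_min(struct rb_root *root)
{
	struct rb_node *first = rb_first(root);

	return first ? rb_entry(first, struct mynode, rb)->key : 0;
}
```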
@@ -256,7 +256,7 @@ void shmobj_destroy(struct shmobj *obj)
	}

	if (page_unmap(page)) {
		ihk_mc_free_pages(page_va, npages);
		ihk_mc_free_pages_user(page_va, npages);
	}
#if 0
	dkprintf("shmobj_destroy(%p):"
@@ -406,7 +406,7 @@ static int shmobj_get_page(struct memobj *memobj, off_t off, int p2align,
	page = page_list_lookup(obj, off);
	if (!page) {
		npages = 1 << p2align;
		virt = ihk_mc_alloc_aligned_pages(npages, p2align,
		virt = ihk_mc_alloc_aligned_pages_user(npages, p2align,
				IHK_MC_AP_NOWAIT);
		if (!virt) {
			error = -ENOMEM;
@@ -443,7 +443,7 @@ static int shmobj_get_page(struct memobj *memobj, off_t off, int p2align,
out:
	memobj_unlock(&obj->memobj);
	if (virt) {
		ihk_mc_free_pages(virt, npages);
		ihk_mc_free_pages_user(virt, npages);
	}
	dkprintf("shmobj_get_page(%p,%#lx,%d,%p):%d\n",
		memobj, off, p2align, physp, error);
@@ -467,7 +467,8 @@ static int shmobj_invalidate_page(struct memobj *memobj, uintptr_t phys,

	if (ihk_atomic_read(&page->count) == 1) {
		if (page_unmap(page)) {
			ihk_mc_free_pages(phys_to_virt(phys), pgsize/PAGE_SIZE);
			ihk_mc_free_pages_user(phys_to_virt(phys),
					pgsize/PAGE_SIZE);
		}
	}

1505 kernel/syscall.c
File diff suppressed because it is too large
@@ -113,7 +113,7 @@ sysfs_createf(struct sysfs_ops *ops, void *instance, int mode,
	packet.msg = SCD_MSG_SYSFS_REQ_CREATE;
	packet.sysfs_arg1 = virt_to_phys(param);

	error = ihk_ikc_send(cpu_local_var(syscall_channel), &packet, 0);
	error = ihk_ikc_send(cpu_local_var(ikc2linux), &packet, 0);
	if (error) {
		ekprintf("sysfs_createf:ihk_ikc_send failed. %d\n", error);
		goto out;
@@ -183,7 +183,7 @@ sysfs_mkdirf(sysfs_handle_t *dirhp, const char *fmt, ...)
	packet.msg = SCD_MSG_SYSFS_REQ_MKDIR;
	packet.sysfs_arg1 = virt_to_phys(param);

	error = ihk_ikc_send(cpu_local_var(syscall_channel), &packet, 0);
	error = ihk_ikc_send(cpu_local_var(ikc2linux), &packet, 0);
	if (error) {
		ekprintf("sysfs_mkdirf:ihk_ikc_send failed. %d\n", error);
		goto out;
@@ -257,7 +257,7 @@ sysfs_symlinkf(sysfs_handle_t targeth, const char *fmt, ...)
	packet.msg = SCD_MSG_SYSFS_REQ_SYMLINK;
	packet.sysfs_arg1 = virt_to_phys(param);

	error = ihk_ikc_send(cpu_local_var(syscall_channel), &packet, 0);
	error = ihk_ikc_send(cpu_local_var(ikc2linux), &packet, 0);
	if (error) {
		ekprintf("sysfs_symlinkf:ihk_ikc_send failed. %d\n", error);
		goto out;
@@ -328,7 +328,7 @@ sysfs_lookupf(sysfs_handle_t *objhp, const char *fmt, ...)
	packet.msg = SCD_MSG_SYSFS_REQ_LOOKUP;
	packet.sysfs_arg1 = virt_to_phys(param);

	error = ihk_ikc_send(cpu_local_var(syscall_channel), &packet, 0);
	error = ihk_ikc_send(cpu_local_var(ikc2linux), &packet, 0);
	if (error) {
		ekprintf("sysfs_lookupf:ihk_ikc_send failed. %d\n", error);
		goto out;
@@ -402,7 +402,7 @@ sysfs_unlinkf(int flags, const char *fmt, ...)
	packet.msg = SCD_MSG_SYSFS_REQ_UNLINK;
	packet.sysfs_arg1 = virt_to_phys(param);

	error = ihk_ikc_send(cpu_local_var(syscall_channel), &packet, 0);
	error = ihk_ikc_send(cpu_local_var(ikc2linux), &packet, 0);
	if (error) {
		ekprintf("sysfs_unlinkf:ihk_ikc_send failed. %d\n", error);
		goto out;
@@ -462,7 +462,7 @@ sysfss_req_show(long nodeh, struct sysfs_ops *ops, void *instance)
	packet.sysfs_arg1 = nodeh;
	packet.sysfs_arg2 = ssize;

	error = ihk_ikc_send(cpu_local_var(syscall_channel), &packet, 0);
	error = ihk_ikc_send(cpu_local_var(ikc2linux), &packet, 0);
	if (error) {
		ekprintf("sysfss_req_show:ihk_ikc_send failed. %d\n", error);
		/* through */
@@ -508,7 +508,7 @@ sysfss_req_store(long nodeh, struct sysfs_ops *ops, void *instance,
	packet.sysfs_arg1 = nodeh;
	packet.sysfs_arg2 = ssize;

	error = ihk_ikc_send(cpu_local_var(syscall_channel), &packet, 0);
	error = ihk_ikc_send(cpu_local_var(ikc2linux), &packet, 0);
	if (error) {
		ekprintf("sysfss_req_store:ihk_ikc_send failed. %d\n", error);
		/* through */
@@ -539,7 +539,7 @@ sysfss_req_release(long nodeh, struct sysfs_ops *ops, void *instance)
	packet.err = 0;
	packet.sysfs_arg1 = nodeh;

	error = ihk_ikc_send(cpu_local_var(syscall_channel), &packet, 0);
	error = ihk_ikc_send(cpu_local_var(ikc2linux), &packet, 0);
	if (error) {
		ekprintf("sysfss_req_release:ihk_ikc_send failed. %d\n",
			error);
@@ -623,7 +623,7 @@ sysfs_init(void)
	packet.msg = SCD_MSG_SYSFS_REQ_SETUP;
	packet.sysfs_arg1 = virt_to_phys(param);

	error = ihk_ikc_send(cpu_local_var(syscall_channel), &packet, 0);
	error = ihk_ikc_send(cpu_local_var(ikc2linux), &packet, 0);
	if (error) {
		ekprintf("sysfs_init:ihk_ikc_send failed. %d\n", error);
		goto out;

kernel/xpmem.c: 1682 lines changed (diff suppressed; too large to display)
@@ -1,8 +1,17 @@
#include <ihk/debug.h>
#include <ihk/cpu.h>
+#include <cls.h>
+#include <ihk/rusage.h>

+extern struct cpu_local_var *clv;
+
void panic(const char *msg)
{
+	if (clv) {
+		struct ihk_os_cpu_monitor *monitor = cpu_local_var(monitor);
+
+		monitor->status = IHK_OS_MONITOR_PANIC;
+	}
	cpu_disable_interrupt();

	kprintf(msg);
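Note: panic() now records IHK_OS_MONITOR_PANIC in the per-CPU monitor before disabling interrupts. Presumably (an assumption, not shown in this diff) the host side polls the shared monitor area and can thereby detect a crashed LWK:

	/* Host-side sketch; nr_cpus and how "monitor" is mapped are assumptions. */
	int cpu;
	for (cpu = 0; cpu < nr_cpus; ++cpu) {
		if (monitor->cpu[cpu].status == IHK_OS_MONITOR_PANIC) {
			/* react to the LWK panic, e.g., dump kmsg and shut the OS down */
		}
	}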
@@ -50,6 +50,7 @@ struct ihk_mc_cpu_info {
	int *hw_ids;
	int *nodes;
	int *linux_cpu_ids;
+	int *ikc_cpus;
};

struct ihk_mc_cpu_info *ihk_mc_get_cpu_info(void);
@@ -58,8 +59,11 @@ int ihk_mc_get_processor_id(void);
int ihk_mc_get_hardware_processor_id(void);
int ihk_mc_get_numa_id(void);
int ihk_mc_get_nr_cores();
+int ihk_mc_get_nr_linux_cores();
int ihk_mc_get_core(int id, unsigned long *linux_core_id, unsigned long *apic_id,
		int *numa_id);
+int ihk_mc_get_ikc_cpu(int id);
+int ihk_mc_get_apicid(int linux_core_id);

void ihk_mc_delay_us(int us);
void ihk_mc_set_syscall_handler(long (*handler)(int, ihk_mc_user_context_t *));
@@ -100,10 +104,16 @@ enum ihk_asr_type {
	IHK_ASR_X86_GS,
};

+/* Local IRQ vectors */
+#define LOCAL_TIMER_VECTOR 0xef
+#define LOCAL_PERF_VECTOR 0xf0
+
#define IHK_TLB_FLUSH_IRQ_VECTOR_START 68
#define IHK_TLB_FLUSH_IRQ_VECTOR_SIZE 64
#define IHK_TLB_FLUSH_IRQ_VECTOR_END (IHK_TLB_FLUSH_IRQ_VECTOR_START + IHK_TLB_FLUSH_IRQ_VECTOR_SIZE)

+#define LOCAL_SMP_FUNC_CALL_VECTOR 0xf1
+
int ihk_mc_arch_set_special_register(enum ihk_asr_type, unsigned long value);
int ihk_mc_arch_get_special_register(enum ihk_asr_type, unsigned long *value);
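Note: the cpu_info additions expose the McKernel-to-Linux CPU wiring (the ikc_cpus array plus the ihk_mc_get_ikc_cpu() and ihk_mc_get_apicid() accessors). A hypothetical debugging loop over the new interface, assuming ihk_mc_get_ikc_cpu() is indexed by LWK core id:

	/* Sketch only: print which Linux CPU serves IKC for each LWK core. */
	int cpu;
	for (cpu = 0; cpu < ihk_mc_get_nr_cores(); ++cpu)
		kprintf("LWK core %d -> IKC CPU %d\n", cpu, ihk_mc_get_ikc_cpu(cpu));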
@@ -54,6 +54,9 @@ typedef unsigned long ihk_mc_ap_flag;
#define IHK_MC_AP_BANDWIDTH 0x010000
#define IHK_MC_AP_LATENCY 0x020000

+#define IHK_MC_PG_KERNEL 0
+#define IHK_MC_PG_USER 1
+
enum ihk_mc_pt_prepare_flag {
	IHK_MC_PT_FIRST_LEVEL,
	IHK_MC_PT_LAST_LEVEL,
@@ -86,8 +89,8 @@ void ihk_mc_reserve_arch_pages(struct ihk_page_allocator_desc *pa_allocator,
		unsigned long, unsigned long, int));

struct ihk_mc_pa_ops {
-	void *(*alloc_page)(int, int, ihk_mc_ap_flag, int node);
-	void (*free_page)(void *, int);
+	void *(*alloc_page)(int, int, ihk_mc_ap_flag, int node, int is_user);
+	void (*free_page)(void *, int, int is_user);

	void *(*alloc)(int, ihk_mc_ap_flag);
	void (*free)(void *);
@@ -111,25 +114,43 @@ int ihk_mc_free_micpa(unsigned long mic_pa);
void ihk_mc_clean_micpa(void);

void *_ihk_mc_alloc_aligned_pages_node(int npages, int p2align,
-		ihk_mc_ap_flag flag, int node, char *file, int line);
+		ihk_mc_ap_flag flag, int node, int is_user, char *file, int line);
#define ihk_mc_alloc_aligned_pages_node(npages, p2align, flag, node) ({\
-	void *r = _ihk_mc_alloc_aligned_pages_node(npages, p2align, flag, node, __FILE__, __LINE__);\
+	void *r = _ihk_mc_alloc_aligned_pages_node(npages, p2align, flag, node, IHK_MC_PG_KERNEL, __FILE__, __LINE__);\
	r;\
})
+#define ihk_mc_alloc_aligned_pages_node_user(npages, p2align, flag, node) ({\
+	void *r = _ihk_mc_alloc_aligned_pages_node(npages, p2align, flag, node, IHK_MC_PG_USER, __FILE__, __LINE__);\
+	r;\
+})

#define ihk_mc_alloc_aligned_pages(npages, p2align, flag) ({\
-	void *r = _ihk_mc_alloc_aligned_pages_node(npages, p2align, flag, -1, __FILE__, __LINE__);\
+	void *r = _ihk_mc_alloc_aligned_pages_node(npages, p2align, flag, -1, IHK_MC_PG_KERNEL, __FILE__, __LINE__);\
	r;\
})

+#define ihk_mc_alloc_aligned_pages_user(npages, p2align, flag) ({\
+	void *r = _ihk_mc_alloc_aligned_pages_node(npages, p2align, flag, -1, IHK_MC_PG_USER, __FILE__, __LINE__);\
+	r;\
+})
+
#define ihk_mc_alloc_pages(npages, flag) ({\
-	void *r = _ihk_mc_alloc_aligned_pages_node(npages, PAGE_P2ALIGN, flag, -1, __FILE__, __LINE__);\
+	void *r = _ihk_mc_alloc_aligned_pages_node(npages, PAGE_P2ALIGN, flag, -1, IHK_MC_PG_KERNEL, __FILE__, __LINE__);\
	r;\
})

-void _ihk_mc_free_pages(void *ptr, int npages, char *file, int line);
+#define ihk_mc_alloc_pages_user(npages, flag) ({\
+	void *r = _ihk_mc_alloc_aligned_pages_node(npages, PAGE_P2ALIGN, flag, -1, IHK_MC_PG_USER, __FILE__, __LINE__);\
+	r;\
+})
+
+void _ihk_mc_free_pages(void *ptr, int npages, int is_user, char *file, int line);
#define ihk_mc_free_pages(p, npages) ({\
-	_ihk_mc_free_pages(p, npages, __FILE__, __LINE__);\
+	_ihk_mc_free_pages(p, npages, IHK_MC_PG_KERNEL, __FILE__, __LINE__);\
})

+#define ihk_mc_free_pages_user(p, npages) ({\
+	_ihk_mc_free_pages(p, npages, IHK_MC_PG_USER, __FILE__, __LINE__);\
+})
+
void *ihk_mc_allocate(int size, int flag);
@@ -194,15 +215,23 @@ int ihk_mc_get_memory_chunk(int id,

void remote_flush_tlb_cpumask(struct process_vm *vm,
		unsigned long addr, int cpu_id);
+void remote_flush_tlb_array_cpumask(struct process_vm *vm,
+		unsigned long *addr,
+		int nr_addr,
+		int cpu_id);

int ihk_set_kmsg(unsigned long addr, unsigned long size);
char *ihk_get_kargs();

+int ihk_set_monitor(unsigned long addr, unsigned long size);
+int ihk_set_nmi_mode_addr(unsigned long addr);
+
extern void (*__tlb_flush_handler)(int vector);

struct tlb_flush_entry {
	struct process_vm *vm;
-	unsigned long addr;
+	unsigned long *addr;
+	int nr_addr;
	ihk_atomic_t pending;
	ihk_spinlock_t lock;
} __attribute__((aligned(64)));
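Note: tlb_flush_entry now carries an array of addresses (addr/nr_addr) rather than a single address, matching the new remote_flush_tlb_array_cpumask() prototype, so one remote-flush request can cover a batch of pages. A hedged caller sketch; the address values are invented:

	/* Flush two user pages on the CPUs that have this VM loaded. */
	unsigned long addrs[2] = { va, va + PAGE_SIZE };

	remote_flush_tlb_array_cpumask(vm, addrs, 2, ihk_mc_get_processor_id());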
@@ -5,16 +5,19 @@
 * Declare functions that acquire physical pages and assign virtual addresses
 * to them.
 * \author Taku Shimosawa <shimosawa@is.s.u-tokyo.ac.jp> \par
 *	Copyright (C) 2011 - 2012 Taku Shimosawa
 * \author Balazs Gerofi <bgerofi@riken.jp> \par
 */
/*
 * HISTORY
 * 2016/12 - bgerofi - NUMA support
 * 2017/06 - bgerofi - rewrite physical memory management for red-black trees
 */

#ifndef __HEADER_GENERIC_IHK_PAGE_ALLOC
#define __HEADER_GENERIC_IHK_PAGE_ALLOC

#include <list.h>
+#include <rbtree.h>

/* XXX: Physical memory management shouldn't be part of IHK */
struct node_distance {
@@ -22,14 +25,41 @@ struct node_distance {
	int distance;
};

+#define IHK_RBTREE_ALLOCATOR
+
+#ifdef IHK_RBTREE_ALLOCATOR
+struct free_chunk {
+	unsigned long addr, size;
+	struct rb_node node;
+};
+#endif
+
struct ihk_mc_numa_node {
	int id;
	int linux_numa_id;
	int type;
	struct list_head allocators;
	struct node_distance *nodes_by_distance;
+#ifdef IHK_RBTREE_ALLOCATOR
+	struct rb_root free_chunks;
+	mcs_lock_node_t lock;
+
+	unsigned long nr_pages;
+	unsigned long nr_free_pages;
+	unsigned long min_addr;
+	unsigned long max_addr;
+#endif
};

+#ifdef IHK_RBTREE_ALLOCATOR
+unsigned long ihk_numa_alloc_pages(struct ihk_mc_numa_node *node,
+		int npages, int p2align);
+void ihk_numa_free_pages(struct ihk_mc_numa_node *node,
+		unsigned long addr, int npages);
+int ihk_numa_add_free_pages(struct ihk_mc_numa_node *node,
+		unsigned long addr, unsigned long size);
+#endif
+
struct ihk_page_allocator_desc {
	unsigned long start, end;
	unsigned int last;
lib/include/ihk/rusage.h: new file, 102 lines
@@ -0,0 +1,102 @@
#ifndef __IHK_RUSAGE_H
#define __IHK_RUSAGE_H

struct ihk_os_cpu_monitor {
	int status;
#define IHK_OS_MONITOR_NOT_BOOT 0
#define IHK_OS_MONITOR_IDLE 1
#define IHK_OS_MONITOR_USER 2
#define IHK_OS_MONITOR_KERNEL 3
#define IHK_OS_MONITOR_KERNEL_HEAVY 4
#define IHK_OS_MONITOR_KERNEL_OFFLOAD 5
#define IHK_OS_MONITOR_KERNEL_FREEZING 8
#define IHK_OS_MONITOR_KERNEL_FROZEN 9
#define IHK_OS_MONITOR_KERNEL_THAW 10
#define IHK_OS_MONITOR_PANIC 99
	int status_bak;
	unsigned long counter;
	unsigned long ocounter;
	unsigned long user_tsc;
	unsigned long system_tsc;
};

struct ihk_os_monitor {
	unsigned long rusage_max_num_threads;
	unsigned long rusage_num_threads;
	unsigned long rusage_rss_max;
	long rusage_rss_current;
	unsigned long rusage_kmem_usage;
	unsigned long rusage_kmem_max_usage;
	unsigned long rusage_hugetlb_usage;
	unsigned long rusage_hugetlb_max_usage;
	unsigned long rusage_total_memory;
	unsigned long rusage_total_memory_usage;
	unsigned long rusage_total_memory_max_usage;
	unsigned long num_numa_nodes;
	unsigned long num_processors;
	unsigned long ns_per_tsc;
	unsigned long reserve[128];
	unsigned long rusage_numa_stat[1024];

	struct ihk_os_cpu_monitor cpu[0];
};

enum RUSAGE_MEMBER {
	RUSAGE_RSS,
	RUSAGE_CACHE,
	RUSAGE_RSS_HUGE,
	RUSAGE_MAPPED_FILE,
	RUSAGE_MAX_USAGE,
	RUSAGE_KMEM_USAGE,
	RUSAGE_KMAX_USAGE,
	RUSAGE_NUM_NUMA_NODES,
	RUSAGE_NUMA_STAT,
	RUSAGE_HUGETLB,
	RUSAGE_HUGETLB_MAX,
	RUSAGE_STAT_SYSTEM,
	RUSAGE_STAT_USER,
	RUSAGE_USAGE,
	RUSAGE_USAGE_PER_CPU,
	RUSAGE_NUM_THREADS,
	RUSAGE_MAX_NUM_THREADS
};

struct r_data {
	unsigned long pid;
	unsigned long rss;
	unsigned long cache;
	unsigned long rss_huge;
	unsigned long mapped_file;
	unsigned long max_usage;
	unsigned long kmem_usage;
	unsigned long kmax_usage;
	unsigned long hugetlb;
	unsigned long hugetlb_max;
	unsigned long stat_system;
	unsigned long stat_user;
	unsigned long usage;
	struct r_data *next;
};

enum ihk_os_status {
	IHK_STATUS_INACTIVE,
	IHK_STATUS_BOOTING,
	IHK_STATUS_RUNNING,
	IHK_STATUS_SHUTDOWN,
	IHK_STATUS_PANIC,
	IHK_STATUS_HUNGUP,
	IHK_STATUS_FREEZING,
	IHK_STATUS_FROZEN,
};

enum sys_delegate_state_enum {
	ENTER_KERNEL,
	EXIT_KERNEL,
};

extern struct ihk_os_monitor *monitor;

extern void ihk_mc_set_os_status(unsigned long st);
extern unsigned long ihk_mc_get_os_status();

#endif
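Note: struct ihk_os_monitor ends in a flexible per-CPU array (cpu[0]), so whoever maps the shared monitor area has to size it for the CPU count at setup time. A sketch of the arithmetic; nr_cpus is an assumed input:

	/* Header plus one ihk_os_cpu_monitor slot per CPU. */
	size_t monitor_size = sizeof(struct ihk_os_monitor) +
		nr_cpus * sizeof(struct ihk_os_cpu_monitor);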
@@ -17,4 +17,8 @@
#define INT_MIN -0x80000000
#define IOV_MAX 1024

+#ifndef PATH_MAX
+#define PATH_MAX 4096
+#endif
+
#endif
@@ -37,6 +37,8 @@ int strlen_user(const char *s);
int strcpy_from_user(char *dst, const char *src);
long getlong_user(long *dest, const long *p);
+int getint_user(int *dest, const int *p);
+int verify_process_vm(struct process_vm *vm, const void *usrc, size_t size);
int read_process_vm(struct process_vm *vm, void *kdst, const void *usrc, size_t siz);
int copy_to_user(void *dst, const void *src, size_t siz);
int setlong_user(long *dst, long data);
@@ -36,6 +36,10 @@ void *memset(void *s, int n, size_t l);
#define fast_memcpy memcpy
#endif

+#ifdef ARCH_FAST_MEMSET
+#define memset __inline_memset
+#endif
+
extern int snprintf(char * buf, size_t size, const char *fmt, ...);
extern int sprintf(char * buf, const char *fmt, ...);
extern int sscanf(const char * buf, const char * fmt, ...);
lib/page_alloc.c: 361 lines changed
@@ -18,6 +18,15 @@
#include <ihk/page_alloc.h>
#include <memory.h>
#include <bitops.h>
+#include <errno.h>
+
+//#define DEBUG_PRINT_PAGE_ALLOC
+
+#ifdef DEBUG_PRINT_PAGE_ALLOC
+#define dkprintf kprintf
+#else
+#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
+#endif

void free_pages(void *, int npages);

@@ -219,7 +228,14 @@ void ihk_pagealloc_free(void *__desc, unsigned long address, int npages)
	mcs_lock_lock(&desc->lock, &node);
	mi = (address - desc->start) >> desc->shift;
	for (i = 0; i < npages; i++, mi++) {
-		desc->map[MAP_INDEX(mi)] &= ~(1UL << MAP_BIT(mi));
+		if (!(desc->map[MAP_INDEX(mi)] & (1UL << MAP_BIT(mi)))) {
+			kprintf("%s: double-freeing page 0x%lx\n",
+				__FUNCTION__, address + i * PAGE_SIZE);
+			panic("panic");
+		}
+		else {
+			desc->map[MAP_INDEX(mi)] &= ~(1UL << MAP_BIT(mi));
+		}
	}
	mcs_lock_unlock(&desc->lock, &node);
}
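Note: ihk_pagealloc_free() now tests the bitmap bit before clearing it, turning a silent double free into an immediate panic. A hypothetical sequence that the new check catches; the function names are from the diff, the double call is our illustration:

	ihk_pagealloc_free(desc, addr, 1);
	ihk_pagealloc_free(desc, addr, 1);	/* bit already clear -> panic */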
@@ -301,3 +317,346 @@ kprintf("\nzeroing done\n");
}


#ifdef IHK_RBTREE_ALLOCATOR

/*
 * Simple red-black tree based physical memory management routines.
 *
 * Allocation grabs the first suitable chunk (splitting the chunk if alignment requires it).
 * Deallocation merges with the immediate neighbours.
 *
 * NOTE: invariant property: free_chunk structures are placed at the very front
 * of their corresponding memory (i.e., they are on the free memory chunk itself).
 */

/*
 * Free pages.
 * NOTE: locking must be managed by the caller.
 */
static int __page_alloc_rbtree_free_range(struct rb_root *root,
		unsigned long addr, unsigned long size)
{
	struct rb_node **iter = &(root->rb_node), *parent = NULL;
	struct free_chunk *new_chunk;

	/* Figure out where to put the new node */
	while (*iter) {
		struct free_chunk *ichunk = container_of(*iter, struct free_chunk, node);
		parent = *iter;

		if ((addr >= ichunk->addr) && (addr < ichunk->addr + ichunk->size)) {
			kprintf("%s: ERROR: free memory chunk: 0x%lx:%lu"
					" and requested range to be freed: 0x%lx:%lu are "
					"overlapping (double-free?)\n",
					__FUNCTION__,
					ichunk->addr, ichunk->size, addr, size);
			return EINVAL;
		}

		/* Is ichunk contiguous from the left? */
		if (ichunk->addr + ichunk->size == addr) {
			struct rb_node *right;
			/* Extend it to the right */
			ichunk->size += size;
			dkprintf("%s: chunk extended to right: 0x%lx:%lu\n",
					__FUNCTION__, ichunk->addr, ichunk->size);

			/* Have the right chunk of ichunk and ichunk become contiguous? */
			right = rb_next(*iter);
			if (right) {
				struct free_chunk *right_chunk =
					container_of(right, struct free_chunk, node);

				if (ichunk->addr + ichunk->size == right_chunk->addr) {
					ichunk->size += right_chunk->size;
					rb_erase(right, root);
					dkprintf("%s: chunk merged to right: 0x%lx:%lu\n",
							__FUNCTION__, ichunk->addr, ichunk->size);
				}
			}

			return 0;
		}

		/* Is ichunk contiguous from the right? */
		if (addr + size == ichunk->addr) {
			struct rb_node *left;
			/* Extend it to the left */
			ichunk->addr -= size;
			ichunk->size += size;
			dkprintf("%s: chunk extended to left: 0x%lx:%lu\n",
					__FUNCTION__, ichunk->addr, ichunk->size);

			/* Have the left chunk of ichunk and ichunk become contiguous? */
			left = rb_prev(*iter);
			if (left) {
				struct free_chunk *left_chunk =
					container_of(left, struct free_chunk, node);

				if (left_chunk->addr + left_chunk->size == ichunk->addr) {
					ichunk->addr -= left_chunk->size;
					ichunk->size += left_chunk->size;
					rb_erase(left, root);
					dkprintf("%s: chunk merged to left: 0x%lx:%lu\n",
							__FUNCTION__, ichunk->addr, ichunk->size);
				}
			}

			/* Move chunk structure to the front */
			new_chunk = (struct free_chunk *)phys_to_virt(ichunk->addr);
			*new_chunk = *ichunk;
			rb_replace_node(&ichunk->node, &new_chunk->node, root);
			dkprintf("%s: chunk moved to front: 0x%lx:%lu\n",
					__FUNCTION__, new_chunk->addr, new_chunk->size);

			return 0;
		}

		if (addr < ichunk->addr)
			iter = &((*iter)->rb_left);
		else
			iter = &((*iter)->rb_right);
	}

	new_chunk = (struct free_chunk *)phys_to_virt(addr);
	new_chunk->addr = addr;
	new_chunk->size = size;
	dkprintf("%s: new chunk: 0x%lx:%lu\n",
			__FUNCTION__, new_chunk->addr, new_chunk->size);

	/* Add new node and rebalance tree. */
	rb_link_node(&new_chunk->node, parent, iter);
	rb_insert_color(&new_chunk->node, root);

	return 0;
}

/*
 * Mark address range as used (i.e., allocated).
 *
 * chunk is the free memory chunk in which
 * [aligned_addr, aligned_addr + size] resides.
 *
 * NOTE: locking must be managed by the caller.
 */
static int __page_alloc_rbtree_mark_range_allocated(struct rb_root *root,
		struct free_chunk *chunk,
		unsigned long aligned_addr, unsigned long size)
{
	struct free_chunk *left_chunk = NULL, *right_chunk = NULL;

	/* Is there leftover on the right? */
	if ((aligned_addr + size) < (chunk->addr + chunk->size)) {
		right_chunk = (struct free_chunk *)phys_to_virt(aligned_addr + size);
		right_chunk->addr = aligned_addr + size;
		right_chunk->size = (chunk->addr + chunk->size) - (aligned_addr + size);
	}

	/* Is there leftover on the left? */
	if (aligned_addr != chunk->addr) {
		left_chunk = chunk;
	}

	/* Update chunk's size, possibly becomes zero */
	chunk->size = (aligned_addr - chunk->addr);

	if (left_chunk) {
		/* Left chunk reuses chunk, add right chunk */
		if (right_chunk) {
			dkprintf("%s: adding right chunk: 0x%lx:%lu\n",
					__FUNCTION__, right_chunk->addr, right_chunk->size);
			if (__page_alloc_rbtree_free_range(root,
					right_chunk->addr, right_chunk->size)) {
				kprintf("%s: ERROR: adding right chunk: 0x%lx:%lu\n",
						__FUNCTION__, right_chunk->addr, right_chunk->size);
				return EINVAL;
			}
		}
	}
	else {
		/* Replace left with right */
		if (right_chunk) {
			rb_replace_node(&chunk->node, &right_chunk->node, root);
			dkprintf("%s: chunk replaced with right: 0x%lx:%lu\n",
					__FUNCTION__, right_chunk->addr, right_chunk->size);
		}
		/* No left chunk and no right chunk => chunk was exact match, delete it */
		else {
			rb_erase(&chunk->node, root);
			dkprintf("%s: chunk deleted: 0x%lx:%lu\n",
					__FUNCTION__, chunk->addr, chunk->size);
		}
	}

	return 0;
}

/*
 * Allocate pages.
 *
 * NOTE: locking must be managed by the caller.
 */
static unsigned long __page_alloc_rbtree_alloc_pages(struct rb_root *root,
		int npages, int p2align)
{
	struct free_chunk *chunk;
	struct rb_node *node;
	unsigned long size = PAGE_SIZE * npages;
	unsigned long align_size = (PAGE_SIZE << p2align);
	unsigned long align_mask = ~(align_size - 1);
	unsigned long aligned_addr = 0;

	for (node = rb_first(root); node; node = rb_next(node)) {
		chunk = container_of(node, struct free_chunk, node);
		aligned_addr = (chunk->addr + (align_size - 1)) & align_mask;

		/* Is this a suitable chunk? */
		if ((aligned_addr + size) <= (chunk->addr + chunk->size)) {
			break;
		}
	}

	/* No matching chunk at all? */
	if (!node) {
		return 0;
	}

	dkprintf("%s: allocating: 0x%lx:%lu\n",
			__FUNCTION__, aligned_addr, size);
	if (__page_alloc_rbtree_mark_range_allocated(root, chunk,
			aligned_addr, size)) {
		kprintf("%s: ERROR: allocating 0x%lx:%lu\n",
				__FUNCTION__, aligned_addr, size);
		return 0;
	}

	return aligned_addr;
}

/*
 * Reserve pages.
 *
 * NOTE: locking must be managed by the caller.
 */
static unsigned long __page_alloc_rbtree_reserve_pages(struct rb_root *root,
		unsigned long aligned_addr, int npages)
{
	struct free_chunk *chunk;
	struct rb_node *node;
	unsigned long size = PAGE_SIZE * npages;

	for (node = rb_first(root); node; node = rb_next(node)) {
		chunk = container_of(node, struct free_chunk, node);

		/* Is this the containing chunk? */
		if (aligned_addr >= chunk->addr &&
				(aligned_addr + size) <= (chunk->addr + chunk->size)) {
			break;
		}
	}

	/* No matching chunk at all? */
	if (!node) {
		kprintf("%s: WARNING: attempted to reserve non-free"
				" physical range: 0x%lx:%lu\n",
				__FUNCTION__,
				aligned_addr, size);
		return 0;
	}

	dkprintf("%s: reserving: 0x%lx:%lu\n",
			__FUNCTION__, aligned_addr, size);
	if (__page_alloc_rbtree_mark_range_allocated(root, chunk,
			aligned_addr, size)) {
		kprintf("%s: ERROR: reserving 0x%lx:%lu\n",
				__FUNCTION__, aligned_addr, size);
		return 0;
	}

	return aligned_addr;
}


/*
 * External routines.
 */
int ihk_numa_add_free_pages(struct ihk_mc_numa_node *node,
		unsigned long addr, unsigned long size)
{
	if (__page_alloc_rbtree_free_range(&node->free_chunks, addr, size)) {
		kprintf("%s: ERROR: adding 0x%lx:%lu\n",
				__FUNCTION__, addr, size);
		return EINVAL;
	}

	if (addr < node->min_addr)
		node->min_addr = addr;

	if (addr + size > node->max_addr)
		node->max_addr = addr + size;

	node->nr_pages += (size >> PAGE_SHIFT);
	node->nr_free_pages += (size >> PAGE_SHIFT);
	dkprintf("%s: added free pages 0x%lx:%lu\n",
			__FUNCTION__, addr, size);
	return 0;
}


unsigned long ihk_numa_alloc_pages(struct ihk_mc_numa_node *node,
		int npages, int p2align)
{
	unsigned long addr = 0;
	mcs_lock_node_t mcs_node;

	mcs_lock_lock(&node->lock, &mcs_node);

	if (node->nr_free_pages < npages) {
		goto unlock_out;
	}

	addr = __page_alloc_rbtree_alloc_pages(&node->free_chunks,
			npages, p2align);

	/* Does not necessarily succeed due to alignment */
	if (addr) {
		node->nr_free_pages -= npages;
		dkprintf("%s: allocated pages 0x%lx:%lu\n",
				__FUNCTION__, addr, npages << PAGE_SHIFT);
	}

unlock_out:
	mcs_lock_unlock(&node->lock, &mcs_node);

	return addr;
}

void ihk_numa_free_pages(struct ihk_mc_numa_node *node,
		unsigned long addr, int npages)
{
	mcs_lock_node_t mcs_node;

	if (addr < node->min_addr ||
			(addr + (npages << PAGE_SHIFT)) > node->max_addr) {
		return;
	}

	if (npages <= 0) {
		return;
	}

	mcs_lock_lock(&node->lock, &mcs_node);
	if (__page_alloc_rbtree_free_range(&node->free_chunks, addr,
				npages << PAGE_SHIFT)) {
		kprintf("%s: ERROR: freeing 0x%lx:%lu\n",
				__FUNCTION__, addr, npages << PAGE_SHIFT);
	}
	else {
		node->nr_free_pages += npages;
		dkprintf("%s: freed pages 0x%lx:%lu\n",
				__FUNCTION__, addr, npages << PAGE_SHIFT);
	}
	mcs_lock_unlock(&node->lock, &mcs_node);
}

#endif // IHK_RBTREE_ALLOCATOR
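Note: the external entry points wrap the rb-tree routines with the per-node MCS lock and the free-page accounting. A minimal usage sketch, assuming "node" is an initialized ihk_mc_numa_node that has been seeded via ihk_numa_add_free_pages():

	/* Allocate four contiguous pages, aligned to 4 pages (p2align = 2). */
	unsigned long phys = ihk_numa_alloc_pages(node, 4, 2);

	if (phys) {
		/* ... use phys_to_virt(phys) ... */
		ihk_numa_free_pages(node, phys, 4);
	}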
@@ -13,6 +13,7 @@
#include <kmalloc.h>
#include <string.h>
#include <memory.h>
+#include <arch-string.h>

size_t strlen(const char *p)
{
@@ -153,6 +154,7 @@ void *memcpy_long(void *dest, const void *src, size_t n)
	return dest;
}

+#ifndef ARCH_FAST_MEMSET
void *memset(void *s, int c, size_t n)
{
	char *s_aligned = (void *)(((unsigned long)s + 7) & ~7);
@@ -187,6 +189,7 @@ void *memset(void *s, int c, size_t n)

	return s;
}
+#endif

int memcmp(const void *s1, const void *s2, size_t n)
{