Compare commits


45 Commits
1.2.1 ... 1.2.3

SHA1 Message Date
f81927b85b Revert "brk(): larger allocation units internally"
This reverts commit c58ab0f648.
2016-12-20 11:11:09 +09:00
701cdcdab1 use MCS locks in physical memory allocator 2016-12-19 12:57:59 +09:00
9635a628a9 fileobj/shmobj/devobj: add file size to memobj 2016-12-19 12:55:12 +09:00
3e1b16f3fc syscall_channel: increase queue size to avoid deadlock in ikc_send() 2016-12-18 21:12:38 +09:00
ff37ff9ccf memobj: synch prefetch among processes 2016-12-18 21:12:38 +09:00
5b7bcb7170 fileobj: use read/write MCS locks in page hash 2016-12-18 21:12:37 +09:00
6a5fe90f98 mcexec_get_cpuset(): save CPU set and IKC target cpu in per-process data 2016-12-18 21:12:37 +09:00
91373337ba mcctrl: add IKC target CPU to OS file release_handler 2016-12-18 21:12:37 +09:00
56ed726a88 pager_req_create(): prefetch for MPI library and zerofill for shm 2016-12-18 21:12:37 +09:00
bce10e11e4 fileobj: rewrite for scalability using per-file page hash 2016-12-18 21:12:37 +09:00
91cdb16158 MCS lock: separate IRQ disable/enable versions 2016-12-18 21:12:37 +09:00
c58ab0f648 brk(): larger allocation units internally 2016-12-18 21:12:37 +09:00
f410af1cfc xpmem: porting xpmem v2.6.3
implement xpmem_make, xpmem_remove
2016-12-16 17:00:09 +09:00
aa15e5eea8 mcexec: -t option and OMP_NUM_THREADS for thread pool size 2016-12-14 18:56:30 +09:00
df9f1f8f78 allocate_aligned_pages(): take user set NUMA policy into account 2016-12-13 17:51:39 +09:00
7ace35d737 mcexec_get_cpuset(): fix NUMA search bug 2016-12-13 17:50:50 +09:00
551999ff6b NUMA: order nodes based on distances 2016-12-13 10:46:17 +09:00
052b3f44ca mcexec: -n: topology aware partitioned execution 2016-12-10 16:27:57 +09:00
fdcf766337 prepare_process(): pass cpu_set in program_load_desc 2016-12-09 16:32:20 +09:00
7d13bfb14e set_mempolicy(): limit maxnode to PROCESS_NUMA_MASK_BITS 2016-12-08 21:05:10 +09:00
202bfd9955 IHK-API: expand and fix for ver 1.2. 2016-12-08 17:28:53 +09:00
c99e36235b execve(): disable debug warnings 2016-12-08 16:33:24 +09:00
3cecafac59 obtain_clone_cpuid(): respect parent's CPU set 2016-12-08 16:01:30 +09:00
61fc4c5e55 show_context_stack(): fix warning 2016-12-07 11:42:09 +09:00
fad73cacc1 x86: display call stack for IRQ 133 (for debug) 2016-12-07 11:32:02 +09:00
8fced29978 page_fault_handler(): improved debug msg format 2016-12-07 11:25:02 +09:00
b0f4ae4890 ihk_mc_pt_set_pte(): double check phys address alignment 2016-12-07 11:23:45 +09:00
7070094a31 ihk_mc_pt_print_pte(): handle large pages correctly 2016-12-07 11:13:53 +09:00
011185e3f7 __ihk_pagealloc_large(): fix 1GB page alignment bug 2016-12-07 09:38:37 +09:00
461881e46a /proc/mckernel to indicate McKernel 2016-12-06 14:29:25 +09:00
ddc33821cf sched_yield(): avoid schedule for single thread 2016-12-05 18:10:20 +09:00
0ab7d02994 disable syscall tracker and eliminate interrupt_syscall debug msg 2016-12-05 18:10:20 +09:00
a8c4ab221b use MCS locks in signal handling code 2016-12-05 18:10:20 +09:00
87d36a7752 mcreboot-smp-x86: -t to enable turbo boost 2016-12-05 18:10:20 +09:00
998ded414c mcreboot-smp-x86: shorter sleep in waiting for /proc 2016-12-05 18:10:20 +09:00
f78d031e64 syscall and offload tracking (disabled by default) 2016-12-05 18:10:20 +09:00
4ab37dd34a schedule(): only load page table during context switch if it's different 2016-12-05 18:10:20 +09:00
8129dec2f7 Fix out-of-tree build
<build>/ihk/cokernel/Makefile.common is not found when
<build>/mckernel/kernel/Makefile tries to perform
"make -C <build>/ihk/{cokernel,ikc}" from mckernel/kernel
2016-12-01 16:44:01 +09:00
a1035a1878 fix out of tree build 2016-12-01 12:55:34 +09:00
db169c5f90 add gcc options (-ffreestanding -fno-tree-loop-distribute-patterns)
refs #299
2016-11-29 16:28:18 +09:00
bbb55ef261 sched_setparam: thread lock is necessary when update other thread data 2016-11-28 14:04:44 +09:00
1130cafe41 ptrace: fixed for threads. 2016-11-28 11:19:30 +09:00
a1cf27e232 sched_getaffinity(): fix error code for special invalid input 2016-11-28 05:50:01 +09:00
5a1ce99d87 mcexec: fix number of threads not to exceed thread_data array 2016-11-27 07:31:52 +09:00
c7db296e1b getcpu(): expose correct NUMA id 2016-11-26 09:29:09 +09:00
43 changed files with 2672 additions and 487 deletions

View File

@@ -148,7 +148,7 @@ extern char page_fault[], general_protection_exception[];
 extern char debug_exception[], int3_exception[];
 uint64_t boot_pat_state = 0;
-int no_turbo = 0; /* May be updated by early parsing of kargs */
+int no_turbo = 1; /* May be updated by early parsing of kargs */
 extern int num_processors; /* kernel/ap.c */
 struct pvclock_vsyscall_time_info *pvti = NULL;
@@ -844,6 +844,25 @@ void set_signal(int sig, void *regs, struct siginfo *info);
 void check_signal(unsigned long, void *, int);
 extern void tlb_flush_handler(int vector);
+void __show_stack(uintptr_t *sp) {
+	while (((uintptr_t)sp >= 0xffff800000000000)
+			&& ((uintptr_t)sp < 0xffffffff80000000)) {
+		uintptr_t fp;
+		uintptr_t ip;
+
+		fp = sp[0];
+		ip = sp[1];
+		kprintf("IP: %016lx, SP: %016lx, FP: %016lx\n", ip, (uintptr_t)sp, fp);
+		sp = (void *)fp;
+	}
+
+	return;
+}
+
+void show_context_stack(uintptr_t *rbp) {
+	__show_stack(rbp);
+	return;
+}
+
 void handle_interrupt(int vector, struct x86_user_context *regs)
 {
 	struct ihk_mc_interrupt_handler *h;
@@ -952,6 +971,9 @@ void handle_interrupt(int vector, struct x86_user_context *regs)
 		tlb_flush_handler(vector);
 	}
+	else if (vector == 133) {
+		show_context_stack((uintptr_t *)regs->gpr.rbp);
+	}
 	else {
 		list_for_each_entry(h, &handlers[vector - 32], list) {
 			if (h->func) {
@@ -1079,6 +1101,10 @@ unhandled_page_fault(struct thread *thread, void *fault_addr, void *regs)
 	kprintf_unlock(irqflags);
+	if (!(error & PF_USER)) {
+		panic("panic: kernel mode PF");
+	}
+
 	/* TODO */
 	ihk_mc_debug_show_interrupt_context(regs);
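
The walker added above relies on the x86-64 frame-pointer chain, which is why this change set also adds -fno-omit-frame-pointer to the kernel CFLAGS (see the Makefile diff further down). A minimal sketch of the frame record the code assumes; the struct is illustrative, not part of the diff:

/* With frame pointers enabled, "push %rbp; mov %rsp, %rbp" leaves this
 * two-word record at every frame pointer, so sp[0] is the caller's frame
 * pointer and sp[1] the return address, exactly as __show_stack() reads them. */
struct x86_64_frame {
	struct x86_64_frame *saved_rbp;	/* fp = sp[0] */
	unsigned long return_ip;	/* ip = sp[1] */
};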

View File

@@ -131,6 +131,7 @@ static void __ihk_mc_spinlock_unlock(ihk_spinlock_t *lock, unsigned long flags)
 typedef struct mcs_lock_node {
 	unsigned long locked;
 	struct mcs_lock_node *next;
+	unsigned long irqsave;
 } __attribute__((aligned(64))) mcs_lock_node_t;
 static void mcs_lock_init(struct mcs_lock_node *node)
@@ -139,7 +140,7 @@ static void mcs_lock_init(struct mcs_lock_node *node)
 	node->next = NULL;
 }
-static void mcs_lock_lock(struct mcs_lock_node *lock,
+static void __mcs_lock_lock(struct mcs_lock_node *lock,
 		struct mcs_lock_node *node)
 {
 	struct mcs_lock_node *pred;
@@ -158,7 +159,7 @@ static void mcs_lock_lock(struct mcs_lock_node *lock,
 	}
 }
-static void mcs_lock_unlock(struct mcs_lock_node *lock,
+static void __mcs_lock_unlock(struct mcs_lock_node *lock,
 		struct mcs_lock_node *node)
 {
 	if (node->next == NULL) {
@@ -178,6 +179,35 @@ static void mcs_lock_unlock(struct mcs_lock_node *lock,
 	node->next->locked = 0;
 }
+static void mcs_lock_lock_noirq(struct mcs_lock_node *lock,
+		struct mcs_lock_node *node)
+{
+	preempt_disable();
+	__mcs_lock_lock(lock, node);
+}
+
+static void mcs_lock_unlock_noirq(struct mcs_lock_node *lock,
+		struct mcs_lock_node *node)
+{
+	__mcs_lock_unlock(lock, node);
+	preempt_enable();
+}
+
+static void mcs_lock_lock(struct mcs_lock_node *lock,
+		struct mcs_lock_node *node)
+{
+	node->irqsave = cpu_disable_interrupt_save();
+	mcs_lock_lock_noirq(lock, node);
+}
+
+static void mcs_lock_unlock(struct mcs_lock_node *lock,
+		struct mcs_lock_node *node)
+{
+	mcs_lock_unlock_noirq(lock, node);
+	cpu_restore_interrupt(node->irqsave);
+}
+
 // reader/writer lock
 typedef struct mcs_rwlock_node {
 	ihk_atomic_t count; // num of readers (use only common reader)
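
Note that the saved IRQ state lives in the per-acquisition node rather than in the lock itself, so each queued contender keeps its own flags. A hedged usage sketch of the two variants (the counter and its lock are illustrative, not from the diff):

/* Illustrative only: a counter protected by the split lock API above. */
static mcs_lock_node_t counter_lock;	/* mcs_lock_init(&counter_lock) once */
static long counter;

void counter_inc(void)
{
	mcs_lock_node_t node;	/* per-acquisition, typically stack-allocated */

	mcs_lock_lock(&counter_lock, &node);	/* disables IRQs, saves flags in node.irqsave */
	counter++;
	mcs_lock_unlock(&counter_lock, &node);	/* restores node.irqsave */
}

void counter_inc_irqs_off(void)
{
	mcs_lock_node_t node;

	/* _noirq variant: for paths that already run with IRQs disabled;
	 * it only toggles preemption around the queueing protocol */
	mcs_lock_lock_noirq(&counter_lock, &node);
	counter++;
	mcs_lock_unlock_noirq(&counter_lock, &node);
}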

View File

@@ -22,7 +22,7 @@
 SYSCALL_HANDLED(0, read)
 SYSCALL_DELEGATED(1, write)
-SYSCALL_DELEGATED(2, open)
+SYSCALL_HANDLED(2, open)
 SYSCALL_HANDLED(3, close)
 SYSCALL_DELEGATED(4, stat)
 SYSCALL_DELEGATED(5, fstat)
@@ -150,5 +150,8 @@ SYSCALL_HANDLED(602, pmc_start)
 SYSCALL_HANDLED(603, pmc_stop)
 SYSCALL_HANDLED(604, pmc_reset)
 SYSCALL_HANDLED(700, get_cpu_id)
+#ifdef TRACK_SYSCALLS
+SYSCALL_HANDLED(701, syscall_offload_clr_cntrs)
+#endif // TRACK_SYSCALLS
 /**** End of File ****/

View File

@@ -558,28 +558,34 @@ int ihk_mc_pt_print_pte(struct page_table *pt, void *virt)
 	GET_VIRT_INDICES(v, l4idx, l3idx, l2idx, l1idx);
+	__kprintf("l4 table: 0x%lX l4idx: %d \n", virt_to_phys(pt), l4idx);
 	if (!(pt->entry[l4idx] & PFL4_PRESENT)) {
 		__kprintf("0x%lX l4idx not present! \n", (unsigned long)virt);
+		__kprintf("l4 entry: 0x%lX\n", pt->entry[l4idx]);
 		return -EFAULT;
 	}
+	__kprintf("l4 entry: 0x%lX\n", pt->entry[l4idx]);
 	pt = phys_to_virt(pt->entry[l4idx] & PAGE_MASK);
 	__kprintf("l3 table: 0x%lX l3idx: %d \n", virt_to_phys(pt), l3idx);
 	if (!(pt->entry[l3idx] & PFL3_PRESENT)) {
 		__kprintf("0x%lX l3idx not present! \n", (unsigned long)virt);
+		__kprintf("l3 entry: 0x%lX\n", pt->entry[l3idx]);
 		return -EFAULT;
 	}
+	__kprintf("l3 entry: 0x%lX\n", pt->entry[l3idx]);
+
+	if ((pt->entry[l3idx] & PFL3_SIZE)) {
+		__kprintf("l3 entry is 1G page\n");
+		return 0;
+	}
 	pt = phys_to_virt(pt->entry[l3idx] & PAGE_MASK);
 	__kprintf("l2 table: 0x%lX l2idx: %d \n", virt_to_phys(pt), l2idx);
 	if (!(pt->entry[l2idx] & PFL2_PRESENT)) {
 		__kprintf("0x%lX l2idx not present! \n", (unsigned long)virt);
+		__kprintf("l2 entry: 0x%lX\n", pt->entry[l2idx]);
 		return -EFAULT;
 	}
+	__kprintf("l2 entry: 0x%lX\n", pt->entry[l2idx]);
 	if ((pt->entry[l2idx] & PFL2_SIZE)) {
+		__kprintf("l2 entry is 2M page\n");
 		return 0;
 	}
 	pt = phys_to_virt(pt->entry[l2idx] & PAGE_MASK);
@@ -1773,9 +1779,19 @@ int ihk_mc_pt_set_pte(page_table_t pt, pte_t *ptep, size_t pgsize,
 		*ptep = phys | attr_to_l1attr(attr);
 	}
 	else if (pgsize == PTL2_SIZE) {
+		if (phys & (PTL2_SIZE - 1)) {
+			kprintf("%s: error: phys needs to be PTL2_SIZE aligned\n", __FUNCTION__);
+			error = -1;
+			goto out;
+		}
+
 		*ptep = phys | attr_to_l2attr(attr | PTATTR_LARGEPAGE);
 	}
 	else if ((pgsize == PTL3_SIZE) && (use_1gb_page)) {
+		if (phys & (PTL3_SIZE - 1)) {
+			kprintf("%s: error: phys needs to be PTL3_SIZE aligned\n", __FUNCTION__);
+			error = -1;
+			goto out;
+		}
+
 		*ptep = phys | attr_to_l3attr(attr | PTATTR_LARGEPAGE);
 	}
 	else {
@@ -2367,8 +2383,18 @@ int write_process_vm(struct process_vm *vm, void *udst, const void *ksrc, size_t
 		return error;
 	}
+	if (pa < ihk_mc_get_memory_address(IHK_MC_GMA_MAP_START, 0) ||
+			pa >= ihk_mc_get_memory_address(IHK_MC_GMA_MAP_END, 0)) {
+		dkprintf("%s: pa is outside of LWK memory, from: %p,"
+				"pa: %p, cpsize: %d\n", __FUNCTION__, from, pa, cpsize);
+		va = ihk_mc_map_virtual(pa, 1, PTATTR_ACTIVE);
+		memcpy(va, from, cpsize);
+		ihk_mc_unmap_virtual(va, 1, 1);
+	}
+	else {
 		va = phys_to_virt(pa);
 		memcpy(va, from, cpsize);
+	}
 	from += cpsize;
 	to += cpsize;
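
The new checks use the usual power-of-two alignment test: for a page size S, phys & (S - 1) is the offset of phys within an S-sized page, so any nonzero result rejects the mapping. A small standalone illustration (the addresses are arbitrary; PTL2_SIZE is the 2 MiB large-page size used above):

#include <assert.h>

#define PTL2_SIZE 0x200000UL	/* 2 MiB, the l2 large-page size */

int main(void)
{
	/* 0x40200000 is 2 MiB aligned: the low 21 bits are all zero */
	assert((0x40200000UL & (PTL2_SIZE - 1)) == 0);

	/* 0x40100000 is not: the test exposes the 1 MiB offset */
	assert((0x40100000UL & (PTL2_SIZE - 1)) == 0x100000UL);

	return 0;
}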

View File

@@ -70,71 +70,37 @@ static struct vdso vdso;
 static size_t container_size = 0;
 static ptrdiff_t vdso_offset;
-/*
-	See dkprintf("BSP HW ID = %d, ", bsp_hw_id); (in ./mcos/kernel/ap.c)
-	Core with BSP HW ID 224 is 1st logical core of last physical core.
-	It boots first and is given SW-ID of 0
-
-	Core with BSP HW ID 0 is 1st logical core of 1st physical core.
-	It boots next and is given SW-ID of 1.
-	Core with BSP HW ID 1 boots next and is given SW-ID of 2.
-	Core with BSP HW ID 2 boots next and is given SW-ID of 3.
-	Core with BSP HW ID 3 boots next and is given SW-ID of 4.
-	...
-	Core with BSP HW ID 220 is 1st logical core of 56-th physical core.
-	It boots next and is given SW-ID of 221.
-	Core with BSP HW ID 221 boots next and is given SW-ID of 222.
-	Core with BSP HW ID 222 boots next and is given SW-ID of 223.
-	Core with BSP HW ID 223 boots next and is given SW-ID of 224.
-	Core with BSP HW ID 225 is 2nd logical core of last physical core.
-	It boots next and is given SW-ID of 225.
-	Core with BSP HW ID 226 boots next and is given SW-ID of 226.
-	Core with BSP HW ID 227 boots next and is given SW-ID of 227.
-*/
-ihk_spinlock_t cpuid_head_lock = 0;
-static int cpuid_head = 0;
-
-/* archtecture-depended syscall handlers */
-int obtain_clone_cpuid() {
-	/* see above on BSP HW ID */
-	struct ihk_mc_cpu_info *cpu_info = ihk_mc_get_cpu_info();
-	int cpuid, nretry = 0;
-	ihk_mc_spinlock_lock_noirq(&cpuid_head_lock);
-
-	/* Always start from 0 to fill in LWK cores linearily */
-	cpuid_head = 0;
-retry:
-	/* Try to obtain next physical core */
-	cpuid = cpuid_head;
-	/* A hyper-threading core on the same physical core as
-	   the parent process might be chosen. Use sched_setaffinity
-	   if you want to skip that kind of busy physical core for
-	   performance reason. */
-	cpuid_head += 1;
-	if(cpuid_head >= cpu_info->ncpus) {
-		cpuid_head = 0;
-	}
-	/* A hyper-threading core whose parent physical core has a
-	   process on one of its hyper-threading core might
-	   be chosen. Use sched_setaffinity if you want to skip that
-	   kind of busy physical core for performance reason. */
-	if(get_cpu_local_var(cpuid)->status != CPU_STATUS_IDLE) {
-		nretry++;
-		if(nretry >= cpu_info->ncpus) {
-			cpuid = -1;
-			ihk_mc_spinlock_unlock_noirq(&cpuid_head_lock);
-			goto out;
-		}
-		goto retry;
-	}
-	get_cpu_local_var(cpuid)->status = CPU_STATUS_RESERVED;
-	ihk_mc_spinlock_unlock_noirq(&cpuid_head_lock);
-out:
-	return cpuid;
-}
+extern int num_processors;
+
+int obtain_clone_cpuid(cpu_set_t *cpu_set) {
+	int min_queue_len = -1;
+	int cpu, min_cpu = -1;
+
+	/* Find the first allowed core with the shortest run queue */
+	for (cpu = 0; cpu < num_processors; ++cpu) {
+		struct cpu_local_var *v;
+		unsigned long irqstate;
+
+		if (!CPU_ISSET(cpu, cpu_set)) continue;
+
+		v = get_cpu_local_var(cpu);
+		irqstate = ihk_mc_spinlock_lock(&v->runq_lock);
+		if (min_queue_len == -1 || v->runq_len < min_queue_len) {
+			min_queue_len = v->runq_len;
+			min_cpu = cpu;
+		}
+		ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
+
+		if (min_queue_len == 0)
+			break;
+	}
+
+	if (min_cpu != -1) {
+		if (get_cpu_local_var(min_cpu)->status != CPU_STATUS_RESERVED)
+			get_cpu_local_var(min_cpu)->status = CPU_STATUS_RESERVED;
+	}
+
+	return min_cpu;
+}
 int
@@ -544,14 +510,14 @@ void ptrace_report_signal(struct thread *thread, int sig)
 	int parent_pid;
 	struct siginfo info;
-	dkprintf("ptrace_report_signal,pid=%d\n", thread->proc->pid);
+	dkprintf("ptrace_report_signal, tid=%d, pid=%d\n", thread->tid, thread->proc->pid);
 	mcs_rwlock_writer_lock(&proc->update_lock, &lock);
 	if(!(proc->ptrace & PT_TRACED)){
 		mcs_rwlock_writer_unlock(&proc->update_lock, &lock);
 		return;
 	}
-	proc->exit_status = sig;
+	thread->exit_status = sig;
 	/* Transition thread state */
 	proc->status = PS_TRACED;
 	thread->status = PS_TRACED;
@@ -569,8 +535,8 @@ void ptrace_report_signal(struct thread *thread, int sig)
 	memset(&info, '\0', sizeof info);
 	info.si_signo = SIGCHLD;
 	info.si_code = CLD_TRAPPED;
-	info._sifields._sigchld.si_pid = thread->proc->pid;
-	info._sifields._sigchld.si_status = thread->proc->exit_status;
+	info._sifields._sigchld.si_pid = thread->tid;
+	info._sifields._sigchld.si_status = thread->exit_status;
 	do_kill(cpu_local_var(current), parent_pid, -1, SIGCHLD, &info, 0);
 	/* Wake parent (if sleeping in wait4()) */
 	waitq_wakeup(&proc->parent->waitpid_q);
@@ -695,10 +661,10 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 	int orgsig;
 	int ptraceflag = 0;
 	struct mcs_rwlock_node_irqsave lock;
-	unsigned long irqstate;
+	struct mcs_rwlock_node_irqsave mcs_rw_node;
 	for(w = pending->sigmask.__val[0], sig = 0; w; sig++, w >>= 1);
-	dkprintf("do_signal,pid=%d,sig=%d\n", proc->pid, sig);
+	dkprintf("do_signal(): tid=%d, pid=%d, sig=%d\n", thread->tid, proc->pid, sig);
 	orgsig = sig;
 	if((proc->ptrace & PT_TRACED) &&
@@ -718,12 +684,12 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 		rc = regs->gpr.rax;
 	}
-	irqstate = ihk_mc_spinlock_lock(&thread->sigcommon->lock);
+	mcs_rwlock_writer_lock(&thread->sigcommon->lock, &mcs_rw_node);
 	k = thread->sigcommon->action + sig - 1;
 	if(k->sa.sa_handler == SIG_IGN){
 		kfree(pending);
-		ihk_mc_spinlock_unlock(&thread->sigcommon->lock, irqstate);
+		mcs_rwlock_writer_unlock(&thread->sigcommon->lock, &mcs_rw_node);
 		return;
 	}
 	else if(k->sa.sa_handler){
@@ -808,7 +774,7 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 		if(copy_to_user(sigsp, &ksigsp, sizeof ksigsp)){
 			kfree(pending);
-			ihk_mc_spinlock_unlock(&thread->sigcommon->lock, irqstate);
+			mcs_rwlock_writer_unlock(&thread->sigcommon->lock, &mcs_rw_node);
 			kprintf("do_signal,write_process_vm failed\n");
 			terminate(0, sig);
 			return;
@@ -827,7 +793,7 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 		if(!(k->sa.sa_flags & SA_NODEFER))
 			thread->sigmask.__val[0] |= pending->sigmask.__val[0];
 		kfree(pending);
-		ihk_mc_spinlock_unlock(&thread->sigcommon->lock, irqstate);
+		mcs_rwlock_writer_unlock(&thread->sigcommon->lock, &mcs_rw_node);
 		if(regs->gpr.rflags & RFLAGS_TF){
 			struct siginfo info;
@@ -853,7 +819,7 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 		}
 		else
 			kfree(pending);
-		ihk_mc_spinlock_unlock(&thread->sigcommon->lock, irqstate);
+		mcs_rwlock_writer_unlock(&thread->sigcommon->lock, &mcs_rw_node);
 		switch (sig) {
 		case SIGSTOP:
 		case SIGTSTP:
@@ -885,7 +851,8 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 			/* Wake up the parent who tried wait4 and sleeping */
 			waitq_wakeup(&proc->parent->waitpid_q);
-			dkprintf("do_signal,SIGSTOP,sleeping\n");
+			dkprintf("do_signal(): pid: %d, tid: %d SIGSTOP, sleeping\n",
+					proc->pid, thread->tid);
 			/* Sleep */
 			schedule();
 			dkprintf("SIGSTOP(): woken up\n");
@@ -899,7 +866,7 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 			/* Update thread state in fork tree */
 			mcs_rwlock_writer_lock(&proc->update_lock, &lock);
-			proc->exit_status = SIGTRAP;
+			thread->exit_status = SIGTRAP;
 			proc->status = PS_TRACED;
 			thread->status = PS_TRACED;
 			mcs_rwlock_writer_unlock(&proc->update_lock, &lock);
@@ -953,11 +920,11 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 static struct sig_pending *
 getsigpending(struct thread *thread, int delflag){
 	struct list_head *head;
-	ihk_spinlock_t *lock;
+	mcs_rwlock_lock_t *lock;
+	struct mcs_rwlock_node_irqsave mcs_rw_node;
 	struct sig_pending *next;
 	struct sig_pending *pending;
 	__sigset_t w;
-	int irqstate;
 	__sigset_t x;
 	int sig;
 	struct k_sigaction *k;
@@ -967,7 +934,11 @@ getsigpending(struct thread *thread, int delflag){
 	lock = &thread->sigcommon->lock;
 	head = &thread->sigcommon->sigpending;
 	for(;;) {
-		irqstate = ihk_mc_spinlock_lock(lock);
+		if (delflag)
+			mcs_rwlock_writer_lock(lock, &mcs_rw_node);
+		else
+			mcs_rwlock_reader_lock(lock, &mcs_rw_node);
 		list_for_each_entry_safe(pending, next, head, list){
 			for(x = pending->sigmask.__val[0], sig = 0; x; sig++, x >>= 1);
 			k = thread->sigcommon->action + sig - 1;
@@ -978,15 +949,24 @@ getsigpending(struct thread *thread, int delflag){
 				if(!(pending->sigmask.__val[0] & w)){
 					if(delflag)
 						list_del(&pending->list);
-					ihk_mc_spinlock_unlock(lock, irqstate);
+
+					if (delflag)
+						mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
+					else
+						mcs_rwlock_reader_unlock(lock, &mcs_rw_node);
+
 					return pending;
 				}
 			}
 		}
-		ihk_mc_spinlock_unlock(lock, irqstate);
+
+		if (delflag)
+			mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
+		else
+			mcs_rwlock_reader_unlock(lock, &mcs_rw_node);
+
 		if(lock == &thread->sigpendinglock)
 			return NULL;
 		lock = &thread->sigpendinglock;
 		head = &thread->sigpending;
 	}
@@ -1034,22 +1014,25 @@ check_signal(unsigned long rc, void *regs0, int num)
 			}
 		}
 		ihk_mc_spinlock_unlock(&(cpu_local_var(runq_lock)), irqstate);
-		return;
+		goto out;
 	}
 	if(regs != NULL && !interrupt_from_user(regs)) {
-		return;
+		goto out;
 	}
 	for(;;){
 		pending = getsigpending(thread, 1);
 		if(!pending) {
 			dkprintf("check_signal,queue is empty\n");
-			return;
+			goto out;
 		}
 		do_signal(rc, regs, thread, pending, num);
 	}
+
+out:
+	return;
 }
 unsigned long
@@ -1063,7 +1046,8 @@ do_kill(struct thread *thread, int pid, int tid, int sig, siginfo_t *info,
 	struct thread *tthread = NULL;
 	int i;
 	__sigset_t mask;
-	ihk_spinlock_t *savelock = NULL;
+	mcs_rwlock_lock_t *savelock = NULL;
+	struct mcs_rwlock_node mcs_rw_node;
 	struct list_head *head = NULL;
 	int rc;
 	unsigned long irqstate = 0;
@@ -1247,7 +1231,7 @@ done:
 	doint = 0;
-	ihk_mc_spinlock_lock_noirq(savelock);
+	mcs_rwlock_writer_lock_noirq(savelock, &mcs_rw_node);
 	/* Put signal event even when handler is SIG_IGN or SIG_DFL
 	   because target ptraced thread must call ptrace_report_signal
@@ -1286,7 +1270,7 @@ done:
 			}
 		}
 	}
-	ihk_mc_spinlock_unlock_noirq(savelock);
+	mcs_rwlock_writer_unlock_noirq(savelock, &mcs_rw_node);
 	cpu_restore_interrupt(irqstate);
 	if (doint && !(mask & tthread->sigmask.__val[0])) {
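
For reference, the policy behind the rewritten obtain_clone_cpuid() is simply "shortest run queue among the allowed CPUs, stop early on an idle core". A distilled, lock-free sketch under stated assumptions (runq_len_of() is a hypothetical stand-in for the runq_lock-protected read of v->runq_len):

#define _GNU_SOURCE
#include <sched.h>

extern int runq_len_of(int cpu);	/* hypothetical */

int pick_clone_cpu(const cpu_set_t *allowed, int ncpus)
{
	int cpu, best_cpu = -1, best_len = -1;

	for (cpu = 0; cpu < ncpus; ++cpu) {
		if (!CPU_ISSET(cpu, allowed))
			continue;

		int len = runq_len_of(cpu);
		if (best_len == -1 || len < best_len) {
			best_len = len;
			best_cpu = cpu;
		}
		if (best_len == 0)	/* an idle core cannot be beaten */
			break;
	}

	return best_cpu;	/* -1 if the mask excluded every CPU */
}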

View File

@@ -39,7 +39,9 @@ else
 	irqbalance_used="no"
 fi
-while getopts :i:k:c:m:o:f: OPT
+turbo=""
+
+while getopts :ti:k:c:m:o:f: OPT
 do
 	case ${OPT} in
 	f) facility=${OPTARG}
@@ -76,6 +78,8 @@ do
 		;;
 	m) mem=${OPTARG}
 		;;
+	t) turbo="turbo"
+		;;
 	*) echo "invalid option -${OPT}" >&2
 		exit 1
 	esac
@@ -340,7 +344,7 @@ if ! ${SBINDIR}/ihkosctl 0 load ${KERNDIR}/mckernel.img; then
 fi
 # Set kernel arguments
-if ! ${SBINDIR}/ihkosctl 0 kargs "hidos ksyslogd=${LOGMODE}"; then
+if ! ${SBINDIR}/ihkosctl 0 kargs "hidos ksyslogd=${LOGMODE} $turbo"; then
 	echo "error: setting kernel arguments" >&2
 	error_exit "os_created"
 fi
@@ -374,7 +378,7 @@ if [ "$enable_mcoverlay" == "yes" ]; then
 	fi
 	while [ ! -e /proc/mcos0 ]
 	do
-		sleep 1
+		sleep 0.1
 	done
 	if [ ! -e /tmp/mcos/mcos0_proc ]; then mkdir -p /tmp/mcos/mcos0_proc; fi
 	if [ ! -e /tmp/mcos/mcos0_proc_upper ]; then mkdir -p /tmp/mcos/mcos0_proc_upper; fi
@@ -400,6 +404,8 @@ if [ "$enable_mcoverlay" == "yes" ]; then
 	# TODO: How de we revert this in case of failure??
 	mount --make-rprivate /sys
+	touch /tmp/mcos/mcos0_proc/mckernel
+
 	rm -rf /tmp/mcos/mcos0_sys/setup_complete
 	# Hide NUMA related files which are outside the LWK partition
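
With this in place, passing -t appends "turbo" to the kernel arguments handed to ihkosctl. A hypothetical invocation (the CPU list and memory size are illustrative; only -t comes from this change):

# boots McKernel with turbo boost enabled:
#   ihkosctl 0 kargs "hidos ksyslogd=... turbo"
./mcreboot-smp-x86.sh -t -c 2-7 -m 2G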

configure (vendored)
View File

@@ -2922,6 +2922,7 @@ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $
 ac_compiler_gnu=$ac_cv_c_compiler_gnu
 	XCC=$CC
+	CFLAGS="$CFLAGS -ffreestanding -fno-tree-loop-distribute-patterns"
 	;;
 builtin-mic)
 	ARCH=k1om
@@ -3912,6 +3913,7 @@ fi
 ac_config_headers="$ac_config_headers executer/config.h"

View File

@@ -70,6 +70,7 @@ case $WITH_TARGET in
 	ARCH=`uname -m`
 	AC_PROG_CC
 	XCC=$CC
+	CFLAGS="$CFLAGS -ffreestanding -fno-tree-loop-distribute-patterns"
 	;;
 builtin-mic)
 	ARCH=k1om
@@ -286,6 +287,7 @@ AC_SUBST(ETCDIR)
 AC_SUBST(KMODDIR)
 AC_SUBST(KERNDIR)
 AC_SUBST(MANDIR)
+AC_SUBST(CFLAGS)
 AC_SUBST(ENABLE_MCOVERLAYFS)
 AC_SUBST(IHK_VERSION)

View File

@@ -42,6 +42,7 @@
 #define MCEXEC_UP_GET_CRED 0x30a0290a
 #define MCEXEC_UP_GET_CREDV 0x30a0290b
 #define MCEXEC_UP_GET_NODES 0x30a0290c
+#define MCEXEC_UP_GET_CPUSET 0x30a0290d
 #define MCEXEC_UP_PREPARE_DMA 0x30a02910
 #define MCEXEC_UP_FREE_DMA 0x30a02911
@@ -79,6 +80,17 @@ struct program_image_section {
 #define SHELL_PATH_MAX_LEN 1024
 #define MCK_RLIM_MAX 20
+struct get_cpu_set_arg {
+	int nr_processes;
+	void *cpu_set;
+	size_t cpu_set_size; // Size in bytes
+	int *target_core;
+};
+
+#define PLD_CPU_SET_MAX_CPUS 1024
+typedef unsigned long __cpu_set_unit;
+#define PLD_CPU_SET_SIZE (PLD_CPU_SET_MAX_CPUS / (8 * sizeof(__cpu_set_unit)))
+
 struct program_load_desc {
 	int num_sections;
 	int status;
@@ -108,6 +120,7 @@ struct program_load_desc {
 	struct rlimit rlimit[MCK_RLIM_MAX];
 	unsigned long interp_align;
 	char shell_path[SHELL_PATH_MAX_LEN];
+	__cpu_set_unit cpu_set[PLD_CPU_SET_SIZE];
 	struct program_image_section sections[0];
 };
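
On LP64 targets sizeof(__cpu_set_unit) is 8, so PLD_CPU_SET_SIZE works out to 1024 / 64 = 16 units, i.e. 128 bytes embedded in each program_load_desc. A minimal sketch of the bitmask layout this implies (the helper functions are hypothetical, shown only to make the layout concrete):

#define PLD_CPU_SET_MAX_CPUS 1024
typedef unsigned long __cpu_set_unit;
#define PLD_CPU_SET_SIZE (PLD_CPU_SET_MAX_CPUS / (8 * sizeof(__cpu_set_unit)))
#define UNIT_BITS (8 * sizeof(__cpu_set_unit))

/* Bit n of the mask lives in unit n / 64, at bit position n % 64. */
static inline void pld_cpu_set_bit(int cpu, __cpu_set_unit *set)
{
	set[cpu / UNIT_BITS] |= 1UL << (cpu % UNIT_BITS);
}

static inline int pld_cpu_test_bit(int cpu, const __cpu_set_unit *set)
{
	return !!(set[cpu / UNIT_BITS] & (1UL << (cpu % UNIT_BITS)));
}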

View File

@@ -34,6 +34,7 @@
 #include <linux/version.h>
 #include <linux/semaphore.h>
 #include <linux/interrupt.h>
+#include <linux/cpumask.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
 #include <asm/io.h>
@@ -292,8 +293,9 @@ int mcexec_transfer_image(ihk_os_t os, struct remote_transfer *__user upt)
 //extern unsigned long last_thread_exec;
-struct handlerinfo {
+struct release_handler_info {
 	int pid;
+	int cpu;
 };
 static long mcexec_debug_log(ihk_os_t os, unsigned long arg)
@@ -309,7 +311,7 @@ static long mcexec_debug_log(ihk_os_t os, unsigned long arg)
 static void release_handler(ihk_os_t os, void *param)
 {
-	struct handlerinfo *info = param;
+	struct release_handler_info *info = param;
 	struct ikc_scd_packet isp;
 	int os_ind = ihk_host_os_get_index(os);
@@ -317,10 +319,15 @@ static void release_handler(ihk_os_t os, void *param)
 	isp.msg = SCD_MSG_CLEANUP_PROCESS;
 	isp.pid = info->pid;
-	mcctrl_ikc_send(os, 0, &isp);
-	if(os_ind >= 0)
+	dprintk("%s: SCD_MSG_CLEANUP_PROCESS, info: %p, cpu: %d\n",
+		__FUNCTION__, info, info->cpu);
+	mcctrl_ikc_send(os, info->cpu, &isp);
+
+	if (os_ind >= 0) {
 		delete_pid_entry(os_ind, info->pid);
+	}
 	kfree(param);
+
+	dprintk("%s: SCD_MSG_CLEANUP_PROCESS, info: %p OK\n",
+		__FUNCTION__, info);
 }
 static long mcexec_newprocess(ihk_os_t os,
@@ -328,12 +335,12 @@ static long mcexec_newprocess(ihk_os_t os,
 		struct file *file)
 {
 	struct newprocess_desc desc;
-	struct handlerinfo *info;
 
+	struct release_handler_info *info;
 	if (copy_from_user(&desc, udesc, sizeof(struct newprocess_desc))) {
 		return -EFAULT;
 	}
-	info = kmalloc(sizeof(struct handlerinfo), GFP_KERNEL);
+	info = kmalloc(sizeof(struct release_handler_info), GFP_KERNEL);
 	info->pid = desc.pid;
 	ihk_os_register_release_handler(file, release_handler, info);
 	return 0;
@@ -347,7 +354,7 @@ static long mcexec_start_image(ihk_os_t os,
 	struct ikc_scd_packet isp;
 	struct mcctrl_channel *c;
 	struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
-	struct handlerinfo *info;
+	struct release_handler_info *info;
 	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
 	if (!desc) {
@@ -362,8 +369,9 @@ static long mcexec_start_image(ihk_os_t os,
 		return -EFAULT;
 	}
-	info = kmalloc(sizeof(struct handlerinfo), GFP_KERNEL);
+	info = kmalloc(sizeof(struct release_handler_info), GFP_KERNEL);
 	info->pid = desc->pid;
+	info->cpu = desc->cpu;
 	ihk_os_register_release_handler(file, release_handler, info);
 	c = usrdata->channels + desc->cpu;
@@ -460,6 +468,199 @@ static long mcexec_get_nodes(ihk_os_t os)
 	return usrdata->mem_info->n_numa_nodes;
 }
+extern int linux_numa_2_mckernel_numa(struct mcctrl_usrdata *udp, int numa_id);
+extern int mckernel_cpu_2_linux_cpu(struct mcctrl_usrdata *udp, int cpu_id);
+
+static long mcexec_get_cpuset(ihk_os_t os, unsigned long arg)
+{
+	struct mcctrl_usrdata *udp = ihk_host_os_get_usrdata(os);
+	struct mcctrl_part_exec *pe;
+	struct get_cpu_set_arg req;
+	struct cpu_topology *cpu_top, *cpu_top_i;
+	struct cache_topology *cache_top;
+	int cpu, cpus_assigned, cpus_to_assign, cpu_prev;
+	int ret = 0;
+	cpumask_t cpus_used;
+	cpumask_t cpus_to_use;
+	struct mcctrl_per_proc_data *ppd;
+
+	if (!udp) {
+		return -EINVAL;
+	}
+
+	/* Look up per-process structure */
+	ppd = mcctrl_get_per_proc_data(udp, task_tgid_vnr(current));
+	if (!ppd) {
+		return -EINVAL;
+	}
+
+	pe = &udp->part_exec;
+
+	if (copy_from_user(&req, (void *)arg, sizeof(req))) {
+		printk("%s: error copying user request\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&pe->lock);
+
+	memcpy(&cpus_used, &pe->cpus_used, sizeof(cpumask_t));
+	memset(&cpus_to_use, 0, sizeof(cpus_to_use));
+
+	/* First process to enter CPU partitioning */
+	if (pe->nr_processes == -1) {
+		pe->nr_processes = req.nr_processes;
+		pe->nr_processes_left = req.nr_processes;
+		dprintk("%s: nr_processes: %d (partitioned exec starts)\n",
+			__FUNCTION__,
+			pe->nr_processes);
+	}
+
+	if (pe->nr_processes != req.nr_processes) {
+		printk("%s: error: requested number of processes"
+			" doesn't match current partitioned execution\n",
+			__FUNCTION__);
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	--pe->nr_processes_left;
+	dprintk("%s: nr_processes: %d, nr_processes_left: %d\n",
+		__FUNCTION__,
+		pe->nr_processes,
+		pe->nr_processes_left);
+
+	cpus_to_assign = udp->cpu_info->n_cpus / req.nr_processes;
+
+	/* Find the first unused CPU */
+	cpu = cpumask_next_zero(-1, &cpus_used);
+	if (cpu >= udp->cpu_info->n_cpus) {
+		printk("%s: error: no more CPUs available\n",
+			__FUNCTION__);
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	cpu_set(cpu, cpus_used);
+	cpu_set(cpu, cpus_to_use);
+	cpu_prev = cpu;
+	dprintk("%s: CPU %d assigned (first)\n", __FUNCTION__, cpu);
+
+	for (cpus_assigned = 1; cpus_assigned < cpus_to_assign;
+			++cpus_assigned) {
+		int node;
+
+		cpu_top = NULL;
+		/* Find the topology object of the last core assigned */
+		list_for_each_entry(cpu_top_i, &udp->cpu_topology_list, chain) {
+			if (cpu_top_i->mckernel_cpu_id == cpu_prev) {
+				cpu_top = cpu_top_i;
+				break;
+			}
+		}
+
+		if (!cpu_top) {
+			printk("%s: error: couldn't find CPU topology info\n",
+				__FUNCTION__);
+			ret = -EINVAL;
+			goto unlock_out;
+		}
+
+		/* Find a core sharing the same cache iterating caches from
+		 * the most inner one outwards */
+		list_for_each_entry(cache_top, &cpu_top->cache_list, chain) {
+			for_each_cpu(cpu, &cache_top->shared_cpu_map) {
+				if (!cpu_isset(cpu, cpus_used)) {
+					cpu_set(cpu, cpus_used);
+					cpu_set(cpu, cpus_to_use);
+					cpu_prev = cpu;
+					dprintk("%s: CPU %d assigned (same cache L%lu)\n",
+						__FUNCTION__, cpu, cache_top->saved->level);
+					goto next_cpu;
+				}
+			}
+		}
+
+		/* No CPU? Find a core from the same NUMA node */
+		node = linux_numa_2_mckernel_numa(udp,
+			cpu_to_node(mckernel_cpu_2_linux_cpu(udp, cpu_prev)));
+
+		for_each_cpu_not(cpu, &cpus_used) {
+			/* Invalid CPU? */
+			if (cpu >= udp->cpu_info->n_cpus)
+				break;
+
+			/* Found one */
+			if (node == linux_numa_2_mckernel_numa(udp,
+					cpu_to_node(mckernel_cpu_2_linux_cpu(udp, cpu)))) {
+				cpu_set(cpu, cpus_used);
+				cpu_set(cpu, cpus_to_use);
+				cpu_prev = cpu;
+				dprintk("%s: CPU %d assigned (same NUMA)\n",
+					__FUNCTION__, cpu);
+				goto next_cpu;
+			}
+		}
+
+		/* No CPU? Simply find the next unused one */
+		cpu = cpumask_next_zero(-1, &cpus_used);
+		if (cpu >= udp->cpu_info->n_cpus) {
+			printk("%s: error: no more CPUs available\n",
+				__FUNCTION__);
+			ret = -EINVAL;
+			goto unlock_out;
+		}
+
+		cpu_set(cpu, cpus_used);
+		cpu_set(cpu, cpus_to_use);
+		cpu_prev = cpu;
+		dprintk("%s: CPU %d assigned (unused)\n",
+			__FUNCTION__, cpu);
+next_cpu:
+		continue;
+	}
+
+	/* Found all cores, let user know */
+	if (copy_to_user(req.cpu_set, &cpus_to_use,
+			(req.cpu_set_size < sizeof(cpus_to_use) ?
+			 req.cpu_set_size : sizeof(cpus_to_use)))) {
+		printk("%s: error copying mask to user\n", __FUNCTION__);
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	cpu = cpumask_next(-1, &cpus_to_use);
+	if (copy_to_user(req.target_core, &cpu, sizeof(cpu))) {
+		printk("%s: error copying target core to user\n",
+			__FUNCTION__);
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	/* Save in per-process structure */
+	memcpy(&ppd->cpu_set, &cpus_to_use, sizeof(cpumask_t));
+	ppd->ikc_target_cpu = cpu;
+
+	/* Commit used cores to OS structure */
+	memcpy(&pe->cpus_used, &cpus_used, sizeof(cpus_used));
+
+	/* Reset if last process */
+	if (pe->nr_processes_left == 0) {
+		dprintk("%s: nr_processes: %d (partitioned exec ends)\n",
			__FUNCTION__,
+			pe->nr_processes);
+		pe->nr_processes = -1;
+		memset(&pe->cpus_used, 0, sizeof(pe->cpus_used));
+	}
+
+	ret = 0;
+
+unlock_out:
+	mutex_unlock(&pe->lock);
+
+	return ret;
+}
 int mcctrl_add_per_proc_data(struct mcctrl_usrdata *ud, int pid,
 		struct mcctrl_per_proc_data *ppd)
 {
@@ -978,6 +1179,8 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
 	INIT_LIST_HEAD(&ppd->wq_req_list);
 	INIT_LIST_HEAD(&ppd->wq_list_exact);
 	spin_lock_init(&ppd->wq_list_lock);
+	memset(&ppd->cpu_set, 0, sizeof(cpumask_t));
+	ppd->ikc_target_cpu = 0;
 	for (i = 0; i < MCCTRL_PER_THREAD_DATA_HASH_SIZE; ++i) {
 		INIT_LIST_HEAD(&ppd->per_thread_data_hash[i]);
@@ -1279,6 +1482,9 @@ long __mcctrl_control(ihk_os_t os, unsigned int req, unsigned long arg,
 	case MCEXEC_UP_GET_NODES:
 		return mcexec_get_nodes(os);
+	case MCEXEC_UP_GET_CPUSET:
+		return mcexec_get_cpuset(os, arg);
+
 	case MCEXEC_UP_STRNCPY_FROM_USER:
 		return mcexec_strncpy_from_user(os,
 			(struct strncpy_from_user_desc *)arg);

View File

@@ -61,6 +61,7 @@ static struct ihk_os_user_call_handler mcctrl_uchs[] = {
 	{ .request = MCEXEC_UP_SEND_SIGNAL, .func = mcctrl_ioctl },
 	{ .request = MCEXEC_UP_GET_CPU, .func = mcctrl_ioctl },
 	{ .request = MCEXEC_UP_GET_NODES, .func = mcctrl_ioctl },
+	{ .request = MCEXEC_UP_GET_CPUSET, .func = mcctrl_ioctl },
 	{ .request = MCEXEC_UP_STRNCPY_FROM_USER, .func = mcctrl_ioctl },
 	{ .request = MCEXEC_UP_NEW_PROCESS, .func = mcctrl_ioctl },
 	{ .request = MCEXEC_UP_PREPARE_DMA, .func = mcctrl_ioctl },

View File

@@ -240,7 +240,7 @@ static struct ihk_ikc_listen_param listen_param = {
 	.port = 501,
 	.handler = connect_handler,
 	.pkt_size = sizeof(struct ikc_scd_packet),
-	.queue_size = PAGE_SIZE,
+	.queue_size = PAGE_SIZE * 4,
 	.magic = 0x1129,
 };
@@ -248,7 +248,7 @@ static struct ihk_ikc_listen_param listen_param2 = {
 	.port = 502,
 	.handler = connect_handler2,
 	.pkt_size = sizeof(struct ikc_scd_packet),
-	.queue_size = PAGE_SIZE,
+	.queue_size = PAGE_SIZE * 4,
 	.magic = 0x1329,
 };
@@ -298,6 +298,9 @@ int prepare_ikc_channels(ihk_os_t os)
 	INIT_LIST_HEAD(&usrdata->cpu_topology_list);
 	INIT_LIST_HEAD(&usrdata->node_topology_list);
+	mutex_init(&usrdata->part_exec.lock);
+	usrdata->part_exec.nr_processes = -1;
+
 	return 0;
 }

View File

@@ -198,6 +198,8 @@ struct mcctrl_per_proc_data {
 	struct list_head per_thread_data_hash[MCCTRL_PER_THREAD_DATA_HASH_SIZE];
 	rwlock_t per_thread_data_hash_lock[MCCTRL_PER_THREAD_DATA_HASH_SIZE];
+	cpumask_t cpu_set;
+	int ikc_target_cpu;
 };
 struct sysfsm_req {
@@ -254,6 +256,13 @@ struct node_topology {
 	struct list_head chain;
 };
+struct mcctrl_part_exec {
+	struct mutex lock;
+	int nr_processes;
+	int nr_processes_left;
+	cpumask_t cpus_used;
+};
+
 #define CPU_LONGS (((NR_CPUS) + (BITS_PER_LONG) - 1) / (BITS_PER_LONG))
 #define MCCTRL_PER_PROC_DATA_HASH_SHIFT 7
@@ -284,6 +293,7 @@ struct mcctrl_usrdata {
 	nodemask_t numa_online;
 	struct list_head cpu_topology_list;
 	struct list_head node_topology_list;
+	struct mcctrl_part_exec part_exec;
 };
 struct mcctrl_signal {

View File

@@ -746,6 +746,18 @@ static struct list_head pager_list = LIST_HEAD_INIT(pager_list);
 struct pager_create_result {
 	uintptr_t handle;
 	int maxprot;
+	uint32_t flags;
+	size_t size;
+};
+
+enum {
+	/* for memobj.flags */
+	MF_HAS_PAGER = 0x0001,
+	MF_SHMDT_OK = 0x0002,
+	MF_IS_REMOVABLE = 0x0004,
+	MF_PREFETCH = 0x0008,
+	MF_ZEROFILL = 0x0010,
+	MF_END
 };
 static int pager_req_create(ihk_os_t os, int fd, uintptr_t result_pa)
@@ -760,6 +772,7 @@ static int pager_req_create(ihk_os_t os, int fd, uintptr_t result_pa)
 	struct pager *newpager = NULL;
 	uintptr_t phys;
 	struct kstat st;
+	int mf_flags = 0;
 	dprintk("pager_req_create(%d,%lx)\n", fd, (long)result_pa);
@@ -827,6 +840,32 @@ static int pager_req_create(ihk_os_t os, int fd, uintptr_t result_pa)
 		list_add(&newpager->list, &pager_list);
 		pager = newpager;
 		newpager = NULL;
+
+		/* Intel MPI library and shared memory "prefetch" */
+		{
+			char *pathbuf, *fullpath;
+
+			pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
+			if (pathbuf) {
+				fullpath = d_path(&file->f_path, pathbuf, PATH_MAX);
+				if (!IS_ERR(fullpath)) {
+					if (!strncmp("/dev/shm/Intel_MPI", fullpath, 18)) {
+						//mf_flags = (MF_PREFETCH | MF_ZEROFILL);
+						mf_flags = (MF_ZEROFILL);
+						dprintk("%s: filename: %s, zerofill\n",
+							__FUNCTION__, fullpath);
+					}
+					else if (strstr(fullpath, "libmpi") != NULL) {
+						mf_flags = MF_PREFETCH;
+						dprintk("%s: filename: %s, prefetch\n",
+							__FUNCTION__, fullpath);
+					}
+				}
+
+				kfree(pathbuf);
+			}
+		}
+
 		break;
 	}
@@ -856,6 +895,8 @@ found:
 	resp = ihk_device_map_virtual(dev, phys, sizeof(*resp), NULL, 0);
 	resp->handle = (uintptr_t)pager;
 	resp->maxprot = maxprot;
+	resp->flags = mf_flags;
+	resp->size = st.size;
 	ihk_device_unmap_virtual(dev, resp, sizeof(*resp));
 	ihk_device_unmap_memory(dev, phys, sizeof(*resp));

View File

@@ -197,19 +197,19 @@ void free_topology_info(ihk_os_t os)
 /*
  * CPU and NUMA node mapping conversion functions.
  */
-static int mckernel_cpu_2_linux_cpu(struct mcctrl_usrdata *udp, int cpu_id)
+int mckernel_cpu_2_linux_cpu(struct mcctrl_usrdata *udp, int cpu_id)
 {
 	return (cpu_id < udp->cpu_info->n_cpus) ?
 		udp->cpu_info->mapping[cpu_id] : -1;
 }
-static int mckernel_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu_id)
+int mckernel_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu_id)
 {
 	return (cpu_id < udp->cpu_info->n_cpus) ?
 		udp->cpu_info->hw_ids[cpu_id] : -1;
 }
-static int linux_cpu_2_mckernel_cpu(struct mcctrl_usrdata *udp, int cpu_id)
+int linux_cpu_2_mckernel_cpu(struct mcctrl_usrdata *udp, int cpu_id)
 {
 	int i;
@@ -222,7 +222,7 @@ static int linux_cpu_2_mckernel_cpu(struct mcctrl_usrdata *udp, int cpu_id)
 }
 #if 0
-static int hw_id_2_mckernel_cpu(struct mcctrl_usrdata *udp, int hw_id)
+int hw_id_2_mckernel_cpu(struct mcctrl_usrdata *udp, int hw_id)
 {
 	int i;
@@ -235,7 +235,7 @@ static int hw_id_2_mckernel_cpu(struct mcctrl_usrdata *udp, int hw_id)
 	return -1;
 }
-static int hw_id_2_linux_cpu(struct mcctrl_usrdata *udp, int hw_id)
+int hw_id_2_linux_cpu(struct mcctrl_usrdata *udp, int hw_id)
 {
 	int i;
@@ -248,7 +248,7 @@ static int hw_id_2_linux_cpu(struct mcctrl_usrdata *udp, int hw_id)
 	return -1;
 }
-static int linux_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu)
+int linux_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu)
 {
 	int mckernel_cpu = linux_cpu_2_mckernel_cpu(udp, cpu);
@@ -257,13 +257,13 @@ static int linux_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu)
 }
 #endif
-static int mckernel_numa_2_linux_numa(struct mcctrl_usrdata *udp, int numa_id)
+int mckernel_numa_2_linux_numa(struct mcctrl_usrdata *udp, int numa_id)
 {
 	return (numa_id < udp->mem_info->n_numa_nodes) ?
 		udp->mem_info->numa_mapping[numa_id] : -1;
 }
-static int linux_numa_2_mckernel_numa(struct mcctrl_usrdata *udp, int numa_id)
+int linux_numa_2_mckernel_numa(struct mcctrl_usrdata *udp, int numa_id)
 {
 	int i;

View File

@@ -153,6 +153,10 @@ static const char rlimit_stack_envname[] = "MCKERNEL_RLIMIT_STACK";
 static int ischild;
 static int enable_vdso = 1;
+/* Partitioned execution (e.g., for MPI) */
+static int nr_processes = 0;
+static int nr_threads = -1;
+
 struct fork_sync {
 	pid_t pid;
 	int status;
@@ -502,7 +506,7 @@ retry:
 	/* Check whether the resolved path is a symlink */
 	if (lstat(path, &sb) == -1) {
-		fprintf(stderr, "lookup_exec_path(): error stat\n");
+		__dprintf(stderr, "lookup_exec_path(): error stat\n");
 		return errno;
 	}
@@ -1102,7 +1106,7 @@ static int reduce_stack(struct rlimit *orig_rlim, char *argv[])
 void print_usage(char **argv)
 {
-	fprintf(stderr, "Usage: %s [-c target_core] [<mcos-id>] (program) [args...]\n", argv[0]);
+	fprintf(stderr, "Usage: %s [-c target_core] [-n nr_partitions] [<mcos-id>] (program) [args...]\n", argv[0]);
 }
 void init_sigaction(void)
@@ -1329,12 +1333,20 @@ int main(int argc, char **argv)
 	}
 	/* Parse options ("+" denotes stop at the first non-option) */
-	while ((opt = getopt_long(argc, argv, "+c:", mcexec_options, NULL)) != -1) {
+	while ((opt = getopt_long(argc, argv, "+c:n:t:", mcexec_options, NULL)) != -1) {
 		switch (opt) {
 		case 'c':
 			target_core = atoi(optarg);
 			break;
+
+		case 'n':
+			nr_processes = atoi(optarg);
+			break;
+
+		case 't':
+			nr_threads = atoi(optarg);
+			break;
+
 		case 0: /* long opt */
 			break;
-	n_threads = ncpu + 1;
+	if (nr_threads > 0) {
+		n_threads = nr_threads;
+	}
+	else if (getenv("OMP_NUM_THREADS")) {
+		/* Leave some headroom for helper threads.. */
+		n_threads = atoi(getenv("OMP_NUM_THREADS")) + 4;
+	}
+	else {
+		n_threads = ncpu;
+	}
 	/*
 	 * XXX: keep thread_data ncpu sized despite that there are only
@@ -1561,6 +1582,10 @@ int main(int argc, char **argv)
 	 * TODO: implement dynaic thread pool resizing.
 	 */
 	thread_data = (struct thread_data_s *)malloc(sizeof(struct thread_data_s) * (ncpu + 1));
+	if (!thread_data) {
+		fprintf(stderr, "error: allocating thread pool data\n");
+		return 1;
+	}
 	memset(thread_data, '\0', sizeof(struct thread_data_s) * (ncpu + 1));
 #if 0
@@ -1595,6 +1620,24 @@ int main(int argc, char **argv)
 		exit(1);
 	}
+	/* Partitioned execution, obtain CPU set */
+	if (nr_processes > 0) {
+		struct get_cpu_set_arg cpu_set_arg;
+
+		cpu_set_arg.cpu_set = (void *)&desc->cpu_set;
+		cpu_set_arg.cpu_set_size = sizeof(desc->cpu_set);
+		cpu_set_arg.nr_processes = nr_processes;
+		cpu_set_arg.target_core = &target_core;
+
+		if (ioctl(fd, MCEXEC_UP_GET_CPUSET, (void *)&cpu_set_arg) != 0) {
+			perror("getting CPU set for partitioned execution");
+			close(fd);
+			return 1;
+		}
+
+		desc->cpu = target_core;
+	}
+
 	if (ioctl(fd, MCEXEC_UP_PREPARE_IMAGE, (unsigned long)desc) != 0) {
 		perror("prepare");
 		close(fd);
@@ -1906,9 +1949,18 @@ int close_cloexec_fds(int mcos_fd)
 	return 0;
 }
+void chgdevpath(char *in, char *buf)
+{
+	if(!strcmp(in, "/dev/xpmem")){
+		sprintf(in, "/dev/null");
+	}
+}
+
 char *
 chgpath(char *in, char *buf)
 {
+	chgdevpath(in, buf);
+
 #ifdef ENABLE_MCOVERLAYFS
 	return in;
 #endif // ENABLE_MCOVERLAYFS
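
Taken together: -n N requests a 1/N CPU partition through MCEXEC_UP_GET_CPUSET, and the thread pool size comes from -t if given, else OMP_NUM_THREADS plus four helper threads, else ncpu. A hypothetical MPI launch (rank count and binary name are illustrative):

# each of the 4 mcexec instances gets a quarter of the LWK cores,
# assigned cache- and NUMA-locally by mcexec_get_cpuset()
OMP_NUM_THREADS=8 mpirun -np 4 mcexec -n 4 ./mpi_app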

View File

@@ -3,15 +3,15 @@ SRC=$(VPATH)
 IHKDIR=$(IHKBASE)/$(TARGETDIR)
 OBJS = init.o mem.o debug.o mikc.o listeners.o ap.o syscall.o cls.o host.o
 OBJS += process.o copy.o waitq.o futex.o timer.o plist.o fileobj.o shmobj.o
-OBJS += zeroobj.o procfs.o devobj.o sysfs.o
+OBJS += zeroobj.o procfs.o devobj.o sysfs.o xpmem.o
 DEPSRCS=$(wildcard $(SRC)/*.c)
-CFLAGS += -I$(SRC)/include -D__KERNEL__ -g
+CFLAGS += -I$(SRC)/include -D__KERNEL__ -g -fno-omit-frame-pointer -fno-inline -fno-inline-small-functions
 LDFLAGS += -e arch_start
 IHKOBJ = ihk/ihk.o
 include $(SRC)/config/config.$(TARGET)
-include $(IHKBASE)/Makefile.common
+include @abs_builddir@/../../ihk/cokernel/Makefile.common
 # CFLAGS += -I$(SRC)/../arch/$(IHKARCH)/kernel/include -I$(SRC)/../lib/include

View File

@@ -9,7 +9,7 @@ V ?= $(VERBOSE)
 KERNEL = kernel.img
 KERNELS = $(addsuffix /$(KERNEL),$(addprefix $(O)/,$(BUILD_TARGET)))
-SUBCMD_OPTS = V='$(V)'
+SUBCMD_OPTS = V='$(V)' BUILD_IHK_COKERNEL=@abs_builddir@/../../ihk/cokernel
 $(if $(O),,$(error Specify the compilation target directory))
 #$(if $(shell ls $(IHKBASE)/Makefile),,\

View File

@@ -23,7 +23,7 @@
 extern int num_processors;
 struct cpu_local_var *clv;
-static int cpu_local_var_initialized = 0;
+int cpu_local_var_initialized = 0;
 void cpu_local_var_init(void)
 {

View File

@@ -127,6 +127,7 @@ int devobj_create(int fd, size_t len, off_t off, struct memobj **objp, int *maxp
 	obj->memobj.ops = &devobj_ops;
 	obj->memobj.flags = MF_HAS_PAGER;
+	obj->memobj.size = len;
 	obj->handle = result.handle;
 	obj->ref = 1;
 	obj->pfn_pgoff = off / PAGE_SIZE;

View File

@@ -29,22 +29,27 @@
 #define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
 #define ekprintf(...) kprintf(__VA_ARGS__)
-static ihk_spinlock_t fileobj_list_lock = SPIN_LOCK_UNLOCKED;
+mcs_rwlock_lock_t fileobj_list_lock =
+	{{{0}, MCS_RWLOCK_TYPE_COMMON_READER, 0, 0, 0, NULL}, NULL};
 static LIST_HEAD(fileobj_list);
+#define FILEOBJ_PAGE_HASH_SHIFT 9
+#define FILEOBJ_PAGE_HASH_SIZE (1 << FILEOBJ_PAGE_HASH_SHIFT)
+#define FILEOBJ_PAGE_HASH_MASK (FILEOBJ_PAGE_HASH_SIZE - 1)
+
 struct fileobj {
 	struct memobj memobj; /* must be first */
 	long sref;
 	long cref;
 	uintptr_t handle;
-	struct list_head page_list;
 	struct list_head list;
+	struct list_head page_hash[FILEOBJ_PAGE_HASH_SIZE];
+	mcs_rwlock_lock_t page_hash_locks[FILEOBJ_PAGE_HASH_SIZE];
 };
 static memobj_release_func_t fileobj_release;
 static memobj_ref_func_t fileobj_ref;
 static memobj_get_page_func_t fileobj_get_page;
-static memobj_copy_page_func_t fileobj_copy_page;
 static memobj_flush_page_func_t fileobj_flush_page;
 static memobj_invalidate_page_func_t fileobj_invalidate_page;
 static memobj_lookup_page_func_t fileobj_lookup_page;
@@ -53,7 +58,7 @@ static struct memobj_ops fileobj_ops = {
 	.release = &fileobj_release,
 	.ref = &fileobj_ref,
 	.get_page = &fileobj_get_page,
-	.copy_page = &fileobj_copy_page,
+	.copy_page = NULL,
 	.flush_page = &fileobj_flush_page,
 	.invalidate_page = &fileobj_invalidate_page,
 	.lookup_page = &fileobj_lookup_page,
@@ -72,28 +77,36 @@ static struct memobj *to_memobj(struct fileobj *fileobj)
 /***********************************************************************
  * page_list
  */
-static void page_list_init(struct fileobj *obj)
+static void fileobj_page_hash_init(struct fileobj *obj)
 {
-	INIT_LIST_HEAD(&obj->page_list);
+	int i;
+
+	for (i = 0; i < FILEOBJ_PAGE_HASH_SIZE; ++i) {
+		mcs_rwlock_init(&obj->page_hash_locks[i]);
+		INIT_LIST_HEAD(&obj->page_hash[i]);
+	}
 	return;
 }
-static void page_list_insert(struct fileobj *obj, struct page *page)
+/* NOTE: caller must hold page_hash_locks[hash] */
+static void __fileobj_page_hash_insert(struct fileobj *obj,
+		struct page *page, int hash)
 {
-	list_add(&page->list, &obj->page_list);
-	return;
+	list_add(&page->list, &obj->page_hash[hash]);
 }
-static void page_list_remove(struct fileobj *obj, struct page *page)
+/* NOTE: caller must hold page_hash_locks[hash] */
+static void __fileobj_page_hash_remove(struct page *page)
 {
 	list_del(&page->list);
 }
-static struct page *page_list_lookup(struct fileobj *obj, off_t off)
+/* NOTE: caller must hold page_hash_locks[hash] */
+static struct page *__fileobj_page_hash_lookup(struct fileobj *obj,
+		int hash, off_t off)
 {
 	struct page *page;
-	list_for_each_entry(page, &obj->page_list, list) {
+	list_for_each_entry(page, &obj->page_hash[hash], list) {
 		if ((page->mode != PM_WILL_PAGEIO)
 				&& (page->mode != PM_PAGEIO)
 				&& (page->mode != PM_DONE_PAGEIO)
@@ -104,6 +117,7 @@ static struct page *page_list_lookup(struct fileobj *obj, off_t off)
 					obj, off, page->mode);
 			panic("page_list_lookup:invalid obj page");
 		}
+
 		if (page->offset == off) {
 			goto out;
 		}
@ -114,13 +128,22 @@ out:
return page; return page;
} }
static struct page *page_list_first(struct fileobj *obj) static struct page *fileobj_page_hash_first(struct fileobj *obj)
{ {
if (list_empty(&obj->page_list)) { int i;
return NULL;
for (i = 0; i < FILEOBJ_PAGE_HASH_SIZE; ++i) {
if (!list_empty(&obj->page_hash[i])) {
break;
}
} }
return list_first_entry(&obj->page_list, struct page, list); if (i != FILEOBJ_PAGE_HASH_SIZE) {
return list_first_entry(&obj->page_hash[i], struct page, list);
}
else {
return NULL;
}
} }
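The hash spreads a file's pages over FILEOBJ_PAGE_HASH_SIZE (512) buckets, each with its own MCS read/write lock, so concurrent faults on different pages of the same file no longer serialize on a single list lock. A minimal standalone sketch of the bucket math, assuming 4 KiB pages (PAGE_SHIFT == 12; the real constant is architecture-dependent):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */
#define FILEOBJ_PAGE_HASH_SHIFT 9
#define FILEOBJ_PAGE_HASH_SIZE (1 << FILEOBJ_PAGE_HASH_SHIFT)
#define FILEOBJ_PAGE_HASH_MASK (FILEOBJ_PAGE_HASH_SIZE - 1)

int main(void)
{
	uint64_t off;

	/* Consecutive pages map to consecutive buckets, so threads
	 * faulting on nearby pages contend on different locks. */
	for (off = 0; off < (4UL << PAGE_SHIFT); off += 1UL << PAGE_SHIFT)
		printf("offset 0x%llx -> bucket %llu\n",
		       (unsigned long long)off,
		       (unsigned long long)((off >> PAGE_SHIFT) &
					    FILEOBJ_PAGE_HASH_MASK));
	return 0;
}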
/***********************************************************************
@@ -163,10 +186,11 @@ static struct fileobj *obj_list_lookup(uintptr_t handle)
int fileobj_create(int fd, struct memobj **objp, int *maxprotp)
{
	ihk_mc_user_context_t ctx;
-	struct pager_create_result result; // XXX: assumes contiguous physical
+	struct pager_create_result result __attribute__((aligned(64)));
	int error;
	struct fileobj *newobj = NULL;
	struct fileobj *obj;
+	struct mcs_rwlock_node node;

	dkprintf("fileobj_create(%d)\n", fd);
	newobj = kmalloc(sizeof(*newobj), IHK_MC_AP_NOWAIT);
@@ -179,6 +203,7 @@ int fileobj_create(int fd, struct memobj **objp, int *maxprotp)
	ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_CREATE;
	ihk_mc_syscall_arg1(&ctx) = fd;
	ihk_mc_syscall_arg2(&ctx) = virt_to_phys(&result);
+	memset(&result, 0, sizeof(result));

	error = syscall_generic_forwarding(__NR_mmap, &ctx);
	if (error) {
@@ -192,23 +217,39 @@ int fileobj_create(int fd, struct memobj **objp, int *maxprotp)
	newobj->handle = result.handle;
	newobj->sref = 1;
	newobj->cref = 1;
-	page_list_init(newobj);
+	fileobj_page_hash_init(newobj);
	ihk_mc_spinlock_init(&newobj->memobj.lock);

-	ihk_mc_spinlock_lock_noirq(&fileobj_list_lock);
+	mcs_rwlock_writer_lock_noirq(&fileobj_list_lock, &node);
	obj = obj_list_lookup(result.handle);
	if (!obj) {
		obj_list_insert(newobj);
		obj = newobj;
+		to_memobj(obj)->size = result.size;
+		to_memobj(obj)->flags |= result.flags;
+		to_memobj(obj)->status = MEMOBJ_READY;
+		if (to_memobj(obj)->flags & MF_PREFETCH) {
+			to_memobj(obj)->status = MEMOBJ_TO_BE_PREFETCHED;
+		}
		newobj = NULL;
+		dkprintf("%s: new obj 0x%lx cref: %d, %s\n",
+				__FUNCTION__,
+				obj,
+				obj->cref,
+				to_memobj(obj)->flags & MF_ZEROFILL ? "zerofill" : "");
	}
	else {
		++obj->sref;
		++obj->cref;
		memobj_unlock(&obj->memobj);	/* locked by obj_list_lookup() */
+		dkprintf("%s: existing obj 0x%lx cref: %d, %s\n",
+				__FUNCTION__,
+				obj,
+				obj->cref,
+				to_memobj(obj)->flags & MF_ZEROFILL ? "zerofill" : "");
	}
-	ihk_mc_spinlock_unlock_noirq(&fileobj_list_lock);
+	mcs_rwlock_writer_unlock_noirq(&fileobj_list_lock, &node);

	error = 0;
	*objp = to_memobj(obj);
@@ -239,6 +280,7 @@ static void fileobj_release(struct memobj *memobj)
	long free_sref = 0;
	uintptr_t free_handle;
	struct fileobj *free_obj = NULL;
+	struct mcs_rwlock_node node;

	dkprintf("fileobj_release(%p %lx)\n", obj, obj->handle);
@@ -254,17 +296,23 @@ static void fileobj_release(struct memobj *memobj)
	memobj_unlock(&obj->memobj);

	if (free_obj) {
-		ihk_mc_spinlock_lock_noirq(&fileobj_list_lock);
+		dkprintf("%s: release obj 0x%lx cref: %d, free_obj: 0x%lx, %s\n",
+				__FUNCTION__,
+				obj,
+				obj->cref,
+				free_obj,
+				to_memobj(obj)->flags & MF_ZEROFILL ? "zerofill" : "");
+		mcs_rwlock_writer_lock_noirq(&fileobj_list_lock, &node);
		/* zap page_list */
		for (;;) {
			struct page *page;
			void *page_va;

-			page = page_list_first(obj);
+			page = fileobj_page_hash_first(obj);
			if (!page) {
				break;
			}
-			page_list_remove(obj, page);
+			__fileobj_page_hash_remove(page);
			page_va = phys_to_virt(page_to_phys(page));

			if (ihk_atomic_read(&page->count) != 1) {
@@ -295,7 +343,7 @@ static void fileobj_release(struct memobj *memobj)
#endif
		}
		obj_list_remove(free_obj);
-		ihk_mc_spinlock_unlock_noirq(&fileobj_list_lock);
+		mcs_rwlock_writer_unlock_noirq(&fileobj_list_lock, &node);
		kfree(free_obj);
	}
@@ -341,22 +389,33 @@ static void fileobj_do_pageio(void *args0)
	struct page *page;
	ihk_mc_user_context_t ctx;
	ssize_t ss;
+	struct mcs_rwlock_node mcs_node;
+	int hash = (off >> PAGE_SHIFT) & FILEOBJ_PAGE_HASH_MASK;

-	memobj_lock(&obj->memobj);
-	page = page_list_lookup(obj, off);
+	mcs_rwlock_writer_lock_noirq(&obj->page_hash_locks[hash],
+			&mcs_node);
+	page = __fileobj_page_hash_lookup(obj, hash, off);
	if (!page) {
		goto out;
	}

	while (page->mode == PM_PAGEIO) {
-		memobj_unlock(&obj->memobj);
+		mcs_rwlock_writer_unlock_noirq(&obj->page_hash_locks[hash],
+				&mcs_node);
		cpu_pause();
-		memobj_lock(&obj->memobj);
+		mcs_rwlock_writer_lock_noirq(&obj->page_hash_locks[hash],
+				&mcs_node);
	}

	if (page->mode == PM_WILL_PAGEIO) {
+		if (to_memobj(obj)->flags & MF_ZEROFILL) {
+			void *virt = phys_to_virt(page_to_phys(page));
+			memset(virt, 0, PAGE_SIZE);
+		}
+		else {
			page->mode = PM_PAGEIO;
-		memobj_unlock(&obj->memobj);
+			mcs_rwlock_writer_unlock_noirq(&obj->page_hash_locks[hash],
+					&mcs_node);

			ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_READ;
			ihk_mc_syscall_arg1(&ctx) = obj->handle;
@@ -364,9 +423,12 @@ static void fileobj_do_pageio(void *args0)
			ihk_mc_syscall_arg3(&ctx) = pgsize;
			ihk_mc_syscall_arg4(&ctx) = page_to_phys(page);

+			dkprintf("%s: __NR_mmap for handle 0x%lx\n",
+					__FUNCTION__, obj->handle);
			ss = syscall_generic_forwarding(__NR_mmap, &ctx);

-		memobj_lock(&obj->memobj);
+			mcs_rwlock_writer_lock_noirq(&obj->page_hash_locks[hash],
+					&mcs_node);
			if (page->mode != PM_PAGEIO) {
				kprintf("fileobj_do_pageio(%p,%lx,%lx):"
						"invalid mode %x\n",
@@ -387,37 +449,41 @@ static void fileobj_do_pageio(void *args0)
				page->mode = PM_PAGEIO_ERROR;
				goto out;
			}
+		}
		page->mode = PM_DONE_PAGEIO;
	}
out:
-	memobj_unlock(&obj->memobj);
+	mcs_rwlock_writer_unlock_noirq(&obj->page_hash_locks[hash],
+			&mcs_node);

	fileobj_release(&obj->memobj);	/* got fileobj_get_page() */
	kfree(args0);
	dkprintf("fileobj_do_pageio(%p,%lx,%lx):\n", obj, off, pgsize);
	return;
}
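fileobj_do_pageio() drives each page through a small life cycle; the bucket lock is dropped while the read is offloaded to Linux and retaken afterwards, which is what the PM_PAGEIO state covers. An illustrative summary of the states as they are used in this diff (the real PM_* constants are defined elsewhere in McKernel; this enum is documentation only, not the kernel's definition):

/* Illustrative only; order and values are not McKernel's. */
enum page_mode_sketch {
	PM_NONE,		/* freshly allocated, not yet published */
	PM_WILL_PAGEIO,		/* inserted into the page hash, I/O pending */
	PM_PAGEIO,		/* PAGER_REQ_READ in flight, bucket lock dropped */
	PM_DONE_PAGEIO,		/* contents valid (read, or zeroed for MF_ZEROFILL) */
	PM_PAGEIO_ERROR,	/* pager returned a short read or an error */
	PM_MAPPED		/* mapped into user address space */
};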
-static int fileobj_get_page(struct memobj *memobj, off_t off, int p2align, uintptr_t *physp, unsigned long *pflag)
+static int fileobj_get_page(struct memobj *memobj, off_t off,
+		int p2align, uintptr_t *physp, unsigned long *pflag)
{
	struct thread *proc = cpu_local_var(current);
	struct fileobj *obj = to_fileobj(memobj);
-	int error;
+	int error = -1;
	void *virt = NULL;
	int npages;
	uintptr_t phys = -1;
	struct page *page;
	struct pageio_args *args = NULL;
+	struct mcs_rwlock_node mcs_node;
+	int hash = (off >> PAGE_SHIFT) & FILEOBJ_PAGE_HASH_MASK;

	dkprintf("fileobj_get_page(%p,%lx,%x,%p)\n", obj, off, p2align, physp);

-	memobj_lock(&obj->memobj);
	if (p2align != PAGE_P2ALIGN) {
-		error = -ENOMEM;
-		goto out;
+		return -ENOMEM;
	}

-	page = page_list_lookup(obj, off);
+	mcs_rwlock_writer_lock_noirq(&obj->page_hash_locks[hash],
+			&mcs_node);
+	page = __fileobj_page_hash_lookup(obj, hash, off);
	if (!page || (page->mode == PM_WILL_PAGEIO)
			|| (page->mode == PM_PAGEIO)) {
		args = kmalloc(sizeof(*args), IHK_MC_AP_NOWAIT);
@@ -445,13 +511,15 @@ static int fileobj_get_page(struct memobj *memobj, off_t off, int p2align, uintp
			if (page->mode != PM_NONE) {
				panic("fileobj_get_page:invalid new page");
			}
-			page->mode = PM_WILL_PAGEIO;
			page->offset = off;
			ihk_atomic_set(&page->count, 1);
-			page_list_insert(obj, page);
+			__fileobj_page_hash_insert(obj, page, hash);
+			page->mode = PM_WILL_PAGEIO;
		}

+		memobj_lock(&obj->memobj);
		++obj->cref;	/* for fileobj_do_pageio() */
+		memobj_unlock(&obj->memobj);

		args->fileobj = obj;
		args->objoff = off;
@@ -483,7 +551,8 @@ static int fileobj_get_page(struct memobj *memobj, off_t off, int p2align, uintp
	*physp = page_to_phys(page);
	virt = NULL;
out:
-	memobj_unlock(&obj->memobj);
+	mcs_rwlock_writer_unlock_noirq(&obj->page_hash_locks[hash],
+			&mcs_node);
	if (virt) {
		ihk_mc_free_pages(virt, npages);
	}
@@ -495,78 +564,6 @@ out:
	return error;
}
static uintptr_t fileobj_copy_page(
struct memobj *memobj, uintptr_t orgpa, int p2align)
{
struct page *orgpage = phys_to_page(orgpa);
size_t pgsize = PAGE_SIZE << p2align;
int npages = 1 << p2align;
void *newkva = NULL;
uintptr_t newpa = -1;
void *orgkva;
int count;
dkprintf("fileobj_copy_page(%p,%lx,%d)\n", memobj, orgpa, p2align);
if (p2align != PAGE_P2ALIGN) {
panic("p2align");
}
memobj_lock(memobj);
for (;;) {
if (!orgpage || orgpage->mode != PM_MAPPED) {
kprintf("fileobj_copy_page(%p,%lx,%d):"
"invalid cow page. %x\n",
memobj, orgpa, p2align, orgpage ? orgpage->mode : 0);
panic("fileobj_copy_page:invalid cow page");
}
count = ihk_atomic_read(&orgpage->count);
if (count == 2) { // XXX: private only
list_del(&orgpage->list);
ihk_atomic_dec(&orgpage->count);
orgpage->mode = PM_NONE;
newpa = orgpa;
break;
}
if (count <= 0) {
kprintf("fileobj_copy_page(%p,%lx,%d):"
"orgpage count corrupted. %x\n",
memobj, orgpa, p2align, count);
panic("fileobj_copy_page:orgpage count corrupted");
}
if (newkva) {
orgkva = phys_to_virt(orgpa);
memcpy(newkva, orgkva, pgsize);
ihk_atomic_dec(&orgpage->count);
newpa = virt_to_phys(newkva);
if (phys_to_page(newpa)) {
page_map(phys_to_page(newpa));
}
newkva = NULL; /* avoid ihk_mc_free_pages() */
break;
}
memobj_unlock(memobj);
newkva = ihk_mc_alloc_aligned_pages(npages, p2align,
IHK_MC_AP_NOWAIT);
if (!newkva) {
kprintf("fileobj_copy_page(%p,%lx,%d):"
"alloc page failed\n",
memobj, orgpa, p2align);
goto out;
}
memobj_lock(memobj);
}
memobj_unlock(memobj);
out:
if (newkva) {
ihk_mc_free_pages(newkva, npages);
}
dkprintf("fileobj_copy_page(%p,%lx,%d): %lx\n",
memobj, orgpa, p2align, newpa);
return newpa;
}
static int fileobj_flush_page(struct memobj *memobj, uintptr_t phys,
		size_t pgsize)
{
@@ -575,6 +572,10 @@ static int fileobj_flush_page(struct memobj *memobj, uintptr_t phys,
	ihk_mc_user_context_t ctx;
	ssize_t ss;

+	if (to_memobj(obj)->flags & MF_ZEROFILL) {
+		return 0;
+	}
+
	page = phys_to_page(phys);
	if (!page) {
		kprintf("%s: warning: tried to flush non-existing page for phys addr: 0x%lx\n",
@@ -603,63 +604,48 @@ static int fileobj_flush_page(struct memobj *memobj, uintptr_t phys,
static int fileobj_invalidate_page(struct memobj *memobj, uintptr_t phys,
		size_t pgsize)
{
-	struct fileobj *obj = to_fileobj(memobj);
-	int error;
-	struct page *page;

	dkprintf("fileobj_invalidate_page(%p,%#lx,%#lx)\n",
			memobj, phys, pgsize);

-	if (!(page = phys_to_page(phys))
-			|| !(page = page_list_lookup(obj, page->offset))) {
-		error = 0;
-		goto out;
+	/* TODO: keep track of reverse mappings so that invalidation
+	 * can be performed */
+	kprintf("%s: WARNING: file mapping invalidation not supported\n",
+			__FUNCTION__);
+	return 0;
}

-	if (ihk_atomic_read(&page->count) == 1) {
-		if (page_unmap(page)) {
-			ihk_mc_free_pages(phys_to_virt(phys),
-					pgsize/PAGE_SIZE);
-		}
-	}
-	error = 0;
-out:
-	dkprintf("fileobj_invalidate_page(%p,%#lx,%#lx):%d\n",
-			memobj, phys, pgsize, error);
-	return error;
-}

-static int fileobj_lookup_page(struct memobj *memobj, off_t off, int p2align, uintptr_t *physp, unsigned long *pflag)
+static int fileobj_lookup_page(struct memobj *memobj, off_t off,
+		int p2align, uintptr_t *physp, unsigned long *pflag)
{
	struct fileobj *obj = to_fileobj(memobj);
-	int error;
-	uintptr_t phys = -1;
+	int error = -1;
	struct page *page;
+	struct mcs_rwlock_node mcs_node;
+	int hash = (off >> PAGE_SHIFT) & FILEOBJ_PAGE_HASH_MASK;

	dkprintf("fileobj_lookup_page(%p,%lx,%x,%p)\n", obj, off, p2align, physp);

-	memobj_lock(&obj->memobj);
	if (p2align != PAGE_P2ALIGN) {
-		error = -ENOMEM;
-		goto out;
+		return -ENOMEM;
	}

-	page = page_list_lookup(obj, off);
+	mcs_rwlock_reader_lock_noirq(&obj->page_hash_locks[hash],
+			&mcs_node);
+	page = __fileobj_page_hash_lookup(obj, hash, off);
	if (!page) {
-		error = -ENOENT;
-		dkprintf("fileobj_lookup_page(%p,%lx,%x,%p): page not found. %d\n", obj, off, p2align, physp, error);
		goto out;
	}

-	phys = page_to_phys(page);
+	*physp = page_to_phys(page);
	error = 0;
-	if (physp) {
-		*physp = phys;
-	}
out:
-	memobj_unlock(&obj->memobj);
-	dkprintf("fileobj_lookup_page(%p,%lx,%x,%p): %d %lx\n",
-			obj, off, p2align, physp, error, phys);
+	mcs_rwlock_reader_unlock_noirq(&obj->page_hash_locks[hash],
+			&mcs_node);
+	dkprintf("fileobj_lookup_page(%p,%lx,%x,%p): %d \n",
+			obj, off, p2align, physp, error);
	return error;
}
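Note the asymmetry: fileobj_lookup_page() only reads the bucket, so it takes the MCS lock in reader mode, while the get_page and pageio paths mutate page state and take it in writer mode. A runnable userspace analogue of the same pattern using POSIX rwlocks (bucket count and page size here are assumptions, not McKernel code):

#include <pthread.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumption: 4 KiB pages */
#define NBUCKETS   512

static pthread_rwlock_t bucket_lock[NBUCKETS];

static int bucket_of(long off)
{
	return (off >> PAGE_SHIFT) & (NBUCKETS - 1);
}

static void lookup(long off)		/* read-only path: shared lock */
{
	int h = bucket_of(off);
	pthread_rwlock_rdlock(&bucket_lock[h]);
	/* ... walk the bucket's page list ... */
	pthread_rwlock_unlock(&bucket_lock[h]);
}

static void insert(long off)		/* mutating path: exclusive lock */
{
	int h = bucket_of(off);
	pthread_rwlock_wrlock(&bucket_lock[h]);
	/* ... add the page to the bucket's list ... */
	pthread_rwlock_unlock(&bucket_lock[h]);
}

int main(void)
{
	int i;

	for (i = 0; i < NBUCKETS; ++i)
		pthread_rwlock_init(&bucket_lock[i], NULL);
	insert(0x5000);
	lookup(0x5000);
	printf("offset 0x5000 -> bucket %d\n", bucket_of(0x5000));
	return 0;
}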


@@ -393,7 +393,9 @@ static int process_msg_prepare_process(unsigned long rphys)
	memcpy_long(pn, p, sizeof(struct program_load_desc)
			+ sizeof(struct program_image_section) * n);

-	if((thread = create_thread(p->entry)) == NULL){
+	if ((thread = create_thread(p->entry,
+					(unsigned long *)&p->cpu_set,
+					sizeof(p->cpu_set))) == NULL) {
		kfree(pn);
		ihk_mc_unmap_virtual(p, npages, 1);
		ihk_mc_unmap_memory(NULL, phys, sz);
@@ -579,14 +581,16 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
		break;

	case SCD_MSG_SCHEDULE_PROCESS:
-		cpuid = obtain_clone_cpuid();
+		thread = (struct thread *)packet->arg;
+		cpuid = obtain_clone_cpuid(&thread->cpu_set);
		if (cpuid == -1) {
			kprintf("No CPU available\n");
			ret = -1;
			break;
		}

		dkprintf("SCD_MSG_SCHEDULE_PROCESS: %lx\n", packet->arg);
-		thread = (struct thread *)packet->arg;
		proc = thread->proc;
		thread->tid = proc->pid;
		proc->status = PS_RUNNING;
@@ -595,7 +599,6 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
		chain_process(proc);
		runq_add_thread(thread, cpuid);

-		//cpu_local_var(next) = (struct thread *)packet->arg;
		ret = 0;
		break;
@@ -683,7 +686,7 @@ void init_host_syscall_channel(void)
	param.port = 501;
	param.pkt_size = sizeof(struct ikc_scd_packet);
-	param.queue_size = PAGE_SIZE;
+	param.queue_size = PAGE_SIZE * 4;
	param.magic = 0x1129;
	param.handler = syscall_packet_handler;
@@ -710,7 +713,7 @@ void init_host_syscall_channel2(void)
	param.port = 502;
	param.pkt_size = sizeof(struct ikc_scd_packet);
-	param.queue_size = PAGE_SIZE;
+	param.queue_size = PAGE_SIZE * 4;
	param.magic = 0x1329;
	param.handler = syscall_packet_handler;
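Quadrupling queue_size is the "increase queue size to avoid deadlock in ikc_send()" change from the commit list: with more outstanding slots in the ring, it is far less likely to fill up while a peer is blocked waiting for a response. A back-of-envelope sketch; the 4 KiB page and 128-byte packet size are assumptions (the real sizeof(struct ikc_scd_packet) is build-dependent):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;	/* assumption */
	unsigned long pkt_size  = 128;	/* assumption, not the real sizeof */

	printf("old slots: %lu, new slots: %lu\n",
	       page_size / pkt_size,		/* 32  */
	       (page_size * 4) / pkt_size);	/* 128 */
	return 0;
}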


@@ -16,7 +16,7 @@
extern void arch_init(void);
extern void kmsg_init(int);
extern void mem_init(void);
-extern void ikc_master_init(void);
+extern void ihk_ikc_master_init(void);
extern void ap_init(void);
extern void arch_ready(void);
extern void mc_ikc_test_init(void);
@@ -32,4 +32,6 @@ extern void cpu_sysfs_setup(void);
extern char *find_command_line(char *name);

+extern int num_processors;
+
#endif


@@ -32,12 +32,19 @@ enum {
	MF_HAS_PAGER = 0x0001,
	MF_SHMDT_OK = 0x0002,
	MF_IS_REMOVABLE = 0x0004,
+	MF_PREFETCH = 0x0008,
+	MF_ZEROFILL = 0x0010,
+	MF_END
};

+#define MEMOBJ_READY			0
+#define MEMOBJ_TO_BE_PREFETCHED		1
+
struct memobj {
	struct memobj_ops *ops;
	uint32_t flags;
-	int8_t padding[4];
+	uint32_t status;
+	size_t size;
	ihk_spinlock_t lock;
};


@@ -30,7 +30,8 @@ enum pager_op {
struct pager_create_result {
	uintptr_t handle;
	int maxprot;
-	int8_t padding[4];
+	uint32_t flags;
+	size_t size;
};

/*


@@ -166,7 +166,7 @@
#define NOPHYS ((uintptr_t)-1)

-#define PROCESS_NUMA_MASK_BITS 64
+#define PROCESS_NUMA_MASK_BITS 256

/*
 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
@@ -232,6 +232,8 @@ enum mpol_rebind_step {
#include <waitq.h>
#include <futex.h>

+//#define TRACK_SYSCALLS
+
struct resource_set;
struct process_hash;
struct thread_hash;
@@ -405,7 +407,7 @@ struct mckfd {
#define SFD_NONBLOCK 04000

struct sig_common {
-	ihk_spinlock_t lock;
+	mcs_rwlock_lock_t lock;
	ihk_atomic_t use;
	struct k_sigaction action[_NSIG];
	struct list_head sigpending;
@@ -466,7 +468,7 @@ struct process {
	// V +---- |
	// PS_STOPPED -----+
	// (PS_TRACED)
-	int exit_status;
+	int exit_status; // only for zombie

	/* Store exit_status for a group of threads when stopped by SIGSTOP.
	   exit_status can't be used because values of exit_status of threads
@@ -578,6 +580,7 @@ struct thread {
	// PS_TRACED
	// PS_INTERRPUTIBLE
	// PS_UNINTERRUPTIBLE
+	int exit_status;

	// process vm
	struct process_vm *vm;
@@ -608,12 +611,20 @@ struct thread {
	fp_regs_struct *fp_regs;
	int in_syscall_offload;

+#ifdef TRACK_SYSCALLS
+	int socc_enabled;
+	uint64_t *syscall_times;
+	uint32_t *syscall_cnts;
+	uint64_t *offload_times;
+	uint32_t *offload_cnts;
+#endif // TRACK_SYSCALLS
+
	// signal
	struct sig_common *sigcommon;
	sigset_t sigmask;
	stack_t sigstack;
	struct list_head sigpending;
-	ihk_spinlock_t sigpendinglock;
+	mcs_rwlock_lock_t sigpendinglock;
	volatile int sigevent;

	// gpio
@@ -689,7 +700,8 @@ static inline int has_cap_sys_admin(struct thread *th)
void hold_address_space(struct address_space *);
void release_address_space(struct address_space *);
-struct thread *create_thread(unsigned long user_pc);
+struct thread *create_thread(unsigned long user_pc,
+		unsigned long *__cpu_set, size_t cpu_set_size);
struct thread *clone_thread(struct thread *org, unsigned long pc,
		unsigned long sp, int clone_flags);
void destroy_thread(struct thread *thread);


@@ -149,6 +149,10 @@ struct program_image_section {
#define MCK_RLIMIT_SIGPENDING 14
#define MCK_RLIMIT_STACK 15

+#define PLD_CPU_SET_MAX_CPUS 1024
+typedef unsigned long __cpu_set_unit;
+#define PLD_CPU_SET_SIZE (PLD_CPU_SET_MAX_CPUS / (8 * sizeof(__cpu_set_unit)))
+
struct program_load_desc {
	int num_sections;
	int status;
@@ -178,6 +182,7 @@ struct program_load_desc {
	struct rlimit rlimit[MCK_RLIM_MAX];
	unsigned long interp_align;
	char shell_path[SHELL_PATH_MAX_LEN];
+	__cpu_set_unit cpu_set[PLD_CPU_SET_SIZE];
	struct program_image_section sections[0];
};
@@ -387,6 +392,7 @@ extern struct tod_data_s tod_data; /* residing in arch-dependent file */
void reset_cputime();
void set_cputime(int mode);

+int do_munmap(void *addr, size_t len);
intptr_t do_mmap(intptr_t addr0, size_t len0, int prot, int flags, int fd,
		off_t off0);
void clear_host_pte(uintptr_t addr, size_t len);
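The cpu_set added to program_load_desc is a fixed-size bitmap sized at compile time: on LP64 targets each __cpu_set_unit holds 64 bits, so covering 1024 CPUs costs 16 units (128 bytes) per descriptor. A quick check of that arithmetic:

#include <stdio.h>

typedef unsigned long __cpu_set_unit;
#define PLD_CPU_SET_MAX_CPUS 1024
#define PLD_CPU_SET_SIZE (PLD_CPU_SET_MAX_CPUS / (8 * sizeof(__cpu_set_unit)))

int main(void)
{
	/* On LP64, sizeof(unsigned long) == 8, so 1024 / 64 = 16 units,
	 * i.e. 128 extra bytes in every program_load_desc. */
	printf("units: %zu, bytes: %zu\n",
	       PLD_CPU_SET_SIZE,
	       PLD_CPU_SET_SIZE * sizeof(__cpu_set_unit));
	return 0;
}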

kernel/include/xpmem.h (new file, 21 lines)

@@ -0,0 +1,21 @@
/**
* \file xpmem.h
* License details are found in the file LICENSE.
* \brief
* Structures and functions of xpmem
*/
/*
* HISTORY
*/
#ifndef _XPMEM_H
#define _XPMEM_H
#include <ihk/context.h>
#define XPMEM_DEV_PATH "/dev/xpmem"
extern int xpmem_open(ihk_mc_user_context_t *ctx);
#endif /* _XPMEM_H */


@@ -0,0 +1,388 @@
/**
* \file xpmem_private.h
* License details are found in the file LICENSE.
* \brief
* Private Cross Partition Memory (XPMEM) structures and macros.
*/
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
* Copyright 2009, 2010, 2014 Cray Inc. All Rights Reserved
* Copyright (c) 2014-2016 Los Alamos National Security, LCC. All rights
* reserved.
*/
/*
* HISTORY
*/
#ifndef _XPMEM_PRIVATE_H
#define _XPMEM_PRIVATE_H
#include <mc_xpmem.h>
#include <xpmem.h>
#define XPMEM_CURRENT_VERSION 0x00026003
//#define DEBUG_PRINT_XPMEM
#ifdef DEBUG_PRINT_XPMEM
#define dkprintf(...) kprintf(__VA_ARGS__)
#define ekprintf(...) kprintf(__VA_ARGS__)
#define XPMEM_DEBUG(format, a...) kprintf("[%d] %s: "format"\n", cpu_local_var(current)->proc->rgid, __func__, ##a)
#else
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#define ekprintf(...) kprintf(__VA_ARGS__)
#define XPMEM_DEBUG(format, a...) do { if (0) kprintf("\n"); } while (0)
#endif
//#define USE_DBUG_ON
#ifdef USE_DBUG_ON
#define DBUG_ON(condition) do { if (condition) kprintf("[%d] BUG: func=%s\n", cpu_local_var(current)->proc->rgid, __func__); } while (0)
#else
#define DBUG_ON(condition)
#endif
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
#define min(x, y) ({ \
__typeof__(x) _min1 = (x); \
__typeof__(y) _min2 = (y); \
(void) (&_min1 == &_min2); \
_min1 < _min2 ? _min1 : _min2;})
#define max(x, y) ({ \
__typeof__(x) _max1 = (x); \
__typeof__(y) _max2 = (y); \
(void) (&_max1 == &_max2); \
_max1 > _max2 ? _max1 : _max2;})
#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)
static inline void * ERR_PTR(long error)
{
return (void *)error;
}
static inline long PTR_ERR(const void *ptr)
{
return (long)ptr;
}
static inline long IS_ERR(const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
static inline long IS_ERR_OR_NULL(const void *ptr)
{
return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}
/*
* Both the xpmem_segid_t and xpmem_apid_t are of type __s64 and designed
* to be opaque to the user. Both consist of the same underlying fields.
*
* The 'uniq' field is designed to give each segid or apid a unique value.
* Each type is only unique with respect to itself.
*
* An ID is never less than or equal to zero.
*/
struct xpmem_id {
pid_t tgid; /* thread group that owns ID */
unsigned int uniq; /* this value makes the ID unique */
};
typedef union {
struct xpmem_id xpmem_id;
xpmem_segid_t segid;
xpmem_apid_t apid;
} xpmem_id_t;
/* Shift INT_MAX by one so we can tell when we overflow. */
#define XPMEM_MAX_UNIQ_ID (INT_MAX >> 1)
static inline pid_t xpmem_segid_to_tgid(xpmem_segid_t segid)
{
DBUG_ON(segid <= 0);
return ((xpmem_id_t *)&segid)->xpmem_id.tgid;
}
static inline pid_t xpmem_apid_to_tgid(xpmem_apid_t apid)
{
DBUG_ON(apid <= 0);
return ((xpmem_id_t *)&apid)->xpmem_id.tgid;
}
/*
* Hash Tables
*
* XPMEM utilizes hash tables to enable faster lookups of list entries.
* These hash tables are implemented as arrays. A simple modulus of the hash
* key yields the appropriate array index. A hash table's array element (i.e.,
* hash table bucket) consists of a hash list and the lock that protects it.
*
* XPMEM has the following two hash tables:
*
* table bucket key
* part->tg_hashtable list of struct xpmem_thread_group tgid
* tg->ap_hashtable list of struct xpmem_access_permit apid.uniq
*/
struct xpmem_hashlist {
mcs_rwlock_lock_t lock; /* lock for hash list */
struct list_head list; /* hash list */
};
#define XPMEM_TG_HASHTABLE_SIZE 8
#define XPMEM_AP_HASHTABLE_SIZE 8
static inline int xpmem_tg_hashtable_index(pid_t tgid)
{
int index;
index = (unsigned int)tgid % XPMEM_TG_HASHTABLE_SIZE;
XPMEM_DEBUG("return: tgid=%lu, index=%d", tgid, index);
return index;
}
static inline int xpmem_ap_hashtable_index(xpmem_apid_t apid)
{
int index;
DBUG_ON(apid <= 0);
index = ((xpmem_id_t *)&apid)->xpmem_id.uniq % XPMEM_AP_HASHTABLE_SIZE;
XPMEM_DEBUG("return: apid=%lu, index=%d", apid, index);
return index;
}
/*
* general internal driver structures
*/
struct xpmem_thread_group {
ihk_spinlock_t lock; /* tg lock */
pid_t tgid; /* tg's tgid */
uid_t uid; /* tg's uid */
gid_t gid; /* tg's gid */
volatile int flags; /* tg attributes and state */
ihk_atomic_t uniq_segid;
ihk_atomic_t uniq_apid;
mcs_rwlock_lock_t seg_list_lock;
struct list_head seg_list; /* tg's list of segs */
ihk_atomic_t refcnt; /* references to tg */
ihk_atomic_t n_pinned; /* #of pages pinned by this tg */
struct list_head tg_hashlist; /* tg hash list */
struct thread *group_leader; /* thread group leader */
struct process_vm *vm; /* tg's mm */
ihk_atomic_t n_recall_PFNs; /* #of recall of PFNs in progress */
struct xpmem_hashlist ap_hashtable[]; /* locks + ap hash lists */
};
struct xpmem_segment {
ihk_spinlock_t lock; /* seg lock */
mcs_rwlock_lock_t seg_lock; /* seg sema */
xpmem_segid_t segid; /* unique segid */
unsigned long vaddr; /* starting address */
size_t size; /* size of seg */
int permit_type; /* permission scheme */
void *permit_value; /* permission data */
volatile int flags; /* seg attributes and state */
ihk_atomic_t refcnt; /* references to seg */
struct xpmem_thread_group *tg; /* creator tg */
struct list_head ap_list; /* local access permits of seg */
struct list_head seg_list; /* tg's list of segs */
};
struct xpmem_access_permit {
ihk_spinlock_t lock; /* access permit lock */
xpmem_apid_t apid; /* unique apid */
int mode; /* read/write mode */
volatile int flags; /* access permit attributes and state */
ihk_atomic_t refcnt; /* references to access permit */
struct xpmem_segment *seg; /* seg permitted to be accessed */
struct xpmem_thread_group *tg; /* access permit's tg */
struct list_head att_list; /* atts of this access permit's seg */
struct list_head ap_list; /* access permits linked to seg */
struct list_head ap_hashlist; /* access permit hash list */
};
struct xpmem_attachment {
mcs_rwlock_lock_t at_lock; /* att lock for serialization */
struct mcs_rwlock_node_irqsave at_irqsave; /* att lock for serialization */
unsigned long vaddr; /* starting address of seg attached */
unsigned long at_vaddr; /* address where seg is attached */
size_t at_size; /* size of seg attachment */
struct vm_range *at_vma; /* vma where seg is attachment */
volatile int flags; /* att attributes and state */
ihk_atomic_t refcnt; /* references to att */
struct xpmem_access_permit *ap; /* associated access permit */
struct list_head att_list; /* atts linked to access permit */
struct process_vm *vm; /* mm struct attached to */
mcs_rwlock_lock_t invalidate_lock; /* to serialize page table invalidates */
};
struct xpmem_partition {
ihk_atomic_t n_opened; /* # of /dev/xpmem opened */
struct xpmem_hashlist tg_hashtable[]; /* locks + tg hash lists */
};
#define XPMEM_FLAG_DESTROYING 0x00040 /* being destroyed */
#define XPMEM_FLAG_DESTROYED 0x00080 /* 'being destroyed' finished */
#define XPMEM_FLAG_VALIDPTEs 0x00200 /* valid PTEs exist */
struct xpmem_perm {
uid_t uid;
gid_t gid;
unsigned long mode;
};
#define XPMEM_PERM_IRUSR 00400
#define XPMEM_PERM_IWUSR 00200
static int xpmem_ioctl(struct mckfd *mckfd, ihk_mc_user_context_t *ctx);
static int xpmem_close( struct mckfd *mckfd, ihk_mc_user_context_t *ctx);
static int xpmem_init(void);
static void xpmem_exit(void);
static int __xpmem_open(void);
static void xpmem_destroy_tg(struct xpmem_thread_group *);
static int xpmem_make(unsigned long, size_t, int, void *, xpmem_segid_t *);
static xpmem_segid_t xpmem_make_segid(struct xpmem_thread_group *);
static int xpmem_remove(xpmem_segid_t);
static void xpmem_remove_seg(struct xpmem_thread_group *,
struct xpmem_segment *);
static void xpmem_clear_PTEs(struct xpmem_segment *);
extern struct xpmem_partition *xpmem_my_part;
static struct xpmem_thread_group * __xpmem_tg_ref_by_tgid_nolock_internal(
pid_t, int, int);
static inline struct xpmem_thread_group *__xpmem_tg_ref_by_tgid(
pid_t tgid,
int return_destroying)
{
struct xpmem_thread_group *tg;
int index;
struct mcs_rwlock_node_irqsave lock;
XPMEM_DEBUG("call: tgid=%d, return_destroying=%d",
tgid, return_destroying);
index = xpmem_tg_hashtable_index(tgid);
mcs_rwlock_reader_lock(&xpmem_my_part->tg_hashtable[index].lock, &lock);
tg = __xpmem_tg_ref_by_tgid_nolock_internal(tgid, index,
return_destroying);
mcs_rwlock_reader_unlock(&xpmem_my_part->tg_hashtable[index].lock,
&lock);
XPMEM_DEBUG("return: tg=0x%p", tg);
return tg;
}
static inline struct xpmem_thread_group *__xpmem_tg_ref_by_tgid_nolock(
pid_t tgid,
int return_destroying)
{
struct xpmem_thread_group *tg;
XPMEM_DEBUG("call: tgid=%d, return_destroying=%d",
tgid, return_destroying);
tg = __xpmem_tg_ref_by_tgid_nolock_internal(tgid,
xpmem_tg_hashtable_index(tgid), return_destroying);
XPMEM_DEBUG("return: tg=0x%p", tg);
return tg;
}
#define xpmem_tg_ref_by_tgid(t) __xpmem_tg_ref_by_tgid(t, 0)
#define xpmem_tg_ref_by_tgid_all(t) __xpmem_tg_ref_by_tgid(t, 1)
#define xpmem_tg_ref_by_tgid_nolock(t) __xpmem_tg_ref_by_tgid_nolock(t, 0)
#define xpmem_tg_ref_by_tgid_all_nolock(t) __xpmem_tg_ref_by_tgid_nolock(t, 1)
static struct xpmem_thread_group * xpmem_tg_ref_by_segid(xpmem_segid_t);
static void xpmem_tg_deref(struct xpmem_thread_group *);
static struct xpmem_segment *xpmem_seg_ref_by_segid(struct xpmem_thread_group *,
xpmem_segid_t);
static void xpmem_seg_deref(struct xpmem_segment *);
/*
* Inlines that mark an internal driver structure as being destroyable or not.
* The idea is to set the refcnt to 1 at structure creation time and then
* drop that reference at the time the structure is to be destroyed.
*/
static inline void xpmem_tg_not_destroyable(
struct xpmem_thread_group *tg)
{
ihk_atomic_set(&tg->refcnt, 1);
XPMEM_DEBUG("return: tg->refcnt=%d", tg->refcnt);
}
static inline void xpmem_tg_destroyable(
struct xpmem_thread_group *tg)
{
XPMEM_DEBUG("call: ");
xpmem_tg_deref(tg);
XPMEM_DEBUG("return: ");
}
static inline void xpmem_seg_not_destroyable(
struct xpmem_segment *seg)
{
ihk_atomic_set(&seg->refcnt, 1);
XPMEM_DEBUG("return: seg->refcnt=%d", seg->refcnt);
}
static inline void xpmem_seg_destroyable(
struct xpmem_segment *seg)
{
XPMEM_DEBUG("call: ");
xpmem_seg_deref(seg);
XPMEM_DEBUG("return: ");
}
/*
* Inlines that increment the refcnt for the specified structure.
*/
static inline void xpmem_tg_ref(
struct xpmem_thread_group *tg)
{
DBUG_ON(ihk_atomic_read(&tg->refcnt) <= 0);
ihk_atomic_inc(&tg->refcnt);
XPMEM_DEBUG("return: tg->refcnt=%d", tg->refcnt);
}
static inline void xpmem_seg_ref(
struct xpmem_segment *seg)
{
DBUG_ON(ihk_atomic_read(&seg->refcnt) <= 0);
ihk_atomic_inc(&seg->refcnt);
XPMEM_DEBUG("return: seg->refcnt=%d", seg->refcnt);
}
#endif /* _XPMEM_PRIVATE_H */
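The *_not_destroyable/*_destroyable inlines implement a common "creation reference" idiom: the refcount starts at 1, users take and drop extra references with xpmem_tg_ref()/xpmem_tg_deref(), and marking the object destroyable simply drops the creation reference so that whichever side is last frees it. A runnable userspace analogue with C11 atomics (the struct and function names are illustrative, not XPMEM's):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { atomic_int refcnt; };

/* Creation reference: refcnt starts at 1 on behalf of the creator. */
static void obj_not_destroyable(struct obj *o) { atomic_store(&o->refcnt, 1); }
static void obj_ref(struct obj *o)  { atomic_fetch_add(&o->refcnt, 1); }
static void obj_deref(struct obj *o)
{
	/* fetch_sub returns the old value; 1 means we dropped the last ref. */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(o);
	}
}
/* "Destroyable" is just dropping the creation reference. */
static void obj_destroyable(struct obj *o) { obj_deref(o); }

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	obj_not_destroyable(o);	/* created, refcnt = 1 */
	obj_ref(o);		/* a user takes a reference, refcnt = 2 */
	obj_destroyable(o);	/* owner allows destruction, refcnt = 1 */
	obj_deref(o);		/* last user drops it -> freed */
	return 0;
}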


@@ -108,11 +108,11 @@ static void dma_test(void)
}
#endif

-extern char *ihk_mc_get_kernel_args(void);
+extern char *ihk_get_kargs(void);

char *find_command_line(char *name)
{
-	char *cmdline = ihk_mc_get_kernel_args();
+	char *cmdline = ihk_get_kargs();

	if (!cmdline) {
		return NULL;
@@ -122,7 +122,7 @@ char *find_command_line(char *name)

static void parse_kargs(void)
{
-	kprintf("KCommand Line: %s\n", ihk_mc_get_kernel_args());
+	kprintf("KCommand Line: %s\n", ihk_get_kargs());

	if (1) {
		char *key = "osnum=";
@@ -254,7 +254,7 @@ static void rest_init(void)
	time_init();
	kmalloc_init();

-	ikc_master_init();
+	ihk_ikc_master_init();

	proc_init();
@@ -373,6 +373,7 @@ int main(void)
	kputs("IHK/McKernel started.\n");

+	ihk_set_kmsg(virt_to_phys(&kmsg_buf), IHK_KMSG_SIZE);
	arch_init();

	/*


@@ -494,18 +494,96 @@ static void reserve_pages(struct ihk_page_allocator_desc *pa_allocator,
	ihk_pagealloc_reserve(pa_allocator, start, end);
}

+extern int cpu_local_var_initialized;
static void *allocate_aligned_pages(int npages, int p2align,
		enum ihk_mc_ap_flag flag)
{
-	unsigned long pa;
-	int i;
+	unsigned long pa = 0;
+	int i, node;
	struct ihk_page_allocator_desc *pa_allocator;
-	/* TODO: match NUMA id and distance matrix with allocating core */
-	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
+
+	/* Not yet initialized or idle process */
+	if (!cpu_local_var_initialized ||
+			!cpu_local_var(current) ||
+			!cpu_local_var(current)->vm)
+		goto distance_based;
+
+	/* User requested policy? */
+	switch (cpu_local_var(current)->vm->numa_mem_policy) {
+		case MPOL_BIND:
+		case MPOL_PREFERRED:
+			for_each_set_bit(node,
+					cpu_local_var(current)->proc->vm->numa_mask,
+					ihk_mc_get_nr_numa_nodes()) {
				list_for_each_entry(pa_allocator,
-				&memory_nodes[(ihk_mc_get_numa_id() + i) %
+						&memory_nodes[node].allocators, list) {
+					pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);
+					if (pa) {
+						dkprintf("%s: policy: CPU @ node %d allocated "
+								"%d pages from node %d\n",
+								__FUNCTION__,
+								ihk_mc_get_numa_id(),
+								npages, node);
+						break;
+					}
+				}
+				if (pa) break;
+			}
+			break;
+		case MPOL_INTERLEAVE:
+			/* TODO: */
+			break;
+		default:
+			break;
+	}
+
+	if (pa)
+		return phys_to_virt(pa);
+
+distance_based:
+	node = ihk_mc_get_numa_id();
+	/* Look at nodes in the order of distance */
+	if (!memory_nodes[node].nodes_by_distance)
+		goto order_based;
+
+	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
+		list_for_each_entry(pa_allocator,
+				&memory_nodes[memory_nodes[node].
+				nodes_by_distance[i].id].allocators, list) {
+			pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);
+			if (pa) {
+				dkprintf("%s: distance: CPU @ node %d allocated "
+						"%d pages from node %d\n",
+						__FUNCTION__,
+						ihk_mc_get_numa_id(),
+						npages,
+						memory_nodes[node].nodes_by_distance[i].id);
+				break;
+			}
+		}
+		if (pa) break;
+	}
+
+	if (pa)
+		return phys_to_virt(pa);
+
+order_based:
+	node = ihk_mc_get_numa_id();
+	/* Fall back to regular order */
+	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
+		list_for_each_entry(pa_allocator,
+				&memory_nodes[(node + i) %
				ihk_mc_get_nr_numa_nodes()].allocators, list) {
			pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);
@@ -754,6 +832,8 @@ void remote_flush_tlb_cpumask(struct process_vm *vm,
		flush_tlb();
	}

+	/* Flush on this core */
+	flush_tlb_single(addr & PAGE_MASK);

	/* Wait for all cores */
	while (ihk_atomic_read(&flush_entry->pending) != 0) {
		cpu_pause();
@@ -804,8 +884,8 @@ static void page_fault_handler(void *fault_addr, uint64_t reason, void *regs)
	int error;

	set_cputime(interrupt_from_user(regs)? 1: 2);
-	dkprintf("[%d]page_fault_handler(%p,%lx,%p)\n",
-			ihk_mc_get_processor_id(), fault_addr, reason, regs);
+	dkprintf("%s: addr: %p, reason: %lx, regs: %p\n",
+			__FUNCTION__, fault_addr, reason, regs);

	preempt_disable();
@@ -860,9 +940,8 @@ static void page_fault_handler(void *fault_addr, uint64_t reason, void *regs)
	error = 0;
	preempt_enable();
out:
-	dkprintf("[%d]page_fault_handler(%p,%lx,%p): (%d)\n",
-			ihk_mc_get_processor_id(), fault_addr, reason,
-			regs, error);
+	dkprintf("%s: addr: %p, reason: %lx, regs: %p -> error: %d\n",
+			__FUNCTION__, fault_addr, reason, regs, error);
	check_need_resched();
	set_cputime(0);
	return;
@@ -930,6 +1009,7 @@ static void numa_init(void)
		memory_nodes[i].linux_numa_id = linux_numa_id;
		memory_nodes[i].type = type;
		INIT_LIST_HEAD(&memory_nodes[i].allocators);
+		memory_nodes[i].nodes_by_distance = 0;

		kprintf("NUMA: %d, Linux NUMA: %d, type: %d\n",
				i, linux_numa_id, type);
@@ -953,6 +1033,72 @@ static void numa_init(void)
	}
}
static void numa_distances_init()
{
int i, j, swapped;
for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
/* TODO: allocate on target node */
memory_nodes[i].nodes_by_distance =
ihk_mc_alloc_pages((sizeof(struct node_distance) *
ihk_mc_get_nr_numa_nodes() + PAGE_SIZE - 1)
>> PAGE_SHIFT, IHK_MC_AP_NOWAIT);
if (!memory_nodes[i].nodes_by_distance) {
kprintf("%s: error: allocating nodes_by_distance\n",
__FUNCTION__);
continue;
}
for (j = 0; j < ihk_mc_get_nr_numa_nodes(); ++j) {
memory_nodes[i].nodes_by_distance[j].id = j;
memory_nodes[i].nodes_by_distance[j].distance =
ihk_mc_get_numa_distance(i, j);
}
/* Sort by distance and node ID */
swapped = 1;
while (swapped) {
swapped = 0;
for (j = 1; j < ihk_mc_get_nr_numa_nodes(); ++j) {
if ((memory_nodes[i].nodes_by_distance[j - 1].distance >
memory_nodes[i].nodes_by_distance[j].distance) ||
((memory_nodes[i].nodes_by_distance[j - 1].distance ==
memory_nodes[i].nodes_by_distance[j].distance) &&
(memory_nodes[i].nodes_by_distance[j - 1].id >
memory_nodes[i].nodes_by_distance[j].id))) {
memory_nodes[i].nodes_by_distance[j - 1].id ^=
memory_nodes[i].nodes_by_distance[j].id;
memory_nodes[i].nodes_by_distance[j].id ^=
memory_nodes[i].nodes_by_distance[j - 1].id;
memory_nodes[i].nodes_by_distance[j - 1].id ^=
memory_nodes[i].nodes_by_distance[j].id;
memory_nodes[i].nodes_by_distance[j - 1].distance ^=
memory_nodes[i].nodes_by_distance[j].distance;
memory_nodes[i].nodes_by_distance[j].distance ^=
memory_nodes[i].nodes_by_distance[j - 1].distance;
memory_nodes[i].nodes_by_distance[j - 1].distance ^=
memory_nodes[i].nodes_by_distance[j].distance;
swapped = 1;
}
}
}
{
char buf[1024];
char *pbuf = buf;
pbuf += sprintf(pbuf, "NUMA %d distances: ", i);
for (j = 0; j < ihk_mc_get_nr_numa_nodes(); ++j) {
pbuf += sprintf(pbuf, "%d (%d), ",
memory_nodes[i].nodes_by_distance[j].id,
memory_nodes[i].nodes_by_distance[j].distance);
}
kprintf("%s\n", buf);
}
}
}
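The sort above is a plain bubble sort over at most a few dozen nodes, and it exchanges the id and distance fields with the classic three-XOR swap. A standalone demo of the trick, including the one caveat worth knowing about it:

#include <stdio.h>

static void xorswap(int *a, int *b)
{
	/* Same three XOR steps numa_distances_init() applies to .id and
	 * .distance. If a == b this zeroes the value; the bubble sort
	 * avoids that by only ever swapping element j-1 with element j. */
	*a ^= *b;
	*b ^= *a;
	*a ^= *b;
}

int main(void)
{
	int x = 7, y = 42;

	xorswap(&x, &y);
	printf("x=%d y=%d\n", x, y);	/* x=42 y=7 */
	return 0;
}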
#define PHYS_PAGE_HASH_SHIFT	(10)
#define PHYS_PAGE_HASH_SIZE	(1 << PHYS_PAGE_HASH_SHIFT)
#define PHYS_PAGE_HASH_MASK	(PHYS_PAGE_HASH_SIZE - 1)
@@ -1234,6 +1380,9 @@ void mem_init(void)
		kprintf("Demand paging on ANONYMOUS mappings enabled.\n");
		anon_on_demand = 1;
	}
+
+	/* Init distance vectors */
+	numa_distances_init();
}

#define KMALLOC_TRACK_HASH_SHIFT (8)
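Taken together, allocate_aligned_pages() now tries three tiers in order: (1) the user's NUMA policy mask (MPOL_BIND/MPOL_PREFERRED), (2) nodes sorted by distance from the allocating core, (3) round-robin starting at the local node. A compact runnable sketch of that fallback shape (node count, distance order, and the failing allocators are made up for illustration):

#include <stdio.h>

#define NNODES 4

/* Hypothetical: node 2 is closest to this core, then 0, 3, 1. */
static const int by_distance[NNODES] = { 2, 0, 3, 1 };

static long alloc_from(int node)
{
	return node == 3 ? 0x100000 : 0;	/* pretend only node 3 has free pages */
}

int main(void)
{
	long pa = 0;
	int i;

	/* Tier 2: walk nodes in distance order, first hit wins; tiers 1
	 * (policy mask) and 3 (round-robin) have the same loop shape. */
	for (i = 0; i < NNODES && !pa; ++i)
		pa = alloc_from(by_distance[i]);

	printf("allocated at 0x%lx from node %d\n", pa, by_distance[i - 1]);
	return 0;
}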


@@ -21,7 +21,7 @@ static struct ihk_ikc_channel_desc *mchannel;
static int arch_master_channel_packet_handler(struct ihk_ikc_channel_desc *,
		void *__packet, void *arg);

-void ikc_master_init(void)
+void ihk_ikc_master_init(void)
{
	mchannel = kmalloc(sizeof(struct ihk_ikc_channel_desc) +
			sizeof(struct ihk_ikc_master_packet),


@@ -74,7 +74,6 @@ init_process(struct process *proc, struct process *parent)
{
	/* These will be filled out when changing status */
	proc->pid = -1;
-	proc->exit_status = -1;
	proc->status = PS_RUNNING;

	if(parent){
@@ -234,13 +233,15 @@ init_process_vm(struct process *owner, struct address_space *asp, struct process
	return 0;
}

-struct thread *
-create_thread(unsigned long user_pc)
+struct thread *create_thread(unsigned long user_pc,
+		unsigned long *__cpu_set, size_t cpu_set_size)
{
	struct thread *thread;
	struct process *proc;
	struct process_vm *vm = NULL;
	struct address_space *asp = NULL;
+	int cpu;
+	int cpu_set_empty = 1;

	thread = ihk_mc_alloc_pages(KERNEL_STACK_NR_PAGES, IHK_MC_AP_NOWAIT);
	if (!thread)
@@ -256,7 +257,22 @@ create_thread(unsigned long user_pc)
	memset(vm, 0, sizeof(struct process_vm));

	init_process(proc, cpu_local_var(resource_set)->pid1);

-	if (1) {
+	/* Use requested CPU cores */
+	for_each_set_bit(cpu, __cpu_set, cpu_set_size * BITS_PER_BYTE) {
+		if (cpu >= num_processors) {
+			kprintf("%s: invalid CPU requested in initial cpu_set\n",
+					__FUNCTION__);
+			goto err;
+		}
+		dkprintf("%s: pid: %d, CPU: %d\n",
+				__FUNCTION__, proc->pid, cpu);
+		CPU_SET(cpu, &thread->cpu_set);
+		cpu_set_empty = 0;
+	}
+
+	/* Default allows all cores */
+	if (cpu_set_empty) {
		struct ihk_mc_cpu_info *infop;
		int i;
@@ -278,10 +294,10 @@ create_thread(unsigned long user_pc)
	dkprintf("fork(): sigshared\n");

	ihk_atomic_set(&thread->sigcommon->use, 1);
-	ihk_mc_spinlock_init(&thread->sigcommon->lock);
+	mcs_rwlock_init(&thread->sigcommon->lock);
	INIT_LIST_HEAD(&thread->sigcommon->sigpending);

-	ihk_mc_spinlock_init(&thread->sigpendinglock);
+	mcs_rwlock_init(&thread->sigpendinglock);
	INIT_LIST_HEAD(&thread->sigpending);

	thread->sigstack.ss_sp = NULL;
@@ -298,6 +314,7 @@ create_thread(unsigned long user_pc)
	if(init_process_vm(proc, asp, vm) != 0){
		goto err;
	}
+	thread->exit_status = -1;

	cpu_set(ihk_mc_get_processor_id(), &thread->vm->address_space->cpu_set,
			&thread->vm->address_space->cpu_set_lock);
@@ -441,11 +458,11 @@ clone_thread(struct thread *org, unsigned long pc, unsigned long sp,
		memcpy(thread->sigcommon->action, org->sigcommon->action,
		       sizeof(struct k_sigaction) * _NSIG);
		ihk_atomic_set(&thread->sigcommon->use, 1);
-		ihk_mc_spinlock_init(&thread->sigcommon->lock);
+		mcs_rwlock_init(&thread->sigcommon->lock);
		INIT_LIST_HEAD(&thread->sigcommon->sigpending);
		// TODO: copy signalfd
	}
-	ihk_mc_spinlock_init(&thread->sigpendinglock);
+	mcs_rwlock_init(&thread->sigpendinglock);
	INIT_LIST_HEAD(&thread->sigpending);

	thread->sigmask = org->sigmask;
@@ -1367,6 +1384,11 @@ static int sync_one_page(void *arg0, page_table_t pt, pte_t *ptep,
	flush_tlb_single((uintptr_t)pgaddr);	/* XXX: TLB flush */
	phys = pte_get_phys(ptep);

+	if (args->memobj->flags & MF_ZEROFILL) {
+		error = 0;
+		goto out;
+	}
+
	error = memobj_flush_page(args->memobj, phys, pgsize);
	if (error) {
		ekprintf("sync_one_page(%p,%p,%p %#lx,%p,%d):"
@@ -1394,11 +1416,19 @@ int sync_process_memory_range(struct process_vm *vm, struct vm_range *range,
	args.memobj = range->memobj;

	ihk_mc_spinlock_lock_noirq(&vm->page_table_lock);
-	memobj_lock(range->memobj);
+	if (!(range->memobj->flags & MF_ZEROFILL)) {
+		memobj_lock(range->memobj);
+	}
	error = visit_pte_range(vm->address_space->page_table, (void *)start,
			(void *)end, range->pgshift, VPTEF_SKIP_NULL,
			&sync_one_page, &args);
-	memobj_unlock(range->memobj);
+	if (!(range->memobj->flags & MF_ZEROFILL)) {
+		memobj_unlock(range->memobj);
+	}
	ihk_mc_spinlock_unlock_noirq(&vm->page_table_lock);
	if (error) {
		ekprintf("sync_process_memory_range(%p,%p,%#lx,%#lx):"
@@ -1690,10 +1720,9 @@ static int do_page_fault_process_vm(struct process_vm *vm, void *fault_addr0, ui
	range = lookup_process_memory_range(vm, fault_addr, fault_addr+1);
	if (range == NULL) {
		error = -EFAULT;
-		dkprintf("[%d]do_page_fault_process_vm(%p,%lx,%lx):"
-				"out of range. %d\n",
-				ihk_mc_get_processor_id(), vm,
-				fault_addr0, reason, error);
+		dkprintf("do_page_fault_process_vm(): vm: %p, addr: %p, reason: %lx):"
+				"out of range: %d\n",
+				vm, fault_addr0, reason, error);
		goto out;
	}
@@ -1723,10 +1752,18 @@ static int do_page_fault_process_vm(struct process_vm *vm, void *fault_addr0, ui
			kprintf("if (((range->flag & VR_PROT_MASK) == VR_PROT_NONE))\n");
		if (((reason & PF_WRITE) && !(reason & PF_PATCH)))
			kprintf("if (((reason & PF_WRITE) && !(reason & PF_PATCH)))\n");
-		if (!(range->flag & VR_PROT_WRITE))
+		if (!(range->flag & VR_PROT_WRITE)) {
			kprintf("if (!(range->flag & VR_PROT_WRITE))\n");
-		if ((reason & PF_INSTR) && !(range->flag & VR_PROT_EXEC))
+			//kprintf("setting VR_PROT_WRITE\n");
+			//range->flag |= VR_PROT_WRITE;
+			//goto cont;
+		}
+		if ((reason & PF_INSTR) && !(range->flag & VR_PROT_EXEC)) {
			kprintf("if ((reason & PF_INSTR) && !(range->flag & VR_PROT_EXEC))\n");
+			//kprintf("setting VR_PROT_EXEC\n");
+			//range->flag |= VR_PROT_EXEC;
+			//goto cont;
+		}
		goto out;
	}
@@ -2184,9 +2221,10 @@ int populate_process_memory(struct process_vm *vm, void *start, size_t len)
	for (addr = (uintptr_t)start; addr < end; addr += PAGE_SIZE) {
		error = page_fault_process_vm(vm, (void *)addr, reason);
		if (error) {
-			ekprintf("populate_process_range:page_fault_process_vm"
-					"(%p,%lx,%lx) failed %d\n",
-					vm, addr, reason, error);
+			ekprintf("%s: WARNING: page_fault_process_vm(): vm: %p, "
+					"addr: %lx, reason: %lx, off: %lu, len: %lu returns %d\n",
+					__FUNCTION__, vm, addr, reason,
+					((void *)addr - start), len, error);
			goto out;
		}
	}
@@ -2717,6 +2755,8 @@ redo:
		restore_fp_regs(next);
	}

+	if (prev && prev->vm->address_space->page_table !=
+			next->vm->address_space->page_table)
		ihk_mc_load_page_table(next->vm->address_space->page_table);

	dkprintf("[%d] schedule: tlsblock_base: 0x%lX\n",
@@ -2949,6 +2989,7 @@ find_thread(int pid, int tid, struct mcs_rwlock_node_irqsave *lock)
	if(tid <= 0)
		return NULL;
	mcs_rwlock_reader_lock(&thash->lock[hash], lock);
+retry:
	list_for_each_entry(thread, &thash->list[hash], hash_list){
		if(thread->tid == tid){
			if(pid <= 0)
@@ -2957,6 +2998,13 @@ find_thread(int pid, int tid, struct mcs_rwlock_node_irqsave *lock)
				return thread;
		}
	}
+	/* If no thread with pid == tid was found, then we may be looking for a
+	 * specific thread (not the main thread of the process), try to find it
+	 * based on tid only */
+	if (pid > 0 && pid == tid) {
+		pid = 0;
+		goto retry;
+	}
	mcs_rwlock_reader_unlock(&thash->lock[hash], lock);
	return NULL;
}
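A runnable userspace analogue of that two-pass search: the first pass requires the pid to match too (i.e. the main thread), and only when the caller asked for pid == tid does the second pass relax to tid-only so secondary threads are found as well (types and data here are illustrative, not McKernel's):

#include <stdio.h>

struct thr { int pid, tid; };

static struct thr *find(struct thr *t, int n, int pid, int tid)
{
	int relax = 0;
	int i;
retry:
	for (i = 0; i < n; ++i)
		if (t[i].tid == tid && (relax || pid <= 0 || t[i].pid == pid))
			return &t[i];
	/* Nothing matched with pid == tid: retry matching on tid alone. */
	if (!relax && pid > 0 && pid == tid) {
		relax = 1;
		goto retry;
	}
	return NULL;
}

int main(void)
{
	struct thr ts[] = { { 100, 100 }, { 100, 101 } };

	/* tid 101 belongs to process 100; asking for pid==tid==101 still
	 * finds it on the second pass. */
	printf("%s\n", find(ts, 2, 101, 101) ? "found" : "not found");
	return 0;
}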


@@ -24,6 +24,7 @@
#include <page.h>
#include <mman.h>
#include <bitmap.h>
+#include <init.h>

//#define DEBUG_PRINT_PROCFS


@@ -179,6 +179,7 @@ int shmobj_create(struct shmid_ds *ds, struct memobj **objp)
	memset(obj, 0, sizeof(*obj));
	obj->memobj.ops = &shmobj_ops;
+	obj->memobj.size = ds->shm_segsz;
	obj->ds = *ds;
	obj->ds.shm_perm.seq = the_seq++;
	obj->ds.shm_nattch = 1;


@@ -54,6 +54,7 @@
#include <process.h>
#include <bitops.h>
#include <bitmap.h>
+#include <xpmem.h>

/* Headers taken from kitten LWK */
#include <lwk/stddef.h>
@@ -129,6 +130,95 @@ int prepare_process_ranges_args_envs(struct thread *thread,
static void do_mod_exit(int status);
#endif
#ifdef TRACK_SYSCALLS
#define SOCC_CLEAR 1
#define SOCC_ON 2
#define SOCC_OFF 4
#define SOCC_PRINT 8
void print_syscall_stats(struct thread *thread)
{
int i;
unsigned long flags;
flags = kprintf_lock();
for (i = 0; i < 300; ++i) {
if (!thread->syscall_cnts[i] &&
!thread->offload_cnts[i]) continue;
//__kprintf("(%20s): sys.cnt: %3lu (%15lukC)\n",
__kprintf("(%3d,%20s): sys.cnt: %5lu (%10lukC), offl.cnt: %5lu (%10lukC)\n",
i,
syscall_name[i],
thread->syscall_cnts[i],
(thread->syscall_times[i] /
(thread->syscall_cnts[i] ? thread->syscall_cnts[i] : 1))
/ 1000,
thread->offload_cnts[i],
(thread->offload_times[i] /
(thread->offload_cnts[i] ? thread->offload_cnts[i] : 1))
/ 1000
);
}
kprintf_unlock(flags);
}
void alloc_syscall_counters(struct thread *thread)
{
thread->syscall_times = kmalloc(sizeof(*thread->syscall_times) * 300, IHK_MC_AP_NOWAIT);
thread->syscall_cnts = kmalloc(sizeof(*thread->syscall_cnts) * 300, IHK_MC_AP_NOWAIT);
thread->offload_times = kmalloc(sizeof(*thread->offload_times) * 300, IHK_MC_AP_NOWAIT);
thread->offload_cnts = kmalloc(sizeof(*thread->offload_cnts) * 300, IHK_MC_AP_NOWAIT);
if (!thread->syscall_times ||
!thread->syscall_cnts ||
!thread->offload_times ||
!thread->offload_cnts) {
kprintf("ERROR: allocating counters\n");
panic("");
}
memset(thread->syscall_times, 0, sizeof(*thread->syscall_times) * 300);
memset(thread->syscall_cnts, 0, sizeof(*thread->syscall_cnts) * 300);
memset(thread->offload_times, 0, sizeof(*thread->offload_times) * 300);
memset(thread->offload_cnts, 0, sizeof(*thread->offload_cnts) * 300);
}
SYSCALL_DECLARE(syscall_offload_clr_cntrs)
{
int flag = (int)ihk_mc_syscall_arg0(ctx);
struct thread *thread = cpu_local_var(current);
int i;
if (flag & SOCC_PRINT)
print_syscall_stats(thread);
if (flag & SOCC_CLEAR) {
for (i = 0; i < 300; ++i) {
if (!thread->syscall_cnts[i] &&
!thread->offload_cnts[i]) continue;
thread->syscall_cnts[i] = 0;
thread->syscall_times[i] = 0;
thread->offload_cnts[i] = 0;
thread->offload_times[i] = 0;
}
}
if (flag & SOCC_ON) {
thread->socc_enabled = 1;
}
else if (flag & SOCC_OFF) {
thread->socc_enabled = 0;
}
return 0;
}
#endif // TRACK_SYSCALLS
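For completeness, a sketch of how a benchmark might drive this tracker from user space. The SOCC_* flags are copied from the block above; the syscall number assigned to syscall_offload_clr_cntrs is NOT shown in this diff, so it is taken as a parameter here rather than invented:

#include <unistd.h>
#include <sys/syscall.h>

#define SOCC_CLEAR 1
#define SOCC_ON    2
#define SOCC_OFF   4
#define SOCC_PRINT 8

/* nr_socc: whatever syscall number McKernel assigns to
 * syscall_offload_clr_cntrs (not part of this diff). */
void measure_region(long nr_socc, void (*workload)(void))
{
	syscall(nr_socc, SOCC_CLEAR | SOCC_ON);		/* reset and enable counters */
	workload();
	syscall(nr_socc, SOCC_PRINT | SOCC_OFF);	/* dump per-syscall stats */
}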
static void send_syscall(struct syscall_request *req, int cpu, int pid, struct syscall_response *res)
{
	struct ikc_scd_packet packet IHK_DMA_ALIGN;
@@ -189,6 +279,10 @@ long do_syscall(struct syscall_request *req, int cpu, int pid)
	unsigned long irqstate;
	struct thread *thread = cpu_local_var(current);
	struct process *proc = thread->proc;
+#ifdef TRACK_SYSCALLS
+	uint64_t t_s;
+	t_s = rdtsc();
+#endif // TRACK_SYSCALLS

	dkprintf("SC(%d)[%3d] sending syscall\n",
			ihk_mc_get_processor_id(),
@@ -307,6 +401,23 @@ long do_syscall(struct syscall_request *req, int cpu, int pid)
	if(req->number != __NR_exit_group){
		--thread->in_syscall_offload;
	}
#ifdef TRACK_SYSCALLS
if (req->number < 300) {
if (!cpu_local_var(current)->offload_cnts) {
alloc_syscall_counters(cpu_local_var(current));
}
if (cpu_local_var(current)->socc_enabled) {
cpu_local_var(current)->offload_times[req->number] +=
(rdtsc() - t_s);
cpu_local_var(current)->offload_cnts[req->number]++;
}
}
else {
dkprintf("offload syscall > 300?? : %d\n", req->number);
}
#endif // TRACK_SYSCALLS
	return rc;
} }
@@ -347,16 +458,16 @@ static int wait_zombie(struct thread *thread, struct process *child, int *status
	return ret;
}

-static int wait_stopped(struct thread *thread, struct process *child, int *status, int options)
+static int wait_stopped(struct thread *thread, struct process *child, struct thread *c_thread, int *status, int options)
{
	dkprintf("wait_stopped,proc->pid=%d,child->pid=%d,options=%08x\n",
			thread->proc->pid, child->pid, options);
	int ret;

	/* Copy exit_status created in do_signal */
-	int *exit_status = child->status == PS_STOPPED ?
+	int *exit_status = (child->status == PS_STOPPED || !c_thread) ?
			&child->group_exit_status :
-			&child->exit_status;
+			&c_thread->exit_status;

	/* Skip this process because exit_status has been reaped. */
	if (!*exit_status) {
@@ -400,6 +511,26 @@ static int wait_continued(struct thread *thread, struct process *child, int *sta
	return ret;
}
struct thread *find_thread_of_process(struct process *child, int pid)
{
int c_found = 0;
struct mcs_rwlock_node c_lock;
struct thread *c_thread = NULL;
mcs_rwlock_reader_lock_noirq(&child->threads_lock, &c_lock);
list_for_each_entry(c_thread, &child->threads_list, siblings_list) {
if (c_thread->tid == pid) {
c_found = 1;
break;
}
}
mcs_rwlock_reader_unlock_noirq(&child->threads_lock, &c_lock);
if (!c_found) c_thread = NULL;
return c_thread;
}
/* /*
* From glibc: INLINE_SYSCALL (wait4, 4, pid, stat_loc, options, NULL); * From glibc: INLINE_SYSCALL (wait4, 4, pid, stat_loc, options, NULL);
*/ */
@ -415,22 +546,30 @@ do_wait(int pid, int *status, int options, void *rusage)
int empty = 1; int empty = 1;
int orgpid = pid; int orgpid = pid;
struct mcs_rwlock_node lock; struct mcs_rwlock_node lock;
struct thread *c_thread = NULL;
dkprintf("wait4,thread->pid=%d,pid=%d\n", thread->proc->pid, pid); dkprintf("wait4(): current->proc->pid: %d, pid: %d\n", thread->proc->pid, pid);
rescan: rescan:
pid = orgpid; pid = orgpid;
mcs_rwlock_writer_lock_noirq(&thread->proc->children_lock, &lock); mcs_rwlock_writer_lock_noirq(&thread->proc->children_lock, &lock);
list_for_each_entry_safe(child, next, &proc->children_list, siblings_list) { list_for_each_entry_safe(child, next, &proc->children_list, siblings_list) {
/*
if (!(!!(options & __WCLONE) ^ (child->termsig == SIGCHLD))) { if (!(!!(options & __WCLONE) ^ (child->termsig == SIGCHLD))) {
continue; continue;
} }
*/
/* Find thread with pid == tid, this will be either the main thread
* or the one we are looking for specifically when __WCLONE is passed */
//if (options & __WCLONE)
c_thread = find_thread_of_process(child, pid);
if ((pid < 0 && -pid == child->pgid) || if ((pid < 0 && -pid == child->pgid) ||
pid == -1 || pid == -1 ||
(pid == 0 && pgid == child->pgid) || (pid == 0 && pgid == child->pgid) ||
(pid > 0 && pid == child->pid)) { (pid > 0 && pid == child->pid) || c_thread != NULL) {
empty = 0; empty = 0;
@ -478,8 +617,11 @@ do_wait(int pid, int *status, int options, void *rusage)
if(!(child->ptrace & PT_TRACED) && if(!(child->ptrace & PT_TRACED) &&
(child->signal_flags & SIGNAL_STOP_STOPPED) && (child->signal_flags & SIGNAL_STOP_STOPPED) &&
(options & WUNTRACED)) { (options & WUNTRACED)) {
/* Find main thread of process if pid == -1 */
if (pid == -1)
c_thread = find_thread_of_process(child, child->pid);
/* Not ptraced and in stopped state and WUNTRACED is specified */ /* Not ptraced and in stopped state and WUNTRACED is specified */
ret = wait_stopped(thread, child, status, options); ret = wait_stopped(thread, child, c_thread, status, options);
if(!(options & WNOWAIT)){ if(!(options & WNOWAIT)){
child->signal_flags &= ~SIGNAL_STOP_STOPPED; child->signal_flags &= ~SIGNAL_STOP_STOPPED;
} }
@ -489,8 +631,15 @@ do_wait(int pid, int *status, int options, void *rusage)
if((child->ptrace & PT_TRACED) && if((child->ptrace & PT_TRACED) &&
(child->status & (PS_STOPPED | PS_TRACED))) { (child->status & (PS_STOPPED | PS_TRACED))) {
ret = wait_stopped(thread, child, status, options); /* Find main thread of process if pid == -1 */
if(ret == child->pid){ if (pid == -1)
c_thread = find_thread_of_process(child, child->pid);
ret = wait_stopped(thread, child, c_thread, status, options);
if(c_thread && ret == child->pid){
/* Are we looking for a specific thread? */
if (pid == c_thread->tid) {
ret = c_thread->tid;
}
if(!(options & WNOWAIT)){ if(!(options & WNOWAIT)){
child->signal_flags &= ~SIGNAL_STOP_STOPPED; child->signal_flags &= ~SIGNAL_STOP_STOPPED;
} }
@ -639,6 +788,7 @@ terminate(int rc, int sig)
int n; int n;
int *ids = NULL; int *ids = NULL;
struct syscall_request request IHK_DMA_ALIGN; struct syscall_request request IHK_DMA_ALIGN;
int exit_status;
// sync perf info // sync perf info
if(proc->monitoring_event) if(proc->monitoring_event)
@ -655,7 +805,7 @@ terminate(int rc, int sig)
// no return // no return
return; return;
} }
proc->exit_status = ((rc & 0x00ff) << 8) | (sig & 0xff); exit_status = mythread->exit_status = ((rc & 0x00ff) << 8) | (sig & 0xff);
proc->status = PS_EXITED; proc->status = PS_EXITED;
mcs_rwlock_writer_unlock_noirq(&proc->update_lock, &updatelock); mcs_rwlock_writer_unlock_noirq(&proc->update_lock, &updatelock);
mcs_rwlock_reader_unlock(&proc->threads_lock, &lock); mcs_rwlock_reader_unlock(&proc->threads_lock, &lock);
@ -791,7 +941,7 @@ terminate(int rc, int sig)
// clean up memory // clean up memory
if(!proc->nohost){ if(!proc->nohost){
request.number = __NR_exit_group; request.number = __NR_exit_group;
request.args[0] = proc->exit_status; request.args[0] = exit_status;
do_syscall(&request, ihk_mc_get_processor_id(), proc->pid); do_syscall(&request, ihk_mc_get_processor_id(), proc->pid);
proc->nohost = 1; proc->nohost = 1;
} }
@ -803,6 +953,7 @@ terminate(int rc, int sig)
} }
else { else {
proc->status = PS_ZOMBIE; proc->status = PS_ZOMBIE;
proc->exit_status = exit_status;
dkprintf("terminate,wakeup\n"); dkprintf("terminate,wakeup\n");
@ -813,11 +964,11 @@ terminate(int rc, int sig)
memset(&info, '\0', sizeof info); memset(&info, '\0', sizeof info);
info.si_signo = SIGCHLD; info.si_signo = SIGCHLD;
info.si_code = (proc->exit_status & 0x7f)? info.si_code = (exit_status & 0x7f)?
((proc->exit_status & 0x80)? ((exit_status & 0x80)?
CLD_DUMPED: CLD_KILLED): CLD_EXITED; CLD_DUMPED: CLD_KILLED): CLD_EXITED;
info._sifields._sigchld.si_pid = proc->pid; info._sifields._sigchld.si_pid = proc->pid;
info._sifields._sigchld.si_status = proc->exit_status; info._sifields._sigchld.si_status = exit_status;
error = do_kill(NULL, proc->parent->pid, -1, SIGCHLD, &info, 0); error = do_kill(NULL, proc->parent->pid, -1, SIGCHLD, &info, 0);
dkprintf("terminate,klll %d,error=%d\n", dkprintf("terminate,klll %d,error=%d\n",
proc->termsig, error); proc->termsig, error);
@ -855,7 +1006,7 @@ interrupt_syscall(int pid, int tid)
ihk_mc_user_context_t ctx; ihk_mc_user_context_t ctx;
long lerror; long lerror;
kprintf("interrupt_syscall pid=%d tid=%d\n", pid, tid); dkprintf("interrupt_syscall pid=%d tid=%d\n", pid, tid);
ihk_mc_syscall_arg0(&ctx) = pid; ihk_mc_syscall_arg0(&ctx) = pid;
ihk_mc_syscall_arg1(&ctx) = tid; ihk_mc_syscall_arg1(&ctx) = tid;
@ -912,7 +1063,7 @@ out:
return (int)lerror; return (int)lerror;
} }
static int do_munmap(void *addr, size_t len) int do_munmap(void *addr, size_t len)
{ {
int error; int error;
int ro_freed; int ro_freed;
@ -1065,7 +1216,8 @@ do_mmap(const intptr_t addr0, const size_t len0, const int prot,
} }
else { else {
/* choose mapping address */ /* choose mapping address */
error = search_free_space(len, region->map_end, error = search_free_space(len, region->map_end +
(fd > 0) ? PTL4_SIZE : 0,
PAGE_SHIFT+p2align, &addr); PAGE_SHIFT+p2align, &addr);
if (error) { if (error) {
ekprintf("do_mmap:search_free_space(%lx,%lx,%d) failed. %d\n", ekprintf("do_mmap:search_free_space(%lx,%lx,%d) failed. %d\n",
@ -1211,6 +1363,13 @@ do_mmap(const intptr_t addr0, const size_t len0, const int prot,
goto out; goto out;
} }
memobj_lock(memobj);
if (memobj->status == MEMOBJ_TO_BE_PREFETCHED) {
memobj->status = MEMOBJ_READY;
populated_mapping = 1;
}
memobj_unlock(memobj);
error = 0; error = 0;
p = NULL; p = NULL;
memobj = NULL; memobj = NULL;
@ -1222,11 +1381,12 @@ out:
} }
ihk_mc_spinlock_unlock_noirq(&thread->vm->memory_range_lock); ihk_mc_spinlock_unlock_noirq(&thread->vm->memory_range_lock);
if (!error && populated_mapping) { if (!error && populated_mapping && !((vrflags & VR_PROT_MASK) == VR_PROT_NONE)) {
error = populate_process_memory(thread->vm, (void *)addr, len); error = populate_process_memory(thread->vm, (void *)addr, len);
if (error) { if (error) {
ekprintf("%s: error :populate_process_memory" ekprintf("%s: WARNING: populate_process_memory(): "
"vm: %p, addr: %p, len: %d (flags: %s%s) failed %d\n", __FUNCTION__, "vm: %p, addr: %p, len: %d (flags: %s%s) failed %d\n",
__FUNCTION__,
thread->vm, (void *)addr, len, thread->vm, (void *)addr, len,
(flags & MAP_POPULATE) ? "MAP_POPULATE " : "", (flags & MAP_POPULATE) ? "MAP_POPULATE " : "",
(flags & MAP_LOCKED) ? "MAP_LOCKED ": "", (flags & MAP_LOCKED) ? "MAP_LOCKED ": "",
@ -1595,7 +1755,7 @@ static int ptrace_report_clone(struct thread *thread, struct thread *new, int ev
/* Save reason why stopped and process state for wait4() to reap */ /* Save reason why stopped and process state for wait4() to reap */
mcs_rwlock_writer_lock_noirq(&thread->proc->update_lock, &lock); mcs_rwlock_writer_lock_noirq(&thread->proc->update_lock, &lock);
thread->proc->exit_status = (SIGTRAP | (event << 8)); thread->exit_status = (SIGTRAP | (event << 8));
/* Transition process state */ /* Transition process state */
thread->proc->status = PS_TRACED; thread->proc->status = PS_TRACED;
thread->status = PS_TRACED; thread->status = PS_TRACED;
@ -1610,24 +1770,26 @@ static int ptrace_report_clone(struct thread *thread, struct thread *new, int ev
mcs_rwlock_writer_lock_noirq(&new->proc->update_lock, &updatelock); mcs_rwlock_writer_lock_noirq(&new->proc->update_lock, &updatelock);
/* set ptrace features to new process */ /* set ptrace features to new process */
new->proc->ptrace = thread->proc->ptrace; new->proc->ptrace = thread->proc->ptrace;
if (event != PTRACE_EVENT_CLONE) {
new->proc->ppid_parent = new->proc->parent; /* maybe proc */ new->proc->ppid_parent = new->proc->parent; /* maybe proc */
}
if ((new->proc->ptrace & PT_TRACED) && new->ptrace_debugreg == NULL) { if ((new->proc->ptrace & PT_TRACED) && new->ptrace_debugreg == NULL) {
alloc_debugreg(new); alloc_debugreg(new);
} }
if (event != PTRACE_EVENT_CLONE) {
mcs_rwlock_writer_lock_noirq(&new->proc->parent->children_lock, &lock); mcs_rwlock_writer_lock_noirq(&new->proc->parent->children_lock, &lock);
list_del(&new->proc->siblings_list); list_del(&new->proc->siblings_list);
list_add_tail(&new->proc->ptraced_siblings_list, &new->proc->parent->ptraced_children_list); list_add_tail(&new->proc->ptraced_siblings_list, &new->proc->parent->ptraced_children_list);
mcs_rwlock_writer_unlock_noirq(&new->proc->parent->children_lock, &lock); mcs_rwlock_writer_unlock_noirq(&new->proc->parent->children_lock, &lock);
new->proc->parent = thread->proc->parent; /* new ptracing parent */ new->proc->parent = thread->proc->parent; /* new ptracing parent */
mcs_rwlock_writer_lock_noirq(&new->proc->parent->children_lock, &lock); mcs_rwlock_writer_lock_noirq(&new->proc->parent->children_lock, &lock);
list_add_tail(&new->proc->siblings_list, &new->proc->parent->children_list); list_add_tail(&new->proc->siblings_list, &new->proc->parent->children_list);
mcs_rwlock_writer_unlock_noirq(&new->proc->parent->children_lock, &lock); mcs_rwlock_writer_unlock_noirq(&new->proc->parent->children_lock, &lock);
}
/* trace and SIGSTOP */ /* trace and SIGSTOP */
new->proc->exit_status = SIGSTOP; new->exit_status = SIGSTOP;
new->proc->status = PS_TRACED; new->proc->status = PS_TRACED;
new->status = PS_TRACED; new->status = PS_TRACED;
@ -1639,7 +1801,7 @@ static int ptrace_report_clone(struct thread *thread, struct thread *new, int ev
info.si_signo = SIGCHLD; info.si_signo = SIGCHLD;
info.si_code = CLD_TRAPPED; info.si_code = CLD_TRAPPED;
info._sifields._sigchld.si_pid = thread->proc->pid; info._sifields._sigchld.si_pid = thread->proc->pid;
info._sifields._sigchld.si_status = thread->proc->exit_status; info._sifields._sigchld.si_status = thread->exit_status;
rc = do_kill(cpu_local_var(current), parent_pid, -1, SIGCHLD, &info, 0); rc = do_kill(cpu_local_var(current), parent_pid, -1, SIGCHLD, &info, 0);
if(rc < 0) { if(rc < 0) {
dkprintf("ptrace_report_clone,do_kill failed\n"); dkprintf("ptrace_report_clone,do_kill failed\n");
@ -1731,7 +1893,7 @@ SYSCALL_DECLARE(execve)
ret = do_syscall(&request, ihk_mc_get_processor_id(), 0); ret = do_syscall(&request, ihk_mc_get_processor_id(), 0);
if (ret != 0) { if (ret != 0) {
kprintf("execve(): ERROR: host failed to load elf header, errno: %d\n", dkprintf("execve(): ERROR: host failed to load elf header, errno: %d\n",
ret); ret);
return -ret; return -ret;
} }
@ -1886,7 +2048,7 @@ unsigned long do_fork(int clone_flags, unsigned long newsp,
return -EINVAL; return -EINVAL;
} }
cpuid = obtain_clone_cpuid(); cpuid = obtain_clone_cpuid(&old->cpu_set);
if (cpuid == -1) { if (cpuid == -1) {
kprintf("do_fork,core not available\n"); kprintf("do_fork,core not available\n");
return -EAGAIN; return -EAGAIN;
@ -2473,16 +2635,16 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{ {
struct thread *thread = cpu_local_var(current); struct thread *thread = cpu_local_var(current);
struct k_sigaction *k; struct k_sigaction *k;
long irqstate; struct mcs_rwlock_node_irqsave mcs_rw_node;
ihk_mc_user_context_t ctx0; ihk_mc_user_context_t ctx0;
irqstate = ihk_mc_spinlock_lock(&thread->sigcommon->lock); mcs_rwlock_writer_lock(&thread->sigcommon->lock, &mcs_rw_node);
k = thread->sigcommon->action + sig - 1; k = thread->sigcommon->action + sig - 1;
if(oact) if(oact)
memcpy(oact, k, sizeof(struct k_sigaction)); memcpy(oact, k, sizeof(struct k_sigaction));
if(act) if(act)
memcpy(k, act, sizeof(struct k_sigaction)); memcpy(k, act, sizeof(struct k_sigaction));
ihk_mc_spinlock_unlock(&thread->sigcommon->lock, irqstate); mcs_rwlock_writer_unlock(&thread->sigcommon->lock, &mcs_rw_node);
if(act){ if(act){
ihk_mc_syscall_arg0(&ctx0) = sig; ihk_mc_syscall_arg0(&ctx0) = sig;
@ -2543,6 +2705,21 @@ SYSCALL_DECLARE(ioctl)
return rc; return rc;
} }
SYSCALL_DECLARE(open)
{
const char *pathname = (const char *)ihk_mc_syscall_arg0(ctx);
long rc;
dkprintf("open(): pathname=%s\n", pathname);
if (!strcmp(pathname, XPMEM_DEV_PATH)) {
rc = xpmem_open(ctx);
} else {
rc = syscall_generic_forwarding(__NR_open, ctx);
}
return rc;
}
SYSCALL_DECLARE(close) SYSCALL_DECLARE(close)
{ {
int fd = ihk_mc_syscall_arg0(ctx); int fd = ihk_mc_syscall_arg0(ctx);
@ -2654,10 +2831,10 @@ fault:
SYSCALL_DECLARE(rt_sigpending) SYSCALL_DECLARE(rt_sigpending)
{ {
int flag;
struct sig_pending *pending; struct sig_pending *pending;
struct list_head *head; struct list_head *head;
ihk_spinlock_t *lock; mcs_rwlock_lock_t *lock;
struct mcs_rwlock_node_irqsave mcs_rw_node;
__sigset_t w = 0; __sigset_t w = 0;
struct thread *thread = cpu_local_var(current); struct thread *thread = cpu_local_var(current);
sigset_t *set = (sigset_t *)ihk_mc_syscall_arg0(ctx); sigset_t *set = (sigset_t *)ihk_mc_syscall_arg0(ctx);
@ -2668,19 +2845,19 @@ SYSCALL_DECLARE(rt_sigpending)
lock = &thread->sigcommon->lock; lock = &thread->sigcommon->lock;
head = &thread->sigcommon->sigpending; head = &thread->sigcommon->sigpending;
flag = ihk_mc_spinlock_lock(lock); mcs_rwlock_writer_lock(lock, &mcs_rw_node);
list_for_each_entry(pending, head, list){ list_for_each_entry(pending, head, list){
w |= pending->sigmask.__val[0]; w |= pending->sigmask.__val[0];
} }
ihk_mc_spinlock_unlock(lock, flag); mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
lock = &thread->sigpendinglock; lock = &thread->sigpendinglock;
head = &thread->sigpending; head = &thread->sigpending;
flag = ihk_mc_spinlock_lock(lock); mcs_rwlock_writer_lock(lock, &mcs_rw_node);
list_for_each_entry(pending, head, list){ list_for_each_entry(pending, head, list){
w |= pending->sigmask.__val[0]; w |= pending->sigmask.__val[0];
} }
ihk_mc_spinlock_unlock(lock, flag); mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
if(copy_to_user(set->__val, &w, sizeof w)) if(copy_to_user(set->__val, &w, sizeof w))
return -EFAULT; return -EFAULT;
@ -3215,10 +3392,10 @@ SYSCALL_DECLARE(rt_sigtimedwait)
__sigset_t wset; __sigset_t wset;
__sigset_t nset; __sigset_t nset;
struct timespec wtimeout; struct timespec wtimeout;
unsigned long flag;
struct sig_pending *pending; struct sig_pending *pending;
struct list_head *head; struct list_head *head;
ihk_spinlock_t *lock; mcs_rwlock_lock_t *lock;
struct mcs_rwlock_node_irqsave mcs_rw_node;
int w; int w;
int sig; int sig;
struct timespec ats; struct timespec ats;
@ -3284,18 +3461,18 @@ SYSCALL_DECLARE(rt_sigtimedwait)
lock = &thread->sigcommon->lock; lock = &thread->sigcommon->lock;
head = &thread->sigcommon->sigpending; head = &thread->sigcommon->sigpending;
flag = ihk_mc_spinlock_lock(lock); mcs_rwlock_writer_lock(lock, &mcs_rw_node);
list_for_each_entry(pending, head, list){ list_for_each_entry(pending, head, list){
if(pending->sigmask.__val[0] & wset) if(pending->sigmask.__val[0] & wset)
break; break;
} }
if(&pending->list == head){ if(&pending->list == head){
ihk_mc_spinlock_unlock(lock, flag); mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
lock = &thread->sigpendinglock; lock = &thread->sigpendinglock;
head = &thread->sigpending; head = &thread->sigpending;
flag = ihk_mc_spinlock_lock(lock); mcs_rwlock_writer_lock(lock, &mcs_rw_node);
list_for_each_entry(pending, head, list){ list_for_each_entry(pending, head, list){
if(pending->sigmask.__val[0] & wset) if(pending->sigmask.__val[0] & wset)
break; break;
@ -3305,25 +3482,25 @@ SYSCALL_DECLARE(rt_sigtimedwait)
if(&pending->list != head){ if(&pending->list != head){
list_del(&pending->list); list_del(&pending->list);
thread->sigmask.__val[0] = bset; thread->sigmask.__val[0] = bset;
ihk_mc_spinlock_unlock(lock, flag); mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
break; break;
} }
ihk_mc_spinlock_unlock(lock, flag); mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
lock = &thread->sigcommon->lock; lock = &thread->sigcommon->lock;
head = &thread->sigcommon->sigpending; head = &thread->sigcommon->sigpending;
flag = ihk_mc_spinlock_lock(lock); mcs_rwlock_writer_lock(lock, &mcs_rw_node);
list_for_each_entry(pending, head, list){ list_for_each_entry(pending, head, list){
if(pending->sigmask.__val[0] & nset) if(pending->sigmask.__val[0] & nset)
break; break;
} }
if(&pending->list == head){ if(&pending->list == head){
ihk_mc_spinlock_unlock(lock, flag); mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
lock = &thread->sigpendinglock; lock = &thread->sigpendinglock;
head = &thread->sigpending; head = &thread->sigpending;
flag = ihk_mc_spinlock_lock(lock); mcs_rwlock_writer_lock(lock, &mcs_rw_node);
list_for_each_entry(pending, head, list){ list_for_each_entry(pending, head, list){
if(pending->sigmask.__val[0] & nset) if(pending->sigmask.__val[0] & nset)
break; break;
@ -3333,11 +3510,11 @@ SYSCALL_DECLARE(rt_sigtimedwait)
if(&pending->list != head){ if(&pending->list != head){
list_del(&pending->list); list_del(&pending->list);
thread->sigmask.__val[0] = bset; thread->sigmask.__val[0] = bset;
ihk_mc_spinlock_unlock(lock, flag); mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
do_signal(-EINTR, NULL, thread, pending, 0); do_signal(-EINTR, NULL, thread, pending, 0);
return -EINTR; return -EINTR;
} }
ihk_mc_spinlock_unlock(lock, flag); mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
thread->sigevent = 0; thread->sigevent = 0;
} }
@ -3374,10 +3551,10 @@ do_sigsuspend(struct thread *thread, const sigset_t *set)
{ {
__sigset_t wset; __sigset_t wset;
__sigset_t bset; __sigset_t bset;
unsigned long flag;
struct sig_pending *pending; struct sig_pending *pending;
struct list_head *head; struct list_head *head;
ihk_spinlock_t *lock; mcs_rwlock_lock_t *lock;
struct mcs_rwlock_node_irqsave mcs_rw_node;
wset = set->__val[0]; wset = set->__val[0];
wset &= ~__sigmask(SIGKILL); wset &= ~__sigmask(SIGKILL);
@ -3392,31 +3569,31 @@ do_sigsuspend(struct thread *thread, const sigset_t *set)
lock = &thread->sigcommon->lock; lock = &thread->sigcommon->lock;
head = &thread->sigcommon->sigpending; head = &thread->sigcommon->sigpending;
flag = ihk_mc_spinlock_lock(lock); mcs_rwlock_writer_lock(lock, &mcs_rw_node);
list_for_each_entry(pending, head, list){ list_for_each_entry(pending, head, list){
if(!(pending->sigmask.__val[0] & wset)) if(!(pending->sigmask.__val[0] & wset))
break; break;
} }
if(&pending->list == head){ if(&pending->list == head){
ihk_mc_spinlock_unlock(lock, flag); mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
lock = &thread->sigpendinglock; lock = &thread->sigpendinglock;
head = &thread->sigpending; head = &thread->sigpending;
flag = ihk_mc_spinlock_lock(lock); mcs_rwlock_writer_lock(lock, &mcs_rw_node);
list_for_each_entry(pending, head, list){ list_for_each_entry(pending, head, list){
if(!(pending->sigmask.__val[0] & wset)) if(!(pending->sigmask.__val[0] & wset))
break; break;
} }
} }
if(&pending->list == head){ if(&pending->list == head){
ihk_mc_spinlock_unlock(lock, flag); mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
thread->sigevent = 0; thread->sigevent = 0;
continue; continue;
} }
list_del(&pending->list); list_del(&pending->list);
ihk_mc_spinlock_unlock(lock, flag); mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
thread->sigmask.__val[0] = bset; thread->sigmask.__val[0] = bset;
do_signal(-EINTR, NULL, thread, pending, 0); do_signal(-EINTR, NULL, thread, pending, 0);
break; break;
@ -5183,19 +5360,26 @@ static int ptrace_attach(int pid)
goto out; goto out;
} }
child = thread->proc; child = thread->proc;
dkprintf("ptrace_attach,pid=%d,thread->proc->parent=%p\n", thread->proc->pid, thread->proc->parent); dkprintf("ptrace_attach(): pid requested:%d, thread->tid:%d, thread->proc->pid=%d, thread->proc->parent=%p\n", pid, thread->tid, thread->proc->pid, thread->proc->parent);
mcs_rwlock_writer_lock_noirq(&child->update_lock, &updatelock); mcs_rwlock_writer_lock_noirq(&child->update_lock, &updatelock);
/* Only for the first thread of a process XXX: fix this */
if (thread->tid == child->pid) {
if (thread->proc->ptrace & PT_TRACED) { if (thread->proc->ptrace & PT_TRACED) {
mcs_rwlock_writer_unlock_noirq(&child->update_lock, &updatelock); mcs_rwlock_writer_unlock_noirq(&child->update_lock, &updatelock);
thread_unlock(thread, &lock); thread_unlock(thread, &lock);
dkprintf("ptrace_attach: -EPERM\n");
error = -EPERM; error = -EPERM;
goto out; goto out;
} }
}
parent = child->parent; parent = child->parent;
/* XXX: tmp */
if (parent != proc) {
dkprintf("ptrace_attach,parent->pid=%d\n", parent->pid); dkprintf("ptrace_attach() parent->pid=%d\n", parent->pid);
mcs_rwlock_writer_lock_noirq(&parent->children_lock, &childlock); mcs_rwlock_writer_lock_noirq(&parent->children_lock, &childlock);
list_del(&child->siblings_list); list_del(&child->siblings_list);
@ -5206,6 +5390,7 @@ static int ptrace_attach(int pid)
list_add_tail(&child->siblings_list, &proc->children_list); list_add_tail(&child->siblings_list, &proc->children_list);
child->parent = proc; child->parent = proc;
mcs_rwlock_writer_unlock_noirq(&proc->children_lock, &childlock); mcs_rwlock_writer_unlock_noirq(&proc->children_lock, &childlock);
}
child->ptrace = PT_TRACED | PT_TRACE_EXEC; child->ptrace = PT_TRACED | PT_TRACE_EXEC;
@ -5227,7 +5412,7 @@ static int ptrace_attach(int pid)
info.si_signo = SIGSTOP; info.si_signo = SIGSTOP;
info.si_code = SI_USER; info.si_code = SI_USER;
info._sifields._kill.si_pid = proc->pid; info._sifields._kill.si_pid = proc->pid;
error = do_kill(mythread, pid, -1, SIGSTOP, &info, 2); error = do_kill(mythread, -1, pid, SIGSTOP, &info, 2);
if (error < 0) { if (error < 0) {
goto out; goto out;
} }
@ -5539,8 +5724,8 @@ SYSCALL_DECLARE(sched_setparam)
struct sched_param param; struct sched_param param;
struct thread *thread = cpu_local_var(current); struct thread *thread = cpu_local_var(current);
struct mcs_rwlock_node_irqsave lock; struct mcs_rwlock_node_irqsave lock;
struct syscall_request request1 IHK_DMA_ALIGN; struct syscall_request request1 IHK_DMA_ALIGN;
int other_thread = 0;
dkprintf("sched_setparam: pid: %d, uparam: 0x%lx\n", pid, uparam); dkprintf("sched_setparam: pid: %d, uparam: 0x%lx\n", pid, uparam);
@ -5552,12 +5737,11 @@ SYSCALL_DECLARE(sched_setparam)
pid = thread->proc->pid; pid = thread->proc->pid;
if (thread->proc->pid != pid) { if (thread->proc->pid != pid) {
other_thread = 1;
thread = find_thread(pid, pid, &lock); thread = find_thread(pid, pid, &lock);
if (!thread) { if (!thread) {
return -ESRCH; return -ESRCH;
} }
// TODO: unlock 場所のチェック
// 何をしようとしているのか理解
thread_unlock(thread, &lock); thread_unlock(thread, &lock);
/* Ask Linux about ownership.. */ /* Ask Linux about ownership.. */
@ -5576,7 +5760,17 @@ SYSCALL_DECLARE(sched_setparam)
return -EFAULT; return -EFAULT;
} }
return setscheduler(thread, thread->sched_policy, &param); if (other_thread) {
thread = find_thread(pid, pid, &lock);
if (!thread) {
return -ESRCH;
}
}
retval = setscheduler(thread, thread->sched_policy, &param);
if (other_thread) {
thread_unlock(thread, &lock);
}
return retval;
} }
SYSCALL_DECLARE(sched_getparam) SYSCALL_DECLARE(sched_getparam)
@ -5856,7 +6050,7 @@ SYSCALL_DECLARE(sched_getaffinity)
int ret; int ret;
dkprintf("%s() len: %d, mask: %p\n", __FUNCTION__, len, u_cpu_set); dkprintf("%s() len: %d, mask: %p\n", __FUNCTION__, len, u_cpu_set);
if (!len) if (!len || u_cpu_set == (cpu_set_t *)-1)
return -EINVAL; return -EINVAL;
if ((len * BITS_PER_BYTE) < __CPU_SETSIZE) if ((len * BITS_PER_BYTE) < __CPU_SETSIZE)
@ -6279,7 +6473,23 @@ SYSCALL_DECLARE(nanosleep)
SYSCALL_DECLARE(sched_yield) SYSCALL_DECLARE(sched_yield)
{ {
struct cpu_local_var *v;
int do_schedule = 0;
long runq_irqstate;
runq_irqstate =
ihk_mc_spinlock_lock(&(get_this_cpu_local_var()->runq_lock));
v = get_this_cpu_local_var();
if (v->flags & CPU_FLAG_NEED_RESCHED || v->runq_len > 1) {
do_schedule = 1;
}
ihk_mc_spinlock_unlock(&v->runq_lock, runq_irqstate);
if (do_schedule) {
schedule(); schedule();
}
return 0; return 0;
} }
@ -7066,7 +7276,7 @@ SYSCALL_DECLARE(getcpu)
const uintptr_t cpup = ihk_mc_syscall_arg0(ctx); const uintptr_t cpup = ihk_mc_syscall_arg0(ctx);
const uintptr_t nodep = ihk_mc_syscall_arg1(ctx); const uintptr_t nodep = ihk_mc_syscall_arg1(ctx);
const int cpu = ihk_mc_get_processor_id(); const int cpu = ihk_mc_get_processor_id();
const int node = 0; const int node = ihk_mc_get_numa_id();
int error; int error;
if (cpup) { if (cpup) {
@ -7214,7 +7424,9 @@ SYSCALL_DECLARE(mbind)
} }
/* Verify NUMA mask */ /* Verify NUMA mask */
for_each_set_bit(bit, numa_mask, maxnode) { for_each_set_bit(bit, numa_mask,
maxnode < PROCESS_NUMA_MASK_BITS ?
maxnode : PROCESS_NUMA_MASK_BITS) {
if (bit >= ihk_mc_get_nr_numa_nodes()) { if (bit >= ihk_mc_get_nr_numa_nodes()) {
dkprintf("%s: %d is bigger than # of NUMA nodes\n", dkprintf("%s: %d is bigger than # of NUMA nodes\n",
__FUNCTION__, bit); __FUNCTION__, bit);
@ -7517,7 +7729,9 @@ SYSCALL_DECLARE(set_mempolicy)
/* Verify NUMA mask */ /* Verify NUMA mask */
valid_mask = 0; valid_mask = 0;
for_each_set_bit(bit, numa_mask, maxnode) { for_each_set_bit(bit, numa_mask,
maxnode < PROCESS_NUMA_MASK_BITS ?
maxnode : PROCESS_NUMA_MASK_BITS) {
if (bit >= ihk_mc_get_nr_numa_nodes()) { if (bit >= ihk_mc_get_nr_numa_nodes()) {
dkprintf("%s: %d is bigger than # of NUMA nodes\n", dkprintf("%s: %d is bigger than # of NUMA nodes\n",
__FUNCTION__, bit); __FUNCTION__, bit);
@ -7539,7 +7753,9 @@ SYSCALL_DECLARE(set_mempolicy)
} }
/* Update current mask by clearing non-requested nodes */ /* Update current mask by clearing non-requested nodes */
for_each_set_bit(bit, vm->numa_mask, maxnode) { for_each_set_bit(bit, vm->numa_mask,
maxnode < PROCESS_NUMA_MASK_BITS ?
maxnode : PROCESS_NUMA_MASK_BITS) {
if (!test_bit(bit, numa_mask)) { if (!test_bit(bit, numa_mask)) {
clear_bit(bit, vm->numa_mask); clear_bit(bit, vm->numa_mask);
} }
@ -8224,6 +8440,9 @@ set_cputime(int mode)
long syscall(int num, ihk_mc_user_context_t *ctx) long syscall(int num, ihk_mc_user_context_t *ctx)
{ {
long l; long l;
#ifdef TRACK_SYSCALLS
uint64_t t_s;
#endif // TRACK_SYSCALLS
set_cputime(1); set_cputime(1);
if(cpu_local_var(current)->proc->status == PS_EXITED && if(cpu_local_var(current)->proc->status == PS_EXITED &&
@ -8265,6 +8484,9 @@ long syscall(int num, ihk_mc_user_context_t *ctx)
#endif #endif
dkprintf("\n"); dkprintf("\n");
#ifdef TRACK_SYSCALLS
t_s = rdtsc();
#endif // TRACK_SYSCALLS
if ((0 <= num) && (num < (sizeof(syscall_table) / sizeof(syscall_table[0]))) if ((0 <= num) && (num < (sizeof(syscall_table) / sizeof(syscall_table[0])))
&& (syscall_table[num] != NULL)) { && (syscall_table[num] != NULL)) {
@ -8281,8 +8503,31 @@ long syscall(int num, ihk_mc_user_context_t *ctx)
l = syscall_generic_forwarding(num, ctx); l = syscall_generic_forwarding(num, ctx);
} }
if (num != __NR_sched_yield &&
num != __NR_futex) {
check_signal(l, NULL, num); check_signal(l, NULL, num);
}
#ifdef TRACK_SYSCALLS
if (num < 300) {
if (!cpu_local_var(current)->syscall_cnts) {
alloc_syscall_counters(cpu_local_var(current));
}
if (cpu_local_var(current)->socc_enabled) {
cpu_local_var(current)->syscall_times[num] += (rdtsc() - t_s);
cpu_local_var(current)->syscall_cnts[num]++;
}
}
else {
if (num != 701)
kprintf("syscall > 300?? : %d\n", num);
}
#endif // TRACK_SYSCALLS
if (num != __NR_sched_yield &&
num != __NR_futex) {
check_need_resched(); check_need_resched();
}
if (cpu_local_var(current)->proc->ptrace) { if (cpu_local_var(current)->proc->ptrace) {
ptrace_syscall_exit(cpu_local_var(current)); ptrace_syscall_exit(cpu_local_var(current));

kernel/xpmem.c (new file, 739 lines)
@@ -0,0 +1,739 @@
/**
* \file xpmem.c
* License details are found in the file LICENSE.
* \brief
* Cross Partition Memory (XPMEM) support.
*/
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
* Copyright 2010, 2014 Cray Inc. All Rights Reserved
* Copyright 2015-2016 Los Alamos National Security, LLC. All rights reserved.
*/
/*
* HISTORY
*/
#include <errno.h>
#include <kmalloc.h>
#include <limits.h>
#include <memobj.h>
#include <mman.h>
#include <string.h>
#include <types.h>
#include <vsprintf.h>
#include <ihk/lock.h>
#include <ihk/mm.h>
#include <xpmem_private.h>
struct xpmem_partition *xpmem_my_part = NULL; /* pointer to this partition */
int xpmem_open(
ihk_mc_user_context_t *ctx)
{
const char *pathname = (const char *)ihk_mc_syscall_arg0(ctx);
int flags = (int)ihk_mc_syscall_arg1(ctx);
int ret;
struct thread *thread = cpu_local_var(current);
struct process *proc = thread->proc;
struct syscall_request request IHK_DMA_ALIGN;
int fd;
struct mckfd *mckfd;
long irqstate;
XPMEM_DEBUG("call: pathname=%s, flags=%d", pathname, flags);
if (!xpmem_my_part) {
ret = xpmem_init();
if (ret) {
return ret;
}
}
request.number = __NR_open;
request.args[0] = (unsigned long)pathname;
request.args[1] = flags;
fd = do_syscall(&request, ihk_mc_get_processor_id(), 0);
if(fd < 0){
XPMEM_DEBUG("__NR_open error: fd=%d", fd);
return fd;
}
ret = __xpmem_open();
if (ret) {
XPMEM_DEBUG("return: ret=%d", ret);
return ret;
}
mckfd = kmalloc(sizeof(struct mckfd), IHK_MC_AP_NOWAIT);
if(!mckfd) {
return -ENOMEM;
}
XPMEM_DEBUG("kmalloc(): mckfd=0x%p", mckfd);
memset(mckfd, 0, sizeof(struct mckfd));
mckfd->fd = fd;
mckfd->sig_no = -1;
mckfd->ioctl_cb = xpmem_ioctl;
mckfd->close_cb = xpmem_close;
irqstate = ihk_mc_spinlock_lock(&proc->mckfd_lock);
if(proc->mckfd == NULL) {
proc->mckfd = mckfd;
mckfd->next = NULL;
} else {
mckfd->next = proc->mckfd;
proc->mckfd = mckfd;
}
ihk_mc_spinlock_unlock(&proc->mckfd_lock, irqstate);
ihk_atomic_inc_return(&xpmem_my_part->n_opened);
XPMEM_DEBUG("return: ret=%d", mckfd->fd);
return mckfd->fd;
}
static int xpmem_ioctl(
struct mckfd *mckfd,
ihk_mc_user_context_t *ctx)
{
int ret;
unsigned int cmd = ihk_mc_syscall_arg1(ctx);
unsigned long arg = ihk_mc_syscall_arg2(ctx);
XPMEM_DEBUG("call: cmd=0x%x, arg=0x%lx", cmd, arg);
switch (cmd) {
case XPMEM_CMD_VERSION: {
ret = XPMEM_CURRENT_VERSION;
XPMEM_DEBUG("return: cmd=0x%x, ret=0x%lx", cmd, ret);
return ret;
}
case XPMEM_CMD_MAKE: {
struct xpmem_cmd_make make_info;
xpmem_segid_t segid = 0;
if (copy_from_user(&make_info, (void __user *)arg,
sizeof(struct xpmem_cmd_make)))
return -EFAULT;
ret = xpmem_make(make_info.vaddr, make_info.size,
make_info.permit_type,
(void *)make_info.permit_value, &segid);
if (ret != 0) {
XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
return ret;
}
if (copy_to_user(&((struct xpmem_cmd_make __user *)arg)->segid,
(void *)&segid, sizeof(xpmem_segid_t))) {
(void)xpmem_remove(segid);
return -EFAULT;
}
XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
return ret;
}
case XPMEM_CMD_REMOVE: {
struct xpmem_cmd_remove remove_info;
if (copy_from_user(&remove_info, (void __user *)arg,
sizeof(struct xpmem_cmd_remove)))
return -EFAULT;
ret = xpmem_remove(remove_info.segid);
XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
return ret;
}
case XPMEM_CMD_GET: {
struct xpmem_cmd_get get_info;
// xpmem_apid_t apid = 0;
if (copy_from_user(&get_info, (void __user *)arg,
sizeof(struct xpmem_cmd_get)))
return -EFAULT;
// ret = xpmem_get(get_info.segid, get_info.flags,
// get_info.permit_type,
// (void *)get_info.permit_value, &apid); // TODO
ret = -EINVAL;
if (ret != 0) {
XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
return ret;
}
// if (copy_to_user(&((struct xpmem_cmd_get __user *)arg)->apid,
// (void *)&apid, sizeof(xpmem_apid_t))) {
// (void)xpmem_release(apid);
// return -EFAULT;
// }
XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
return ret;
}
case XPMEM_CMD_RELEASE: {
struct xpmem_cmd_release release_info;
if (copy_from_user(&release_info, (void __user *)arg,
sizeof(struct xpmem_cmd_release)))
return -EFAULT;
// ret = xpmem_release(release_info.apid); // TODO
ret = -EINVAL;
XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
return ret;
}
case XPMEM_CMD_ATTACH: {
struct xpmem_cmd_attach attach_info;
// unsigned long at_vaddr = 0;
if (copy_from_user(&attach_info, (void __user *)arg,
sizeof(struct xpmem_cmd_attach)))
return -EFAULT;
// ret = xpmem_attach(mckfd, attach_info.apid, attach_info.offset,
// attach_info.size, attach_info.vaddr,
// attach_info.fd, attach_info.flags,
// &at_vaddr); // TODO
ret = -EINVAL;
if (ret != 0) {
XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
return ret;
}
// if (copy_to_user(
// &((struct xpmem_cmd_attach __user *)arg)->vaddr,
// (void *)&at_vaddr, sizeof(unsigned long))) {
// (void)xpmem_detach(at_vaddr);
// return -EFAULT;
// }
XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
return ret;
}
case XPMEM_CMD_DETACH: {
struct xpmem_cmd_detach detach_info;
if (copy_from_user(&detach_info, (void __user *)arg,
sizeof(struct xpmem_cmd_detach)))
return -EFAULT;
// ret = xpmem_detach(detach_info.vaddr); // TODO
ret = -EINVAL;
XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
return ret;
}
default:
break;
}
XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, -EINVAL);
return -EINVAL;
}
static int xpmem_close(
struct mckfd *mckfd,
ihk_mc_user_context_t *ctx)
{
struct xpmem_thread_group *tg;
int index;
struct mcs_rwlock_node_irqsave lock;
int n_opened;
XPMEM_DEBUG("call: fd=%d", mckfd->fd);
n_opened = ihk_atomic_dec_return(&xpmem_my_part->n_opened);
if (n_opened) {
XPMEM_DEBUG("return: ret=%d, n_opened=%d", 0, n_opened);
return 0;
}
XPMEM_DEBUG("n_opened=%d", n_opened);
index = xpmem_tg_hashtable_index(cpu_local_var(current)->proc->pid);
mcs_rwlock_writer_lock(&xpmem_my_part->tg_hashtable[index].lock, &lock);
tg = xpmem_tg_ref_by_tgid_all_nolock(
cpu_local_var(current)->proc->pid);
if (!tg) {
mcs_rwlock_writer_unlock(
&xpmem_my_part->tg_hashtable[index].lock, &lock);
return 0;
}
list_del_init(&tg->tg_hashlist);
mcs_rwlock_writer_unlock(&xpmem_my_part->tg_hashtable[index].lock,
&lock);
XPMEM_DEBUG("tg->vm=0x%p", tg->vm);
xpmem_destroy_tg(tg);
if (!n_opened) {
xpmem_exit();
}
XPMEM_DEBUG("return: ret=%d", 0);
return 0;
}
static int xpmem_init(void)
{
int i;
XPMEM_DEBUG("call: ");
xpmem_my_part = kmalloc(sizeof(struct xpmem_partition) +
sizeof(struct xpmem_hashlist) * XPMEM_TG_HASHTABLE_SIZE,
IHK_MC_AP_NOWAIT);
if (xpmem_my_part == NULL) {
return -ENOMEM;
}
XPMEM_DEBUG("kmalloc(): xpmem_my_part=0x%p", xpmem_my_part);
memset(xpmem_my_part, 0, sizeof(struct xpmem_partition) +
sizeof(struct xpmem_hashlist) * XPMEM_TG_HASHTABLE_SIZE);
for (i = 0; i < XPMEM_TG_HASHTABLE_SIZE; i++) {
mcs_rwlock_init(&xpmem_my_part->tg_hashtable[i].lock);
INIT_LIST_HEAD(&xpmem_my_part->tg_hashtable[i].list);
}
ihk_atomic_set(&xpmem_my_part->n_opened, 0);
XPMEM_DEBUG("return: ret=%d", 0);
return 0;
}
static void xpmem_exit(void)
{
XPMEM_DEBUG("call: ");
if (xpmem_my_part) {
XPMEM_DEBUG("kfree(): 0x%p", xpmem_my_part);
kfree(xpmem_my_part);
xpmem_my_part = NULL;
}
XPMEM_DEBUG("return: ");
}
static int __xpmem_open(void)
{
struct xpmem_thread_group *tg;
int index;
struct mcs_rwlock_node_irqsave lock;
XPMEM_DEBUG("call: ");
tg = xpmem_tg_ref_by_tgid(cpu_local_var(current)->proc->pid);
if (!IS_ERR(tg)) {
xpmem_tg_deref(tg);
XPMEM_DEBUG("return: ret=%d, tg=0x%p", 0, tg);
return 0;
}
tg = kmalloc(sizeof(struct xpmem_thread_group) +
sizeof(struct xpmem_hashlist) * XPMEM_AP_HASHTABLE_SIZE,
IHK_MC_AP_NOWAIT);
if (tg == NULL) {
return -ENOMEM;
}
XPMEM_DEBUG("kmalloc(): tg=0x%p", tg);
memset(tg, 0, sizeof(struct xpmem_thread_group) +
sizeof(struct xpmem_hashlist) * XPMEM_AP_HASHTABLE_SIZE);
ihk_mc_spinlock_init(&tg->lock);
tg->tgid = cpu_local_var(current)->proc->pid;
tg->uid = cpu_local_var(current)->proc->ruid;
tg->gid = cpu_local_var(current)->proc->rgid;
ihk_atomic_set(&tg->uniq_segid, 0);
ihk_atomic_set(&tg->uniq_apid, 0);
mcs_rwlock_init(&tg->seg_list_lock);
INIT_LIST_HEAD(&tg->seg_list);
ihk_atomic_set(&tg->n_pinned, 0);
INIT_LIST_HEAD(&tg->tg_hashlist);
tg->vm = cpu_local_var(current)->vm;
ihk_atomic_set(&tg->n_recall_PFNs, 0);
for (index = 0; index < XPMEM_AP_HASHTABLE_SIZE; index++) {
mcs_rwlock_init(&tg->ap_hashtable[index].lock);
INIT_LIST_HEAD(&tg->ap_hashtable[index].list);
}
xpmem_tg_not_destroyable(tg);
index = xpmem_tg_hashtable_index(tg->tgid);
mcs_rwlock_writer_lock(&xpmem_my_part->tg_hashtable[index].lock, &lock);
list_add_tail(&tg->tg_hashlist,
&xpmem_my_part->tg_hashtable[index].list);
mcs_rwlock_writer_unlock(&xpmem_my_part->tg_hashtable[index].lock,
&lock);
tg->group_leader = cpu_local_var(current);
XPMEM_DEBUG("return: ret=%d", 0);
return 0;
}
static void xpmem_destroy_tg(
struct xpmem_thread_group *tg)
{
XPMEM_DEBUG("call: tg=0x%p", tg);
XPMEM_DEBUG("tg->vm=0x%p", tg->vm);
xpmem_tg_destroyable(tg);
xpmem_tg_deref(tg);
XPMEM_DEBUG("return: ");
}
static int xpmem_make(
unsigned long vaddr,
size_t size,
int permit_type,
void *permit_value,
xpmem_segid_t *segid_p)
{
xpmem_segid_t segid;
struct xpmem_thread_group *seg_tg;
struct xpmem_segment *seg;
struct mcs_rwlock_node_irqsave lock;
XPMEM_DEBUG("call: vaddr=0x%lx, size=%lu, permit_type=%d, "
"permit_value=0%04lo",
vaddr, size, permit_type,
(unsigned long)(uintptr_t)permit_value);
if (permit_type != XPMEM_PERMIT_MODE ||
((unsigned long)(uintptr_t)permit_value & ~00777) ||
size == 0) {
XPMEM_DEBUG("return: ret=%d", -EINVAL);
return -EINVAL;
}
seg_tg = xpmem_tg_ref_by_tgid(cpu_local_var(current)->proc->pid);
if (IS_ERR(seg_tg)) {
DBUG_ON(PTR_ERR(seg_tg) != -ENOENT);
return -XPMEM_ERRNO_NOPROC;
}
/*
* The start of the segment must be page aligned and it must be a
* multiple of pages in size.
*/
if (offset_in_page(vaddr) != 0 || offset_in_page(size) != 0) {
xpmem_tg_deref(seg_tg);
XPMEM_DEBUG("return: ret=%d", -EINVAL);
return -EINVAL;
}
segid = xpmem_make_segid(seg_tg);
if (segid < 0) {
xpmem_tg_deref(seg_tg);
return segid;
}
/* create a new struct xpmem_segment structure with a unique segid */
seg = kmalloc(sizeof(struct xpmem_segment), IHK_MC_AP_NOWAIT);
if (seg == NULL) {
xpmem_tg_deref(seg_tg);
return -ENOMEM;
}
XPMEM_DEBUG("kmalloc(): seg=0x%p", seg);
memset(seg, 0, sizeof(struct xpmem_segment));
ihk_mc_spinlock_init(&seg->lock);
mcs_rwlock_init(&seg->seg_lock);
seg->segid = segid;
seg->vaddr = vaddr;
seg->size = size;
seg->permit_type = permit_type;
seg->permit_value = permit_value;
seg->tg = seg_tg;
INIT_LIST_HEAD(&seg->ap_list);
INIT_LIST_HEAD(&seg->seg_list);
xpmem_seg_not_destroyable(seg);
/* add seg to its tg's list of segs */
mcs_rwlock_writer_lock(&seg_tg->seg_list_lock, &lock);
list_add_tail(&seg->seg_list, &seg_tg->seg_list);
mcs_rwlock_writer_unlock(&seg_tg->seg_list_lock, &lock);
xpmem_tg_deref(seg_tg);
*segid_p = segid;
XPMEM_DEBUG("return: ret=%d, segid=0x%lx", 0, *segid_p);
return 0;
}
static xpmem_segid_t xpmem_make_segid(
struct xpmem_thread_group *seg_tg)
{
struct xpmem_id segid;
xpmem_segid_t *segid_p = (xpmem_segid_t *)&segid;
int uniq;
XPMEM_DEBUG("call: seg_tg=0x%p, uniq_segid=%d",
seg_tg, ihk_atomic_read(&seg_tg->uniq_segid));
DBUG_ON(sizeof(struct xpmem_id) != sizeof(xpmem_segid_t));
uniq = ihk_atomic_inc_return(&seg_tg->uniq_segid);
if (uniq > XPMEM_MAX_UNIQ_ID) {
ihk_atomic_dec(&seg_tg->uniq_segid);
return -EBUSY;
}
*segid_p = 0;
segid.tgid = seg_tg->tgid;
segid.uniq = (unsigned long)uniq;
DBUG_ON(*segid_p <= 0);
XPMEM_DEBUG("return: segid=0x%lx, segid.tgid=%d, segid.uniq=%d",
segid, segid.tgid, segid.uniq);
return *segid_p;
}
static int xpmem_remove(
xpmem_segid_t segid)
{
struct xpmem_thread_group *seg_tg;
struct xpmem_segment *seg;
XPMEM_DEBUG("call: segid=0x%lx", segid);
if (segid <= 0) {
XPMEM_DEBUG("return: ret=%d", -EINVAL);
return -EINVAL;
}
seg_tg = xpmem_tg_ref_by_segid(segid);
if (IS_ERR(seg_tg))
return PTR_ERR(seg_tg);
if (cpu_local_var(current)->proc->pid != seg_tg->tgid) {
xpmem_tg_deref(seg_tg);
XPMEM_DEBUG("return: ret=%d", -EACCES);
return -EACCES;
}
seg = xpmem_seg_ref_by_segid(seg_tg, segid);
if (IS_ERR(seg)) {
xpmem_tg_deref(seg_tg);
return PTR_ERR(seg);
}
DBUG_ON(seg->tg != seg_tg);
xpmem_remove_seg(seg_tg, seg);
xpmem_seg_deref(seg);
xpmem_tg_deref(seg_tg);
XPMEM_DEBUG("return: ret=%d", 0);
return 0;
}
static void xpmem_remove_seg(
struct xpmem_thread_group *seg_tg,
struct xpmem_segment *seg)
{
DBUG_ON(ihk_atomic_read(&seg->refcnt) <= 0);
struct mcs_rwlock_node_irqsave seg_lock;
struct mcs_rwlock_node_irqsave lock;
XPMEM_DEBUG("call: tgid=%d, segid=0x%lx", seg_tg->tgid, seg->segid);
ihk_mc_spinlock_lock(&seg->lock);
if (seg->flags & XPMEM_FLAG_DESTROYING) {
ihk_mc_spinlock_unlock_noirq(&seg->lock);
schedule();
return;
}
seg->flags |= XPMEM_FLAG_DESTROYING;
ihk_mc_spinlock_unlock_noirq(&seg->lock);
mcs_rwlock_writer_lock(&seg->seg_lock, &seg_lock);
/* unpin pages and clear PTEs for each attachment to this segment */
xpmem_clear_PTEs(seg);
/* indicate that the segment has been destroyed */
ihk_mc_spinlock_lock(&seg->lock);
seg->flags |= XPMEM_FLAG_DESTROYED;
ihk_mc_spinlock_unlock_noirq(&seg->lock);
/* Remove segment structure from its tg's list of segs */
mcs_rwlock_writer_lock(&seg_tg->seg_list_lock, &lock);
list_del_init(&seg->seg_list);
mcs_rwlock_writer_unlock(&seg_tg->seg_list_lock, &lock);
mcs_rwlock_writer_unlock(&seg->seg_lock, &seg_lock);
xpmem_seg_destroyable(seg);
XPMEM_DEBUG("return: ");
}
static void xpmem_clear_PTEs(
struct xpmem_segment *seg)
{
XPMEM_DEBUG("call: seg=0x%p", seg);
// xpmem_clear_PTEs_range(seg, seg->vaddr, seg->vaddr + seg->size, 0); // TODO
XPMEM_DEBUG("return: ");
}
static struct xpmem_thread_group * __xpmem_tg_ref_by_tgid_nolock_internal(
pid_t tgid,
int index,
int return_destroying)
{
struct xpmem_thread_group *tg;
XPMEM_DEBUG("call: tgid=%d, index=%d, return_destroying=%d",
tgid, index, return_destroying);
list_for_each_entry(tg, &xpmem_my_part->tg_hashtable[index].list,
tg_hashlist) {
if (tg->tgid == tgid) {
if ((tg->flags & XPMEM_FLAG_DESTROYING) &&
!return_destroying) {
continue;
}
xpmem_tg_ref(tg);
XPMEM_DEBUG("return: tg=0x%p", tg);
return tg;
}
}
XPMEM_DEBUG("return: tg=0x%p", ERR_PTR(-ENOENT));
return ERR_PTR(-ENOENT);
}
static struct xpmem_thread_group * xpmem_tg_ref_by_segid(
xpmem_segid_t segid)
{
struct xpmem_thread_group *tg;
XPMEM_DEBUG("call: segid=0x%lx", segid);
tg = xpmem_tg_ref_by_tgid(xpmem_segid_to_tgid(segid));
XPMEM_DEBUG("return: tg=0x%p", tg);
return tg;
}
static void xpmem_tg_deref(
struct xpmem_thread_group *tg)
{
XPMEM_DEBUG("call: tg=0x%p", tg);
DBUG_ON(ihk_atomic_read(&tg->refcnt) <= 0);
if (ihk_atomic_dec_return(&tg->refcnt) != 0) {
XPMEM_DEBUG("return: tg->refcnt=%d", tg->refcnt);
return;
}
XPMEM_DEBUG("kfree(): tg=0x%p", tg);
kfree(tg);
XPMEM_DEBUG("return: ");
}
static struct xpmem_segment * xpmem_seg_ref_by_segid(
struct xpmem_thread_group *seg_tg,
xpmem_segid_t segid)
{
struct xpmem_segment *seg;
struct mcs_rwlock_node_irqsave lock;
XPMEM_DEBUG("call: seg_tg=0x%p, segid=0x%lx", seg_tg, segid);
mcs_rwlock_reader_lock(&seg_tg->seg_list_lock, &lock);
list_for_each_entry(seg, &seg_tg->seg_list, seg_list) {
if (seg->segid == segid) {
if (seg->flags & XPMEM_FLAG_DESTROYING)
continue;
xpmem_seg_ref(seg);
mcs_rwlock_reader_unlock(&seg_tg->seg_list_lock, &lock);
return seg;
}
}
mcs_rwlock_reader_unlock(&seg_tg->seg_list_lock, &lock);
return ERR_PTR(-ENOENT);
}
static void xpmem_seg_deref(
struct xpmem_segment *seg)
{
XPMEM_DEBUG("call: seg=0x%p", seg);
DBUG_ON(ihk_atomic_read(&seg->refcnt) <= 0);
if (ihk_atomic_dec_return(&seg->refcnt) != 0) {
XPMEM_DEBUG("return: seg->refcnt=%d", seg->refcnt);
return;
}
DBUG_ON(!(seg->flags & XPMEM_FLAG_DESTROYING));
XPMEM_DEBUG("kfree(): seg=0x%p", seg);
kfree(seg);
XPMEM_DEBUG("return: ");
}
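A note on the segid encoding above: xpmem_make_segid() builds the 64-bit xpmem_segid_t by overlaying struct xpmem_id, a tgid plus a per-group uniq counter, so the owning thread group can later be recovered via xpmem_segid_to_tgid(). A standalone sketch of that round trip; the struct xpmem_id field layout is an assumption here, since it is defined in xpmem_private.h, which is not part of this diff:

/* Sketch of the segid <-> (tgid, uniq) overlay; the exact layout of
 * struct xpmem_id is assumed from the code above. */
#include <stdint.h>
#include <stdio.h>

typedef int64_t xpmem_segid_t;

struct xpmem_id {
int tgid; /* thread group id of the segment owner */
unsigned int uniq; /* unique id within the thread group */
};

int main(void)
{
struct xpmem_id id = { .tgid = 1234, .uniq = 1 };
xpmem_segid_t segid;

segid = *(xpmem_segid_t *)&id; /* pack, as xpmem_make_segid() does */
printf("segid=0x%lx decodes to tgid=%d\n", (long)segid,
((struct xpmem_id *)&segid)->tgid); /* unpack */
return 0;
}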


@@ -102,6 +102,7 @@ static int alloc_zeroobj(void)
memset(obj, 0, sizeof(*obj));
obj->memobj.ops = &zeroobj_ops;
+obj->memobj.size = 0;
page_list_init(obj);
ihk_mc_spinlock_init(&obj->memobj.lock);


@@ -49,6 +49,7 @@ struct ihk_mc_cpu_info {
int ncpus;
int *hw_ids;
int *nodes;
+int *linux_cpu_ids;
};
struct ihk_mc_cpu_info *ihk_mc_get_cpu_info(void);
@@ -56,6 +57,9 @@ void ihk_mc_boot_cpu(int cpuid, unsigned long pc);
int ihk_mc_get_processor_id(void);
int ihk_mc_get_hardware_processor_id(void);
int ihk_mc_get_numa_id(void);
+int ihk_mc_get_nr_cores();
+int ihk_mc_get_core(int id, unsigned long *linux_core_id, unsigned long *apic_id,
+int *numa_id);
void ihk_mc_delay_us(int us);
void ihk_mc_set_syscall_handler(long (*handler)(int, ihk_mc_user_context_t *));


@@ -185,6 +185,9 @@ int ihk_mc_get_memory_chunk(int id,
void remote_flush_tlb_cpumask(struct process_vm *vm,
unsigned long addr, int cpu_id);
+int ihk_set_kmsg(unsigned long addr, unsigned long size);
+char *ihk_get_kargs();
extern void (*__tlb_flush_handler)(int vector);
struct tlb_flush_entry {


@@ -17,11 +17,17 @@
#include <list.h>
/* XXX: Physical memory management shouldn't be part of IHK */
+struct node_distance {
+int id;
+int distance;
+};
struct ihk_mc_numa_node {
int id;
int linux_numa_id;
int type;
struct list_head allocators;
+struct node_distance *nodes_by_distance;
};
struct ihk_page_allocator_desc {
@@ -30,7 +36,7 @@ struct ihk_page_allocator_desc {
unsigned int count;
unsigned int flag;
unsigned int shift;
-ihk_spinlock_t lock;
+mcs_lock_node_t lock;
struct list_head list;
unsigned long map[0];
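The nodes_by_distance array added above backs the "NUMA: order nodes based on distances" commit: each node keeps the other nodes sorted nearest-first so allocation fallback can walk them in distance order. The initialization code is not part of this diff; the following is only an illustrative user-space sketch of building such an ordering from one row of a distance table (e.g. the ACPI SLIT):

/* Illustrative only; the real initializer lives outside this diff. */
#include <stdlib.h>

struct node_distance {
int id;
int distance;
};

static int by_distance(const void *a, const void *b)
{
return ((const struct node_distance *)a)->distance -
((const struct node_distance *)b)->distance;
}

/* dist[i] = distance from this node to node i */
static void order_nodes_by_distance(struct node_distance *out,
const int *dist, int nr_nodes)
{
int i;

for (i = 0; i < nr_nodes; i++) {
out[i].id = i;
out[i].distance = dist[i];
}
qsort(out, nr_nodes, sizeof(*out), by_distance);
}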

lib/include/mc_xpmem.h (new file, 153 lines)
@@ -0,0 +1,153 @@
/**
* \file mc_xpmem.h
* License details are found in the file LICENSE.
* \brief
* Cross Partition Memory (XPMEM) structures and macros.
*/
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* HISTORY
*/
#ifndef _MC_XPMEM_H
#define _MC_XPMEM_H
#ifndef __KERNEL__
#include <sys/types.h>
#endif
/*
* _IOC definitions for McKernel
*/
#define _IOC_NRBITS 8
#define _IOC_TYPEBITS 8
#define _IOC_SIZEBITS 14
#define _IOC_DIRBITS 2
#define _IOC_NRSHIFT 0
#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
#define _IOC_NONE 0U
#define _IOC(dir,type,nr,size) \
(((dir) << _IOC_DIRSHIFT) | \
((type) << _IOC_TYPESHIFT) | \
((nr) << _IOC_NRSHIFT) | \
((size) << _IOC_SIZESHIFT))
#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
/*
* basic argument type definitions for McKernel
*/
typedef uint64_t u64;
typedef uint64_t __u64;
typedef int64_t __s64;
/*
* basic argument type definitions
*/
typedef __s64 xpmem_segid_t; /* segid returned from xpmem_make() */
typedef __s64 xpmem_apid_t; /* apid returned from xpmem_get() */
struct xpmem_addr {
xpmem_apid_t apid; /* apid that represents memory */
off_t offset; /* offset into apid's memory */
};
#define XPMEM_MAXADDR_SIZE (size_t)(-1L)
/*
* path to XPMEM device
*/
#define XPMEM_DEV_PATH "/dev/xpmem"
/*
* The following are the possible XPMEM related errors.
*/
#define XPMEM_ERRNO_NOPROC 2004 /* unknown thread due to fork() */
/*
* flags for segment permissions
*/
#define XPMEM_RDONLY 0x1
#define XPMEM_RDWR 0x2
/*
* Valid permit_type values for xpmem_make().
*/
#define XPMEM_PERMIT_MODE 0x1
/*
* ioctl() commands used to interface to the kernel module.
*/
#define XPMEM_IOC_MAGIC 'x'
#define XPMEM_CMD_VERSION _IO(XPMEM_IOC_MAGIC, 0)
#define XPMEM_CMD_MAKE _IO(XPMEM_IOC_MAGIC, 1)
#define XPMEM_CMD_REMOVE _IO(XPMEM_IOC_MAGIC, 2)
#define XPMEM_CMD_GET _IO(XPMEM_IOC_MAGIC, 3)
#define XPMEM_CMD_RELEASE _IO(XPMEM_IOC_MAGIC, 4)
#define XPMEM_CMD_ATTACH _IO(XPMEM_IOC_MAGIC, 5)
#define XPMEM_CMD_DETACH _IO(XPMEM_IOC_MAGIC, 6)
/*
* Structures used with the preceding ioctl() commands to pass data.
*/
struct xpmem_cmd_make {
__u64 vaddr;
size_t size;
int permit_type;
__u64 permit_value;
xpmem_segid_t segid; /* returned on success */
};
struct xpmem_cmd_remove {
xpmem_segid_t segid;
};
struct xpmem_cmd_get {
xpmem_segid_t segid;
int flags;
int permit_type;
__u64 permit_value;
xpmem_apid_t apid; /* returned on success */
};
struct xpmem_cmd_release {
xpmem_apid_t apid;
};
struct xpmem_cmd_attach {
xpmem_apid_t apid;
off_t offset;
size_t size;
__u64 vaddr;
int fd;
int flags;
};
struct xpmem_cmd_detach {
__u64 vaddr;
};
#ifndef __KERNEL__
extern int xpmem_version(void);
extern xpmem_segid_t xpmem_make(void *, size_t, int, void *);
extern int xpmem_remove(xpmem_segid_t);
extern xpmem_apid_t xpmem_get(xpmem_segid_t, int, int, void *);
extern int xpmem_release(xpmem_apid_t);
extern void *xpmem_attach(struct xpmem_addr, size_t, void *);
extern int xpmem_detach(void *);
#endif
#endif /* _MC_XPMEM_H */
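Putting this header together with the xpmem.c handlers above: user space reaches XPMEM by open()ing XPMEM_DEV_PATH, which McKernel's open() intercepts, and then issuing the ioctl() commands defined here. A minimal sketch of exporting one page with XPMEM_CMD_MAKE and removing it again; note that in this version only make/remove are implemented, and XPMEM_CMD_GET/ATTACH still return -EINVAL:

/* Minimal exercise of the implemented XPMEM ioctls (make/remove).
 * Error handling is trimmed for brevity. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "mc_xpmem.h"

int main(void)
{
long page = sysconf(_SC_PAGESIZE);
struct xpmem_cmd_make make = { 0 };
struct xpmem_cmd_remove rem;
void *buf;
int fd;

fd = open(XPMEM_DEV_PATH, O_RDWR);
if (fd < 0 || posix_memalign(&buf, page, page))
return 1;

/* segment must be page aligned and a multiple of the page size */
make.vaddr = (__u64)(uintptr_t)buf;
make.size = page;
make.permit_type = XPMEM_PERMIT_MODE;
make.permit_value = 0600; /* octal permission bits */
if (ioctl(fd, XPMEM_CMD_MAKE, &make) == 0) {
printf("segid=0x%llx\n", (unsigned long long)make.segid);
rem.segid = make.segid;
ioctl(fd, XPMEM_CMD_REMOVE, &rem);
}
close(fd);
return 0;
}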


@@ -73,7 +73,7 @@ void *__ihk_pagealloc_init(unsigned long start, unsigned long size,
//kprintf("page allocator @ %lx - %lx (%d)\n", start, start + size,
// page_shift);
-ihk_mc_spinlock_init(&desc->lock);
+mcs_lock_init(&desc->lock);
/* Reserve align padding area */
for (i = mapsize; i < mapaligned * 8; i++) {
@@ -99,12 +99,12 @@ void ihk_pagealloc_destroy(void *__desc)
static unsigned long __ihk_pagealloc_large(struct ihk_page_allocator_desc *desc,
int npages, int p2align)
{
-unsigned long flags;
unsigned int i, j, mi;
int nblocks;
int nfrags;
unsigned long mask;
-int mialign;
+unsigned long align_mask = ((PAGE_SIZE << p2align) - 1);
+mcs_lock_node_t node;
nblocks = (npages / 64);
mask = -1;
@@ -113,14 +113,13 @@ static unsigned long __ihk_pagealloc_large(struct ihk_page_allocator_desc *desc,
++nblocks;
mask = (1UL << nfrags) - 1;
}
-mialign = (p2align <= 6)? 1: (1 << (p2align - 6));
-flags = ihk_mc_spinlock_lock(&desc->lock);
+mcs_lock_lock(&desc->lock, &node);
for (i = 0, mi = desc->last; i < desc->count; i++, mi++) {
if (mi >= desc->count) {
mi = 0;
}
-if ((mi + nblocks >= desc->count) || (mi % mialign)) {
+if ((mi + nblocks >= desc->count) || (ADDRESS(desc, mi, 0) & align_mask)) {
continue;
}
for (j = mi; j < mi + nblocks - 1; j++) {
@@ -133,11 +132,11 @@ static unsigned long __ihk_pagealloc_large(struct ihk_page_allocator_desc *desc,
desc->map[j] = (unsigned long)-1;
}
desc->map[j] |= mask;
-ihk_mc_spinlock_unlock(&desc->lock, flags);
+mcs_lock_unlock(&desc->lock, &node);
return ADDRESS(desc, mi, 0);
}
}
-ihk_mc_spinlock_unlock(&desc->lock, flags);
+mcs_lock_unlock(&desc->lock, &node);
return 0;
}
@@ -147,8 +146,9 @@ unsigned long ihk_pagealloc_alloc(void *__desc, int npages, int p2align)
struct ihk_page_allocator_desc *desc = __desc;
unsigned int i, mi;
int j;
-unsigned long v, mask, flags;
+unsigned long v, mask;
int jalign;
+mcs_lock_node_t node;
if ((npages >= 32) || (p2align >= 5)) {
return __ihk_pagealloc_large(desc, npages, p2align);
@@ -157,7 +157,7 @@ unsigned long ihk_pagealloc_alloc(void *__desc, int npages, int p2align)
mask = (1UL << npages) - 1;
jalign = (p2align <= 0)? 1: (1 << p2align);
-flags = ihk_mc_spinlock_lock(&desc->lock);
+mcs_lock_lock(&desc->lock, &node);
for (i = 0, mi = desc->last; i < desc->count; i++, mi++) {
if (mi >= desc->count) {
mi = 0;
@@ -174,12 +174,12 @@ unsigned long ihk_pagealloc_alloc(void *__desc, int npages, int p2align)
if (!(v & (mask << j))) { /* free */
desc->map[mi] |= (mask << j);
-ihk_mc_spinlock_unlock(&desc->lock, flags);
+mcs_lock_unlock(&desc->lock, &node);
return ADDRESS(desc, mi, j);
}
}
}
-ihk_mc_spinlock_unlock(&desc->lock, flags);
+mcs_lock_unlock(&desc->lock, &node);
/* We use null pointer for failure */
return 0;
@@ -189,7 +189,7 @@ void ihk_pagealloc_reserve(void *__desc, unsigned long start, unsigned long end)
{
int i, n;
struct ihk_page_allocator_desc *desc = __desc;
-unsigned long flags;
+mcs_lock_node_t node;
n = (end + (1 << desc->shift) - 1 - desc->start) >> desc->shift;
i = ((start - desc->start) >> desc->shift);
@@ -197,7 +197,7 @@ void ihk_pagealloc_reserve(void *__desc, unsigned long start, unsigned long end)
return;
}
-flags = ihk_mc_spinlock_lock(&desc->lock);
+mcs_lock_lock(&desc->lock, &node);
for (; i < n; i++) {
if (!(i & 63) && i + 63 < n) {
desc->map[MAP_INDEX(i)] = (unsigned long)-1L;
@@ -206,7 +206,7 @@ void ihk_pagealloc_reserve(void *__desc, unsigned long start, unsigned long end)
desc->map[MAP_INDEX(i)] |= (1UL << MAP_BIT(i));
}
}
-ihk_mc_spinlock_unlock(&desc->lock, flags);
+mcs_lock_unlock(&desc->lock, &node);
}
void ihk_pagealloc_free(void *__desc, unsigned long address, int npages)
@@ -214,24 +214,24 @@ void ihk_pagealloc_free(void *__desc, unsigned long address, int npages)
struct ihk_page_allocator_desc *desc = __desc;
int i;
unsigned mi;
-unsigned long flags;
+mcs_lock_node_t node;
/* XXX: Parameter check */
-flags = ihk_mc_spinlock_lock(&desc->lock);
+mcs_lock_lock(&desc->lock, &node);
mi = (address - desc->start) >> desc->shift;
for (i = 0; i < npages; i++, mi++) {
desc->map[MAP_INDEX(mi)] &= ~(1UL << MAP_BIT(mi));
}
-ihk_mc_spinlock_unlock(&desc->lock, flags);
+mcs_lock_unlock(&desc->lock, &node);
}
unsigned long ihk_pagealloc_count(void *__desc)
{
struct ihk_page_allocator_desc *desc = __desc;
unsigned long i, j, n = 0;
-unsigned long flags;
+mcs_lock_node_t node;
-flags = ihk_mc_spinlock_lock(&desc->lock);
+mcs_lock_lock(&desc->lock, &node);
/* XXX: Very silly counting */
for (i = 0; i < desc->count; i++) {
for (j = 0; j < 64; j++) {
@@ -240,7 +240,7 @@ unsigned long ihk_pagealloc_count(void *__desc)
}
}
}
-ihk_mc_spinlock_unlock(&desc->lock, flags);
+mcs_lock_unlock(&desc->lock, &node);
return n;
}
@@ -250,10 +250,11 @@ int ihk_pagealloc_query_free(void *__desc)
struct ihk_page_allocator_desc *desc = __desc;
unsigned int mi;
int j;
-unsigned long v, flags;
+unsigned long v;
int npages = 0;
+mcs_lock_node_t node;
-flags = ihk_mc_spinlock_lock(&desc->lock);
+mcs_lock_lock(&desc->lock, &node);
for (mi = 0; mi < desc->count; mi++) {
v = desc->map[mi];
@@ -266,7 +267,7 @@ int ihk_pagealloc_query_free(void *__desc)
}
}
}
-ihk_mc_spinlock_unlock(&desc->lock, flags);
+mcs_lock_unlock(&desc->lock, &node);
return npages;
}
@@ -276,11 +277,12 @@ void __ihk_pagealloc_zero_free_pages(void *__desc)
struct ihk_page_allocator_desc *desc = __desc;
unsigned int mi;
int j;
-unsigned long v, flags;
+unsigned long v;
+mcs_lock_node_t node;
kprintf("zeroing free memory... ");
-flags = ihk_mc_spinlock_lock(&desc->lock);
+mcs_lock_lock(&desc->lock, &node);
for (mi = 0; mi < desc->count; mi++) {
v = desc->map[mi];
@@ -294,7 +296,7 @@ kprintf("zeroing free memory... ");
}
}
}
-ihk_mc_spinlock_unlock(&desc->lock, flags);
+mcs_lock_unlock(&desc->lock, &node);
kprintf("\nzeroing done\n");
}
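The conversion pattern in this last file is mechanical: the flags word returned by ihk_mc_spinlock_lock() is replaced by a caller-provided mcs_lock_node_t that mcs_lock_lock()/mcs_lock_unlock() link into the lock's waiter queue, so each contending CPU spins on its own cache line instead of the shared lock word. A condensed sketch of the resulting shape, assuming the MCS API from ihk/lock.h as used above:

/* Shape of the spinlock -> MCS conversion; the node must stay live on
 * the caller's stack for the whole critical section, since other CPUs
 * enqueue behind it. */
static unsigned long count_used_pages(struct ihk_page_allocator_desc *desc)
{
mcs_lock_node_t node; /* replaces: unsigned long flags; */
unsigned long n = 0;
unsigned long i;

mcs_lock_lock(&desc->lock, &node); /* was: flags = ihk_mc_spinlock_lock() */
for (i = 0; i < desc->count; i++)
n += __builtin_popcountl(desc->map[i]); /* set bits = used pages */
mcs_lock_unlock(&desc->lock, &node); /* was: ihk_mc_spinlock_unlock() */
return n;
}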