Compare commits
30 Commits
| SHA1 |
|---|
| f81927b85b |
| 701cdcdab1 |
| 9635a628a9 |
| 3e1b16f3fc |
| ff37ff9ccf |
| 5b7bcb7170 |
| 6a5fe90f98 |
| 91373337ba |
| 56ed726a88 |
| bce10e11e4 |
| 91cdb16158 |
| c58ab0f648 |
| f410af1cfc |
| aa15e5eea8 |
| df9f1f8f78 |
| 7ace35d737 |
| 551999ff6b |
| 052b3f44ca |
| fdcf766337 |
| 7d13bfb14e |
| 202bfd9955 |
| c99e36235b |
| 3cecafac59 |
| 61fc4c5e55 |
| fad73cacc1 |
| 8fced29978 |
| b0f4ae4890 |
| 7070094a31 |
| 011185e3f7 |
| 461881e46a |
@@ -844,6 +844,25 @@ void set_signal(int sig, void *regs, struct siginfo *info);
 void check_signal(unsigned long, void *, int);
 extern void tlb_flush_handler(int vector);
 
+void __show_stack(uintptr_t *sp) {
+	while (((uintptr_t)sp >= 0xffff800000000000)
+			&& ((uintptr_t)sp < 0xffffffff80000000)) {
+		uintptr_t fp;
+		uintptr_t ip;
+
+		fp = sp[0];
+		ip = sp[1];
+		kprintf("IP: %016lx, SP: %016lx, FP: %016lx\n", ip, (uintptr_t)sp, fp);
+		sp = (void *)fp;
+	}
+	return;
+}
+
+void show_context_stack(uintptr_t *rbp) {
+	__show_stack(rbp);
+	return;
+}
+
 void handle_interrupt(int vector, struct x86_user_context *regs)
 {
 	struct ihk_mc_interrupt_handler *h;

@@ -952,6 +971,9 @@ void handle_interrupt(int vector, struct x86_user_context *regs)
 
 		tlb_flush_handler(vector);
 	}
+	else if (vector == 133) {
+		show_context_stack((uintptr_t *)regs->gpr.rbp);
+	}
 	else {
 		list_for_each_entry(h, &handlers[vector - 32], list) {
 			if (h->func) {
@@ -131,6 +131,7 @@ static void __ihk_mc_spinlock_unlock(ihk_spinlock_t *lock, unsigned long flags)
 typedef struct mcs_lock_node {
 	unsigned long locked;
 	struct mcs_lock_node *next;
+	unsigned long irqsave;
 } __attribute__((aligned(64))) mcs_lock_node_t;
 
 static void mcs_lock_init(struct mcs_lock_node *node)

@@ -139,7 +140,7 @@ static void mcs_lock_init(struct mcs_lock_node *node)
 	node->next = NULL;
 }
 
-static void mcs_lock_lock(struct mcs_lock_node *lock,
+static void __mcs_lock_lock(struct mcs_lock_node *lock,
 		struct mcs_lock_node *node)
 {
 	struct mcs_lock_node *pred;

@@ -158,7 +159,7 @@ static void mcs_lock_lock(struct mcs_lock_node *lock,
 	}
 }
 
-static void mcs_lock_unlock(struct mcs_lock_node *lock,
+static void __mcs_lock_unlock(struct mcs_lock_node *lock,
 		struct mcs_lock_node *node)
 {
 	if (node->next == NULL) {

@@ -178,6 +179,35 @@ static void mcs_lock_unlock(struct mcs_lock_node *lock,
 	node->next->locked = 0;
 }
 
+static void mcs_lock_lock_noirq(struct mcs_lock_node *lock,
+		struct mcs_lock_node *node)
+{
+	preempt_disable();
+	__mcs_lock_lock(lock, node);
+}
+
+static void mcs_lock_unlock_noirq(struct mcs_lock_node *lock,
+		struct mcs_lock_node *node)
+{
+	__mcs_lock_unlock(lock, node);
+	preempt_enable();
+}
+
+static void mcs_lock_lock(struct mcs_lock_node *lock,
+		struct mcs_lock_node *node)
+{
+	node->irqsave = cpu_disable_interrupt_save();
+	mcs_lock_lock_noirq(lock, node);
+}
+
+static void mcs_lock_unlock(struct mcs_lock_node *lock,
+		struct mcs_lock_node *node)
+{
+	mcs_lock_unlock_noirq(lock, node);
+	cpu_restore_interrupt(node->irqsave);
+}
+
 
 // reader/writer lock
 typedef struct mcs_rwlock_node {
 	ihk_atomic_t count; // num of readers (use only common reader)
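The refactoring above splits the MCS lock into interrupt-agnostic (`__mcs_lock_lock`/`__mcs_lock_unlock`), preemption-only (`*_noirq`), and interrupt-saving (`mcs_lock_lock`/`mcs_lock_unlock`) layers, with the saved IRQ state carried in the caller's queue node. A minimal caller-side sketch, hedged: illustrative only, `my_lock` and the critical section are hypothetical, not part of this changeset.

```c
/* Hypothetical usage sketch of the layered MCS lock API above. The queue
 * node lives on the caller's stack; each waiter spins on its own
 * cache-aligned node, and the IRQ state is stashed in node->irqsave. */
static mcs_lock_node_t my_lock;		/* assumed shared lock word */

static void update_shared_state(void)
{
	struct mcs_lock_node node;	/* per-acquisition queue node */

	mcs_lock_lock(&my_lock, &node);		/* disables IRQs, then preemption */
	/* ... critical section ... */
	mcs_lock_unlock(&my_lock, &node);	/* re-enables in reverse order */
}
```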
@@ -22,7 +22,7 @@
 
 SYSCALL_HANDLED(0, read)
 SYSCALL_DELEGATED(1, write)
-SYSCALL_DELEGATED(2, open)
+SYSCALL_HANDLED(2, open)
 SYSCALL_HANDLED(3, close)
 SYSCALL_DELEGATED(4, stat)
 SYSCALL_DELEGATED(5, fstat)
@@ -558,28 +558,34 @@ int ihk_mc_pt_print_pte(struct page_table *pt, void *virt)
 
 	GET_VIRT_INDICES(v, l4idx, l3idx, l2idx, l1idx);
 
 	__kprintf("l4 table: 0x%lX l4idx: %d \n", virt_to_phys(pt), l4idx);
 	if (!(pt->entry[l4idx] & PFL4_PRESENT)) {
 		__kprintf("0x%lX l4idx not present! \n", (unsigned long)virt);
+		__kprintf("l4 entry: 0x%lX\n", pt->entry[l4idx]);
 		return -EFAULT;
 	}
+	__kprintf("l4 entry: 0x%lX\n", pt->entry[l4idx]);
 	pt = phys_to_virt(pt->entry[l4idx] & PAGE_MASK);
 
 	__kprintf("l3 table: 0x%lX l3idx: %d \n", virt_to_phys(pt), l3idx);
 	if (!(pt->entry[l3idx] & PFL3_PRESENT)) {
 		__kprintf("0x%lX l3idx not present! \n", (unsigned long)virt);
+		__kprintf("l3 entry: 0x%lX\n", pt->entry[l3idx]);
 		return -EFAULT;
 	}
+	__kprintf("l3 entry: 0x%lX\n", pt->entry[l3idx]);
 	if ((pt->entry[l3idx] & PFL3_SIZE)) {
 		__kprintf("l3 entry is 1G page\n");
 		return 0;
 	}
 	pt = phys_to_virt(pt->entry[l3idx] & PAGE_MASK);
 
 	__kprintf("l2 table: 0x%lX l2idx: %d \n", virt_to_phys(pt), l2idx);
 	if (!(pt->entry[l2idx] & PFL2_PRESENT)) {
 		__kprintf("0x%lX l2idx not present! \n", (unsigned long)virt);
+		__kprintf("l2 entry: 0x%lX\n", pt->entry[l2idx]);
 		return -EFAULT;
 	}
+	__kprintf("l2 entry: 0x%lX\n", pt->entry[l2idx]);
 	if ((pt->entry[l2idx] & PFL2_SIZE)) {
 		__kprintf("l2 entry is 2M page\n");
 		return 0;
 	}
 	pt = phys_to_virt(pt->entry[l2idx] & PAGE_MASK);

@@ -1773,9 +1779,19 @@ int ihk_mc_pt_set_pte(page_table_t pt, pte_t *ptep, size_t pgsize,
 		*ptep = phys | attr_to_l1attr(attr);
 	}
 	else if (pgsize == PTL2_SIZE) {
+		if (phys & (PTL2_SIZE - 1)) {
+			kprintf("%s: error: phys needs to be PTL2_SIZE aligned\n", __FUNCTION__);
+			error = -1;
+			goto out;
+		}
 		*ptep = phys | attr_to_l2attr(attr | PTATTR_LARGEPAGE);
 	}
 	else if ((pgsize == PTL3_SIZE) && (use_1gb_page)) {
+		if (phys & (PTL3_SIZE - 1)) {
+			kprintf("%s: error: phys needs to be PTL3_SIZE aligned\n", __FUNCTION__);
+			error = -1;
+			goto out;
+		}
 		*ptep = phys | attr_to_l3attr(attr | PTATTR_LARGEPAGE);
 	}
 	else {
@@ -70,71 +70,37 @@ static struct vdso vdso;
 static size_t container_size = 0;
 static ptrdiff_t vdso_offset;
 
-/*
- See dkprintf("BSP HW ID = %d, ", bsp_hw_id); (in ./mcos/kernel/ap.c)
-
- Core with BSP HW ID 224 is 1st logical core of last physical core.
- It boots first and is given SW-ID of 0
-
- Core with BSP HW ID 0 is 1st logical core of 1st physical core.
- It boots next and is given SW-ID of 1.
- Core with BSP HW ID 1 boots next and is given SW-ID of 2.
- Core with BSP HW ID 2 boots next and is given SW-ID of 3.
- Core with BSP HW ID 3 boots next and is given SW-ID of 4.
- ...
- Core with BSP HW ID 220 is 1st logical core of 56-th physical core.
- It boots next and is given SW-ID of 221.
- Core with BSP HW ID 221 boots next and is given SW-ID of 222.
- Core with BSP HW ID 222 boots next and is given SW-ID of 223.
- Core with BSP HW ID 223 boots next and is given SW-ID of 224.
-
- Core with BSP HW ID 225 is 2nd logical core of last physical core.
- It boots next and is given SW-ID of 225.
- Core with BSP HW ID 226 boots next and is given SW-ID of 226.
- Core with BSP HW ID 227 boots next and is given SW-ID of 227.
-*/
-ihk_spinlock_t cpuid_head_lock = 0;
-static int cpuid_head = 0;
-
-/* archtecture-depended syscall handlers */
-int obtain_clone_cpuid() {
-	/* see above on BSP HW ID */
-	struct ihk_mc_cpu_info *cpu_info = ihk_mc_get_cpu_info();
-	int cpuid, nretry = 0;
-	ihk_mc_spinlock_lock_noirq(&cpuid_head_lock);
-
-	/* Always start from 0 to fill in LWK cores linearily */
-	cpuid_head = 0;
-retry:
-	/* Try to obtain next physical core */
-	cpuid = cpuid_head;
-	cpuid_head += 1;
-	if(cpuid_head >= cpu_info->ncpus) {
-		cpuid_head = 0;
-	}
-
-	/* A hyper-threading core whose parent physical core has a
-	   process on one of its hyper-threading core might
-	   be chosen. Use sched_setaffinity if you want to skip that
-	   kind of busy physical core for performance reason. */
-	if(get_cpu_local_var(cpuid)->status != CPU_STATUS_IDLE) {
-		nretry++;
-		if(nretry >= cpu_info->ncpus) {
-			cpuid = -1;
-			ihk_mc_spinlock_unlock_noirq(&cpuid_head_lock);
-			goto out;
-		}
-		goto retry;
-	}
-	get_cpu_local_var(cpuid)->status = CPU_STATUS_RESERVED;
-	ihk_mc_spinlock_unlock_noirq(&cpuid_head_lock);
-out:
-	return cpuid;
+extern int num_processors;
+
+int obtain_clone_cpuid(cpu_set_t *cpu_set) {
+	int min_queue_len = -1;
+	int cpu, min_cpu = -1;
+
+	/* Find the first allowed core with the shortest run queue */
+	for (cpu = 0; cpu < num_processors; ++cpu) {
+		struct cpu_local_var *v;
+		unsigned long irqstate;
+
+		if (!CPU_ISSET(cpu, cpu_set)) continue;
+
+		v = get_cpu_local_var(cpu);
+		irqstate = ihk_mc_spinlock_lock(&v->runq_lock);
+		if (min_queue_len == -1 || v->runq_len < min_queue_len) {
+			min_queue_len = v->runq_len;
+			min_cpu = cpu;
+		}
+		ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
+
+		if (min_queue_len == 0)
+			break;
+	}
+
+	/* A hyper-threading core on the same physical core as
+	   the parent process might be chosen. Use sched_setaffinity
+	   if you want to skip that kind of busy physical core for
+	   performance reason. */
+	if (min_cpu != -1) {
+		if (get_cpu_local_var(min_cpu)->status != CPU_STATUS_RESERVED)
+			get_cpu_local_var(min_cpu)->status = CPU_STATUS_RESERVED;
+	}
+
+	return min_cpu;
 }
 
 int
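With the new signature the clone path passes the thread's affinity mask, so only permitted cores compete on run-queue length. A hedged caller sketch follows; the mask values are hypothetical, and `cpu_set_t`/`CPU_ZERO`/`CPU_SET` are assumed to come from the surrounding McKernel headers.

```c
/* Hypothetical illustration of calling the rewritten allocator. */
cpu_set_t allowed;
int target;

CPU_ZERO(&allowed);
CPU_SET(2, &allowed);	/* assume the parent restricts clones to cores 2-3 */
CPU_SET(3, &allowed);

target = obtain_clone_cpuid(&allowed);	/* least-loaded of cores 2 and 3 */
if (target == -1) {
	/* no allowed CPU exists; the caller must fail the clone */
}
```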
@@ -404,6 +404,8 @@ if [ "$enable_mcoverlay" == "yes" ]; then
 	# TODO: How de we revert this in case of failure??
 	mount --make-rprivate /sys
 
+	touch /tmp/mcos/mcos0_proc/mckernel
 
+	rm -rf /tmp/mcos/mcos0_sys/setup_complete
 
 	# Hide NUMA related files which are outside the LWK partition
@@ -42,6 +42,7 @@
 #define MCEXEC_UP_GET_CRED	0x30a0290a
 #define MCEXEC_UP_GET_CREDV	0x30a0290b
 #define MCEXEC_UP_GET_NODES	0x30a0290c
+#define MCEXEC_UP_GET_CPUSET	0x30a0290d
 
 #define MCEXEC_UP_PREPARE_DMA	0x30a02910
 #define MCEXEC_UP_FREE_DMA	0x30a02911

@@ -79,6 +80,17 @@ struct program_image_section {
 #define SHELL_PATH_MAX_LEN	1024
 #define MCK_RLIM_MAX	20
 
+struct get_cpu_set_arg {
+	int nr_processes;
+	void *cpu_set;
+	size_t cpu_set_size;	// Size in bytes
+	int *target_core;
+};
+
+#define PLD_CPU_SET_MAX_CPUS 1024
+typedef unsigned long __cpu_set_unit;
+#define PLD_CPU_SET_SIZE (PLD_CPU_SET_MAX_CPUS / (8 * sizeof(__cpu_set_unit)))
+
 struct program_load_desc {
 	int num_sections;
 	int status;

@@ -108,6 +120,7 @@ struct program_load_desc {
 	struct rlimit rlimit[MCK_RLIM_MAX];
 	unsigned long interp_align;
 	char shell_path[SHELL_PATH_MAX_LEN];
+	__cpu_set_unit cpu_set[PLD_CPU_SET_SIZE];
 	struct program_image_section sections[0];
 };
@@ -34,6 +34,7 @@
 #include <linux/version.h>
 #include <linux/semaphore.h>
 #include <linux/interrupt.h>
+#include <linux/cpumask.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
 #include <asm/io.h>
@@ -292,8 +293,9 @@ int mcexec_transfer_image(ihk_os_t os, struct remote_transfer *__user upt)
 
 //extern unsigned long last_thread_exec;
 
-struct handlerinfo {
-	int pid;
+struct release_handler_info {
+	int pid;
+	int cpu;
 };
 
 static long mcexec_debug_log(ihk_os_t os, unsigned long arg)

@@ -309,7 +311,7 @@ static long mcexec_debug_log(ihk_os_t os, unsigned long arg)
 
 static void release_handler(ihk_os_t os, void *param)
 {
-	struct handlerinfo *info = param;
+	struct release_handler_info *info = param;
 	struct ikc_scd_packet isp;
 	int os_ind = ihk_host_os_get_index(os);
 

@@ -317,10 +319,15 @@ static void release_handler(ihk_os_t os, void *param)
 	isp.msg = SCD_MSG_CLEANUP_PROCESS;
 	isp.pid = info->pid;
 
-	mcctrl_ikc_send(os, 0, &isp);
-	if(os_ind >= 0)
+	dprintk("%s: SCD_MSG_CLEANUP_PROCESS, info: %p, cpu: %d\n",
+			__FUNCTION__, info, info->cpu);
+	mcctrl_ikc_send(os, info->cpu, &isp);
+	if (os_ind >= 0) {
 		delete_pid_entry(os_ind, info->pid);
+	}
 	kfree(param);
+	dprintk("%s: SCD_MSG_CLEANUP_PROCESS, info: %p OK\n",
+			__FUNCTION__, info);
 }
 
 static long mcexec_newprocess(ihk_os_t os,

@@ -328,12 +335,12 @@ static long mcexec_newprocess(ihk_os_t os,
 		struct file *file)
 {
 	struct newprocess_desc desc;
-	struct handlerinfo *info;
+	struct release_handler_info *info;
 
 	if (copy_from_user(&desc, udesc, sizeof(struct newprocess_desc))) {
 		return -EFAULT;
 	}
-	info = kmalloc(sizeof(struct handlerinfo), GFP_KERNEL);
+	info = kmalloc(sizeof(struct release_handler_info), GFP_KERNEL);
 	info->pid = desc.pid;
 	ihk_os_register_release_handler(file, release_handler, info);
 	return 0;

@@ -347,7 +354,7 @@ static long mcexec_start_image(ihk_os_t os,
 	struct ikc_scd_packet isp;
 	struct mcctrl_channel *c;
 	struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
-	struct handlerinfo *info;
+	struct release_handler_info *info;
 
 	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
 	if (!desc) {

@@ -362,8 +369,9 @@ static long mcexec_start_image(ihk_os_t os,
 		return -EFAULT;
 	}
 
-	info = kmalloc(sizeof(struct handlerinfo), GFP_KERNEL);
+	info = kmalloc(sizeof(struct release_handler_info), GFP_KERNEL);
 	info->pid = desc->pid;
+	info->cpu = desc->cpu;
 	ihk_os_register_release_handler(file, release_handler, info);
 
 	c = usrdata->channels + desc->cpu;
@@ -460,6 +468,199 @@ static long mcexec_get_nodes(ihk_os_t os)
 	return usrdata->mem_info->n_numa_nodes;
 }
 
+extern int linux_numa_2_mckernel_numa(struct mcctrl_usrdata *udp, int numa_id);
+extern int mckernel_cpu_2_linux_cpu(struct mcctrl_usrdata *udp, int cpu_id);
+
+static long mcexec_get_cpuset(ihk_os_t os, unsigned long arg)
+{
+	struct mcctrl_usrdata *udp = ihk_host_os_get_usrdata(os);
+	struct mcctrl_part_exec *pe;
+	struct get_cpu_set_arg req;
+	struct cpu_topology *cpu_top, *cpu_top_i;
+	struct cache_topology *cache_top;
+	int cpu, cpus_assigned, cpus_to_assign, cpu_prev;
+	int ret = 0;
+	cpumask_t cpus_used;
+	cpumask_t cpus_to_use;
+	struct mcctrl_per_proc_data *ppd;
+
+	if (!udp) {
+		return -EINVAL;
+	}
+
+	/* Look up per-process structure */
+	ppd = mcctrl_get_per_proc_data(udp, task_tgid_vnr(current));
+	if (!ppd) {
+		return -EINVAL;
+	}
+
+	pe = &udp->part_exec;
+
+	if (copy_from_user(&req, (void *)arg, sizeof(req))) {
+		printk("%s: error copying user request\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&pe->lock);
+
+	memcpy(&cpus_used, &pe->cpus_used, sizeof(cpumask_t));
+	memset(&cpus_to_use, 0, sizeof(cpus_to_use));
+
+	/* First process to enter CPU partitioning */
+	if (pe->nr_processes == -1) {
+		pe->nr_processes = req.nr_processes;
+		pe->nr_processes_left = req.nr_processes;
+		dprintk("%s: nr_processes: %d (partitioned exec starts)\n",
+				__FUNCTION__,
+				pe->nr_processes);
+	}
+
+	if (pe->nr_processes != req.nr_processes) {
+		printk("%s: error: requested number of processes"
+				" doesn't match current partitioned execution\n",
+				__FUNCTION__);
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	--pe->nr_processes_left;
+	dprintk("%s: nr_processes: %d, nr_processes_left: %d\n",
+			__FUNCTION__,
+			pe->nr_processes,
+			pe->nr_processes_left);
+
+	cpus_to_assign = udp->cpu_info->n_cpus / req.nr_processes;
+
+	/* Find the first unused CPU */
+	cpu = cpumask_next_zero(-1, &cpus_used);
+	if (cpu >= udp->cpu_info->n_cpus) {
+		printk("%s: error: no more CPUs available\n",
+				__FUNCTION__);
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	cpu_set(cpu, cpus_used);
+	cpu_set(cpu, cpus_to_use);
+	cpu_prev = cpu;
+	dprintk("%s: CPU %d assigned (first)\n", __FUNCTION__, cpu);
+
+	for (cpus_assigned = 1; cpus_assigned < cpus_to_assign;
+			++cpus_assigned) {
+		int node;
+
+		cpu_top = NULL;
+		/* Find the topology object of the last core assigned */
+		list_for_each_entry(cpu_top_i, &udp->cpu_topology_list, chain) {
+			if (cpu_top_i->mckernel_cpu_id == cpu_prev) {
+				cpu_top = cpu_top_i;
+				break;
+			}
+		}
+
+		if (!cpu_top) {
+			printk("%s: error: couldn't find CPU topology info\n",
+					__FUNCTION__);
+			ret = -EINVAL;
+			goto unlock_out;
+		}
+
+		/* Find a core sharing the same cache iterating caches from
+		 * the most inner one outwards */
+		list_for_each_entry(cache_top, &cpu_top->cache_list, chain) {
+			for_each_cpu(cpu, &cache_top->shared_cpu_map) {
+				if (!cpu_isset(cpu, cpus_used)) {
+					cpu_set(cpu, cpus_used);
+					cpu_set(cpu, cpus_to_use);
+					cpu_prev = cpu;
+					dprintk("%s: CPU %d assigned (same cache L%lu)\n",
+						__FUNCTION__, cpu, cache_top->saved->level);
+					goto next_cpu;
+				}
+			}
+		}
+
+		/* No CPU? Find a core from the same NUMA node */
+		node = linux_numa_2_mckernel_numa(udp,
+				cpu_to_node(mckernel_cpu_2_linux_cpu(udp, cpu_prev)));
+
+		for_each_cpu_not(cpu, &cpus_used) {
+			/* Invalid CPU? */
+			if (cpu >= udp->cpu_info->n_cpus)
+				break;
+
+			/* Found one */
+			if (node == linux_numa_2_mckernel_numa(udp,
+					cpu_to_node(mckernel_cpu_2_linux_cpu(udp, cpu)))) {
+				cpu_set(cpu, cpus_used);
+				cpu_set(cpu, cpus_to_use);
+				cpu_prev = cpu;
+				dprintk("%s: CPU %d assigned (same NUMA)\n",
+						__FUNCTION__, cpu);
+				goto next_cpu;
+			}
+		}
+
+		/* No CPU? Simply find the next unused one */
+		cpu = cpumask_next_zero(-1, &cpus_used);
+		if (cpu >= udp->cpu_info->n_cpus) {
+			printk("%s: error: no more CPUs available\n",
+					__FUNCTION__);
+			ret = -EINVAL;
+			goto unlock_out;
+		}
+
+		cpu_set(cpu, cpus_used);
+		cpu_set(cpu, cpus_to_use);
+		cpu_prev = cpu;
+		dprintk("%s: CPU %d assigned (unused)\n",
+				__FUNCTION__, cpu);
+
+next_cpu:
+		continue;
+	}
+
+	/* Found all cores, let user know */
+	if (copy_to_user(req.cpu_set, &cpus_to_use,
+				(req.cpu_set_size < sizeof(cpus_to_use) ?
+				 req.cpu_set_size : sizeof(cpus_to_use)))) {
+		printk("%s: error copying mask to user\n", __FUNCTION__);
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	cpu = cpumask_next(-1, &cpus_to_use);
+	if (copy_to_user(req.target_core, &cpu, sizeof(cpu))) {
+		printk("%s: error copying target core to user\n",
+				__FUNCTION__);
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	/* Save in per-process structure */
+	memcpy(&ppd->cpu_set, &cpus_to_use, sizeof(cpumask_t));
+	ppd->ikc_target_cpu = cpu;
+
+	/* Commit used cores to OS structure */
+	memcpy(&pe->cpus_used, &cpus_used, sizeof(cpus_used));
+
+	/* Reset if last process */
+	if (pe->nr_processes_left == 0) {
+		dprintk("%s: nr_processes: %d (partitioned exec ends)\n",
+				__FUNCTION__,
+				pe->nr_processes);
+		pe->nr_processes = -1;
+		memset(&pe->cpus_used, 0, sizeof(pe->cpus_used));
+	}
+
+	ret = 0;
+
+unlock_out:
+	mutex_unlock(&pe->lock);
+
+	return ret;
+}
+
 int mcctrl_add_per_proc_data(struct mcctrl_usrdata *ud, int pid,
 		struct mcctrl_per_proc_data *ppd)
 {
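To make the assignment policy concrete, here is a hedged, hypothetical example: with 16 LWK CPUs and four partitioned processes, each caller receives `16 / 4 = 4` cores; after the first free core, the loop prefers cores sharing the innermost cache with the previous pick, then cores on the same NUMA node, and only then an arbitrary unused core.

```c
/* Hypothetical numbers illustrating the per-process split computed above. */
int n_cpus = 16;		/* assumed udp->cpu_info->n_cpus */
int nr_processes = 4;		/* from the user's request */
int cpus_to_assign = n_cpus / nr_processes;	/* -> 4 cores per process */
```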
@@ -978,6 +1179,8 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
 	INIT_LIST_HEAD(&ppd->wq_req_list);
 	INIT_LIST_HEAD(&ppd->wq_list_exact);
 	spin_lock_init(&ppd->wq_list_lock);
+	memset(&ppd->cpu_set, 0, sizeof(cpumask_t));
+	ppd->ikc_target_cpu = 0;
 
 	for (i = 0; i < MCCTRL_PER_THREAD_DATA_HASH_SIZE; ++i) {
 		INIT_LIST_HEAD(&ppd->per_thread_data_hash[i]);
@@ -1279,6 +1482,9 @@ long __mcctrl_control(ihk_os_t os, unsigned int req, unsigned long arg,
 	case MCEXEC_UP_GET_NODES:
 		return mcexec_get_nodes(os);
 
+	case MCEXEC_UP_GET_CPUSET:
+		return mcexec_get_cpuset(os, arg);
+
 	case MCEXEC_UP_STRNCPY_FROM_USER:
 		return mcexec_strncpy_from_user(os,
 				(struct strncpy_from_user_desc *)arg);
@@ -61,6 +61,7 @@ static struct ihk_os_user_call_handler mcctrl_uchs[] = {
 	{ .request = MCEXEC_UP_SEND_SIGNAL, .func = mcctrl_ioctl },
 	{ .request = MCEXEC_UP_GET_CPU, .func = mcctrl_ioctl },
 	{ .request = MCEXEC_UP_GET_NODES, .func = mcctrl_ioctl },
+	{ .request = MCEXEC_UP_GET_CPUSET, .func = mcctrl_ioctl },
 	{ .request = MCEXEC_UP_STRNCPY_FROM_USER, .func = mcctrl_ioctl },
 	{ .request = MCEXEC_UP_NEW_PROCESS, .func = mcctrl_ioctl },
 	{ .request = MCEXEC_UP_PREPARE_DMA, .func = mcctrl_ioctl },
@@ -240,7 +240,7 @@ static struct ihk_ikc_listen_param listen_param = {
 	.port = 501,
 	.handler = connect_handler,
 	.pkt_size = sizeof(struct ikc_scd_packet),
-	.queue_size = PAGE_SIZE,
+	.queue_size = PAGE_SIZE * 4,
 	.magic = 0x1129,
 };
 
@@ -248,7 +248,7 @@ static struct ihk_ikc_listen_param listen_param2 = {
 	.port = 502,
 	.handler = connect_handler2,
 	.pkt_size = sizeof(struct ikc_scd_packet),
-	.queue_size = PAGE_SIZE,
+	.queue_size = PAGE_SIZE * 4,
 	.magic = 0x1329,
 };
@@ -298,6 +298,9 @@ int prepare_ikc_channels(ihk_os_t os)
 	INIT_LIST_HEAD(&usrdata->cpu_topology_list);
 	INIT_LIST_HEAD(&usrdata->node_topology_list);
 
+	mutex_init(&usrdata->part_exec.lock);
+	usrdata->part_exec.nr_processes = -1;
+
 	return 0;
 }
@@ -198,6 +198,8 @@ struct mcctrl_per_proc_data {
 
 	struct list_head per_thread_data_hash[MCCTRL_PER_THREAD_DATA_HASH_SIZE];
 	rwlock_t per_thread_data_hash_lock[MCCTRL_PER_THREAD_DATA_HASH_SIZE];
+	cpumask_t cpu_set;
+	int ikc_target_cpu;
 };
 
 struct sysfsm_req {

@@ -254,6 +256,13 @@ struct node_topology {
 	struct list_head chain;
 };
 
+struct mcctrl_part_exec {
+	struct mutex lock;
+	int nr_processes;
+	int nr_processes_left;
+	cpumask_t cpus_used;
+};
+
 #define CPU_LONGS	(((NR_CPUS) + (BITS_PER_LONG) - 1) / (BITS_PER_LONG))
 
 #define MCCTRL_PER_PROC_DATA_HASH_SHIFT	7

@@ -284,6 +293,7 @@ struct mcctrl_usrdata {
 	nodemask_t numa_online;
 	struct list_head cpu_topology_list;
 	struct list_head node_topology_list;
+	struct mcctrl_part_exec part_exec;
 };
 
 struct mcctrl_signal {
@@ -746,6 +746,18 @@ static struct list_head pager_list = LIST_HEAD_INIT(pager_list);
 struct pager_create_result {
 	uintptr_t handle;
 	int maxprot;
+	uint32_t flags;
+	size_t size;
+};
+
+enum {
+	/* for memobj.flags */
+	MF_HAS_PAGER = 0x0001,
+	MF_SHMDT_OK = 0x0002,
+	MF_IS_REMOVABLE = 0x0004,
+	MF_PREFETCH = 0x0008,
+	MF_ZEROFILL = 0x0010,
+	MF_END
 };
 
 static int pager_req_create(ihk_os_t os, int fd, uintptr_t result_pa)

@@ -760,6 +772,7 @@ static int pager_req_create(ihk_os_t os, int fd, uintptr_t result_pa)
 	struct pager *newpager = NULL;
 	uintptr_t phys;
 	struct kstat st;
+	int mf_flags = 0;
 
 	dprintk("pager_req_create(%d,%lx)\n", fd, (long)result_pa);
 

@@ -827,6 +840,32 @@ static int pager_req_create(ihk_os_t os, int fd, uintptr_t result_pa)
 	list_add(&newpager->list, &pager_list);
 	pager = newpager;
 	newpager = NULL;
+
+	/* Intel MPI library and shared memory "prefetch" */
+	{
+		char *pathbuf, *fullpath;
+
+		pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
+		if (pathbuf) {
+			fullpath = d_path(&file->f_path, pathbuf, PATH_MAX);
+			if (!IS_ERR(fullpath)) {
+				if (!strncmp("/dev/shm/Intel_MPI", fullpath, 18)) {
+					//mf_flags = (MF_PREFETCH | MF_ZEROFILL);
+					mf_flags = (MF_ZEROFILL);
+					dprintk("%s: filename: %s, zerofill\n",
+						__FUNCTION__, fullpath);
+				}
+				else if (strstr(fullpath, "libmpi") != NULL) {
+					mf_flags = MF_PREFETCH;
+					dprintk("%s: filename: %s, prefetch\n",
+						__FUNCTION__, fullpath);
+				}
+			}
+
+			kfree(pathbuf);
+		}
+	}
 
 	break;
 }

@@ -856,6 +895,8 @@ found:
 	resp = ihk_device_map_virtual(dev, phys, sizeof(*resp), NULL, 0);
 	resp->handle = (uintptr_t)pager;
 	resp->maxprot = maxprot;
+	resp->flags = mf_flags;
+	resp->size = st.size;
 	ihk_device_unmap_virtual(dev, resp, sizeof(*resp));
 	ihk_device_unmap_memory(dev, phys, sizeof(*resp));
@@ -197,19 +197,19 @@ void free_topology_info(ihk_os_t os)
 /*
  * CPU and NUMA node mapping conversion functions.
  */
-static int mckernel_cpu_2_linux_cpu(struct mcctrl_usrdata *udp, int cpu_id)
+int mckernel_cpu_2_linux_cpu(struct mcctrl_usrdata *udp, int cpu_id)
 {
 	return (cpu_id < udp->cpu_info->n_cpus) ?
 		udp->cpu_info->mapping[cpu_id] : -1;
 }
 
-static int mckernel_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu_id)
+int mckernel_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu_id)
 {
 	return (cpu_id < udp->cpu_info->n_cpus) ?
 		udp->cpu_info->hw_ids[cpu_id] : -1;
 }
 
-static int linux_cpu_2_mckernel_cpu(struct mcctrl_usrdata *udp, int cpu_id)
+int linux_cpu_2_mckernel_cpu(struct mcctrl_usrdata *udp, int cpu_id)
 {
 	int i;
 
@@ -222,7 +222,7 @@ static int linux_cpu_2_mckernel_cpu(struct mcctrl_usrdata *udp, int cpu_id)
 }
 
 #if 0
-static int hw_id_2_mckernel_cpu(struct mcctrl_usrdata *udp, int hw_id)
+int hw_id_2_mckernel_cpu(struct mcctrl_usrdata *udp, int hw_id)
 {
 	int i;
 
@@ -235,7 +235,7 @@ static int hw_id_2_mckernel_cpu(struct mcctrl_usrdata *udp, int hw_id)
 	return -1;
 }
 
-static int hw_id_2_linux_cpu(struct mcctrl_usrdata *udp, int hw_id)
+int hw_id_2_linux_cpu(struct mcctrl_usrdata *udp, int hw_id)
 {
 	int i;
 
@@ -248,7 +248,7 @@ static int hw_id_2_linux_cpu(struct mcctrl_usrdata *udp, int hw_id)
 	return -1;
 }
 
-static int linux_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu)
+int linux_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu)
 {
 	int mckernel_cpu = linux_cpu_2_mckernel_cpu(udp, cpu);
 
@@ -257,13 +257,13 @@ static int linux_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu)
 }
 #endif
 
-static int mckernel_numa_2_linux_numa(struct mcctrl_usrdata *udp, int numa_id)
+int mckernel_numa_2_linux_numa(struct mcctrl_usrdata *udp, int numa_id)
 {
 	return (numa_id < udp->mem_info->n_numa_nodes) ?
 		udp->mem_info->numa_mapping[numa_id] : -1;
 }
 
-static int linux_numa_2_mckernel_numa(struct mcctrl_usrdata *udp, int numa_id)
+int linux_numa_2_mckernel_numa(struct mcctrl_usrdata *udp, int numa_id)
 {
 	int i;
 
@@ -153,6 +153,10 @@ static const char rlimit_stack_envname[] = "MCKERNEL_RLIMIT_STACK";
 static int ischild;
 static int enable_vdso = 1;
 
+/* Partitioned execution (e.g., for MPI) */
+static int nr_processes = 0;
+static int nr_threads = -1;
+
 struct fork_sync {
 	pid_t pid;
 	int status;
@@ -502,7 +506,7 @@ retry:
 
 	/* Check whether the resolved path is a symlink */
 	if (lstat(path, &sb) == -1) {
-		fprintf(stderr, "lookup_exec_path(): error stat\n");
+		__dprintf(stderr, "lookup_exec_path(): error stat\n");
 		return errno;
 	}
 
@@ -1102,7 +1106,7 @@ static int reduce_stack(struct rlimit *orig_rlim, char *argv[])
 
 void print_usage(char **argv)
 {
-	fprintf(stderr, "Usage: %s [-c target_core] [<mcos-id>] (program) [args...]\n", argv[0]);
+	fprintf(stderr, "Usage: %s [-c target_core] [-n nr_partitions] [<mcos-id>] (program) [args...]\n", argv[0]);
 }
 
 void init_sigaction(void)
@@ -1329,12 +1333,20 @@ int main(int argc, char **argv)
 	}
 
 	/* Parse options ("+" denotes stop at the first non-option) */
-	while ((opt = getopt_long(argc, argv, "+c:", mcexec_options, NULL)) != -1) {
+	while ((opt = getopt_long(argc, argv, "+c:n:t:", mcexec_options, NULL)) != -1) {
 		switch (opt) {
 			case 'c':
 				target_core = atoi(optarg);
 				break;
 
+			case 'n':
+				nr_processes = atoi(optarg);
+				break;
+
+			case 't':
+				nr_threads = atoi(optarg);
+				break;
+
 			case 0:	/* long opt */
 				break;
 
@@ -1550,7 +1562,16 @@
 		return 1;
 	}
 
-	n_threads = ncpu;
+	if (nr_threads > 0) {
+		n_threads = nr_threads;
+	}
+	else if (getenv("OMP_NUM_THREADS")) {
+		/* Leave some headroom for helper threads.. */
+		n_threads = atoi(getenv("OMP_NUM_THREADS")) + 4;
+	}
+	else {
+		n_threads = ncpu;
+	}
 
 	/*
 	 * XXX: keep thread_data ncpu sized despite that there are only
@@ -1599,6 +1620,24 @@
 		exit(1);
 	}
 
+	/* Partitioned execution, obtain CPU set */
+	if (nr_processes > 0) {
+		struct get_cpu_set_arg cpu_set_arg;
+
+		cpu_set_arg.cpu_set = (void *)&desc->cpu_set;
+		cpu_set_arg.cpu_set_size = sizeof(desc->cpu_set);
+		cpu_set_arg.nr_processes = nr_processes;
+		cpu_set_arg.target_core = &target_core;
+
+		if (ioctl(fd, MCEXEC_UP_GET_CPUSET, (void *)&cpu_set_arg) != 0) {
+			perror("getting CPU set for partitioned execution");
+			close(fd);
+			return 1;
+		}
+
+		desc->cpu = target_core;
+	}
+
 	if (ioctl(fd, MCEXEC_UP_PREPARE_IMAGE, (unsigned long)desc) != 0) {
 		perror("prepare");
 		close(fd);
@@ -1910,9 +1949,18 @@ int close_cloexec_fds(int mcos_fd)
 	return 0;
 }
 
+void chgdevpath(char *in, char *buf)
+{
+	if(!strcmp(in, "/dev/xpmem")){
+		sprintf(in, "/dev/null");
+	}
+}
+
 char *
 chgpath(char *in, char *buf)
 {
+	chgdevpath(in, buf);
+
 #ifdef ENABLE_MCOVERLAYFS
 	return in;
 #endif // ENABLE_MCOVERLAYFS
@@ -3,7 +3,7 @@ SRC=$(VPATH)
 IHKDIR=$(IHKBASE)/$(TARGETDIR)
 OBJS = init.o mem.o debug.o mikc.o listeners.o ap.o syscall.o cls.o host.o
 OBJS += process.o copy.o waitq.o futex.o timer.o plist.o fileobj.o shmobj.o
-OBJS += zeroobj.o procfs.o devobj.o sysfs.o
+OBJS += zeroobj.o procfs.o devobj.o sysfs.o xpmem.o
 DEPSRCS=$(wildcard $(SRC)/*.c)
 
 CFLAGS += -I$(SRC)/include -D__KERNEL__ -g -fno-omit-frame-pointer -fno-inline -fno-inline-small-functions
@@ -23,7 +23,7 @@
 extern int num_processors;
 
 struct cpu_local_var *clv;
-static int cpu_local_var_initialized = 0;
+int cpu_local_var_initialized = 0;
 
 void cpu_local_var_init(void)
 {
@@ -127,6 +127,7 @@ int devobj_create(int fd, size_t len, off_t off, struct memobj **objp, int *maxp
 
 	obj->memobj.ops = &devobj_ops;
 	obj->memobj.flags = MF_HAS_PAGER;
+	obj->memobj.size = len;
 	obj->handle = result.handle;
 	obj->ref = 1;
 	obj->pfn_pgoff = off / PAGE_SIZE;
kernel/fileobj.c (352 changed lines)
@@ -29,22 +29,27 @@
 #define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
 #define ekprintf(...) kprintf(__VA_ARGS__)
 
-static ihk_spinlock_t fileobj_list_lock = SPIN_LOCK_UNLOCKED;
+mcs_rwlock_lock_t fileobj_list_lock =
+	{{{0}, MCS_RWLOCK_TYPE_COMMON_READER, 0, 0, 0, NULL}, NULL};
 static LIST_HEAD(fileobj_list);
 
+#define FILEOBJ_PAGE_HASH_SHIFT	9
+#define FILEOBJ_PAGE_HASH_SIZE	(1 << FILEOBJ_PAGE_HASH_SHIFT)
+#define FILEOBJ_PAGE_HASH_MASK	(FILEOBJ_PAGE_HASH_SIZE - 1)
+
 struct fileobj {
-	struct memobj memobj;		/* must be first */
-	long sref;
-	long cref;
-	uintptr_t handle;
-	struct list_head page_list;
-	struct list_head list;
+	struct memobj memobj;		/* must be first */
+	long sref;
+	long cref;
+	uintptr_t handle;
+	struct list_head list;
+	struct list_head page_hash[FILEOBJ_PAGE_HASH_SIZE];
+	mcs_rwlock_lock_t page_hash_locks[FILEOBJ_PAGE_HASH_SIZE];
 };
 
 static memobj_release_func_t fileobj_release;
 static memobj_ref_func_t fileobj_ref;
 static memobj_get_page_func_t fileobj_get_page;
-static memobj_copy_page_func_t fileobj_copy_page;
 static memobj_flush_page_func_t fileobj_flush_page;
 static memobj_invalidate_page_func_t fileobj_invalidate_page;
 static memobj_lookup_page_func_t fileobj_lookup_page;

@@ -53,7 +58,7 @@ static struct memobj_ops fileobj_ops = {
 	.release = &fileobj_release,
 	.ref = &fileobj_ref,
 	.get_page = &fileobj_get_page,
-	.copy_page = &fileobj_copy_page,
+	.copy_page = NULL,
 	.flush_page = &fileobj_flush_page,
 	.invalidate_page = &fileobj_invalidate_page,
 	.lookup_page = &fileobj_lookup_page,

@@ -72,28 +77,36 @@ static struct memobj *to_memobj(struct fileobj *fileobj)
 /***********************************************************************
  * page_list
  */
-static void page_list_init(struct fileobj *obj)
+static void fileobj_page_hash_init(struct fileobj *obj)
 {
-	INIT_LIST_HEAD(&obj->page_list);
+	int i;
+	for (i = 0; i < FILEOBJ_PAGE_HASH_SIZE; ++i) {
+		mcs_rwlock_init(&obj->page_hash_locks[i]);
+		INIT_LIST_HEAD(&obj->page_hash[i]);
+	}
 	return;
 }
 
-static void page_list_insert(struct fileobj *obj, struct page *page)
+/* NOTE: caller must hold page_hash_locks[hash] */
+static void __fileobj_page_hash_insert(struct fileobj *obj,
+		struct page *page, int hash)
 {
-	list_add(&page->list, &obj->page_list);
-	return;
+	list_add(&page->list, &obj->page_hash[hash]);
 }
 
-static void page_list_remove(struct fileobj *obj, struct page *page)
+/* NOTE: caller must hold page_hash_locks[hash] */
+static void __fileobj_page_hash_remove(struct page *page)
 {
 	list_del(&page->list);
 }
 
-static struct page *page_list_lookup(struct fileobj *obj, off_t off)
+/* NOTE: caller must hold page_hash_locks[hash] */
+static struct page *__fileobj_page_hash_lookup(struct fileobj *obj,
+		int hash, off_t off)
 {
 	struct page *page;
 
-	list_for_each_entry(page, &obj->page_list, list) {
+	list_for_each_entry(page, &obj->page_hash[hash], list) {
 		if ((page->mode != PM_WILL_PAGEIO)
 				&& (page->mode != PM_PAGEIO)
 				&& (page->mode != PM_DONE_PAGEIO)

@@ -104,6 +117,7 @@ static struct page *page_list_lookup(struct fileobj *obj, off_t off)
 					obj, off, page->mode);
 			panic("page_list_lookup:invalid obj page");
 		}
+
 		if (page->offset == off) {
 			goto out;
 		}

@@ -114,13 +128,22 @@ out:
 	return page;
 }
 
-static struct page *page_list_first(struct fileobj *obj)
+static struct page *fileobj_page_hash_first(struct fileobj *obj)
 {
-	if (list_empty(&obj->page_list)) {
-		return NULL;
+	int i;
+
+	for (i = 0; i < FILEOBJ_PAGE_HASH_SIZE; ++i) {
+		if (!list_empty(&obj->page_hash[i])) {
+			break;
+		}
 	}
 
-	return list_first_entry(&obj->page_list, struct page, list);
+	if (i != FILEOBJ_PAGE_HASH_SIZE) {
+		return list_first_entry(&obj->page_hash[i], struct page, list);
+	}
+	else {
+		return NULL;
+	}
 }
 
 /***********************************************************************
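Buckets are selected from the page offset throughout the new code; a hedged walk-through of the index computation, with a hypothetical offset and 4 KiB pages assumed:

```c
/* (0x201000 >> 12) = 0x201; 0x201 & 0x1ff = 0x1 -> bucket 1 of 512 */
off_t off = 0x201000;			/* hypothetical file offset */
int hash = (off >> PAGE_SHIFT) & FILEOBJ_PAGE_HASH_MASK;
```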
@@ -163,10 +186,11 @@ static struct fileobj *obj_list_lookup(uintptr_t handle)
 int fileobj_create(int fd, struct memobj **objp, int *maxprotp)
 {
 	ihk_mc_user_context_t ctx;
-	struct pager_create_result result;	// XXX: assumes contiguous physical
+	struct pager_create_result result __attribute__((aligned(64)));
 	int error;
 	struct fileobj *newobj = NULL;
 	struct fileobj *obj;
+	struct mcs_rwlock_node node;
 
 	dkprintf("fileobj_create(%d)\n", fd);
 	newobj = kmalloc(sizeof(*newobj), IHK_MC_AP_NOWAIT);

@@ -179,6 +203,7 @@ int fileobj_create(int fd, struct memobj **objp, int *maxprotp)
 	ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_CREATE;
 	ihk_mc_syscall_arg1(&ctx) = fd;
 	ihk_mc_syscall_arg2(&ctx) = virt_to_phys(&result);
+	memset(&result, 0, sizeof(result));
 
 	error = syscall_generic_forwarding(__NR_mmap, &ctx);
 	if (error) {

@@ -192,23 +217,39 @@ int fileobj_create(int fd, struct memobj **objp, int *maxprotp)
 	newobj->handle = result.handle;
 	newobj->sref = 1;
 	newobj->cref = 1;
-	page_list_init(newobj);
+	fileobj_page_hash_init(newobj);
 	ihk_mc_spinlock_init(&newobj->memobj.lock);
 
-	ihk_mc_spinlock_lock_noirq(&fileobj_list_lock);
+	mcs_rwlock_writer_lock_noirq(&fileobj_list_lock, &node);
 	obj = obj_list_lookup(result.handle);
 	if (!obj) {
 		obj_list_insert(newobj);
 		obj = newobj;
+		to_memobj(obj)->size = result.size;
+		to_memobj(obj)->flags |= result.flags;
+		to_memobj(obj)->status = MEMOBJ_READY;
+		if (to_memobj(obj)->flags & MF_PREFETCH) {
+			to_memobj(obj)->status = MEMOBJ_TO_BE_PREFETCHED;
+		}
 		newobj = NULL;
+		dkprintf("%s: new obj 0x%lx cref: %d, %s\n",
+			__FUNCTION__,
+			obj,
+			obj->cref,
+			to_memobj(obj)->flags & MF_ZEROFILL ? "zerofill" : "");
 	}
 	else {
 		++obj->sref;
 		++obj->cref;
 		memobj_unlock(&obj->memobj);	/* locked by obj_list_lookup() */
+		dkprintf("%s: existing obj 0x%lx cref: %d, %s\n",
+			__FUNCTION__,
+			obj,
+			obj->cref,
+			to_memobj(obj)->flags & MF_ZEROFILL ? "zerofill" : "");
 	}
 
-	ihk_mc_spinlock_unlock_noirq(&fileobj_list_lock);
+	mcs_rwlock_writer_unlock_noirq(&fileobj_list_lock, &node);
 
 	error = 0;
 	*objp = to_memobj(obj);

@@ -239,6 +280,7 @@ static void fileobj_release(struct memobj *memobj)
 	long free_sref = 0;
 	uintptr_t free_handle;
 	struct fileobj *free_obj = NULL;
+	struct mcs_rwlock_node node;
 
 	dkprintf("fileobj_release(%p %lx)\n", obj, obj->handle);
 

@@ -254,17 +296,23 @@ static void fileobj_release(struct memobj *memobj)
 	memobj_unlock(&obj->memobj);
 
 	if (free_obj) {
-		ihk_mc_spinlock_lock_noirq(&fileobj_list_lock);
+		dkprintf("%s: release obj 0x%lx cref: %d, free_obj: 0x%lx, %s\n",
+			__FUNCTION__,
+			obj,
+			obj->cref,
+			free_obj,
+			to_memobj(obj)->flags & MF_ZEROFILL ? "zerofill" : "");
+		mcs_rwlock_writer_lock_noirq(&fileobj_list_lock, &node);
 		/* zap page_list */
 		for (;;) {
 			struct page *page;
 			void *page_va;
 
-			page = page_list_first(obj);
+			page = fileobj_page_hash_first(obj);
 			if (!page) {
 				break;
 			}
-			page_list_remove(obj, page);
+			__fileobj_page_hash_remove(page);
 			page_va = phys_to_virt(page_to_phys(page));
 
 			if (ihk_atomic_read(&page->count) != 1) {

@@ -295,7 +343,7 @@ static void fileobj_release(struct memobj *memobj)
 #endif
 		}
 		obj_list_remove(free_obj);
-		ihk_mc_spinlock_unlock_noirq(&fileobj_list_lock);
+		mcs_rwlock_writer_unlock_noirq(&fileobj_list_lock, &node);
 		kfree(free_obj);
 	}
 
@@ -341,83 +389,101 @@ static void fileobj_do_pageio(void *args0)
 	struct page *page;
 	ihk_mc_user_context_t ctx;
 	ssize_t ss;
+	struct mcs_rwlock_node mcs_node;
+	int hash = (off >> PAGE_SHIFT) & FILEOBJ_PAGE_HASH_MASK;
 
-	memobj_lock(&obj->memobj);
-	page = page_list_lookup(obj, off);
+	mcs_rwlock_writer_lock_noirq(&obj->page_hash_locks[hash],
+			&mcs_node);
+	page = __fileobj_page_hash_lookup(obj, hash, off);
 	if (!page) {
 		goto out;
 	}
 
 	while (page->mode == PM_PAGEIO) {
-		memobj_unlock(&obj->memobj);
+		mcs_rwlock_writer_unlock_noirq(&obj->page_hash_locks[hash],
+				&mcs_node);
 		cpu_pause();
-		memobj_lock(&obj->memobj);
+		mcs_rwlock_writer_lock_noirq(&obj->page_hash_locks[hash],
+				&mcs_node);
 	}
 
 	if (page->mode == PM_WILL_PAGEIO) {
-		page->mode = PM_PAGEIO;
-		memobj_unlock(&obj->memobj);
-
-		ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_READ;
-		ihk_mc_syscall_arg1(&ctx) = obj->handle;
-		ihk_mc_syscall_arg2(&ctx) = off;
-		ihk_mc_syscall_arg3(&ctx) = pgsize;
-		ihk_mc_syscall_arg4(&ctx) = page_to_phys(page);
-
-		ss = syscall_generic_forwarding(__NR_mmap, &ctx);
-
-		memobj_lock(&obj->memobj);
-		if (page->mode != PM_PAGEIO) {
-			kprintf("fileobj_do_pageio(%p,%lx,%lx):"
-					"invalid mode %x\n",
-					obj, off, pgsize, page->mode);
-			panic("fileobj_do_pageio:invalid page mode");
+		if (to_memobj(obj)->flags & MF_ZEROFILL) {
+			void *virt = phys_to_virt(page_to_phys(page));
+			memset(virt, 0, PAGE_SIZE);
 		}
-
-		if (ss == 0) {
-			dkprintf("fileobj_do_pageio(%p,%lx,%lx):EOF? %ld\n",
-					obj, off, pgsize, ss);
-			page->mode = PM_PAGEIO_EOF;
-			goto out;
-		}
-		else if (ss != pgsize) {
-			kprintf("fileobj_do_pageio(%p,%lx,%lx):"
-					"read failed. %ld\n",
-					obj, off, pgsize, ss);
-			page->mode = PM_PAGEIO_ERROR;
-			goto out;
+		else {
+			page->mode = PM_PAGEIO;
+			mcs_rwlock_writer_unlock_noirq(&obj->page_hash_locks[hash],
+					&mcs_node);
+
+			ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_READ;
+			ihk_mc_syscall_arg1(&ctx) = obj->handle;
+			ihk_mc_syscall_arg2(&ctx) = off;
+			ihk_mc_syscall_arg3(&ctx) = pgsize;
+			ihk_mc_syscall_arg4(&ctx) = page_to_phys(page);
+
+			dkprintf("%s: __NR_mmap for handle 0x%lx\n",
+					__FUNCTION__, obj->handle);
+			ss = syscall_generic_forwarding(__NR_mmap, &ctx);
+
+			mcs_rwlock_writer_lock_noirq(&obj->page_hash_locks[hash],
+					&mcs_node);
+			if (page->mode != PM_PAGEIO) {
+				kprintf("fileobj_do_pageio(%p,%lx,%lx):"
+						"invalid mode %x\n",
+						obj, off, pgsize, page->mode);
+				panic("fileobj_do_pageio:invalid page mode");
+			}
+
+			if (ss == 0) {
+				dkprintf("fileobj_do_pageio(%p,%lx,%lx):EOF? %ld\n",
+						obj, off, pgsize, ss);
+				page->mode = PM_PAGEIO_EOF;
+				goto out;
+			}
+			else if (ss != pgsize) {
+				kprintf("fileobj_do_pageio(%p,%lx,%lx):"
+						"read failed. %ld\n",
+						obj, off, pgsize, ss);
+				page->mode = PM_PAGEIO_ERROR;
+				goto out;
+			}
 		}
 
 		page->mode = PM_DONE_PAGEIO;
 	}
 out:
-	memobj_unlock(&obj->memobj);
+	mcs_rwlock_writer_unlock_noirq(&obj->page_hash_locks[hash],
+			&mcs_node);
 	fileobj_release(&obj->memobj);	/* got fileobj_get_page() */
 	kfree(args0);
 	dkprintf("fileobj_do_pageio(%p,%lx,%lx):\n", obj, off, pgsize);
 	return;
 }
 
-static int fileobj_get_page(struct memobj *memobj, off_t off, int p2align, uintptr_t *physp, unsigned long *pflag)
+static int fileobj_get_page(struct memobj *memobj, off_t off,
+		int p2align, uintptr_t *physp, unsigned long *pflag)
 {
 	struct thread *proc = cpu_local_var(current);
 	struct fileobj *obj = to_fileobj(memobj);
-	int error;
+	int error = -1;
 	void *virt = NULL;
 	int npages;
 	uintptr_t phys = -1;
 	struct page *page;
 	struct pageio_args *args = NULL;
+	struct mcs_rwlock_node mcs_node;
+	int hash = (off >> PAGE_SHIFT) & FILEOBJ_PAGE_HASH_MASK;
 
 	dkprintf("fileobj_get_page(%p,%lx,%x,%p)\n", obj, off, p2align, physp);
 
-	memobj_lock(&obj->memobj);
 	if (p2align != PAGE_P2ALIGN) {
-		error = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 
-	page = page_list_lookup(obj, off);
+	mcs_rwlock_writer_lock_noirq(&obj->page_hash_locks[hash],
+			&mcs_node);
+	page = __fileobj_page_hash_lookup(obj, hash, off);
 	if (!page || (page->mode == PM_WILL_PAGEIO)
 			|| (page->mode == PM_PAGEIO)) {
 		args = kmalloc(sizeof(*args), IHK_MC_AP_NOWAIT);
@@ -445,13 +511,15 @@ static int fileobj_get_page(struct memobj *memobj, off_t off, int p2align, uintp
 		if (page->mode != PM_NONE) {
 			panic("fileobj_get_page:invalid new page");
 		}
-		page->mode = PM_WILL_PAGEIO;
 		page->offset = off;
 		ihk_atomic_set(&page->count, 1);
-		page_list_insert(obj, page);
+		__fileobj_page_hash_insert(obj, page, hash);
+		page->mode = PM_WILL_PAGEIO;
 	}
 
+	memobj_lock(&obj->memobj);
 	++obj->cref;	/* for fileobj_do_pageio() */
+	memobj_unlock(&obj->memobj);
 
 	args->fileobj = obj;
 	args->objoff = off;

@@ -483,7 +551,8 @@ static int fileobj_get_page(struct memobj *memobj, off_t off, int p2align, uintp
 	*physp = page_to_phys(page);
 	virt = NULL;
 out:
-	memobj_unlock(&obj->memobj);
+	mcs_rwlock_writer_unlock_noirq(&obj->page_hash_locks[hash],
+			&mcs_node);
 	if (virt) {
 		ihk_mc_free_pages(virt, npages);
 	}
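The pageio path above moves each page through a small mode state machine; the following is a hedged summary sketch — the state names come from this diff, but the enum layout and comments are assumptions, not the literal definition.

```c
/* Assumed sketch of the page mode transitions driven by
 * fileobj_get_page()/fileobj_do_pageio(); not the literal enum. */
enum page_mode_sketch {
	PM_NONE,		/* fresh page, not yet inserted         */
	PM_WILL_PAGEIO,		/* queued: get_page scheduled the read  */
	PM_PAGEIO,		/* read in flight, hash lock dropped    */
	PM_DONE_PAGEIO,		/* data (or zero-fill) is valid         */
	PM_PAGEIO_EOF,		/* short read: offset past end of file  */
	PM_PAGEIO_ERROR,	/* forwarded read failed                */
	PM_MAPPED,		/* installed into a page table          */
};
```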
@@ -495,78 +564,6 @@ out:
 	return error;
 }
 
-static uintptr_t fileobj_copy_page(
-		struct memobj *memobj, uintptr_t orgpa, int p2align)
-{
-	struct page *orgpage = phys_to_page(orgpa);
-	size_t pgsize = PAGE_SIZE << p2align;
-	int npages = 1 << p2align;
-	void *newkva = NULL;
-	uintptr_t newpa = -1;
-	void *orgkva;
-	int count;
-
-	dkprintf("fileobj_copy_page(%p,%lx,%d)\n", memobj, orgpa, p2align);
-	if (p2align != PAGE_P2ALIGN) {
-		panic("p2align");
-	}
-
-	memobj_lock(memobj);
-	for (;;) {
-		if (!orgpage || orgpage->mode != PM_MAPPED) {
-			kprintf("fileobj_copy_page(%p,%lx,%d):"
-					"invalid cow page. %x\n",
-					memobj, orgpa, p2align, orgpage ? orgpage->mode : 0);
-			panic("fileobj_copy_page:invalid cow page");
-		}
-		count = ihk_atomic_read(&orgpage->count);
-		if (count == 2) {	// XXX: private only
-			list_del(&orgpage->list);
-			ihk_atomic_dec(&orgpage->count);
-			orgpage->mode = PM_NONE;
-			newpa = orgpa;
-			break;
-		}
-		if (count <= 0) {
-			kprintf("fileobj_copy_page(%p,%lx,%d):"
-					"orgpage count corrupted. %x\n",
-					memobj, orgpa, p2align, count);
-			panic("fileobj_copy_page:orgpage count corrupted");
-		}
-		if (newkva) {
-			orgkva = phys_to_virt(orgpa);
-			memcpy(newkva, orgkva, pgsize);
-			ihk_atomic_dec(&orgpage->count);
-			newpa = virt_to_phys(newkva);
-			if (phys_to_page(newpa)) {
-				page_map(phys_to_page(newpa));
-			}
-			newkva = NULL;	/* avoid ihk_mc_free_pages() */
-			break;
-		}
-
-		memobj_unlock(memobj);
-		newkva = ihk_mc_alloc_aligned_pages(npages, p2align,
-				IHK_MC_AP_NOWAIT);
-		if (!newkva) {
-			kprintf("fileobj_copy_page(%p,%lx,%d):"
-					"alloc page failed\n",
-					memobj, orgpa, p2align);
-			goto out;
-		}
-		memobj_lock(memobj);
-	}
-	memobj_unlock(memobj);
-
-out:
-	if (newkva) {
-		ihk_mc_free_pages(newkva, npages);
-	}
-	dkprintf("fileobj_copy_page(%p,%lx,%d): %lx\n",
-			memobj, orgpa, p2align, newpa);
-	return newpa;
-}
-
 static int fileobj_flush_page(struct memobj *memobj, uintptr_t phys,
 		size_t pgsize)
 {
@@ -575,6 +572,10 @@ static int fileobj_flush_page(struct memobj *memobj, uintptr_t phys,
 	ihk_mc_user_context_t ctx;
 	ssize_t ss;
 
+	if (to_memobj(obj)->flags & MF_ZEROFILL) {
+		return 0;
+	}
+
 	page = phys_to_page(phys);
 	if (!page) {
 		kprintf("%s: warning: tried to flush non-existing page for phys addr: 0x%lx\n",
@@ -603,63 +604,48 @@ static int fileobj_invalidate_page(struct memobj *memobj, uintptr_t phys,
 static int fileobj_invalidate_page(struct memobj *memobj, uintptr_t phys,
 		size_t pgsize)
 {
-	struct fileobj *obj = to_fileobj(memobj);
-	int error;
-	struct page *page;
-
 	dkprintf("fileobj_invalidate_page(%p,%#lx,%#lx)\n",
 			memobj, phys, pgsize);
 
-	if (!(page = phys_to_page(phys))
-			|| !(page = page_list_lookup(obj, page->offset))) {
-		error = 0;
-		goto out;
-	}
-
-	if (ihk_atomic_read(&page->count) == 1) {
-		if (page_unmap(page)) {
-			ihk_mc_free_pages(phys_to_virt(phys),
-					pgsize/PAGE_SIZE);
-		}
-	}
-
-	error = 0;
-out:
-	dkprintf("fileobj_invalidate_page(%p,%#lx,%#lx):%d\n",
-			memobj, phys, pgsize, error);
-	return error;
+	/* TODO: keep track of reverse mappings so that invalidation
+	 * can be performed */
+	kprintf("%s: WARNING: file mapping invalidation not supported\n",
+			__FUNCTION__);
+	return 0;
 }
 
-static int fileobj_lookup_page(struct memobj *memobj, off_t off, int p2align, uintptr_t *physp, unsigned long *pflag)
+static int fileobj_lookup_page(struct memobj *memobj, off_t off,
+		int p2align, uintptr_t *physp, unsigned long *pflag)
 {
 	struct fileobj *obj = to_fileobj(memobj);
-	int error;
-	uintptr_t phys = -1;
+	int error = -1;
 	struct page *page;
+	struct mcs_rwlock_node mcs_node;
+	int hash = (off >> PAGE_SHIFT) & FILEOBJ_PAGE_HASH_MASK;
 
 	dkprintf("fileobj_lookup_page(%p,%lx,%x,%p)\n", obj, off, p2align, physp);
 
-	memobj_lock(&obj->memobj);
 	if (p2align != PAGE_P2ALIGN) {
-		error = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 
-	page = page_list_lookup(obj, off);
+	mcs_rwlock_reader_lock_noirq(&obj->page_hash_locks[hash],
+			&mcs_node);
+
+	page = __fileobj_page_hash_lookup(obj, hash, off);
 	if (!page) {
 		error = -ENOENT;
 		dkprintf("fileobj_lookup_page(%p,%lx,%x,%p): page not found. %d\n", obj, off, p2align, physp, error);
 		goto out;
 	}
-	phys = page_to_phys(page);
 
+	*physp = page_to_phys(page);
 	error = 0;
-	if (physp) {
-		*physp = phys;
-	}
-
 out:
-	memobj_unlock(&obj->memobj);
-	dkprintf("fileobj_lookup_page(%p,%lx,%x,%p): %d %lx\n",
-			obj, off, p2align, physp, error, phys);
+	mcs_rwlock_reader_unlock_noirq(&obj->page_hash_locks[hash],
+			&mcs_node);
+
+	dkprintf("fileobj_lookup_page(%p,%lx,%x,%p): %d \n",
+			obj, off, p2align, physp, error);
 	return error;
 }
@@ -393,7 +393,9 @@ static int process_msg_prepare_process(unsigned long rphys)
 	memcpy_long(pn, p, sizeof(struct program_load_desc)
 	            + sizeof(struct program_image_section) * n);
 
-	if((thread = create_thread(p->entry)) == NULL){
+	if ((thread = create_thread(p->entry,
+					(unsigned long *)&p->cpu_set,
+					sizeof(p->cpu_set))) == NULL) {
 		kfree(pn);
 		ihk_mc_unmap_virtual(p, npages, 1);
 		ihk_mc_unmap_memory(NULL, phys, sz);

@@ -579,14 +581,16 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
 		break;
 
 	case SCD_MSG_SCHEDULE_PROCESS:
-		cpuid = obtain_clone_cpuid();
-		if(cpuid == -1){
+		thread = (struct thread *)packet->arg;
+
+		cpuid = obtain_clone_cpuid(&thread->cpu_set);
+		if (cpuid == -1) {
 			kprintf("No CPU available\n");
 			ret = -1;
 			break;
 		}
 
 		dkprintf("SCD_MSG_SCHEDULE_PROCESS: %lx\n", packet->arg);
-		thread = (struct thread *)packet->arg;
 		proc = thread->proc;
 		thread->tid = proc->pid;
 		proc->status = PS_RUNNING;

@@ -594,8 +598,7 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
 		chain_thread(thread);
 		chain_process(proc);
 		runq_add_thread(thread, cpuid);
 
-		//cpu_local_var(next) = (struct thread *)packet->arg;
 
 		ret = 0;
 		break;

@@ -683,7 +686,7 @@ void init_host_syscall_channel(void)
 
 	param.port = 501;
 	param.pkt_size = sizeof(struct ikc_scd_packet);
-	param.queue_size = PAGE_SIZE;
+	param.queue_size = PAGE_SIZE * 4;
 	param.magic = 0x1129;
 	param.handler = syscall_packet_handler;
 

@@ -710,7 +713,7 @@ void init_host_syscall_channel2(void)
 
 	param.port = 502;
 	param.pkt_size = sizeof(struct ikc_scd_packet);
-	param.queue_size = PAGE_SIZE;
+	param.queue_size = PAGE_SIZE * 4;
 	param.magic = 0x1329;
 	param.handler = syscall_packet_handler;
@@ -16,7 +16,7 @@
 extern void arch_init(void);
 extern void kmsg_init(int);
 extern void mem_init(void);
-extern void ikc_master_init(void);
+extern void ihk_ikc_master_init(void);
 extern void ap_init(void);
 extern void arch_ready(void);
 extern void mc_ikc_test_init(void);
@@ -32,13 +32,20 @@ enum {
 	MF_HAS_PAGER = 0x0001,
 	MF_SHMDT_OK = 0x0002,
 	MF_IS_REMOVABLE = 0x0004,
+	MF_PREFETCH = 0x0008,
+	MF_ZEROFILL = 0x0010,
 	MF_END
 };
 
+#define MEMOBJ_READY		0
+#define MEMOBJ_TO_BE_PREFETCHED	1
+
 struct memobj {
-	struct memobj_ops * ops;
-	uint32_t flags;
-	int8_t padding[4];
-	ihk_spinlock_t lock;
+	struct memobj_ops *ops;
+	uint32_t flags;
+	uint32_t status;
+	size_t size;
+	ihk_spinlock_t lock;
 };
 
 typedef void memobj_release_func_t(struct memobj *obj);
@@ -30,7 +30,8 @@ enum pager_op {
 struct pager_create_result {
 	uintptr_t handle;
 	int maxprot;
-	int8_t padding[4];
+	uint32_t flags;
+	size_t size;
 };
 
 /*
@@ -166,7 +166,7 @@
 
 #define NOPHYS	((uintptr_t)-1)
 
-#define PROCESS_NUMA_MASK_BITS 64
+#define PROCESS_NUMA_MASK_BITS 256
 
 /*
  * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are

@@ -700,7 +700,8 @@ static inline int has_cap_sys_admin(struct thread *th)
 
 void hold_address_space(struct address_space *);
 void release_address_space(struct address_space *);
-struct thread *create_thread(unsigned long user_pc);
+struct thread *create_thread(unsigned long user_pc,
+		unsigned long *__cpu_set, size_t cpu_set_size);
 struct thread *clone_thread(struct thread *org, unsigned long pc,
 		unsigned long sp, int clone_flags);
 void destroy_thread(struct thread *thread);
@ -149,6 +149,10 @@ struct program_image_section {
|
||||
#define MCK_RLIMIT_SIGPENDING 14
|
||||
#define MCK_RLIMIT_STACK 15
|
||||
|
||||
#define PLD_CPU_SET_MAX_CPUS 1024
|
||||
typedef unsigned long __cpu_set_unit;
|
||||
#define PLD_CPU_SET_SIZE (PLD_CPU_SET_MAX_CPUS / (8 * sizeof(__cpu_set_unit)))
|
||||
|
||||
struct program_load_desc {
|
||||
int num_sections;
|
||||
int status;
|
||||
@ -178,6 +182,7 @@ struct program_load_desc {
|
||||
struct rlimit rlimit[MCK_RLIM_MAX];
|
||||
unsigned long interp_align;
|
||||
char shell_path[SHELL_PATH_MAX_LEN];
|
||||
__cpu_set_unit cpu_set[PLD_CPU_SET_SIZE];
|
||||
struct program_image_section sections[0];
|
||||
};
|
||||
|
||||
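Note (editorial, not part of the diff): with a 64-bit __cpu_set_unit, PLD_CPU_SET_SIZE works out to 1024 / (8 * 8) = 16 units, so the initial CPU mask adds 128 bytes to struct program_load_desc. Setting the bit for CPU n in the descriptor follows the usual bitmap arithmetic:

	desc->cpu_set[n / 64] |= 1UL << (n % 64);
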
@ -387,6 +392,7 @@ extern struct tod_data_s tod_data;	/* residing in arch-dependent file */

void reset_cputime();
void set_cputime(int mode);
int do_munmap(void *addr, size_t len);
intptr_t do_mmap(intptr_t addr0, size_t len0, int prot, int flags, int fd,
		off_t off0);
void clear_host_pte(uintptr_t addr, size_t len);

21	kernel/include/xpmem.h	Normal file
@ -0,0 +1,21 @@
/**
 * \file xpmem.h
 * License details are found in the file LICENSE.
 * \brief
 * Structures and functions of xpmem
 */
/*
 * HISTORY
 */

#ifndef _XPMEM_H
#define _XPMEM_H

#include <ihk/context.h>

#define XPMEM_DEV_PATH "/dev/xpmem"

extern int xpmem_open(ihk_mc_user_context_t *ctx);

#endif /* _XPMEM_H */

388	kernel/include/xpmem_private.h	Normal file
@ -0,0 +1,388 @@
/**
 * \file xpmem_private.h
 * License details are found in the file LICENSE.
 * \brief
 * Private Cross Partition Memory (XPMEM) structures and macros.
 */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
 * Copyright 2009, 2010, 2014 Cray Inc. All Rights Reserved
 * Copyright (c) 2014-2016 Los Alamos National Security, LLC. All rights
 * reserved.
 */
/*
 * HISTORY
 */

#ifndef _XPMEM_PRIVATE_H
#define _XPMEM_PRIVATE_H

#include <mc_xpmem.h>
#include <xpmem.h>

#define XPMEM_CURRENT_VERSION 0x00026003

//#define DEBUG_PRINT_XPMEM

#ifdef DEBUG_PRINT_XPMEM
#define dkprintf(...) kprintf(__VA_ARGS__)
#define ekprintf(...) kprintf(__VA_ARGS__)
#define XPMEM_DEBUG(format, a...) kprintf("[%d] %s: "format"\n", cpu_local_var(current)->proc->rgid, __func__, ##a)
#else
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#define ekprintf(...) kprintf(__VA_ARGS__)
#define XPMEM_DEBUG(format, a...) do { if (0) kprintf("\n"); } while (0)
#endif

//#define USE_DBUG_ON

#ifdef USE_DBUG_ON
#define DBUG_ON(condition) do { if (condition) kprintf("[%d] BUG: func=%s\n", cpu_local_var(current)->proc->rgid, __func__); } while (0)
#else
#define DBUG_ON(condition)
#endif

#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

#define min(x, y) ({ \
	__typeof__(x) _min1 = (x); \
	__typeof__(y) _min2 = (y); \
	(void) (&_min1 == &_min2); \
	_min1 < _min2 ? _min1 : _min2;})

#define max(x, y) ({ \
	__typeof__(x) _max1 = (x); \
	__typeof__(y) _max2 = (y); \
	(void) (&_max1 == &_max2); \
	_max1 > _max2 ? _max1 : _max2;})

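Note (editorial, not part of the diff): in min()/max() above, the statement (void) (&_min1 == &_min2); does nothing at run time; comparing the two addresses only makes the compiler warn when x and y have incompatible pointer types, the same type-check trick the Linux kernel's min()/max() macros use.
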
#define MAX_ERRNO	4095

#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)

static inline void * ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline long IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}

static inline long IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}

/*
 * Both the xpmem_segid_t and xpmem_apid_t are of type __s64 and designed
 * to be opaque to the user. Both consist of the same underlying fields.
 *
 * The 'uniq' field is designed to give each segid or apid a unique value.
 * Each type is only unique with respect to itself.
 *
 * An ID is never less than or equal to zero.
 */
struct xpmem_id {
	pid_t tgid;		/* thread group that owns ID */
	unsigned int uniq;	/* this value makes the ID unique */
};

typedef union {
	struct xpmem_id xpmem_id;
	xpmem_segid_t segid;
	xpmem_apid_t apid;
} xpmem_id_t;

/* Shift INT_MAX by one so we can tell when we overflow. */
#define XPMEM_MAX_UNIQ_ID (INT_MAX >> 1)

static inline pid_t xpmem_segid_to_tgid(xpmem_segid_t segid)
{
	DBUG_ON(segid <= 0);
	return ((xpmem_id_t *)&segid)->xpmem_id.tgid;
}

static inline pid_t xpmem_apid_to_tgid(xpmem_apid_t apid)
{
	DBUG_ON(apid <= 0);
	return ((xpmem_id_t *)&apid)->xpmem_id.tgid;
}

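Note (editorial sketch, not part of the diff): the union above lets an ID round-trip without shifting or masking; building one and recovering its owner could look like this (the helper name is hypothetical):

static inline xpmem_segid_t xpmem_pack_id_example(pid_t tgid, unsigned int uniq)
{
	xpmem_id_t id;

	id.segid = 0;			/* clear all 64 bits first */
	id.xpmem_id.tgid = tgid;	/* owning thread group */
	id.xpmem_id.uniq = uniq;	/* per-tg counter, <= XPMEM_MAX_UNIQ_ID */
	DBUG_ON(xpmem_segid_to_tgid(id.segid) != tgid);
	return id.segid;
}
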
/*
 * Hash Tables
 *
 * XPMEM utilizes hash tables to enable faster lookups of list entries.
 * These hash tables are implemented as arrays. A simple modulus of the hash
 * key yields the appropriate array index. A hash table's array element (i.e.,
 * hash table bucket) consists of a hash list and the lock that protects it.
 *
 * XPMEM has the following two hash tables:
 *
 * table		bucket					key
 * part->tg_hashtable	list of struct xpmem_thread_group	tgid
 * tg->ap_hashtable	list of struct xpmem_access_permit	apid.uniq
 */
struct xpmem_hashlist {
	mcs_rwlock_lock_t lock;	/* lock for hash list */
	struct list_head list;	/* hash list */
};

#define XPMEM_TG_HASHTABLE_SIZE 8
#define XPMEM_AP_HASHTABLE_SIZE 8

static inline int xpmem_tg_hashtable_index(pid_t tgid)
{
	int index;

	index = (unsigned int)tgid % XPMEM_TG_HASHTABLE_SIZE;

	XPMEM_DEBUG("return: tgid=%lu, index=%d", tgid, index);

	return index;
}

static inline int xpmem_ap_hashtable_index(xpmem_apid_t apid)
{
	int index;

	DBUG_ON(apid <= 0);

	index = ((xpmem_id_t *)&apid)->xpmem_id.uniq % XPMEM_AP_HASHTABLE_SIZE;

	XPMEM_DEBUG("return: apid=%lu, index=%d", apid, index);

	return index;
}

/*
 * general internal driver structures
 */
struct xpmem_thread_group {
	ihk_spinlock_t lock;		/* tg lock */
	pid_t tgid;			/* tg's tgid */
	uid_t uid;			/* tg's uid */
	gid_t gid;			/* tg's gid */
	volatile int flags;		/* tg attributes and state */
	ihk_atomic_t uniq_segid;
	ihk_atomic_t uniq_apid;
	mcs_rwlock_lock_t seg_list_lock;
	struct list_head seg_list;	/* tg's list of segs */
	ihk_atomic_t refcnt;		/* references to tg */
	ihk_atomic_t n_pinned;		/* #of pages pinned by this tg */
	struct list_head tg_hashlist;	/* tg hash list */
	struct thread *group_leader;	/* thread group leader */
	struct process_vm *vm;		/* tg's mm */
	ihk_atomic_t n_recall_PFNs;	/* #of recall of PFNs in progress */
	struct xpmem_hashlist ap_hashtable[];	/* locks + ap hash lists */
};

struct xpmem_segment {
	ihk_spinlock_t lock;		/* seg lock */
	mcs_rwlock_lock_t seg_lock;	/* seg sema */
	xpmem_segid_t segid;		/* unique segid */
	unsigned long vaddr;		/* starting address */
	size_t size;			/* size of seg */
	int permit_type;		/* permission scheme */
	void *permit_value;		/* permission data */
	volatile int flags;		/* seg attributes and state */
	ihk_atomic_t refcnt;		/* references to seg */
	struct xpmem_thread_group *tg;	/* creator tg */
	struct list_head ap_list;	/* local access permits of seg */
	struct list_head seg_list;	/* tg's list of segs */
};

struct xpmem_access_permit {
	ihk_spinlock_t lock;		/* access permit lock */
	xpmem_apid_t apid;		/* unique apid */
	int mode;			/* read/write mode */
	volatile int flags;		/* access permit attributes and state */
	ihk_atomic_t refcnt;		/* references to access permit */
	struct xpmem_segment *seg;	/* seg permitted to be accessed */
	struct xpmem_thread_group *tg;	/* access permit's tg */
	struct list_head att_list;	/* atts of this access permit's seg */
	struct list_head ap_list;	/* access permits linked to seg */
	struct list_head ap_hashlist;	/* access permit hash list */
};

struct xpmem_attachment {
	mcs_rwlock_lock_t at_lock;	/* att lock for serialization */
	struct mcs_rwlock_node_irqsave at_irqsave;	/* att lock for serialization */
	unsigned long vaddr;		/* starting address of seg attached */
	unsigned long at_vaddr;		/* address where seg is attached */
	size_t at_size;			/* size of seg attachment */
	struct vm_range *at_vma;	/* vma where seg is attached */
	volatile int flags;		/* att attributes and state */
	ihk_atomic_t refcnt;		/* references to att */
	struct xpmem_access_permit *ap;	/* associated access permit */
	struct list_head att_list;	/* atts linked to access permit */
	struct process_vm *vm;		/* mm struct attached to */
	mcs_rwlock_lock_t invalidate_lock;	/* to serialize page table invalidates */
};

struct xpmem_partition {
	ihk_atomic_t n_opened;		/* # of /dev/xpmem opened */
	struct xpmem_hashlist tg_hashtable[];	/* locks + tg hash lists */
};

#define XPMEM_FLAG_DESTROYING	0x00040	/* being destroyed */
#define XPMEM_FLAG_DESTROYED	0x00080	/* 'being destroyed' finished */

#define XPMEM_FLAG_VALIDPTEs	0x00200	/* valid PTEs exist */

struct xpmem_perm {
	uid_t uid;
	gid_t gid;
	unsigned long mode;
};

#define XPMEM_PERM_IRUSR	00400
#define XPMEM_PERM_IWUSR	00200

static int xpmem_ioctl(struct mckfd *mckfd, ihk_mc_user_context_t *ctx);
static int xpmem_close(struct mckfd *mckfd, ihk_mc_user_context_t *ctx);

static int xpmem_init(void);
static void xpmem_exit(void);
static int __xpmem_open(void);
static void xpmem_destroy_tg(struct xpmem_thread_group *);

static int xpmem_make(unsigned long, size_t, int, void *, xpmem_segid_t *);
static xpmem_segid_t xpmem_make_segid(struct xpmem_thread_group *);

static int xpmem_remove(xpmem_segid_t);
static void xpmem_remove_seg(struct xpmem_thread_group *,
		struct xpmem_segment *);

static void xpmem_clear_PTEs(struct xpmem_segment *);

extern struct xpmem_partition *xpmem_my_part;

static struct xpmem_thread_group * __xpmem_tg_ref_by_tgid_nolock_internal(
	pid_t, int, int);

static inline struct xpmem_thread_group *__xpmem_tg_ref_by_tgid(
	pid_t tgid,
	int return_destroying)
{
	struct xpmem_thread_group *tg;
	int index;
	struct mcs_rwlock_node_irqsave lock;

	XPMEM_DEBUG("call: tgid=%d, return_destroying=%d",
		tgid, return_destroying);

	index = xpmem_tg_hashtable_index(tgid);
	mcs_rwlock_reader_lock(&xpmem_my_part->tg_hashtable[index].lock, &lock);
	tg = __xpmem_tg_ref_by_tgid_nolock_internal(tgid, index,
		return_destroying);
	mcs_rwlock_reader_unlock(&xpmem_my_part->tg_hashtable[index].lock,
		&lock);

	XPMEM_DEBUG("return: tg=0x%p", tg);

	return tg;
}

static inline struct xpmem_thread_group *__xpmem_tg_ref_by_tgid_nolock(
	pid_t tgid,
	int return_destroying)
{
	struct xpmem_thread_group *tg;

	XPMEM_DEBUG("call: tgid=%d, return_destroying=%d",
		tgid, return_destroying);

	tg = __xpmem_tg_ref_by_tgid_nolock_internal(tgid,
		xpmem_tg_hashtable_index(tgid), return_destroying);

	XPMEM_DEBUG("return: tg=0x%p", tg);

	return tg;
}

#define xpmem_tg_ref_by_tgid(t)			__xpmem_tg_ref_by_tgid(t, 0)
#define xpmem_tg_ref_by_tgid_all(t)		__xpmem_tg_ref_by_tgid(t, 1)
#define xpmem_tg_ref_by_tgid_nolock(t)		__xpmem_tg_ref_by_tgid_nolock(t, 0)
#define xpmem_tg_ref_by_tgid_all_nolock(t)	__xpmem_tg_ref_by_tgid_nolock(t, 1)

static struct xpmem_thread_group * xpmem_tg_ref_by_segid(xpmem_segid_t);
static void xpmem_tg_deref(struct xpmem_thread_group *);
static struct xpmem_segment *xpmem_seg_ref_by_segid(struct xpmem_thread_group *,
		xpmem_segid_t);
static void xpmem_seg_deref(struct xpmem_segment *);

/*
 * Inlines that mark an internal driver structure as being destroyable or not.
 * The idea is to set the refcnt to 1 at structure creation time and then
 * drop that reference at the time the structure is to be destroyed.
 */
static inline void xpmem_tg_not_destroyable(
	struct xpmem_thread_group *tg)
{
	ihk_atomic_set(&tg->refcnt, 1);

	XPMEM_DEBUG("return: tg->refcnt=%d", tg->refcnt);
}

static inline void xpmem_tg_destroyable(
	struct xpmem_thread_group *tg)
{
	XPMEM_DEBUG("call: ");

	xpmem_tg_deref(tg);

	XPMEM_DEBUG("return: ");
}

static inline void xpmem_seg_not_destroyable(
	struct xpmem_segment *seg)
{
	ihk_atomic_set(&seg->refcnt, 1);

	XPMEM_DEBUG("return: seg->refcnt=%d", seg->refcnt);
}

static inline void xpmem_seg_destroyable(
	struct xpmem_segment *seg)
{
	XPMEM_DEBUG("call: ");

	xpmem_seg_deref(seg);

	XPMEM_DEBUG("return: ");
}

/*
 * Inlines that increment the refcnt for the specified structure.
 */
static inline void xpmem_tg_ref(
	struct xpmem_thread_group *tg)
{
	DBUG_ON(ihk_atomic_read(&tg->refcnt) <= 0);
	ihk_atomic_inc(&tg->refcnt);

	XPMEM_DEBUG("return: tg->refcnt=%d", tg->refcnt);
}

static inline void xpmem_seg_ref(
	struct xpmem_segment *seg)
{
	DBUG_ON(ihk_atomic_read(&seg->refcnt) <= 0);
	ihk_atomic_inc(&seg->refcnt);

	XPMEM_DEBUG("return: seg->refcnt=%d", seg->refcnt);
}

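Note (editorial, not part of the diff): the reference counting above follows a create-with-one pattern: *_not_destroyable() stores an initial refcount of 1 at creation time, lookups bracket their use with *_ref()/*_deref(), and *_destroyable() drops the creation reference, so whichever deref reaches zero last frees the structure.
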
#endif /* _XPMEM_PRIVATE_H */

@ -108,11 +108,11 @@ static void dma_test(void)
}
#endif

extern char *ihk_mc_get_kernel_args(void);
extern char *ihk_get_kargs(void);

char *find_command_line(char *name)
{
	char *cmdline = ihk_mc_get_kernel_args();
	char *cmdline = ihk_get_kargs();

	if (!cmdline) {
		return NULL;
@ -122,7 +122,7 @@ char *find_command_line(char *name)

static void parse_kargs(void)
{
	kprintf("KCommand Line: %s\n", ihk_mc_get_kernel_args());
	kprintf("KCommand Line: %s\n", ihk_get_kargs());

	if (1) {
		char *key = "osnum=";
@ -254,7 +254,7 @@ static void rest_init(void)
	time_init();
	kmalloc_init();

	ikc_master_init();
	ihk_ikc_master_init();

	proc_init();

@ -373,6 +373,7 @@ int main(void)

	kputs("IHK/McKernel started.\n");

	ihk_set_kmsg(virt_to_phys(&kmsg_buf), IHK_KMSG_SIZE);
	arch_init();

	/*

169	kernel/mem.c
@ -494,18 +494,96 @@ static void reserve_pages(struct ihk_page_allocator_desc *pa_allocator,
	ihk_pagealloc_reserve(pa_allocator, start, end);
}

static void *allocate_aligned_pages(int npages, int p2align,
extern int cpu_local_var_initialized;
static void *allocate_aligned_pages(int npages, int p2align,
		enum ihk_mc_ap_flag flag)
{
	unsigned long pa;
	int i;
	unsigned long pa = 0;
	int i, node;
	struct ihk_page_allocator_desc *pa_allocator;

	/* Not yet initialized or idle process */
	if (!cpu_local_var_initialized ||
			!cpu_local_var(current) ||
			!cpu_local_var(current)->vm)
		goto distance_based;

	/* User requested policy? */
	switch (cpu_local_var(current)->vm->numa_mem_policy) {
	case MPOL_BIND:
	case MPOL_PREFERRED:
		for_each_set_bit(node,
				cpu_local_var(current)->proc->vm->numa_mask,
				ihk_mc_get_nr_numa_nodes()) {

			list_for_each_entry(pa_allocator,
					&memory_nodes[node].allocators, list) {
				pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);

				if (pa) {
					dkprintf("%s: policy: CPU @ node %d allocated "
							"%d pages from node %d\n",
							__FUNCTION__,
							ihk_mc_get_numa_id(),
							npages, node);
					break;
				}
			}

			if (pa) break;
		}
		break;

	case MPOL_INTERLEAVE:
		/* TODO: */
		break;

	default:
		break;
	}

	if (pa)
		return phys_to_virt(pa);

distance_based:
	node = ihk_mc_get_numa_id();

	/* Look at nodes in the order of distance */
	if (!memory_nodes[node].nodes_by_distance)
		goto order_based;

	/* TODO: match NUMA id and distance matrix with allocating core */
	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
		struct ihk_page_allocator_desc *pa_allocator;

		list_for_each_entry(pa_allocator,
				&memory_nodes[(ihk_mc_get_numa_id() + i) %
				&memory_nodes[memory_nodes[node].
				nodes_by_distance[i].id].allocators, list) {
			pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);

			if (pa) {
				dkprintf("%s: distance: CPU @ node %d allocated "
						"%d pages from node %d\n",
						__FUNCTION__,
						ihk_mc_get_numa_id(),
						npages,
						memory_nodes[node].nodes_by_distance[i].id);
				break;
			}
		}

		if (pa) break;
	}

	if (pa)
		return phys_to_virt(pa);

order_based:
	node = ihk_mc_get_numa_id();

	/* Fall back to regular order */
	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {

		list_for_each_entry(pa_allocator,
				&memory_nodes[(node + i) %
				ihk_mc_get_nr_numa_nodes()].allocators, list) {
			pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);

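Note (editorial summary, not part of the diff): allocate_aligned_pages() now tries up to three tiers, falling through on failure: (1) nodes permitted by the process' MPOL_BIND/MPOL_PREFERRED NUMA mask, (2) nodes ordered by distance from the allocating CPU's node via nodes_by_distance, and (3) plain node-ID order starting from the local node.
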
@ -806,8 +884,8 @@ static void page_fault_handler(void *fault_addr, uint64_t reason, void *regs)
	int error;

	set_cputime(interrupt_from_user(regs)? 1: 2);
	dkprintf("[%d]page_fault_handler(%p,%lx,%p)\n",
			ihk_mc_get_processor_id(), fault_addr, reason, regs);
	dkprintf("%s: addr: %p, reason: %lx, regs: %p\n",
			__FUNCTION__, fault_addr, reason, regs);

	preempt_disable();

@ -862,9 +940,8 @@ static void page_fault_handler(void *fault_addr, uint64_t reason, void *regs)
	error = 0;
	preempt_enable();
out:
	dkprintf("[%d]page_fault_handler(%p,%lx,%p): (%d)\n",
			ihk_mc_get_processor_id(), fault_addr, reason,
			regs, error);
	dkprintf("%s: addr: %p, reason: %lx, regs: %p -> error: %d\n",
			__FUNCTION__, fault_addr, reason, regs, error);
	check_need_resched();
	set_cputime(0);
	return;
@ -932,6 +1009,7 @@ static void numa_init(void)
		memory_nodes[i].linux_numa_id = linux_numa_id;
		memory_nodes[i].type = type;
		INIT_LIST_HEAD(&memory_nodes[i].allocators);
		memory_nodes[i].nodes_by_distance = 0;

		kprintf("NUMA: %d, Linux NUMA: %d, type: %d\n",
				i, linux_numa_id, type);
@ -955,6 +1033,72 @@ static void numa_init(void)
	}
}

static void numa_distances_init()
{
	int i, j, swapped;

	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
		/* TODO: allocate on target node */
		memory_nodes[i].nodes_by_distance =
			ihk_mc_alloc_pages((sizeof(struct node_distance) *
				ihk_mc_get_nr_numa_nodes() + PAGE_SIZE - 1)
				>> PAGE_SHIFT, IHK_MC_AP_NOWAIT);

		if (!memory_nodes[i].nodes_by_distance) {
			kprintf("%s: error: allocating nodes_by_distance\n",
					__FUNCTION__);
			continue;
		}

		for (j = 0; j < ihk_mc_get_nr_numa_nodes(); ++j) {
			memory_nodes[i].nodes_by_distance[j].id = j;
			memory_nodes[i].nodes_by_distance[j].distance =
				ihk_mc_get_numa_distance(i, j);
		}

		/* Sort by distance and node ID */
		swapped = 1;
		while (swapped) {
			swapped = 0;
			for (j = 1; j < ihk_mc_get_nr_numa_nodes(); ++j) {
				if ((memory_nodes[i].nodes_by_distance[j - 1].distance >
						memory_nodes[i].nodes_by_distance[j].distance) ||
						((memory_nodes[i].nodes_by_distance[j - 1].distance ==
						memory_nodes[i].nodes_by_distance[j].distance) &&
						(memory_nodes[i].nodes_by_distance[j - 1].id >
						memory_nodes[i].nodes_by_distance[j].id))) {
					memory_nodes[i].nodes_by_distance[j - 1].id ^=
						memory_nodes[i].nodes_by_distance[j].id;
					memory_nodes[i].nodes_by_distance[j].id ^=
						memory_nodes[i].nodes_by_distance[j - 1].id;
					memory_nodes[i].nodes_by_distance[j - 1].id ^=
						memory_nodes[i].nodes_by_distance[j].id;

					memory_nodes[i].nodes_by_distance[j - 1].distance ^=
						memory_nodes[i].nodes_by_distance[j].distance;
					memory_nodes[i].nodes_by_distance[j].distance ^=
						memory_nodes[i].nodes_by_distance[j - 1].distance;
					memory_nodes[i].nodes_by_distance[j - 1].distance ^=
						memory_nodes[i].nodes_by_distance[j].distance;
					swapped = 1;
				}
			}
		}
		{
			char buf[1024];
			char *pbuf = buf;

			pbuf += sprintf(pbuf, "NUMA %d distances: ", i);
			for (j = 0; j < ihk_mc_get_nr_numa_nodes(); ++j) {
				pbuf += sprintf(pbuf, "%d (%d), ",
					memory_nodes[i].nodes_by_distance[j].id,
					memory_nodes[i].nodes_by_distance[j].distance);
			}
			kprintf("%s\n", buf);
		}
	}
}

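Note (editorial, not part of the diff): the bubble sort above swaps elements with the XOR trick, which is only safe because positions j-1 and j are always distinct objects; a temporary would express the same swap more conventionally:

	struct node_distance tmp = memory_nodes[i].nodes_by_distance[j - 1];
	memory_nodes[i].nodes_by_distance[j - 1] =
		memory_nodes[i].nodes_by_distance[j];
	memory_nodes[i].nodes_by_distance[j] = tmp;
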
#define PHYS_PAGE_HASH_SHIFT	(10)
#define PHYS_PAGE_HASH_SIZE	(1 << PHYS_PAGE_HASH_SHIFT)
#define PHYS_PAGE_HASH_MASK	(PHYS_PAGE_HASH_SIZE - 1)
@ -1236,6 +1380,9 @@ void mem_init(void)
		kprintf("Demand paging on ANONYMOUS mappings enabled.\n");
		anon_on_demand = 1;
	}

	/* Init distance vectors */
	numa_distances_init();
}

#define KMALLOC_TRACK_HASH_SHIFT	(8)

@ -21,7 +21,7 @@ static struct ihk_ikc_channel_desc *mchannel;
static int arch_master_channel_packet_handler(struct ihk_ikc_channel_desc *,
		void *__packet, void *arg);

void ikc_master_init(void)
void ihk_ikc_master_init(void)
{
	mchannel = kmalloc(sizeof(struct ihk_ikc_channel_desc) +
			sizeof(struct ihk_ikc_master_packet),

@ -233,13 +233,15 @@ init_process_vm(struct process *owner, struct address_space *asp, struct process
	return 0;
}

struct thread *
create_thread(unsigned long user_pc)
struct thread *create_thread(unsigned long user_pc,
		unsigned long *__cpu_set, size_t cpu_set_size)
{
	struct thread *thread;
	struct process *proc;
	struct process_vm *vm = NULL;
	struct address_space *asp = NULL;
	int cpu;
	int cpu_set_empty = 1;

	thread = ihk_mc_alloc_pages(KERNEL_STACK_NR_PAGES, IHK_MC_AP_NOWAIT);
	if (!thread)
@ -255,7 +257,22 @@ create_thread(unsigned long user_pc)
	memset(vm, 0, sizeof(struct process_vm));
	init_process(proc, cpu_local_var(resource_set)->pid1);

	if (1) {
		/* Use requested CPU cores */
		for_each_set_bit(cpu, __cpu_set, cpu_set_size * BITS_PER_BYTE) {
			if (cpu >= num_processors) {
				kprintf("%s: invalid CPU requested in initial cpu_set\n",
					__FUNCTION__);
				goto err;
			}

			dkprintf("%s: pid: %d, CPU: %d\n",
				__FUNCTION__, proc->pid, cpu);
			CPU_SET(cpu, &thread->cpu_set);
			cpu_set_empty = 0;
		}

		/* Default allows all cores */
		if (cpu_set_empty) {
			struct ihk_mc_cpu_info *infop;
			int i;

@ -1367,6 +1384,11 @@ static int sync_one_page(void *arg0, page_table_t pt, pte_t *ptep,
	flush_tlb_single((uintptr_t)pgaddr);	/* XXX: TLB flush */

	phys = pte_get_phys(ptep);
	if (args->memobj->flags & MF_ZEROFILL) {
		error = 0;
		goto out;
	}

	error = memobj_flush_page(args->memobj, phys, pgsize);
	if (error) {
		ekprintf("sync_one_page(%p,%p,%p %#lx,%p,%d):"
@ -1394,11 +1416,19 @@ int sync_process_memory_range(struct process_vm *vm, struct vm_range *range,
	args.memobj = range->memobj;

	ihk_mc_spinlock_lock_noirq(&vm->page_table_lock);
	memobj_lock(range->memobj);

	if (!(range->memobj->flags & MF_ZEROFILL)) {
		memobj_lock(range->memobj);
	}

	error = visit_pte_range(vm->address_space->page_table, (void *)start,
			(void *)end, range->pgshift, VPTEF_SKIP_NULL,
			&sync_one_page, &args);
	memobj_unlock(range->memobj);
			(void *)end, range->pgshift, VPTEF_SKIP_NULL,
			&sync_one_page, &args);

	if (!(range->memobj->flags & MF_ZEROFILL)) {
		memobj_unlock(range->memobj);
	}

	ihk_mc_spinlock_unlock_noirq(&vm->page_table_lock);
	if (error) {
		ekprintf("sync_process_memory_range(%p,%p,%#lx,%#lx):"
@ -2191,9 +2221,10 @@ int populate_process_memory(struct process_vm *vm, void *start, size_t len)
	for (addr = (uintptr_t)start; addr < end; addr += PAGE_SIZE) {
		error = page_fault_process_vm(vm, (void *)addr, reason);
		if (error) {
			ekprintf("populate_process_range:page_fault_process_vm"
					"(%p,%lx,%lx) failed %d\n",
					vm, addr, reason, error);
			ekprintf("%s: WARNING: page_fault_process_vm(): vm: %p, "
					"addr: %lx, reason: %lx, off: %lu, len: %lu returns %d\n",
					__FUNCTION__, vm, addr, reason,
					((void *)addr - start), len, error);
			goto out;
		}
	}

@ -179,6 +179,7 @@ int shmobj_create(struct shmid_ds *ds, struct memobj **objp)

	memset(obj, 0, sizeof(*obj));
	obj->memobj.ops = &shmobj_ops;
	obj->memobj.size = ds->shm_segsz;
	obj->ds = *ds;
	obj->ds.shm_perm.seq = the_seq++;
	obj->ds.shm_nattch = 1;

@ -54,6 +54,7 @@
#include <process.h>
#include <bitops.h>
#include <bitmap.h>
#include <xpmem.h>

/* Headers taken from kitten LWK */
#include <lwk/stddef.h>
@ -1062,7 +1063,7 @@ out:
	return (int)lerror;
}

static int do_munmap(void *addr, size_t len)
int do_munmap(void *addr, size_t len)
{
	int error;
	int ro_freed;
@ -1362,6 +1363,13 @@ do_mmap(const intptr_t addr0, const size_t len0, const int prot,
		goto out;
	}

	memobj_lock(memobj);
	if (memobj->status == MEMOBJ_TO_BE_PREFETCHED) {
		memobj->status = MEMOBJ_READY;
		populated_mapping = 1;
	}
	memobj_unlock(memobj);

	error = 0;
	p = NULL;
	memobj = NULL;
@ -1376,8 +1384,9 @@ out:
	if (!error && populated_mapping && !((vrflags & VR_PROT_MASK) == VR_PROT_NONE)) {
		error = populate_process_memory(thread->vm, (void *)addr, len);
		if (error) {
			ekprintf("%s: error :populate_process_memory"
					"vm: %p, addr: %p, len: %d (flags: %s%s) failed %d\n", __FUNCTION__,
			ekprintf("%s: WARNING: populate_process_memory(): "
					"vm: %p, addr: %p, len: %d (flags: %s%s) failed %d\n",
					__FUNCTION__,
					thread->vm, (void *)addr, len,
					(flags & MAP_POPULATE) ? "MAP_POPULATE " : "",
					(flags & MAP_LOCKED) ? "MAP_LOCKED ": "",
@ -1884,7 +1893,7 @@ SYSCALL_DECLARE(execve)
	ret = do_syscall(&request, ihk_mc_get_processor_id(), 0);

	if (ret != 0) {
		kprintf("execve(): ERROR: host failed to load elf header, errno: %d\n",
		dkprintf("execve(): ERROR: host failed to load elf header, errno: %d\n",
				ret);
		return -ret;
	}
@ -2039,7 +2048,7 @@ unsigned long do_fork(int clone_flags, unsigned long newsp,
		return -EINVAL;
	}

	cpuid = obtain_clone_cpuid();
	cpuid = obtain_clone_cpuid(&old->cpu_set);
	if (cpuid == -1) {
		kprintf("do_fork,core not available\n");
		return -EAGAIN;
@ -2696,6 +2705,21 @@ SYSCALL_DECLARE(ioctl)
	return rc;
}

SYSCALL_DECLARE(open)
{
	const char *pathname = (const char *)ihk_mc_syscall_arg0(ctx);
	long rc;

	dkprintf("open(): pathname=%s\n", pathname);
	if (!strcmp(pathname, XPMEM_DEV_PATH)) {
		rc = xpmem_open(ctx);
	} else {
		rc = syscall_generic_forwarding(__NR_open, ctx);
	}

	return rc;
}

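Note (editorial, not part of the diff): from user space the hook above is transparent; open("/dev/xpmem", ...) on McKernel now dispatches to xpmem_open(), which still forwards the raw open to the host to obtain a file descriptor and then registers the XPMEM ioctl/close callbacks for it, while every other pathname continues to be forwarded to Linux unchanged.
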
SYSCALL_DECLARE(close)
{
	int fd = ihk_mc_syscall_arg0(ctx);
@ -7400,7 +7424,9 @@ SYSCALL_DECLARE(mbind)
	}

	/* Verify NUMA mask */
	for_each_set_bit(bit, numa_mask, maxnode) {
	for_each_set_bit(bit, numa_mask,
			maxnode < PROCESS_NUMA_MASK_BITS ?
			maxnode : PROCESS_NUMA_MASK_BITS) {
		if (bit >= ihk_mc_get_nr_numa_nodes()) {
			dkprintf("%s: %d is bigger than # of NUMA nodes\n",
					__FUNCTION__, bit);
@ -7703,7 +7729,9 @@ SYSCALL_DECLARE(set_mempolicy)

	/* Verify NUMA mask */
	valid_mask = 0;
	for_each_set_bit(bit, numa_mask, maxnode) {
	for_each_set_bit(bit, numa_mask,
			maxnode < PROCESS_NUMA_MASK_BITS ?
			maxnode : PROCESS_NUMA_MASK_BITS) {
		if (bit >= ihk_mc_get_nr_numa_nodes()) {
			dkprintf("%s: %d is bigger than # of NUMA nodes\n",
					__FUNCTION__, bit);
@ -7725,7 +7753,9 @@ SYSCALL_DECLARE(set_mempolicy)
	}

	/* Update current mask by clearing non-requested nodes */
	for_each_set_bit(bit, vm->numa_mask, maxnode) {
	for_each_set_bit(bit, vm->numa_mask,
			maxnode < PROCESS_NUMA_MASK_BITS ?
			maxnode : PROCESS_NUMA_MASK_BITS) {
		if (!test_bit(bit, numa_mask)) {
			clear_bit(bit, vm->numa_mask);
		}

739	kernel/xpmem.c	Normal file
@ -0,0 +1,739 @@
/**
 * \file xpmem.c
 * License details are found in the file LICENSE.
 * \brief
 * Cross Partition Memory (XPMEM) support.
 */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
 * Copyright 2010, 2014 Cray Inc. All Rights Reserved
 * Copyright 2015-2016 Los Alamos National Security, LLC. All rights reserved.
 */
/*
 * HISTORY
 */

#include <errno.h>
#include <kmalloc.h>
#include <limits.h>
#include <memobj.h>
#include <mman.h>
#include <string.h>
#include <types.h>
#include <vsprintf.h>
#include <ihk/lock.h>
#include <ihk/mm.h>
#include <xpmem_private.h>


struct xpmem_partition *xpmem_my_part = NULL;	/* pointer to this partition */


int xpmem_open(
	ihk_mc_user_context_t *ctx)
{
	const char *pathname = (const char *)ihk_mc_syscall_arg0(ctx);
	int flags = (int)ihk_mc_syscall_arg1(ctx);
	int ret;
	struct thread *thread = cpu_local_var(current);
	struct process *proc = thread->proc;
	struct syscall_request request IHK_DMA_ALIGN;
	int fd;
	struct mckfd *mckfd;
	long irqstate;

	XPMEM_DEBUG("call: pathname=%s, flags=%d", pathname, flags);

	if (!xpmem_my_part) {
		ret = xpmem_init();
		if (ret) {
			return ret;
		}
	}

	request.number = __NR_open;
	request.args[0] = (unsigned long)pathname;
	request.args[1] = flags;
	fd = do_syscall(&request, ihk_mc_get_processor_id(), 0);
	if(fd < 0){
		XPMEM_DEBUG("__NR_open error: fd=%d", fd);
		return fd;
	}

	ret = __xpmem_open();
	if (ret) {
		XPMEM_DEBUG("return: ret=%d", ret);
		return ret;
	}

	mckfd = kmalloc(sizeof(struct mckfd), IHK_MC_AP_NOWAIT);
	if(!mckfd) {
		return -ENOMEM;
	}
	XPMEM_DEBUG("kmalloc(): mckfd=0x%p", mckfd);
	memset(mckfd, 0, sizeof(struct mckfd));
	mckfd->fd = fd;
	mckfd->sig_no = -1;
	mckfd->ioctl_cb = xpmem_ioctl;
	mckfd->close_cb = xpmem_close;
	irqstate = ihk_mc_spinlock_lock(&proc->mckfd_lock);

	if(proc->mckfd == NULL) {
		proc->mckfd = mckfd;
		mckfd->next = NULL;
	} else {
		mckfd->next = proc->mckfd;
		proc->mckfd = mckfd;
	}

	ihk_mc_spinlock_unlock(&proc->mckfd_lock, irqstate);

	ihk_atomic_inc_return(&xpmem_my_part->n_opened);

	XPMEM_DEBUG("return: ret=%d", mckfd->fd);

	return mckfd->fd;
}


static int xpmem_ioctl(
	struct mckfd *mckfd,
	ihk_mc_user_context_t *ctx)
{
	int ret;
	unsigned int cmd = ihk_mc_syscall_arg1(ctx);
	unsigned long arg = ihk_mc_syscall_arg2(ctx);

	XPMEM_DEBUG("call: cmd=0x%x, arg=0x%lx", cmd, arg);

	switch (cmd) {
	case XPMEM_CMD_VERSION: {
		ret = XPMEM_CURRENT_VERSION;

		XPMEM_DEBUG("return: cmd=0x%x, ret=0x%lx", cmd, ret);

		return ret;
	}
	case XPMEM_CMD_MAKE: {
		struct xpmem_cmd_make make_info;
		xpmem_segid_t segid = 0;

		if (copy_from_user(&make_info, (void __user *)arg,
				sizeof(struct xpmem_cmd_make)))
			return -EFAULT;

		ret = xpmem_make(make_info.vaddr, make_info.size,
				make_info.permit_type,
				(void *)make_info.permit_value, &segid);
		if (ret != 0) {
			XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
			return ret;
		}

		if (copy_to_user(&((struct xpmem_cmd_make __user *)arg)->segid,
				(void *)&segid, sizeof(xpmem_segid_t))) {
			(void)xpmem_remove(segid);
			return -EFAULT;
		}

		XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);

		return ret;
	}
	case XPMEM_CMD_REMOVE: {
		struct xpmem_cmd_remove remove_info;

		if (copy_from_user(&remove_info, (void __user *)arg,
				sizeof(struct xpmem_cmd_remove)))
			return -EFAULT;

		ret = xpmem_remove(remove_info.segid);

		XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);

		return ret;
	}
	case XPMEM_CMD_GET: {
		struct xpmem_cmd_get get_info;
//		xpmem_apid_t apid = 0;

		if (copy_from_user(&get_info, (void __user *)arg,
				sizeof(struct xpmem_cmd_get)))
			return -EFAULT;

//		ret = xpmem_get(get_info.segid, get_info.flags,
//			get_info.permit_type,
//			(void *)get_info.permit_value, &apid); // TODO
		ret = -EINVAL;
		if (ret != 0) {
			XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
			return ret;
		}

//		if (copy_to_user(&((struct xpmem_cmd_get __user *)arg)->apid,
//			(void *)&apid, sizeof(xpmem_apid_t))) {
//			(void)xpmem_release(apid);
//			return -EFAULT;
//		}

		XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);

		return ret;
	}
	case XPMEM_CMD_RELEASE: {
		struct xpmem_cmd_release release_info;

		if (copy_from_user(&release_info, (void __user *)arg,
				sizeof(struct xpmem_cmd_release)))
			return -EFAULT;

//		ret = xpmem_release(release_info.apid); // TODO
		ret = -EINVAL;

		XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);

		return ret;
	}
	case XPMEM_CMD_ATTACH: {
		struct xpmem_cmd_attach attach_info;
//		unsigned long at_vaddr = 0;

		if (copy_from_user(&attach_info, (void __user *)arg,
				sizeof(struct xpmem_cmd_attach)))
			return -EFAULT;

//		ret = xpmem_attach(mckfd, attach_info.apid, attach_info.offset,
//			attach_info.size, attach_info.vaddr,
//			attach_info.fd, attach_info.flags,
//			&at_vaddr); // TODO
		ret = -EINVAL;
		if (ret != 0) {
			XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
			return ret;
		}

//		if (copy_to_user(
//			&((struct xpmem_cmd_attach __user *)arg)->vaddr,
//			(void *)&at_vaddr, sizeof(unsigned long))) {
//			(void)xpmem_detach(at_vaddr);
//			return -EFAULT;
//		}

		XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);

		return ret;
	}
	case XPMEM_CMD_DETACH: {
		struct xpmem_cmd_detach detach_info;

		if (copy_from_user(&detach_info, (void __user *)arg,
				sizeof(struct xpmem_cmd_detach)))
			return -EFAULT;

//		ret = xpmem_detach(detach_info.vaddr); // TODO
		ret = -EINVAL;

		XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);

		return ret;
	}
	default:
		break;
	}

	XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, -EINVAL);

	return -EINVAL;
}


static int xpmem_close(
	struct mckfd *mckfd,
	ihk_mc_user_context_t *ctx)
{
	struct xpmem_thread_group *tg;
	int index;
	struct mcs_rwlock_node_irqsave lock;
	int n_opened;

	XPMEM_DEBUG("call: fd=%d", mckfd->fd);

	n_opened = ihk_atomic_dec_return(&xpmem_my_part->n_opened);
	if (n_opened) {
		XPMEM_DEBUG("return: ret=%d, n_opened=%d", 0, n_opened);
		return 0;
	}
	XPMEM_DEBUG("n_opened=%d", n_opened);

	index = xpmem_tg_hashtable_index(cpu_local_var(current)->proc->pid);

	mcs_rwlock_writer_lock(&xpmem_my_part->tg_hashtable[index].lock, &lock);

	tg = xpmem_tg_ref_by_tgid_all_nolock(
		cpu_local_var(current)->proc->pid);
	if (!tg) {
		mcs_rwlock_writer_unlock(
			&xpmem_my_part->tg_hashtable[index].lock, &lock);
		return 0;
	}

	list_del_init(&tg->tg_hashlist);

	mcs_rwlock_writer_unlock(&xpmem_my_part->tg_hashtable[index].lock,
		&lock);

	XPMEM_DEBUG("tg->vm=0x%p", tg->vm);

	xpmem_destroy_tg(tg);

	if (!n_opened) {
		xpmem_exit();
	}

	XPMEM_DEBUG("return: ret=%d", 0);

	return 0;
}


static int xpmem_init(void)
{
	int i;

	XPMEM_DEBUG("call: ");

	xpmem_my_part = kmalloc(sizeof(struct xpmem_partition) +
		sizeof(struct xpmem_hashlist) * XPMEM_TG_HASHTABLE_SIZE,
		IHK_MC_AP_NOWAIT);
	if (xpmem_my_part == NULL) {
		return -ENOMEM;
	}
	XPMEM_DEBUG("kmalloc(): xpmem_my_part=0x%p", xpmem_my_part);
	memset(xpmem_my_part, 0, sizeof(struct xpmem_partition) +
		sizeof(struct xpmem_hashlist) * XPMEM_TG_HASHTABLE_SIZE);

	for (i = 0; i < XPMEM_TG_HASHTABLE_SIZE; i++) {
		mcs_rwlock_init(&xpmem_my_part->tg_hashtable[i].lock);
		INIT_LIST_HEAD(&xpmem_my_part->tg_hashtable[i].list);
	}

	ihk_atomic_set(&xpmem_my_part->n_opened, 0);

	XPMEM_DEBUG("return: ret=%d", 0);

	return 0;
}


static void xpmem_exit(void)
{
	XPMEM_DEBUG("call: ");

	if (xpmem_my_part) {
		XPMEM_DEBUG("kfree(): 0x%p", xpmem_my_part);
		kfree(xpmem_my_part);
		xpmem_my_part = NULL;
	}

	XPMEM_DEBUG("return: ");
}


static int __xpmem_open(void)
{
	struct xpmem_thread_group *tg;
	int index;
	struct mcs_rwlock_node_irqsave lock;

	XPMEM_DEBUG("call: ");

	tg = xpmem_tg_ref_by_tgid(cpu_local_var(current)->proc->pid);
	if (!IS_ERR(tg)) {
		xpmem_tg_deref(tg);
		XPMEM_DEBUG("return: ret=%d, tg=0x%p", 0, tg);
		return 0;
	}

	tg = kmalloc(sizeof(struct xpmem_thread_group) +
		sizeof(struct xpmem_hashlist) * XPMEM_AP_HASHTABLE_SIZE,
		IHK_MC_AP_NOWAIT);
	if (tg == NULL) {
		return -ENOMEM;
	}
	XPMEM_DEBUG("kmalloc(): tg=0x%p", tg);
	memset(tg, 0, sizeof(struct xpmem_thread_group) +
		sizeof(struct xpmem_hashlist) * XPMEM_AP_HASHTABLE_SIZE);

	ihk_mc_spinlock_init(&tg->lock);
	tg->tgid = cpu_local_var(current)->proc->pid;
	tg->uid = cpu_local_var(current)->proc->ruid;
	tg->gid = cpu_local_var(current)->proc->rgid;
	ihk_atomic_set(&tg->uniq_segid, 0);
	ihk_atomic_set(&tg->uniq_apid, 0);
	mcs_rwlock_init(&tg->seg_list_lock);
	INIT_LIST_HEAD(&tg->seg_list);
	ihk_atomic_set(&tg->n_pinned, 0);
	INIT_LIST_HEAD(&tg->tg_hashlist);
	tg->vm = cpu_local_var(current)->vm;
	ihk_atomic_set(&tg->n_recall_PFNs, 0);

	for (index = 0; index < XPMEM_AP_HASHTABLE_SIZE; index++) {
		mcs_rwlock_init(&tg->ap_hashtable[index].lock);
		INIT_LIST_HEAD(&tg->ap_hashtable[index].list);
	}

	xpmem_tg_not_destroyable(tg);

	index = xpmem_tg_hashtable_index(tg->tgid);
	mcs_rwlock_writer_lock(&xpmem_my_part->tg_hashtable[index].lock, &lock);

	list_add_tail(&tg->tg_hashlist,
		&xpmem_my_part->tg_hashtable[index].list);

	mcs_rwlock_writer_unlock(&xpmem_my_part->tg_hashtable[index].lock,
		&lock);

	tg->group_leader = cpu_local_var(current);

	XPMEM_DEBUG("return: ret=%d", 0);

	return 0;
}


static void xpmem_destroy_tg(
	struct xpmem_thread_group *tg)
{
	XPMEM_DEBUG("call: tg=0x%p", tg);

	XPMEM_DEBUG("tg->vm=0x%p", tg->vm);

	xpmem_tg_destroyable(tg);
	xpmem_tg_deref(tg);

	XPMEM_DEBUG("return: ");
}

static int xpmem_make(
	unsigned long vaddr,
	size_t size,
	int permit_type,
	void *permit_value,
	xpmem_segid_t *segid_p)
{
	xpmem_segid_t segid;
	struct xpmem_thread_group *seg_tg;
	struct xpmem_segment *seg;
	struct mcs_rwlock_node_irqsave lock;

	XPMEM_DEBUG("call: vaddr=0x%lx, size=%lu, permit_type=%d, "
		"permit_value=0%04lo",
		vaddr, size, permit_type,
		(unsigned long)(uintptr_t)permit_value);

	if (permit_type != XPMEM_PERMIT_MODE ||
			((unsigned long)(uintptr_t)permit_value & ~00777) ||
			size == 0) {
		XPMEM_DEBUG("return: ret=%d", -EINVAL);
		return -EINVAL;
	}

	seg_tg = xpmem_tg_ref_by_tgid(cpu_local_var(current)->proc->pid);
	if (IS_ERR(seg_tg)) {
		DBUG_ON(PTR_ERR(seg_tg) != -ENOENT);
		return -XPMEM_ERRNO_NOPROC;
	}

	/*
	 * The start of the segment must be page aligned and it must be a
	 * multiple of pages in size.
	 */
	if (offset_in_page(vaddr) != 0 || offset_in_page(size) != 0) {
		xpmem_tg_deref(seg_tg);
		XPMEM_DEBUG("return: ret=%d", -EINVAL);
		return -EINVAL;
	}

	segid = xpmem_make_segid(seg_tg);
	if (segid < 0) {
		xpmem_tg_deref(seg_tg);
		return segid;
	}

	/* create a new struct xpmem_segment structure with a unique segid */
	seg = kmalloc(sizeof(struct xpmem_segment), IHK_MC_AP_NOWAIT);
	if (seg == NULL) {
		xpmem_tg_deref(seg_tg);
		return -ENOMEM;
	}
	XPMEM_DEBUG("kmalloc(): seg=0x%p", seg);
	memset(seg, 0, sizeof(struct xpmem_segment));

	ihk_mc_spinlock_init(&seg->lock);
	mcs_rwlock_init(&seg->seg_lock);
	seg->segid = segid;
	seg->vaddr = vaddr;
	seg->size = size;
	seg->permit_type = permit_type;
	seg->permit_value = permit_value;
	seg->tg = seg_tg;
	INIT_LIST_HEAD(&seg->ap_list);
	INIT_LIST_HEAD(&seg->seg_list);

	xpmem_seg_not_destroyable(seg);

	/* add seg to its tg's list of segs */
	mcs_rwlock_writer_lock(&seg_tg->seg_list_lock, &lock);
	list_add_tail(&seg->seg_list, &seg_tg->seg_list);
	mcs_rwlock_writer_unlock(&seg_tg->seg_list_lock, &lock);

	xpmem_tg_deref(seg_tg);

	*segid_p = segid;

	XPMEM_DEBUG("return: ret=%d, segid=0x%lx", 0, *segid_p);

	return 0;
}


static xpmem_segid_t xpmem_make_segid(
	struct xpmem_thread_group *seg_tg)
{
	struct xpmem_id segid;
	xpmem_segid_t *segid_p = (xpmem_segid_t *)&segid;
	int uniq;

	XPMEM_DEBUG("call: seg_tg=0x%p, uniq_segid=%d",
		seg_tg, ihk_atomic_read(&seg_tg->uniq_segid));

	DBUG_ON(sizeof(struct xpmem_id) != sizeof(xpmem_segid_t));

	uniq = ihk_atomic_inc_return(&seg_tg->uniq_segid);
	if (uniq > XPMEM_MAX_UNIQ_ID) {
		ihk_atomic_dec(&seg_tg->uniq_segid);
		return -EBUSY;
	}

	*segid_p = 0;
	segid.tgid = seg_tg->tgid;
	segid.uniq = (unsigned long)uniq;

	DBUG_ON(*segid_p <= 0);

	XPMEM_DEBUG("return: segid=0x%lx, segid.tgid=%d, segid.uniq=%d",
		segid, segid.tgid, segid.uniq);

	return *segid_p;
}


static int xpmem_remove(
	xpmem_segid_t segid)
{
	struct xpmem_thread_group *seg_tg;
	struct xpmem_segment *seg;

	XPMEM_DEBUG("call: segid=0x%lx", segid);

	if (segid <= 0) {
		XPMEM_DEBUG("return: ret=%d", -EINVAL);
		return -EINVAL;
	}

	seg_tg = xpmem_tg_ref_by_segid(segid);
	if (IS_ERR(seg_tg))
		return PTR_ERR(seg_tg);

	if (cpu_local_var(current)->proc->pid != seg_tg->tgid) {
		xpmem_tg_deref(seg_tg);
		XPMEM_DEBUG("return: ret=%d", -EACCES);
		return -EACCES;
	}

	seg = xpmem_seg_ref_by_segid(seg_tg, segid);
	if (IS_ERR(seg)) {
		xpmem_tg_deref(seg_tg);
		return PTR_ERR(seg);
	}
	DBUG_ON(seg->tg != seg_tg);

	xpmem_remove_seg(seg_tg, seg);
	xpmem_seg_deref(seg);
	xpmem_tg_deref(seg_tg);

	XPMEM_DEBUG("return: ret=%d", 0);

	return 0;
}


static void xpmem_remove_seg(
	struct xpmem_thread_group *seg_tg,
	struct xpmem_segment *seg)
{
	DBUG_ON(ihk_atomic_read(&seg->refcnt) <= 0);
	struct mcs_rwlock_node_irqsave seg_lock;
	struct mcs_rwlock_node_irqsave lock;

	XPMEM_DEBUG("call: tgid=%d, segid=0x%lx", seg_tg->tgid, seg->segid);

	ihk_mc_spinlock_lock(&seg->lock);
	if (seg->flags & XPMEM_FLAG_DESTROYING) {
		ihk_mc_spinlock_unlock_noirq(&seg->lock);
		schedule();
		return;
	}
	seg->flags |= XPMEM_FLAG_DESTROYING;
	ihk_mc_spinlock_unlock_noirq(&seg->lock);

	mcs_rwlock_writer_lock(&seg->seg_lock, &seg_lock);

	/* unpin pages and clear PTEs for each attachment to this segment */
	xpmem_clear_PTEs(seg);

	/* indicate that the segment has been destroyed */
	ihk_mc_spinlock_lock(&seg->lock);
	seg->flags |= XPMEM_FLAG_DESTROYED;
	ihk_mc_spinlock_unlock_noirq(&seg->lock);

	/* Remove segment structure from its tg's list of segs */
	mcs_rwlock_writer_lock(&seg_tg->seg_list_lock, &lock);
	list_del_init(&seg->seg_list);
	mcs_rwlock_writer_unlock(&seg_tg->seg_list_lock, &lock);

	mcs_rwlock_writer_unlock(&seg->seg_lock, &seg_lock);

	xpmem_seg_destroyable(seg);

	XPMEM_DEBUG("return: ");
}


static void xpmem_clear_PTEs(
	struct xpmem_segment *seg)
{
	XPMEM_DEBUG("call: seg=0x%p", seg);

//	xpmem_clear_PTEs_range(seg, seg->vaddr, seg->vaddr + seg->size, 0); // TODO

	XPMEM_DEBUG("return: ");
}


static struct xpmem_thread_group * __xpmem_tg_ref_by_tgid_nolock_internal(
	pid_t tgid,
	int index,
	int return_destroying)
{
	struct xpmem_thread_group *tg;

	XPMEM_DEBUG("call: tgid=%d, index=%d, return_destroying=%d",
		tgid, index, return_destroying);

	list_for_each_entry(tg, &xpmem_my_part->tg_hashtable[index].list,
			tg_hashlist) {
		if (tg->tgid == tgid) {
			if ((tg->flags & XPMEM_FLAG_DESTROYING) &&
					!return_destroying) {
				continue;
			}

			xpmem_tg_ref(tg);

			XPMEM_DEBUG("return: tg=0x%p", tg);
			return tg;
		}
	}

	XPMEM_DEBUG("return: tg=0x%p", ERR_PTR(-ENOENT));

	return ERR_PTR(-ENOENT);
}


static struct xpmem_thread_group * xpmem_tg_ref_by_segid(
	xpmem_segid_t segid)
{
	struct xpmem_thread_group *tg;

	XPMEM_DEBUG("call: segid=0x%lx", segid);

	tg = xpmem_tg_ref_by_tgid(xpmem_segid_to_tgid(segid));

	XPMEM_DEBUG("return: tg=0x%p", tg);

	return tg;
}


static void xpmem_tg_deref(
	struct xpmem_thread_group *tg)
{
	XPMEM_DEBUG("call: tg=0x%p", tg);

	DBUG_ON(ihk_atomic_read(&tg->refcnt) <= 0);
	if (ihk_atomic_dec_return(&tg->refcnt) != 0) {
		XPMEM_DEBUG("return: tg->refcnt=%d", tg->refcnt);
		return;
	}

	XPMEM_DEBUG("kfree(): tg=0x%p", tg);
	kfree(tg);

	XPMEM_DEBUG("return: ");
}


static struct xpmem_segment * xpmem_seg_ref_by_segid(
	struct xpmem_thread_group *seg_tg,
	xpmem_segid_t segid)
{
	struct xpmem_segment *seg;
	struct mcs_rwlock_node_irqsave lock;

	XPMEM_DEBUG("call: seg_tg=0x%p, segid=0x%lx", seg_tg, segid);

	mcs_rwlock_reader_lock(&seg_tg->seg_list_lock, &lock);

	list_for_each_entry(seg, &seg_tg->seg_list, seg_list) {
		if (seg->segid == segid) {
			if (seg->flags & XPMEM_FLAG_DESTROYING)
				continue;

			xpmem_seg_ref(seg);
			mcs_rwlock_reader_unlock(&seg_tg->seg_list_lock, &lock);
			return seg;
		}
	}

	mcs_rwlock_reader_unlock(&seg_tg->seg_list_lock, &lock);

	return ERR_PTR(-ENOENT);
}


static void xpmem_seg_deref(
	struct xpmem_segment *seg)
{
	XPMEM_DEBUG("call: seg=0x%p", seg);

	DBUG_ON(ihk_atomic_read(&seg->refcnt) <= 0);
	if (ihk_atomic_dec_return(&seg->refcnt) != 0) {
		XPMEM_DEBUG("return: seg->refcnt=%d", seg->refcnt);
		return;
	}

	DBUG_ON(!(seg->flags & XPMEM_FLAG_DESTROYING));

	XPMEM_DEBUG("kfree(): seg=0x%p", seg);
	kfree(seg);

	XPMEM_DEBUG("return: ");
}

@ -102,6 +102,7 @@ static int alloc_zeroobj(void)
|
||||
|
||||
memset(obj, 0, sizeof(*obj));
|
||||
obj->memobj.ops = &zeroobj_ops;
|
||||
obj->memobj.size = 0;
|
||||
page_list_init(obj);
|
||||
ihk_mc_spinlock_init(&obj->memobj.lock);
|
||||
|
||||
|
||||
@ -49,6 +49,7 @@ struct ihk_mc_cpu_info {
|
||||
int ncpus;
|
||||
int *hw_ids;
|
||||
int *nodes;
|
||||
int *linux_cpu_ids;
|
||||
};
|
||||
|
||||
struct ihk_mc_cpu_info *ihk_mc_get_cpu_info(void);
|
||||
@ -56,6 +57,9 @@ void ihk_mc_boot_cpu(int cpuid, unsigned long pc);
|
||||
int ihk_mc_get_processor_id(void);
|
||||
int ihk_mc_get_hardware_processor_id(void);
|
||||
int ihk_mc_get_numa_id(void);
|
||||
int ihk_mc_get_nr_cores();
|
||||
int ihk_mc_get_core(int id, unsigned long *linux_core_id, unsigned long *apic_id,
|
||||
int *numa_id);
|
||||
|
||||
void ihk_mc_delay_us(int us);
|
||||
void ihk_mc_set_syscall_handler(long (*handler)(int, ihk_mc_user_context_t *));
|
||||
|
||||
@ -185,6 +185,9 @@ int ihk_mc_get_memory_chunk(int id,
|
||||
void remote_flush_tlb_cpumask(struct process_vm *vm,
|
||||
unsigned long addr, int cpu_id);
|
||||
|
||||
int ihk_set_kmsg(unsigned long addr, unsigned long size);
|
||||
char *ihk_get_kargs();
|
||||
|
||||
extern void (*__tlb_flush_handler)(int vector);
|
||||
|
||||
struct tlb_flush_entry {
|
||||
|
||||
@ -17,11 +17,17 @@
|
||||
#include <list.h>
|
||||
|
||||
/* XXX: Physical memory management shouldn't be part of IHK */
|
||||
struct node_distance {
|
||||
int id;
|
||||
int distance;
|
||||
};
|
||||
|
||||
struct ihk_mc_numa_node {
|
||||
int id;
|
||||
int linux_numa_id;
|
||||
int type;
|
||||
struct list_head allocators;
|
||||
struct node_distance *nodes_by_distance;
|
||||
};
|
||||
|
||||
struct ihk_page_allocator_desc {
|
||||
@ -30,7 +36,7 @@ struct ihk_page_allocator_desc {
|
||||
unsigned int count;
|
||||
unsigned int flag;
|
||||
unsigned int shift;
|
||||
ihk_spinlock_t lock;
|
||||
mcs_lock_node_t lock;
|
||||
struct list_head list;
|
||||
|
||||
unsigned long map[0];
|
||||
|
||||
153
lib/include/mc_xpmem.h
Normal file
153
lib/include/mc_xpmem.h
Normal file
@ -0,0 +1,153 @@
|
||||
/**
|
||||
* \file mc_xpmem.h
|
||||
* License details are found in the file LICENSE.
|
||||
* \brief
|
||||
* Cross Partition Memory (XPMEM) structures and macros.
|
||||
*/
|
||||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
|
||||
*/
|
||||
/*
|
||||
* HISTORY
|
||||
*/
|
||||
|
||||
#ifndef _MC_XPMEM_H
|
||||
#define _MC_XPMEM_H
|
||||
|
||||
#ifndef __KERNEL__
|
||||
#include <sys/types.h>
|
||||
#endif
|
||||
|
||||
/*
|
||||
* _IOC definitions for McKernel
|
||||
*/
|
||||
#define _IOC_NRBITS 8
|
||||
#define _IOC_TYPEBITS 8
|
||||
|
||||
#define _IOC_SIZEBITS 14
|
||||
|
||||
#define _IOC_DIRBITS 2
|
||||
|
||||
#define _IOC_NRSHIFT 0
|
||||
#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
|
||||
#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
|
||||
#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
|
||||
|
||||
#define _IOC_NONE 0U
|
||||
|
||||
#define _IOC(dir,type,nr,size) \
|
||||
(((dir) << _IOC_DIRSHIFT) | \
|
||||
((type) << _IOC_TYPESHIFT) | \
|
||||
((nr) << _IOC_NRSHIFT) | \
|
||||
((size) << _IOC_SIZESHIFT))
|
||||
|
||||
#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
|
||||
|
/*
 * basic argument type definitions for McKernel
 */
typedef uint64_t u64;
typedef uint64_t __u64;
typedef int64_t __s64;

/*
 * basic argument type definitions
 */
typedef __s64 xpmem_segid_t;	/* segid returned from xpmem_make() */
typedef __s64 xpmem_apid_t;	/* apid returned from xpmem_get() */

struct xpmem_addr {
	xpmem_apid_t apid;	/* apid that represents memory */
	off_t offset;		/* offset into apid's memory */
};

#define XPMEM_MAXADDR_SIZE	(size_t)(-1L)

/*
 * path to XPMEM device
 */
#define XPMEM_DEV_PATH	"/dev/xpmem"

/*
 * The following are the possible XPMEM related errors.
 */
#define XPMEM_ERRNO_NOPROC	2004	/* unknown thread due to fork() */

/*
 * flags for segment permissions
 */
#define XPMEM_RDONLY	0x1
#define XPMEM_RDWR	0x2

/*
 * Valid permit_type values for xpmem_make().
 */
#define XPMEM_PERMIT_MODE	0x1

/*
 * ioctl() commands used to interface to the kernel module.
 */
#define XPMEM_IOC_MAGIC		'x'
#define XPMEM_CMD_VERSION	_IO(XPMEM_IOC_MAGIC, 0)
#define XPMEM_CMD_MAKE		_IO(XPMEM_IOC_MAGIC, 1)
#define XPMEM_CMD_REMOVE	_IO(XPMEM_IOC_MAGIC, 2)
#define XPMEM_CMD_GET		_IO(XPMEM_IOC_MAGIC, 3)
#define XPMEM_CMD_RELEASE	_IO(XPMEM_IOC_MAGIC, 4)
#define XPMEM_CMD_ATTACH	_IO(XPMEM_IOC_MAGIC, 5)
#define XPMEM_CMD_DETACH	_IO(XPMEM_IOC_MAGIC, 6)

/*
 * Structures used with the preceding ioctl() commands to pass data.
 */
struct xpmem_cmd_make {
	__u64 vaddr;
	size_t size;
	int permit_type;
	__u64 permit_value;
	xpmem_segid_t segid;	/* returned on success */
};

struct xpmem_cmd_remove {
	xpmem_segid_t segid;
};

struct xpmem_cmd_get {
	xpmem_segid_t segid;
	int flags;
	int permit_type;
	__u64 permit_value;
	xpmem_apid_t apid;	/* returned on success */
};

struct xpmem_cmd_release {
	xpmem_apid_t apid;
};

struct xpmem_cmd_attach {
	xpmem_apid_t apid;
	off_t offset;
	size_t size;
	__u64 vaddr;
	int fd;
	int flags;
};

struct xpmem_cmd_detach {
	__u64 vaddr;
};

#ifndef __KERNEL__
extern int xpmem_version(void);
extern xpmem_segid_t xpmem_make(void *, size_t, int, void *);
extern int xpmem_remove(xpmem_segid_t);
extern xpmem_apid_t xpmem_get(xpmem_segid_t, int, int, void *);
extern int xpmem_release(xpmem_apid_t);
extern void *xpmem_attach(struct xpmem_addr, size_t, void *);
extern int xpmem_detach(void *);
#endif

#endif /* _MC_XPMEM_H */

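The user-space wrappers declared at the bottom of the header follow the classic SGI XPMEM flow: an owner exports a region with xpmem_make(), and a consumer turns the returned segid into an apid with xpmem_get() and maps it with xpmem_attach(). A hedged two-sided sketch, assuming these wrappers behave like the stock libxpmem ones (how the segid travels between the processes, e.g. over a pipe or a file, is left out):

/* Usage sketch, not part of the patch. */
#include <stddef.h>
#include <stdint.h>
#include "mc_xpmem.h"

#define REGION_LEN (1UL << 20)		/* 1 MiB, arbitrary for the sketch */

/* owner process: export buf and hand the resulting segid to the consumer */
xpmem_segid_t export_region(void *buf)
{
	/* with XPMEM_PERMIT_MODE the permit value is an octal access mode */
	return xpmem_make(buf, REGION_LEN, XPMEM_PERMIT_MODE, (void *)0666);
}

/* consumer process: turn the segid into a local mapping */
void *import_region(xpmem_segid_t segid)
{
	struct xpmem_addr addr;
	xpmem_apid_t apid;

	apid = xpmem_get(segid, XPMEM_RDWR, XPMEM_PERMIT_MODE, (void *)0666);
	if (apid < 0)
		return NULL;

	addr.apid = apid;
	addr.offset = 0;
	return xpmem_attach(addr, REGION_LEN, NULL);	/* NULL: kernel picks vaddr */
}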
@ -73,7 +73,7 @@ void *__ihk_pagealloc_init(unsigned long start, unsigned long size,
	//kprintf("page allocator @ %lx - %lx (%d)\n", start, start + size,
	//		page_shift);

	ihk_mc_spinlock_init(&desc->lock);
	mcs_lock_init(&desc->lock);

	/* Reserve align padding area */
	for (i = mapsize; i < mapaligned * 8; i++) {
@ -99,12 +99,12 @@ void ihk_pagealloc_destroy(void *__desc)
static unsigned long __ihk_pagealloc_large(struct ihk_page_allocator_desc *desc,
		int npages, int p2align)
{
	unsigned long flags;
	unsigned int i, j, mi;
	int nblocks;
	int nfrags;
	unsigned long mask;
	int mialign;
	unsigned long align_mask = ((PAGE_SIZE << p2align) - 1);
	mcs_lock_node_t node;

	nblocks = (npages / 64);
	mask = -1;
@ -113,14 +113,13 @@ static unsigned long __ihk_pagealloc_large(struct ihk_page_allocator_desc *desc,
		++nblocks;
		mask = (1UL << nfrags) - 1;
	}
	mialign = (p2align <= 6)? 1: (1 << (p2align - 6));

	flags = ihk_mc_spinlock_lock(&desc->lock);
	mcs_lock_lock(&desc->lock, &node);
	for (i = 0, mi = desc->last; i < desc->count; i++, mi++) {
		if (mi >= desc->count) {
			mi = 0;
		}
		if ((mi + nblocks >= desc->count) || (mi % mialign)) {
		if ((mi + nblocks >= desc->count) || (ADDRESS(desc, mi, 0) & align_mask)) {
			continue;
		}
		for (j = mi; j < mi + nblocks - 1; j++) {
@ -133,11 +132,11 @@ static unsigned long __ihk_pagealloc_large(struct ihk_page_allocator_desc *desc,
				desc->map[j] = (unsigned long)-1;
			}
			desc->map[j] |= mask;
			ihk_mc_spinlock_unlock(&desc->lock, flags);
			mcs_lock_unlock(&desc->lock, &node);
			return ADDRESS(desc, mi, 0);
		}
	}
	ihk_mc_spinlock_unlock(&desc->lock, flags);
	mcs_lock_unlock(&desc->lock, &node);

	return 0;
}
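Two independent changes meet in this function: the spinlock becomes an MCS lock (see the pattern note at the end of this diff), and the alignment test moves from the map index to the address itself. A hedged reading of why the latter matters, assuming the ADDRESS() macro (not shown in this diff) resolves to desc->start + (((mi * 64) + j) << desc->shift):

/* Sketch, not from the patch: the old and new alignment tests side by side. */
static int candidate_is_aligned(struct ihk_page_allocator_desc *desc,
		unsigned int mi, int p2align)
{
	unsigned long align_mask = ((PAGE_SIZE << p2align) - 1);

	/* old test: aligns the map index, i.e. only the offset from
	 * desc->start; it gives a misaligned result whenever desc->start
	 * itself is not aligned to (PAGE_SIZE << p2align):
	 *
	 *   int mialign = (p2align <= 6) ? 1 : (1 << (p2align - 6));
	 *   return !(mi % mialign);
	 */

	/* new test: the address that would be returned must itself be aligned */
	return !(ADDRESS(desc, mi, 0) & align_mask);
}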
@ -147,8 +146,9 @@ unsigned long ihk_pagealloc_alloc(void *__desc, int npages, int p2align)
	struct ihk_page_allocator_desc *desc = __desc;
	unsigned int i, mi;
	int j;
	unsigned long v, mask, flags;
	unsigned long v, mask;
	int jalign;
	mcs_lock_node_t node;

	if ((npages >= 32) || (p2align >= 5)) {
		return __ihk_pagealloc_large(desc, npages, p2align);
@ -157,7 +157,7 @@ unsigned long ihk_pagealloc_alloc(void *__desc, int npages, int p2align)
	mask = (1UL << npages) - 1;
	jalign = (p2align <= 0)? 1: (1 << p2align);

	flags = ihk_mc_spinlock_lock(&desc->lock);
	mcs_lock_lock(&desc->lock, &node);
	for (i = 0, mi = desc->last; i < desc->count; i++, mi++) {
		if (mi >= desc->count) {
			mi = 0;
@ -174,12 +174,12 @@ unsigned long ihk_pagealloc_alloc(void *__desc, int npages, int p2align)
			if (!(v & (mask << j))) { /* free */
				desc->map[mi] |= (mask << j);

				ihk_mc_spinlock_unlock(&desc->lock, flags);
				mcs_lock_unlock(&desc->lock, &node);
				return ADDRESS(desc, mi, j);
			}
		}
	}
	ihk_mc_spinlock_unlock(&desc->lock, flags);
	mcs_lock_unlock(&desc->lock, &node);

	/* We use null pointer for failure */
	return 0;
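The small-allocation path above scans each 64-page map word for a contiguous free window: mask = (1UL << npages) - 1 is slid across the word in steps of jalign and a window is free when no bit under the mask is set. A standalone illustration of that window test (the bitmap value is made up for the example):

/* Sketch only: the inner window scan from ihk_pagealloc_alloc(),
 * extracted and runnable on its own. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t v = 0x0000000000000f0fULL;	/* pages 0-3 and 8-11 in use */
	uint64_t mask = (1ULL << 3) - 1;	/* npages = 3 -> 0b111 */
	int jalign = 1 << 1;			/* p2align = 1 -> step 2 */
	int j;

	for (j = 0; j <= 64 - 3; j += jalign) {
		if (!(v & (mask << j))) {	/* 3 consecutive free pages */
			printf("first aligned free run at page offset %d\n", j);
			break;			/* prints 4 for this bitmap */
		}
	}
	return 0;
}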
@ -189,7 +189,7 @@ void ihk_pagealloc_reserve(void *__desc, unsigned long start, unsigned long end)
{
	int i, n;
	struct ihk_page_allocator_desc *desc = __desc;
	unsigned long flags;
	mcs_lock_node_t node;

	n = (end + (1 << desc->shift) - 1 - desc->start) >> desc->shift;
	i = ((start - desc->start) >> desc->shift);
@ -197,7 +197,7 @@ void ihk_pagealloc_reserve(void *__desc, unsigned long start, unsigned long end)
		return;
	}

	flags = ihk_mc_spinlock_lock(&desc->lock);
	mcs_lock_lock(&desc->lock, &node);
	for (; i < n; i++) {
		if (!(i & 63) && i + 63 < n) {
			desc->map[MAP_INDEX(i)] = (unsigned long)-1L;
@ -206,7 +206,7 @@ void ihk_pagealloc_reserve(void *__desc, unsigned long start, unsigned long end)
			desc->map[MAP_INDEX(i)] |= (1UL << MAP_BIT(i));
		}
	}
	ihk_mc_spinlock_unlock(&desc->lock, flags);
	mcs_lock_unlock(&desc->lock, &node);
}

void ihk_pagealloc_free(void *__desc, unsigned long address, int npages)
@ -214,24 +214,24 @@ void ihk_pagealloc_free(void *__desc, unsigned long address, int npages)
	struct ihk_page_allocator_desc *desc = __desc;
	int i;
	unsigned mi;
	unsigned long flags;
	mcs_lock_node_t node;

	/* XXX: Parameter check */
	flags = ihk_mc_spinlock_lock(&desc->lock);
	mcs_lock_lock(&desc->lock, &node);
	mi = (address - desc->start) >> desc->shift;
	for (i = 0; i < npages; i++, mi++) {
		desc->map[MAP_INDEX(mi)] &= ~(1UL << MAP_BIT(mi));
	}
	ihk_mc_spinlock_unlock(&desc->lock, flags);
	mcs_lock_unlock(&desc->lock, &node);
}

unsigned long ihk_pagealloc_count(void *__desc)
{
	struct ihk_page_allocator_desc *desc = __desc;
	unsigned long i, j, n = 0;
	unsigned long flags;
	mcs_lock_node_t node;

	flags = ihk_mc_spinlock_lock(&desc->lock);
	mcs_lock_lock(&desc->lock, &node);
	/* XXX: Very silly counting */
	for (i = 0; i < desc->count; i++) {
		for (j = 0; j < 64; j++) {
@ -240,7 +240,7 @@ unsigned long ihk_pagealloc_count(void *__desc)
			}
		}
	}
	ihk_mc_spinlock_unlock(&desc->lock, flags);
	mcs_lock_unlock(&desc->lock, &node);

	return n;
}
@ -250,10 +250,11 @@ int ihk_pagealloc_query_free(void *__desc)
	struct ihk_page_allocator_desc *desc = __desc;
	unsigned int mi;
	int j;
	unsigned long v, flags;
	unsigned long v;
	int npages = 0;
	mcs_lock_node_t node;

	flags = ihk_mc_spinlock_lock(&desc->lock);
	mcs_lock_lock(&desc->lock, &node);
	for (mi = 0; mi < desc->count; mi++) {

		v = desc->map[mi];
@ -266,7 +267,7 @@ int ihk_pagealloc_query_free(void *__desc)
			}
		}
	}
	ihk_mc_spinlock_unlock(&desc->lock, flags);
	mcs_lock_unlock(&desc->lock, &node);

	return npages;
}
@ -276,11 +277,12 @@ void __ihk_pagealloc_zero_free_pages(void *__desc)
	struct ihk_page_allocator_desc *desc = __desc;
	unsigned int mi;
	int j;
	unsigned long v, flags;
	unsigned long v;
	mcs_lock_node_t node;

	kprintf("zeroing free memory... ");

	flags = ihk_mc_spinlock_lock(&desc->lock);
	mcs_lock_lock(&desc->lock, &node);
	for (mi = 0; mi < desc->count; mi++) {

		v = desc->map[mi];
@ -294,7 +296,7 @@ kprintf("zeroing free memory... ");
			}
		}
	}
	ihk_mc_spinlock_unlock(&desc->lock, flags);
	mcs_lock_unlock(&desc->lock, &node);

	kprintf("\nzeroing done\n");
}

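Every conversion in pagealloc.c follows the same mechanical pattern: the unsigned long flags local becomes an on-stack mcs_lock_node_t, and the lock/unlock pair carries that node, presumably because the MCS variant introduced by this patch keeps the saved interrupt state in the queue node instead of returning it. One compact illustration of the shape (count_used_pages() is a hypothetical helper, not in the patch):

/* Hypothetical helper showing the converted critical-section shape. */
static unsigned long count_used_pages(struct ihk_page_allocator_desc *desc)
{
	unsigned long i, n = 0;
	mcs_lock_node_t node;		/* per-caller MCS queue node, on stack */

	mcs_lock_lock(&desc->lock, &node);
	for (i = 0; i < desc->count; i++)
		n += __builtin_popcountl(desc->map[i]);	/* set bit == used page */
	mcs_lock_unlock(&desc->lock, &node);

	return n;
}

Since each waiter spins on its own cache-line-aligned node rather than on the shared lock word, the queue hand-off touches only two nodes, which is the usual argument for MCS locks on many-core allocators like this one.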