diff --git a/arch/x86/kernel/include/syscall_list.h b/arch/x86/kernel/include/syscall_list.h
index 5b409a5f..275e1e5a 100644
--- a/arch/x86/kernel/include/syscall_list.h
+++ b/arch/x86/kernel/include/syscall_list.h
@@ -22,7 +22,7 @@
 SYSCALL_HANDLED(0, read)
 SYSCALL_DELEGATED(1, write)
-SYSCALL_DELEGATED(2, open)
+SYSCALL_HANDLED(2, open)
 SYSCALL_HANDLED(3, close)
 SYSCALL_DELEGATED(4, stat)
 SYSCALL_DELEGATED(5, fstat)
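Flipping open(2) from SYSCALL_DELEGATED to SYSCALL_HANDLED is the hook for everything below: syscall_list.h is consumed X-macro style, so whichever macro a name is listed under decides whether McKernel dispatches to a local SYSCALL_DECLARE(...) body or forwards the call to Linux. A sketch of the pattern (the macro bodies here are illustrative, not the tree's actual definitions):

	/* Hypothetical consumer of syscall_list.h, shown only to illustrate
	 * the X-macro pattern. Declares a handler for every HANDLED entry: */
	#define SYSCALL_HANDLED(number, name) \
		long sys_##name(int n, ihk_mc_user_context_t *ctx);
	#define SYSCALL_DELEGATED(number, name)	/* forwarded to Linux */
	#include <syscall_list.h>
	#undef SYSCALL_HANDLED
	#undef SYSCALL_DELEGATED

With entry 2 under SYSCALL_HANDLED, syscall 2 now resolves to the SYSCALL_DECLARE(open) body added to kernel/syscall.c further down.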
kprintf("[%d] %s: "format"\n", cpu_local_var(current)->proc->rgid, __func__, ##a) +#else +#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0) +#define ekprintf(...) kprintf(__VA_ARGS__) +#define XPMEM_DEBUG(format, a...) do { if (0) kprintf("\n"); } while (0) +#endif + +//#define USE_DBUG_ON + +#ifdef USE_DBUG_ON +#define DBUG_ON(condition) do { if (condition) kprintf("[%d] BUG: func=%s\n", cpu_local_var(current)->proc->rgid, __func__); } while (0) +#else +#define DBUG_ON(condition) +#endif + +#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) + +#define min(x, y) ({ \ + __typeof__(x) _min1 = (x); \ + __typeof__(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2;}) + +#define max(x, y) ({ \ + __typeof__(x) _max1 = (x); \ + __typeof__(y) _max2 = (y); \ + (void) (&_max1 == &_max2); \ + _max1 > _max2 ? _max1 : _max2;}) + +#define MAX_ERRNO 4095 + +#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO) + +static inline void * ERR_PTR(long error) +{ + return (void *)error; +} + +static inline long PTR_ERR(const void *ptr) +{ + return (long)ptr; +} + +static inline long IS_ERR(const void *ptr) +{ + return IS_ERR_VALUE((unsigned long)ptr); +} + +static inline long IS_ERR_OR_NULL(const void *ptr) +{ + return !ptr || IS_ERR_VALUE((unsigned long)ptr); +} + +/* + * Both the xpmem_segid_t and xpmem_apid_t are of type __s64 and designed + * to be opaque to the user. Both consist of the same underlying fields. + * + * The 'uniq' field is designed to give each segid or apid a unique value. + * Each type is only unique with respect to itself. + * + * An ID is never less than or equal to zero. + */ +struct xpmem_id { + pid_t tgid; /* thread group that owns ID */ + unsigned int uniq; /* this value makes the ID unique */ +}; + +typedef union { + struct xpmem_id xpmem_id; + xpmem_segid_t segid; + xpmem_apid_t apid; +} xpmem_id_t; + +/* Shift INT_MAX by one so we can tell when we overflow. */ +#define XPMEM_MAX_UNIQ_ID (INT_MAX >> 1) + +static inline pid_t xpmem_segid_to_tgid(xpmem_segid_t segid) +{ + DBUG_ON(segid <= 0); + return ((xpmem_id_t *)&segid)->xpmem_id.tgid; +} + +static inline pid_t xpmem_apid_to_tgid(xpmem_apid_t apid) +{ + DBUG_ON(apid <= 0); + return ((xpmem_id_t *)&apid)->xpmem_id.tgid; +} + +/* + * Hash Tables + * + * XPMEM utilizes hash tables to enable faster lookups of list entries. + * These hash tables are implemented as arrays. A simple modulus of the hash + * key yields the appropriate array index. A hash table's array element (i.e., + * hash table bucket) consists of a hash list and the lock that protects it. 
+
+/*
+ * Hash Tables
+ *
+ * XPMEM utilizes hash tables to enable faster lookups of list entries.
+ * These hash tables are implemented as arrays. A simple modulus of the hash
+ * key yields the appropriate array index. A hash table's array element (i.e.,
+ * hash table bucket) consists of a hash list and the lock that protects it.
+ *
+ * XPMEM has the following two hash tables:
+ *
+ * table                  bucket                              key
+ * part->tg_hashtable     list of struct xpmem_thread_group   tgid
+ * tg->ap_hashtable       list of struct xpmem_access_permit  apid.uniq
+ */
+struct xpmem_hashlist {
+	mcs_rwlock_lock_t lock;	/* lock for hash list */
+	struct list_head list;	/* hash list */
+};
+
+#define XPMEM_TG_HASHTABLE_SIZE	8
+#define XPMEM_AP_HASHTABLE_SIZE	8
+
+static inline int xpmem_tg_hashtable_index(pid_t tgid)
+{
+	int index;
+
+	index = (unsigned int)tgid % XPMEM_TG_HASHTABLE_SIZE;
+
+	XPMEM_DEBUG("return: tgid=%lu, index=%d", tgid, index);
+
+	return index;
+}
+
+static inline int xpmem_ap_hashtable_index(xpmem_apid_t apid)
+{
+	int index;
+
+	DBUG_ON(apid <= 0);
+
+	index = ((xpmem_id_t *)&apid)->xpmem_id.uniq % XPMEM_AP_HASHTABLE_SIZE;
+
+	XPMEM_DEBUG("return: apid=%lu, index=%d", apid, index);
+
+	return index;
+}
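+
+/*
+ * Worked example (added for illustration): a process with tgid 4097
+ * hashes to bucket 4097 % 8 == 1 of part->tg_hashtable, and an apid
+ * whose uniq field is 10 hashes to bucket 10 % 8 == 2 of the owning
+ * tg's ap_hashtable.
+ */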
+
+/*
+ * general internal driver structures
+ */
+struct xpmem_thread_group {
+	ihk_spinlock_t lock;		/* tg lock */
+	pid_t tgid;			/* tg's tgid */
+	uid_t uid;			/* tg's uid */
+	gid_t gid;			/* tg's gid */
+	volatile int flags;		/* tg attributes and state */
+	ihk_atomic_t uniq_segid;
+	ihk_atomic_t uniq_apid;
+	mcs_rwlock_lock_t seg_list_lock;
+	struct list_head seg_list;	/* tg's list of segs */
+	ihk_atomic_t refcnt;		/* references to tg */
+	ihk_atomic_t n_pinned;		/* #of pages pinned by this tg */
+	struct list_head tg_hashlist;	/* tg hash list */
+	struct thread *group_leader;	/* thread group leader */
+	struct process_vm *vm;		/* tg's mm */
+	ihk_atomic_t n_recall_PFNs;	/* #of recall of PFNs in progress */
+	struct xpmem_hashlist ap_hashtable[];	/* locks + ap hash lists */
+};
+
+struct xpmem_segment {
+	ihk_spinlock_t lock;		/* seg lock */
+	mcs_rwlock_lock_t seg_lock;	/* seg sema */
+	xpmem_segid_t segid;		/* unique segid */
+	unsigned long vaddr;		/* starting address */
+	size_t size;			/* size of seg */
+	int permit_type;		/* permission scheme */
+	void *permit_value;		/* permission data */
+	volatile int flags;		/* seg attributes and state */
+	ihk_atomic_t refcnt;		/* references to seg */
+	struct xpmem_thread_group *tg;	/* creator tg */
+	struct list_head ap_list;	/* local access permits of seg */
+	struct list_head seg_list;	/* tg's list of segs */
+};
+
+struct xpmem_access_permit {
+	ihk_spinlock_t lock;		/* access permit lock */
+	xpmem_apid_t apid;		/* unique apid */
+	int mode;			/* read/write mode */
+	volatile int flags;		/* access permit attributes and state */
+	ihk_atomic_t refcnt;		/* references to access permit */
+	struct xpmem_segment *seg;	/* seg permitted to be accessed */
+	struct xpmem_thread_group *tg;	/* access permit's tg */
+	struct list_head att_list;	/* atts of this access permit's seg */
+	struct list_head ap_list;	/* access permits linked to seg */
+	struct list_head ap_hashlist;	/* access permit hash list */
+};
+
+struct xpmem_attachment {
+	mcs_rwlock_lock_t at_lock;	/* att lock for serialization */
+	struct mcs_rwlock_node_irqsave at_irqsave;	/* att lock for serialization */
+	unsigned long vaddr;		/* starting address of seg attached */
+	unsigned long at_vaddr;		/* address where seg is attached */
+	size_t at_size;			/* size of seg attachment */
+	struct vm_range *at_vma;	/* vma where seg is attached */
+	volatile int flags;		/* att attributes and state */
+	ihk_atomic_t refcnt;		/* references to att */
+	struct xpmem_access_permit *ap;	/* associated access permit */
+	struct list_head att_list;	/* atts linked to access permit */
+	struct process_vm *vm;		/* mm struct attached to */
+	mcs_rwlock_lock_t invalidate_lock;	/* to serialize page table invalidates */
+};
+
+struct xpmem_partition {
+	ihk_atomic_t n_opened;		/* # of /dev/xpmem opened */
+	struct xpmem_hashlist tg_hashtable[];	/* locks + tg hash lists */
+};
+
+#define XPMEM_FLAG_DESTROYING	0x00040	/* being destroyed */
+#define XPMEM_FLAG_DESTROYED	0x00080	/* 'being destroyed' finished */
+
+#define XPMEM_FLAG_VALIDPTEs	0x00200	/* valid PTEs exist */
+
+struct xpmem_perm {
+	uid_t uid;
+	gid_t gid;
+	unsigned long mode;
+};
+
+#define XPMEM_PERM_IRUSR	00400
+#define XPMEM_PERM_IWUSR	00200
+
+static int xpmem_ioctl(struct mckfd *mckfd, ihk_mc_user_context_t *ctx);
+static int xpmem_close(struct mckfd *mckfd, ihk_mc_user_context_t *ctx);
+
+static int xpmem_init(void);
+static void xpmem_exit(void);
+static int __xpmem_open(void);
+static void xpmem_destroy_tg(struct xpmem_thread_group *);
+
+static int xpmem_make(unsigned long, size_t, int, void *, xpmem_segid_t *);
+static xpmem_segid_t xpmem_make_segid(struct xpmem_thread_group *);
+
+static int xpmem_remove(xpmem_segid_t);
+static void xpmem_remove_seg(struct xpmem_thread_group *,
+	struct xpmem_segment *);
+
+static void xpmem_clear_PTEs(struct xpmem_segment *);
+
+extern struct xpmem_partition *xpmem_my_part;
+
+static struct xpmem_thread_group * __xpmem_tg_ref_by_tgid_nolock_internal(
+	pid_t, int, int);
+
+static inline struct xpmem_thread_group *__xpmem_tg_ref_by_tgid(
+	pid_t tgid,
+	int return_destroying)
+{
+	struct xpmem_thread_group *tg;
+	int index;
+	struct mcs_rwlock_node_irqsave lock;
+
+	XPMEM_DEBUG("call: tgid=%d, return_destroying=%d",
+		tgid, return_destroying);
+
+	index = xpmem_tg_hashtable_index(tgid);
+	mcs_rwlock_reader_lock(&xpmem_my_part->tg_hashtable[index].lock, &lock);
+	tg = __xpmem_tg_ref_by_tgid_nolock_internal(tgid, index,
+		return_destroying);
+	mcs_rwlock_reader_unlock(&xpmem_my_part->tg_hashtable[index].lock,
+		&lock);
+
+	XPMEM_DEBUG("return: tg=0x%p", tg);
+
+	return tg;
+}
+
+static inline struct xpmem_thread_group *__xpmem_tg_ref_by_tgid_nolock(
+	pid_t tgid,
+	int return_destroying)
+{
+	struct xpmem_thread_group *tg;
+
+	XPMEM_DEBUG("call: tgid=%d, return_destroying=%d",
+		tgid, return_destroying);
+
+	tg = __xpmem_tg_ref_by_tgid_nolock_internal(tgid,
+		xpmem_tg_hashtable_index(tgid), return_destroying);
+
+	XPMEM_DEBUG("return: tg=0x%p", tg);
+
+	return tg;
+}
+
+#define xpmem_tg_ref_by_tgid(t)			__xpmem_tg_ref_by_tgid(t, 0)
+#define xpmem_tg_ref_by_tgid_all(t)		__xpmem_tg_ref_by_tgid(t, 1)
+#define xpmem_tg_ref_by_tgid_nolock(t)		__xpmem_tg_ref_by_tgid_nolock(t, 0)
+#define xpmem_tg_ref_by_tgid_all_nolock(t)	__xpmem_tg_ref_by_tgid_nolock(t, 1)
+
+static struct xpmem_thread_group * xpmem_tg_ref_by_segid(xpmem_segid_t);
+static void xpmem_tg_deref(struct xpmem_thread_group *);
+static struct xpmem_segment *xpmem_seg_ref_by_segid(struct xpmem_thread_group *,
+	xpmem_segid_t);
+static void xpmem_seg_deref(struct xpmem_segment *);
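+
+/*
+ * Lifecycle illustration (added for clarity, not in the original source):
+ *
+ *   create:   kmalloc() + xpmem_*_not_destroyable()  -> refcnt = 1
+ *   use:      xpmem_*_ref() / xpmem_*_deref() pairs  -> refcnt >= 1
+ *   destroy:  xpmem_*_destroyable() drops the creation reference;
+ *             the final xpmem_*_deref() then kfree()s the structure.
+ */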
+
+/*
+ * Inlines that mark an internal driver structure as being destroyable or not.
+ * The idea is to set the refcnt to 1 at structure creation time and then
+ * drop that reference at the time the structure is to be destroyed.
+ */
+static inline void xpmem_tg_not_destroyable(
+	struct xpmem_thread_group *tg)
+{
+	ihk_atomic_set(&tg->refcnt, 1);
+
+	XPMEM_DEBUG("return: tg->refcnt=%d", tg->refcnt);
+}
+
+static inline void xpmem_tg_destroyable(
+	struct xpmem_thread_group *tg)
+{
+	XPMEM_DEBUG("call: ");
+
+	xpmem_tg_deref(tg);
+
+	XPMEM_DEBUG("return: ");
+}
+
+static inline void xpmem_seg_not_destroyable(
+	struct xpmem_segment *seg)
+{
+	ihk_atomic_set(&seg->refcnt, 1);
+
+	XPMEM_DEBUG("return: seg->refcnt=%d", seg->refcnt);
+}
+
+static inline void xpmem_seg_destroyable(
+	struct xpmem_segment *seg)
+{
+	XPMEM_DEBUG("call: ");
+
+	xpmem_seg_deref(seg);
+
+	XPMEM_DEBUG("return: ");
+}
+
+/*
+ * Inlines that increment the refcnt for the specified structure.
+ */
+static inline void xpmem_tg_ref(
+	struct xpmem_thread_group *tg)
+{
+	DBUG_ON(ihk_atomic_read(&tg->refcnt) <= 0);
+	ihk_atomic_inc(&tg->refcnt);
+
+	XPMEM_DEBUG("return: tg->refcnt=%d", tg->refcnt);
+}
+
+static inline void xpmem_seg_ref(
+	struct xpmem_segment *seg)
+{
+	DBUG_ON(ihk_atomic_read(&seg->refcnt) <= 0);
+	ihk_atomic_inc(&seg->refcnt);
+
+	XPMEM_DEBUG("return: seg->refcnt=%d", seg->refcnt);
+}
+
+#endif /* _XPMEM_PRIVATE_H */
+
diff --git a/kernel/syscall.c b/kernel/syscall.c
index 6e0cfe92..51a63942 100644
--- a/kernel/syscall.c
+++ b/kernel/syscall.c
@@ -54,6 +54,7 @@
 #include
 #include
 #include
+#include <xpmem.h>
 /* Headers taken from kitten LWK */
 #include
 #include
@@ -1062,7 +1063,7 @@ out:
 	return (int)lerror;
 }
 
-static int do_munmap(void *addr, size_t len)
+int do_munmap(void *addr, size_t len)
 {
 	int error;
 	int ro_freed;
@@ -2696,6 +2697,21 @@
 	return rc;
 }
 
+SYSCALL_DECLARE(open)
+{
+	const char *pathname = (const char *)ihk_mc_syscall_arg0(ctx);
+	long rc;
+
+	dkprintf("open(): pathname=%s\n", pathname);
+	if (!strcmp(pathname, XPMEM_DEV_PATH)) {
+		rc = xpmem_open(ctx);
+	} else {
+		rc = syscall_generic_forwarding(__NR_open, ctx);
+	}
+
+	return rc;
+}
+
 SYSCALL_DECLARE(close)
 {
 	int fd = ihk_mc_syscall_arg0(ctx);
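Taken together with the mcexec change above, the open path works like this (a condensed reading of the patch, not part of it):

	/*
	 * app: open("/dev/xpmem", ...)
	 *   -> syscall 2 is now SYSCALL_HANDLED, so McKernel's
	 *      SYSCALL_DECLARE(open) runs instead of plain delegation
	 *   -> pathname == XPMEM_DEV_PATH: xpmem_open() takes over;
	 *      any other path goes through syscall_generic_forwarding()
	 *   -> xpmem_open() still forwards one __NR_open to Linux to
	 *      obtain a host-side fd; on that side mcexec's chgdevpath()
	 *      rewrites "/dev/xpmem" to "/dev/null", so the host hands
	 *      back a placeholder fd without opening a real xpmem device
	 */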
diff --git a/kernel/xpmem.c b/kernel/xpmem.c
new file mode 100644
index 00000000..129aaae0
--- /dev/null
+++ b/kernel/xpmem.c
@@ -0,0 +1,739 @@
+/**
+ * \file xpmem.c
+ *  License details are found in the file LICENSE.
+ * \brief
+ *  Cross Partition Memory (XPMEM) support.
+ */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright 2010, 2014 Cray Inc. All Rights Reserved
+ * Copyright 2015-2016 Los Alamos National Security, LLC. All rights reserved.
+ */
+/*
+ * HISTORY
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+struct xpmem_partition *xpmem_my_part = NULL;	/* pointer to this partition */
+
+
+int xpmem_open(
+	ihk_mc_user_context_t *ctx)
+{
+	const char *pathname = (const char *)ihk_mc_syscall_arg0(ctx);
+	int flags = (int)ihk_mc_syscall_arg1(ctx);
+	int ret;
+	struct thread *thread = cpu_local_var(current);
+	struct process *proc = thread->proc;
+	struct syscall_request request IHK_DMA_ALIGN;
+	int fd;
+	struct mckfd *mckfd;
+	long irqstate;
+
+	XPMEM_DEBUG("call: pathname=%s, flags=%d", pathname, flags);
+
+	if (!xpmem_my_part) {
+		ret = xpmem_init();
+		if (ret) {
+			return ret;
+		}
+	}
+
+	request.number = __NR_open;
+	request.args[0] = (unsigned long)pathname;
+	request.args[1] = flags;
+	fd = do_syscall(&request, ihk_mc_get_processor_id(), 0);
+	if (fd < 0) {
+		XPMEM_DEBUG("__NR_open error: fd=%d", fd);
+		return fd;
+	}
+
+	ret = __xpmem_open();
+	if (ret) {
+		XPMEM_DEBUG("return: ret=%d", ret);
+		return ret;
+	}
+
+	mckfd = kmalloc(sizeof(struct mckfd), IHK_MC_AP_NOWAIT);
+	if (!mckfd) {
+		return -ENOMEM;
+	}
+	XPMEM_DEBUG("kmalloc(): mckfd=0x%p", mckfd);
+	memset(mckfd, 0, sizeof(struct mckfd));
+	mckfd->fd = fd;
+	mckfd->sig_no = -1;
+	mckfd->ioctl_cb = xpmem_ioctl;
+	mckfd->close_cb = xpmem_close;
+	irqstate = ihk_mc_spinlock_lock(&proc->mckfd_lock);
+
+	if (proc->mckfd == NULL) {
+		proc->mckfd = mckfd;
+		mckfd->next = NULL;
+	} else {
+		mckfd->next = proc->mckfd;
+		proc->mckfd = mckfd;
+	}
+
+	ihk_mc_spinlock_unlock(&proc->mckfd_lock, irqstate);
+
+	ihk_atomic_inc_return(&xpmem_my_part->n_opened);
+
+	XPMEM_DEBUG("return: ret=%d", mckfd->fd);
+
+	return mckfd->fd;
+}
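+
+/*
+ * Note (added for clarity): the fd returned above is the host-side fd
+ * obtained from the forwarded __NR_open. Registering it in proc->mckfd
+ * with ioctl_cb/close_cb set is what routes later ioctl(2) and close(2)
+ * calls on this fd to xpmem_ioctl()/xpmem_close() below instead of the
+ * generic syscall forwarding path.
+ */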
+
+
+static int xpmem_ioctl(
+	struct mckfd *mckfd,
+	ihk_mc_user_context_t *ctx)
+{
+	int ret;
+	unsigned int cmd = ihk_mc_syscall_arg1(ctx);
+	unsigned long arg = ihk_mc_syscall_arg2(ctx);
+
+	XPMEM_DEBUG("call: cmd=0x%x, arg=0x%lx", cmd, arg);
+
+	switch (cmd) {
+	case XPMEM_CMD_VERSION: {
+		ret = XPMEM_CURRENT_VERSION;
+
+		XPMEM_DEBUG("return: cmd=0x%x, ret=0x%lx", cmd, ret);
+
+		return ret;
+	}
+	case XPMEM_CMD_MAKE: {
+		struct xpmem_cmd_make make_info;
+		xpmem_segid_t segid = 0;
+
+		if (copy_from_user(&make_info, (void __user *)arg,
+			sizeof(struct xpmem_cmd_make)))
+			return -EFAULT;
+
+		ret = xpmem_make(make_info.vaddr, make_info.size,
+			make_info.permit_type,
+			(void *)make_info.permit_value, &segid);
+		if (ret != 0) {
+			XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
+			return ret;
+		}
+
+		if (copy_to_user(&((struct xpmem_cmd_make __user *)arg)->segid,
+			(void *)&segid, sizeof(xpmem_segid_t))) {
+			(void)xpmem_remove(segid);
+			return -EFAULT;
+		}
+
+		XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
+
+		return ret;
+	}
+	case XPMEM_CMD_REMOVE: {
+		struct xpmem_cmd_remove remove_info;
+
+		if (copy_from_user(&remove_info, (void __user *)arg,
+			sizeof(struct xpmem_cmd_remove)))
+			return -EFAULT;
+
+		ret = xpmem_remove(remove_info.segid);
+
+		XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
+
+		return ret;
+	}
+	case XPMEM_CMD_GET: {
+		struct xpmem_cmd_get get_info;
+//		xpmem_apid_t apid = 0;
+
+		if (copy_from_user(&get_info, (void __user *)arg,
+			sizeof(struct xpmem_cmd_get)))
+			return -EFAULT;
+
+//		ret = xpmem_get(get_info.segid, get_info.flags,
+//			get_info.permit_type,
+//			(void *)get_info.permit_value, &apid); // TODO
+		ret = -EINVAL;
+		if (ret != 0) {
+			XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
+			return ret;
+		}
+
+//		if (copy_to_user(&((struct xpmem_cmd_get __user *)arg)->apid,
+//			(void *)&apid, sizeof(xpmem_apid_t))) {
+//			(void)xpmem_release(apid);
+//			return -EFAULT;
+//		}
+
+		XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
+
+		return ret;
+	}
+	case XPMEM_CMD_RELEASE: {
+		struct xpmem_cmd_release release_info;
+
+		if (copy_from_user(&release_info, (void __user *)arg,
+			sizeof(struct xpmem_cmd_release)))
+			return -EFAULT;
+
+//		ret = xpmem_release(release_info.apid); // TODO
+		ret = -EINVAL;
+
+		XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
+
+		return ret;
+	}
+	case XPMEM_CMD_ATTACH: {
+		struct xpmem_cmd_attach attach_info;
+//		unsigned long at_vaddr = 0;
+
+		if (copy_from_user(&attach_info, (void __user *)arg,
+			sizeof(struct xpmem_cmd_attach)))
+			return -EFAULT;
+
+//		ret = xpmem_attach(mckfd, attach_info.apid, attach_info.offset,
+//			attach_info.size, attach_info.vaddr,
+//			attach_info.fd, attach_info.flags,
+//			&at_vaddr); // TODO
+		ret = -EINVAL;
+		if (ret != 0) {
+			XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
+			return ret;
+		}
+
+//		if (copy_to_user(
+//			&((struct xpmem_cmd_attach __user *)arg)->vaddr,
+//			(void *)&at_vaddr, sizeof(unsigned long))) {
+//			(void)xpmem_detach(at_vaddr);
+//			return -EFAULT;
+//		}
+
+		XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
+
+		return ret;
+	}
+	case XPMEM_CMD_DETACH: {
+		struct xpmem_cmd_detach detach_info;
+
+		if (copy_from_user(&detach_info, (void __user *)arg,
+			sizeof(struct xpmem_cmd_detach)))
+			return -EFAULT;
+
+//		ret = xpmem_detach(detach_info.vaddr); // TODO
+		ret = -EINVAL;
+
+		XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
+
+		return ret;
+	}
+	default:
+		break;
+	}
+
+	XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, -EINVAL);
+
+	return -EINVAL;
+}
+
+
+static int xpmem_close(
+	struct mckfd *mckfd,
+	ihk_mc_user_context_t *ctx)
+{
+	struct xpmem_thread_group *tg;
+	int index;
+	struct mcs_rwlock_node_irqsave lock;
+	int n_opened;
+
+	XPMEM_DEBUG("call: fd=%d", mckfd->fd);
+
+	n_opened = ihk_atomic_dec_return(&xpmem_my_part->n_opened);
+	if (n_opened) {
+		XPMEM_DEBUG("return: ret=%d, n_opened=%d", 0, n_opened);
+		return 0;
+	}
+	XPMEM_DEBUG("n_opened=%d", n_opened);
+
+	index = xpmem_tg_hashtable_index(cpu_local_var(current)->proc->pid);
+
+	mcs_rwlock_writer_lock(&xpmem_my_part->tg_hashtable[index].lock, &lock);
+
+	tg = xpmem_tg_ref_by_tgid_all_nolock(
+		cpu_local_var(current)->proc->pid);
+	if (IS_ERR(tg)) {
+		mcs_rwlock_writer_unlock(
+			&xpmem_my_part->tg_hashtable[index].lock, &lock);
+		return 0;
+	}
+
+	list_del_init(&tg->tg_hashlist);
+
+	mcs_rwlock_writer_unlock(&xpmem_my_part->tg_hashtable[index].lock,
+		&lock);
+
+	XPMEM_DEBUG("tg->vm=0x%p", tg->vm);
+
+	xpmem_destroy_tg(tg);
+
+	if (!n_opened) {
+		xpmem_exit();
+	}
+
+	XPMEM_DEBUG("return: ret=%d", 0);
+
+	return 0;
+}
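+
+/*
+ * Allocation note (added for clarity): both xpmem_partition and
+ * xpmem_thread_group end in a flexible array member of struct
+ * xpmem_hashlist, so xpmem_init() and __xpmem_open() below size their
+ * kmalloc() as sizeof(struct) + N * sizeof(struct xpmem_hashlist),
+ * with N = XPMEM_TG_HASHTABLE_SIZE or XPMEM_AP_HASHTABLE_SIZE (both 8).
+ */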
+
+
+static int xpmem_init(void)
+{
+	int i;
+
+	XPMEM_DEBUG("call: ");
+
+	xpmem_my_part = kmalloc(sizeof(struct xpmem_partition) +
+		sizeof(struct xpmem_hashlist) * XPMEM_TG_HASHTABLE_SIZE,
+		IHK_MC_AP_NOWAIT);
+	if (xpmem_my_part == NULL) {
+		return -ENOMEM;
+	}
+	XPMEM_DEBUG("kmalloc(): xpmem_my_part=0x%p", xpmem_my_part);
+	memset(xpmem_my_part, 0, sizeof(struct xpmem_partition) +
+		sizeof(struct xpmem_hashlist) * XPMEM_TG_HASHTABLE_SIZE);
+
+	for (i = 0; i < XPMEM_TG_HASHTABLE_SIZE; i++) {
+		mcs_rwlock_init(&xpmem_my_part->tg_hashtable[i].lock);
+		INIT_LIST_HEAD(&xpmem_my_part->tg_hashtable[i].list);
+	}
+
+	ihk_atomic_set(&xpmem_my_part->n_opened, 0);
+
+	XPMEM_DEBUG("return: ret=%d", 0);
+
+	return 0;
+}
+
+
+static void xpmem_exit(void)
+{
+	XPMEM_DEBUG("call: ");
+
+	if (xpmem_my_part) {
+		XPMEM_DEBUG("kfree(): 0x%p", xpmem_my_part);
+		kfree(xpmem_my_part);
+		xpmem_my_part = NULL;
+	}
+
+	XPMEM_DEBUG("return: ");
+}
+
+
+static int __xpmem_open(void)
+{
+	struct xpmem_thread_group *tg;
+	int index;
+	struct mcs_rwlock_node_irqsave lock;
+
+	XPMEM_DEBUG("call: ");
+
+	tg = xpmem_tg_ref_by_tgid(cpu_local_var(current)->proc->pid);
+	if (!IS_ERR(tg)) {
+		xpmem_tg_deref(tg);
+		XPMEM_DEBUG("return: ret=%d, tg=0x%p", 0, tg);
+		return 0;
+	}
+
+	tg = kmalloc(sizeof(struct xpmem_thread_group) +
+		sizeof(struct xpmem_hashlist) * XPMEM_AP_HASHTABLE_SIZE,
+		IHK_MC_AP_NOWAIT);
+	if (tg == NULL) {
+		return -ENOMEM;
+	}
+	XPMEM_DEBUG("kmalloc(): tg=0x%p", tg);
+	memset(tg, 0, sizeof(struct xpmem_thread_group) +
+		sizeof(struct xpmem_hashlist) * XPMEM_AP_HASHTABLE_SIZE);
+
+	ihk_mc_spinlock_init(&tg->lock);
+	tg->tgid = cpu_local_var(current)->proc->pid;
+	tg->uid = cpu_local_var(current)->proc->ruid;
+	tg->gid = cpu_local_var(current)->proc->rgid;
+	ihk_atomic_set(&tg->uniq_segid, 0);
+	ihk_atomic_set(&tg->uniq_apid, 0);
+	mcs_rwlock_init(&tg->seg_list_lock);
+	INIT_LIST_HEAD(&tg->seg_list);
+	ihk_atomic_set(&tg->n_pinned, 0);
+	INIT_LIST_HEAD(&tg->tg_hashlist);
+	tg->vm = cpu_local_var(current)->vm;
+	ihk_atomic_set(&tg->n_recall_PFNs, 0);
+
+	for (index = 0; index < XPMEM_AP_HASHTABLE_SIZE; index++) {
+		mcs_rwlock_init(&tg->ap_hashtable[index].lock);
+		INIT_LIST_HEAD(&tg->ap_hashtable[index].list);
+	}
+
+	xpmem_tg_not_destroyable(tg);
+
+	index = xpmem_tg_hashtable_index(tg->tgid);
+	mcs_rwlock_writer_lock(&xpmem_my_part->tg_hashtable[index].lock, &lock);
+
+	list_add_tail(&tg->tg_hashlist,
+		&xpmem_my_part->tg_hashtable[index].list);
+
+	mcs_rwlock_writer_unlock(&xpmem_my_part->tg_hashtable[index].lock,
+		&lock);
+
+	tg->group_leader = cpu_local_var(current);
+
+	XPMEM_DEBUG("return: ret=%d", 0);
+
+	return 0;
+}
+
+
+static void xpmem_destroy_tg(
+	struct xpmem_thread_group *tg)
+{
+	XPMEM_DEBUG("call: tg=0x%p", tg);
+
+	XPMEM_DEBUG("tg->vm=0x%p", tg->vm);
+
+	xpmem_tg_destroyable(tg);
+	xpmem_tg_deref(tg);
+
+	XPMEM_DEBUG("return: ");
+}
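+
+/*
+ * Flow sketch (added for clarity): xpmem_make() below implements
+ * XPMEM_CMD_MAKE. It validates the request (XPMEM_PERMIT_MODE only,
+ * octal mode bits, non-zero page-aligned vaddr/size), allocates a
+ * segid via xpmem_make_segid(), then links a new xpmem_segment onto
+ * the calling tg's seg_list. The pages themselves are not touched
+ * until an attach, which this patch still stubs out.
+ */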
+
+
+static int xpmem_make(
+	unsigned long vaddr,
+	size_t size,
+	int permit_type,
+	void *permit_value,
+	xpmem_segid_t *segid_p)
+{
+	xpmem_segid_t segid;
+	struct xpmem_thread_group *seg_tg;
+	struct xpmem_segment *seg;
+	struct mcs_rwlock_node_irqsave lock;
+
+	XPMEM_DEBUG("call: vaddr=0x%lx, size=%lu, permit_type=%d, "
+		"permit_value=0%04lo",
+		vaddr, size, permit_type,
+		(unsigned long)(uintptr_t)permit_value);
+
+	if (permit_type != XPMEM_PERMIT_MODE ||
+		((unsigned long)(uintptr_t)permit_value & ~00777) ||
+		size == 0) {
+		XPMEM_DEBUG("return: ret=%d", -EINVAL);
+		return -EINVAL;
+	}
+
+	seg_tg = xpmem_tg_ref_by_tgid(cpu_local_var(current)->proc->pid);
+	if (IS_ERR(seg_tg)) {
+		DBUG_ON(PTR_ERR(seg_tg) != -ENOENT);
+		return -XPMEM_ERRNO_NOPROC;
+	}
+
+	/*
+	 * The start of the segment must be page aligned and it must be a
+	 * multiple of pages in size.
+	 */
+	if (offset_in_page(vaddr) != 0 || offset_in_page(size) != 0) {
+		xpmem_tg_deref(seg_tg);
+		XPMEM_DEBUG("return: ret=%d", -EINVAL);
+		return -EINVAL;
+	}
+
+	segid = xpmem_make_segid(seg_tg);
+	if (segid < 0) {
+		xpmem_tg_deref(seg_tg);
+		return segid;
+	}
+
+	/* create a new struct xpmem_segment structure with a unique segid */
+	seg = kmalloc(sizeof(struct xpmem_segment), IHK_MC_AP_NOWAIT);
+	if (seg == NULL) {
+		xpmem_tg_deref(seg_tg);
+		return -ENOMEM;
+	}
+	XPMEM_DEBUG("kmalloc(): seg=0x%p", seg);
+	memset(seg, 0, sizeof(struct xpmem_segment));
+
+	ihk_mc_spinlock_init(&seg->lock);
+	mcs_rwlock_init(&seg->seg_lock);
+	seg->segid = segid;
+	seg->vaddr = vaddr;
+	seg->size = size;
+	seg->permit_type = permit_type;
+	seg->permit_value = permit_value;
+	seg->tg = seg_tg;
+	INIT_LIST_HEAD(&seg->ap_list);
+	INIT_LIST_HEAD(&seg->seg_list);
+
+	xpmem_seg_not_destroyable(seg);
+
+	/* add seg to its tg's list of segs */
+	mcs_rwlock_writer_lock(&seg_tg->seg_list_lock, &lock);
+	list_add_tail(&seg->seg_list, &seg_tg->seg_list);
+	mcs_rwlock_writer_unlock(&seg_tg->seg_list_lock, &lock);
+
+	xpmem_tg_deref(seg_tg);
+
+	*segid_p = segid;
+
+	XPMEM_DEBUG("return: ret=%d, segid=0x%lx", 0, *segid_p);
+
+	return 0;
+}
+
+
+static xpmem_segid_t xpmem_make_segid(
+	struct xpmem_thread_group *seg_tg)
+{
+	struct xpmem_id segid;
+	xpmem_segid_t *segid_p = (xpmem_segid_t *)&segid;
+	int uniq;
+
+	XPMEM_DEBUG("call: seg_tg=0x%p, uniq_segid=%d",
+		seg_tg, ihk_atomic_read(&seg_tg->uniq_segid));
+
+	DBUG_ON(sizeof(struct xpmem_id) != sizeof(xpmem_segid_t));
+
+	uniq = ihk_atomic_inc_return(&seg_tg->uniq_segid);
+	if (uniq > XPMEM_MAX_UNIQ_ID) {
+		ihk_atomic_dec(&seg_tg->uniq_segid);
+		return -EBUSY;
+	}
+
+	*segid_p = 0;
+	segid.tgid = seg_tg->tgid;
+	segid.uniq = (unsigned long)uniq;
+
+	DBUG_ON(*segid_p <= 0);
+
+	XPMEM_DEBUG("return: segid=0x%lx, segid.tgid=%d, segid.uniq=%d",
+		*segid_p, segid.tgid, segid.uniq);
+
+	return *segid_p;
+}
+
+
+static int xpmem_remove(
+	xpmem_segid_t segid)
+{
+	struct xpmem_thread_group *seg_tg;
+	struct xpmem_segment *seg;
+
+	XPMEM_DEBUG("call: segid=0x%lx", segid);
+
+	if (segid <= 0) {
+		XPMEM_DEBUG("return: ret=%d", -EINVAL);
+		return -EINVAL;
+	}
+
+	seg_tg = xpmem_tg_ref_by_segid(segid);
+	if (IS_ERR(seg_tg))
+		return PTR_ERR(seg_tg);
+
+	if (cpu_local_var(current)->proc->pid != seg_tg->tgid) {
+		xpmem_tg_deref(seg_tg);
+		XPMEM_DEBUG("return: ret=%d", -EACCES);
+		return -EACCES;
+	}
+
+	seg = xpmem_seg_ref_by_segid(seg_tg, segid);
+	if (IS_ERR(seg)) {
+		xpmem_tg_deref(seg_tg);
+		return PTR_ERR(seg);
+	}
+	DBUG_ON(seg->tg != seg_tg);
+
+	xpmem_remove_seg(seg_tg, seg);
+	xpmem_seg_deref(seg);
+	xpmem_tg_deref(seg_tg);
+
+	XPMEM_DEBUG("return: ret=%d", 0);
+
+	return 0;
+}
+
+
+static void xpmem_remove_seg(
+	struct xpmem_thread_group *seg_tg,
+	struct xpmem_segment *seg)
+{
+	struct mcs_rwlock_node_irqsave seg_lock;
+	struct mcs_rwlock_node_irqsave lock;
+
+	DBUG_ON(ihk_atomic_read(&seg->refcnt) <= 0);
+
+	XPMEM_DEBUG("call: tgid=%d, segid=0x%lx", seg_tg->tgid, seg->segid);
+
+	ihk_mc_spinlock_lock(&seg->lock);
+	if (seg->flags & XPMEM_FLAG_DESTROYING) {
+		ihk_mc_spinlock_unlock_noirq(&seg->lock);
+		schedule();
+		return;
+	}
+	seg->flags |= XPMEM_FLAG_DESTROYING;
+	ihk_mc_spinlock_unlock_noirq(&seg->lock);
+
+	mcs_rwlock_writer_lock(&seg->seg_lock, &seg_lock);
+
+	/* unpin pages and clear PTEs for each attachment to this segment */
+	xpmem_clear_PTEs(seg);
+
+	/* indicate that the segment has been destroyed */
+	ihk_mc_spinlock_lock(&seg->lock);
+	seg->flags |= XPMEM_FLAG_DESTROYED;
+	ihk_mc_spinlock_unlock_noirq(&seg->lock);
+
+	/* Remove segment structure from its tg's list of segs */
+	mcs_rwlock_writer_lock(&seg_tg->seg_list_lock, &lock);
+	list_del_init(&seg->seg_list);
+	mcs_rwlock_writer_unlock(&seg_tg->seg_list_lock, &lock);
+
+	mcs_rwlock_writer_unlock(&seg->seg_lock, &seg_lock);
+
+	xpmem_seg_destroyable(seg);
+
+	XPMEM_DEBUG("return: ");
+}
+
+
+static void xpmem_clear_PTEs(
+	struct xpmem_segment *seg)
+{
+	XPMEM_DEBUG("call: seg=0x%p", seg);
+
+//	xpmem_clear_PTEs_range(seg, seg->vaddr, seg->vaddr + seg->size, 0); // TODO
+
+	XPMEM_DEBUG("return: ");
+}
+
+
+static struct xpmem_thread_group * __xpmem_tg_ref_by_tgid_nolock_internal(
+	pid_t tgid,
+	int index,
+	int return_destroying)
+{
+	struct xpmem_thread_group *tg;
+
+	XPMEM_DEBUG("call: tgid=%d, index=%d, return_destroying=%d",
+		tgid, index, return_destroying);
+
+	list_for_each_entry(tg, &xpmem_my_part->tg_hashtable[index].list,
+		tg_hashlist) {
+		if (tg->tgid == tgid) {
+			if ((tg->flags & XPMEM_FLAG_DESTROYING) &&
+				!return_destroying) {
+				continue;
+			}
+
+			xpmem_tg_ref(tg);
+
+			XPMEM_DEBUG("return: tg=0x%p", tg);
+			return tg;
+		}
+	}
+
+	XPMEM_DEBUG("return: tg=0x%p", ERR_PTR(-ENOENT));
+
+	return ERR_PTR(-ENOENT);
+}
+
+
+static struct xpmem_thread_group * xpmem_tg_ref_by_segid(
+	xpmem_segid_t segid)
+{
+	struct xpmem_thread_group *tg;
+
+	XPMEM_DEBUG("call: segid=0x%lx", segid);
+
+	tg = xpmem_tg_ref_by_tgid(xpmem_segid_to_tgid(segid));
+
+	XPMEM_DEBUG("return: tg=0x%p", tg);
+
+	return tg;
+}
+
+
+static void xpmem_tg_deref(
+	struct xpmem_thread_group *tg)
+{
+	XPMEM_DEBUG("call: tg=0x%p", tg);
+
+	DBUG_ON(ihk_atomic_read(&tg->refcnt) <= 0);
+	if (ihk_atomic_dec_return(&tg->refcnt) != 0) {
+		XPMEM_DEBUG("return: tg->refcnt=%d", tg->refcnt);
+		return;
+	}
+
+	XPMEM_DEBUG("kfree(): tg=0x%p", tg);
+	kfree(tg);
+
+	XPMEM_DEBUG("return: ");
+}
+
+
+static struct xpmem_segment * xpmem_seg_ref_by_segid(
+	struct xpmem_thread_group *seg_tg,
+	xpmem_segid_t segid)
+{
+	struct xpmem_segment *seg;
+	struct mcs_rwlock_node_irqsave lock;
+
+	XPMEM_DEBUG("call: seg_tg=0x%p, segid=0x%lx", seg_tg, segid);
+
+	mcs_rwlock_reader_lock(&seg_tg->seg_list_lock, &lock);
+
+	list_for_each_entry(seg, &seg_tg->seg_list, seg_list) {
+		if (seg->segid == segid) {
+			if (seg->flags & XPMEM_FLAG_DESTROYING)
+				continue;
+
+			xpmem_seg_ref(seg);
+			mcs_rwlock_reader_unlock(&seg_tg->seg_list_lock, &lock);
+			return seg;
+		}
+	}
+
+	mcs_rwlock_reader_unlock(&seg_tg->seg_list_lock, &lock);
+
+	return ERR_PTR(-ENOENT);
+}
+
+
+static void xpmem_seg_deref(
+	struct xpmem_segment *seg)
+{
+	XPMEM_DEBUG("call: seg=0x%p", seg);
+
+	DBUG_ON(ihk_atomic_read(&seg->refcnt) <= 0);
+	if (ihk_atomic_dec_return(&seg->refcnt) != 0) {
+		XPMEM_DEBUG("return: seg->refcnt=%d", seg->refcnt);
+		return;
+	}
+
+	DBUG_ON(!(seg->flags & XPMEM_FLAG_DESTROYING));
+
+	XPMEM_DEBUG("kfree(): seg=0x%p", seg);
+	kfree(seg);
+
+	XPMEM_DEBUG("return: ");
+}
+
diff --git a/lib/include/mc_xpmem.h b/lib/include/mc_xpmem.h
new file mode 100644
index 00000000..fdde30a3
--- /dev/null
+++ b/lib/include/mc_xpmem.h
@@ -0,0 +1,153 @@
+/**
+ * \file mc_xpmem.h
+ *  License details are found in the file LICENSE.
+ * \brief
+ *  Cross Partition Memory (XPMEM) structures and macros.
+ */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
+ */
+/*
+ * HISTORY
+ */
+
+#ifndef _MC_XPMEM_H
+#define _MC_XPMEM_H
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#endif
+
+/*
+ * _IOC definitions for McKernel
+ */
+#define _IOC_NRBITS	8
+#define _IOC_TYPEBITS	8
+
+#define _IOC_SIZEBITS	14
+
+#define _IOC_DIRBITS	2
+
+#define _IOC_NRSHIFT	0
+#define _IOC_TYPESHIFT	(_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT	(_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT	(_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+#define _IOC_NONE	0U
+
+#define _IOC(dir,type,nr,size) \
+	(((dir) << _IOC_DIRSHIFT) | \
+	 ((type) << _IOC_TYPESHIFT) | \
+	 ((nr) << _IOC_NRSHIFT) | \
+	 ((size) << _IOC_SIZESHIFT))
+
+#define _IO(type,nr)	_IOC(_IOC_NONE,(type),(nr),0)
+
+/*
+ * basic argument type definitions for McKernel
+ */
+typedef uint64_t u64;
+typedef uint64_t __u64;
+typedef int64_t __s64;
+
+/*
+ * basic argument type definitions
+ */
+typedef __s64 xpmem_segid_t;	/* segid returned from xpmem_make() */
+typedef __s64 xpmem_apid_t;	/* apid returned from xpmem_get() */
+
+struct xpmem_addr {
+	xpmem_apid_t apid;	/* apid that represents memory */
+	off_t offset;		/* offset into apid's memory */
+};
+
+#define XPMEM_MAXADDR_SIZE	(size_t)(-1L)
+
+/*
+ * path to XPMEM device
+ */
+#define XPMEM_DEV_PATH	"/dev/xpmem"
+
+/*
+ * The following are the possible XPMEM related errors.
+ */
+#define XPMEM_ERRNO_NOPROC	2004	/* unknown thread due to fork() */
+
+/*
+ * flags for segment permissions
+ */
+#define XPMEM_RDONLY	0x1
+#define XPMEM_RDWR	0x2
+
+/*
+ * Valid permit_type values for xpmem_make().
+ */
+#define XPMEM_PERMIT_MODE	0x1
+
+/*
+ * ioctl() commands used to interface to the kernel module.
+ */
+#define XPMEM_IOC_MAGIC		'x'
+#define XPMEM_CMD_VERSION	_IO(XPMEM_IOC_MAGIC, 0)
+#define XPMEM_CMD_MAKE		_IO(XPMEM_IOC_MAGIC, 1)
+#define XPMEM_CMD_REMOVE	_IO(XPMEM_IOC_MAGIC, 2)
+#define XPMEM_CMD_GET		_IO(XPMEM_IOC_MAGIC, 3)
+#define XPMEM_CMD_RELEASE	_IO(XPMEM_IOC_MAGIC, 4)
+#define XPMEM_CMD_ATTACH	_IO(XPMEM_IOC_MAGIC, 5)
+#define XPMEM_CMD_DETACH	_IO(XPMEM_IOC_MAGIC, 6)
+
+/*
+ * Structures used with the preceding ioctl() commands to pass data.
+ */
+struct xpmem_cmd_make {
+	__u64 vaddr;
+	size_t size;
+	int permit_type;
+	__u64 permit_value;
+	xpmem_segid_t segid;	/* returned on success */
+};
+
+struct xpmem_cmd_remove {
+	xpmem_segid_t segid;
+};
+
+struct xpmem_cmd_get {
+	xpmem_segid_t segid;
+	int flags;
+	int permit_type;
+	__u64 permit_value;
+	xpmem_apid_t apid;	/* returned on success */
+};
+
+struct xpmem_cmd_release {
+	xpmem_apid_t apid;
+};
+
+struct xpmem_cmd_attach {
+	xpmem_apid_t apid;
+	off_t offset;
+	size_t size;
+	__u64 vaddr;
+	int fd;
+	int flags;
+};
+
+struct xpmem_cmd_detach {
+	__u64 vaddr;
+};
+
+#ifndef __KERNEL__
+extern int xpmem_version(void);
+extern xpmem_segid_t xpmem_make(void *, size_t, int, void *);
+extern int xpmem_remove(xpmem_segid_t);
+extern xpmem_apid_t xpmem_get(xpmem_segid_t, int, int, void *);
+extern int xpmem_release(xpmem_apid_t);
+extern void *xpmem_attach(struct xpmem_addr, size_t, void *);
+extern int xpmem_detach(void *);
+#endif
+
+#endif /* _MC_XPMEM_H */
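Not part of the patch: a minimal user-space sketch of the ioctl path that is actually wired up so far (VERSION, MAKE, REMOVE; GET/RELEASE/ATTACH/DETACH still return -EINVAL in xpmem_ioctl() above). The "mc_xpmem.h" include path is illustrative, and the program assumes a 4 KiB page size; it must run under McKernel via mcexec so that open("/dev/xpmem") is handled by the new SYSCALL_DECLARE(open):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include "mc_xpmem.h"

	int main(void)
	{
		/* Routed to xpmem_open() by the in-kernel open() handler. */
		int fd = open(XPMEM_DEV_PATH, O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* xpmem_ioctl() returns XPMEM_CURRENT_VERSION (0x00026003). */
		printf("XPMEM version: 0x%x\n", ioctl(fd, XPMEM_CMD_VERSION));

		/* Share one page of our address space: vaddr and size must be
		 * page aligned, permit_value must be octal mode bits. */
		void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (page == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		struct xpmem_cmd_make make;
		memset(&make, 0, sizeof(make));
		make.vaddr = (__u64)page;
		make.size = 4096;
		make.permit_type = XPMEM_PERMIT_MODE;
		make.permit_value = 0600;
		if (ioctl(fd, XPMEM_CMD_MAKE, &make) == 0) {
			printf("segid=0x%llx\n", (unsigned long long)make.segid);

			struct xpmem_cmd_remove rem = { .segid = make.segid };
			ioctl(fd, XPMEM_CMD_REMOVE, &rem);
		}

		close(fd);	/* last close tears the partition down */
		return 0;
	}

The libxpmem-style prototypes at the bottom of mc_xpmem.h (xpmem_make(), xpmem_get(), ...) are the intended user API over this ioctl interface; the raw-ioctl form is used here only because the attach/get side is not yet implemented in this patch.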