Compare commits
126 Commits
| SHA1 |
|---|
| eca4018ecb |
| e936b2ebe1 |
| d8112f92f8 |
| 1076010de4 |
| da4a5ec44b |
| d35aa9b100 |
| ba8dbf1b19 |
| 6213f0e488 |
| 4ef82c2683 |
| e066a8798c |
| b702c9691e |
| addbe91e59 |
| b812848a0e |
| ad214c8206 |
| 1bc3218fc1 |
| 5cc420a6c3 |
| c7686fdf4e |
| c1dae4d8b0 |
| 2473025201 |
| fa5c1b23ca |
| f2f499aace |
| bd47b909bf |
| d646c2a4b9 |
| 865ada46bf |
| cdffc5e853 |
| 0e67e9266b |
| 1ff0afe6fb |
| d34884f9a4 |
| 7a0c204dc1 |
| 25f67c9ef8 |
| a776464a7e |
| c40e7105e6 |
| 5bac38ce8b |
| e3f0662130 |
| 21df56b233 |
| 393cec513c |
| 4437ecc69a |
| 40d75baca2 |
| 00f3fe0840 |
| 47a8b5bda5 |
| ec75095073 |
| 1794232989 |
| 40978d162e |
| 536ce9f927 |
| 4e5ec74ffe |
| a6d8125fd7 |
| 15d3a0361e |
| 6ad84a96a3 |
| 16e846e9b6 |
| 5bc7185f07 |
| 32462dfb2d |
| e3ef88c0cf |
| 829aae7b8d |
| b836b84825 |
| 3e1f154412 |
| e7af537452 |
| 3565959af7 |
| 4667136a4c |
| 972d14611a |
| e90eef8910 |
| f81927b85b |
| 701cdcdab1 |
| 9635a628a9 |
| 3e1b16f3fc |
| ff37ff9ccf |
| 5b7bcb7170 |
| 6a5fe90f98 |
| 91373337ba |
| 56ed726a88 |
| bce10e11e4 |
| 91cdb16158 |
| c58ab0f648 |
| f410af1cfc |
| aa15e5eea8 |
| df9f1f8f78 |
| 7ace35d737 |
| 551999ff6b |
| 052b3f44ca |
| fdcf766337 |
| 7d13bfb14e |
| 202bfd9955 |
| c99e36235b |
| 3cecafac59 |
| 61fc4c5e55 |
| fad73cacc1 |
| 8fced29978 |
| b0f4ae4890 |
| 7070094a31 |
| 011185e3f7 |
| 461881e46a |
| ddc33821cf |
| 0ab7d02994 |
| a8c4ab221b |
| 87d36a7752 |
| 998ded414c |
| f78d031e64 |
| 4ab37dd34a |
| 8129dec2f7 |
| a1035a1878 |
| db169c5f90 |
| bbb55ef261 |
| 1130cafe41 |
| a1cf27e232 |
| 5a1ce99d87 |
| c7db296e1b |
| f634a750c5 |
| d07a196c8e |
| 8c56c75d2c |
| e54895efde |
| 2f8cca2d6d |
| 64607152ee |
| 20383ad3d0 |
| 787d34f650 |
| ae618a0c68 |
| f480376153 |
| e4b3a88fc6 |
| 69a5c53074 |
| 259583e936 |
| 0f826290d0 |
| e46f027894 |
| 3e093f6a40 |
| 00996b551f |
| 24d8697cef |
| be4f6741f9 |
| 7a2f67f5f0 |
| bba0425267 |
```diff
@@ -49,6 +49,7 @@ install::
 	mkdir -p -m 755 $(SBINDIR); \
 	install -m 755 arch/x86/tools/mcreboot-smp-x86.sh $(SBINDIR)/mcreboot.sh; \
 	install -m 755 arch/x86/tools/mcstop+release-smp-x86.sh $(SBINDIR)/mcstop+release.sh; \
+	install -m 755 arch/x86/tools/eclair-dump-backtrace.exp $(SBINDIR)/eclair-dump-backtrace.exp;\
 	mkdir -p -m 755 $(ETCDIR); \
 	install -m 644 arch/x86/tools/irqbalance_mck.service $(ETCDIR)/irqbalance_mck.service; \
 	install -m 644 arch/x86/tools/irqbalance_mck.in $(ETCDIR)/irqbalance_mck.in; \
```
```diff
@@ -148,7 +148,7 @@ extern char page_fault[], general_protection_exception[];
 extern char debug_exception[], int3_exception[];
 
 uint64_t boot_pat_state = 0;
-int no_turbo = 0; /* May be updated by early parsing of kargs */
+int no_turbo = 1; /* May be updated by early parsing of kargs */
 
 extern int num_processors; /* kernel/ap.c */
 struct pvclock_vsyscall_time_info *pvti = NULL;
@@ -844,6 +844,25 @@ void set_signal(int sig, void *regs, struct siginfo *info);
 void check_signal(unsigned long, void *, int);
 extern void tlb_flush_handler(int vector);
 
+void __show_stack(uintptr_t *sp) {
+	while (((uintptr_t)sp >= 0xffff800000000000)
+			&& ((uintptr_t)sp < 0xffffffff80000000)) {
+		uintptr_t fp;
+		uintptr_t ip;
+
+		fp = sp[0];
+		ip = sp[1];
+		kprintf("IP: %016lx, SP: %016lx, FP: %016lx\n", ip, (uintptr_t)sp, fp);
+		sp = (void *)fp;
+	}
+	return;
+}
+
+void show_context_stack(uintptr_t *rbp) {
+	__show_stack(rbp);
+	return;
+}
+
 void handle_interrupt(int vector, struct x86_user_context *regs)
 {
 	struct ihk_mc_interrupt_handler *h;
@@ -952,6 +971,9 @@ void handle_interrupt(int vector, struct x86_user_context *regs)
 
 		tlb_flush_handler(vector);
 	}
+	else if (vector == 133) {
+		show_context_stack((uintptr_t *)regs->gpr.rbp);
+	}
 	else {
 		list_for_each_entry(h, &handlers[vector - 32], list) {
 			if (h->func) {
@@ -1079,6 +1101,10 @@ unhandled_page_fault(struct thread *thread, void *fault_addr, void *regs)
 
 	kprintf_unlock(irqflags);
 
+	if (!(error & PF_USER)) {
+		panic("panic: kernel mode PF");
+	}
+
 	/* TODO */
 	ihk_mc_debug_show_interrupt_context(regs);
 
```
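The `__show_stack()` helper added above walks the frame-pointer chain: with the conventional x86_64 prologue (`push %rbp; mov %rsp,%rbp`), each frame stores the caller's frame pointer at `fp[0]` and the return address at `fp[1]`, and the loop stops as soon as the pointer leaves the kernel address range. A minimal standalone sketch of the same walk (illustrative names; assumes the kernel is built with frame pointers, not `-fomit-frame-pointer`):

```c
/* Minimal sketch of the rbp-chain walk used by __show_stack above.
 * For a frame pointer fp: fp[0] holds the caller's saved frame
 * pointer and fp[1] the return address pushed by the call. The
 * bounds are the x86_64 kernel address range checked in the diff. */
#include <stdint.h>

void walk_frames(uintptr_t *fp)
{
	while ((uintptr_t)fp >= 0xffff800000000000UL &&
	       (uintptr_t)fp <  0xffffffff80000000UL) {
		uintptr_t caller_fp = fp[0];  /* parent frame's %rbp */
		uintptr_t ret_ip    = fp[1];  /* return address */

		/* print or record (ret_ip, fp, caller_fp) here */
		fp = (uintptr_t *)caller_fp;  /* step to the parent frame */
	}
}
```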
```diff
@@ -131,6 +131,7 @@ static void __ihk_mc_spinlock_unlock(ihk_spinlock_t *lock, unsigned long flags)
 typedef struct mcs_lock_node {
 	unsigned long locked;
 	struct mcs_lock_node *next;
+	unsigned long irqsave;
 } __attribute__((aligned(64))) mcs_lock_node_t;
 
 static void mcs_lock_init(struct mcs_lock_node *node)
@@ -139,7 +140,7 @@ static void mcs_lock_init(struct mcs_lock_node *node)
 	node->next = NULL;
 }
 
-static void mcs_lock_lock(struct mcs_lock_node *lock,
+static void __mcs_lock_lock(struct mcs_lock_node *lock,
 	struct mcs_lock_node *node)
 {
 	struct mcs_lock_node *pred;
@@ -158,7 +159,7 @@ static void mcs_lock_lock(struct mcs_lock_node *lock,
 	}
 }
 
-static void mcs_lock_unlock(struct mcs_lock_node *lock,
+static void __mcs_lock_unlock(struct mcs_lock_node *lock,
 	struct mcs_lock_node *node)
 {
 	if (node->next == NULL) {
@@ -178,6 +179,35 @@ static void mcs_lock_unlock(struct mcs_lock_node *lock,
 	node->next->locked = 0;
 }
 
+static void mcs_lock_lock_noirq(struct mcs_lock_node *lock,
+	struct mcs_lock_node *node)
+{
+	preempt_disable();
+	__mcs_lock_lock(lock, node);
+}
+
+static void mcs_lock_unlock_noirq(struct mcs_lock_node *lock,
+	struct mcs_lock_node *node)
+{
+	__mcs_lock_unlock(lock, node);
+	preempt_enable();
+}
+
+static void mcs_lock_lock(struct mcs_lock_node *lock,
+	struct mcs_lock_node *node)
+{
+	node->irqsave = cpu_disable_interrupt_save();
+	mcs_lock_lock_noirq(lock, node);
+}
+
+static void mcs_lock_unlock(struct mcs_lock_node *lock,
+	struct mcs_lock_node *node)
+{
+	mcs_lock_unlock_noirq(lock, node);
+	cpu_restore_interrupt(node->irqsave);
+}
+
+
 // reader/writer lock
 typedef struct mcs_rwlock_node {
 	ihk_atomic_t count; // num of readers (use only common reader)
```
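This hunk layers the MCS lock API: `__mcs_lock_lock()`/`__mcs_lock_unlock()` implement the raw queue protocol, the `_noirq` variants wrap them in `preempt_disable()`/`preempt_enable()`, and the default entry points additionally save the interrupt flags into the caller's queue node (`node->irqsave`), which is why the node structure gains that field. A usage sketch (illustrative caller, not from the tree):

```c
/* Usage sketch for the layered MCS lock API above. Each acquirer
 * supplies its own queue node; the default variant stashes the saved
 * IRQ state in that node so unlock can restore it in reverse order. */
struct mcs_lock_node example_lock = { 0 };   /* shared lock / queue tail */

void critical_section_example(void)
{
	struct mcs_lock_node node;        /* per-acquirer, typically on stack */

	mcs_lock_lock(&example_lock, &node);   /* IRQs off, then preemption off */
	/* ... critical section ... */
	mcs_lock_unlock(&example_lock, &node); /* re-enables in reverse order */
}
```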
arch/x86/kernel/include/arch-string.h (new file, 23 lines)

```diff
@@ -0,0 +1,23 @@
+#ifndef _ASM_X86_STRING_H
+#define _ASM_X86_STRING_H
+
+#define ARCH_FAST_MEMCPY
+
+static inline void *__inline_memcpy(void *to, const void *from, size_t n)
+{
+	unsigned long d0, d1, d2;
+	asm volatile("rep ; movsl\n\t"
+		     "testb $2,%b4\n\t"
+		     "je 1f\n\t"
+		     "movsw\n"
+		     "1:\ttestb $1,%b4\n\t"
+		     "je 2f\n\t"
+		     "movsb\n"
+		     "2:"
+		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
+		     : "memory");
+	return to;
+}
+
+#endif
```
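`__inline_memcpy()` copies the bulk of the buffer as `n / 4` 32-bit moves with `rep movsl`, then inspects bit 1 and bit 0 of `n` to finish with at most one `movsw` and one `movsb`. A plain-C equivalent of the tail handling, as a sketch only; the header relies on the string instructions for speed:

```c
/* Plain-C equivalent of the asm above, making the tail explicit:
 * copy n/4 dwords, then one word if bit 1 of n is set, then one
 * byte if bit 0 is set. */
#include <stddef.h>
#include <stdint.h>

static inline void *memcpy_dword_tail(void *to, const void *from, size_t n)
{
	uint32_t *d4 = to;
	const uint32_t *s4 = from;
	size_t i;

	for (i = 0; i < n / 4; i++)       /* rep movsl */
		*d4++ = *s4++;

	uint16_t *d2 = (uint16_t *)d4;
	const uint16_t *s2 = (const uint16_t *)s4;
	if (n & 2)                        /* movsw */
		*d2++ = *s2++;

	if (n & 1)                        /* movsb */
		*(uint8_t *)d2 = *(const uint8_t *)s2;

	return to;
}
```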
```diff
@@ -22,7 +22,7 @@
 
 SYSCALL_HANDLED(0, read)
 SYSCALL_DELEGATED(1, write)
-SYSCALL_DELEGATED(2, open)
+SYSCALL_HANDLED(2, open)
 SYSCALL_HANDLED(3, close)
 SYSCALL_DELEGATED(4, stat)
 SYSCALL_DELEGATED(5, fstat)
@@ -150,5 +150,8 @@ SYSCALL_HANDLED(602, pmc_start)
 SYSCALL_HANDLED(603, pmc_stop)
 SYSCALL_HANDLED(604, pmc_reset)
 SYSCALL_HANDLED(700, get_cpu_id)
+#ifdef TRACK_SYSCALLS
+SYSCALL_HANDLED(__NR_track_syscalls, track_syscalls)
+#endif // TRACK_SYSCALLS
 
 /**** End of File ****/
```
```diff
@@ -45,7 +45,11 @@ void *early_alloc_pages(int nr_pages)
 		last_page = phys_to_virt(virt_to_phys(last_page));
 	} else if (last_page == (void *)-1) {
 		panic("Early allocator is already finalized. Do not use it.\n");
-	}
+	} else {
+		if(virt_to_phys(last_page) >= bootstrap_mem_end) {
+			panic("Early allocator: Out of memory\n");
+		}
+	}
 	p = last_page;
 	last_page += (nr_pages * PAGE_SIZE);
 
@@ -179,7 +183,7 @@ static void init_normal_area(struct page_table *pt)
 	}
 }
 
-static struct page_table *__alloc_new_pt(enum ihk_mc_ap_flag ap_flag)
+static struct page_table *__alloc_new_pt(ihk_mc_ap_flag ap_flag)
 {
 	struct page_table *newpt = ihk_mc_alloc_pages(1, ap_flag);
 
@@ -278,7 +282,7 @@ void set_pte(pte_t *ppte, unsigned long phys, enum ihk_mc_pt_attribute attr)
 * and returns a pointer to the PTE corresponding to the
 * virtual address.
 */
-pte_t *get_pte(struct page_table *pt, void *virt, enum ihk_mc_pt_attribute attr, enum ihk_mc_ap_flag ap_flag)
+pte_t *get_pte(struct page_table *pt, void *virt, enum ihk_mc_pt_attribute attr, ihk_mc_ap_flag ap_flag)
 {
 	int l4idx, l3idx, l2idx, l1idx;
 	unsigned long v = (unsigned long)virt;
@@ -339,7 +343,7 @@ static int __set_pt_page(struct page_table *pt, void *virt, unsigned long phys,
 	int l4idx, l3idx, l2idx, l1idx;
 	unsigned long v = (unsigned long)virt;
 	struct page_table *newpt;
-	enum ihk_mc_ap_flag ap_flag;
+	ihk_mc_ap_flag ap_flag;
 	int in_kernel =
 		(((unsigned long long)virt) >= 0xffff000000000000ULL);
 	unsigned long init_pt_lock_flags;
```
```diff
@@ -490,8 +494,10 @@ uint64_t ihk_mc_pt_virt_to_pagemap(struct page_table *pt, unsigned long virt)
 	return pagemap;
 }
 
-int ihk_mc_pt_virt_to_phys(struct page_table *pt,
-		const void *virt, unsigned long *phys)
+int ihk_mc_pt_virt_to_phys_size(struct page_table *pt,
+		const void *virt,
+		unsigned long *phys,
+		unsigned long *size)
 {
 	int l4idx, l3idx, l2idx, l1idx;
 	unsigned long v = (unsigned long)virt;
@@ -513,6 +519,7 @@ int ihk_mc_pt_virt_to_phys(struct page_table *pt,
 	if ((pt->entry[l3idx] & PFL3_SIZE)) {
 		*phys = pte_get_phys(&pt->entry[l3idx])
 			| (v & (PTL3_SIZE - 1));
+		if (size) *size = PTL3_SIZE;
 		return 0;
 	}
 	pt = phys_to_virt(pte_get_phys(&pt->entry[l3idx]));
@@ -523,6 +530,7 @@ int ihk_mc_pt_virt_to_phys(struct page_table *pt,
 	if ((pt->entry[l2idx] & PFL2_SIZE)) {
 		*phys = pte_get_phys(&pt->entry[l2idx])
 			| (v & (PTL2_SIZE - 1));
+		if (size) *size = PTL2_SIZE;
 		return 0;
 	}
 	pt = phys_to_virt(pte_get_phys(&pt->entry[l2idx]));
@@ -532,9 +540,17 @@ int ihk_mc_pt_virt_to_phys(struct page_table *pt,
 	}
 
 	*phys = pte_get_phys(&pt->entry[l1idx]) | (v & (PTL1_SIZE - 1));
+	if (size) *size = PTL1_SIZE;
 	return 0;
 }
 
+int ihk_mc_pt_virt_to_phys(struct page_table *pt,
+		const void *virt, unsigned long *phys)
+{
+	return ihk_mc_pt_virt_to_phys_size(pt, virt, phys, NULL);
+}
+
 
 int ihk_mc_pt_print_pte(struct page_table *pt, void *virt)
 {
 	int l4idx, l3idx, l2idx, l1idx;
```
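`ihk_mc_pt_virt_to_phys()` is reworked into `ihk_mc_pt_virt_to_phys_size()`, which additionally reports the page size (4K, 2M, or 1G) backing the translation; the old name survives as a thin wrapper passing `size = NULL`. A hypothetical caller showing why the size matters when chunking a copy across mixed page sizes:

```c
/* Hypothetical caller of the extended API: chunk an operation across
 * mappings of mixed page sizes. "pt", "vaddr", and "total_len" are
 * assumed inputs; the per-chunk work is elided. */
int walk_range(struct page_table *pt, void *vaddr, size_t total_len)
{
	unsigned long phys, pgsize;
	size_t remain = total_len;

	while (remain > 0) {
		if (ihk_mc_pt_virt_to_phys_size(pt, vaddr, &phys, &pgsize))
			return -1;                /* unmapped address */

		/* bytes left in the current physical page */
		size_t chunk = pgsize - ((unsigned long)vaddr & (pgsize - 1));
		if (chunk > remain)
			chunk = remain;

		/* ... operate on chunk bytes starting at phys ... */
		vaddr = (char *)vaddr + chunk;
		remain -= chunk;
	}
	return 0;
}
```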
```diff
@@ -546,28 +562,34 @@ int ihk_mc_pt_print_pte(struct page_table *pt, void *virt)
 
 	GET_VIRT_INDICES(v, l4idx, l3idx, l2idx, l1idx);
 
+	__kprintf("l4 table: 0x%lX l4idx: %d \n", virt_to_phys(pt), l4idx);
 	if (!(pt->entry[l4idx] & PFL4_PRESENT)) {
 		__kprintf("0x%lX l4idx not present! \n", (unsigned long)virt);
+		__kprintf("l4 entry: 0x%lX\n", pt->entry[l4idx]);
 		return -EFAULT;
 	}
 	__kprintf("l4 entry: 0x%lX\n", pt->entry[l4idx]);
 	pt = phys_to_virt(pt->entry[l4idx] & PAGE_MASK);
 
+	__kprintf("l3 table: 0x%lX l3idx: %d \n", virt_to_phys(pt), l3idx);
 	if (!(pt->entry[l3idx] & PFL3_PRESENT)) {
 		__kprintf("0x%lX l3idx not present! \n", (unsigned long)virt);
+		__kprintf("l3 entry: 0x%lX\n", pt->entry[l3idx]);
 		return -EFAULT;
 	}
 	__kprintf("l3 entry: 0x%lX\n", pt->entry[l3idx]);
 	if ((pt->entry[l3idx] & PFL3_SIZE)) {
 		__kprintf("l3 entry is 1G page\n");
 		return 0;
 	}
 	pt = phys_to_virt(pt->entry[l3idx] & PAGE_MASK);
 
+	__kprintf("l2 table: 0x%lX l2idx: %d \n", virt_to_phys(pt), l2idx);
 	if (!(pt->entry[l2idx] & PFL2_PRESENT)) {
 		__kprintf("0x%lX l2idx not present! \n", (unsigned long)virt);
+		__kprintf("l2 entry: 0x%lX\n", pt->entry[l2idx]);
 		return -EFAULT;
 	}
 	__kprintf("l2 entry: 0x%lX\n", pt->entry[l2idx]);
 	if ((pt->entry[l2idx] & PFL2_SIZE)) {
 		__kprintf("l2 entry is 2M page\n");
 		return 0;
 	}
 	pt = phys_to_virt(pt->entry[l2idx] & PAGE_MASK);
@@ -646,7 +668,7 @@ int ihk_mc_pt_prepare_map(page_table_t p, void *virt, unsigned long size,
 	return ret;
 }
 
-struct page_table *ihk_mc_pt_create(enum ihk_mc_ap_flag ap_flag)
+struct page_table *ihk_mc_pt_create(ihk_mc_ap_flag ap_flag)
 {
 	struct page_table *pt = ihk_mc_alloc_pages(1, ap_flag);
 
@@ -1079,7 +1101,8 @@ static int clear_range_l1(void *args0, pte_t *ptep, uint64_t base,
 		page = phys_to_page(phys);
 	}
 
-	if (page && page_is_in_memobj(page) && (old & PFL1_DIRTY)) {
+	if (page && page_is_in_memobj(page) && (old & PFL1_DIRTY) &&
+			!(args->memobj->flags & MF_ZEROFILL)) {
 		memobj_flush_page(args->memobj, phys, PTL1_SIZE);
 	}
 
@@ -1253,6 +1276,9 @@ static int clear_range(struct page_table *pt, struct process_vm *vm,
 	}
 
 	args.free_physical = free_physical;
+	if (memobj && (memobj->flags & MF_DEV_FILE)) {
+		args.free_physical = 0;
+	}
 	args.memobj = memobj;
 	args.vm = vm;
 
@@ -1761,9 +1787,19 @@ int ihk_mc_pt_set_pte(page_table_t pt, pte_t *ptep, size_t pgsize,
 		*ptep = phys | attr_to_l1attr(attr);
 	}
 	else if (pgsize == PTL2_SIZE) {
+		if (phys & (PTL2_SIZE - 1)) {
+			kprintf("%s: error: phys needs to be PTL2_SIZE aligned\n", __FUNCTION__);
+			error = -1;
+			goto out;
+		}
 		*ptep = phys | attr_to_l2attr(attr | PTATTR_LARGEPAGE);
 	}
 	else if ((pgsize == PTL3_SIZE) && (use_1gb_page)) {
+		if (phys & (PTL3_SIZE - 1)) {
+			kprintf("%s: error: phys needs to be PTL3_SIZE aligned\n", __FUNCTION__);
+			error = -1;
+			goto out;
+		}
 		*ptep = phys | attr_to_l3attr(attr | PTATTR_LARGEPAGE);
 	}
 	else {
```
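The new checks in `ihk_mc_pt_set_pte()` reject a physical address that is not aligned to the large-page size being mapped; since the low bits of a PTE hold attribute flags rather than address bits, a misaligned address would silently corrupt those flags. The power-of-two mask idiom, as a standalone sketch:

```c
/* Sketch of the alignment invariant enforced above. pgsize is a
 * power of two, so (pgsize - 1) masks exactly the offset-within-page
 * bits; any nonzero result means the address is misaligned. */
#include <stdint.h>

static int phys_aligned(uint64_t phys, uint64_t pgsize)
{
	return (phys & (pgsize - 1)) == 0;
}
```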
```diff
@@ -2201,30 +2237,28 @@ int strcpy_from_user(char *dst, const char *src)
 	return err;
 }
 
-long getlong_user(const long *p)
+long getlong_user(long *dest, const long *p)
 {
 	int error;
-	long l;
 
-	error = copy_from_user(&l, p, sizeof(l));
+	error = copy_from_user(dest, p, sizeof(long));
 	if (error) {
 		return error;
 	}
 
-	return l;
+	return 0;
 }
 
-int getint_user(const int *p)
+int getint_user(int *dest, const int *p)
 {
 	int error;
-	int i;
 
-	error = copy_from_user(&i, p, sizeof(i));
+	error = copy_from_user(dest, p, sizeof(int));
 	if (error) {
 		return error;
 	}
 
-	return i;
+	return 0;
 }
 
 int read_process_vm(struct process_vm *vm, void *kdst, const void *usrc, size_t siz)
```
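`getlong_user()` and `getint_user()` previously returned the fetched value itself, which made a legitimate value that happens to equal an error code ambiguous; the new signatures return 0 or a negative error and store the value through `*dest`. A caller-side sketch (hypothetical call site):

```c
/* Hypothetical call site migrated to the new API. "user_ptr" is an
 * assumed valid user-space pointer; the error and the value now
 * travel through separate channels. */
long fetch_arg(const long *user_ptr, long *out)
{
	long value;
	int err;

	err = getlong_user(&value, user_ptr);
	if (err)
		return err;        /* copy_from_user() failed */

	*out = value;
	return 0;
}
```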
```diff
@@ -2355,8 +2389,18 @@ int write_process_vm(struct process_vm *vm, void *udst, const void *ksrc, size_t
 		return error;
 	}
 
-	va = phys_to_virt(pa);
-	memcpy(va, from, cpsize);
+	if (pa < ihk_mc_get_memory_address(IHK_MC_GMA_MAP_START, 0) ||
+			pa >= ihk_mc_get_memory_address(IHK_MC_GMA_MAP_END, 0)) {
+		dkprintf("%s: pa is outside of LWK memory, from: %p,"
+				"pa: %p, cpsize: %d\n", __FUNCTION__, from, pa, cpsize);
+		va = ihk_mc_map_virtual(pa, 1, PTATTR_ACTIVE);
+		memcpy(va, from, cpsize);
+		ihk_mc_unmap_virtual(va, 1, 1);
+	}
+	else {
+		va = phys_to_virt(pa);
+		memcpy(va, from, cpsize);
+	}
 
 	from += cpsize;
 	to += cpsize;
```
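`write_process_vm()` now distinguishes physical pages inside the LWK's straight-mapped range, which `phys_to_virt()` can reach directly, from pages outside it, which get a short-lived mapping via `ihk_mc_map_virtual()` for the duration of the copy. The pattern as a sketch; the bounds-check helper is illustrative, where the real code compares against `IHK_MC_GMA_MAP_START`/`IHK_MC_GMA_MAP_END`:

```c
/* Sketch of the temporary-mapping pattern introduced above.
 * pa_in_lwk_range() is a hypothetical stand-in for the bounds check. */
void copy_to_phys(unsigned long pa, const void *src, size_t len)
{
	void *va;

	if (pa_in_lwk_range(pa)) {
		va = phys_to_virt(pa);            /* straight-mapped: direct */
		memcpy(va, src, len);
	} else {
		va = ihk_mc_map_virtual(pa, 1, PTATTR_ACTIVE);  /* one page */
		memcpy(va, src, len);
		ihk_mc_unmap_virtual(va, 1, 1);   /* tear down, args as in the diff */
	}
}
```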
```diff
@@ -2380,7 +2424,7 @@ int patch_process_vm(struct process_vm *vm, void *udst, const void *ksrc, size_t
 	unsigned long pa;
 	void *va;
 
-	kprintf("patch_process_vm(%p,%p,%p,%lx)\n", vm, udst, ksrc, siz);
+	dkprintf("patch_process_vm(%p,%p,%p,%lx)\n", vm, udst, ksrc, siz);
 	if ((ustart < vm->region.user_start)
 			|| (vm->region.user_end <= ustart)
 			|| ((vm->region.user_end - ustart) < siz)) {
@@ -2430,6 +2474,6 @@ int patch_process_vm(struct process_vm *vm, void *udst, const void *ksrc, size_t
 		remain -= cpsize;
 	}
 
-	kprintf("patch_process_vm(%p,%p,%p,%lx):%d\n", vm, udst, ksrc, siz, 0);
+	dkprintf("patch_process_vm(%p,%p,%p,%lx):%d\n", vm, udst, ksrc, siz, 0);
 	return 0;
 } /* patch_process_vm() */
```
```diff
@@ -30,7 +30,7 @@ int ihk_mc_ikc_init_first_local(struct ihk_ikc_channel_desc *channel,
 
 	memset(channel, 0, sizeof(struct ihk_ikc_channel_desc));
 
-	mikc_queue_pages = ((num_processors * MASTER_IKCQ_PKTSIZE)
+	mikc_queue_pages = ((2 * num_processors * MASTER_IKCQ_PKTSIZE)
 		+ (PAGE_SIZE - 1)) / PAGE_SIZE;
 
 	/* Place both sides in this side */
```
```diff
@@ -70,71 +70,37 @@ static struct vdso vdso;
 static size_t container_size = 0;
 static ptrdiff_t vdso_offset;
 
-/*
-  See dkprintf("BSP HW ID = %d, ", bsp_hw_id); (in ./mcos/kernel/ap.c)
-
-  Core with BSP HW ID 224 is 1st logical core of last physical core.
-  It boots first and is given SW-ID of 0
-
-  Core with BSP HW ID 0 is 1st logical core of 1st physical core.
-  It boots next and is given SW-ID of 1.
-  Core with BSP HW ID 1 boots next and is given SW-ID of 2.
-  Core with BSP HW ID 2 boots next and is given SW-ID of 3.
-  Core with BSP HW ID 3 boots next and is given SW-ID of 4.
-  ...
-  Core with BSP HW ID 220 is 1st logical core of 56-th physical core.
-  It boots next and is given SW-ID of 221.
-  Core with BSP HW ID 221 boots next and is given SW-ID of 222.
-  Core with BSP HW ID 222 boots next and is given SW-ID of 223.
-  Core with BSP HW ID 223 boots next and is given SW-ID of 224.
-
-  Core with BSP HW ID 225 is 2nd logical core of last physical core.
-  It boots next and is given SW-ID of 225.
-  Core with BSP HW ID 226 boots next and is given SW-ID of 226.
-  Core with BSP HW ID 227 boots next and is given SW-ID of 227.
-*/
-ihk_spinlock_t cpuid_head_lock = 0;
-static int cpuid_head = 0;
-
-/* archtecture-depended syscall handlers */
-int obtain_clone_cpuid() {
-	/* see above on BSP HW ID */
-	struct ihk_mc_cpu_info *cpu_info = ihk_mc_get_cpu_info();
-	int cpuid, nretry = 0;
-	ihk_mc_spinlock_lock_noirq(&cpuid_head_lock);
-
-	/* Always start from 0 to fill in LWK cores linearily */
-	cpuid_head = 0;
-retry:
-	/* Try to obtain next physical core */
-	cpuid = cpuid_head;
-	cpuid_head += 1;
-	if(cpuid_head >= cpu_info->ncpus) {
-		cpuid_head = 0;
-	}
-
-	/* A hyper-threading core whose parent physical core has a
-	   process on one of its hyper-threading core might
-	   be chosen. Use sched_setaffinity if you want to skip that
-	   kind of busy physical core for performance reason. */
-	if(get_cpu_local_var(cpuid)->status != CPU_STATUS_IDLE) {
-		nretry++;
-		if(nretry >= cpu_info->ncpus) {
-			cpuid = -1;
-			ihk_mc_spinlock_unlock_noirq(&cpuid_head_lock);
-			goto out;
-		}
-		goto retry;
-	}
-	get_cpu_local_var(cpuid)->status = CPU_STATUS_RESERVED;
-	ihk_mc_spinlock_unlock_noirq(&cpuid_head_lock);
-out:
-	return cpuid;
-}
+extern int num_processors;
+
+int obtain_clone_cpuid(cpu_set_t *cpu_set) {
+	int min_queue_len = -1;
+	int cpu, min_cpu = -1;
+
+	/* Find the first allowed core with the shortest run queue */
+	for (cpu = 0; cpu < num_processors; ++cpu) {
+		struct cpu_local_var *v;
+		unsigned long irqstate;
+
+		if (!CPU_ISSET(cpu, cpu_set)) continue;
+
+		v = get_cpu_local_var(cpu);
+		irqstate = ihk_mc_spinlock_lock(&v->runq_lock);
+		if (min_queue_len == -1 || v->runq_len < min_queue_len) {
+			min_queue_len = v->runq_len;
+			min_cpu = cpu;
+		}
+		ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
+
+		/* A hyper-threading core on the same physical core as
+		   the parent process might be chosen. Use sched_setaffinity
+		   if you want to skip that kind of busy physical core for
+		   performance reason. */
+		if (min_queue_len == 0)
+			break;
+	}
+
+	if (min_cpu != -1) {
+		if (get_cpu_local_var(min_cpu)->status != CPU_STATUS_RESERVED)
+			get_cpu_local_var(min_cpu)->status = CPU_STATUS_RESERVED;
+	}
+
+	return min_cpu;
+}
 
 int
```
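`obtain_clone_cpuid()` drops the old round-robin `cpuid_head` scheme in favor of scanning the clone's allowed CPU mask for the core with the shortest run queue, stopping early when it finds an idle one. A self-contained sketch of the policy, with plain arrays standing in for the per-CPU structures and locking:

```c
/* Standalone sketch of the new placement policy: pick the first
 * allowed CPU with the shortest run queue; an idle CPU (length 0)
 * ends the scan immediately. */
#include <stdio.h>

#define NCPUS 8

int pick_cpu(const int runq_len[NCPUS], const unsigned char allowed[NCPUS])
{
	int min_len = -1, min_cpu = -1;

	for (int cpu = 0; cpu < NCPUS; ++cpu) {
		if (!allowed[cpu])
			continue;
		if (min_len == -1 || runq_len[cpu] < min_len) {
			min_len = runq_len[cpu];
			min_cpu = cpu;
		}
		if (min_len == 0)          /* idle CPU: can't do better */
			break;
	}
	return min_cpu;                /* -1 if the mask was empty */
}

int main(void)
{
	int runq[NCPUS]         = { 3, 1, 0, 2, 5, 0, 1, 4 };
	unsigned char ok[NCPUS] = { 0, 1, 0, 1, 1, 1, 1, 1 };
	printf("chosen cpu: %d\n", pick_cpu(runq, ok));  /* prints 5 */
	return 0;
}
```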
```diff
@@ -544,14 +510,14 @@ void ptrace_report_signal(struct thread *thread, int sig)
 	int parent_pid;
 	struct siginfo info;
 
-	dkprintf("ptrace_report_signal,pid=%d\n", thread->proc->pid);
+	dkprintf("ptrace_report_signal, tid=%d, pid=%d\n", thread->tid, thread->proc->pid);
 
 	mcs_rwlock_writer_lock(&proc->update_lock, &lock);
 	if(!(proc->ptrace & PT_TRACED)){
 		mcs_rwlock_writer_unlock(&proc->update_lock, &lock);
 		return;
 	}
-	proc->exit_status = sig;
+	thread->exit_status = sig;
 	/* Transition thread state */
 	proc->status = PS_TRACED;
 	thread->status = PS_TRACED;
@@ -569,8 +535,8 @@ void ptrace_report_signal(struct thread *thread, int sig)
 	memset(&info, '\0', sizeof info);
 	info.si_signo = SIGCHLD;
 	info.si_code = CLD_TRAPPED;
-	info._sifields._sigchld.si_pid = thread->proc->pid;
-	info._sifields._sigchld.si_status = thread->proc->exit_status;
+	info._sifields._sigchld.si_pid = thread->tid;
+	info._sifields._sigchld.si_status = thread->exit_status;
 	do_kill(cpu_local_var(current), parent_pid, -1, SIGCHLD, &info, 0);
 	/* Wake parent (if sleeping in wait4()) */
 	waitq_wakeup(&proc->parent->waitpid_q);
@@ -695,10 +661,10 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 	int orgsig;
 	int ptraceflag = 0;
 	struct mcs_rwlock_node_irqsave lock;
-	unsigned long irqstate;
+	struct mcs_rwlock_node_irqsave mcs_rw_node;
 
 	for(w = pending->sigmask.__val[0], sig = 0; w; sig++, w >>= 1);
-	dkprintf("do_signal,pid=%d,sig=%d\n", proc->pid, sig);
+	dkprintf("do_signal(): tid=%d, pid=%d, sig=%d\n", thread->tid, proc->pid, sig);
 	orgsig = sig;
 
 	if((proc->ptrace & PT_TRACED) &&
@@ -718,12 +684,12 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 		rc = regs->gpr.rax;
 	}
 
-	irqstate = ihk_mc_spinlock_lock(&thread->sigcommon->lock);
+	mcs_rwlock_writer_lock(&thread->sigcommon->lock, &mcs_rw_node);
 	k = thread->sigcommon->action + sig - 1;
 
 	if(k->sa.sa_handler == SIG_IGN){
 		kfree(pending);
-		ihk_mc_spinlock_unlock(&thread->sigcommon->lock, irqstate);
+		mcs_rwlock_writer_unlock(&thread->sigcommon->lock, &mcs_rw_node);
 		return;
 	}
 	else if(k->sa.sa_handler){
@@ -808,7 +774,7 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 
 	if(copy_to_user(sigsp, &ksigsp, sizeof ksigsp)){
 		kfree(pending);
-		ihk_mc_spinlock_unlock(&thread->sigcommon->lock, irqstate);
+		mcs_rwlock_writer_unlock(&thread->sigcommon->lock, &mcs_rw_node);
 		kprintf("do_signal,write_process_vm failed\n");
 		terminate(0, sig);
 		return;
@@ -827,7 +793,7 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 	if(!(k->sa.sa_flags & SA_NODEFER))
 		thread->sigmask.__val[0] |= pending->sigmask.__val[0];
 	kfree(pending);
-	ihk_mc_spinlock_unlock(&thread->sigcommon->lock, irqstate);
+	mcs_rwlock_writer_unlock(&thread->sigcommon->lock, &mcs_rw_node);
 	if(regs->gpr.rflags & RFLAGS_TF){
 		struct siginfo info;
 
@@ -853,7 +819,7 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 	}
 	else
 		kfree(pending);
-	ihk_mc_spinlock_unlock(&thread->sigcommon->lock, irqstate);
+	mcs_rwlock_writer_unlock(&thread->sigcommon->lock, &mcs_rw_node);
 	switch (sig) {
 	case SIGSTOP:
 	case SIGTSTP:
@@ -885,7 +851,8 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 		/* Wake up the parent who tried wait4 and sleeping */
 		waitq_wakeup(&proc->parent->waitpid_q);
 
-		dkprintf("do_signal,SIGSTOP,sleeping\n");
+		dkprintf("do_signal(): pid: %d, tid: %d SIGSTOP, sleeping\n",
+			proc->pid, thread->tid);
 		/* Sleep */
 		schedule();
 		dkprintf("SIGSTOP(): woken up\n");
@@ -899,7 +866,7 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
 
 	/* Update thread state in fork tree */
 	mcs_rwlock_writer_lock(&proc->update_lock, &lock);
-	proc->exit_status = SIGTRAP;
+	thread->exit_status = SIGTRAP;
 	proc->status = PS_TRACED;
 	thread->status = PS_TRACED;
 	mcs_rwlock_writer_unlock(&proc->update_lock, &lock);
```
```diff
@@ -953,11 +920,11 @@ check_signal(unsigned long rc, void *regs0, int num)
 static struct sig_pending *
 getsigpending(struct thread *thread, int delflag){
 	struct list_head *head;
-	ihk_spinlock_t *lock;
+	mcs_rwlock_lock_t *lock;
+	struct mcs_rwlock_node_irqsave mcs_rw_node;
 	struct sig_pending *next;
 	struct sig_pending *pending;
 	__sigset_t w;
-	int irqstate;
 	__sigset_t x;
 	int sig;
 	struct k_sigaction *k;
@@ -966,8 +933,12 @@ getsigpending(struct thread *thread, int delflag){
 
 	lock = &thread->sigcommon->lock;
 	head = &thread->sigcommon->sigpending;
-	for(;;){
-		irqstate = ihk_mc_spinlock_lock(lock);
+	for(;;) {
+		if (delflag)
+			mcs_rwlock_writer_lock(lock, &mcs_rw_node);
+		else
+			mcs_rwlock_reader_lock(lock, &mcs_rw_node);
+
 		list_for_each_entry_safe(pending, next, head, list){
 			for(x = pending->sigmask.__val[0], sig = 0; x; sig++, x >>= 1);
 			k = thread->sigcommon->action + sig - 1;
@@ -976,17 +947,26 @@ getsigpending(struct thread *thread, int delflag){
 			   (k->sa.sa_handler != (void *)1 &&
 			    k->sa.sa_handler != NULL)){
 				if(!(pending->sigmask.__val[0] & w)){
-					if(delflag)
+					if(delflag)
 						list_del(&pending->list);
-					ihk_mc_spinlock_unlock(lock, irqstate);
+
+					if (delflag)
+						mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
+					else
+						mcs_rwlock_reader_unlock(lock, &mcs_rw_node);
 					return pending;
 				}
 			}
 		}
-		ihk_mc_spinlock_unlock(lock, irqstate);
+
+		if (delflag)
+			mcs_rwlock_writer_unlock(lock, &mcs_rw_node);
+		else
+			mcs_rwlock_reader_unlock(lock, &mcs_rw_node);
+
 		if(lock == &thread->sigpendinglock)
 			return NULL;
+
 		lock = &thread->sigpendinglock;
 		head = &thread->sigpending;
 	}
```
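`getsigpending()` switches the signal-pending lists from a spinlock to an MCS reader/writer lock: lookups that merely scan the list take it as readers and can proceed concurrently, while only the dequeuing path (`delflag`) needs writer exclusivity because it may unlink an entry. The take/release pattern, extracted as a sketch with types from the header above:

```c
/* Illustrative helper mirroring the locking choice in the diff:
 * reader mode for a read-only walk, writer mode when the walk may
 * call list_del() on an entry. */
void scan_pending(mcs_rwlock_lock_t *lock,
		  struct mcs_rwlock_node_irqsave *node, int delflag)
{
	if (delflag)
		mcs_rwlock_writer_lock(lock, node);   /* exclusive: may unlink */
	else
		mcs_rwlock_reader_lock(lock, node);   /* shared: scan only */

	/* ... walk the sigpending list ... */

	if (delflag)
		mcs_rwlock_writer_unlock(lock, node);
	else
		mcs_rwlock_reader_unlock(lock, node);
}
```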
```diff
@@ -1034,22 +1014,25 @@ check_signal(unsigned long rc, void *regs0, int num)
 			}
 		}
 		ihk_mc_spinlock_unlock(&(cpu_local_var(runq_lock)), irqstate);
-		return;
+		goto out;
 	}
 
 	if(regs != NULL && !interrupt_from_user(regs)) {
-		return;
+		goto out;
 	}
 
 	for(;;){
 		pending = getsigpending(thread, 1);
 		if(!pending) {
 			dkprintf("check_signal,queue is empty\n");
-			return;
+			goto out;
 		}
 
 		do_signal(rc, regs, thread, pending, num);
 	}
+
+out:
+	return;
 }
 
 unsigned long
@@ -1063,7 +1046,8 @@ do_kill(struct thread *thread, int pid, int tid, int sig, siginfo_t *info,
 	struct thread *tthread = NULL;
 	int i;
 	__sigset_t mask;
-	ihk_spinlock_t *savelock = NULL;
+	mcs_rwlock_lock_t *savelock = NULL;
+	struct mcs_rwlock_node mcs_rw_node;
 	struct list_head *head = NULL;
 	int rc;
 	unsigned long irqstate = 0;
@@ -1247,7 +1231,7 @@ done:
 
 	doint = 0;
 
-	ihk_mc_spinlock_lock_noirq(savelock);
+	mcs_rwlock_writer_lock_noirq(savelock, &mcs_rw_node);
 
 	/* Put signal event even when handler is SIG_IGN or SIG_DFL
 	because target ptraced thread must call ptrace_report_signal
@@ -1286,7 +1270,7 @@ done:
 			}
 		}
 	}
-	ihk_mc_spinlock_unlock_noirq(savelock);
+	mcs_rwlock_writer_unlock_noirq(savelock, &mcs_rw_node);
 	cpu_restore_interrupt(irqstate);
 
 	if (doint && !(mask & tthread->sigmask.__val[0])) {
@@ -1757,7 +1741,8 @@ int arch_map_vdso(struct process_vm *vm)
 	vrflags = VR_REMOTE;
 	vrflags |= VR_PROT_READ | VR_PROT_EXEC;
 	vrflags |= VRFLAG_PROT_TO_MAXPROT(vrflags);
-	error = add_process_memory_range(vm, (intptr_t)s, (intptr_t)e, NOPHYS, vrflags, NULL, 0, PAGE_SHIFT);
+	error = add_process_memory_range(vm, (intptr_t)s, (intptr_t)e,
+			NOPHYS, vrflags, NULL, 0, PAGE_SHIFT, NULL);
 	if (error) {
 		ekprintf("ERROR: adding memory range for vdso. %d\n", error);
 		goto out;
@@ -1788,7 +1773,8 @@ int arch_map_vdso(struct process_vm *vm)
 	vrflags = VR_REMOTE;
 	vrflags |= VR_PROT_READ;
 	vrflags |= VRFLAG_PROT_TO_MAXPROT(vrflags);
-	error = add_process_memory_range(vm, (intptr_t)s, (intptr_t)e, NOPHYS, vrflags, NULL, 0, PAGE_SHIFT);
+	error = add_process_memory_range(vm, (intptr_t)s, (intptr_t)e,
+			NOPHYS, vrflags, NULL, 0, PAGE_SHIFT, NULL);
 	if (error) {
 		ekprintf("ERROR: adding memory range for vvar. %d\n", error);
 		goto out;
```
arch/x86/tools/eclair-dump-backtrace.exp.in (new executable file, 67 lines)

```diff
@@ -0,0 +1,67 @@
+#!/usr/bin/expect
+
+set INST_DIR "@prefix@"
+
+spawn $INST_DIR/bin/eclair -d /tmp/mckernel.dump -k $INST_DIR/smp-x86/kernel/mckernel.img -i
+
+set state "init"
+set thread_id 0
+
+expect {
+	"in ?? ()" {
+		switch -- $state {
+			"thread_chosen" {
+				set state "thread_skip"
+			}
+			"thread_bt" {
+				set state "thread_skip"
+			}
+		}
+
+		exp_continue
+	}
+	"(eclair) " {
+		switch -- $state {
+			"init" {
+				set state "threads_list"
+				send "info threads\r"
+			}
+			"threads_list" {
+				incr thread_id
+				set state "thread_chosen"
+				send "thread $thread_id\r"
+			}
+			"thread_skip" {
+				incr thread_id
+				set state "thread_chosen"
+				send "thread $thread_id\r"
+			}
+			"thread_chosen" {
+				set state "thread_bt"
+				send "bt\r"
+			}
+		}
+
+		exp_continue
+	}
+	"Type <return> to continue, or q <return> to quit" {
+		switch -- $state {
+			"threads_list" {
+				send "\r"
+			}
+			"thread_bt" {
+				send "\r"
+			}
+			"thread_skip" {
+				send "q\r"
+			}
+		}
+		exp_continue
+	}
+	" not known." {
+		expect "(eclair) " { send "quit\r" }
+		expect "Quit anyway? (y or n) " { send "y\r" }
+		exit 0
+	}
+}
```
```diff
@@ -3,7 +3,7 @@ Description=irqbalance daemon
 After=syslog.target
 
 [Service]
-EnvironmentFile=@ETCDIR@/irqbalance_mck
+EnvironmentFile=/tmp/irqbalance_mck
 ExecStart=/usr/sbin/irqbalance --foreground $IRQBALANCE_ARGS
 
 [Install]
```
```diff
@@ -39,7 +39,9 @@ else
 	irqbalance_used="no"
 fi
 
-while getopts :i:k:c:m:o:f: OPT
+turbo=""
+
+while getopts :ti:k:c:m:o:f: OPT
 do
 	case ${OPT} in
 		f) facility=${OPTARG}
@@ -76,6 +78,8 @@ do
 			;;
 		m) mem=${OPTARG}
 			;;
+		t) turbo="turbo"
+			;;
 		*) echo "invalid option -${OPT}" >&2
 			exit 1
 	esac
```
```diff
@@ -100,7 +104,7 @@ error_exit() {
 			;&
 		mcoverlayfs_loaded)
 			if [ "$enable_mcoverlay" == "yes" ]; then
-				rmmod mcoverlay
+				rmmod mcoverlay 2>/dev/null
 			fi
 			;&
 		linux_proc_bind_mounted)
@@ -130,15 +134,7 @@ error_exit() {
 			fi
 			;&
 		mcctrl_loaded)
-			rmmod mcctrl || echo "warning: failed to remove mcctrl" >&2
-			;&
-		mem_reserved)
-			mem=`${SBINDIR}/ihkconfig 0 query mem`
-			if [ "${mem}" != "" ]; then
-				if ! ${SBINDIR}/ihkconfig 0 release mem $mem > /dev/null; then
-					echo "warning: failed to release memory" >&2
-				fi
-			fi
+			rmmod mcctrl 2>/dev/null || echo "warning: failed to remove mcctrl" >&2
 			;&
 		cpus_reserved)
 			cpus=`${SBINDIR}/ihkconfig 0 query cpu`
@@ -148,11 +144,19 @@ error_exit() {
 				fi
 			fi
 			;&
+		mem_reserved)
+			mem=`${SBINDIR}/ihkconfig 0 query mem`
+			if [ "${mem}" != "" ]; then
+				if ! ${SBINDIR}/ihkconfig 0 release mem $mem > /dev/null; then
+					echo "warning: failed to release memory" >&2
+				fi
+			fi
+			;&
 		ihk_smp_loaded)
-			rmmod ihk_smp_x86 || echo "warning: failed to remove ihk_smp_x86" >&2
+			rmmod ihk_smp_x86 2>/dev/null || echo "warning: failed to remove ihk_smp_x86" >&2
 			;&
 		ihk_loaded)
-			rmmod ihk || echo "warning: failed to remove ihk" >&2
+			rmmod ihk 2>/dev/null || echo "warning: failed to remove ihk" >&2
 			;&
 		irqbalance_stopped)
 			if [ "`systemctl status irqbalance_mck.service 2> /dev/null |grep -E 'Active: active'`" != "" ]; then
@@ -170,6 +174,11 @@ error_exit() {
 				fi
 			fi
 			;&
+		aslr_disabled)
+			if [ -f /tmp/mckernel_randomize_va_space ]; then
+				cat /tmp/mckernel_randomize_va_space > /proc/sys/kernel/randomize_va_space
+			fi
+			;&
 		initial)
 			# Nothing more to revert
 			;;
@@ -197,6 +206,9 @@ if [ "${ENABLE_MCOVERLAYFS}" == "yes" ]; then
 	if [ ${linux_version_code} -ge 262144 -a ${linux_version_code} -lt 262400 ]; then
 		enable_mcoverlay="yes"
 	fi
+	if [ ${linux_version_code} -ge 263680 -a ${linux_version_code} -lt 263936 ]; then
+		enable_mcoverlay="yes"
+	fi
 else
 	if [ ${linux_version_code} -eq 199168 -a ${rhel_release} -ge 327 ]; then
 		enable_mcoverlay="yes"
```
```diff
@@ -218,17 +230,23 @@ if [ "$cpus" == "" ]; then
 	fi
 fi
 
+# Disable address space layout randomization
+if [ -f /proc/sys/kernel/randomize_va_space ] && [ "`cat /proc/sys/kernel/randomize_va_space`" != "0" ]; then
+	cat /proc/sys/kernel/randomize_va_space > /tmp/mckernel_randomize_va_space
+	echo "0" > /proc/sys/kernel/randomize_va_space
+fi
+
 # Remove mcoverlay if loaded
 if [ "$enable_mcoverlay" == "yes" ]; then
-	if [ "`lsmod | grep mcoverlay`" != "" ]; then
+	if grep mcoverlay /proc/modules &>/dev/null; then
 		if [ "`cat /proc/mounts | grep /tmp/mcos/mcos0_sys`" != "" ]; then umount -l /tmp/mcos/mcos0_sys; fi
 		if [ "`cat /proc/mounts | grep /tmp/mcos/mcos0_proc`" != "" ]; then umount -l /tmp/mcos/mcos0_proc; fi
 		if [ "`cat /proc/mounts | grep /tmp/mcos/linux_proc`" != "" ]; then umount -l /tmp/mcos/linux_proc; fi
 		if [ "`cat /proc/mounts | grep /tmp/mcos`" != "" ]; then umount -l /tmp/mcos; fi
 		if [ -e /tmp/mcos ]; then rm -rf /tmp/mcos; fi
-		if ! rmmod mcoverlay; then
+		if ! rmmod mcoverlay 2>/dev/null; then
 			echo "error: removing mcoverlay" >&2
-			error_exit "initial"
+			error_exit "aslr_disabled"
 		fi
 	fi
 fi
@@ -238,7 +256,7 @@ if [ "${irqbalance_used}" == "yes" ]; then
 	systemctl stop irqbalance_mck.service 2>/dev/null
 	if ! systemctl stop irqbalance.service 2>/dev/null ; then
 		echo "error: stopping irqbalance" >&2
-		error_exit "initial"
+		error_exit "aslr_disabled"
 	fi;
 fi
 
@@ -253,21 +271,26 @@ if [ ${LOGMODE} -ne 0 ]; then
 fi
 
 # Load IHK if not loaded
-if [ "`lsmod | grep ihk`" == "" ]; then
-	if ! insmod ${KMODDIR}/ihk.ko; then
+if ! grep -E 'ihk\s' /proc/modules &>/dev/null; then
+	if ! insmod ${KMODDIR}/ihk.ko 2>/dev/null; then
 		echo "error: loading ihk" >&2
 		error_exit "irqbalance_stopped"
 	fi
 fi
 
+# Increase swappiness so that we have better chance to allocate memory for IHK
+echo 100 > /proc/sys/vm/swappiness
+
+# Drop Linux caches to free memory
+sync && echo 3 > /proc/sys/vm/drop_caches
+
+# Merge free memory areas into large, physically contigous ones
+echo 1 > /proc/sys/vm/compact_memory 2>/dev/null
+
+sync
+
 # Load IHK-SMP if not loaded and reserve CPUs and memory
-if [ "`lsmod | grep ihk_smp_x86`" == "" ]; then
+if ! grep ihk_smp_x86 /proc/modules &>/dev/null; then
 	ihk_irq=""
 	for i in `seq 64 255`; do
 		if [ ! -d /proc/irq/$i ] && [ "`cat /proc/interrupts | grep ":" | awk '{print $1}' | grep -o '[0-9]*' | grep -e '^$i$'`" == "" ]; then
```
```diff
@@ -279,25 +302,38 @@ if ! grep ihk_smp_x86 /proc/modules &>/dev/null; then
 		echo "error: no IRQ available" >&2
 		error_exit "ihk_loaded"
 	fi
-	if ! insmod ${KMODDIR}/ihk-smp-x86.ko ihk_start_irq=$ihk_irq ihk_ikc_irq_core=$ihk_ikc_irq_core; then
+	if ! insmod ${KMODDIR}/ihk-smp-x86.ko ihk_start_irq=$ihk_irq ihk_ikc_irq_core=$ihk_ikc_irq_core 2>/dev/null; then
 		echo "error: loading ihk-smp-x86" >&2
 		error_exit "ihk_loaded"
 	fi
-	if ! ${SBINDIR}/ihkconfig 0 reserve cpu ${cpus}; then
-		echo "error: reserving CPUs" >&2;
-		error_exit "ihk_smp_loaded"
-	fi
+
+	# Free MCDRAM (special case for OFP SNC-4 mode)
+	if [ "`hostname | grep "c[0-9][0-9][0-9][0-9].ofp"`" != "" ] && [ "`cat /sys/devices/system/node/online`" == "0-7" ]; then
+		for i in 4 5 6 7; do
+			find /sys/devices/system/node/node$i/memory*/ -name "online" | while read f; do
+				echo 0 > $f 2>&1 > /dev/null;
+			done
+			find /sys/devices/system/node/node$i/memory*/ -name "online" | while read f; do
+				echo 1 > $f 2>&1 > /dev/null;
+			done
+		done
+	fi
+
 	if ! ${SBINDIR}/ihkconfig 0 reserve mem ${mem}; then
 		echo "error: reserving memory" >&2
-		error_exit "cpus_reserved"
+		error_exit "ihk_smp_loaded"
 	fi
+	if ! ${SBINDIR}/ihkconfig 0 reserve cpu ${cpus}; then
+		echo "error: reserving CPUs" >&2;
+		error_exit "mem_reserved"
+	fi
 fi
 
 # Load mcctrl if not loaded
-if [ "`lsmod | grep mcctrl`" == "" ]; then
-	if ! insmod ${KMODDIR}/mcctrl.ko; then
+if ! grep mcctrl /proc/modules &>/dev/null; then
+	if ! insmod ${KMODDIR}/mcctrl.ko 2>/dev/null; then
 		echo "error: inserting mcctrl.ko" >&2
-		error_exit "mem_reserved"
+		error_exit "cpus_reserved"
 	fi
 fi
 
@@ -337,7 +373,7 @@ if ! ${SBINDIR}/ihkosctl 0 load ${KERNDIR}/mckernel.img; then
 fi
 
 # Set kernel arguments
-if ! ${SBINDIR}/ihkosctl 0 kargs "hidos ksyslogd=${LOGMODE}"; then
+if ! ${SBINDIR}/ihkosctl 0 kargs "hidos ksyslogd=${LOGMODE} $turbo"; then
 	echo "error: setting kernel arguments" >&2
 	error_exit "os_created"
 fi
```
```diff
@@ -355,27 +391,37 @@ fi
 
 # Overlay /proc, /sys with McKernel specific contents
 if [ "$enable_mcoverlay" == "yes" ]; then
-	if [ ! -e /tmp/mcos ]; then mkdir -p /tmp/mcos; fi
+	if [ ! -e /tmp/mcos ]; then
+		mkdir -p /tmp/mcos;
+	fi
 	if ! mount -t tmpfs tmpfs /tmp/mcos; then
 		echo "error: mount /tmp/mcos" >&2
 		error_exit "tmp_mcos_created"
 	fi
-	if [ ! -e /tmp/mcos/linux_proc ]; then mkdir -p /tmp/mcos/linux_proc; fi
+	if [ ! -e /tmp/mcos/linux_proc ]; then
+		mkdir -p /tmp/mcos/linux_proc;
+	fi
 	if ! mount --bind /proc /tmp/mcos/linux_proc; then
 		echo "error: mount /tmp/mcos/linux_proc" >&2
 		error_exit "tmp_mcos_mounted"
 	fi
-	if ! insmod ${KMODDIR}/mcoverlay.ko; then
+	if ! insmod ${KMODDIR}/mcoverlay.ko 2>/dev/null; then
 		echo "error: inserting mcoverlay.ko" >&2
 		error_exit "linux_proc_bind_mounted"
 	fi
 	while [ ! -e /proc/mcos0 ]
 	do
-		sleep 1
+		sleep 0.1
 	done
-	if [ ! -e /tmp/mcos/mcos0_proc ]; then mkdir -p /tmp/mcos/mcos0_proc; fi
-	if [ ! -e /tmp/mcos/mcos0_proc_upper ]; then mkdir -p /tmp/mcos/mcos0_proc_upper; fi
-	if [ ! -e /tmp/mcos/mcos0_proc_work ]; then mkdir -p /tmp/mcos/mcos0_proc_work; fi
+	if [ ! -e /tmp/mcos/mcos0_proc ]; then
+		mkdir -p /tmp/mcos/mcos0_proc;
+	fi
+	if [ ! -e /tmp/mcos/mcos0_proc_upper ]; then
+		mkdir -p /tmp/mcos/mcos0_proc_upper;
+	fi
+	if [ ! -e /tmp/mcos/mcos0_proc_work ]; then
+		mkdir -p /tmp/mcos/mcos0_proc_work;
+	fi
 	if ! mount -t mcoverlay mcoverlay -o lowerdir=/proc/mcos0:/proc,upperdir=/tmp/mcos/mcos0_proc_upper,workdir=/tmp/mcos/mcos0_proc_work,nocopyupw,nofscheck /tmp/mcos/mcos0_proc; then
 		echo "error: mounting /tmp/mcos/mcos0_proc" >&2
 		error_exit "mcoverlayfs_loaded"
@@ -387,21 +433,32 @@ if [ "$enable_mcoverlay" == "yes" ]; then
 	do
 		sleep 0.1
 	done
-	if [ ! -e /tmp/mcos/mcos0_sys ]; then mkdir -p /tmp/mcos/mcos0_sys; fi
-	if [ ! -e /tmp/mcos/mcos0_sys_upper ]; then mkdir -p /tmp/mcos/mcos0_sys_upper; fi
-	if [ ! -e /tmp/mcos/mcos0_sys_work ]; then mkdir -p /tmp/mcos/mcos0_sys_work; fi
+	if [ ! -e /tmp/mcos/mcos0_sys ]; then
+		mkdir -p /tmp/mcos/mcos0_sys;
+	fi
+	if [ ! -e /tmp/mcos/mcos0_sys_upper ]; then
+		mkdir -p /tmp/mcos/mcos0_sys_upper;
+	fi
+	if [ ! -e /tmp/mcos/mcos0_sys_work ]; then
+		mkdir -p /tmp/mcos/mcos0_sys_work;
+	fi
 	if ! mount -t mcoverlay mcoverlay -o lowerdir=/sys/devices/virtual/mcos/mcos0/sys:/sys,upperdir=/tmp/mcos/mcos0_sys_upper,workdir=/tmp/mcos/mcos0_sys_work,nocopyupw,nofscheck /tmp/mcos/mcos0_sys; then
 		echo "error: mount /tmp/mcos/mcos0_sys" >&2
 		error_exit "mcos_proc_mounted"
 	fi
+	# TODO: How de we revert this in case of failure??
+	mount --make-rprivate /sys
+
 	touch /tmp/mcos/mcos0_proc/mckernel
+
+	rm -rf /tmp/mcos/mcos0_sys/setup_complete
+
 	# Hide NUMA related files which are outside the LWK partition
 	for cpuid in `find /sys/devices/system/cpu/* -maxdepth 0 -name "cpu[0123456789]*" -printf "%f "`; do
 		if [ ! -e "/sys/devices/virtual/mcos/mcos0/sys/devices/system/cpu/$cpuid" ]; then
 			rm -rf /tmp/mcos/mcos0_sys/devices/system/cpu/$cpuid
 			rm -rf /tmp/mcos/mcos0_sys/bus/cpu/devices/$cpuid
 			rm -rf /tmp/mcos/mcos0_sys/bus/cpu/drivers/processor/$cpuid
 		else
 			for nodeid in `find /sys/devices/system/cpu/$cpuid/* -maxdepth 0 -name "node[0123456789]*" -printf "%f "`; do
 				if [ ! -e "/sys/devices/virtual/mcos/mcos0/sys/devices/system/cpu/$cpuid/$nodeid" ]; then
@@ -412,7 +469,8 @@ if [ "$enable_mcoverlay" == "yes" ]; then
 	done
 	for nodeid in `find /sys/devices/system/node/* -maxdepth 0 -name "node[0123456789]*" -printf "%f "`; do
 		if [ ! -e "/sys/devices/virtual/mcos/mcos0/sys/devices/system/node/$nodeid" ]; then
-			rm -rf /tmp/mcos/mcos0_sys/devices/system/node/$nodeid
+			rm -rf /tmp/mcos/mcos0_sys/devices/system/node/$nodeid/*
+			rm -rf /tmp/mcos/mcos0_sys/bus/node/devices/$nodeid
 		else
 			# Delete non-existent symlinks
 			for cpuid in `find /sys/devices/system/node/$nodeid/* -maxdepth 0 -name "cpu[0123456789]*" -printf "%f "`; do
@@ -424,6 +482,7 @@ if [ "$enable_mcoverlay" == "yes" ]; then
 			rm -f /tmp/mcos/mcos0_sys/devices/system/node/$nodeid/memory*
 		fi
 	done
+	rm -f /tmp/mcos/mcos0_sys/devices/system/node/has_*
 	for cpuid in `find /sys/bus/cpu/devices/* -maxdepth 0 -name "cpu[0123456789]*" -printf "%f "`; do
 		if [ ! -e "/sys/devices/virtual/mcos/mcos0/sys/bus/cpu/devices/$cpuid" ]; then
 			rm -rf /tmp/mcos/mcos0_sys/bus/cpu/devices/$cpuid
@@ -448,8 +507,9 @@ if [ "${irqbalance_used}" == "yes" ]; then
 
 	banirq=`cat /proc/interrupts| perl -e 'while(<>) { if(/^\s*(\d+).*IHK\-SMP\s*$/) {print $1;}}'`
 
-	sed "s/%mask%/$smp_affinity_mask/g" $ETCDIR/irqbalance_mck.in | sed "s/%banirq%/$banirq/g" > $ETCDIR/irqbalance_mck
-	if ! systemctl link $ETCDIR/irqbalance_mck.service >/dev/null 2>/dev/null; then
+	sed "s/%mask%/$smp_affinity_mask/g" $ETCDIR/irqbalance_mck.in | sed "s/%banirq%/$banirq/g" > /tmp/irqbalance_mck
+	systemctl disable irqbalance_mck.service >/dev/null 2>/dev/null
+	if ! systemctl link $ETCDIR/irqbalance_mck.service >/dev/null 2>/dev/null; then
 		echo "error: linking irqbalance_mck" >&2
 		error_exit "mcos_sys_mounted"
 	fi
```
```diff
@@ -18,7 +18,7 @@ mem=""
 cpus=""
 
 # No SMP module? Exit.
-if [ "`lsmod | grep ihk_smp_x86`" == "" ]; then exit 0; fi
+if ! grep ihk_smp_x86 /proc/modules &>/dev/null; then exit 0; fi
 
 # Destroy all LWK instances
 if ls /dev/mcos* 1>/dev/null 2>&1; then
@@ -59,36 +59,36 @@ if [ "${mem}" != "" ]; then
 fi
 
 # Remove delegator if loaded
-if [ "`lsmod | grep mcctrl`" != "" ]; then
-	if ! rmmod mcctrl; then
+if grep mcctrl /proc/modules &>/dev/null; then
+	if ! rmmod mcctrl 2>/dev/null; then
 		echo "error: removing mcctrl" >&2
 		exit 1
 	fi
 fi
 
 # Remove mcoverlay if loaded
-if [ "`lsmod | grep mcoverlay`" != "" ]; then
+if grep mcoverlay /proc/modules &>/dev/null; then
 	if [ "`cat /proc/mounts | grep /tmp/mcos/mcos0_sys`" != "" ]; then umount -l /tmp/mcos/mcos0_sys; fi
 	if [ "`cat /proc/mounts | grep /tmp/mcos/mcos0_proc`" != "" ]; then umount -l /tmp/mcos/mcos0_proc; fi
 	if [ "`cat /proc/mounts | grep /tmp/mcos/linux_proc`" != "" ]; then umount -l /tmp/mcos/linux_proc; fi
 	if [ "`cat /proc/mounts | grep /tmp/mcos`" != "" ]; then umount -l /tmp/mcos; fi
 	if [ -e /tmp/mcos ]; then rm -rf /tmp/mcos; fi
-	if ! rmmod mcoverlay; then
+	if ! rmmod mcoverlay 2>/dev/null; then
 		echo "warning: failed to remove mcoverlay" >&2
 	fi
 fi
 
 # Remove SMP module
-if [ "`lsmod | grep ihk_smp_x86`" != "" ]; then
-	if ! rmmod ihk_smp_x86; then
+if grep ihk_smp_x86 /proc/modules &>/dev/null; then
+	if ! rmmod ihk_smp_x86 2>/dev/null; then
 		echo "error: removing ihk_smp_x86" >&2
 		exit 1
 	fi
 fi
 
 # Remove core module
-if [ "`lsmod | grep -E 'ihk\s' | awk '{print $1}'`" != "" ]; then
-	if ! rmmod ihk; then
+if grep -E 'ihk\s' /proc/modules &>/dev/null; then
+	if ! rmmod ihk 2>/dev/null; then
 		echo "error: removing ihk" >&2
 		exit 1
 	fi
@@ -113,3 +113,10 @@ if [ "`systemctl status irqbalance_mck.service 2> /dev/null |grep -E 'Active: ac
 	fi
 fi
+
+# Re-enable ASLR
+if [ -f /tmp/mckernel_randomize_va_space ]; then
+	cat /tmp/mckernel_randomize_va_space > /proc/sys/kernel/randomize_va_space
+fi
+
+# Set back default swappiness
+echo 60 > /proc/sys/vm/swappiness
```
configure (vendored, 31 lines changed)

```diff
@@ -2922,6 +2922,7 @@ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $
 ac_compiler_gnu=$ac_cv_c_compiler_gnu
 
 	XCC=$CC
+	CFLAGS="$CFLAGS -ffreestanding -fno-tree-loop-distribute-patterns"
 	;;
 builtin-mic)
 	ARCH=k1om
@@ -3117,6 +3118,31 @@ _ACEOF
 fi
 
 
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking System.map for symbol sys_umount" >&5
+$as_echo_n "checking System.map for symbol sys_umount... " >&6; }
+mcctrl_addr=`eval $MCCTRL_LINUX_SYMTAB_CMD | grep " sys_umount\$" | cut -d\  -f1`
+if test -z $mcctrl_addr; then
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5
+$as_echo "not found" >&6; }
+else
+	mcctrl_result=$mcctrl_addr
+	mcctrl_addr="0x$mcctrl_addr"
+
+	if `eval $MCCTRL_LINUX_SYMTAB_CMD | grep " __ksymtab_sys_umount\$" >/dev/null`; then
+		mcctrl_result="exported"
+		mcctrl_addr="0"
+	fi
+
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $mcctrl_result" >&5
+$as_echo "$mcctrl_result" >&6; }
+
+cat >>confdefs.h <<_ACEOF
+#define MCCTRL_KSYM_sys_umount $mcctrl_addr
+_ACEOF
+
+fi
+
+
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking System.map for symbol sys_unshare" >&5
 $as_echo_n "checking System.map for symbol sys_unshare... " >&6; }
 mcctrl_addr=`eval $MCCTRL_LINUX_SYMTAB_CMD | grep " sys_unshare\$" | cut -d\  -f1`
@@ -3887,11 +3913,12 @@ fi
 
 
 
 ac_config_headers="$ac_config_headers executer/config.h"
 
-ac_config_files="$ac_config_files Makefile executer/user/Makefile executer/kernel/mcctrl/Makefile executer/kernel/mcctrl/arch/x86_64/Makefile executer/kernel/mcoverlayfs/Makefile executer/kernel/mcoverlayfs/linux-3.10.0-327.36.1.el7/Makefile executer/kernel/mcoverlayfs/linux-4.0.9/Makefile kernel/Makefile kernel/Makefile.build arch/x86/tools/mcreboot-attached-mic.sh arch/x86/tools/mcshutdown-attached-mic.sh arch/x86/tools/mcreboot-builtin-x86.sh arch/x86/tools/mcreboot-smp-x86.sh arch/x86/tools/mcstop+release-smp-x86.sh arch/x86/tools/mcshutdown-builtin-x86.sh arch/x86/tools/mcreboot.1:arch/x86/tools/mcreboot.1in arch/x86/tools/irqbalance_mck.service arch/x86/tools/irqbalance_mck.in"
+ac_config_files="$ac_config_files Makefile executer/user/Makefile executer/kernel/mcctrl/Makefile executer/kernel/mcctrl/arch/x86_64/Makefile executer/kernel/mcoverlayfs/Makefile executer/kernel/mcoverlayfs/linux-3.10.0-327.36.1.el7/Makefile executer/kernel/mcoverlayfs/linux-4.0.9/Makefile executer/kernel/mcoverlayfs/linux-4.6.7/Makefile kernel/Makefile kernel/Makefile.build arch/x86/tools/mcreboot-attached-mic.sh arch/x86/tools/mcshutdown-attached-mic.sh arch/x86/tools/mcreboot-builtin-x86.sh arch/x86/tools/mcreboot-smp-x86.sh arch/x86/tools/mcstop+release-smp-x86.sh arch/x86/tools/eclair-dump-backtrace.exp arch/x86/tools/mcshutdown-builtin-x86.sh arch/x86/tools/mcreboot.1:arch/x86/tools/mcreboot.1in arch/x86/tools/irqbalance_mck.service arch/x86/tools/irqbalance_mck.in"
 
 
 if test "x$enable_dcfa" = xyes; then :
@@ -4597,6 +4624,7 @@ do
     "executer/kernel/mcoverlayfs/Makefile") CONFIG_FILES="$CONFIG_FILES executer/kernel/mcoverlayfs/Makefile" ;;
     "executer/kernel/mcoverlayfs/linux-3.10.0-327.36.1.el7/Makefile") CONFIG_FILES="$CONFIG_FILES executer/kernel/mcoverlayfs/linux-3.10.0-327.36.1.el7/Makefile" ;;
     "executer/kernel/mcoverlayfs/linux-4.0.9/Makefile") CONFIG_FILES="$CONFIG_FILES executer/kernel/mcoverlayfs/linux-4.0.9/Makefile" ;;
+    "executer/kernel/mcoverlayfs/linux-4.6.7/Makefile") CONFIG_FILES="$CONFIG_FILES executer/kernel/mcoverlayfs/linux-4.6.7/Makefile" ;;
     "kernel/Makefile") CONFIG_FILES="$CONFIG_FILES kernel/Makefile" ;;
     "kernel/Makefile.build") CONFIG_FILES="$CONFIG_FILES kernel/Makefile.build" ;;
     "arch/x86/tools/mcreboot-attached-mic.sh") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/mcreboot-attached-mic.sh" ;;
@@ -4604,6 +4632,7 @@ do
     "arch/x86/tools/mcreboot-builtin-x86.sh") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/mcreboot-builtin-x86.sh" ;;
     "arch/x86/tools/mcreboot-smp-x86.sh") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/mcreboot-smp-x86.sh" ;;
    "arch/x86/tools/mcstop+release-smp-x86.sh") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/mcstop+release-smp-x86.sh" ;;
+    "arch/x86/tools/eclair-dump-backtrace.exp") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/eclair-dump-backtrace.exp" ;;
     "arch/x86/tools/mcshutdown-builtin-x86.sh") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/mcshutdown-builtin-x86.sh" ;;
     "arch/x86/tools/mcreboot.1") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/mcreboot.1:arch/x86/tools/mcreboot.1in" ;;
     "arch/x86/tools/irqbalance_mck.service") CONFIG_FILES="$CONFIG_FILES arch/x86/tools/irqbalance_mck.service" ;;
```
```diff
@@ -70,6 +70,7 @@ case $WITH_TARGET in
 	ARCH=`uname -m`
 	AC_PROG_CC
 	XCC=$CC
+	CFLAGS="$CFLAGS -ffreestanding -fno-tree-loop-distribute-patterns"
 	;;
 builtin-mic)
 	ARCH=k1om
@@ -221,6 +222,7 @@ AC_DEFUN([MCCTRL_FIND_KSYM],[
 ])
 
 MCCTRL_FIND_KSYM([sys_mount])
+MCCTRL_FIND_KSYM([sys_umount])
 MCCTRL_FIND_KSYM([sys_unshare])
 MCCTRL_FIND_KSYM([zap_page_range])
 MCCTRL_FIND_KSYM([vdso_image_64])
@@ -285,6 +287,7 @@ AC_SUBST(ETCDIR)
 AC_SUBST(KMODDIR)
 AC_SUBST(KERNDIR)
 AC_SUBST(MANDIR)
+AC_SUBST(CFLAGS)
 AC_SUBST(ENABLE_MCOVERLAYFS)
 
 AC_SUBST(IHK_VERSION)
@@ -304,6 +307,7 @@ AC_CONFIG_FILES([
 	executer/kernel/mcoverlayfs/Makefile
 	executer/kernel/mcoverlayfs/linux-3.10.0-327.36.1.el7/Makefile
 	executer/kernel/mcoverlayfs/linux-4.0.9/Makefile
+	executer/kernel/mcoverlayfs/linux-4.6.7/Makefile
 	kernel/Makefile
 	kernel/Makefile.build
 	arch/x86/tools/mcreboot-attached-mic.sh
@@ -311,6 +315,7 @@ AC_CONFIG_FILES([
 	arch/x86/tools/mcreboot-builtin-x86.sh
 	arch/x86/tools/mcreboot-smp-x86.sh
 	arch/x86/tools/mcstop+release-smp-x86.sh
+	arch/x86/tools/eclair-dump-backtrace.exp
 	arch/x86/tools/mcshutdown-builtin-x86.sh
 	arch/x86/tools/mcreboot.1:arch/x86/tools/mcreboot.1in
 	arch/x86/tools/irqbalance_mck.service
```
```diff
@@ -51,6 +51,9 @@
 /* Define to address of kernel symbol sys_readlink, or 0 if exported */
 #undef MCCTRL_KSYM_sys_readlink
 
+/* Define to address of kernel symbol sys_umount, or 0 if exported */
+#undef MCCTRL_KSYM_sys_umount
+
 /* Define to address of kernel symbol sys_unshare, or 0 if exported */
 #undef MCCTRL_KSYM_sys_unshare
 
```
@ -41,6 +41,8 @@
|
||||
#define MCEXEC_UP_NEW_PROCESS 0x30a02909
|
||||
#define MCEXEC_UP_GET_CRED 0x30a0290a
|
||||
#define MCEXEC_UP_GET_CREDV 0x30a0290b
|
||||
#define MCEXEC_UP_GET_NODES 0x30a0290c
|
||||
#define MCEXEC_UP_GET_CPUSET 0x30a0290d
|
||||
|
||||
#define MCEXEC_UP_PREPARE_DMA 0x30a02910
|
||||
#define MCEXEC_UP_FREE_DMA 0x30a02911
|
||||
@ -49,7 +51,8 @@
|
||||
#define MCEXEC_UP_CLOSE_EXEC 0x30a02913
|
||||
|
||||
#define MCEXEC_UP_SYS_MOUNT 0x30a02914
|
||||
#define MCEXEC_UP_SYS_UNSHARE 0x30a02915
|
||||
#define MCEXEC_UP_SYS_UMOUNT 0x30a02915
|
||||
#define MCEXEC_UP_SYS_UNSHARE 0x30a02916
|
||||
|
||||
#define MCEXEC_UP_DEBUG_LOG 0x40000000
|
||||
|
||||
@ -77,6 +80,18 @@ struct program_image_section {
|
||||
#define SHELL_PATH_MAX_LEN 1024
|
||||
#define MCK_RLIM_MAX 20
|
||||
|
||||
struct get_cpu_set_arg {
|
||||
int nr_processes;
|
||||
void *cpu_set;
|
||||
size_t cpu_set_size; // Size in bytes
|
||||
int *target_core;
|
||||
int *mcexec_linux_numa; // NUMA domain to bind mcexec to
|
||||
};
|
||||
|
||||
#define PLD_CPU_SET_MAX_CPUS 1024
|
||||
typedef unsigned long __cpu_set_unit;
|
||||
#define PLD_CPU_SET_SIZE (PLD_CPU_SET_MAX_CPUS / (8 * sizeof(__cpu_set_unit)))

struct program_load_desc {
int num_sections;
int status;
@ -106,6 +121,7 @@ struct program_load_desc {
struct rlimit rlimit[MCK_RLIM_MAX];
unsigned long interp_align;
char shell_path[SHELL_PATH_MAX_LEN];
__cpu_set_unit cpu_set[PLD_CPU_SET_SIZE];
struct program_image_section sections[0];
};

@ -196,6 +212,10 @@ struct sys_mount_desc {
void *data;
};

struct sys_umount_desc {
char *dir_name;
};

struct sys_unshare_desc {
unsigned long unshare_flags;
};
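For illustration, a userspace caller would fill one of these descriptors and pass its address as the ioctl argument; the device fd and wrapper function below are assumptions, only the request code and descriptor layout come from this header:

#include <sys/ioctl.h>

static int mcexec_do_umount(int mcos_fd, char *dir)
{
struct sys_umount_desc desc = { .dir_name = dir };

/* kernel side copies the descriptor in and invokes sys_umount */
return ioctl(mcos_fd, MCEXEC_UP_SYS_UMOUNT, &desc);
}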

@ -64,6 +64,8 @@ reserve_user_space(struct mcctrl_usrdata *usrdata, unsigned long *startp, unsign
unsigned long start = 0L;
unsigned long end;

mutex_lock(&usrdata->reserve_lock);

#define DESIRED_USER_END 0x800000000000
#define GAP_FOR_MCEXEC 0x008000000000UL
end = DESIRED_USER_END;
@ -81,6 +83,8 @@ reserve_user_space(struct mcctrl_usrdata *usrdata, unsigned long *startp, unsign
up_write(&current->mm->mmap_sem);
#endif

mutex_unlock(&usrdata->reserve_lock);

if (IS_ERR_VALUE(start)) {
return start;
}
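These constants place the McKernel user range: DESIRED_USER_END is 0x800000000000 = 2^47, the top of the canonical x86_64 user address space, and GAP_FOR_MCEXEC reserves 0x008000000000 = 2^39 bytes (512 GiB) below it for mcexec's own mappings. A quick consistency sketch:

_Static_assert(0x800000000000UL == 1UL << 47, "user end is 2^47");
_Static_assert(0x008000000000UL == 1UL << 39, "mcexec gap is 512 GiB");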

@ -34,6 +34,7 @@
#include <linux/version.h>
#include <linux/semaphore.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/io.h>
@ -66,21 +67,33 @@ int (*mcctrl_sys_mount)(char *dev_name,char *dir_name, char *type, unsigned long
(int_star_fn_char_char_char_ulong_void_t)
MCCTRL_KSYM_sys_mount;
#else // exported
int (*mcctrl_sys_mount)(char *dev_name,char *dir_name, char *type, unsigned long flags, void *data) = NULL;
int (*mcctrl_sys_mount)(char *dev_name,char *dir_name, char *type, unsigned long flags, void *data) = sys_mount;
#endif
#endif

#ifdef MCCTRL_KSYM_sys_umount
#if MCCTRL_KSYM_sys_umount
typedef int (*int_fn_char_star_int_t)(char *, int);
int (*mcctrl_sys_umount)(char *dir_name, int flags) =
(int_fn_char_star_int_t)
MCCTRL_KSYM_sys_umount;
#else // exported
int (*mcctrl_sys_umount)(char *dir_name, int flags) = sys_umount;
#endif
#endif

//static DECLARE_WAIT_QUEUE_HEAD(wq_prepare);
//extern struct mcctrl_channel *channels;
int mcctrl_ikc_set_recv_cpu(ihk_os_t os, int cpu);

static long mcexec_prepare_image(ihk_os_t os,
struct program_load_desc * __user udesc)
{
struct program_load_desc *desc, *pdesc;
struct program_load_desc *desc = NULL;
struct program_load_desc *pdesc = NULL;
struct ikc_scd_packet isp;
void *args, *envs;
long ret = 0;
void *args = NULL;
void *envs = NULL;
int ret = 0;
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct mcctrl_per_proc_data *ppd = NULL;
int num_sections;
@ -96,48 +109,59 @@ static long mcexec_prepare_image(ihk_os_t os,
sizeof(struct program_load_desc))) {
printk("%s: error: copying program_load_desc\n",
__FUNCTION__);
kfree(desc);
return -EFAULT;
ret = -EFAULT;
goto free_out;
}

ppd = mcctrl_get_per_proc_data(usrdata, desc->pid);
if (!ppd) {
printk("%s: ERROR: no per process data for PID %d\n",
__FUNCTION__, desc->pid);
ret = -EINVAL;
goto free_out;
}

num_sections = desc->num_sections;

if (num_sections <= 0 || num_sections > 16) {
printk("# of sections: %d\n", num_sections);
return -EINVAL;
printk("%s: ERROR: # of sections: %d\n",
__FUNCTION__, num_sections);
ret = -EINVAL;
goto put_and_free_out;
}

pdesc = kmalloc(sizeof(struct program_load_desc) +
sizeof(struct program_image_section)
* num_sections, GFP_KERNEL);
memcpy(pdesc, desc, sizeof(struct program_load_desc));

if (copy_from_user(pdesc->sections, udesc->sections,
sizeof(struct program_image_section)
* num_sections)) {
kfree(desc);
kfree(pdesc);
return -EFAULT;
ret = -EFAULT;
goto put_and_free_out;
}

kfree(desc);
desc = NULL;

pdesc->pid = task_tgid_vnr(current);

if (reserve_user_space(usrdata, &pdesc->user_start, &pdesc->user_end)) {
kfree(pdesc);
return -ENOMEM;
ret = -ENOMEM;
goto put_and_free_out;
}

args = kmalloc(pdesc->args_len, GFP_KERNEL);
if (copy_from_user(args, pdesc->args, pdesc->args_len)) {
kfree(args);
kfree(pdesc);
return -EFAULT;
ret = -EFAULT;
goto put_and_free_out;
}

envs = kmalloc(pdesc->envs_len, GFP_KERNEL);
if (copy_from_user(envs, pdesc->envs, pdesc->envs_len)) {
ret = -EFAULT;
goto free_out;
goto put_and_free_out;
}

pdesc->args = (void*)virt_to_phys(args);
@ -155,20 +179,18 @@ static long mcexec_prepare_image(ihk_os_t os,
dprintk("%p (%lx)\n", pdesc, isp.arg);

pdesc->status = 0;
mb();
mcctrl_ikc_send(os, pdesc->cpu, &isp);

while (wait_event_interruptible(usrdata->wq_prepare, pdesc->status) != 0);

if(pdesc->err < 0){
ret = pdesc->err;
goto free_out;
ret = wait_event_interruptible(ppd->wq_prepare, pdesc->status);
if (ret < 0) {
printk("%s: ERROR after wait: %d\n", __FUNCTION__, ret);
goto put_and_free_out;
}

ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
if (!ppd) {
printk("ERROR: no per process data for PID %d\n", task_tgid_vnr(current));
ret = -EINVAL;
goto free_out;
if (pdesc->err < 0) {
ret = pdesc->err;
goto put_and_free_out;
}

/* Update rpgtable */
@ -177,7 +199,7 @@ static long mcexec_prepare_image(ihk_os_t os,
if (copy_to_user(udesc, pdesc, sizeof(struct program_load_desc) +
sizeof(struct program_image_section) * num_sections)) {
ret = -EFAULT;
goto free_out;
goto put_and_free_out;
}

dprintk("%s: pid %d, rpgtable: 0x%lx added\n",
@ -185,10 +207,13 @@ static long mcexec_prepare_image(ihk_os_t os,

ret = 0;

put_and_free_out:
mcctrl_put_per_proc_data(ppd);
free_out:
kfree(args);
kfree(pdesc);
kfree(envs);
kfree(desc);

return ret;
}
@ -281,8 +306,9 @@ int mcexec_transfer_image(ihk_os_t os, struct remote_transfer *__user upt)

//extern unsigned long last_thread_exec;

struct handlerinfo {
int pid;
struct release_handler_info {
int pid;
int cpu;
};

static long mcexec_debug_log(ihk_os_t os, unsigned long arg)
@ -296,20 +322,35 @@ static long mcexec_debug_log(ihk_os_t os, unsigned long arg)
return 0;
}

int mcexec_close_exec(ihk_os_t os);

static void release_handler(ihk_os_t os, void *param)
{
struct handlerinfo *info = param;
struct release_handler_info *info = param;
struct ikc_scd_packet isp;
int os_ind = ihk_host_os_get_index(os);
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct mcctrl_per_proc_data *ppd = NULL;

ppd = mcctrl_get_per_proc_data(usrdata, info->pid);
if (ppd) {
mcctrl_put_per_proc_data(ppd);
mcexec_close_exec(os);
}

memset(&isp, '\0', sizeof isp);
isp.msg = SCD_MSG_CLEANUP_PROCESS;
isp.pid = info->pid;

mcctrl_ikc_send(os, 0, &isp);
if(os_ind >= 0)
dprintk("%s: SCD_MSG_CLEANUP_PROCESS, info: %p, cpu: %d\n",
__FUNCTION__, info, info->cpu);
mcctrl_ikc_send(os, info->cpu, &isp);
if (os_ind >= 0) {
delete_pid_entry(os_ind, info->pid);
}
kfree(param);
dprintk("%s: SCD_MSG_CLEANUP_PROCESS, info: %p OK\n",
__FUNCTION__, info);
}

static long mcexec_newprocess(ihk_os_t os,
@ -317,12 +358,12 @@ static long mcexec_newprocess(ihk_os_t os,
struct file *file)
{
struct newprocess_desc desc;
struct handlerinfo *info;
struct release_handler_info *info;

if (copy_from_user(&desc, udesc, sizeof(struct newprocess_desc))) {
return -EFAULT;
}
info = kmalloc(sizeof(struct handlerinfo), GFP_KERNEL);
info = kmalloc(sizeof(struct release_handler_info), GFP_KERNEL);
info->pid = desc.pid;
ihk_os_register_release_handler(file, release_handler, info);
return 0;
@ -336,7 +377,7 @@ static long mcexec_start_image(ihk_os_t os,
struct ikc_scd_packet isp;
struct mcctrl_channel *c;
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct handlerinfo *info;
struct release_handler_info *info;

desc = kmalloc(sizeof(*desc), GFP_KERNEL);
if (!desc) {
@ -351,8 +392,9 @@ static long mcexec_start_image(ihk_os_t os,
return -EFAULT;
}

info = kmalloc(sizeof(struct handlerinfo), GFP_KERNEL);
info = kmalloc(sizeof(struct release_handler_info), GFP_KERNEL);
info->pid = desc->pid;
info->cpu = desc->cpu;
ihk_os_register_release_handler(file, release_handler, info);

c = usrdata->channels + desc->cpu;
@ -403,7 +445,7 @@ static long mcexec_send_signal(ihk_os_t os, struct signal_desc *sigparam)
isp.pid = sig.pid;
isp.arg = virt_to_phys(msigp);

if((rc = mcctrl_ikc_send(os, sig.cpu, &isp)) < 0){
if ((rc = mcctrl_ikc_send(os, sig.cpu, &isp)) < 0) {
printk("mcexec_send_signal: mcctrl_ikc_send ret=%d\n", rc);
return rc;
}
@ -439,6 +481,246 @@ static long mcexec_get_cpu(ihk_os_t os)
return info->n_cpus;
}

static long mcexec_get_nodes(ihk_os_t os)
{
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);

if (!usrdata || !usrdata->mem_info)
return -EINVAL;

return usrdata->mem_info->n_numa_nodes;
}

extern int linux_numa_2_mckernel_numa(struct mcctrl_usrdata *udp, int numa_id);
extern int mckernel_cpu_2_linux_cpu(struct mcctrl_usrdata *udp, int cpu_id);

static long mcexec_get_cpuset(ihk_os_t os, unsigned long arg)
{
struct mcctrl_usrdata *udp = ihk_host_os_get_usrdata(os);
struct mcctrl_part_exec *pe;
struct get_cpu_set_arg req;
struct cpu_topology *cpu_top, *cpu_top_i;
struct cache_topology *cache_top;
int cpu, cpus_assigned, cpus_to_assign, cpu_prev;
int ret = 0;
int mcexec_linux_numa;
cpumask_t cpus_used;
cpumask_t cpus_to_use;
struct mcctrl_per_proc_data *ppd;

if (!udp) {
return -EINVAL;
}

/* Look up per-process structure */
ppd = mcctrl_get_per_proc_data(udp, task_tgid_vnr(current));
if (!ppd) {
return -EINVAL;
}

pe = &udp->part_exec;

mutex_lock(&pe->lock);

if (copy_from_user(&req, (void *)arg, sizeof(req))) {
printk("%s: error copying user request\n", __FUNCTION__);
ret = -EINVAL;
goto put_and_unlock_out;
}

memcpy(&cpus_used, &pe->cpus_used, sizeof(cpumask_t));
memset(&cpus_to_use, 0, sizeof(cpus_to_use));

/* First process to enter CPU partitioning */
if (pe->nr_processes == -1) {
pe->nr_processes = req.nr_processes;
pe->nr_processes_left = req.nr_processes;
dprintk("%s: nr_processes: %d (partitioned exec starts)\n",
__FUNCTION__,
pe->nr_processes);
}

if (pe->nr_processes != req.nr_processes) {
printk("%s: error: requested number of processes"
" doesn't match current partitioned execution\n",
__FUNCTION__);
ret = -EINVAL;
goto put_and_unlock_out;
}

--pe->nr_processes_left;
dprintk("%s: nr_processes: %d, nr_processes_left: %d\n",
__FUNCTION__,
pe->nr_processes,
pe->nr_processes_left);

cpus_to_assign = udp->cpu_info->n_cpus / req.nr_processes;

/* Find the first unused CPU */
cpu = cpumask_next_zero(-1, &cpus_used);
if (cpu >= udp->cpu_info->n_cpus) {
printk("%s: error: no more CPUs available\n",
__FUNCTION__);
ret = -EINVAL;
goto put_and_unlock_out;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
cpumask_set_cpu(cpu, &cpus_used);
cpumask_set_cpu(cpu, &cpus_to_use);
#else
cpu_set(cpu, cpus_used);
cpu_set(cpu, cpus_to_use);
#endif
cpu_prev = cpu;
dprintk("%s: CPU %d assigned (first)\n", __FUNCTION__, cpu);

for (cpus_assigned = 1; cpus_assigned < cpus_to_assign;
++cpus_assigned) {
int node;

cpu_top = NULL;
/* Find the topology object of the last core assigned */
list_for_each_entry(cpu_top_i, &udp->cpu_topology_list, chain) {
if (cpu_top_i->mckernel_cpu_id == cpu_prev) {
cpu_top = cpu_top_i;
break;
}
}

if (!cpu_top) {
printk("%s: error: couldn't find CPU topology info\n",
__FUNCTION__);
ret = -EINVAL;
goto put_and_unlock_out;
}

/* Find a core sharing the same cache iterating caches from
* the most inner one outwards */
list_for_each_entry(cache_top, &cpu_top->cache_list, chain) {
for_each_cpu(cpu, &cache_top->shared_cpu_map) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
if (!cpumask_test_cpu(cpu, &cpus_used)) {
#else
if (!cpu_isset(cpu, cpus_used)) {
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
cpumask_set_cpu(cpu, &cpus_used);
cpumask_set_cpu(cpu, &cpus_to_use);
#else
cpu_set(cpu, cpus_used);
cpu_set(cpu, cpus_to_use);
#endif
cpu_prev = cpu;
dprintk("%s: CPU %d assigned (same cache L%lu)\n",
__FUNCTION__, cpu, cache_top->saved->level);
goto next_cpu;
}
}
}

/* No CPU? Find a core from the same NUMA node */
node = linux_numa_2_mckernel_numa(udp,
cpu_to_node(mckernel_cpu_2_linux_cpu(udp, cpu_prev)));

for_each_cpu_not(cpu, &cpus_used) {
/* Invalid CPU? */
if (cpu >= udp->cpu_info->n_cpus)
break;

/* Found one */
if (node == linux_numa_2_mckernel_numa(udp,
cpu_to_node(mckernel_cpu_2_linux_cpu(udp, cpu)))) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
cpumask_set_cpu(cpu, &cpus_used);
cpumask_set_cpu(cpu, &cpus_to_use);
#else
cpu_set(cpu, cpus_used);
cpu_set(cpu, cpus_to_use);
#endif
cpu_prev = cpu;
dprintk("%s: CPU %d assigned (same NUMA)\n",
__FUNCTION__, cpu);
goto next_cpu;
}
}

/* No CPU? Simply find the next unused one */
cpu = cpumask_next_zero(-1, &cpus_used);
if (cpu >= udp->cpu_info->n_cpus) {
printk("%s: error: no more CPUs available\n",
__FUNCTION__);
ret = -EINVAL;
goto put_and_unlock_out;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
cpumask_set_cpu(cpu, &cpus_used);
cpumask_set_cpu(cpu, &cpus_to_use);
#else
cpu_set(cpu, cpus_used);
cpu_set(cpu, cpus_to_use);
#endif
cpu_prev = cpu;
dprintk("%s: CPU %d assigned (unused)\n",
__FUNCTION__, cpu);

next_cpu:
continue;
}

/* Found all cores, let user know */
if (copy_to_user(req.cpu_set, &cpus_to_use,
(req.cpu_set_size < sizeof(cpus_to_use) ?
req.cpu_set_size : sizeof(cpus_to_use)))) {
printk("%s: error copying mask to user\n", __FUNCTION__);
ret = -EINVAL;
goto put_and_unlock_out;
}

/* Copy IKC target core and mcexec Linux NUMA id */
cpu = cpumask_next(-1, &cpus_to_use);
if (copy_to_user(req.target_core, &cpu, sizeof(cpu))) {
printk("%s: error copying target core to user\n",
__FUNCTION__);
ret = -EINVAL;
goto put_and_unlock_out;
}

mcexec_linux_numa = cpu_to_node(mckernel_cpu_2_linux_cpu(udp, cpu));
if (copy_to_user(req.mcexec_linux_numa, &mcexec_linux_numa,
sizeof(mcexec_linux_numa))) {
printk("%s: error copying mcexec Linux NUMA id\n",
__FUNCTION__);
ret = -EINVAL;
goto put_and_unlock_out;
}

/* Save in per-process structure */
memcpy(&ppd->cpu_set, &cpus_to_use, sizeof(cpumask_t));
ppd->ikc_target_cpu = cpu;

/* Commit used cores to OS structure */
memcpy(&pe->cpus_used, &cpus_used, sizeof(cpus_used));

/* Reset if last process */
if (pe->nr_processes_left == 0) {
dprintk("%s: nr_processes: %d (partitioned exec ends)\n",
__FUNCTION__,
pe->nr_processes);
pe->nr_processes = -1;
memset(&pe->cpus_used, 0, sizeof(pe->cpus_used));
}

ret = 0;

put_and_unlock_out:
mcctrl_put_per_proc_data(ppd);
mutex_unlock(&pe->lock);

return ret;
}
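Restated compactly, the policy above gives each of nr_processes peers n_cpus / nr_processes cores, and picks every next core by walking outwards through the cache hierarchy of the last assigned core, then its NUMA node, then any free core. A runnable toy sketch of the final fallback tier only (the first two tiers are the list walks shown in the function itself; all names here are illustrative):

#include <stdio.h>

#define N_CPUS 8

/* lowest CPU id not yet marked used, or -1 when exhausted */
static int first_unused_cpu(const unsigned char used[N_CPUS])
{
int cpu;

for (cpu = 0; cpu < N_CPUS; cpu++)
if (!used[cpu])
return cpu;
return -1;
}

int main(void)
{
unsigned char used[N_CPUS] = { 1, 1, 0, 0, 0, 0, 0, 0 };

printf("next CPU: %d\n", first_unused_cpu(used)); /* prints 2 */
return 0;
}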

int mcctrl_add_per_proc_data(struct mcctrl_usrdata *ud, int pid,
struct mcctrl_per_proc_data *ppd)
{
@ -463,34 +745,10 @@ out:
return ret;
}

int mcctrl_delete_per_proc_data(struct mcctrl_usrdata *ud, int pid)
{
struct mcctrl_per_proc_data *ppd_iter, *ppd = NULL;
int hash = (pid & MCCTRL_PER_PROC_DATA_HASH_MASK);
int ret = 0;
unsigned long flags;

write_lock_irqsave(&ud->per_proc_data_hash_lock[hash], flags);
list_for_each_entry(ppd_iter, &ud->per_proc_data_hash[hash], hash) {
if (ppd_iter->pid == pid) {
ppd = ppd_iter;
break;
}
}

if (!ppd) {
ret = -EINVAL;
goto out;
}

list_del(&ppd->hash);

out:
write_unlock_irqrestore(&ud->per_proc_data_hash_lock[hash], flags);
return ret;
}

inline struct mcctrl_per_proc_data *mcctrl_get_per_proc_data(
/* NOTE: per-process data is refcounted.
* For every get call the user should call put. */
struct mcctrl_per_proc_data *mcctrl_get_per_proc_data(
struct mcctrl_usrdata *ud, int pid)
{
struct mcctrl_per_proc_data *ppd_iter, *ppd = NULL;
@ -499,7 +757,6 @@ inline struct mcctrl_per_proc_data *mcctrl_get_per_proc_data(

/* Check if data for this process exists and return it */
read_lock_irqsave(&ud->per_proc_data_hash_lock[hash], flags);
list_for_each_entry(ppd_iter, &ud->per_proc_data_hash[hash], hash) {
if (ppd_iter->pid == pid) {
ppd = ppd_iter;
@ -507,10 +764,57 @@ inline struct mcctrl_per_proc_data *mcctrl_get_per_proc_data(
}
}

if (ppd) {
atomic_inc(&ppd->refcount);
}

read_unlock_irqrestore(&ud->per_proc_data_hash_lock[hash], flags);

return ppd;
}

/* Drop reference. If zero, remove and deallocate */
void mcctrl_put_per_proc_data(struct mcctrl_per_proc_data *ppd)
{
int hash;
unsigned long flags;
int i;

if (!ppd)
return;

if (!atomic_dec_and_test(&ppd->refcount))
return;

dprintk("%s: deallocating PPD for pid %d\n", __FUNCTION__, ppd->pid);
hash = (ppd->pid & MCCTRL_PER_PROC_DATA_HASH_MASK);

write_lock_irqsave(&ppd->ud->per_proc_data_hash_lock[hash], flags);
list_del(&ppd->hash);
write_unlock_irqrestore(&ppd->ud->per_proc_data_hash_lock[hash], flags);

for (i = 0; i < MCCTRL_PER_THREAD_DATA_HASH_SIZE; i++) {
struct mcctrl_per_thread_data *ptd;
struct mcctrl_per_thread_data *next;
struct ikc_scd_packet *packet;

list_for_each_entry_safe(ptd, next,
ppd->per_thread_data_hash + i, hash) {
packet = ptd->data;
list_del(&ptd->hash);
kfree(ptd);
__return_syscall(ppd->ud->os, packet, -EINTR,
task_pid_vnr(current));
ihk_ikc_release_packet(
(struct ihk_ikc_free_packet *)packet,
(ppd->ud->channels + packet->ref)->c);
}
}

kfree(ppd);
}
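The canonical usage of the refcounted lookup pair above, which the callers converted in this patch all follow (the function below is an illustration, not part of the change):

static int ppd_usage_example(struct mcctrl_usrdata *ud, int pid)
{
struct mcctrl_per_proc_data *ppd;

ppd = mcctrl_get_per_proc_data(ud, pid); /* takes a reference */
if (!ppd)
return -EINVAL;

/* ... use ppd ... */

mcctrl_put_per_proc_data(ppd); /* drops it; deallocates at zero */
return 0;
}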

/*
* Called indirectly from the IKC message handler.
*/
@ -523,7 +827,7 @@ int mcexec_syscall(struct mcctrl_usrdata *ud, struct ikc_scd_packet *packet)
unsigned long flags;
struct mcctrl_per_proc_data *ppd;

/* Look up per-process structure */
/* Get a reference to per-process structure */
ppd = mcctrl_get_per_proc_data(ud, pid);

if (unlikely(!ppd)) {
@ -549,7 +853,7 @@ int mcexec_syscall(struct mcctrl_usrdata *ud, struct ikc_scd_packet *packet)
/* Is this a request for a specific thread? See if it's waiting */
if (unlikely(packet->req.ttid)) {
list_for_each_entry(wqhln_iter, &ppd->wq_list_exact, list) {
if (packet->req.ttid != task_pid_vnr(wqhln_iter->task))
if (packet->req.ttid != wqhln_iter->rtid)
continue;

wqhln = wqhln_iter;
@ -588,8 +892,10 @@ retry_alloc:

wqhln->packet = packet;
wqhln->req = 1;
wake_up(&wqhln->wq_syscall);
ihk_ikc_spinlock_unlock(&ppd->wq_list_lock, flags);
wake_up(&wqhln->wq_syscall);

mcctrl_put_per_proc_data(ppd);

return 0;
}
@ -607,7 +913,7 @@ int mcexec_wait_syscall(ihk_os_t os, struct syscall_wait_desc *__user req)
unsigned long irqflags;
struct mcctrl_per_proc_data *ppd;

/* Look up per-process structure */
/* Get a reference to per-process structure */
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));

if (unlikely(!ppd)) {
@ -620,7 +926,8 @@ int mcexec_wait_syscall(ihk_os_t os, struct syscall_wait_desc *__user req)
if (packet) {
printk("%s: ERROR: packet %p is already registered for thread %d\n",
__FUNCTION__, packet, task_pid_vnr(current));
return -EBUSY;
ret = -EBUSY;
goto put_ppd_out;
}

retry:
@ -646,12 +953,13 @@ retry_alloc:

wqhln->task = current;
wqhln->req = 0;
wqhln->packet = NULL;
init_waitqueue_head(&wqhln->wq_syscall);

/* Wait for a request.. */
list_add(&wqhln->list, &ppd->wq_list);
ihk_ikc_spinlock_unlock(&ppd->wq_list_lock, irqflags);

/* Wait for a request.. */
ret = wait_event_interruptible(wqhln->wq_syscall, wqhln->req);

/* Remove per-thread wait queue head */
@ -663,7 +971,8 @@ retry_alloc:
if (ret && !wqhln->req) {
kfree(wqhln);
wqhln = NULL;
return -EINTR;
ret = -EINTR;
goto put_ppd_out;
}

packet = wqhln->packet;
@ -699,7 +1008,8 @@ retry_alloc:

if (mcctrl_add_per_thread_data(ppd, current, packet) < 0) {
kprintf("%s: error adding per-thread data\n", __FUNCTION__);
return -EINVAL;
ret = -EINVAL;
goto put_ppd_out;
}

if (__do_in_kernel_syscall(os, packet)) {
@ -708,11 +1018,13 @@ retry_alloc:

if (mcctrl_delete_per_thread_data(ppd, current) < 0) {
kprintf("%s: error deleting per-thread data\n", __FUNCTION__);
return -EINVAL;
}
return -EFAULT;
ret = -EINVAL;
goto put_ppd_out;
}
return 0;

ret = 0;
goto put_ppd_out;
}

ihk_ikc_release_packet((struct ihk_ikc_free_packet *)packet,
@ -720,10 +1032,15 @@ retry_alloc:

if (mcctrl_delete_per_thread_data(ppd, current) < 0) {
kprintf("%s: error deleting per-thread data\n", __FUNCTION__);
return -EINVAL;
ret = -EINVAL;
goto put_ppd_out;
}

goto retry;

put_ppd_out:
mcctrl_put_per_proc_data(ppd);
return ret;
}

long mcexec_pin_region(ihk_os_t os, unsigned long *__user arg)
@ -833,6 +1150,7 @@ long mcexec_ret_syscall(ihk_os_t os, struct syscall_ret_desc *__user arg)
if (!packet) {
kprintf("%s: ERROR: no packet registered for TID %d\n",
__FUNCTION__, task_pid_vnr(current));
mcctrl_put_per_proc_data(ppd);
return -EINVAL;
}

@ -851,6 +1169,7 @@ long mcexec_ret_syscall(ihk_os_t os, struct syscall_ret_desc *__user arg)
ret.size, NULL, 0);
#endif
if (copy_from_user(rpm, (void *__user)ret.src, ret.size)) {
mcctrl_put_per_proc_data(ppd);
return -EFAULT;
}

@ -868,6 +1187,7 @@ long mcexec_ret_syscall(ihk_os_t os, struct syscall_ret_desc *__user arg)
ihk_ikc_release_packet((struct ihk_ikc_free_packet *)packet,
(usrdata->channels + packet->ref)->c);

mcctrl_put_per_proc_data(ppd);
return 0;
}

@ -936,7 +1256,7 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
int i;

if (os_ind < 0) {
return EINVAL;
return -EINVAL;
}

ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));
@ -948,6 +1268,7 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
return -ENOMEM;
}

ppd->ud = usrdata;
ppd->pid = task_tgid_vnr(current);
/*
* XXX: rpgtable will be updated in __do_in_kernel_syscall()
@ -956,7 +1277,13 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)
INIT_LIST_HEAD(&ppd->wq_list);
INIT_LIST_HEAD(&ppd->wq_req_list);
INIT_LIST_HEAD(&ppd->wq_list_exact);
init_waitqueue_head(&ppd->wq_prepare);
init_waitqueue_head(&ppd->wq_procfs);
spin_lock_init(&ppd->wq_list_lock);
memset(&ppd->cpu_set, 0, sizeof(cpumask_t));
ppd->ikc_target_cpu = 0;
/* Final ref will be dropped in close_exec() */
atomic_set(&ppd->refcount, 1);

for (i = 0; i < MCCTRL_PER_THREAD_DATA_HASH_SIZE; ++i) {
INIT_LIST_HEAD(&ppd->per_thread_data_hash[i]);
@ -965,36 +1292,33 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)

if (mcctrl_add_per_proc_data(usrdata, ppd->pid, ppd) < 0) {
printk("%s: error adding per process data\n", __FUNCTION__);
retval = EINVAL;
goto out_free_ppd;
retval = -EINVAL;
kfree(ppd);
goto out;
}
}
else {
/* Only deallocate in case of an error if we added it above */
ppd = NULL;
}

pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
if (!pathbuf) {
retval = ENOMEM;
goto out_error_drop_ppd;
retval = -ENOMEM;
goto out_put_ppd;
}

file = open_exec(filename);
retval = PTR_ERR(file);
if (IS_ERR(file)) {
goto out_error_free;
goto out_free;
}

fullpath = d_path(&file->f_path, pathbuf, PATH_MAX);
if (IS_ERR(fullpath)) {
retval = PTR_ERR(fullpath);
goto out_error_free;
goto out_free;
}

mcef = kmalloc(sizeof(*mcef), GFP_KERNEL);
if (!mcef) {
retval = ENOMEM;
retval = -ENOMEM;
goto out_put_file;
}

@ -1029,13 +1353,12 @@ int mcexec_open_exec(ihk_os_t os, char * __user filename)

out_put_file:
fput(file);
out_error_free:
out_free:
kfree(pathbuf);
out_error_drop_ppd:
if (ppd) mcctrl_delete_per_proc_data(usrdata, ppd->pid);
out_free_ppd:
if (ppd) kfree(ppd);
return -retval;
out_put_ppd:
mcctrl_put_per_proc_data(ppd);
out:
return retval;
}

@ -1050,12 +1373,12 @@ int mcexec_close_exec(ihk_os_t os)
ppd = mcctrl_get_per_proc_data(usrdata, task_tgid_vnr(current));

if (ppd) {
mcctrl_delete_per_proc_data(usrdata, ppd->pid);
/* One for the reference and one for deallocation */
mcctrl_put_per_proc_data(ppd);
mcctrl_put_per_proc_data(ppd);

dprintk("pid: %d, tid: %d: rpgtable for %d (0x%lx) removed\n",
task_tgid_vnr(current), current->pid, ppd->pid, ppd->rpgtable);

kfree(ppd);
}
else {
printk("WARNING: no per process data for pid %d ?\n",
@ -1154,7 +1477,7 @@ long mcexec_sys_mount(struct sys_mount_desc *__user arg)
cap_raise(promoted->cap_effective, CAP_SYS_ADMIN);
original = override_creds(promoted);

#if MCCTRL_KSYM_sys_mount
#ifdef MCCTRL_KSYM_sys_mount
ret = mcctrl_sys_mount(desc.dev_name, desc.dir_name, desc.type,
desc.flags, desc.data);
#else
@ -1167,6 +1490,36 @@ long mcexec_sys_mount(struct sys_mount_desc *__user arg)
return ret;
}

long mcexec_sys_umount(struct sys_mount_desc *__user arg)
{
struct sys_umount_desc desc;
struct cred *promoted;
const struct cred *original;
int ret;

if (copy_from_user(&desc, arg, sizeof(desc))) {
return -EFAULT;
}

promoted = prepare_creds();
if (!promoted) {
return -ENOMEM;
}
cap_raise(promoted->cap_effective, CAP_SYS_ADMIN);
original = override_creds(promoted);

#ifdef MCCTRL_KSYM_sys_umount
ret = mcctrl_sys_umount(desc.dir_name, MNT_FORCE);
#else
ret = -EFAULT;
#endif

revert_creds(original);
put_cred(promoted);

return ret;
}

long mcexec_sys_unshare(struct sys_unshare_desc *__user arg)
{
struct sys_unshare_desc desc;
@ -1225,6 +1578,12 @@ long __mcctrl_control(ihk_os_t os, unsigned int req, unsigned long arg,
case MCEXEC_UP_GET_CPU:
return mcexec_get_cpu(os);

case MCEXEC_UP_GET_NODES:
return mcexec_get_nodes(os);

case MCEXEC_UP_GET_CPUSET:
return mcexec_get_cpuset(os, arg);

case MCEXEC_UP_STRNCPY_FROM_USER:
return mcexec_strncpy_from_user(os,
(struct strncpy_from_user_desc *)arg);
@ -1254,6 +1613,9 @@ long __mcctrl_control(ihk_os_t os, unsigned int req, unsigned long arg,
case MCEXEC_UP_SYS_MOUNT:
return mcexec_sys_mount((struct sys_mount_desc *)arg);

case MCEXEC_UP_SYS_UMOUNT:
return mcexec_sys_umount((struct sys_mount_desc *)arg);

case MCEXEC_UP_SYS_UNSHARE:
return mcexec_sys_unshare((struct sys_unshare_desc *)arg);

@ -1267,10 +1629,20 @@ void mcexec_prepare_ack(ihk_os_t os, unsigned long arg, int err)
{
struct program_load_desc *desc = phys_to_virt(arg);
struct mcctrl_usrdata *usrdata = ihk_host_os_get_usrdata(os);
struct mcctrl_per_proc_data *ppd = NULL;

ppd = mcctrl_get_per_proc_data(usrdata, desc->pid);
if (!ppd) {
printk("%s: ERROR: no per process data for PID %d\n",
__FUNCTION__, desc->pid);
return;
}

desc->err = err;
desc->status = 1;
mb();

wake_up_all(&usrdata->wq_prepare);
wake_up_all(&ppd->wq_prepare);
mcctrl_put_per_proc_data(ppd);
}

@ -60,6 +60,8 @@ static struct ihk_os_user_call_handler mcctrl_uchs[] = {
{ .request = MCEXEC_UP_LOAD_SYSCALL, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_SEND_SIGNAL, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_GET_CPU, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_GET_NODES, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_GET_CPUSET, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_STRNCPY_FROM_USER, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_NEW_PROCESS, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_PREPARE_DMA, .func = mcctrl_ioctl },
@ -69,6 +71,7 @@ static struct ihk_os_user_call_handler mcctrl_uchs[] = {
{ .request = MCEXEC_UP_GET_CRED, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_GET_CREDV, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_SYS_MOUNT, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_SYS_UMOUNT, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_SYS_UNSHARE, .func = mcctrl_ioctl },
{ .request = MCEXEC_UP_DEBUG_LOG, .func = mcctrl_ioctl },
};

@ -80,7 +80,7 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
break;

case SCD_MSG_PROCFS_ANSWER:
procfs_answer(pisp->arg, pisp->err);
procfs_answer(usrdata, pisp->pid);
break;

case SCD_MSG_SEND_SIGNAL:
@ -240,7 +240,7 @@ static struct ihk_ikc_listen_param listen_param = {
.port = 501,
.handler = connect_handler,
.pkt_size = sizeof(struct ikc_scd_packet),
.queue_size = PAGE_SIZE,
.queue_size = PAGE_SIZE * 4,
.magic = 0x1129,
};

@ -248,7 +248,7 @@ static struct ihk_ikc_listen_param listen_param2 = {
.port = 502,
.handler = connect_handler2,
.pkt_size = sizeof(struct ikc_scd_packet),
.queue_size = PAGE_SIZE,
.queue_size = PAGE_SIZE * 4,
.magic = 0x1329,
};

@ -283,12 +283,13 @@ int prepare_ikc_channels(ihk_os_t os)
}

usrdata->os = os;
init_waitqueue_head(&usrdata->wq_prepare);
ihk_host_os_set_usrdata(os, usrdata);
memcpy(&usrdata->listen_param, &listen_param, sizeof listen_param);
ihk_ikc_listen_port(os, &usrdata->listen_param);
memcpy(&usrdata->listen_param2, &listen_param2, sizeof listen_param2);
ihk_ikc_listen_port(os, &usrdata->listen_param2);
init_waitqueue_head(&usrdata->wq_procfs);
mutex_init(&usrdata->reserve_lock);

for (i = 0; i < MCCTRL_PER_PROC_DATA_HASH_SIZE; ++i) {
INIT_LIST_HEAD(&usrdata->per_proc_data_hash[i]);
@ -298,6 +299,9 @@ int prepare_ikc_channels(ihk_os_t os)
INIT_LIST_HEAD(&usrdata->cpu_topology_list);
INIT_LIST_HEAD(&usrdata->node_topology_list);

mutex_init(&usrdata->part_exec.lock);
usrdata->part_exec.nr_processes = -1;

return 0;
}

@ -323,7 +327,6 @@ void destroy_ikc_channels(ihk_os_t os)
// ihk_ikc_disconnect(usrdata->channels[i].c);
ihk_ikc_free_channel(usrdata->channels[i].c);
__destroy_ikc_channel(os, usrdata->channels + i);
printk("Channel #%d freed.\n", i);
}
}

@ -187,6 +187,7 @@ struct mcctrl_per_thread_data {
#define MCCTRL_PER_THREAD_DATA_HASH_MASK (MCCTRL_PER_THREAD_DATA_HASH_SIZE - 1)

struct mcctrl_per_proc_data {
struct mcctrl_usrdata *ud;
struct list_head hash;
int pid;
unsigned long rpgtable; /* per process, not per OS */
@ -195,9 +196,14 @@ struct mcctrl_per_proc_data {
struct list_head wq_req_list;
struct list_head wq_list_exact;
ihk_spinlock_t wq_list_lock;
wait_queue_head_t wq_prepare;
wait_queue_head_t wq_procfs;

struct list_head per_thread_data_hash[MCCTRL_PER_THREAD_DATA_HASH_SIZE];
rwlock_t per_thread_data_hash_lock[MCCTRL_PER_THREAD_DATA_HASH_SIZE];
cpumask_t cpu_set;
int ikc_target_cpu;
atomic_t refcount;
};

struct sysfsm_req {
@ -254,6 +260,13 @@ struct node_topology {
struct list_head chain;
};

struct mcctrl_part_exec {
struct mutex lock;
int nr_processes;
int nr_processes_left;
cpumask_t cpus_used;
};
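Note on the protocol encoded in this struct: nr_processes doubles as the state flag for partitioned execution. A value of -1 means no partitioning round is in flight; the first caller of mcexec_get_cpuset() latches the requested process count, each subsequent caller must request the same count, and the last one resets the field to -1 and clears cpus_used, as the control.c hunk above shows. Initialization is a two-liner (mirroring prepare_ikc_channels()):

mutex_init(&usrdata->part_exec.lock);
usrdata->part_exec.nr_processes = -1;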

#define CPU_LONGS (((NR_CPUS) + (BITS_PER_LONG) - 1) / (BITS_PER_LONG))

#define MCCTRL_PER_PROC_DATA_HASH_SHIFT 7
@ -270,9 +283,9 @@ struct mcctrl_usrdata {
int base_cpu;
int job_pos;
int mcctrl_dma_abort;
struct mutex reserve_lock;
unsigned long last_thread_exec;
wait_queue_head_t wq_prepare;

wait_queue_head_t wq_procfs;
struct list_head per_proc_data_hash[MCCTRL_PER_PROC_DATA_HASH_SIZE];
rwlock_t per_proc_data_hash_lock[MCCTRL_PER_PROC_DATA_HASH_SIZE];

@ -281,8 +294,10 @@ struct mcctrl_usrdata {
unsigned long cpu_online[CPU_LONGS];
struct ihk_cpu_info *cpu_info;
struct ihk_mem_info *mem_info;
nodemask_t numa_online;
struct list_head cpu_topology_list;
struct list_head node_topology_list;
struct mcctrl_part_exec part_exec;
};

struct mcctrl_signal {
@ -304,8 +319,9 @@ int __do_in_kernel_syscall(ihk_os_t os, struct ikc_scd_packet *packet);
int mcctrl_add_per_proc_data(struct mcctrl_usrdata *ud, int pid,
struct mcctrl_per_proc_data *ppd);
int mcctrl_delete_per_proc_data(struct mcctrl_usrdata *ud, int pid);
inline struct mcctrl_per_proc_data *mcctrl_get_per_proc_data(
struct mcctrl_per_proc_data *mcctrl_get_per_proc_data(
struct mcctrl_usrdata *ud, int pid);
void mcctrl_put_per_proc_data(struct mcctrl_per_proc_data *ppd);

int mcctrl_add_per_thread_data(struct mcctrl_per_proc_data* ppd,
struct task_struct *task, void *data);
@ -337,7 +353,7 @@ struct procfs_file {
char fname[PROCFS_NAME_MAX]; /* procfs filename (request) */
};

void procfs_answer(unsigned int arg, int err);
void procfs_answer(struct mcctrl_usrdata *ud, int pid);
int procfsm_packet_handler(void *os, int msg, int pid, unsigned long arg);
void add_tid_entry(int osnum, int pid, int tid);
void add_pid_entry(int osnum, int pid);

@ -59,7 +59,6 @@ static const struct procfs_entry base_entry_stuff[];
static const struct file_operations mckernel_forward_ro;
static const struct file_operations mckernel_forward;

static DECLARE_WAIT_QUEUE_HEAD(procfsq);
static ssize_t mckernel_procfs_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos);

@ -106,14 +105,28 @@ getpath(struct procfs_list_entry *e, char *buf, int bufsize)
/**
* \brief Process SCD_MSG_PROCFS_ANSWER message.
*
* \param arg sent argument
* \param err error info (redundant)
* \param ud mcctrl_usrdata pointer
* \param pid PID of the requesting process
*/
void
procfs_answer(unsigned int arg, int err)
void procfs_answer(struct mcctrl_usrdata *ud, int pid)
{
dprintk("procfs: received SCD_MSG_PROCFS_ANSWER message(err = %d).\n", err);
wake_up_interruptible(&procfsq);
struct mcctrl_per_proc_data *ppd = NULL;

if (pid > 0) {
ppd = mcctrl_get_per_proc_data(ud, pid);

if (unlikely(!ppd)) {
kprintf("%s: ERROR: no per-process structure for PID %d\n",
__FUNCTION__, pid);
return;
}
}

wake_up_all(pid > 0 ? &ppd->wq_procfs : &ud->wq_procfs);

if (pid > 0) {
mcctrl_put_per_proc_data(ppd);
}
}

static struct procfs_list_entry *
@ -248,9 +261,11 @@ get_pid_cred(int pid)
{
struct task_struct *task = NULL;

if(pid > 0){
if (pid > 0) {
rcu_read_lock();
task = pid_task(find_vpid(pid), PIDTYPE_PID);
if(task){
rcu_read_unlock();
if (task) {
return __task_cred(task);
}
}
@ -493,36 +508,84 @@ procfs_exit(int osnum)
* This function conforms to the 2) way of fs/proc/generic.c
* from linux-2.6.39.4.
*/
static ssize_t
mckernel_procfs_read(struct file *file, char __user *buf, size_t nbytes,
loff_t *ppos)
static ssize_t __mckernel_procfs_read_write(
struct file *file,
char __user *buf, size_t nbytes,
loff_t *ppos, int read_write)
{
struct inode * inode = file->f_path.dentry->d_inode;
struct inode * inode = file->f_inode;
char *kern_buffer = NULL;
int order = 0;
volatile struct procfs_read *r = NULL;
struct ikc_scd_packet isp;
int ret;
int ret, osnum, pid, retw;
unsigned long pbuf;
unsigned long count = nbytes;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
struct proc_dir_entry *dp = PDE(inode);
struct procfs_list_entry *e = dp->data;
#else
#else
struct procfs_list_entry *e = PDE_DATA(inode);
#endif
#endif
loff_t offset = *ppos;
char pathbuf[PROCFS_NAME_MAX];
char *path;
char *path, *p;
ihk_os_t os = NULL;
struct mcctrl_usrdata *udp = NULL;
struct mcctrl_per_proc_data *ppd = NULL;

path = getpath(e, pathbuf, 256);
dprintk("mckernel_procfs_read: invoked for %s, offset: %lu, count: %d\n",
path, offset, count);

if (count <= 0 || offset < 0) {
return 0;
}

path = getpath(e, pathbuf, PROCFS_NAME_MAX);
dprintk("%s: invoked for %s, offset: %lu, count: %lu\n",
__FUNCTION__, path,
(unsigned long)offset, count);

/* Verify OS number */
ret = sscanf(path, "mcos%d/", &osnum);
if (ret != 1) {
printk("%s: error: couldn't determine OS number\n", __FUNCTION__);
return -EINVAL;
}

if (osnum != e->osnum) {
printk("%s: error: OS numbers don't match\n", __FUNCTION__);
return -EINVAL;
}

/* Is this request for a specific process? */
p = strchr(path, '/') + 1;
ret = sscanf(p, "%d/", &pid);
if (ret != 1) {
pid = -1;
}

os = osnum_to_os(osnum);
if (!os) {
printk("%s: error: no IHK OS data found for OS %d\n",
__FUNCTION__, osnum);
return -EINVAL;
}

udp = ihk_host_os_get_usrdata(os);
if (!udp) {
printk("%s: error: no MCCTRL data found for OS %d\n",
__FUNCTION__, osnum);
return -EINVAL;
}

if (pid > 0) {
ppd = mcctrl_get_per_proc_data(udp, pid);

if (unlikely(!ppd)) {
printk("%s: error: no per-process structure for PID %d",
__FUNCTION__, pid);
return -EINVAL;
}
}

while ((1 << order) < count) ++order;
if (order > 12) {
order -= 12;
@ -534,10 +597,11 @@ mckernel_procfs_read(struct file *file, char __user *buf, size_t nbytes,
/* NOTE: we need physically contigous memory to pass through IKC */
kern_buffer = (char *)__get_free_pages(GFP_KERNEL, order);
if (!kern_buffer) {
printk("mckernel_procfs_read(): ERROR: allocating kernel buffer\n");
return -ENOMEM;
printk("%s: ERROR: allocating kernel buffer\n", __FUNCTION__);
ret = -ENOMEM;
goto out;
}

pbuf = virt_to_phys(kern_buffer);

r = kmalloc(sizeof(struct procfs_read), GFP_KERNEL);
@ -551,152 +615,96 @@
r->status = 0;
r->offset = offset;
r->count = count;
r->readwrite = 0;
r->readwrite = read_write;
strncpy((char *)r->fname, path, PROCFS_NAME_MAX);
isp.msg = SCD_MSG_PROCFS_REQUEST;
isp.ref = 0;
isp.arg = virt_to_phys(r);

ret = mcctrl_ikc_send(osnum_to_os(e->osnum), 0, &isp);

isp.pid = pid;

ret = mcctrl_ikc_send(osnum_to_os(e->osnum),
(pid > 0) ? ppd->ikc_target_cpu : 0, &isp);

if (ret < 0) {
goto out; /* error */
}

/* Wait for a reply. */
ret = -EIO; /* default exit code */
dprintk("now wait for a relpy\n");

/* Wait for the status field of the procfs_read structure set ready. */
if (wait_event_interruptible_timeout(procfsq, r->status != 0, HZ) == 0) {
kprintf("ERROR: mckernel_procfs_read: timeout (1 sec).\n");
dprintk("%s: waiting for reply\n", __FUNCTION__);

retry_wait:
/* Wait for the status field of the procfs_read structure,
* wait on per-process or OS specific data depending on
* who the request is for.
*/
if (pid > 0) {
retw = wait_event_interruptible_timeout(ppd->wq_procfs,
r->status != 0, HZ);
}
else {
retw = wait_event_interruptible_timeout(udp->wq_procfs,
r->status != 0, HZ);
}

/* Timeout? */
if (retw == 0 && r->status == 0) {
printk("%s: error: timeout (1 sec)\n", __FUNCTION__);
goto out;
}

/* Wake up and check the result. */
dprintk("mckernel_procfs_read: woke up. ret: %d, eof: %d\n", r->ret, r->eof);

if (r->ret > 0) {
if (copy_to_user(buf, kern_buffer, r->ret)) {
kprintf("ERROR: mckernel_procfs_read: copy_to_user failed.\n");
ret = -EFAULT;
goto out;
}
/* Interrupted? */
else if (retw == -ERESTARTSYS) {
ret = -ERESTART;
goto out;
}
/* Were we woken up by a reply to another procfs request? */
else if (r->status == 0) {
/* TODO: r->status is not set atomically, we could be woken
* up with status == 0 and it could change to 1 while in this
* code, we could potentially miss the wake_up()...
*/
printk("%s: stale wake-up, retrying\n", __FUNCTION__);
goto retry_wait;
}

/* Wake up and check the result. */
dprintk("%s: woke up. ret: %d, eof: %d\n",
__FUNCTION__, r->ret, r->eof);

if (r->ret > 0) {
if (read_write == 0) {
if (copy_to_user(buf, kern_buffer, r->ret)) {
printk("%s: ERROR: copy_to_user failed.\n", __FUNCTION__);
ret = -EFAULT;
goto out;
}
}
*ppos += r->ret;
}
ret = r->ret;

out:
if(kern_buffer)
if (ppd)
mcctrl_put_per_proc_data(ppd);
if (kern_buffer)
free_pages((uintptr_t)kern_buffer, order);
if(r)
if (r)
kfree((void *)r);

return ret;
}
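Worked example of the path dispatch implemented above (illustrative values): for "mcos0/123/status", sscanf(path, "mcos%d/", &osnum) yields osnum == 0, sscanf on the remainder "123/status" yields pid == 123, and the request is sent to the IKC target CPU recorded in that process' per-process data, waiting on its per-process queue; for a path with no PID component such as "mcos0/stat", pid stays -1, the request goes to CPU 0, and the wait happens on the OS-wide wq_procfs queue.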

static ssize_t
mckernel_procfs_write(struct file *file, const char __user *buf, size_t nbytes,
loff_t *ppos)
static ssize_t mckernel_procfs_read(struct file *file,
char __user *buf, size_t nbytes, loff_t *ppos)
{
struct inode * inode = file->f_path.dentry->d_inode;
char *kern_buffer = NULL;
int order = 0;
volatile struct procfs_read *r = NULL;
struct ikc_scd_packet isp;
int ret;
unsigned long pbuf;
unsigned long count = nbytes;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
struct proc_dir_entry *dp = PDE(inode);
struct procfs_list_entry *e = dp->data;
#else
struct procfs_list_entry *e = PDE_DATA(inode);
#endif
loff_t offset = *ppos;
char pathbuf[PROCFS_NAME_MAX];
char *path;
return __mckernel_procfs_read_write(file, buf, nbytes, ppos, 0);
}

path = getpath(e, pathbuf, 256);
dprintk("mckernel_procfs_read: invoked for %s, offset: %lu, count: %d\n",
path, offset, count);

if (count <= 0 || offset < 0) {
return 0;
}

while ((1 << order) < count) ++order;
if (order > 12) {
order -= 12;
}
else {
order = 1;
}

/* NOTE: we need physically contigous memory to pass through IKC */
kern_buffer = (char *)__get_free_pages(GFP_KERNEL, order);
if (!kern_buffer) {
printk("mckernel_procfs_read(): ERROR: allocating kernel buffer\n");
return -ENOMEM;
}
if (copy_from_user(kern_buffer, buf, nbytes)) {
ret = -EFAULT;
goto out;
}

pbuf = virt_to_phys(kern_buffer);

r = kmalloc(sizeof(struct procfs_read), GFP_KERNEL);
if (r == NULL) {
ret = -ENOMEM;
goto out;
}
dprintk("offset: %lx, count: %d, cpu: %d\n", offset, count, e->cpu);

r->pbuf = pbuf;
r->eof = 0;
r->ret = -EIO; /* default */
r->status = 0;
r->offset = offset;
r->count = count;
r->readwrite = 1;
strncpy((char *)r->fname, path, PROCFS_NAME_MAX);
isp.msg = SCD_MSG_PROCFS_REQUEST;
isp.ref = 0;
isp.arg = virt_to_phys(r);

ret = mcctrl_ikc_send(osnum_to_os(e->osnum), 0, &isp);

if (ret < 0) {
goto out; /* error */
}

/* Wait for a reply. */
ret = -EIO; /* default exit code */
dprintk("now wait for a relpy\n");

/* Wait for the status field of the procfs_read structure set ready. */
if (wait_event_interruptible_timeout(procfsq, r->status != 0, HZ) == 0) {
kprintf("ERROR: mckernel_procfs_read: timeout (1 sec).\n");
goto out;
}

/* Wake up and check the result. */
dprintk("mckernel_procfs_read: woke up. ret: %d, eof: %d\n", r->ret, r->eof);

if (r->ret > 0) {
*ppos += r->ret;
}
ret = r->ret;

out:
if(kern_buffer)
free_pages((uintptr_t)kern_buffer, order);
if(r)
kfree((void *)r);

return ret;
static ssize_t mckernel_procfs_write(struct file *file,
const char __user *buf, size_t nbytes, loff_t *ppos)
{
return __mckernel_procfs_read_write(file,
(char __user *)buf, nbytes, ppos, 1);
}

static loff_t

@ -306,7 +306,7 @@ static int remote_page_fault(struct mcctrl_usrdata *usrdata, void *fault_addr, u
error = -ENOENT;
printk("%s: no packet registered for TID %d\n",
__FUNCTION__, task_pid_vnr(current));
goto out_no_unmap;
goto out_put_ppd;
}

req = &packet->req;
@ -326,6 +326,9 @@ retry_alloc:

/* Prepare per-thread wait queue head */
wqhln->task = current;
/* Save the TID explicitly, because mcexec_syscall(), where the request
* will be matched, is in IRQ context and can't call task_pid_vnr() */
wqhln->rtid = task_pid_vnr(current);
wqhln->req = 0;
init_waitqueue_head(&wqhln->wq_syscall);

@ -434,9 +437,11 @@ out:
ihk_device_unmap_virtual(ihk_os_to_dev(usrdata->os), resp, sizeof(*resp));
ihk_device_unmap_memory(ihk_os_to_dev(usrdata->os), phys, sizeof(*resp));

out_no_unmap:
out_put_ppd:
dprintk("%s: tid: %d, fault_addr: %lu, reason: %lu, error: %d\n",
__FUNCTION__, task_pid_vnr(current), fault_addr, reason, error);

mcctrl_put_per_proc_data(ppd);
return error;
}

@ -574,6 +579,7 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
size_t pix;
#endif
struct mcctrl_per_proc_data *ppd;
int ret = 0;

dprintk("mcctrl:page fault:flags %#x pgoff %#lx va %p page %p\n",
vmf->flags, vmf->pgoff, vmf->virtual_address, vmf->page);
@ -584,7 +590,6 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ppd = mcctrl_get_per_proc_data(usrdata, vma->vm_mm->owner->pid);
}

if (!ppd) {
kprintf("%s: ERROR: no per-process structure for PID %d??\n",
__FUNCTION__, task_tgid_vnr(current));
@ -618,7 +623,8 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (error) {
printk("mcctrl:page fault error:flags %#x pgoff %#lx va %p page %p\n",
vmf->flags, vmf->pgoff, vmf->virtual_address, vmf->page);
return VM_FAULT_SIGBUS;
ret = VM_FAULT_SIGBUS;
goto put_and_out;
}

rva = (unsigned long)vmf->virtual_address & ~(pgsize - 1);
@ -655,10 +661,15 @@ static int rus_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (error) {
printk("mcctrl:page fault:remap error:flags %#x pgoff %#lx va %p page %p\n",
vmf->flags, vmf->pgoff, vmf->virtual_address, vmf->page);
return VM_FAULT_SIGBUS;
ret = VM_FAULT_SIGBUS;
goto put_and_out;
}

return VM_FAULT_NOPAGE;
ret = VM_FAULT_NOPAGE;

put_and_out:
mcctrl_put_per_proc_data(ppd);
return ret;
}

static struct vm_operations_struct rus_vmops = {
@ -746,6 +757,20 @@ static struct list_head pager_list = LIST_HEAD_INIT(pager_list);
struct pager_create_result {
uintptr_t handle;
int maxprot;
uint32_t flags;
size_t size;
};

enum {
/* for memobj.flags */
MF_HAS_PAGER = 0x0001,
MF_SHMDT_OK = 0x0002,
MF_IS_REMOVABLE = 0x0004,
MF_PREFETCH = 0x0008,
MF_ZEROFILL = 0x0010,
MF_REG_FILE = 0x1000,
MF_DEV_FILE = 0x2000,
MF_END
};

static int pager_req_create(ihk_os_t os, int fd, uintptr_t result_pa)
@ -760,6 +785,7 @@ static int pager_req_create(ihk_os_t os, int fd, uintptr_t result_pa)
struct pager *newpager = NULL;
uintptr_t phys;
struct kstat st;
int mf_flags = 0;

dprintk("pager_req_create(%d,%lx)\n", fd, (long)result_pa);

@ -827,6 +853,32 @@ static int pager_req_create(ihk_os_t os, int fd, uintptr_t result_pa)
list_add(&newpager->list, &pager_list);
pager = newpager;
newpager = NULL;

/* Intel MPI library and shared memory "prefetch" */
{
char *pathbuf, *fullpath;

pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
if (pathbuf) {
fullpath = d_path(&file->f_path, pathbuf, PATH_MAX);
if (!IS_ERR(fullpath)) {
if (!strncmp("/dev/shm/Intel_MPI", fullpath, 18)) {
//mf_flags = (MF_PREFETCH | MF_ZEROFILL);
mf_flags = (MF_ZEROFILL);
dprintk("%s: filename: %s, zerofill\n",
__FUNCTION__, fullpath);
}
else if (strstr(fullpath, "libmpi") != NULL) {
mf_flags = MF_PREFETCH;
dprintk("%s: filename: %s, prefetch\n",
__FUNCTION__, fullpath);
}
}

kfree(pathbuf);
}
}

break;
}

@ -856,6 +908,8 @@ found:
resp = ihk_device_map_virtual(dev, phys, sizeof(*resp), NULL, 0);
resp->handle = (uintptr_t)pager;
resp->maxprot = maxprot;
resp->flags = mf_flags;
resp->size = st.size;
ihk_device_unmap_virtual(dev, resp, sizeof(*resp));
ihk_device_unmap_memory(dev, phys, sizeof(*resp));

@ -1582,6 +1636,7 @@ int __do_in_kernel_syscall(ihk_os_t os, struct ikc_scd_packet *packet)

dprintk("%s: pid: %d, rpgtable: 0x%lx updated\n",
__FUNCTION__, ppd->pid, ppd->rpgtable);
mcctrl_put_per_proc_data(ppd);
}

ret = clear_pte_range(sc->args[0], sc->args[1]);
@@ -92,27 +92,19 @@ void setup_local_snooping_samples(ihk_os_t os)

void setup_local_snooping_files(ihk_os_t os)
{
	struct ihk_cpu_info *info;
	struct mcctrl_usrdata *udp = ihk_host_os_get_usrdata(os);
	struct sysfsm_bitmap_param param;
	static unsigned long cpu_offline = 0x0;
	int i;
	int error;

	info = ihk_os_get_cpu_info(os);
	if (!info) {
		eprintk("mcctrl:ihk_os_get_cpu_info failed.\n");
		return;
	}

	memset(udp->cpu_online, 0, sizeof(udp->cpu_online));
	for (i = 0; i < info->n_cpus; i++) {
		udp->cpu_online[i / BITS_PER_LONG] =
			udp->cpu_online[i / BITS_PER_LONG] | (1 << (i % BITS_PER_LONG));
	for (i = 0; i < udp->cpu_info->n_cpus; i++) {
		set_bit(i, udp->cpu_online);
	}

	param.nbits = CPU_LONGS * BITS_PER_LONG;
	param.ptr = udp->cpu_online;
	param.ptr = &udp->cpu_online;
	dprintk("mcctrl:setup_local_snooping_files: CPU_LONGS=%d, BITS_PER_LONG=%d\n",
		CPU_LONGS, BITS_PER_LONG);

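Note that the replaced loop shifted the int constant 1 by up to BITS_PER_LONG - 1 positions, which is undefined for bit indices of 32 and above when longs are 64-bit; set_bit() avoids both that and the manual word indexing. A standalone sketch of the corrected arithmetic, with set_bit_emul() as a stand-in for the kernel's set_bit():

#include <stdio.h>

#define BITS_PER_LONG 64

static void set_bit_emul(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);  /* note 1UL */
}

int main(void)
{
	unsigned long online[2] = { 0, 0 };
	int i;

	for (i = 0; i < 68; i++)  /* e.g. 68 CPUs spanning both words */
		set_bit_emul(i, online);
	printf("%lx %lx\n", online[0], online[1]); /* ffffffffffffffff f */
	return 0;
}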
@@ -205,19 +197,19 @@ void free_topology_info(ihk_os_t os)
/*
 * CPU and NUMA node mapping conversion functions.
 */
static int mckernel_cpu_2_linux_cpu(struct mcctrl_usrdata *udp, int cpu_id)
int mckernel_cpu_2_linux_cpu(struct mcctrl_usrdata *udp, int cpu_id)
{
	return (cpu_id < udp->cpu_info->n_cpus) ?
		udp->cpu_info->mapping[cpu_id] : -1;
}

static int mckernel_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu_id)
int mckernel_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu_id)
{
	return (cpu_id < udp->cpu_info->n_cpus) ?
		udp->cpu_info->hw_ids[cpu_id] : -1;
}

static int linux_cpu_2_mckernel_cpu(struct mcctrl_usrdata *udp, int cpu_id)
int linux_cpu_2_mckernel_cpu(struct mcctrl_usrdata *udp, int cpu_id)
{
	int i;

@@ -230,7 +222,7 @@ static int linux_cpu_2_mckernel_cpu(struct mcctrl_usrdata *udp, int cpu_id)
}

#if 0
static int hw_id_2_mckernel_cpu(struct mcctrl_usrdata *udp, int hw_id)
int hw_id_2_mckernel_cpu(struct mcctrl_usrdata *udp, int hw_id)
{
	int i;

@@ -243,7 +235,7 @@ static int hw_id_2_mckernel_cpu(struct mcctrl_usrdata *udp, int hw_id)
	return -1;
}

static int hw_id_2_linux_cpu(struct mcctrl_usrdata *udp, int hw_id)
int hw_id_2_linux_cpu(struct mcctrl_usrdata *udp, int hw_id)
{
	int i;

@@ -256,7 +248,7 @@ static int hw_id_2_linux_cpu(struct mcctrl_usrdata *udp, int hw_id)
	return -1;
}

static int linux_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu)
int linux_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu)
{
	int mckernel_cpu = linux_cpu_2_mckernel_cpu(udp, cpu);

@@ -265,13 +257,13 @@ static int linux_cpu_2_hw_id(struct mcctrl_usrdata *udp, int cpu)
}
#endif

static int mckernel_numa_2_linux_numa(struct mcctrl_usrdata *udp, int numa_id)
int mckernel_numa_2_linux_numa(struct mcctrl_usrdata *udp, int numa_id)
{
	return (numa_id < udp->mem_info->n_numa_nodes) ?
		udp->mem_info->numa_mapping[numa_id] : -1;
}

static int linux_numa_2_mckernel_numa(struct mcctrl_usrdata *udp, int numa_id)
int linux_numa_2_mckernel_numa(struct mcctrl_usrdata *udp, int numa_id)
{
	int i;

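These hunks only drop the static qualifier so the converters can be shared across mcctrl translation units; the scheme itself is a direct table lookup in one direction and a linear scan in the other. A self-contained sketch under that assumption (the 4-CPU mapping table is made up):

#include <stdio.h>

static const int mapping[] = { 4, 5, 6, 7 };	/* hypothetical 4-CPU setup */
static const int n_cpus = 4;

/* McKernel CPU id -> Linux CPU id: direct index */
static int mck_to_linux(int cpu_id)
{
	return (cpu_id < n_cpus) ? mapping[cpu_id] : -1;
}

/* Linux CPU id -> McKernel CPU id: linear scan of the same table */
static int linux_to_mck(int linux_cpu)
{
	int i;

	for (i = 0; i < n_cpus; i++)
		if (mapping[i] == linux_cpu)
			return i;
	return -1;
}

int main(void)
{
	printf("%d %d\n", mck_to_linux(2), linux_to_mck(6)); /* 6 2 */
	return 0;
}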
@@ -666,6 +658,7 @@ out:
static int setup_node_files(struct mcctrl_usrdata *udp)
{
	int error;
	int node;
	struct node_topology *p;
	struct sysfsm_bitmap_param param;

@@ -677,10 +670,21 @@ static int setup_node_files(struct mcctrl_usrdata *udp)
		goto out;
	}

	memset(&udp->numa_online, 0, sizeof(udp->numa_online));
	for (node = 0; node < udp->mem_info->n_numa_nodes; ++node) {
		node_set(node, udp->numa_online);
	}

	param.nbits = MAX_NUMNODES;
	param.ptr = &udp->numa_online;
	sysfsm_createf(udp->os, SYSFS_SNOOPING_OPS_pbl, &param, 0444,
		"/sys/devices/system/node/online");
	sysfsm_createf(udp->os, SYSFS_SNOOPING_OPS_pbl, &param, 0444,
		"/sys/devices/system/node/possible");

	list_for_each_entry(p, &udp->node_topology_list, chain) {
		struct sysfs_handle handle;
		int cpu;
		int node;
		size_t offset = 0;
		param.nbits = nr_cpumask_bits;
		param.ptr = &p->cpumap;
@@ -697,8 +701,6 @@ static int setup_node_files(struct mcctrl_usrdata *udp)
			mckernel_numa_2_linux_numa(udp, node)
			));
	}
	offset += snprintf(&p->mckernel_numa_distance_s[offset],
		NODE_DISTANCE_S_SIZE - offset, "%s", "\n");

	sysfsm_createf(udp->os, SYSFS_SNOOPING_OPS_s,
		p->mckernel_numa_distance_s, 0444,

@@ -14,6 +9,9 @@ ifeq ($(BUILD_MODULE_TMP),org)
ifeq ($(BUILD_MODULE),none)
BUILD_MODULE=$(shell if [ ${LINUX_VERSION_CODE} -ge 262144 -a ${LINUX_VERSION_CODE} -lt 262400 ]; then echo "linux-4.0.9"; else echo "none"; fi)
endif
ifeq ($(BUILD_MODULE),none)
BUILD_MODULE=$(shell if [ ${LINUX_VERSION_CODE} -ge 243680 -a ${LINUX_VERSION_CODE} -lt 263936 ]; then echo "linux-4.6.7"; else echo "none"; fi)
endif
endif
ifeq ($(BUILD_MODULE_TMP),rhel)
ifeq ($(BUILD_MODULE),none)
@@ -32,6 +35,7 @@ endif
clean:
	@(cd linux-3.10.0-327.36.1.el7; make clean)
	@(cd linux-4.0.9; make clean)
	@(cd linux-4.6.7; make clean)

install:
ifneq ($(BUILD_MODULE),none)

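The bounds above compare LINUX_VERSION_CODE, which packs a kernel version as (major << 16) + (minor << 8) + patch, so 262144 and 262400 are 4.0.0 and 4.1.0. By the same arithmetic KERNEL_VERSION(4,6,0) is 263680, while the 4.6.7 lower bound of 243680 decodes to 3.183.224, which suggests a transposed 263680. A quick standalone check of the arithmetic:

#include <stdio.h>

#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	printf("%d\n", KERNEL_VERSION(4, 0, 0)); /* 262144: 4.0.9 lower bound */
	printf("%d\n", KERNEL_VERSION(4, 1, 0)); /* 262400: 4.0.9 upper bound */
	printf("%d\n", KERNEL_VERSION(4, 6, 0)); /* 263680 */
	printf("%d\n", KERNEL_VERSION(4, 7, 0)); /* 263936: 4.6.7 upper bound */
	return 0;
}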
executer/kernel/mcoverlayfs/linux-4.6.7/Makefile.in | 21 (new file)
@@ -0,0 +1,21 @@
KDIR ?= @KDIR@
ARCH ?= @ARCH@
KMODDIR = @KMODDIR@
src = @abs_srcdir@

obj-m += mcoverlay.o

mcoverlay-y := copy_up.o dir.o inode.o readdir.o super.o

.PHONY: clean install modules

modules:
	$(MAKE) -C $(KDIR) M=$(PWD) SUBDIRS=$(PWD) ARCH=$(ARCH) modules

clean:
	$(RM) .*.cmd *.mod.c *.o *.ko* Module.symvers modules.order -r .tmp*

install:
	mkdir -p -m 755 $(KMODDIR)
	install -m 644 mcoverlay.ko $(KMODDIR)

executer/kernel/mcoverlayfs/linux-4.6.7/copy_up.c | 460 (new file)
@@ -0,0 +1,460 @@
/*
 *
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/splice.h>
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/namei.h>
#include <linux/fdtable.h>
#include <linux/ratelimit.h>
#include "overlayfs.h"

#define OVL_COPY_UP_CHUNK_SIZE (1 << 20)

static bool __read_mostly ovl_check_copy_up;
module_param_named(check_copy_up, ovl_check_copy_up, bool,
		S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(ovl_check_copy_up,
		"Warn on copy-up when causing process also has a R/O fd open");

static int ovl_check_fd(const void *data, struct file *f, unsigned int fd)
{
	const struct dentry *dentry = data;

	if (f->f_inode == d_inode(dentry))
		pr_warn_ratelimited("overlayfs: Warning: Copying up %pD, but open R/O on fd %u which will cease to be coherent [pid=%d %s]\n",
				f, fd, current->pid, current->comm);
	return 0;
}

/*
 * Check the fds open by this process and warn if something like the following
 * scenario is about to occur:
 *
 *	fd1 = open("foo", O_RDONLY);
 *	fd2 = open("foo", O_RDWR);
 */
static void ovl_do_check_copy_up(struct dentry *dentry)
{
	if (ovl_check_copy_up)
		iterate_fd(current->files, 0, ovl_check_fd, dentry);
}

int ovl_copy_xattr(struct dentry *old, struct dentry *new, unsigned opt)
{
	ssize_t list_size, size, value_size = 0;
	char *buf, *name, *value = NULL;
	int uninitialized_var(error);

	if (!old->d_inode->i_op->getxattr ||
	    !new->d_inode->i_op->getxattr)
		return 0;

	list_size = vfs_listxattr(old, NULL, 0);
	if (list_size <= 0) {
		if (list_size == -EOPNOTSUPP)
			return 0;
		return list_size;
	}

	buf = kzalloc(list_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	list_size = vfs_listxattr(old, buf, list_size);
	if (list_size <= 0) {
		error = list_size;
		goto out;
	}

	for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
retry:
		size = vfs_getxattr(old, name, value, value_size);
		if (size == -ERANGE)
			size = vfs_getxattr(old, name, NULL, 0);

		if (size < 0) {
			if (OVL_OPT_NOFSCHECK(opt)) {
				OVL_DEBUG("fail: old=%pd4, i_ino=%lu, name=%s\n",
					old, old->d_inode->i_ino, name);
				continue;
			} else {
				error = size;
				break;
			}
		}
		OVL_DEBUG("success: old=%pd4, i_ino=%lu, name=%s\n",
			old, old->d_inode->i_ino, name);

		if (size > value_size) {
			void *new;

			new = krealloc(value, size, GFP_KERNEL);
			if (!new) {
				error = -ENOMEM;
				break;
			}
			value = new;
			value_size = size;
			goto retry;
		}

		error = vfs_setxattr(new, name, value, size, 0);
		if (error)
			break;
	}
	kfree(value);
out:
	kfree(buf);
	return error;
}

static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
{
	struct file *old_file;
	struct file *new_file;
	loff_t old_pos = 0;
	loff_t new_pos = 0;
	int error = 0;

	if (len == 0)
		return 0;

	old_file = ovl_path_open(old, O_LARGEFILE | O_RDONLY);
	if (IS_ERR(old_file))
		return PTR_ERR(old_file);

	new_file = ovl_path_open(new, O_LARGEFILE | O_WRONLY);
	if (IS_ERR(new_file)) {
		error = PTR_ERR(new_file);
		goto out_fput;
	}

	/* FIXME: copy up sparse files efficiently */
	while (len) {
		size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
		long bytes;

		if (len < this_len)
			this_len = len;

		if (signal_pending_state(TASK_KILLABLE, current)) {
			error = -EINTR;
			break;
		}

		bytes = do_splice_direct(old_file, &old_pos,
				new_file, &new_pos,
				this_len, SPLICE_F_MOVE);
		if (bytes <= 0) {
			error = bytes;
			break;
		}
		WARN_ON(old_pos != new_pos);

		len -= bytes;
	}

	fput(new_file);
out_fput:
	fput(old_file);
	return error;
}

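ovl_copy_up_data() streams the lower file into the upper copy in OVL_COPY_UP_CHUNK_SIZE pieces, so a huge copy-up remains killable between chunks. A user-space sketch of the same chunked-copy shape, with plain read()/write() standing in for do_splice_direct():

#include <unistd.h>

/* Copy up to len bytes from fd_in to fd_out in bounded chunks. */
static int copy_data(int fd_in, int fd_out, long len)
{
	char buf[1 << 16];

	while (len > 0) {
		size_t this_len = sizeof(buf) < (size_t)len ?
				sizeof(buf) : (size_t)len;
		ssize_t bytes = read(fd_in, buf, this_len);

		if (bytes <= 0)
			return (int)bytes;	/* 0: EOF, <0: error */
		if (write(fd_out, buf, (size_t)bytes) != bytes)
			return -1;
		len -= bytes;
	}
	return 0;
}

int main(void)
{
	return copy_data(0, 1, 1 << 20) < 0; /* copy up to 1 MiB stdin->stdout */
}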
static char *ovl_read_symlink(struct dentry *realdentry)
{
	int res;
	char *buf;
	struct inode *inode = realdentry->d_inode;
	mm_segment_t old_fs;

	res = -EINVAL;
	if (!inode->i_op->readlink)
		goto err;

	res = -ENOMEM;
	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		goto err;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	res = inode->i_op->readlink(realdentry,
			(char __user *)buf, PAGE_SIZE - 1);
	set_fs(old_fs);
	if (res < 0) {
		free_page((unsigned long) buf);
		goto err;
	}
	buf[res] = '\0';

	return buf;

err:
	return ERR_PTR(res);
}

static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
{
	struct iattr attr = {
		.ia_valid =
			ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET,
		.ia_atime = stat->atime,
		.ia_mtime = stat->mtime,
	};

	return notify_change(upperdentry, &attr, NULL);
}

int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
{
	int err = 0;

	if (!S_ISLNK(stat->mode)) {
		struct iattr attr = {
			.ia_valid = ATTR_MODE,
			.ia_mode = stat->mode,
		};
		err = notify_change(upperdentry, &attr, NULL);
	}
	if (!err) {
		struct iattr attr = {
			.ia_valid = ATTR_UID | ATTR_GID,
			.ia_uid = stat->uid,
			.ia_gid = stat->gid,
		};
		err = notify_change(upperdentry, &attr, NULL);
	}
	if (!err)
		ovl_set_timestamps(upperdentry, stat);

	return err;
}

static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
		struct dentry *dentry, struct path *lowerpath,
		struct kstat *stat, const char *link)
{
	struct inode *wdir = workdir->d_inode;
	struct inode *udir = upperdir->d_inode;
	struct dentry *newdentry = NULL;
	struct dentry *upper = NULL;
	umode_t mode = stat->mode;
	unsigned opt = ovl_get_config_opt(dentry);
	int err;

	newdentry = ovl_lookup_temp(workdir, dentry);
	err = PTR_ERR(newdentry);
	if (IS_ERR(newdentry))
		goto out;

	upper = lookup_one_len(dentry->d_name.name, upperdir,
			dentry->d_name.len);
	err = PTR_ERR(upper);
	if (IS_ERR(upper))
		goto out1;

	/* Can't properly set mode on creation because of the umask */
	stat->mode &= S_IFMT;
	err = ovl_create_real(wdir, newdentry, stat, link, NULL, true);
	stat->mode = mode;
	if (err)
		goto out2;

	if (S_ISREG(stat->mode)) {
		struct path upperpath;

		ovl_path_upper(dentry, &upperpath);
		BUG_ON(upperpath.dentry != NULL);
		upperpath.dentry = newdentry;

		err = ovl_copy_up_data(lowerpath, &upperpath, stat->size);
		if (err)
			goto out_cleanup;
	}

	err = ovl_copy_xattr(lowerpath->dentry, newdentry, opt);
	if (err)
		goto out_cleanup;

	inode_lock(newdentry->d_inode);
	err = ovl_set_attr(newdentry, stat);
	inode_unlock(newdentry->d_inode);
	if (err)
		goto out_cleanup;

	err = ovl_do_rename(wdir, newdentry, udir, upper, 0);
	if (err)
		goto out_cleanup;

	ovl_dentry_update(dentry, newdentry);
	newdentry = NULL;

	/*
	 * Non-directories become opaque when copied up.
	 */
	if (!S_ISDIR(stat->mode))
		ovl_dentry_set_opaque(dentry, true);
out2:
	dput(upper);
out1:
	dput(newdentry);
out:
	return err;

out_cleanup:
	ovl_cleanup(wdir, newdentry);
	goto out2;
}

/*
 * Copy up a single dentry
 *
 * Directory renames only allowed on "pure upper" (already created on
 * upper filesystem, never copied up). Directories which are on lower or
 * are merged may not be renamed. For these -EXDEV is returned and
 * userspace has to deal with it. This means, when copying up a
 * directory we can rely on it and ancestors being stable.
 *
 * Non-directory renames start with copy up of source if necessary. The
 * actual rename will only proceed once the copy up was successful. Copy
 * up uses upper parent i_mutex for exclusion. Since rename can change
 * d_parent it is possible that the copy up will lock the old parent. At
 * that point the file will have already been copied up anyway.
 */
int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
		struct path *lowerpath, struct kstat *stat)
{
	struct dentry *workdir = ovl_workdir(dentry);
	int err;
	struct kstat pstat;
	struct path parentpath;
	struct dentry *upperdir;
	struct dentry *upperdentry;
	const struct cred *old_cred;
	struct cred *override_cred;
	char *link = NULL;

	if (WARN_ON(!workdir))
		return -EROFS;

	ovl_do_check_copy_up(lowerpath->dentry);

	ovl_path_upper(parent, &parentpath);
	upperdir = parentpath.dentry;

	err = vfs_getattr(&parentpath, &pstat);
	if (err)
		return err;

	if (S_ISLNK(stat->mode)) {
		link = ovl_read_symlink(lowerpath->dentry);
		if (IS_ERR(link))
			return PTR_ERR(link);
	}

	err = -ENOMEM;
	override_cred = prepare_creds();
	if (!override_cred)
		goto out_free_link;

	override_cred->fsuid = stat->uid;
	override_cred->fsgid = stat->gid;
	/*
	 * CAP_SYS_ADMIN for copying up extended attributes
	 * CAP_DAC_OVERRIDE for create
	 * CAP_FOWNER for chmod, timestamp update
	 * CAP_FSETID for chmod
	 * CAP_CHOWN for chown
	 * CAP_MKNOD for mknod
	 */
	cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
	cap_raise(override_cred->cap_effective, CAP_FOWNER);
	cap_raise(override_cred->cap_effective, CAP_FSETID);
	cap_raise(override_cred->cap_effective, CAP_CHOWN);
	cap_raise(override_cred->cap_effective, CAP_MKNOD);
	old_cred = override_creds(override_cred);

	err = -EIO;
	if (lock_rename(workdir, upperdir) != NULL) {
		pr_err("overlayfs: failed to lock workdir+upperdir\n");
		goto out_unlock;
	}
	upperdentry = ovl_dentry_upper(dentry);
	if (upperdentry) {
		/* Raced with another copy-up? Nothing to do, then... */
		err = 0;
		goto out_unlock;
	}

	err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath,
			stat, link);
	if (!err) {
		/* Restore timestamps on parent (best effort) */
		ovl_set_timestamps(upperdir, &pstat);
	}
out_unlock:
	unlock_rename(workdir, upperdir);
	revert_creds(old_cred);
	put_cred(override_cred);

out_free_link:
	if (link)
		free_page((unsigned long) link);

	return err;
}

int ovl_copy_up(struct dentry *dentry)
{
	int err;

	err = 0;
	while (!err) {
		struct dentry *next;
		struct dentry *parent;
		struct path lowerpath;
		struct kstat stat;
		enum ovl_path_type type = ovl_path_type(dentry);

		if (OVL_TYPE_UPPER(type))
			break;

		next = dget(dentry);
		/* find the topmost dentry not yet copied up */
		for (;;) {
			parent = dget_parent(next);

			type = ovl_path_type(parent);
			if (OVL_TYPE_UPPER(type))
				break;

			dput(next);
			next = parent;
		}

		ovl_path_lower(next, &lowerpath);
		err = vfs_getattr(&lowerpath, &stat);
		if (!err)
			err = ovl_copy_up_one(parent, next, &lowerpath, &stat);

		dput(parent);
		dput(next);
	}

	return err;
}
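ovl_copy_up() works top-down: it repeatedly finds the topmost ancestor that still lacks an upper copy, copies that one, and loops until the target itself is covered. A standalone sketch of that walk over a hypothetical parent-linked node type, with copy_one() standing in for ovl_copy_up_one():

#include <stdbool.h>
#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;
	bool has_upper;
};

static void copy_one(struct node *n)
{
	printf("copy up %s\n", n->name);
	n->has_upper = true;
}

static void copy_up(struct node *n)
{
	while (!n->has_upper) {
		struct node *next = n;

		/* find the topmost ancestor not yet copied up */
		while (!next->parent->has_upper)
			next = next->parent;
		copy_one(next);
	}
}

int main(void)
{
	struct node root = { "/", NULL, true };
	struct node a = { "a", &root, false };
	struct node b = { "a/b", &a, false };

	copy_up(&b);	/* prints: copy up a, copy up a/b */
	return 0;
}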
executer/kernel/mcoverlayfs/linux-4.6.7/dir.c | 969 (new file)
@@ -0,0 +1,969 @@
/*
 *
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/cred.h>
#include "overlayfs.h"

void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
{
	int err;

	dget(wdentry);
	if (d_is_dir(wdentry))
		err = ovl_do_rmdir(wdir, wdentry);
	else
		err = ovl_do_unlink(wdir, wdentry);
	dput(wdentry);

	if (err) {
		pr_err("overlayfs: cleanup of '%pd2' failed (%i)\n",
			wdentry, err);
	}
}

struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry)
{
	struct dentry *temp;
	char name[20];

	snprintf(name, sizeof(name), "#%lx", (unsigned long) dentry);

	temp = lookup_one_len(name, workdir, strlen(name));
	if (!IS_ERR(temp) && temp->d_inode) {
		pr_err("overlayfs: workdir/%s already exists\n", name);
		dput(temp);
		temp = ERR_PTR(-EIO);
	}

	return temp;
}

/* caller holds i_mutex on workdir */
static struct dentry *ovl_whiteout(struct dentry *workdir,
		struct dentry *dentry)
{
	int err;
	struct dentry *whiteout;
	struct inode *wdir = workdir->d_inode;

	whiteout = ovl_lookup_temp(workdir, dentry);
	if (IS_ERR(whiteout))
		return whiteout;

	err = ovl_do_whiteout(wdir, whiteout);
	if (err) {
		dput(whiteout);
		whiteout = ERR_PTR(err);
	}

	return whiteout;
}

int ovl_create_real(struct inode *dir, struct dentry *newdentry,
		struct kstat *stat, const char *link,
		struct dentry *hardlink, bool debug)
{
	int err;

	if (newdentry->d_inode)
		return -ESTALE;

	if (hardlink) {
		err = ovl_do_link(hardlink, dir, newdentry, debug);
	} else {
		switch (stat->mode & S_IFMT) {
		case S_IFREG:
			err = ovl_do_create(dir, newdentry, stat->mode, debug);
			break;

		case S_IFDIR:
			err = ovl_do_mkdir(dir, newdentry, stat->mode, debug);
			break;

		case S_IFCHR:
		case S_IFBLK:
		case S_IFIFO:
		case S_IFSOCK:
			err = ovl_do_mknod(dir, newdentry,
					stat->mode, stat->rdev, debug);
			break;

		case S_IFLNK:
			err = ovl_do_symlink(dir, newdentry, link, debug);
			break;

		default:
			err = -EPERM;
		}
	}
	if (!err && WARN_ON(!newdentry->d_inode)) {
		/*
		 * Not quite sure if non-instantiated dentry is legal or not.
		 * VFS doesn't seem to care so check and warn here.
		 */
		err = -ENOENT;
	}
	return err;
}

static int ovl_set_opaque(struct dentry *upperdentry)
{
	return ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0);
}

static void ovl_remove_opaque(struct dentry *upperdentry)
{
	int err;

	err = ovl_do_removexattr(upperdentry, OVL_XATTR_OPAQUE);
	if (err) {
		pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n",
			upperdentry->d_name.name, err);
	}
}

static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
		struct kstat *stat)
{
	int err;
	enum ovl_path_type type;
	struct path realpath;

	type = ovl_path_real(dentry, &realpath);
	err = vfs_getattr(&realpath, stat);
	if (err)
		return err;

	stat->dev = dentry->d_sb->s_dev;
	stat->ino = dentry->d_inode->i_ino;

	/*
	 * It's probably not worth it to count subdirs to get the
	 * correct link count. nlink=1 seems to pacify 'find' and
	 * other utilities.
	 */
	if (OVL_TYPE_MERGE(type))
		stat->nlink = 1;

	return 0;
}

static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
		struct kstat *stat, const char *link,
		struct dentry *hardlink)
{
	struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
	struct inode *udir = upperdir->d_inode;
	struct dentry *newdentry;
	int err;

	inode_lock_nested(udir, I_MUTEX_PARENT);
	newdentry = lookup_one_len(dentry->d_name.name, upperdir,
			dentry->d_name.len);
	err = PTR_ERR(newdentry);
	if (IS_ERR(newdentry))
		goto out_unlock;
	err = ovl_create_real(udir, newdentry, stat, link, hardlink, false);
	if (err)
		goto out_dput;

	ovl_dentry_version_inc(dentry->d_parent);
	ovl_dentry_update(dentry, newdentry);
	ovl_copyattr(newdentry->d_inode, inode);
	d_instantiate(dentry, inode);
	newdentry = NULL;
out_dput:
	dput(newdentry);
out_unlock:
	inode_unlock(udir);
	return err;
}

static int ovl_lock_rename_workdir(struct dentry *workdir,
		struct dentry *upperdir)
{
	/* Workdir should not be the same as upperdir */
	if (workdir == upperdir)
		goto err;

	/* Workdir should not be subdir of upperdir and vice versa */
	if (lock_rename(workdir, upperdir) != NULL)
		goto err_unlock;

	return 0;

err_unlock:
	unlock_rename(workdir, upperdir);
err:
	pr_err("overlayfs: failed to lock workdir+upperdir\n");
	return -EIO;
}

static struct dentry *ovl_clear_empty(struct dentry *dentry,
		struct list_head *list)
{
	struct dentry *workdir = ovl_workdir(dentry);
	struct inode *wdir = workdir->d_inode;
	struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
	struct inode *udir = upperdir->d_inode;
	struct path upperpath;
	struct dentry *upper;
	struct dentry *opaquedir;
	struct kstat stat;
	unsigned opt = ovl_get_config_opt(dentry);
	int err;

	if (WARN_ON(!workdir))
		return ERR_PTR(-EROFS);

	err = ovl_lock_rename_workdir(workdir, upperdir);
	if (err)
		goto out;

	ovl_path_upper(dentry, &upperpath);
	err = vfs_getattr(&upperpath, &stat);
	if (err)
		goto out_unlock;

	err = -ESTALE;
	if (!S_ISDIR(stat.mode))
		goto out_unlock;
	upper = upperpath.dentry;
	if (upper->d_parent->d_inode != udir)
		goto out_unlock;

	opaquedir = ovl_lookup_temp(workdir, dentry);
	err = PTR_ERR(opaquedir);
	if (IS_ERR(opaquedir))
		goto out_unlock;

	err = ovl_create_real(wdir, opaquedir, &stat, NULL, NULL, true);
	if (err)
		goto out_dput;

	err = ovl_copy_xattr(upper, opaquedir, opt);
	if (err)
		goto out_cleanup;

	err = ovl_set_opaque(opaquedir);
	if (err)
		goto out_cleanup;

	inode_lock(opaquedir->d_inode);
	err = ovl_set_attr(opaquedir, &stat);
	inode_unlock(opaquedir->d_inode);
	if (err)
		goto out_cleanup;

	err = ovl_do_rename(wdir, opaquedir, udir, upper, RENAME_EXCHANGE);
	if (err)
		goto out_cleanup;

	ovl_cleanup_whiteouts(upper, list);
	ovl_cleanup(wdir, upper);
	unlock_rename(workdir, upperdir);

	/* dentry's upper doesn't match now, get rid of it */
	d_drop(dentry);

	return opaquedir;

out_cleanup:
	ovl_cleanup(wdir, opaquedir);
out_dput:
	dput(opaquedir);
out_unlock:
	unlock_rename(workdir, upperdir);
out:
	return ERR_PTR(err);
}

static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry)
{
	int err;
	struct dentry *ret = NULL;
	LIST_HEAD(list);

	err = ovl_check_empty_dir(dentry, &list);
	if (err)
		ret = ERR_PTR(err);
	else {
		/*
		 * If no upperdentry then skip clearing whiteouts.
		 *
		 * Can race with copy-up, since we don't hold the upperdir
		 * mutex. Doesn't matter, since copy-up can't create a
		 * non-empty directory from an empty one.
		 */
		if (ovl_dentry_upper(dentry))
			ret = ovl_clear_empty(dentry, &list);
	}

	ovl_cache_free(&list);

	return ret;
}

static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
		struct kstat *stat, const char *link,
		struct dentry *hardlink)
{
	struct dentry *workdir = ovl_workdir(dentry);
	struct inode *wdir = workdir->d_inode;
	struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
	struct inode *udir = upperdir->d_inode;
	struct dentry *upper;
	struct dentry *newdentry;
	int err;

	if (WARN_ON(!workdir))
		return -EROFS;

	err = ovl_lock_rename_workdir(workdir, upperdir);
	if (err)
		goto out;

	newdentry = ovl_lookup_temp(workdir, dentry);
	err = PTR_ERR(newdentry);
	if (IS_ERR(newdentry))
		goto out_unlock;

	upper = lookup_one_len(dentry->d_name.name, upperdir,
			dentry->d_name.len);
	err = PTR_ERR(upper);
	if (IS_ERR(upper))
		goto out_dput;

	err = ovl_create_real(wdir, newdentry, stat, link, hardlink, true);
	if (err)
		goto out_dput2;

	if (S_ISDIR(stat->mode)) {
		err = ovl_set_opaque(newdentry);
		if (err)
			goto out_cleanup;

		err = ovl_do_rename(wdir, newdentry, udir, upper,
				RENAME_EXCHANGE);
		if (err)
			goto out_cleanup;

		ovl_cleanup(wdir, upper);
	} else {
		err = ovl_do_rename(wdir, newdentry, udir, upper, 0);
		if (err)
			goto out_cleanup;
	}
	ovl_dentry_version_inc(dentry->d_parent);
	ovl_dentry_update(dentry, newdentry);
	ovl_copyattr(newdentry->d_inode, inode);
	d_instantiate(dentry, inode);
	newdentry = NULL;
out_dput2:
	dput(upper);
out_dput:
	dput(newdentry);
out_unlock:
	unlock_rename(workdir, upperdir);
out:
	return err;

out_cleanup:
	ovl_cleanup(wdir, newdentry);
	goto out_dput2;
}

static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev,
		const char *link, struct dentry *hardlink)
{
	int err;
	struct inode *inode;
	struct kstat stat = {
		.mode = mode,
		.rdev = rdev,
	};

	err = -ENOMEM;
	inode = ovl_new_inode(dentry->d_sb, mode, dentry->d_fsdata);
	if (!inode)
		goto out;

	err = ovl_copy_up(dentry->d_parent);
	if (err)
		goto out_iput;

	if (!ovl_dentry_is_opaque(dentry)) {
		err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
	} else {
		const struct cred *old_cred;
		struct cred *override_cred;

		err = -ENOMEM;
		override_cred = prepare_creds();
		if (!override_cred)
			goto out_iput;

		/*
		 * CAP_SYS_ADMIN for setting opaque xattr
		 * CAP_DAC_OVERRIDE for create in workdir, rename
		 * CAP_FOWNER for removing whiteout from sticky dir
		 */
		cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
		cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
		cap_raise(override_cred->cap_effective, CAP_FOWNER);
		old_cred = override_creds(override_cred);

		err = ovl_create_over_whiteout(dentry, inode, &stat, link,
				hardlink);

		revert_creds(old_cred);
		put_cred(override_cred);
	}

	if (!err)
		inode = NULL;
out_iput:
	iput(inode);
out:
	return err;
}

static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
		const char *link)
{
	int err;

	err = ovl_want_write(dentry);
	if (!err) {
		err = ovl_create_or_link(dentry, mode, rdev, link, NULL);
		ovl_drop_write(dentry);
	}

	return err;
}

static int ovl_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		bool excl)
{
	return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL);
}

static int ovl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL);
}

static int ovl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
		dev_t rdev)
{
	/* Don't allow creation of "whiteout" on overlay */
	if (S_ISCHR(mode) && rdev == WHITEOUT_DEV)
		return -EPERM;

	return ovl_create_object(dentry, mode, rdev, NULL);
}

static int ovl_symlink(struct inode *dir, struct dentry *dentry,
		const char *link)
{
	return ovl_create_object(dentry, S_IFLNK, 0, link);
}

static int ovl_link(struct dentry *old, struct inode *newdir,
		struct dentry *new)
{
	int err;
	struct dentry *upper;

	err = ovl_want_write(old);
	if (err)
		goto out;

	err = ovl_copy_up(old);
	if (err)
		goto out_drop_write;

	upper = ovl_dentry_upper(old);
	err = ovl_create_or_link(new, upper->d_inode->i_mode, 0, NULL, upper);

out_drop_write:
	ovl_drop_write(old);
out:
	return err;
}

static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
{
	struct dentry *workdir = ovl_workdir(dentry);
	struct inode *wdir = workdir->d_inode;
	struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
	struct inode *udir = upperdir->d_inode;
	struct dentry *whiteout;
	struct dentry *upper;
	struct dentry *opaquedir = NULL;
	int err;
	int flags = 0;

	if (WARN_ON(!workdir))
		return -EROFS;

	if (is_dir) {
		if (OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
			opaquedir = ovl_check_empty_and_clear(dentry);
			err = PTR_ERR(opaquedir);
			if (IS_ERR(opaquedir))
				goto out;
		} else {
			LIST_HEAD(list);

			/*
			 * When removing an empty opaque directory, then it
			 * makes no sense to replace it with an exact replica of
			 * itself. But emptiness still needs to be checked.
			 */
			err = ovl_check_empty_dir(dentry, &list);
			ovl_cache_free(&list);
			if (err)
				goto out;
		}
	}

	err = ovl_lock_rename_workdir(workdir, upperdir);
	if (err)
		goto out_dput;

	upper = lookup_one_len(dentry->d_name.name, upperdir,
			dentry->d_name.len);
	err = PTR_ERR(upper);
	if (IS_ERR(upper))
		goto out_unlock;

	err = -ESTALE;
	if ((opaquedir && upper != opaquedir) ||
	    (!opaquedir && ovl_dentry_upper(dentry) &&
	     upper != ovl_dentry_upper(dentry))) {
		goto out_dput_upper;
	}

	whiteout = ovl_whiteout(workdir, dentry);
	err = PTR_ERR(whiteout);
	if (IS_ERR(whiteout))
		goto out_dput_upper;

	if (d_is_dir(upper))
		flags = RENAME_EXCHANGE;

	err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
	if (err)
		goto kill_whiteout;
	if (flags)
		ovl_cleanup(wdir, upper);

	ovl_dentry_version_inc(dentry->d_parent);
out_d_drop:
	d_drop(dentry);
	dput(whiteout);
out_dput_upper:
	dput(upper);
out_unlock:
	unlock_rename(workdir, upperdir);
out_dput:
	dput(opaquedir);
out:
	return err;

kill_whiteout:
	ovl_cleanup(wdir, whiteout);
	goto out_d_drop;
}

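On disk, the whiteout that ovl_remove_and_whiteout() renames into place is a character device with device number 0/0 (WHITEOUT_DEV), which is also why ovl_mknod() above refuses to create such a node through the overlay. A small illustrative sketch of that representation; the path is hypothetical and mknod() needs CAP_MKNOD, the capability the driver raises in its override credentials:

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(void)
{
	/* hypothetical scratch path */
	if (mknod("/tmp/wh.demo", S_IFCHR | 0000, makedev(0, 0)) != 0) {
		perror("mknod");
		return 1;
	}
	puts("created a whiteout-style 0/0 char device at /tmp/wh.demo");
	return 0;
}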
static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
{
	struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
	struct inode *dir = upperdir->d_inode;
	struct dentry *upper;
	int err;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	upper = lookup_one_len(dentry->d_name.name, upperdir,
			dentry->d_name.len);
	err = PTR_ERR(upper);
	if (IS_ERR(upper))
		goto out_unlock;

	err = -ESTALE;
	if (upper == ovl_dentry_upper(dentry)) {
		if (is_dir)
			err = vfs_rmdir(dir, upper);
		else
			err = vfs_unlink(dir, upper, NULL);
		ovl_dentry_version_inc(dentry->d_parent);
	}
	dput(upper);

	/*
	 * Keeping this dentry hashed would mean having to release
	 * upperpath/lowerpath, which could only be done if we are the
	 * sole user of this dentry. Too tricky... Just unhash for
	 * now.
	 */
	if (!err)
		d_drop(dentry);
out_unlock:
	inode_unlock(dir);

	return err;
}

static inline int ovl_check_sticky(struct dentry *dentry)
{
	struct inode *dir = ovl_dentry_real(dentry->d_parent)->d_inode;
	struct inode *inode = ovl_dentry_real(dentry)->d_inode;

	if (check_sticky(dir, inode))
		return -EPERM;

	return 0;
}

static int ovl_do_remove(struct dentry *dentry, bool is_dir)
{
	enum ovl_path_type type;
	int err;

	err = ovl_check_sticky(dentry);
	if (err)
		goto out;

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	err = ovl_copy_up(dentry->d_parent);
	if (err)
		goto out_drop_write;

	type = ovl_path_type(dentry);
	if (OVL_TYPE_PURE_UPPER(type)) {
		err = ovl_remove_upper(dentry, is_dir);
	} else {
		const struct cred *old_cred;
		struct cred *override_cred;

		err = -ENOMEM;
		override_cred = prepare_creds();
		if (!override_cred)
			goto out_drop_write;

		/*
		 * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir
		 * CAP_DAC_OVERRIDE for create in workdir, rename
		 * CAP_FOWNER for removing whiteout from sticky dir
		 * CAP_FSETID for chmod of opaque dir
		 * CAP_CHOWN for chown of opaque dir
		 */
		cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
		cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
		cap_raise(override_cred->cap_effective, CAP_FOWNER);
		cap_raise(override_cred->cap_effective, CAP_FSETID);
		cap_raise(override_cred->cap_effective, CAP_CHOWN);
		old_cred = override_creds(override_cred);

		err = ovl_remove_and_whiteout(dentry, is_dir);

		revert_creds(old_cred);
		put_cred(override_cred);
	}
out_drop_write:
	ovl_drop_write(dentry);
out:
	return err;
}

static int ovl_unlink(struct inode *dir, struct dentry *dentry)
{
	return ovl_do_remove(dentry, false);
}

static int ovl_rmdir(struct inode *dir, struct dentry *dentry)
{
	return ovl_do_remove(dentry, true);
}

static int ovl_rename2(struct inode *olddir, struct dentry *old,
		struct inode *newdir, struct dentry *new,
		unsigned int flags)
{
	int err;
	enum ovl_path_type old_type;
	enum ovl_path_type new_type;
	struct dentry *old_upperdir;
	struct dentry *new_upperdir;
	struct dentry *olddentry;
	struct dentry *newdentry;
	struct dentry *trap;
	bool old_opaque;
	bool new_opaque;
	bool cleanup_whiteout = false;
	bool overwrite = !(flags & RENAME_EXCHANGE);
	bool is_dir = d_is_dir(old);
	bool new_is_dir = false;
	struct dentry *opaquedir = NULL;
	const struct cred *old_cred = NULL;
	struct cred *override_cred = NULL;

	err = -EINVAL;
	if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE))
		goto out;

	flags &= ~RENAME_NOREPLACE;

	err = ovl_check_sticky(old);
	if (err)
		goto out;

	/* Don't copy up directory trees */
	old_type = ovl_path_type(old);
	err = -EXDEV;
	if (OVL_TYPE_MERGE_OR_LOWER(old_type) && is_dir)
		goto out;

	if (new->d_inode) {
		err = ovl_check_sticky(new);
		if (err)
			goto out;

		if (d_is_dir(new))
			new_is_dir = true;

		new_type = ovl_path_type(new);
		err = -EXDEV;
		if (!overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir)
			goto out;

		err = 0;
		if (!OVL_TYPE_UPPER(new_type) && !OVL_TYPE_UPPER(old_type)) {
			if (ovl_dentry_lower(old)->d_inode ==
			    ovl_dentry_lower(new)->d_inode)
				goto out;
		}
		if (OVL_TYPE_UPPER(new_type) && OVL_TYPE_UPPER(old_type)) {
			if (ovl_dentry_upper(old)->d_inode ==
			    ovl_dentry_upper(new)->d_inode)
				goto out;
		}
	} else {
		if (ovl_dentry_is_opaque(new))
			new_type = __OVL_PATH_UPPER;
		else
			new_type = __OVL_PATH_UPPER | __OVL_PATH_PURE;
	}

	err = ovl_want_write(old);
	if (err)
		goto out;

	err = ovl_copy_up(old);
	if (err)
		goto out_drop_write;

	err = ovl_copy_up(new->d_parent);
	if (err)
		goto out_drop_write;
	if (!overwrite) {
		err = ovl_copy_up(new);
		if (err)
			goto out_drop_write;
	}

	old_opaque = !OVL_TYPE_PURE_UPPER(old_type);
	new_opaque = !OVL_TYPE_PURE_UPPER(new_type);

	if (old_opaque || new_opaque) {
		err = -ENOMEM;
		override_cred = prepare_creds();
		if (!override_cred)
			goto out_drop_write;

		/*
		 * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir
		 * CAP_DAC_OVERRIDE for create in workdir
		 * CAP_FOWNER for removing whiteout from sticky dir
		 * CAP_FSETID for chmod of opaque dir
		 * CAP_CHOWN for chown of opaque dir
		 */
		cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
		cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
		cap_raise(override_cred->cap_effective, CAP_FOWNER);
		cap_raise(override_cred->cap_effective, CAP_FSETID);
		cap_raise(override_cred->cap_effective, CAP_CHOWN);
		old_cred = override_creds(override_cred);
	}

	if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) {
		opaquedir = ovl_check_empty_and_clear(new);
		err = PTR_ERR(opaquedir);
		if (IS_ERR(opaquedir)) {
			opaquedir = NULL;
			goto out_revert_creds;
		}
	}

	if (overwrite) {
		if (old_opaque) {
			if (new->d_inode || !new_opaque) {
				/* Whiteout source */
				flags |= RENAME_WHITEOUT;
			} else {
				/* Switch whiteouts */
				flags |= RENAME_EXCHANGE;
			}
		} else if (is_dir && !new->d_inode && new_opaque) {
			flags |= RENAME_EXCHANGE;
			cleanup_whiteout = true;
		}
	}

	old_upperdir = ovl_dentry_upper(old->d_parent);
	new_upperdir = ovl_dentry_upper(new->d_parent);

	trap = lock_rename(new_upperdir, old_upperdir);

	olddentry = lookup_one_len(old->d_name.name, old_upperdir,
			old->d_name.len);
	err = PTR_ERR(olddentry);
	if (IS_ERR(olddentry))
		goto out_unlock;

	err = -ESTALE;
	if (olddentry != ovl_dentry_upper(old))
		goto out_dput_old;

	newdentry = lookup_one_len(new->d_name.name, new_upperdir,
			new->d_name.len);
	err = PTR_ERR(newdentry);
	if (IS_ERR(newdentry))
		goto out_dput_old;

	err = -ESTALE;
	if (ovl_dentry_upper(new)) {
		if (opaquedir) {
			if (newdentry != opaquedir)
				goto out_dput;
		} else {
			if (newdentry != ovl_dentry_upper(new))
				goto out_dput;
		}
	} else {
		if (!d_is_negative(newdentry) &&
		    (!new_opaque || !ovl_is_whiteout(newdentry)))
			goto out_dput;
	}

	if (olddentry == trap)
		goto out_dput;
	if (newdentry == trap)
		goto out_dput;

	if (is_dir && !old_opaque && new_opaque) {
		err = ovl_set_opaque(olddentry);
		if (err)
			goto out_dput;
	}
	if (!overwrite && new_is_dir && old_opaque && !new_opaque) {
		err = ovl_set_opaque(newdentry);
		if (err)
			goto out_dput;
	}

	if (old_opaque || new_opaque) {
		err = ovl_do_rename(old_upperdir->d_inode, olddentry,
				new_upperdir->d_inode, newdentry,
				flags);
	} else {
		/* No debug for the plain case */
		BUG_ON(flags & ~RENAME_EXCHANGE);
		err = vfs_rename(old_upperdir->d_inode, olddentry,
				new_upperdir->d_inode, newdentry,
				NULL, flags);
	}

	if (err) {
		if (is_dir && !old_opaque && new_opaque)
			ovl_remove_opaque(olddentry);
		if (!overwrite && new_is_dir && old_opaque && !new_opaque)
			ovl_remove_opaque(newdentry);
		goto out_dput;
	}

	if (is_dir && old_opaque && !new_opaque)
		ovl_remove_opaque(olddentry);
	if (!overwrite && new_is_dir && !old_opaque && new_opaque)
		ovl_remove_opaque(newdentry);

	/*
	 * Old dentry now lives in different location. Dentries in
	 * lowerstack are stale. We cannot drop them here because
	 * access to them is lockless. This could be only pure upper
	 * or opaque directory - numlower is zero. Or upper non-dir
	 * entry - its pureness is tracked by flag opaque.
	 */
	if (old_opaque != new_opaque) {
		ovl_dentry_set_opaque(old, new_opaque);
		if (!overwrite)
			ovl_dentry_set_opaque(new, old_opaque);
	}

	if (cleanup_whiteout)
		ovl_cleanup(old_upperdir->d_inode, newdentry);

	ovl_dentry_version_inc(old->d_parent);
	ovl_dentry_version_inc(new->d_parent);

out_dput:
	dput(newdentry);
out_dput_old:
	dput(olddentry);
out_unlock:
	unlock_rename(new_upperdir, old_upperdir);
out_revert_creds:
	if (old_opaque || new_opaque) {
		revert_creds(old_cred);
		put_cred(override_cred);
	}
out_drop_write:
	ovl_drop_write(old);
out:
	dput(opaquedir);
	return err;
}

const struct inode_operations ovl_dir_inode_operations = {
	.lookup = ovl_lookup,
	.mkdir = ovl_mkdir,
	.symlink = ovl_symlink,
	.unlink = ovl_unlink,
	.rmdir = ovl_rmdir,
	.rename2 = ovl_rename2,
	.link = ovl_link,
	.setattr = ovl_setattr,
	.create = ovl_create,
	.mknod = ovl_mknod,
	.permission = ovl_permission,
	.getattr = ovl_dir_getattr,
	.setxattr = ovl_setxattr,
	.getxattr = ovl_getxattr,
	.listxattr = ovl_listxattr,
	.removexattr = ovl_removexattr,
};

executer/kernel/mcoverlayfs/linux-4.6.7/inode.c | 494 (new file)
@@ -0,0 +1,494 @@
/*
 *
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include "overlayfs.h"

static int ovl_copy_up_truncate(struct dentry *dentry)
{
	int err;
	struct dentry *parent;
	struct kstat stat;
	struct path lowerpath;

	parent = dget_parent(dentry);
	err = ovl_copy_up(parent);
	if (err)
		goto out_dput_parent;

	ovl_path_lower(dentry, &lowerpath);
	err = vfs_getattr(&lowerpath, &stat);
	if (err)
		goto out_dput_parent;

	stat.size = 0;
	err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);

out_dput_parent:
	dput(parent);
	return err;
}

int ovl_setattr(struct dentry *dentry, struct iattr *attr)
{
	int err;
	struct dentry *upperdentry;
	unsigned opt = ovl_get_config_opt(dentry);

	if (OVL_OPT_NOCOPYUPW(opt)) {
		return 0;
	}

	/*
	 * Check for permissions before trying to copy-up. This is redundant
	 * since it will be rechecked later by ->setattr() on upper dentry. But
	 * without this, copy-up can be triggered by just about anybody.
	 *
	 * We don't initialize inode->size, which just means that
	 * inode_newsize_ok() will always check against MAX_LFS_FILESIZE and not
	 * check for a swapfile (which this won't be anyway).
	 */
	err = inode_change_ok(dentry->d_inode, attr);
	if (err)
		return err;

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	if (attr->ia_valid & ATTR_SIZE) {
		struct inode *realinode = d_inode(ovl_dentry_real(dentry));

		err = -ETXTBSY;
		if (atomic_read(&realinode->i_writecount) < 0)
			goto out_drop_write;
	}

	err = ovl_copy_up(dentry);
	if (!err) {
		struct inode *winode = NULL;

		upperdentry = ovl_dentry_upper(dentry);

		if (attr->ia_valid & ATTR_SIZE) {
			winode = d_inode(upperdentry);
			err = get_write_access(winode);
			if (err)
				goto out_drop_write;
		}

		if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
			attr->ia_valid &= ~ATTR_MODE;

		inode_lock(upperdentry->d_inode);
		err = notify_change(upperdentry, attr, NULL);
		if (!err)
			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
		inode_unlock(upperdentry->d_inode);

		if (winode)
			put_write_access(winode);
	}
out_drop_write:
	ovl_drop_write(dentry);
out:
	return err;
}

static int ovl_getattr(struct vfsmount *mnt, struct dentry *dentry,
		struct kstat *stat)
{
	struct path realpath;

	ovl_path_real(dentry, &realpath);
	return vfs_getattr(&realpath, stat);
}

int ovl_permission(struct inode *inode, int mask)
{
	struct ovl_entry *oe;
	struct dentry *alias = NULL;
	struct inode *realinode;
	struct dentry *realdentry;
	bool is_upper;
	int err;

	if (S_ISDIR(inode->i_mode)) {
		oe = inode->i_private;
	} else if (mask & MAY_NOT_BLOCK) {
		return -ECHILD;
	} else {
		/*
		 * For non-directories find an alias and get the info
		 * from there.
		 */
		alias = d_find_any_alias(inode);
		if (WARN_ON(!alias))
			return -ENOENT;

		oe = alias->d_fsdata;

		ovl_reset_ovl_entry(&oe, alias);
	}

	realdentry = ovl_entry_real(oe, &is_upper);

	if (ovl_is_default_permissions(inode)) {
		struct kstat stat;
		struct path realpath = { .dentry = realdentry };

		if (mask & MAY_NOT_BLOCK)
			return -ECHILD;

		realpath.mnt = ovl_entry_mnt_real(oe, inode, is_upper);

		err = vfs_getattr(&realpath, &stat);
		if (err)
			goto out_dput;

		err = -ESTALE;
		if ((stat.mode ^ inode->i_mode) & S_IFMT)
			goto out_dput;

		inode->i_mode = stat.mode;
		inode->i_uid = stat.uid;
		inode->i_gid = stat.gid;

		err = generic_permission(inode, mask);
		goto out_dput;
	}

	/* Careful in RCU walk mode */
	realinode = ACCESS_ONCE(realdentry->d_inode);
	if (!realinode) {
		WARN_ON(!(mask & MAY_NOT_BLOCK));
		err = -ENOENT;
		goto out_dput;
	}

	if (mask & MAY_WRITE) {
		umode_t mode = realinode->i_mode;

		/*
		 * Writes will always be redirected to upper layer, so
		 * ignore lower layer being read-only.
		 *
		 * If the overlay itself is read-only then proceed
		 * with the permission check, don't return EROFS.
		 * This will only happen if this is the lower layer of
		 * another overlayfs.
		 *
		 * If upper fs becomes read-only after the overlay was
		 * constructed return EROFS to prevent modification of
		 * upper layer.
		 */
		err = -EROFS;
		if (is_upper && !IS_RDONLY(inode) && IS_RDONLY(realinode) &&
		    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
			goto out_dput;
	}

	err = __inode_permission(realinode, mask);
out_dput:
	dput(alias);
	return err;
}

static const char *ovl_get_link(struct dentry *dentry,
		struct inode *inode,
		struct delayed_call *done)
{
	struct dentry *realdentry;
	struct inode *realinode;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	realdentry = ovl_dentry_real(dentry);
	realinode = realdentry->d_inode;

	if (WARN_ON(!realinode->i_op->get_link))
		return ERR_PTR(-EPERM);

	return realinode->i_op->get_link(realdentry, realinode, done);
}

static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
{
	struct path realpath;
	struct inode *realinode;

	ovl_path_real(dentry, &realpath);
	realinode = realpath.dentry->d_inode;

	if (!realinode->i_op->readlink)
		return -EINVAL;

	touch_atime(&realpath);

	return realinode->i_op->readlink(realpath.dentry, buf, bufsiz);
}

static bool ovl_is_private_xattr(const char *name)
{
	return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
}

int ovl_setxattr(struct dentry *dentry, const char *name,
		const void *value, size_t size, int flags)
{
	int err;
	struct dentry *upperdentry;
	unsigned opt = ovl_get_config_opt(dentry);

	if (OVL_OPT_NOCOPYUPW(opt)) {
		return 0;
	}

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	err = -EPERM;
	if (ovl_is_private_xattr(name))
		goto out_drop_write;

	err = ovl_copy_up(dentry);
	if (err)
		goto out_drop_write;

	upperdentry = ovl_dentry_upper(dentry);
	err = vfs_setxattr(upperdentry, name, value, size, flags);

out_drop_write:
	ovl_drop_write(dentry);
out:
	return err;
}

static bool ovl_need_xattr_filter(struct dentry *dentry,
		enum ovl_path_type type)
{
	if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER)
		return S_ISDIR(dentry->d_inode->i_mode);
	else
		return false;
}

ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
		void *value, size_t size)
{
	struct path realpath;
	enum ovl_path_type type = ovl_path_real(dentry, &realpath);

	if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
		return -ENODATA;

	return vfs_getxattr(realpath.dentry, name, value, size);
}

ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
{
	struct path realpath;
	enum ovl_path_type type = ovl_path_real(dentry, &realpath);
	ssize_t res;
	int off;

	res = vfs_listxattr(realpath.dentry, list, size);
	if (res <= 0 || size == 0)
		return res;

	if (!ovl_need_xattr_filter(dentry, type))
		return res;

	/* filter out private xattrs */
	for (off = 0; off < res;) {
		char *s = list + off;
		size_t slen = strlen(s) + 1;

		BUG_ON(off + slen > res);

		if (ovl_is_private_xattr(s)) {
			res -= slen;
			memmove(s, s + slen, res - off);
		} else {
			off += slen;
		}
	}

	return res;
}

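ovl_listxattr() compacts the returned list in place: the buffer is a run of NUL-terminated names, and each private entry is squeezed out with memmove() while the remaining length shrinks. A standalone sketch of the same compaction, assuming the overlay's private prefix is "trusted.overlay." as in upstream overlayfs:

#include <stdio.h>
#include <string.h>

static int is_private(const char *name)
{
	return strncmp(name, "trusted.overlay.", 16) == 0;
}

static size_t filter(char *list, size_t res)
{
	size_t off;

	for (off = 0; off < res;) {
		char *s = list + off;
		size_t slen = strlen(s) + 1;

		if (is_private(s)) {
			res -= slen;
			memmove(s, s + slen, res - off);
		} else {
			off += slen;
		}
	}
	return res;
}

int main(void)
{
	char list[] = "user.a\0trusted.overlay.opaque\0user.b\0";
	size_t res = filter(list, sizeof(list) - 1);
	size_t off;

	for (off = 0; off < res; off += strlen(list + off) + 1)
		printf("%s\n", list + off);	/* user.a, user.b */
	return 0;
}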
int ovl_removexattr(struct dentry *dentry, const char *name)
{
	int err;
	struct path realpath;
	enum ovl_path_type type = ovl_path_real(dentry, &realpath);
	unsigned opt = ovl_get_config_opt(dentry);

	if (OVL_OPT_NOCOPYUPW(opt)) {
		return 0;
	}

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	err = -ENODATA;
	if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
		goto out_drop_write;

	if (!OVL_TYPE_UPPER(type)) {
		err = vfs_getxattr(realpath.dentry, name, NULL, 0);
		if (err < 0)
			goto out_drop_write;

		err = ovl_copy_up(dentry);
		if (err)
			goto out_drop_write;

		ovl_path_upper(dentry, &realpath);
	}

	err = vfs_removexattr(realpath.dentry, name);
out_drop_write:
	ovl_drop_write(dentry);
out:
	return err;
}

static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
		struct dentry *realdentry)
{
	if (OVL_TYPE_UPPER(type))
		return false;

	if (special_file(realdentry->d_inode->i_mode))
		return false;

	if (!(OPEN_FMODE(flags) & FMODE_WRITE) && !(flags & O_TRUNC))
		return false;

	return true;
}

struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
{
	int err;
	struct path realpath;
	enum ovl_path_type type;
	unsigned opt = ovl_get_config_opt(dentry);

	if (d_is_dir(dentry))
		return d_backing_inode(dentry);

	type = ovl_path_real(dentry, &realpath);
	if (!OVL_OPT_NOCOPYUPW(opt) &&
	    ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
		OVL_DEBUG("copyup: realpath.dentry=%pd4, i_ino=%lu\n",
			realpath.dentry, realpath.dentry->d_inode->i_ino);
		err = ovl_want_write(dentry);
		if (err)
			return ERR_PTR(err);

		if (file_flags & O_TRUNC)
			err = ovl_copy_up_truncate(dentry);
		else
			err = ovl_copy_up(dentry);
		ovl_drop_write(dentry);
		if (err)
			return ERR_PTR(err);

		ovl_path_upper(dentry, &realpath);
	}

	if (realpath.dentry->d_flags & DCACHE_OP_SELECT_INODE)
		return realpath.dentry->d_op->d_select_inode(realpath.dentry, file_flags);

	if (OVL_OPT_NOFSCHECK(opt)) {
		if (realpath.dentry->d_inode->i_sb->s_magic == SYSFS_MAGIC) {
			OVL_DEBUG("sysfs: dentry=%pd4, i_ino=%lu\n",
				dentry, dentry->d_inode->i_ino);
			OVL_DEBUG("sysfs: realpath.dentry=%pd4, i_ino=%lu\n",
				realpath.dentry, realpath.dentry->d_inode->i_ino);
			if (!dentry->d_inode->i_private) {
				dentry->d_inode->i_private = dentry->d_fsdata;
				dentry->d_fsdata = realpath.dentry->d_fsdata;
			}
		}
	}

	return d_backing_inode(realpath.dentry);
}

static const struct inode_operations ovl_file_inode_operations = {
	.setattr = ovl_setattr,
	.permission = ovl_permission,
	.getattr = ovl_getattr,
	.setxattr = ovl_setxattr,
	.getxattr = ovl_getxattr,
	.listxattr = ovl_listxattr,
	.removexattr = ovl_removexattr,
};

static const struct inode_operations ovl_symlink_inode_operations = {
	.setattr = ovl_setattr,
	.get_link = ovl_get_link,
	.readlink = ovl_readlink,
	.getattr = ovl_getattr,
	.setxattr = ovl_setxattr,
	.getxattr = ovl_getxattr,
	.listxattr = ovl_listxattr,
	.removexattr = ovl_removexattr,
};

struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
		struct ovl_entry *oe)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (!inode)
		return NULL;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_flags |= S_NOATIME | S_NOCMTIME;

	mode &= S_IFMT;
	switch (mode) {
	case S_IFDIR:
		inode->i_private = oe;
		inode->i_op = &ovl_dir_inode_operations;
		inode->i_fop = &ovl_dir_operations;
		break;

	case S_IFLNK:
		inode->i_op = &ovl_symlink_inode_operations;
		break;

	case S_IFREG:
	case S_IFSOCK:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFIFO:
		inode->i_op = &ovl_file_inode_operations;
		break;

	default:
		WARN(1, "illegal file type: %i\n", mode);
		iput(inode);
|
||||
inode = NULL;
|
||||
}
|
||||
|
||||
return inode;
|
||||
}
|
||||
222	executer/kernel/mcoverlayfs/linux-4.6.7/overlayfs.h	Normal file
@@ -0,0 +1,222 @@
/*
 *
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>

//#define DEBUG
#ifdef DEBUG
#define OVL_DEBUG(format, ...) pr_err("[DEBUG] %s(): " format, __FUNCTION__, ##__VA_ARGS__)
#else
#define OVL_DEBUG(format, ...) {}
#endif

struct ovl_entry;

enum ovl_path_type {
    __OVL_PATH_PURE = (1 << 0),
    __OVL_PATH_UPPER = (1 << 1),
    __OVL_PATH_MERGE = (1 << 2),
};

#define OVL_TYPE_UPPER(type) ((type) & __OVL_PATH_UPPER)
#define OVL_TYPE_MERGE(type) ((type) & __OVL_PATH_MERGE)
#define OVL_TYPE_PURE_UPPER(type) ((type) & __OVL_PATH_PURE)
#define OVL_TYPE_MERGE_OR_LOWER(type) \
    (OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))

#define OVL_XATTR_PRE_NAME "trusted.overlay."
#define OVL_XATTR_PRE_LEN  16
#define OVL_XATTR_OPAQUE   OVL_XATTR_PRE_NAME"opaque"

enum ovl_opt_bit {
    __OVL_OPT_DEFAULT = 0,
    __OVL_OPT_NOCOPYUPW = (1 << 0),
    __OVL_OPT_NOFSCHECK = (1 << 1),
};

#define OVL_OPT_NOCOPYUPW(opt) ((opt) & __OVL_OPT_NOCOPYUPW)
#define OVL_OPT_NOFSCHECK(opt) ((opt) & __OVL_OPT_NOFSCHECK)

static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
{
    int err = vfs_rmdir(dir, dentry);
    pr_debug("rmdir(%pd2) = %i\n", dentry, err);
    return err;
}

static inline int ovl_do_unlink(struct inode *dir, struct dentry *dentry)
{
    int err = vfs_unlink(dir, dentry, NULL);
    pr_debug("unlink(%pd2) = %i\n", dentry, err);
    return err;
}

static inline int ovl_do_link(struct dentry *old_dentry, struct inode *dir,
                              struct dentry *new_dentry, bool debug)
{
    int err = vfs_link(old_dentry, dir, new_dentry, NULL);
    if (debug) {
        pr_debug("link(%pd2, %pd2) = %i\n",
                 old_dentry, new_dentry, err);
    }
    return err;
}

static inline int ovl_do_create(struct inode *dir, struct dentry *dentry,
                                umode_t mode, bool debug)
{
    int err = vfs_create(dir, dentry, mode, true);
    if (debug)
        pr_debug("create(%pd2, 0%o) = %i\n", dentry, mode, err);
    return err;
}

static inline int ovl_do_mkdir(struct inode *dir, struct dentry *dentry,
                               umode_t mode, bool debug)
{
    int err = vfs_mkdir(dir, dentry, mode);
    if (debug)
        pr_debug("mkdir(%pd2, 0%o) = %i\n", dentry, mode, err);
    return err;
}

static inline int ovl_do_mknod(struct inode *dir, struct dentry *dentry,
                               umode_t mode, dev_t dev, bool debug)
{
    int err = vfs_mknod(dir, dentry, mode, dev);
    if (debug) {
        pr_debug("mknod(%pd2, 0%o, 0%o) = %i\n",
                 dentry, mode, dev, err);
    }
    return err;
}

static inline int ovl_do_symlink(struct inode *dir, struct dentry *dentry,
                                 const char *oldname, bool debug)
{
    int err = vfs_symlink(dir, dentry, oldname);
    if (debug)
        pr_debug("symlink(\"%s\", %pd2) = %i\n", oldname, dentry, err);
    return err;
}

static inline int ovl_do_setxattr(struct dentry *dentry, const char *name,
                                  const void *value, size_t size, int flags)
{
    int err = vfs_setxattr(dentry, name, value, size, flags);
    pr_debug("setxattr(%pd2, \"%s\", \"%*s\", 0x%x) = %i\n",
             dentry, name, (int) size, (char *) value, flags, err);
    return err;
}

static inline int ovl_do_removexattr(struct dentry *dentry, const char *name)
{
    int err = vfs_removexattr(dentry, name);
    pr_debug("removexattr(%pd2, \"%s\") = %i\n", dentry, name, err);
    return err;
}

static inline int ovl_do_rename(struct inode *olddir, struct dentry *olddentry,
                                struct inode *newdir, struct dentry *newdentry,
                                unsigned int flags)
{
    int err;

    pr_debug("rename2(%pd2, %pd2, 0x%x)\n",
             olddentry, newdentry, flags);

    err = vfs_rename(olddir, olddentry, newdir, newdentry, NULL, flags);

    if (err) {
        pr_debug("...rename2(%pd2, %pd2, ...) = %i\n",
                 olddentry, newdentry, err);
    }
    return err;
}

static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry)
{
    int err = vfs_whiteout(dir, dentry);
    pr_debug("whiteout(%pd2) = %i\n", dentry, err);
    return err;
}

unsigned ovl_get_config_opt(struct dentry *dentry);
void ovl_reset_ovl_entry(struct ovl_entry **oe, struct dentry *dentry);
enum ovl_path_type ovl_path_type(struct dentry *dentry);
u64 ovl_dentry_version_get(struct dentry *dentry);
void ovl_dentry_version_inc(struct dentry *dentry);
void ovl_path_upper(struct dentry *dentry, struct path *path);
void ovl_path_lower(struct dentry *dentry, struct path *path);
enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
struct dentry *ovl_dentry_upper(struct dentry *dentry);
struct dentry *ovl_dentry_lower(struct dentry *dentry);
struct dentry *ovl_dentry_real(struct dentry *dentry);
struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper);
struct vfsmount *ovl_entry_mnt_real(struct ovl_entry *oe, struct inode *inode,
                                    bool is_upper);
struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry);
bool ovl_is_default_permissions(struct inode *inode);
void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache);
struct dentry *ovl_workdir(struct dentry *dentry);
int ovl_want_write(struct dentry *dentry);
void ovl_drop_write(struct dentry *dentry);
bool ovl_dentry_is_opaque(struct dentry *dentry);
void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque);
bool ovl_is_whiteout(struct dentry *dentry);
void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                          unsigned int flags);
struct file *ovl_path_open(struct path *path, int flags);

struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry,
                                struct kstat *stat, const char *link);

/* readdir.c */
extern const struct file_operations ovl_dir_operations;
int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list);
void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list);
void ovl_cache_free(struct list_head *list);
int ovl_check_d_type_supported(struct path *realpath);

/* inode.c */
int ovl_setattr(struct dentry *dentry, struct iattr *attr);
int ovl_permission(struct inode *inode, int mask);
int ovl_setxattr(struct dentry *dentry, const char *name,
                 const void *value, size_t size, int flags);
ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
                     void *value, size_t size);
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
int ovl_removexattr(struct dentry *dentry, const char *name);
struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags);

struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
                            struct ovl_entry *oe);
static inline void ovl_copyattr(struct inode *from, struct inode *to)
{
    to->i_uid = from->i_uid;
    to->i_gid = from->i_gid;
    to->i_mode = from->i_mode;
}

/* dir.c */
extern const struct inode_operations ovl_dir_inode_operations;
struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry);
int ovl_create_real(struct inode *dir, struct dentry *newdentry,
                    struct kstat *stat, const char *link,
                    struct dentry *hardlink, bool debug);
void ovl_cleanup(struct inode *dir, struct dentry *dentry);

/* copy_up.c */
int ovl_copy_up(struct dentry *dentry);
int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
                    struct path *lowerpath, struct kstat *stat);
int ovl_copy_xattr(struct dentry *old, struct dentry *new, unsigned opt);
int ovl_set_attr(struct dentry *upper, struct kstat *stat);
616	executer/kernel/mcoverlayfs/linux-4.6.7/readdir.c	Normal file
@@ -0,0 +1,616 @@
/*
 *
 * Copyright (C) 2011 Novell Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include "overlayfs.h"

struct ovl_cache_entry {
    unsigned int len;
    unsigned int type;
    u64 ino;
    struct list_head l_node;
    struct rb_node node;
    struct ovl_cache_entry *next_maybe_whiteout;
    bool is_whiteout;
    char name[];
};

struct ovl_dir_cache {
    long refcount;
    u64 version;
    struct list_head entries;
};

struct ovl_readdir_data {
    struct dir_context ctx;
    bool is_lowest;
    struct rb_root root;
    struct list_head *list;
    struct list_head middle;
    struct ovl_cache_entry *first_maybe_whiteout;
    int count;
    int err;
    bool d_type_supported;
};

struct ovl_dir_file {
    bool is_real;
    bool is_upper;
    struct ovl_dir_cache *cache;
    struct list_head *cursor;
    struct file *realfile;
    struct file *upperfile;
};

static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
    return container_of(n, struct ovl_cache_entry, node);
}

static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
                                                    const char *name, int len)
{
    struct rb_node *node = root->rb_node;
    int cmp;

    while (node) {
        struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

        cmp = strncmp(name, p->name, len);
        if (cmp > 0)
            node = p->node.rb_right;
        else if (cmp < 0 || len < p->len)
            node = p->node.rb_left;
        else
            return p;
    }

    return NULL;
}

static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
                                                   const char *name, int len,
                                                   u64 ino, unsigned int d_type)
{
    struct ovl_cache_entry *p;
    size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

    p = kmalloc(size, GFP_KERNEL);
    if (!p)
        return NULL;

    memcpy(p->name, name, len);
    p->name[len] = '\0';
    p->len = len;
    p->type = d_type;
    p->ino = ino;
    p->is_whiteout = false;

    if (d_type == DT_CHR) {
        p->next_maybe_whiteout = rdd->first_maybe_whiteout;
        rdd->first_maybe_whiteout = p;
    }
    return p;
}

static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
                                  const char *name, int len, u64 ino,
                                  unsigned int d_type)
{
    struct rb_node **newp = &rdd->root.rb_node;
    struct rb_node *parent = NULL;
    struct ovl_cache_entry *p;

    while (*newp) {
        int cmp;
        struct ovl_cache_entry *tmp;

        parent = *newp;
        tmp = ovl_cache_entry_from_node(*newp);
        cmp = strncmp(name, tmp->name, len);
        if (cmp > 0)
            newp = &tmp->node.rb_right;
        else if (cmp < 0 || len < tmp->len)
            newp = &tmp->node.rb_left;
        else
            return 0;
    }

    p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
    if (p == NULL)
        return -ENOMEM;

    list_add_tail(&p->l_node, rdd->list);
    rb_link_node(&p->node, parent, newp);
    rb_insert_color(&p->node, &rdd->root);

    return 0;
}

static int ovl_fill_lowest(struct ovl_readdir_data *rdd,
                           const char *name, int namelen,
                           loff_t offset, u64 ino, unsigned int d_type)
{
    struct ovl_cache_entry *p;

    p = ovl_cache_entry_find(&rdd->root, name, namelen);
    if (p) {
        list_move_tail(&p->l_node, &rdd->middle);
    } else {
        p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
        if (p == NULL)
            rdd->err = -ENOMEM;
        else
            list_add_tail(&p->l_node, &rdd->middle);
    }

    return rdd->err;
}

void ovl_cache_free(struct list_head *list)
{
    struct ovl_cache_entry *p;
    struct ovl_cache_entry *n;

    list_for_each_entry_safe(p, n, list, l_node)
        kfree(p);

    INIT_LIST_HEAD(list);
}

static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
    struct ovl_dir_cache *cache = od->cache;

    WARN_ON(cache->refcount <= 0);
    cache->refcount--;
    if (!cache->refcount) {
        if (ovl_dir_cache(dentry) == cache)
            ovl_set_dir_cache(dentry, NULL);

        ovl_cache_free(&cache->entries);
        kfree(cache);
    }
}

static int ovl_fill_merge(struct dir_context *ctx, const char *name,
                          int namelen, loff_t offset, u64 ino,
                          unsigned int d_type)
{
    struct ovl_readdir_data *rdd =
        container_of(ctx, struct ovl_readdir_data, ctx);

    rdd->count++;
    if (!rdd->is_lowest)
        return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
    else
        return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}

static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
{
    int err;
    struct ovl_cache_entry *p;
    struct dentry *dentry;
    const struct cred *old_cred;
    struct cred *override_cred;

    override_cred = prepare_creds();
    if (!override_cred)
        return -ENOMEM;

    /*
     * CAP_DAC_OVERRIDE for lookup
     */
    cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
    old_cred = override_creds(override_cred);

    err = mutex_lock_killable(&dir->d_inode->i_mutex);
    if (!err) {
        while (rdd->first_maybe_whiteout) {
            p = rdd->first_maybe_whiteout;
            rdd->first_maybe_whiteout = p->next_maybe_whiteout;
            dentry = lookup_one_len(p->name, dir, p->len);
            if (!IS_ERR(dentry)) {
                p->is_whiteout = ovl_is_whiteout(dentry);
                dput(dentry);
            }
        }
        inode_unlock(dir->d_inode);
    }
    revert_creds(old_cred);
    put_cred(override_cred);

    return err;
}

static inline int ovl_dir_read(struct path *realpath,
                               struct ovl_readdir_data *rdd)
{
    struct file *realfile;
    int err;

    realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
    if (IS_ERR(realfile))
        return PTR_ERR(realfile);

    rdd->first_maybe_whiteout = NULL;
    rdd->ctx.pos = 0;
    do {
        rdd->count = 0;
        rdd->err = 0;
        err = iterate_dir(realfile, &rdd->ctx);
        if (err >= 0)
            err = rdd->err;
    } while (!err && rdd->count);

    if (!err && rdd->first_maybe_whiteout)
        err = ovl_check_whiteouts(realpath->dentry, rdd);

    fput(realfile);

    return err;
}

static void ovl_dir_reset(struct file *file)
{
    struct ovl_dir_file *od = file->private_data;
    struct ovl_dir_cache *cache = od->cache;
    struct dentry *dentry = file->f_path.dentry;
    enum ovl_path_type type = ovl_path_type(dentry);

    if (cache && ovl_dentry_version_get(dentry) != cache->version) {
        ovl_cache_put(od, dentry);
        od->cache = NULL;
        od->cursor = NULL;
    }
    WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type));
    if (od->is_real && OVL_TYPE_MERGE(type))
        od->is_real = false;
}

static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
{
    int err;
    struct path realpath;
    struct ovl_readdir_data rdd = {
        .ctx.actor = ovl_fill_merge,
        .list = list,
        .root = RB_ROOT,
        .is_lowest = false,
    };
    int idx, next;

    for (idx = 0; idx != -1; idx = next) {
        next = ovl_path_next(idx, dentry, &realpath);

        if (next != -1) {
            err = ovl_dir_read(&realpath, &rdd);
            if (err)
                break;
        } else {
            /*
             * Insert lowest layer entries before upper ones, this
             * allows offsets to be reasonably constant
             */
            list_add(&rdd.middle, rdd.list);
            rdd.is_lowest = true;
            err = ovl_dir_read(&realpath, &rdd);
            list_del(&rdd.middle);
        }
    }
    return err;
}

static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
    struct list_head *p;
    loff_t off = 0;

    list_for_each(p, &od->cache->entries) {
        if (off >= pos)
            break;
        off++;
    }
    /* Cursor is safe since the cache is stable */
    od->cursor = p;
}

static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
    int res;
    struct ovl_dir_cache *cache;

    cache = ovl_dir_cache(dentry);
    if (cache && ovl_dentry_version_get(dentry) == cache->version) {
        cache->refcount++;
        return cache;
    }
    ovl_set_dir_cache(dentry, NULL);

    cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
    if (!cache)
        return ERR_PTR(-ENOMEM);

    cache->refcount = 1;
    INIT_LIST_HEAD(&cache->entries);

    res = ovl_dir_read_merged(dentry, &cache->entries);
    if (res) {
        ovl_cache_free(&cache->entries);
        kfree(cache);
        return ERR_PTR(res);
    }

    cache->version = ovl_dentry_version_get(dentry);
    ovl_set_dir_cache(dentry, cache);

    return cache;
}

static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
    struct ovl_dir_file *od = file->private_data;
    struct dentry *dentry = file->f_path.dentry;
    struct ovl_cache_entry *p;

    if (!ctx->pos)
        ovl_dir_reset(file);

    if (od->is_real)
        return iterate_dir(od->realfile, ctx);

    if (!od->cache) {
        struct ovl_dir_cache *cache;

        cache = ovl_cache_get(dentry);
        if (IS_ERR(cache))
            return PTR_ERR(cache);

        od->cache = cache;
        ovl_seek_cursor(od, ctx->pos);
    }

    while (od->cursor != &od->cache->entries) {
        p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
        if (!p->is_whiteout)
            if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
                break;
        od->cursor = p->l_node.next;
        ctx->pos++;
    }
    return 0;
}

static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
    loff_t res;
    struct ovl_dir_file *od = file->private_data;

    inode_lock(file_inode(file));
    if (!file->f_pos)
        ovl_dir_reset(file);

    if (od->is_real) {
        res = vfs_llseek(od->realfile, offset, origin);
        file->f_pos = od->realfile->f_pos;
    } else {
        res = -EINVAL;

        switch (origin) {
        case SEEK_CUR:
            offset += file->f_pos;
            break;
        case SEEK_SET:
            break;
        default:
            goto out_unlock;
        }
        if (offset < 0)
            goto out_unlock;

        if (offset != file->f_pos) {
            file->f_pos = offset;
            if (od->cache)
                ovl_seek_cursor(od, offset);
        }
        res = offset;
    }
out_unlock:
    inode_unlock(file_inode(file));

    return res;
}

static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
                         int datasync)
{
    struct ovl_dir_file *od = file->private_data;
    struct dentry *dentry = file->f_path.dentry;
    struct file *realfile = od->realfile;

    /*
     * Need to check if we started out being a lower dir, but got copied up
     */
    if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
        struct inode *inode = file_inode(file);

        realfile = lockless_dereference(od->upperfile);
        if (!realfile) {
            struct path upperpath;

            ovl_path_upper(dentry, &upperpath);
            realfile = ovl_path_open(&upperpath, O_RDONLY);
            smp_mb__before_spinlock();
            inode_lock(inode);
            if (!od->upperfile) {
                if (IS_ERR(realfile)) {
                    inode_unlock(inode);
                    return PTR_ERR(realfile);
                }
                od->upperfile = realfile;
            } else {
                /* somebody has beaten us to it */
                if (!IS_ERR(realfile))
                    fput(realfile);
                realfile = od->upperfile;
            }
            inode_unlock(inode);
        }
    }

    return vfs_fsync_range(realfile, start, end, datasync);
}

static int ovl_dir_release(struct inode *inode, struct file *file)
{
    struct ovl_dir_file *od = file->private_data;

    if (od->cache) {
        inode_lock(inode);
        ovl_cache_put(od, file->f_path.dentry);
        inode_unlock(inode);
    }
    fput(od->realfile);
    if (od->upperfile)
        fput(od->upperfile);
    kfree(od);

    return 0;
}

static int ovl_dir_open(struct inode *inode, struct file *file)
{
    struct path realpath;
    struct file *realfile;
    struct ovl_dir_file *od;
    enum ovl_path_type type;

    od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
    if (!od)
        return -ENOMEM;

    type = ovl_path_real(file->f_path.dentry, &realpath);
    realfile = ovl_path_open(&realpath, file->f_flags);
    if (IS_ERR(realfile)) {
        kfree(od);
        return PTR_ERR(realfile);
    }
    od->realfile = realfile;
    od->is_real = !OVL_TYPE_MERGE(type);
    od->is_upper = OVL_TYPE_UPPER(type);
    file->private_data = od;

    return 0;
}

const struct file_operations ovl_dir_operations = {
    .read = generic_read_dir,
    .open = ovl_dir_open,
    .iterate = ovl_iterate,
    .llseek = ovl_dir_llseek,
    .fsync = ovl_dir_fsync,
    .release = ovl_dir_release,
};

int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
    int err;
    struct ovl_cache_entry *p;

    err = ovl_dir_read_merged(dentry, list);
    if (err)
        return err;

    err = 0;

    list_for_each_entry(p, list, l_node) {
        if (p->is_whiteout)
            continue;

        if (p->name[0] == '.') {
            if (p->len == 1)
                continue;
            if (p->len == 2 && p->name[1] == '.')
                continue;
        }
        err = -ENOTEMPTY;
        break;
    }

    return err;
}

void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
{
    struct ovl_cache_entry *p;

    inode_lock_nested(upper->d_inode, I_MUTEX_CHILD);
    list_for_each_entry(p, list, l_node) {
        struct dentry *dentry;

        if (!p->is_whiteout)
            continue;

        dentry = lookup_one_len(p->name, upper, p->len);
        if (IS_ERR(dentry)) {
            pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
                   upper->d_name.name, p->len, p->name,
                   (int) PTR_ERR(dentry));
            continue;
        }
        if (dentry->d_inode)
            ovl_cleanup(upper->d_inode, dentry);
        dput(dentry);
    }
    inode_unlock(upper->d_inode);
}

static int ovl_check_d_type(struct dir_context *ctx, const char *name,
                            int namelen, loff_t offset, u64 ino,
                            unsigned int d_type)
{
    struct ovl_readdir_data *rdd =
        container_of(ctx, struct ovl_readdir_data, ctx);

    /* Even if d_type is not supported, DT_DIR is returned for . and .. */
    if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen))
        return 0;

    if (d_type != DT_UNKNOWN)
        rdd->d_type_supported = true;

    return 0;
}

/*
 * Returns 1 if d_type is supported, 0 not supported/unknown. Negative values
 * if error is encountered.
 */
int ovl_check_d_type_supported(struct path *realpath)
{
    int err;
    struct ovl_readdir_data rdd = {
        .ctx.actor = ovl_check_d_type,
        .d_type_supported = false,
    };

    err = ovl_dir_read(realpath, &rdd);
    if (err)
        return err;

    return rdd.d_type_supported;
}
1298	executer/kernel/mcoverlayfs/linux-4.6.7/super.c	Normal file
File diff suppressed because it is too large
@@ -6,14 +6,15 @@ VPATH=@abs_srcdir@
TARGET=mcexec
@uncomment_if_ENABLE_MEMDUMP@TARGET+=eclair
LIBS=@LIBS@
+IHKDIR ?= $(VPATH)/../../../ihk/linux/include/

all: $(TARGET)

mcexec: mcexec.c
-	$(CC) -I${KDIR} $(CFLAGS) $(EXTRA_CFLAGS) -fPIE -pie -lrt -pthread -o $@ $^ $(EXTRA_OBJS)
+	$(CC) -I${KDIR} $(CFLAGS) $(EXTRA_CFLAGS) -fPIE -pie -lrt -lnuma -pthread -o $@ $^ $(EXTRA_OBJS)

eclair: eclair.c
-	$(CC) $(CFLAGS) -o $@ $^ $(LIBS)
+	$(CC) $(CFLAGS) -I${IHKDIR} -o $@ $^ $(LIBS)

clean:
	$(RM) $(TARGET) *.o
@@ -16,20 +16,8 @@
#include <unistd.h>
#include <sys/socket.h>
#include <arpa/inet.h>

-/* From ihk/linux/include/ihk/ihk_host_user.h */
-#define PHYS_CHUNKS_DESC_SIZE 8192
-
-struct dump_mem_chunk {
-    unsigned long addr;
-    unsigned long size;
-};
-
-typedef struct dump_mem_chunks_s {
-    int nr_chunks;
-    struct dump_mem_chunk chunks[];
-} dump_mem_chunks_t;
-/* ---------- */
#include <sys/ioctl.h>
+#include <ihk/ihk_host_user.h>

#define CPU_TID_BASE 1000000
@@ -39,6 +27,10 @@ struct options {
    char *kernel_path;
    char *dump_path;
    char *log_path;
+   int interactive;
+   int os_id;
+   int mcos_fd;
+   int print_idle;
}; /* struct options */

struct thread_info {
@@ -56,7 +48,7 @@ struct thread_info {
    int tid;
    int cpu;
    int lcpu;
-   int padding;
+   int idle;
    uintptr_t process;
    uintptr_t clv;
    uintptr_t x86_clv;
@@ -150,7 +142,21 @@ static int read_mem(uintptr_t va, void *buf, size_t size) {
        }
        return 1;
    }
-   error = read_physmem(pa, buf, size);
+
+   if (opt.interactive) {
+       dumpargs_t args;
+
+       args.cmd = DUMP_READ;
+       args.start = pa;
+       args.size = size;
+       args.buf = buf;
+
+       error = ioctl(opt.mcos_fd, IHK_OS_DUMP, &args);
+   }
+   else {
+       error = read_physmem(pa, buf, size);
+   }

    if (error) {
        perror("read_mem:read_physmem");
        return 1;
@@ -256,6 +262,7 @@ static int setup_threads(void) {
        perror("num_processors");
        return 1;
    }
+   printf("%s: num_processors: %d\n", __FUNCTION__, num_processors);

    error = read_symbol_64("locals", &locals);
    if (error) {
@@ -278,64 +285,6 @@ static int setup_threads(void) {
    ihk_mc_switch_context = lookup_symbol("ihk_mc_switch_context");
    if (0) printf("ihk_mc_switch_context: %lx\n", ihk_mc_switch_context);

-   /* Set up idle threads first */
-   for (cpu = 0; cpu < num_processors; ++cpu) {
-       uintptr_t v;
-       uintptr_t thread;
-       uintptr_t proc;
-       int pid;
-       int tid;
-       struct thread_info *ti;
-       int status;
-
-       v = clv + (cpu * K(CPU_LOCAL_VAR_SIZE));
-
-       ti = malloc(sizeof(*ti));
-       if (!ti) {
-           perror("malloc");
-           return 1;
-       }
-
-       thread = v+K(IDLE_THREAD_OFFSET);
-
-       error = read_64(thread+K(PROC_OFFSET), &proc);
-       if (error) {
-           perror("proc");
-           return 1;
-       }
-
-       error = read_32(thread+K(STATUS_OFFSET), &status);
-       if (error) {
-           perror("status");
-           return 1;
-       }
-
-       error = read_32(proc+K(PID_OFFSET), &pid);
-       if (error) {
-           perror("pid");
-           return 1;
-       }
-
-       error = read_32(thread+K(TID_OFFSET), &tid);
-       if (error) {
-           perror("tid");
-           return 1;
-       }
-
-       ti->next = NULL;
-       ti->status = status;
-       ti->pid = pid;
-       ti->tid = tid;
-       ti->cpu = cpu;
-       ti->lcpu = cpu;
-       ti->process = thread;
-       ti->clv = v;
-       ti->x86_clv = locals + locals_span*cpu;
-
-       *titailp = ti;
-       titailp = &ti->next;
-   }
-
    for (cpu = 0; cpu < num_processors; ++cpu) {
        uintptr_t v;
        uintptr_t head;
@@ -400,15 +349,19 @@ static int setup_threads(void) {
            ti->status = status;
            ti->pid = pid;
            ti->tid = tid;
-           ti->cpu = (thread == current)? cpu: -1;
+           ti->cpu = (thread == current) ? cpu : -1;
            ti->lcpu = cpu;
            ti->process = thread;
+           ti->idle = 0;
            ti->clv = v;
            ti->x86_clv = locals + locals_span*cpu;

            *titailp = ti;
            titailp = &ti->next;

+           if (!curr_thread)
+               curr_thread = ti;
+
            error = read_64(entry, &entry);
            if (error) {
                perror("process2");
@@ -417,8 +370,78 @@ static int setup_threads(void) {
        }
    }

+   /* Set up idle threads */
+   if (opt.print_idle) {
+       for (cpu = 0; cpu < num_processors; ++cpu) {
+           uintptr_t v;
+           uintptr_t thread;
+           uintptr_t proc;
+           int pid;
+           int tid;
+           struct thread_info *ti;
+           int status;
+
+           v = clv + (cpu * K(CPU_LOCAL_VAR_SIZE));
+
+           error = read_64(v+K(CURRENT_OFFSET), &current);
+           if (error) {
+               perror("current");
+               return 1;
+           }
+
+           ti = malloc(sizeof(*ti));
+           if (!ti) {
+               perror("malloc");
+               return 1;
+           }
+
+           thread = v+K(IDLE_THREAD_OFFSET);
+
+           error = read_64(thread+K(PROC_OFFSET), &proc);
+           if (error) {
+               perror("proc");
+               return 1;
+           }
+
+           error = read_32(thread+K(STATUS_OFFSET), &status);
+           if (error) {
+               perror("status");
+               return 1;
+           }
+
+           error = read_32(proc+K(PID_OFFSET), &pid);
+           if (error) {
+               perror("pid");
+               return 1;
+           }
+
+           error = read_32(thread+K(TID_OFFSET), &tid);
+           if (error) {
+               perror("tid");
+               return 1;
+           }
+
+           ti->next = NULL;
+           ti->status = status;
+           ti->pid = 1;
+           ti->tid = 2000000000 + tid;
+           ti->cpu = (thread == current) ? cpu : -1;
+           ti->lcpu = cpu;
+           ti->process = thread;
+           ti->idle = 1;
+           ti->clv = v;
+           ti->x86_clv = locals + locals_span*cpu;
+
+           *titailp = ti;
+           titailp = &ti->next;
+
+           if (!curr_thread)
+               curr_thread = ti;
+       }
+   }
+
    if (!tihead) {
-       printf("thread not found. cpu mode forcibly\n");
+       printf("No threads found, forcing CPU mode.\n");
        opt.cpu = 1;
    }
@@ -459,6 +482,7 @@ static int setup_threads(void) {
        ti->tid = CPU_TID_BASE + cpu;
        ti->cpu = cpu;
        ti->process = current;
+       ti->idle = 1;
        ti->clv = v;
        ti->x86_clv = locals + locals_span*cpu;
@@ -471,7 +495,9 @@ static int setup_threads(void) {
        printf("thread not found\n");
        return 1;
    }
-   curr_thread = tihead;
+
+   if (!curr_thread)
+       curr_thread = tihead;

    return 0;
} /* setup_threads() */
@@ -713,18 +739,21 @@ static void command(char *cmd, char *res) {
                break;
            }

            //if (regs[17] > MAP_KERNEL) {}
            pu8 = (void *)&regs;
            for (i = 0; i < sizeof(regs)-4; ++i) {
                rbp += sprintf(rbp, "%02x", pu8[i]);
            }
        }
    }
    /*
    else if (!strcmp(p, "mffffffff80018a82,1")) {
        rbp += sprintf(rbp, "b8");
    }
    else if (!strcmp(p, "mffffffff80018a82,9")) {
        rbp += sprintf(rbp, "b8f2ffffff41564155");
    }
    */
    else if (!strncmp(p, "m", 1)) {
        int n;
        uintptr_t start;
@@ -820,33 +849,35 @@ static void command(char *cmd, char *res) {
                break;
            }
            q = buf;
+           q += sprintf(q, "PID %d, ", ti->pid);
            if (ti->status & PS_RUNNING) {
-               q += sprintf(q, "running on cpu%d", ti->cpu);
+               q += sprintf(q, "%srunning on cpu %d",
+                   ti->idle ? "idle " : "", ti->lcpu);
            }
            else if (ti->status & (PS_INTERRUPTIBLE | PS_UNINTERRUPTIBLE)) {
-               q += sprintf(q, "waiting on cpu%d", ti->lcpu);
+               q += sprintf(q, "%swaiting on cpu %d",
+                   ti->idle ? "idle " : "", ti->lcpu);
            }
            else if (ti->status & PS_STOPPED) {
-               q += sprintf(q, "stopped on cpu%d", ti->lcpu);
+               q += sprintf(q, "%sstopped on cpu %d",
+                   ti->idle ? "idle " : "", ti->lcpu);
            }
            else if (ti->status & PS_TRACED) {
-               q += sprintf(q, "traced on cpu%d", ti->lcpu);
+               q += sprintf(q, "%straced on cpu %d",
+                   ti->idle ? "idle " : "", ti->lcpu);
            }
            else if (ti->status == CS_IDLE) {
-               q += sprintf(q, "cpu%d idle", ti->cpu);
+               q += sprintf(q, "cpu %d idle", ti->cpu);
            }
            else if (ti->status == CS_RUNNING) {
-               q += sprintf(q, "cpu%d running", ti->cpu);
+               q += sprintf(q, "cpu %d running", ti->cpu);
            }
            else if (ti->status == CS_RESERVED) {
-               q += sprintf(q, "cpu%d reserved", ti->cpu);
+               q += sprintf(q, "cpu %d reserved", ti->cpu);
            }
            else {
                q += sprintf(q, "status=%#x", ti->status);
            }
-           if (ti->tid != ti->pid) {
-               q += sprintf(q, ",pid=%d", ti->pid);
-           }
            rbp += print_hex(rbp, buf);
        }
    } while (0);
@@ -859,11 +890,12 @@ static void options(int argc, char *argv[]) {
    memset(&opt, 0, sizeof(opt));
    opt.kernel_path = "./mckernel.img";
    opt.dump_path = "./mcdump";
+   opt.mcos_fd = -1;

    for (;;) {
        int c;

-       c = getopt(argc, argv, "cd:hk:");
+       c = getopt(argc, argv, "ilcd:hk:o:");
        if (c < 0) {
            break;
        }
@@ -881,12 +913,32 @@ static void options(int argc, char *argv[]) {
        case 'd':
            opt.dump_path = optarg;
            break;
+       case 'i':
+           opt.interactive = 1;
+           break;
+       case 'o':
+           opt.os_id = atoi(optarg);
+           break;
+       case 'l':
+           opt.print_idle = 1;
+           break;
        }
    }
    if (optind < argc) {
        opt.help = 1;
    }

+   if (opt.interactive) {
+       char fn[128];
+       sprintf(fn, "/dev/mcos%d", opt.os_id);
+
+       opt.mcos_fd = open(fn, O_RDONLY);
+       if (opt.mcos_fd < 0) {
+           perror("open");
+           exit(1);
+       }
+   }
+
    return;
} /* options() */
@@ -969,7 +1021,7 @@ int main(int argc, char *argv[]) {
    uint8_t sum;
    uint8_t check;
    static char lbuf[1024];
-   static char rbuf[1024];
+   static char rbuf[8192];
    static char cbuf[3];
    char *lbp;
    char *p;
@@ -41,6 +41,7 @@
#include <sys/mman.h>
#include <asm/unistd.h>
#include <sched.h>
+#include <dirent.h>

#include <termios.h>
#include <sys/ioctl.h>
@@ -65,6 +66,8 @@
#include "../include/uprotocol.h"
#include <getopt.h>
#include "../config.h"
+#include <numa.h>
+#include <numaif.h>

//#define DEBUG
@@ -107,6 +110,9 @@ char **__glob_argv = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0) && LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0)
#define ENABLE_MCOVERLAYFS 1
#endif // LINUX_VERSION_CODE == 4.0
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) && LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+#define ENABLE_MCOVERLAYFS 1
+#endif // LINUX_VERSION_CODE == 4.6
#else
#if RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7,2)
#define ENABLE_MCOVERLAYFS 1
@@ -149,6 +155,10 @@ static const char rlimit_stack_envname[] = "MCKERNEL_RLIMIT_STACK";
static int ischild;
static int enable_vdso = 1;

+/* Partitioned execution (e.g., for MPI) */
+static int nr_processes = 0;
+static int nr_threads = -1;
+
struct fork_sync {
    pid_t pid;
    int status;
@@ -498,7 +508,7 @@ retry:

    /* Check whether the resolved path is a symlink */
    if (lstat(path, &sb) == -1) {
-       fprintf(stderr, "lookup_exec_path(): error stat\n");
+       __dprintf(stderr, "lookup_exec_path(): error stat\n");
        return errno;
    }
@@ -1098,7 +1108,7 @@ static int reduce_stack(struct rlimit *orig_rlim, char *argv[])

void print_usage(char **argv)
{
-   fprintf(stderr, "Usage: %s [-c target_core] [<mcos-id>] (program) [args...]\n", argv[0]);
+   fprintf(stderr, "Usage: %s [-c target_core] [-n nr_partitions] [<mcos-id>] (program) [args...]\n", argv[0]);
}

void init_sigaction(void)
@@ -1148,75 +1158,41 @@ void init_worker_threads(int fd)

#ifdef ENABLE_MCOVERLAYFS
-#define READ_BUFSIZE 1024
-static int isunshare(void)
+static int find_mount_prefix(char *prefix)
{
-   int err = 0;
-   int ret;
-   int fd;
+   FILE *fp;
+   char *line = NULL;
+   size_t len = 0;
+   ssize_t read;
    char proc_path[PATH_MAX];
-   ssize_t len_read;
-   char buf_read[READ_BUFSIZE + 1];
-   char *buf_read_off;
-   char *buf_find;
-   char buf_cmp[READ_BUFSIZE + 1];
-   char *buf_cmp_off;
-   ssize_t len_copy;
+   int ret = 0;

    snprintf(proc_path, sizeof(proc_path), "/proc/%d/mounts", getpid());
-   fd = open(proc_path, O_RDONLY);
-   if (fd < 0) {
-       fprintf(stderr, "Error: Failed to open %s.\n", proc_path);
+
+   fp = fopen(proc_path, "r");
+   if (fp == NULL) {
        return -1;
    }

-   buf_cmp_off = buf_cmp;
-   while (1) {
-       len_read = read(fd, buf_read, READ_BUFSIZE);
-       if (len_read == -1) {
-           fprintf(stderr, "Error: Failed to read.\n");
-           err = -1;
-           break;
-       }
+   while ((read = getline(&line, &len, fp)) != -1) {
+       if (strlen(line) < strlen(prefix))
+           continue;

-       buf_read_off = buf_read;
-       while (1) {
-           if ((len_read - (buf_read_off - buf_read)) <= 0) {
-               break;
-           }
-           buf_find = memchr(buf_read_off, '\n',
-                   len_read - (buf_read_off - buf_read));
-           if (buf_find) {
-               len_copy = buf_find - buf_read_off;
-           } else {
-               len_copy = len_read - (buf_read_off - buf_read);
-           }
-           memcpy(buf_cmp_off, buf_read_off, len_copy);
-           *(buf_cmp_off + len_copy) = '\0';
-
-           if (buf_find) {
-               buf_read_off = buf_read_off + len_copy + 1;
-               buf_cmp_off = buf_cmp;
-               ret = strncmp(buf_cmp, "mcoverlay /proc ", 16);
-               if (!ret) {
-                   err = 1;
-                   break;
-               }
-           } else {
-               buf_read_off = buf_read_off + len_copy;
-               buf_cmp_off = buf_cmp_off + len_copy;
-               break;
-           }
-       }
-
-       if (err == 1 || len_read == 0) {
+       if (!strncmp(line, prefix, strlen(prefix))) {
+           ret = 1;
            break;
        }
    }

-   close(fd);
+   if (line)
+       free(line);

-   __dprintf("err=%d\n", err);
-   return err;
+   return ret;
}
+
+static int isunshare(void)
+{
+   return find_mount_prefix("mcoverlay /proc ");
+}
#endif // ENABLE_MCOVERLAYFS
@@ -1359,12 +1335,20 @@ int main(int argc, char **argv)
    }

    /* Parse options ("+" denotes stop at the first non-option) */
-   while ((opt = getopt_long(argc, argv, "+c:", mcexec_options, NULL)) != -1) {
+   while ((opt = getopt_long(argc, argv, "+c:n:t:", mcexec_options, NULL)) != -1) {
        switch (opt) {
            case 'c':
                target_core = atoi(optarg);
                break;

+           case 'n':
+               nr_processes = atoi(optarg);
+               break;
+
+           case 't':
+               nr_threads = atoi(optarg);
+               break;
+
            case 0: /* long opt */
                break;
@@ -1415,6 +1399,7 @@ int main(int argc, char **argv)
    if (error == 0) {
        struct sys_unshare_desc unshare_desc;
        struct sys_mount_desc mount_desc;
+       struct sys_umount_desc umount_desc;

        memset(&unshare_desc, '\0', sizeof unshare_desc);
        memset(&mount_desc, '\0', sizeof mount_desc);
@@ -1426,6 +1411,53 @@ int main(int argc, char **argv)
            return 1;
        }

+       /*
+        * Umount cgroup filesystems that may expose invalid NUMA
+        * information
+        */
+       if (find_mount_prefix("cgroup /sys/fs/cgroup/cpu,cpuacct")) {
+           umount_desc.dir_name = "/sys/fs/cgroup/cpu,cpuacct";
+
+           if (ioctl(fd, MCEXEC_UP_SYS_UMOUNT,
+                   (unsigned long)&umount_desc) != 0) {
+               fprintf(stderr,
+                   "WARNING: Failed to umount cgroup/cpu,cpuacct. (%s)\n",
+                   strerror(errno));
+           }
+       }
+       else if (find_mount_prefix("cgroup /sys/fs/cgroup/cpu")) {
+           umount_desc.dir_name = "/sys/fs/cgroup/cpu";
+
+           if (ioctl(fd, MCEXEC_UP_SYS_UMOUNT,
+                   (unsigned long)&umount_desc) != 0) {
+               fprintf(stderr,
+                   "WARNING: Failed to umount cgroup/cpu. (%s)\n",
+                   strerror(errno));
+           }
+       }
+
+       if (find_mount_prefix("cgroup /sys/fs/cgroup/cpuset")) {
+           umount_desc.dir_name = "/sys/fs/cgroup/cpuset";
+
+           if (ioctl(fd, MCEXEC_UP_SYS_UMOUNT,
+                   (unsigned long)&umount_desc) != 0) {
+               fprintf(stderr,
+                   "WARNING: Failed to umount cgroup/cpuset. (%s)\n",
+                   strerror(errno));
+           }
+       }
+
+       if (find_mount_prefix("cgroup /sys/fs/cgroup/memory")) {
+           umount_desc.dir_name = "/sys/fs/cgroup/memory/";
+
+           if (ioctl(fd, MCEXEC_UP_SYS_UMOUNT,
+                   (unsigned long)&umount_desc) != 0) {
+               fprintf(stderr,
+                   "WARNING: Failed to umount cgroup/memory. (%s)\n",
+                   strerror(errno));
+           }
+       }
+
        sprintf(mcos_procdir, "/tmp/mcos/mcos%d_proc", mcosid);
        mount_desc.dev_name = mcos_procdir;
        mount_desc.dir_name = "/proc";
@@ -1532,9 +1564,15 @@ int main(int argc, char **argv)
        return 1;
    }

-   n_threads = ncpu;
-   if (ncpu > 16) {
-       n_threads = 16;
+   if (nr_threads > 0) {
+       n_threads = nr_threads;
    }
+   else if (getenv("OMP_NUM_THREADS")) {
+       /* Leave some headroom for helper threads.. */
+       n_threads = atoi(getenv("OMP_NUM_THREADS")) + 4;
+   }
+   else {
+       n_threads = ncpu;
+   }

    /*
@@ -1546,6 +1584,10 @@ int main(int argc, char **argv)
     * TODO: implement dynaic thread pool resizing.
     */
    thread_data = (struct thread_data_s *)malloc(sizeof(struct thread_data_s) * (ncpu + 1));
+   if (!thread_data) {
+       fprintf(stderr, "error: allocating thread pool data\n");
+       return 1;
+   }
    memset(thread_data, '\0', sizeof(struct thread_data_s) * (ncpu + 1));

#if 0
@@ -1580,6 +1622,53 @@ int main(int argc, char **argv)
        exit(1);
    }

+   /* Partitioned execution, obtain CPU set */
+   if (nr_processes > 0) {
+       struct get_cpu_set_arg cpu_set_arg;
+       int mcexec_linux_numa = 0;
+
+       cpu_set_arg.cpu_set = (void *)&desc->cpu_set;
+       cpu_set_arg.cpu_set_size = sizeof(desc->cpu_set);
+       cpu_set_arg.nr_processes = nr_processes;
+       cpu_set_arg.target_core = &target_core;
+       cpu_set_arg.mcexec_linux_numa = &mcexec_linux_numa;
+
+       if (ioctl(fd, MCEXEC_UP_GET_CPUSET, (void *)&cpu_set_arg) != 0) {
+           perror("getting CPU set for partitioned execution");
+           close(fd);
+           return 1;
+       }
+
+       desc->cpu = target_core;
+
+       /* This call may not succeed, but that is fine */
+       if (numa_run_on_node(mcexec_linux_numa) < 0) {
+           __dprint("%s: WARNING: couldn't bind to NUMA %d\n",
+               __FUNCTION__, mcexec_linux_numa);
+       }
+#ifdef DEBUG
+       else {
+           cpu_set_t cpuset;
+           char affinity[BUFSIZ];
+
+           CPU_ZERO(&cpuset);
+           if ((sched_getaffinity(0, sizeof(cpu_set_t), &cpuset)) != 0) {
+               perror("Error sched_getaffinity");
+               exit(1);
+           }
+
+           affinity[0] = '\0';
+           for (i = 0; i < 512; i++) {
+               if (CPU_ISSET(i, &cpuset) == 1) {
+                   sprintf(affinity, "%s %d", affinity, i);
+               }
+           }
+           __dprint("%s: PID: %d affinity: %s\n",
+               __FUNCTION__, getpid(), affinity);
+       }
+#endif
+   }
+
    if (ioctl(fd, MCEXEC_UP_PREPARE_IMAGE, (unsigned long)desc) != 0) {
        perror("prepare");
        close(fd);
@@ -1686,6 +1775,98 @@ do_generic_syscall(
        ret = -errno;
    }

+   /* Overlayfs /sys/X directory lseek() problem work around */
+   if (w->sr.number == __NR_lseek && ret == -EINVAL) {
+       char proc_path[PATH_MAX];
+       char path[PATH_MAX];
+       struct stat sb;
+
+       sprintf(proc_path, "/proc/self/fd/%d", (int)w->sr.args[0]);
+
+       /* Get filename */
+       if (readlink(proc_path, path, sizeof(path)) < 0) {
+           fprintf(stderr, "%s: error: readlink() failed for %s\n",
+               __FUNCTION__, proc_path);
+           perror(": ");
+           goto out;
+       }
+
+       /* Not in /sys? */
+       if (strncmp(path, "/sys/", 5))
+           goto out;
+
+       /* Stat */
+       if (stat(path, &sb) < 0) {
+           fprintf(stderr, "%s: error stat() failed for %s\n",
+               __FUNCTION__, path);
+           goto out;
+       }
+
+       /* Not dir? */
+       if ((sb.st_mode & S_IFMT) != S_IFDIR)
+           goto out;
+
+       ret = 0;
+   }
+   /* Fake that nodeX in /sys/devices/system/node do not exist,
+    * where X >= number of LWK NUMA nodes */
+   else if (w->sr.number == __NR_getdents && ret > 0) {
+       struct linux_dirent {
+           long d_ino;
+           off_t d_off;
+           unsigned short d_reclen;
+           char d_name[];
+       };
+       struct linux_dirent *d;
+       char *buf = (char *)w->sr.args[1];
+       int bpos = 0;
+       int nodes,len;
+       char proc_path[PATH_MAX];
+       char path[PATH_MAX];
+
+       sprintf(proc_path, "/proc/self/fd/%d", (int)w->sr.args[0]);
+
+       /* Get filename */
+       if ((len = readlink(proc_path, path, sizeof(path))) < 0) {
+           fprintf(stderr, "%s: error: readlink() failed for %s\n",
+               __FUNCTION__, proc_path);
+           goto out;
+       }
+       path[len] = 0;
+
+       /* Not /sys/devices/system/node ? */
+       if (strcmp(path, "/sys/devices/system/node"))
+           goto out;
+
+       nodes = ioctl(fd, MCEXEC_UP_GET_NODES, 0);
+       if (nodes == -1) {
+           goto out;
+       }
+
+       d = (struct linux_dirent *) (buf + bpos);
+       for (bpos = 0; bpos < ret; ) {
+           int nodeid, tmp_reclen;
+           d = (struct linux_dirent *) (buf + bpos);
+
+           if (sscanf(d->d_name, "node%d", &nodeid) != 1) {
+               bpos += d->d_reclen;
+               continue;
+           }
+
+           if (nodeid >= nodes) {
+               tmp_reclen = d->d_reclen;
+               memmove(buf + bpos,
+                   buf + bpos + tmp_reclen,
+                   ret - bpos - tmp_reclen);
+               ret -= tmp_reclen;
+               continue;
+           }
+
+           bpos += d->d_reclen;
+       }
+   }
+
+out:
    __dprintf("do_generic_syscall(%ld):%ld (%#lx)\n", w->sr.number, ret, ret);
    return ret;
}
@@ -1800,9 +1981,18 @@ int close_cloexec_fds(int mcos_fd)
    return 0;
}

+void chgdevpath(char *in, char *buf)
+{
+   if(!strcmp(in, "/dev/xpmem")){
+       sprintf(in, "/dev/null");
+   }
+}
+
char *
chgpath(char *in, char *buf)
{
+   chgdevpath(in, buf);
+
#ifdef ENABLE_MCOVERLAYFS
    return in;
#endif // ENABLE_MCOVERLAYFS
@@ -2313,6 +2503,23 @@ return_execve1:

    ret = 0;
return_execve2:
+#ifdef ENABLE_MCOVERLAYFS
+   {
+       struct sys_mount_desc mount_desc;
+
+       mount_desc.dev_name = NULL;
+       mount_desc.dir_name = "/proc";
+       mount_desc.type = NULL;
+       mount_desc.flags = MS_REMOUNT;
+       mount_desc.data = NULL;
+       if (ioctl(fd, MCEXEC_UP_SYS_MOUNT,
+               (unsigned long)&mount_desc) != 0) {
+           fprintf(stderr,
+               "WARNING: failed to remount /proc (%s)\n",
+               strerror(errno));
+       }
+   }
+#endif
    do_syscall_return(fd, cpu, ret, 0, 0, 0, 0);
    break;
@@ -3,15 +3,15 @@ SRC=$(VPATH)
IHKDIR=$(IHKBASE)/$(TARGETDIR)
OBJS = init.o mem.o debug.o mikc.o listeners.o ap.o syscall.o cls.o host.o
OBJS += process.o copy.o waitq.o futex.o timer.o plist.o fileobj.o shmobj.o
-OBJS += zeroobj.o procfs.o devobj.o sysfs.o
+OBJS += zeroobj.o procfs.o devobj.o sysfs.o xpmem.o
DEPSRCS=$(wildcard $(SRC)/*.c)

-CFLAGS += -I$(SRC)/include -D__KERNEL__ -g
+CFLAGS += -I$(SRC)/include -D__KERNEL__ -g -fno-omit-frame-pointer -fno-inline -fno-inline-small-functions
LDFLAGS += -e arch_start
IHKOBJ = ihk/ihk.o

include $(SRC)/config/config.$(TARGET)
-include $(IHKBASE)/Makefile.common
+include @abs_builddir@/../../ihk/cokernel/Makefile.common

# CFLAGS += -I$(SRC)/../arch/$(IHKARCH)/kernel/include -I$(SRC)/../lib/include
@@ -9,7 +9,7 @@ V ?= $(VERBOSE)
KERNEL = kernel.img
KERNELS = $(addsuffix /$(KERNEL),$(addprefix $(O)/,$(BUILD_TARGET)))

-SUBCMD_OPTS = V='$(V)'
+SUBCMD_OPTS = V='$(V)' BUILD_IHK_COKERNEL=@abs_builddir@/../../ihk/cokernel

$(if $(O),,$(error Specify the compilation target directory))
#$(if $(shell ls $(IHKBASE)/Makefile),,\
23	kernel/ap.c
@@ -26,9 +26,21 @@
#include <march.h>
#include <cls.h>

+//#define DEBUG_PRINT_AP
+
+#ifdef DEBUG_PRINT_AP
+#define dkprintf(...) kprintf(__VA_ARGS__)
+#define ekprintf(...) kprintf(__VA_ARGS__)
+#else
+#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
+#define ekprintf(...) kprintf(__VA_ARGS__)
+#endif
+
int num_processors = 1;
static volatile int ap_stop = 1;

+mcs_lock_node_t ap_syscall_semaphore;
+
static void ap_wait(void)
{
    init_tick();
@@ -43,7 +55,11 @@ static void ap_wait(void)
    arch_start_pvclock();

    if (find_command_line("hidos")) {
+       mcs_lock_node_t mcs_node;
+
+       mcs_lock_lock_noirq(&ap_syscall_semaphore, &mcs_node);
        init_host_syscall_channel();
+       mcs_lock_unlock_noirq(&ap_syscall_semaphore, &mcs_node);
    }

    pc_ap_init();
@@ -57,6 +73,7 @@ static void ap_wait(void)
void ap_start(void)
{
    init_tick();
+   mcs_lock_init(&ap_syscall_semaphore);
    ap_stop = 0;
    sync_tick();
}
@@ -93,13 +110,13 @@ void ap_init(void)
        if (cpu_info->hw_ids[i] == bsp_hw_id) {
            continue;
        }
-       kprintf("AP Booting: %d (HW ID: %d @ NUMA %d)\n", i,
+       dkprintf("AP Booting: %d (HW ID: %d @ NUMA %d)\n", i,
            cpu_info->hw_ids[i], cpu_info->nodes[i]);
        ihk_mc_boot_cpu(cpu_info->hw_ids[i], (unsigned long)ap_wait);

        num_processors++;
    }
-   kprintf("AP Booting: Done\n");
+   kprintf("BSP: booted %d AP CPUs\n", cpu_info->ncpus - 1);
}

#include <sysfs.h>
@ -209,7 +226,7 @@ cpu_sysfs_setup(void)
|
||||
/* setup table */
|
||||
info = kmalloc(sizeof(*info) * num_processors, IHK_MC_AP_CRITICAL);
|
||||
for (cpu = 0; cpu < num_processors; ++cpu) {
|
||||
info[cpu].online = 10+cpu;
|
||||
info[cpu].online = 1;
|
||||
}
|
||||
fake_cpu_infos = info;
|
||||
|
||||
|
||||
@ -23,7 +23,7 @@
|
||||
extern int num_processors;
|
||||
|
||||
struct cpu_local_var *clv;
|
||||
static int cpu_local_var_initialized = 0;
|
||||
int cpu_local_var_initialized = 0;
|
||||
|
||||
void cpu_local_var_init(void)
|
||||
{
|
||||
|
||||
@ -126,7 +126,8 @@ int devobj_create(int fd, size_t len, off_t off, struct memobj **objp, int *maxp
|
||||
__FUNCTION__, fd, len, off, result.handle, result.maxprot);
|
||||
|
||||
obj->memobj.ops = &devobj_ops;
|
||||
obj->memobj.flags = MF_HAS_PAGER;
|
||||
obj->memobj.flags = MF_HAS_PAGER | MF_DEV_FILE;
|
||||
obj->memobj.size = len;
|
||||
obj->handle = result.handle;
|
||||
obj->ref = 1;
|
||||
obj->pfn_pgoff = off / PAGE_SIZE;
|
||||
@ -180,19 +181,21 @@ static void devobj_release(struct memobj *memobj)
|
||||
memobj_unlock(&obj->memobj);
|
||||
|
||||
if (free_obj) {
|
||||
int error;
|
||||
ihk_mc_user_context_t ctx;
|
||||
if (!(free_obj->memobj.flags & MF_HOST_RELEASED)) {
|
||||
int error;
|
||||
ihk_mc_user_context_t ctx;
|
||||
|
||||
ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_UNMAP;
|
||||
ihk_mc_syscall_arg1(&ctx) = handle;
|
||||
ihk_mc_syscall_arg2(&ctx) = 1;
|
||||
ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_UNMAP;
|
||||
ihk_mc_syscall_arg1(&ctx) = handle;
|
||||
ihk_mc_syscall_arg2(&ctx) = 1;
|
||||
|
||||
error = syscall_generic_forwarding(__NR_mmap, &ctx);
|
||||
if (error) {
|
||||
kprintf("devobj_release(%p %lx):"
|
||||
"release failed. %d\n",
|
||||
free_obj, handle, error);
|
||||
/* through */
|
||||
error = syscall_generic_forwarding(__NR_mmap, &ctx);
|
||||
if (error) {
|
||||
kprintf("devobj_release(%p %lx):"
|
||||
"release failed. %d\n",
|
||||
free_obj, handle, error);
|
||||
/* through */
|
||||
}
|
||||
}
|
||||
|
||||
if (obj->pfn_table) {
|
||||
|
||||
362 kernel/fileobj.c
@ -29,22 +29,27 @@
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#define ekprintf(...) kprintf(__VA_ARGS__)

static ihk_spinlock_t fileobj_list_lock = SPIN_LOCK_UNLOCKED;
mcs_rwlock_lock_t fileobj_list_lock =
{{{0}, MCS_RWLOCK_TYPE_COMMON_READER, 0, 0, 0, NULL}, NULL};
static LIST_HEAD(fileobj_list);

#define FILEOBJ_PAGE_HASH_SHIFT 9
#define FILEOBJ_PAGE_HASH_SIZE (1 << FILEOBJ_PAGE_HASH_SHIFT)
#define FILEOBJ_PAGE_HASH_MASK (FILEOBJ_PAGE_HASH_SIZE - 1)

struct fileobj {
struct memobj memobj; /* must be first */
long sref;
long cref;
uintptr_t handle;
struct list_head page_list;
struct list_head list;
struct memobj memobj; /* must be first */
long sref;
long cref;
uintptr_t handle;
struct list_head list;
struct list_head page_hash[FILEOBJ_PAGE_HASH_SIZE];
mcs_rwlock_lock_t page_hash_locks[FILEOBJ_PAGE_HASH_SIZE];
};

static memobj_release_func_t fileobj_release;
static memobj_ref_func_t fileobj_ref;
static memobj_get_page_func_t fileobj_get_page;
static memobj_copy_page_func_t fileobj_copy_page;
static memobj_flush_page_func_t fileobj_flush_page;
static memobj_invalidate_page_func_t fileobj_invalidate_page;
static memobj_lookup_page_func_t fileobj_lookup_page;
@ -53,7 +58,7 @@ static struct memobj_ops fileobj_ops = {
.release = &fileobj_release,
.ref = &fileobj_ref,
.get_page = &fileobj_get_page,
.copy_page = &fileobj_copy_page,
.copy_page = NULL,
.flush_page = &fileobj_flush_page,
.invalidate_page = &fileobj_invalidate_page,
.lookup_page = &fileobj_lookup_page,
@ -72,28 +77,36 @@ static struct memobj *to_memobj(struct fileobj *fileobj)
/***********************************************************************
* page_list
*/
static void page_list_init(struct fileobj *obj)
static void fileobj_page_hash_init(struct fileobj *obj)
{
INIT_LIST_HEAD(&obj->page_list);
int i;
for (i = 0; i < FILEOBJ_PAGE_HASH_SIZE; ++i) {
mcs_rwlock_init(&obj->page_hash_locks[i]);
INIT_LIST_HEAD(&obj->page_hash[i]);
}
return;
}

static void page_list_insert(struct fileobj *obj, struct page *page)
/* NOTE: caller must hold page_hash_locks[hash] */
static void __fileobj_page_hash_insert(struct fileobj *obj,
struct page *page, int hash)
{
list_add(&page->list, &obj->page_list);
return;
list_add(&page->list, &obj->page_hash[hash]);
}

static void page_list_remove(struct fileobj *obj, struct page *page)
/* NOTE: caller must hold page_hash_locks[hash] */
static void __fileobj_page_hash_remove(struct page *page)
{
list_del(&page->list);
}

static struct page *page_list_lookup(struct fileobj *obj, off_t off)
/* NOTE: caller must hold page_hash_locks[hash] */
static struct page *__fileobj_page_hash_lookup(struct fileobj *obj,
int hash, off_t off)
{
struct page *page;

list_for_each_entry(page, &obj->page_list, list) {
list_for_each_entry(page, &obj->page_hash[hash], list) {
if ((page->mode != PM_WILL_PAGEIO)
&& (page->mode != PM_PAGEIO)
&& (page->mode != PM_DONE_PAGEIO)
@ -104,6 +117,7 @@ static struct page *page_list_lookup(struct fileobj *obj, off_t off)
obj, off, page->mode);
panic("page_list_lookup:invalid obj page");
}

if (page->offset == off) {
goto out;
}
@ -114,13 +128,22 @@ out:
return page;
}

static struct page *page_list_first(struct fileobj *obj)
static struct page *fileobj_page_hash_first(struct fileobj *obj)
{
if (list_empty(&obj->page_list)) {
return NULL;
int i;

for (i = 0; i < FILEOBJ_PAGE_HASH_SIZE; ++i) {
if (!list_empty(&obj->page_hash[i])) {
break;
}
}

return list_first_entry(&obj->page_list, struct page, list);
if (i != FILEOBJ_PAGE_HASH_SIZE) {
return list_first_entry(&obj->page_hash[i], struct page, list);
}
else {
return NULL;
}
}
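For orientation (not part of the patch): the per-object page hash above replaces the single page_list, and each bucket carries its own MCS reader/writer lock, so lookups of unrelated offsets no longer serialize on one memobj lock. A minimal standalone sketch of the bucket selection, assuming 4 KiB pages (PAGE_SHIFT 12 is an assumption here; fileobj_hash_of() is a hypothetical helper name):

#include <stdio.h>

#define PAGE_SHIFT 12                       /* assumption: 4 KiB pages */
#define FILEOBJ_PAGE_HASH_SHIFT 9
#define FILEOBJ_PAGE_HASH_SIZE (1 << FILEOBJ_PAGE_HASH_SHIFT)
#define FILEOBJ_PAGE_HASH_MASK (FILEOBJ_PAGE_HASH_SIZE - 1)

static int fileobj_hash_of(long off)
{
	/* page number of the offset, modulo the 512-bucket table */
	return (off >> PAGE_SHIFT) & FILEOBJ_PAGE_HASH_MASK;
}

int main(void)
{
	/* consecutive pages land in neighboring buckets; 512 pages
	 * later (0x200000) wraps back to bucket 0 */
	printf("%d %d %d\n", fileobj_hash_of(0x0000),
	       fileobj_hash_of(0x1000), fileobj_hash_of(0x200000));
	return 0;	/* prints: 0 1 0 */
}
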
/***********************************************************************
@ -163,10 +186,11 @@ static struct fileobj *obj_list_lookup(uintptr_t handle)
int fileobj_create(int fd, struct memobj **objp, int *maxprotp)
{
ihk_mc_user_context_t ctx;
struct pager_create_result result; // XXX: assumes contiguous physical
struct pager_create_result result __attribute__((aligned(64)));
int error;
struct fileobj *newobj = NULL;
struct fileobj *obj;
struct mcs_rwlock_node node;

dkprintf("fileobj_create(%d)\n", fd);
newobj = kmalloc(sizeof(*newobj), IHK_MC_AP_NOWAIT);
@ -179,6 +203,7 @@ int fileobj_create(int fd, struct memobj **objp, int *maxprotp)
ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_CREATE;
ihk_mc_syscall_arg1(&ctx) = fd;
ihk_mc_syscall_arg2(&ctx) = virt_to_phys(&result);
memset(&result, 0, sizeof(result));

error = syscall_generic_forwarding(__NR_mmap, &ctx);
if (error) {
@ -188,27 +213,43 @@ int fileobj_create(int fd, struct memobj **objp, int *maxprotp)

memset(newobj, 0, sizeof(*newobj));
newobj->memobj.ops = &fileobj_ops;
newobj->memobj.flags = MF_HAS_PAGER;
newobj->memobj.flags = MF_HAS_PAGER | MF_REG_FILE;
newobj->handle = result.handle;
newobj->sref = 1;
newobj->cref = 1;
page_list_init(newobj);
fileobj_page_hash_init(newobj);
ihk_mc_spinlock_init(&newobj->memobj.lock);

ihk_mc_spinlock_lock_noirq(&fileobj_list_lock);
mcs_rwlock_writer_lock_noirq(&fileobj_list_lock, &node);
obj = obj_list_lookup(result.handle);
if (!obj) {
obj_list_insert(newobj);
obj = newobj;
to_memobj(obj)->size = result.size;
to_memobj(obj)->flags |= result.flags;
to_memobj(obj)->status = MEMOBJ_READY;
if (to_memobj(obj)->flags & MF_PREFETCH) {
to_memobj(obj)->status = MEMOBJ_TO_BE_PREFETCHED;
}
newobj = NULL;
dkprintf("%s: new obj 0x%lx cref: %d, %s\n",
__FUNCTION__,
obj,
obj->cref,
to_memobj(obj)->flags & MF_ZEROFILL ? "zerofill" : "");
}
else {
++obj->sref;
++obj->cref;
memobj_unlock(&obj->memobj); /* locked by obj_list_lookup() */
dkprintf("%s: existing obj 0x%lx cref: %d, %s\n",
__FUNCTION__,
obj,
obj->cref,
to_memobj(obj)->flags & MF_ZEROFILL ? "zerofill" : "");
}

ihk_mc_spinlock_unlock_noirq(&fileobj_list_lock);
mcs_rwlock_writer_unlock_noirq(&fileobj_list_lock, &node);

error = 0;
*objp = to_memobj(obj);
@ -239,6 +280,7 @@ static void fileobj_release(struct memobj *memobj)
long free_sref = 0;
uintptr_t free_handle;
struct fileobj *free_obj = NULL;
struct mcs_rwlock_node node;

dkprintf("fileobj_release(%p %lx)\n", obj, obj->handle);

@ -252,19 +294,28 @@ static void fileobj_release(struct memobj *memobj)
obj->sref -= free_sref;
free_handle = obj->handle;
memobj_unlock(&obj->memobj);
if (obj->memobj.flags & MF_HOST_RELEASED) {
free_sref = 0; // don't call syscall_generic_forwarding
}

if (free_obj) {
ihk_mc_spinlock_lock_noirq(&fileobj_list_lock);
dkprintf("%s: release obj 0x%lx cref: %d, free_obj: 0x%lx, %s\n",
__FUNCTION__,
obj,
obj->cref,
free_obj,
to_memobj(obj)->flags & MF_ZEROFILL ? "zerofill" : "");
mcs_rwlock_writer_lock_noirq(&fileobj_list_lock, &node);
/* zap page_list */
for (;;) {
struct page *page;
void *page_va;

page = page_list_first(obj);
page = fileobj_page_hash_first(obj);
if (!page) {
break;
}
page_list_remove(obj, page);
__fileobj_page_hash_remove(page);
page_va = phys_to_virt(page_to_phys(page));

if (ihk_atomic_read(&page->count) != 1) {
@ -295,7 +346,7 @@ static void fileobj_release(struct memobj *memobj)
#endif
}
obj_list_remove(free_obj);
ihk_mc_spinlock_unlock_noirq(&fileobj_list_lock);
mcs_rwlock_writer_unlock_noirq(&fileobj_list_lock, &node);
kfree(free_obj);
}

@ -341,83 +392,101 @@ static void fileobj_do_pageio(void *args0)
struct page *page;
ihk_mc_user_context_t ctx;
ssize_t ss;
struct mcs_rwlock_node mcs_node;
int hash = (off >> PAGE_SHIFT) & FILEOBJ_PAGE_HASH_MASK;

memobj_lock(&obj->memobj);
page = page_list_lookup(obj, off);
mcs_rwlock_writer_lock_noirq(&obj->page_hash_locks[hash],
&mcs_node);
page = __fileobj_page_hash_lookup(obj, hash, off);
if (!page) {
goto out;
}

while (page->mode == PM_PAGEIO) {
memobj_unlock(&obj->memobj);
mcs_rwlock_writer_unlock_noirq(&obj->page_hash_locks[hash],
&mcs_node);
cpu_pause();
memobj_lock(&obj->memobj);
mcs_rwlock_writer_lock_noirq(&obj->page_hash_locks[hash],
&mcs_node);
}

if (page->mode == PM_WILL_PAGEIO) {
page->mode = PM_PAGEIO;
memobj_unlock(&obj->memobj);

ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_READ;
ihk_mc_syscall_arg1(&ctx) = obj->handle;
ihk_mc_syscall_arg2(&ctx) = off;
ihk_mc_syscall_arg3(&ctx) = pgsize;
ihk_mc_syscall_arg4(&ctx) = page_to_phys(page);

ss = syscall_generic_forwarding(__NR_mmap, &ctx);

memobj_lock(&obj->memobj);
if (page->mode != PM_PAGEIO) {
kprintf("fileobj_do_pageio(%p,%lx,%lx):"
"invalid mode %x\n",
obj, off, pgsize, page->mode);
panic("fileobj_do_pageio:invalid page mode");
if (to_memobj(obj)->flags & MF_ZEROFILL) {
void *virt = phys_to_virt(page_to_phys(page));
memset(virt, 0, PAGE_SIZE);
}
else {
page->mode = PM_PAGEIO;
mcs_rwlock_writer_unlock_noirq(&obj->page_hash_locks[hash],
&mcs_node);

if (ss == 0) {
dkprintf("fileobj_do_pageio(%p,%lx,%lx):EOF? %ld\n",
obj, off, pgsize, ss);
page->mode = PM_PAGEIO_EOF;
goto out;
}
else if (ss != pgsize) {
kprintf("fileobj_do_pageio(%p,%lx,%lx):"
"read failed. %ld\n",
obj, off, pgsize, ss);
page->mode = PM_PAGEIO_ERROR;
goto out;
ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_READ;
ihk_mc_syscall_arg1(&ctx) = obj->handle;
ihk_mc_syscall_arg2(&ctx) = off;
ihk_mc_syscall_arg3(&ctx) = pgsize;
ihk_mc_syscall_arg4(&ctx) = page_to_phys(page);

dkprintf("%s: __NR_mmap for handle 0x%lx\n",
__FUNCTION__, obj->handle);
ss = syscall_generic_forwarding(__NR_mmap, &ctx);

mcs_rwlock_writer_lock_noirq(&obj->page_hash_locks[hash],
&mcs_node);
if (page->mode != PM_PAGEIO) {
kprintf("fileobj_do_pageio(%p,%lx,%lx):"
"invalid mode %x\n",
obj, off, pgsize, page->mode);
panic("fileobj_do_pageio:invalid page mode");
}

if (ss == 0) {
dkprintf("fileobj_do_pageio(%p,%lx,%lx):EOF? %ld\n",
obj, off, pgsize, ss);
page->mode = PM_PAGEIO_EOF;
goto out;
}
else if (ss != pgsize) {
kprintf("fileobj_do_pageio(%p,%lx,%lx):"
"read failed. %ld\n",
obj, off, pgsize, ss);
page->mode = PM_PAGEIO_ERROR;
goto out;
}
}

page->mode = PM_DONE_PAGEIO;
}
out:
memobj_unlock(&obj->memobj);
mcs_rwlock_writer_unlock_noirq(&obj->page_hash_locks[hash],
&mcs_node);
fileobj_release(&obj->memobj); /* got fileobj_get_page() */
kfree(args0);
dkprintf("fileobj_do_pageio(%p,%lx,%lx):\n", obj, off, pgsize);
return;
}

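For orientation, the pager traffic above is restated as a hedged sketch: McKernel forwards page reads to the Linux side over the system-call offload channel, and the PAGER_REQ_* requests ride on the __NR_mmap slot, presumably demultiplexed on arg0 by the host-side pager in mcexec. Types and surrounding declarations are simplified:

/* Hedged restatement of the PAGER_REQ_READ marshalling used above. */
ihk_mc_user_context_t ctx;

ihk_mc_syscall_arg0(&ctx) = PAGER_REQ_READ;	/* operation code */
ihk_mc_syscall_arg1(&ctx) = handle;		/* host-side file handle */
ihk_mc_syscall_arg2(&ctx) = off;		/* file offset to read */
ihk_mc_syscall_arg3(&ctx) = pgsize;		/* number of bytes */
ihk_mc_syscall_arg4(&ctx) = page_to_phys(page);	/* destination page */

/* returns the byte count; 0 means EOF, short reads are errors */
ss = syscall_generic_forwarding(__NR_mmap, &ctx);
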
static int fileobj_get_page(struct memobj *memobj, off_t off, int p2align, uintptr_t *physp, unsigned long *pflag)
static int fileobj_get_page(struct memobj *memobj, off_t off,
int p2align, uintptr_t *physp, unsigned long *pflag)
{
struct thread *proc = cpu_local_var(current);
struct fileobj *obj = to_fileobj(memobj);
int error;
int error = -1;
void *virt = NULL;
int npages;
uintptr_t phys = -1;
struct page *page;
struct pageio_args *args = NULL;
struct mcs_rwlock_node mcs_node;
int hash = (off >> PAGE_SHIFT) & FILEOBJ_PAGE_HASH_MASK;

dkprintf("fileobj_get_page(%p,%lx,%x,%p)\n", obj, off, p2align, physp);

memobj_lock(&obj->memobj);
if (p2align != PAGE_P2ALIGN) {
error = -ENOMEM;
goto out;
return -ENOMEM;
}

page = page_list_lookup(obj, off);
mcs_rwlock_writer_lock_noirq(&obj->page_hash_locks[hash],
&mcs_node);
page = __fileobj_page_hash_lookup(obj, hash, off);
if (!page || (page->mode == PM_WILL_PAGEIO)
|| (page->mode == PM_PAGEIO)) {
args = kmalloc(sizeof(*args), IHK_MC_AP_NOWAIT);
@ -431,7 +500,10 @@ static int fileobj_get_page(struct memobj *memobj, off_t off, int p2align, uintp

if (!page) {
npages = 1 << p2align;
virt = ihk_mc_alloc_pages(npages, IHK_MC_AP_NOWAIT);

virt = ihk_mc_alloc_pages(npages, IHK_MC_AP_NOWAIT |
(to_memobj(obj)->flags & MF_ZEROFILL) ? IHK_MC_AP_USER : 0);

if (!virt) {
error = -ENOMEM;
kprintf("fileobj_get_page(%p,%lx,%x,%p):"
@ -445,13 +517,15 @@ static int fileobj_get_page(struct memobj *memobj, off_t off, int p2align, uintp
if (page->mode != PM_NONE) {
panic("fileobj_get_page:invalid new page");
}
page->mode = PM_WILL_PAGEIO;
page->offset = off;
ihk_atomic_set(&page->count, 1);
page_list_insert(obj, page);
__fileobj_page_hash_insert(obj, page, hash);
page->mode = PM_WILL_PAGEIO;
}

memobj_lock(&obj->memobj);
++obj->cref; /* for fileobj_do_pageio() */
memobj_unlock(&obj->memobj);

args->fileobj = obj;
args->objoff = off;
@ -483,7 +557,8 @@ static int fileobj_get_page(struct memobj *memobj, off_t off, int p2align, uintp
*physp = page_to_phys(page);
virt = NULL;
out:
memobj_unlock(&obj->memobj);
mcs_rwlock_writer_unlock_noirq(&obj->page_hash_locks[hash],
&mcs_node);
if (virt) {
ihk_mc_free_pages(virt, npages);
}
@ -495,78 +570,6 @@ out:
return error;
}

static uintptr_t fileobj_copy_page(
struct memobj *memobj, uintptr_t orgpa, int p2align)
{
struct page *orgpage = phys_to_page(orgpa);
size_t pgsize = PAGE_SIZE << p2align;
int npages = 1 << p2align;
void *newkva = NULL;
uintptr_t newpa = -1;
void *orgkva;
int count;

dkprintf("fileobj_copy_page(%p,%lx,%d)\n", memobj, orgpa, p2align);
if (p2align != PAGE_P2ALIGN) {
panic("p2align");
}

memobj_lock(memobj);
for (;;) {
if (!orgpage || orgpage->mode != PM_MAPPED) {
kprintf("fileobj_copy_page(%p,%lx,%d):"
"invalid cow page. %x\n",
memobj, orgpa, p2align, orgpage ? orgpage->mode : 0);
panic("fileobj_copy_page:invalid cow page");
}
count = ihk_atomic_read(&orgpage->count);
if (count == 2) { // XXX: private only
list_del(&orgpage->list);
ihk_atomic_dec(&orgpage->count);
orgpage->mode = PM_NONE;
newpa = orgpa;
break;
}
if (count <= 0) {
kprintf("fileobj_copy_page(%p,%lx,%d):"
"orgpage count corrupted. %x\n",
memobj, orgpa, p2align, count);
panic("fileobj_copy_page:orgpage count corrupted");
}
if (newkva) {
orgkva = phys_to_virt(orgpa);
memcpy(newkva, orgkva, pgsize);
ihk_atomic_dec(&orgpage->count);
newpa = virt_to_phys(newkva);
if (phys_to_page(newpa)) {
page_map(phys_to_page(newpa));
}
newkva = NULL; /* avoid ihk_mc_free_pages() */
break;
}

memobj_unlock(memobj);
newkva = ihk_mc_alloc_aligned_pages(npages, p2align,
IHK_MC_AP_NOWAIT);
if (!newkva) {
kprintf("fileobj_copy_page(%p,%lx,%d):"
"alloc page failed\n",
memobj, orgpa, p2align);
goto out;
}
memobj_lock(memobj);
}
memobj_unlock(memobj);

out:
if (newkva) {
ihk_mc_free_pages(newkva, npages);
}
dkprintf("fileobj_copy_page(%p,%lx,%d): %lx\n",
memobj, orgpa, p2align, newpa);
return newpa;
}

static int fileobj_flush_page(struct memobj *memobj, uintptr_t phys,
size_t pgsize)
{
@ -575,6 +578,10 @@ static int fileobj_flush_page(struct memobj *memobj, uintptr_t phys,
ihk_mc_user_context_t ctx;
ssize_t ss;

if (to_memobj(obj)->flags & MF_ZEROFILL) {
return 0;
}

page = phys_to_page(phys);
if (!page) {
kprintf("%s: warning: tried to flush non-existing page for phys addr: 0x%lx\n",
@ -603,63 +610,48 @@ static int fileobj_flush_page(struct memobj *memobj, uintptr_t phys,
static int fileobj_invalidate_page(struct memobj *memobj, uintptr_t phys,
size_t pgsize)
{
struct fileobj *obj = to_fileobj(memobj);
int error;
struct page *page;

dkprintf("fileobj_invalidate_page(%p,%#lx,%#lx)\n",
memobj, phys, pgsize);

if (!(page = phys_to_page(phys))
|| !(page = page_list_lookup(obj, page->offset))) {
error = 0;
goto out;
}

if (ihk_atomic_read(&page->count) == 1) {
if (page_unmap(page)) {
ihk_mc_free_pages(phys_to_virt(phys),
pgsize/PAGE_SIZE);
}
}

error = 0;
out:
dkprintf("fileobj_invalidate_page(%p,%#lx,%#lx):%d\n",
memobj, phys, pgsize, error);
return error;
/* TODO: keep track of reverse mappings so that invalidation
* can be performed */
kprintf("%s: WARNING: file mapping invalidation not supported\n",
__FUNCTION__);
return 0;
}

static int fileobj_lookup_page(struct memobj *memobj, off_t off, int p2align, uintptr_t *physp, unsigned long *pflag)
static int fileobj_lookup_page(struct memobj *memobj, off_t off,
int p2align, uintptr_t *physp, unsigned long *pflag)
{
struct fileobj *obj = to_fileobj(memobj);
int error;
uintptr_t phys = -1;
int error = -1;
struct page *page;
struct mcs_rwlock_node mcs_node;
int hash = (off >> PAGE_SHIFT) & FILEOBJ_PAGE_HASH_MASK;

dkprintf("fileobj_lookup_page(%p,%lx,%x,%p)\n", obj, off, p2align, physp);

memobj_lock(&obj->memobj);
if (p2align != PAGE_P2ALIGN) {
error = -ENOMEM;
goto out;
return -ENOMEM;
}

page = page_list_lookup(obj, off);
mcs_rwlock_reader_lock_noirq(&obj->page_hash_locks[hash],
&mcs_node);

page = __fileobj_page_hash_lookup(obj, hash, off);
if (!page) {
error = -ENOENT;
dkprintf("fileobj_lookup_page(%p,%lx,%x,%p): page not found. %d\n", obj, off, p2align, physp, error);
goto out;
}
phys = page_to_phys(page);

*physp = page_to_phys(page);
error = 0;
if (physp) {
*physp = phys;
}

out:
memobj_unlock(&obj->memobj);
dkprintf("fileobj_lookup_page(%p,%lx,%x,%p): %d %lx\n",
obj, off, p2align, physp, error, phys);
mcs_rwlock_reader_unlock_noirq(&obj->page_hash_locks[hash],
&mcs_node);

dkprintf("fileobj_lookup_page(%p,%lx,%x,%p): %d \n",
obj, off, p2align, physp, error);
return error;
}

@ -248,9 +248,13 @@ static int cmpxchg_futex_value_locked(uint32_t __user *uaddr, uint32_t uval, uin

static int get_futex_value_locked(uint32_t *dest, uint32_t *from)
{
/* RIKEN: futexes are always on not swappable pages */
*dest = getint_user((int *)from);

/*
* Officially we should call:
* return getint_user((int *)dest, (int *)from);
*
* but McKernel on x86 can just access user-space.
*/
*dest = *(volatile uint32_t *)from;
return 0;
}

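Restated outside the diff for orientation: McKernel shares the user address space in its page tables and never swaps futex pages, so the "copy from user" degenerates to a direct volatile load, and the error path callers still check is unreachable on this kernel:

/* Minimal sketch of the resulting behavior on McKernel/x86. */
static int get_futex_value_locked(uint32_t *dest, uint32_t *from)
{
	*dest = *(volatile uint32_t *)from;	/* user page is pinned */
	return 0;	/* kept for Linux-shaped call sites */
}
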
@ -670,25 +674,32 @@ static uint64_t futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q
uint64_t timeout)
{
uint64_t time_remain = 0;
unsigned long irqstate;
struct thread *thread = cpu_local_var(current);
/*
* The task state is guaranteed to be set before another task can
* wake it. set_current_state() is implemented using set_mb() and
* queue_me() calls spin_unlock() upon completion, both serializing
* access to the hash list and forcing another memory barrier.
* wake it.
* queue_me() calls spin_unlock() upon completion, serializing
* access to the hash list and forcing a memory barrier.
*/
xchg4(&(cpu_local_var(current)->status), PS_INTERRUPTIBLE);

/* Indicate spin sleep */
irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
thread->spin_sleep = 1;
ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);

queue_me(q, hb);

if (!plist_node_empty(&q->list)) {

/* RIKEN: use mcos timers */
if (timeout) {
dkprintf("futex_wait_queue_me(): tid: %d schedule_timeout()\n", cpu_local_var(current)->tid);
time_remain = schedule_timeout(timeout);
}
else {
dkprintf("futex_wait_queue_me(): tid: %d schedule()\n", cpu_local_var(current)->tid);
schedule();
spin_sleep_or_schedule();
time_remain = 0;
}

@ -697,6 +708,7 @@ static uint64_t futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q

/* This does not need to be serialized */
cpu_local_var(current)->status = PS_RUNNING;
thread->spin_sleep = 0;

return time_remain;
}
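For orientation, the spin-sleep handshake introduced above, condensed into one hedged sketch; spin_sleep_or_schedule() is assumed to poll briefly for a wakeup before committing to a full context switch, with the exact policy living elsewhere in the kernel:

/* Hedged restatement of the waiter side of the protocol. */
irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
thread->spin_sleep = 1;		/* advertise: a waker may spin me awake */
ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);

queue_me(q, hb);		/* publish the wait-queue entry */
if (!plist_node_empty(&q->list))
	spin_sleep_or_schedule();	/* short spin, then really sleep */

thread->spin_sleep = 0;		/* plain store; no serialization needed */
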
@ -743,14 +755,17 @@ static int futex_wait_setup(uint32_t __user *uaddr, uint32_t val, int fshared,
*/
q->key = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr, fshared, &q->key);
if ((ret != 0))
if (ret != 0)
return ret;

*hb = queue_lock(q);

ret = get_futex_value_locked(&uval, uaddr);

/* RIKEN: get_futex_value_locked() always returns 0 on mckernel */
if (ret) {
queue_unlock(q, *hb);
put_futex_key(fshared, &q->key);
return ret;
}

if (uval != val) {
queue_unlock(q, *hb);
@ -776,8 +791,6 @@ static int futex_wait(uint32_t __user *uaddr, int fshared,
q.bitset = bitset;
q.requeue_pi_key = NULL;

/* RIKEN: futex_wait_queue_me() calls schedule_timeout() if timer is set */

retry:
/* Prepare to wait on uaddr. */
ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);

@ -125,7 +125,7 @@ int prepare_process_ranges_args_envs(struct thread *thread,

up = virt_to_phys(up_v);
if (add_process_memory_range(vm, s, e, up, flags, NULL, 0,
PAGE_SHIFT) != 0) {
PAGE_SHIFT, NULL) != 0) {
ihk_mc_free_pages(up_v, range_npages);
kprintf("ERROR: adding memory range for ELF section %i\n", i);
goto err;
@ -213,7 +213,7 @@ int prepare_process_ranges_args_envs(struct thread *thread,
args_envs_p = virt_to_phys(args_envs);

if(add_process_memory_range(vm, addr, e, args_envs_p,
flags, NULL, 0, PAGE_SHIFT) != 0){
flags, NULL, 0, PAGE_SHIFT, NULL) != 0){
ihk_mc_free_pages(args_envs, ARGENV_PAGE_COUNT);
kprintf("ERROR: adding memory range for args/envs\n");
goto err;
@ -393,7 +393,9 @@ static int process_msg_prepare_process(unsigned long rphys)
memcpy_long(pn, p, sizeof(struct program_load_desc)
+ sizeof(struct program_image_section) * n);

if((thread = create_thread(p->entry)) == NULL){
if ((thread = create_thread(p->entry,
(unsigned long *)&p->cpu_set,
sizeof(p->cpu_set))) == NULL) {
kfree(pn);
ihk_mc_unmap_virtual(p, npages, 1);
ihk_mc_unmap_memory(NULL, phys, sz);
@ -432,9 +434,6 @@ static int process_msg_prepare_process(unsigned long rphys)
vm->region.map_end = vm->region.map_start;
memcpy(proc->rlimit, pn->rlimit, sizeof(struct rlimit) * MCK_RLIM_MAX);

/* TODO: Clear it at the proper timing */
cpu_local_var(scp).post_idx = 0;

if (prepare_process_ranges_args_envs(thread, pn, p, attr,
NULL, 0, NULL, 0) != 0) {
kprintf("error: preparing process ranges, args, envs, stack\n");
@ -459,70 +458,6 @@ err:
return -ENOMEM;
}

static void process_msg_init(struct ikc_scd_init_param *pcp, struct syscall_params *lparam)
{
lparam->response_va = ihk_mc_alloc_pages(RESPONSE_PAGE_COUNT, 0);
lparam->response_pa = virt_to_phys(lparam->response_va);

pcp->request_page = 0;
pcp->doorbell_page = 0;
pcp->response_page = lparam->response_pa;
}

static void process_msg_init_acked(struct ihk_ikc_channel_desc *c, unsigned long pphys)
{
struct ikc_scd_init_param *param = phys_to_virt(pphys);
struct syscall_params *lparam;
enum ihk_mc_pt_attribute attr;

attr = PTATTR_NO_EXECUTE | PTATTR_WRITABLE | PTATTR_FOR_USER;

lparam = &cpu_local_var(scp);
if(cpu_local_var(syscall_channel2) == c)
lparam = &cpu_local_var(scp2);
lparam->request_rpa = param->request_page;
lparam->request_pa = ihk_mc_map_memory(NULL, param->request_page,
REQUEST_PAGE_COUNT * PAGE_SIZE);
if((lparam->request_va = ihk_mc_map_virtual(lparam->request_pa,
REQUEST_PAGE_COUNT,
attr)) == NULL){
// TODO:
panic("ENOMEM");
}

lparam->doorbell_rpa = param->doorbell_page;
lparam->doorbell_pa = ihk_mc_map_memory(NULL, param->doorbell_page,
DOORBELL_PAGE_COUNT *
PAGE_SIZE);
if((lparam->doorbell_va = ihk_mc_map_virtual(lparam->doorbell_pa,
DOORBELL_PAGE_COUNT,
attr)) == NULL){
// TODO:
panic("ENOMEM");
}

lparam->post_rpa = param->post_page;
lparam->post_pa = ihk_mc_map_memory(NULL, param->post_page,
PAGE_SIZE);
if((lparam->post_va = ihk_mc_map_virtual(lparam->post_pa, 1,
attr)) == NULL){
// TODO:
panic("ENOMEM");
}

lparam->post_fin = 1;

dkprintf("Syscall parameters: (%d)\n", ihk_mc_get_processor_id());
dkprintf(" Response: %lx, %p\n",
lparam->response_pa, lparam->response_va);
dkprintf(" Request : %lx, %lx, %p\n",
lparam->request_pa, lparam->request_rpa, lparam->request_va);
dkprintf(" Doorbell: %lx, %lx, %p\n",
lparam->doorbell_pa, lparam->doorbell_rpa, lparam->doorbell_va);
dkprintf(" Post: %lx, %lx, %p\n",
lparam->post_pa, lparam->post_rpa, lparam->post_va);
}

static void syscall_channel_send(struct ihk_ikc_channel_desc *c,
struct ikc_scd_packet *packet)
{
@ -530,7 +465,7 @@ static void syscall_channel_send(struct ihk_ikc_channel_desc *c,
}

extern unsigned long do_kill(struct thread *, int, int, int, struct siginfo *, int ptracecont);
extern void process_procfs_request(unsigned long rarg);
extern void process_procfs_request(struct ikc_scd_packet *rpacket);
extern void terminate_host(int pid);
extern void debug_log(long);

@ -557,7 +492,6 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
switch (packet->msg) {
case SCD_MSG_INIT_CHANNEL_ACKED:
dkprintf("SCD_MSG_INIT_CHANNEL_ACKED\n");
process_msg_init_acked(c, packet->arg);
ret = 0;
break;

@ -579,14 +513,16 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
break;

case SCD_MSG_SCHEDULE_PROCESS:
cpuid = obtain_clone_cpuid();
if(cpuid == -1){
thread = (struct thread *)packet->arg;

cpuid = obtain_clone_cpuid(&thread->cpu_set);
if (cpuid == -1) {
kprintf("No CPU available\n");
ret = -1;
break;
}

dkprintf("SCD_MSG_SCHEDULE_PROCESS: %lx\n", packet->arg);
thread = (struct thread *)packet->arg;
proc = thread->proc;
thread->tid = proc->pid;
proc->status = PS_RUNNING;
@ -594,8 +530,7 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
chain_thread(thread);
chain_process(proc);
runq_add_thread(thread, cpuid);

//cpu_local_var(next) = (struct thread *)packet->arg;

ret = 0;
break;

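For context, scheduling a new process now honors the CPU set handed over from the host in the thread structure. A hypothetical sketch of a cpu_set-aware pick (the real obtain_clone_cpuid() lives elsewhere in the kernel and presumably also balances run queues; set[] is the PLD-style unsigned-long bitmap):

static int pick_cpu_from_set(unsigned long *set)
{
	int cpu;

	for (cpu = 0; cpu < num_processors; ++cpu) {
		/* skip CPUs outside the requested affinity mask */
		if (!(set[cpu / 64] & (1UL << (cpu % 64))))
			continue;
		return cpu;	/* first eligible CPU */
	}
	return -1;	/* matches the "No CPU available" path above */
}
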
@ -637,7 +572,7 @@ static int syscall_packet_handler(struct ihk_ikc_channel_desc *c,
break;

case SCD_MSG_PROCFS_REQUEST:
process_procfs_request(packet->arg);
process_procfs_request(packet);
ret = 0;
break;

@ -683,7 +618,7 @@ void init_host_syscall_channel(void)

param.port = 501;
param.pkt_size = sizeof(struct ikc_scd_packet);
param.queue_size = PAGE_SIZE;
param.queue_size = PAGE_SIZE * 4;
param.magic = 0x1129;
param.handler = syscall_packet_handler;

@ -696,7 +631,6 @@ void init_host_syscall_channel(void)

get_this_cpu_local_var()->syscall_channel = param.channel;

process_msg_init(&cpu_local_var(iip), &cpu_local_var(scp));
pckt.msg = SCD_MSG_INIT_CHANNEL;
pckt.ref = ihk_mc_get_processor_id();
pckt.arg = virt_to_phys(&cpu_local_var(iip));
@ -710,7 +644,7 @@ void init_host_syscall_channel2(void)

param.port = 502;
param.pkt_size = sizeof(struct ikc_scd_packet);
param.queue_size = PAGE_SIZE;
param.queue_size = PAGE_SIZE * 4;
param.magic = 0x1329;
param.handler = syscall_packet_handler;

@ -723,7 +657,6 @@ void init_host_syscall_channel2(void)

get_this_cpu_local_var()->syscall_channel2 = param.channel;

process_msg_init(&cpu_local_var(iip2), &cpu_local_var(scp2));
pckt.msg = SCD_MSG_INIT_CHANNEL;
pckt.ref = ihk_mc_get_processor_id();
pckt.arg = virt_to_phys(&cpu_local_var(iip2));

@ -56,11 +56,9 @@ struct cpu_local_var {
size_t runq_len;

struct ihk_ikc_channel_desc *syscall_channel;
struct syscall_params scp;
struct ikc_scd_init_param iip;

struct ihk_ikc_channel_desc *syscall_channel2;
struct syscall_params scp2;
struct ikc_scd_init_param iip2;
struct resource_set *resource_set;


@ -16,7 +16,7 @@
extern void arch_init(void);
extern void kmsg_init(int);
extern void mem_init(void);
extern void ikc_master_init(void);
extern void ihk_ikc_master_init(void);
extern void ap_init(void);
extern void arch_ready(void);
extern void mc_ikc_test_init(void);
@ -32,4 +32,6 @@ extern void cpu_sysfs_setup(void);

extern char *find_command_line(char *name);

extern int num_processors;

#endif

@ -28,9 +28,9 @@ r;\
})
#define kfree(ptr) _kfree(ptr, __FILE__, __LINE__)
#define memcheck(ptr, msg) _memcheck(ptr, msg, __FILE__, __LINE__, 0)
void *_kmalloc(int size, enum ihk_mc_ap_flag flag, char *file, int line);
void *_kmalloc(int size, ihk_mc_ap_flag flag, char *file, int line);
void _kfree(void *ptr, char *file, int line);
void *__kmalloc(int size, enum ihk_mc_ap_flag flag);
void *__kmalloc(int size, ihk_mc_ap_flag flag);
void __kfree(void *ptr);

int _memcheck(void *ptr, char *msg, char *file, int line, int free);

@ -32,13 +32,23 @@ enum {
MF_HAS_PAGER = 0x0001,
MF_SHMDT_OK = 0x0002,
MF_IS_REMOVABLE = 0x0004,
MF_PREFETCH = 0x0008,
MF_ZEROFILL = 0x0010,
MF_REG_FILE = 0x1000,
MF_DEV_FILE = 0x2000,
MF_HOST_RELEASED = 0x80000000,
MF_END
};

#define MEMOBJ_READY 0
#define MEMOBJ_TO_BE_PREFETCHED 1

struct memobj {
struct memobj_ops * ops;
uint32_t flags;
int8_t padding[4];
ihk_spinlock_t lock;
struct memobj_ops *ops;
uint32_t flags;
uint32_t status;
size_t size;
ihk_spinlock_t lock;
};

typedef void memobj_release_func_t(struct memobj *obj);

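For orientation, the widened flag space above distinguishes what backs a memobj and whether the host side has already been torn down. A hedged sketch of how callers might test these bits (helper names are hypothetical; the real call sites are the fileobj/devobj paths shown earlier):

static int memobj_is_file_backed(struct memobj *obj)
{
	return !!(obj->flags & (MF_REG_FILE | MF_DEV_FILE));
}

static int memobj_may_forward_to_host(struct memobj *obj)
{
	/* once MF_HOST_RELEASED is set, no PAGER_REQ_* may be sent */
	return !(obj->flags & MF_HOST_RELEASED);
}
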
@ -30,7 +30,8 @@ enum pager_op {
struct pager_create_result {
uintptr_t handle;
int maxprot;
int8_t padding[4];
uint32_t flags;
size_t size;
};

/*

@ -166,7 +166,7 @@

#define NOPHYS ((uintptr_t)-1)

#define PROCESS_NUMA_MASK_BITS 64
#define PROCESS_NUMA_MASK_BITS 256

/*
* Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
@ -242,6 +242,28 @@ struct process_vm;
struct vm_regions;
struct vm_range;

//#define TRACK_SYSCALLS

#ifdef TRACK_SYSCALLS
#define TRACK_SYSCALLS_MAX 300
#define __NR_track_syscalls 701

#define TRACK_SYSCALLS_CLEAR 0x01
#define TRACK_SYSCALLS_ON 0x02
#define TRACK_SYSCALLS_OFF 0x04
#define TRACK_SYSCALLS_PRINT 0x08
#define TRACK_SYSCALLS_PRINT_PROC 0x10

void track_syscalls_print_thread_stats(struct thread *thread);
void track_syscalls_print_proc_stats(struct process *proc);
void track_syscalls_accumulate_counters(struct thread *thread,
struct process *proc);
void track_syscalls_alloc_counters(struct thread *thread);
void track_syscalls_dealloc_thread_counters(struct thread *thread);
void track_syscalls_dealloc_proc_counters(struct process *proc);
#endif // TRACK_SYSCALLS


#define HASH_SIZE 73

struct resource_set {
@ -369,6 +391,13 @@ struct vm_range {
int padding;
};

struct vm_range_numa_policy {
struct list_head list;
unsigned long start, end;
DECLARE_BITMAP(numa_mask, PROCESS_NUMA_MASK_BITS);
int numa_mem_policy;
};

struct vm_regions {
unsigned long vm_start, vm_end;
unsigned long text_start, text_end;
@ -398,7 +427,7 @@ struct mckfd {
#define SFD_NONBLOCK 04000

struct sig_common {
ihk_spinlock_t lock;
mcs_rwlock_lock_t lock;
ihk_atomic_t use;
struct k_sigaction action[_NSIG];
struct list_head sigpending;
@ -459,7 +488,7 @@ struct process {
// V +---- |
// PS_STOPPED -----+
// (PS_TRACED)
int exit_status;
int exit_status; // only for zombie

/* Store exit_status for a group of threads when stopped by SIGSTOP.
exit_status can't be used because values of exit_status of threads
@ -530,6 +559,13 @@ struct process {
#define PP_COUNT 2
#define PP_STOP 3
struct mc_perf_event *monitoring_event;
#ifdef TRACK_SYSCALLS
mcs_lock_node_t st_lock;
uint64_t *syscall_times;
uint32_t *syscall_cnts;
uint64_t *offload_times;
uint32_t *offload_cnts;
#endif // TRACK_SYSCALLS
};

void hold_thread(struct thread *ftn);
@ -571,6 +607,7 @@ struct thread {
// PS_TRACED
// PS_INTERRPUTIBLE
// PS_UNINTERRUPTIBLE
int exit_status;

// process vm
struct process_vm *vm;
@ -601,12 +638,20 @@ struct thread {
fp_regs_struct *fp_regs;
int in_syscall_offload;

#ifdef TRACK_SYSCALLS
int track_syscalls;
uint64_t *syscall_times;
uint32_t *syscall_cnts;
uint64_t *offload_times;
uint32_t *offload_cnts;
#endif // TRACK_SYSCALLS

// signal
struct sig_common *sigcommon;
sigset_t sigmask;
stack_t sigstack;
struct list_head sigpending;
ihk_spinlock_t sigpendinglock;
mcs_rwlock_lock_t sigpendinglock;
volatile int sigevent;

// gpio
@ -636,6 +681,8 @@ struct thread {
struct waitq scd_wq;
};

#define VM_RANGE_CACHE_SIZE 4

struct process_vm {
struct address_space *address_space;
struct list_head vm_range_list;
@ -660,6 +707,10 @@ struct process_vm {
long currss;
DECLARE_BITMAP(numa_mask, PROCESS_NUMA_MASK_BITS);
int numa_mem_policy;
/* Protected by memory_range_lock */
struct list_head vm_range_numa_policy_list;
struct vm_range *range_cache[VM_RANGE_CACHE_SIZE];
int range_cache_ind;
};

static inline int has_cap_ipc_lock(struct thread *th)
@ -676,7 +727,8 @@ static inline int has_cap_sys_admin(struct thread *th)

void hold_address_space(struct address_space *);
void release_address_space(struct address_space *);
struct thread *create_thread(unsigned long user_pc);
struct thread *create_thread(unsigned long user_pc,
unsigned long *__cpu_set, size_t cpu_set_size);
struct thread *clone_thread(struct thread *org, unsigned long pc,
unsigned long sp, int clone_flags);
void destroy_thread(struct thread *thread);
@ -691,9 +743,10 @@ void free_process_memory_ranges(struct process_vm *vm);
int populate_process_memory(struct process_vm *vm, void *start, size_t len);

int add_process_memory_range(struct process_vm *vm,
unsigned long start, unsigned long end,
unsigned long phys, unsigned long flag,
struct memobj *memobj, off_t objoff, int pgshift);
unsigned long start, unsigned long end,
unsigned long phys, unsigned long flag,
struct memobj *memobj, off_t offset,
int pgshift, struct vm_range **rp);
int remove_process_memory_range(struct process_vm *vm, unsigned long start,
unsigned long end, int *ro_freedp);
int split_process_memory_range(struct process_vm *vm,
@ -733,9 +786,11 @@ extern enum ihk_mc_pt_attribute arch_vrflag_to_ptattr(unsigned long flag, uint64
enum ihk_mc_pt_attribute common_vrflag_to_ptattr(unsigned long flag, uint64_t fault, pte_t *ptep);

void schedule(void);
void spin_sleep_or_schedule(void);
void runq_add_thread(struct thread *thread, int cpu_id);
void runq_del_thread(struct thread *thread, int cpu_id);
int sched_wakeup_thread(struct thread *thread, int valid_states);
int sched_wakeup_thread_locked(struct thread *thread, int valid_states);

void sched_request_migrate(int cpu_id, struct thread *thread);
void check_need_resched(void);

@ -149,6 +149,10 @@ struct program_image_section {
#define MCK_RLIMIT_SIGPENDING 14
#define MCK_RLIMIT_STACK 15

#define PLD_CPU_SET_MAX_CPUS 1024
typedef unsigned long __cpu_set_unit;
#define PLD_CPU_SET_SIZE (PLD_CPU_SET_MAX_CPUS / (8 * sizeof(__cpu_set_unit)))

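A quick sanity check of the cpu_set sizing just defined: 1024 bits packed into 64-bit words gives 16 words, i.e. 128 bytes per descriptor (assuming an LP64 target, where unsigned long is 8 bytes). A standalone check:

#include <stdio.h>

typedef unsigned long __cpu_set_unit;
#define PLD_CPU_SET_MAX_CPUS 1024
#define PLD_CPU_SET_SIZE (PLD_CPU_SET_MAX_CPUS / (8 * sizeof(__cpu_set_unit)))

int main(void)
{
	printf("%zu words, %zu bytes\n", (size_t)PLD_CPU_SET_SIZE,
	       (size_t)(PLD_CPU_SET_SIZE * sizeof(__cpu_set_unit)));
	return 0;	/* prints: 16 words, 128 bytes on LP64 */
}
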
struct program_load_desc {
int num_sections;
int status;
@ -178,6 +182,7 @@ struct program_load_desc {
struct rlimit rlimit[MCK_RLIM_MAX];
unsigned long interp_align;
char shell_path[SHELL_PATH_MAX_LEN];
__cpu_set_unit cpu_set[PLD_CPU_SET_SIZE];
struct program_image_section sections[0];
};

@ -250,22 +255,6 @@ struct syscall_post {
unsigned long v[8];
};

struct syscall_params {
unsigned long request_rpa, request_pa;
struct syscall_request *request_va;
unsigned long response_pa;
struct syscall_response *response_va;

unsigned long doorbell_rpa, doorbell_pa;
unsigned long *doorbell_va;

unsigned int post_idx;
unsigned long post_rpa, post_pa;
struct syscall_post *post_va;
unsigned long post_fin;
struct syscall_post post_buf IHK_DMA_ALIGN;
};

#define SYSCALL_DECLARE(name) long sys_##name(int n, ihk_mc_user_context_t *ctx)
#define SYSCALL_HEADER struct syscall_request request IHK_DMA_ALIGN; \
request.number = n
@ -387,6 +376,7 @@ extern struct tod_data_s tod_data; /* residing in arch-dependent file */

void reset_cputime();
void set_cputime(int mode);
int do_munmap(void *addr, size_t len);
intptr_t do_mmap(intptr_t addr0, size_t len0, int prot, int flags, int fd,
off_t off0);
void clear_host_pte(uintptr_t addr, size_t len);

@ -27,6 +27,8 @@ typedef int (*waitq_func_t)(struct waitq_entry *wait, unsigned mode,

int default_wake_function(struct waitq_entry *wait, unsigned mode, int flags,
void *key);
int locked_wake_function(struct waitq_entry *wait, unsigned mode, int flags,
void *key);

typedef struct waitq {
ihk_spinlock_t lock;
@ -57,6 +59,13 @@ typedef struct waitq_entry {
.link = { &(name).link, &(name).link } \
}

#define DECLARE_WAITQ_ENTRY_LOCKED(name, tsk) \
waitq_entry_t name = { \
.private = tsk, \
.func = locked_wake_function, \
.link = { &(name).link, &(name).link } \
}

extern void waitq_init(waitq_t *waitq);
extern void waitq_init_entry(waitq_entry_t *entry, struct thread *proc);
extern int waitq_active(waitq_t *waitq);
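For context, the new _LOCKED declarator differs from the plain one only in wiring locked_wake_function as the wake callback, for wakers that already hold the relevant lock. A hedged usage sketch; waitq_prepare_to_wait() and waitq_finish_wait() are assumed helpers modeled on the Linux wait-queue API, not names confirmed by this diff:

static void wait_example(waitq_t *wq)
{
	/* entry whose wakeup runs via locked_wake_function */
	DECLARE_WAITQ_ENTRY_LOCKED(my_wait, cpu_local_var(current));

	waitq_prepare_to_wait(wq, &my_wait, PS_INTERRUPTIBLE);
	schedule();			/* sleep until woken */
	waitq_finish_wait(wq, &my_wait);
}
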
21 kernel/include/xpmem.h (new file)
@ -0,0 +1,21 @@
/**
* \file xpmem.h
* License details are found in the file LICENSE.
* \brief
* Structures and functions of xpmem
*/
/*
* HISTORY
*/

#ifndef _XPMEM_H
#define _XPMEM_H

#include <ihk/context.h>

#define XPMEM_DEV_PATH "/dev/xpmem"

extern int xpmem_open(ihk_mc_user_context_t *ctx);

#endif /* _XPMEM_H */

388 kernel/include/xpmem_private.h (new file)
@ -0,0 +1,388 @@
/**
* \file xpmem_private.h
* License details are found in the file LICENSE.
* \brief
* Private Cross Partition Memory (XPMEM) structures and macros.
*/
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
* Copyright 2009, 2010, 2014 Cray Inc. All Rights Reserved
* Copyright (c) 2014-2016 Los Alamos National Security, LCC. All rights
* reserved.
*/
/*
* HISTORY
*/

#ifndef _XPMEM_PRIVATE_H
#define _XPMEM_PRIVATE_H

#include <mc_xpmem.h>
#include <xpmem.h>

#define XPMEM_CURRENT_VERSION 0x00026003

//#define DEBUG_PRINT_XPMEM

#ifdef DEBUG_PRINT_XPMEM
#define dkprintf(...) kprintf(__VA_ARGS__)
#define ekprintf(...) kprintf(__VA_ARGS__)
#define XPMEM_DEBUG(format, a...) kprintf("[%d] %s: "format"\n", cpu_local_var(current)->proc->rgid, __func__, ##a)
#else
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#define ekprintf(...) kprintf(__VA_ARGS__)
#define XPMEM_DEBUG(format, a...) do { if (0) kprintf("\n"); } while (0)
#endif

//#define USE_DBUG_ON

#ifdef USE_DBUG_ON
#define DBUG_ON(condition) do { if (condition) kprintf("[%d] BUG: func=%s\n", cpu_local_var(current)->proc->rgid, __func__); } while (0)
#else
#define DBUG_ON(condition)
#endif

#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

#define min(x, y) ({ \
__typeof__(x) _min1 = (x); \
__typeof__(y) _min2 = (y); \
(void) (&_min1 == &_min2); \
_min1 < _min2 ? _min1 : _min2;})

#define max(x, y) ({ \
__typeof__(x) _max1 = (x); \
__typeof__(y) _max2 = (y); \
(void) (&_max1 == &_max2); \
_max1 > _max2 ? _max1 : _max2;})

#define MAX_ERRNO 4095

#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)

static inline void * ERR_PTR(long error)
{
return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
return (long)ptr;
}

static inline long IS_ERR(const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}

static inline long IS_ERR_OR_NULL(const void *ptr)
{
return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}

/*
* Both the xpmem_segid_t and xpmem_apid_t are of type __s64 and designed
* to be opaque to the user. Both consist of the same underlying fields.
*
* The 'uniq' field is designed to give each segid or apid a unique value.
* Each type is only unique with respect to itself.
*
* An ID is never less than or equal to zero.
*/
struct xpmem_id {
pid_t tgid; /* thread group that owns ID */
unsigned int uniq; /* this value makes the ID unique */
};

typedef union {
struct xpmem_id xpmem_id;
xpmem_segid_t segid;
xpmem_apid_t apid;
} xpmem_id_t;

/* Shift INT_MAX by one so we can tell when we overflow. */
#define XPMEM_MAX_UNIQ_ID (INT_MAX >> 1)

static inline pid_t xpmem_segid_to_tgid(xpmem_segid_t segid)
{
DBUG_ON(segid <= 0);
return ((xpmem_id_t *)&segid)->xpmem_id.tgid;
}

static inline pid_t xpmem_apid_to_tgid(xpmem_apid_t apid)
{
DBUG_ON(apid <= 0);
return ((xpmem_id_t *)&apid)->xpmem_id.tgid;
}
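For orientation: an XPMEM ID round-trips through the union above, packing the owner's tgid and a per-type uniquifier into one 64-bit value. A hedged sketch (the packing helper name is hypothetical; only the accessors above are real):

static xpmem_segid_t xpmem_pack_segid(pid_t tgid, unsigned int uniq)
{
	xpmem_id_t id;

	id.xpmem_id.tgid = tgid;
	id.xpmem_id.uniq = uniq;	/* kept <= XPMEM_MAX_UNIQ_ID */
	return id.segid;		/* xpmem_segid_to_tgid() recovers tgid */
}
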
|
||||
/*
|
||||
* Hash Tables
|
||||
*
|
||||
* XPMEM utilizes hash tables to enable faster lookups of list entries.
|
||||
* These hash tables are implemented as arrays. A simple modulus of the hash
|
||||
* key yields the appropriate array index. A hash table's array element (i.e.,
|
||||
* hash table bucket) consists of a hash list and the lock that protects it.
|
||||
*
|
||||
* XPMEM has the following two hash tables:
|
||||
*
|
||||
* table bucket key
|
||||
* part->tg_hashtable list of struct xpmem_thread_group tgid
|
||||
* tg->ap_hashtable list of struct xpmem_access_permit apid.uniq
|
||||
*/
|
||||
struct xpmem_hashlist {
|
||||
mcs_rwlock_lock_t lock; /* lock for hash list */
|
||||
struct list_head list; /* hash list */
|
||||
};
|
||||
|
||||
#define XPMEM_TG_HASHTABLE_SIZE 8
|
||||
#define XPMEM_AP_HASHTABLE_SIZE 8
|
||||
|
||||
static inline int xpmem_tg_hashtable_index(pid_t tgid)
|
||||
{
|
||||
int index;
|
||||
|
||||
index = (unsigned int)tgid % XPMEM_TG_HASHTABLE_SIZE;
|
||||
|
||||
XPMEM_DEBUG("return: tgid=%lu, index=%d", tgid, index);
|
||||
|
||||
return index;
|
||||
}
|
||||
|
||||
static inline int xpmem_ap_hashtable_index(xpmem_apid_t apid)
|
||||
{
|
||||
int index;
|
||||
|
||||
DBUG_ON(apid <= 0);
|
||||
|
||||
index = ((xpmem_id_t *)&apid)->xpmem_id.uniq % XPMEM_AP_HASHTABLE_SIZE;
|
||||
|
||||
XPMEM_DEBUG("return: apid=%lu, index=%d", apid, index);
|
||||
|
||||
return index;
|
||||
}
|
||||
|
||||
/*
|
||||
* general internal driver structures
|
||||
*/
|
||||
struct xpmem_thread_group {
|
||||
ihk_spinlock_t lock; /* tg lock */
|
||||
pid_t tgid; /* tg's tgid */
|
||||
uid_t uid; /* tg's uid */
|
||||
gid_t gid; /* tg's gid */
|
||||
volatile int flags; /* tg attributes and state */
|
||||
ihk_atomic_t uniq_segid;
|
||||
ihk_atomic_t uniq_apid;
|
||||
mcs_rwlock_lock_t seg_list_lock;
|
||||
struct list_head seg_list; /* tg's list of segs */
|
||||
ihk_atomic_t refcnt; /* references to tg */
|
||||
ihk_atomic_t n_pinned; /* #of pages pinned by this tg */
|
||||
struct list_head tg_hashlist; /* tg hash list */
|
||||
struct thread *group_leader; /* thread group leader */
|
||||
struct process_vm *vm; /* tg's mm */
|
||||
ihk_atomic_t n_recall_PFNs; /* #of recall of PFNs in progress */
|
||||
struct xpmem_hashlist ap_hashtable[]; /* locks + ap hash lists */
|
||||
};
|
||||
|
||||
struct xpmem_segment {
|
||||
ihk_spinlock_t lock; /* seg lock */
|
||||
mcs_rwlock_lock_t seg_lock; /* seg sema */
|
||||
xpmem_segid_t segid; /* unique segid */
|
||||
unsigned long vaddr; /* starting address */
|
||||
size_t size; /* size of seg */
|
||||
int permit_type; /* permission scheme */
|
||||
void *permit_value; /* permission data */
|
||||
volatile int flags; /* seg attributes and state */
|
||||
ihk_atomic_t refcnt; /* references to seg */
|
||||
struct xpmem_thread_group *tg; /* creator tg */
|
||||
struct list_head ap_list; /* local access permits of seg */
|
||||
struct list_head seg_list; /* tg's list of segs */
|
||||
};
|
||||
|
||||
struct xpmem_access_permit {
|
||||
ihk_spinlock_t lock; /* access permit lock */
|
||||
xpmem_apid_t apid; /* unique apid */
|
||||
int mode; /* read/write mode */
|
||||
volatile int flags; /* access permit attributes and state */
|
||||
ihk_atomic_t refcnt; /* references to access permit */
|
||||
struct xpmem_segment *seg; /* seg permitted to be accessed */
|
||||
struct xpmem_thread_group *tg; /* access permit's tg */
|
||||
struct list_head att_list; /* atts of this access permit's seg */
|
||||
struct list_head ap_list; /* access permits linked to seg */
|
||||
struct list_head ap_hashlist; /* access permit hash list */
|
||||
};
|
||||
|
||||
struct xpmem_attachment {
|
||||
	mcs_rwlock_lock_t at_lock;	/* att lock for serialization */
	struct mcs_rwlock_node_irqsave at_irqsave; /* att lock for serialization */
	unsigned long vaddr;		/* starting address of seg attached */
	unsigned long at_vaddr;		/* address where seg is attached */
	size_t at_size;			/* size of seg attachment */
	struct vm_range *at_vma;	/* vma where seg is attachment */
	volatile int flags;		/* att attributes and state */
	ihk_atomic_t refcnt;		/* references to att */
	struct xpmem_access_permit *ap;	/* associated access permit */
	struct list_head att_list;	/* atts linked to access permit */
	struct process_vm *vm;		/* mm struct attached to */
	mcs_rwlock_lock_t invalidate_lock; /* to serialize page table invalidates */
};

struct xpmem_partition {
	ihk_atomic_t n_opened;			/* # of /dev/xpmem opened */
	struct xpmem_hashlist tg_hashtable[];	/* locks + tg hash lists */
};

#define XPMEM_FLAG_DESTROYING	0x00040	/* being destroyed */
#define XPMEM_FLAG_DESTROYED	0x00080	/* 'being destroyed' finished */

#define XPMEM_FLAG_VALIDPTEs	0x00200	/* valid PTEs exist */

struct xpmem_perm {
	uid_t uid;
	gid_t gid;
	unsigned long mode;
};

#define XPMEM_PERM_IRUSR 00400
#define XPMEM_PERM_IWUSR 00200

static int xpmem_ioctl(struct mckfd *mckfd, ihk_mc_user_context_t *ctx);
static int xpmem_close(struct mckfd *mckfd, ihk_mc_user_context_t *ctx);

static int xpmem_init(void);
static void xpmem_exit(void);
static int __xpmem_open(void);
static void xpmem_destroy_tg(struct xpmem_thread_group *);

static int xpmem_make(unsigned long, size_t, int, void *, xpmem_segid_t *);
static xpmem_segid_t xpmem_make_segid(struct xpmem_thread_group *);

static int xpmem_remove(xpmem_segid_t);
static void xpmem_remove_seg(struct xpmem_thread_group *,
		struct xpmem_segment *);

static void xpmem_clear_PTEs(struct xpmem_segment *);

extern struct xpmem_partition *xpmem_my_part;

static struct xpmem_thread_group *__xpmem_tg_ref_by_tgid_nolock_internal(
	pid_t, int, int);

static inline struct xpmem_thread_group *__xpmem_tg_ref_by_tgid(
	pid_t tgid,
	int return_destroying)
{
	struct xpmem_thread_group *tg;
	int index;
	struct mcs_rwlock_node_irqsave lock;

	XPMEM_DEBUG("call: tgid=%d, return_destroying=%d",
		tgid, return_destroying);

	index = xpmem_tg_hashtable_index(tgid);
	mcs_rwlock_reader_lock(&xpmem_my_part->tg_hashtable[index].lock, &lock);
	tg = __xpmem_tg_ref_by_tgid_nolock_internal(tgid, index,
		return_destroying);
	mcs_rwlock_reader_unlock(&xpmem_my_part->tg_hashtable[index].lock,
		&lock);

	XPMEM_DEBUG("return: tg=0x%p", tg);

	return tg;
}

static inline struct xpmem_thread_group *__xpmem_tg_ref_by_tgid_nolock(
	pid_t tgid,
	int return_destroying)
{
	struct xpmem_thread_group *tg;

	XPMEM_DEBUG("call: tgid=%d, return_destroying=%d",
		tgid, return_destroying);

	tg = __xpmem_tg_ref_by_tgid_nolock_internal(tgid,
		xpmem_tg_hashtable_index(tgid), return_destroying);

	XPMEM_DEBUG("return: tg=0x%p", tg);

	return tg;
}

#define xpmem_tg_ref_by_tgid(t)			__xpmem_tg_ref_by_tgid(t, 0)
#define xpmem_tg_ref_by_tgid_all(t)		__xpmem_tg_ref_by_tgid(t, 1)
#define xpmem_tg_ref_by_tgid_nolock(t)		__xpmem_tg_ref_by_tgid_nolock(t, 0)
#define xpmem_tg_ref_by_tgid_all_nolock(t)	__xpmem_tg_ref_by_tgid_nolock(t, 1)

static struct xpmem_thread_group *xpmem_tg_ref_by_segid(xpmem_segid_t);
static void xpmem_tg_deref(struct xpmem_thread_group *);
static struct xpmem_segment *xpmem_seg_ref_by_segid(struct xpmem_thread_group *,
		xpmem_segid_t);
static void xpmem_seg_deref(struct xpmem_segment *);

/*
 * Inlines that mark an internal driver structure as being destroyable or not.
 * The idea is to set the refcnt to 1 at structure creation time and then
 * drop that reference at the time the structure is to be destroyed.
 */
static inline void xpmem_tg_not_destroyable(
	struct xpmem_thread_group *tg)
{
	ihk_atomic_set(&tg->refcnt, 1);

	XPMEM_DEBUG("return: tg->refcnt=%d", tg->refcnt);
}

static inline void xpmem_tg_destroyable(
	struct xpmem_thread_group *tg)
{
	XPMEM_DEBUG("call: ");

	xpmem_tg_deref(tg);

	XPMEM_DEBUG("return: ");
}

static inline void xpmem_seg_not_destroyable(
	struct xpmem_segment *seg)
{
	ihk_atomic_set(&seg->refcnt, 1);

	XPMEM_DEBUG("return: seg->refcnt=%d", seg->refcnt);
}

static inline void xpmem_seg_destroyable(
	struct xpmem_segment *seg)
{
	XPMEM_DEBUG("call: ");

	xpmem_seg_deref(seg);

	XPMEM_DEBUG("return: ");
}

/*
 * Inlines that increment the refcnt for the specified structure.
 */
static inline void xpmem_tg_ref(
	struct xpmem_thread_group *tg)
{
	DBUG_ON(ihk_atomic_read(&tg->refcnt) <= 0);
	ihk_atomic_inc(&tg->refcnt);

	XPMEM_DEBUG("return: tg->refcnt=%d", tg->refcnt);
}

static inline void xpmem_seg_ref(
	struct xpmem_segment *seg)
{
	DBUG_ON(ihk_atomic_read(&seg->refcnt) <= 0);
	ihk_atomic_inc(&seg->refcnt);

	XPMEM_DEBUG("return: seg->refcnt=%d", seg->refcnt);
}

#endif /* _XPMEM_PRIVATE_H */
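The "destroyable" inlines above implement a common refcount lifecycle: every structure is created holding one reference, and the destroy path simply drops that creation-time reference, so any concurrent lookup that took its own reference keeps the object alive until it derefs. A minimal standalone sketch of the same pattern, assuming C11 atomics and a generic object type in place of ihk_atomic_t and the XPMEM structures:

#include <stdatomic.h>
#include <stdlib.h>

struct object {
	atomic_int refcnt;
};

/* Creation: hold the single "creator" reference (not destroyable yet). */
static struct object *object_create(void)
{
	struct object *obj = malloc(sizeof(*obj));
	if (obj)
		atomic_store(&obj->refcnt, 1);
	return obj;
}

/* Lookup paths take an extra reference while they use the object. */
static void object_ref(struct object *obj)
{
	atomic_fetch_add(&obj->refcnt, 1);
}

/* Dropping the last reference frees the object. */
static void object_deref(struct object *obj)
{
	if (atomic_fetch_sub(&obj->refcnt, 1) == 1)
		free(obj);
}

/* Marking destroyable == dropping the creation-time reference; the
 * object disappears as soon as the last concurrent user derefs it. */
static void object_destroyable(struct object *obj)
{
	object_deref(obj);
}

int main(void)
{
	struct object *obj = object_create();
	object_ref(obj);          /* a concurrent lookup */
	object_destroyable(obj);  /* destroy path gives up its reference */
	object_deref(obj);        /* last user drops it; memory is freed */
	return 0;
}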
@ -108,11 +108,11 @@ static void dma_test(void)
}
#endif

extern char *ihk_mc_get_kernel_args(void);
extern char *ihk_get_kargs(void);

char *find_command_line(char *name)
{
	char *cmdline = ihk_mc_get_kernel_args();
	char *cmdline = ihk_get_kargs();

	if (!cmdline) {
		return NULL;
@ -122,7 +122,7 @@ char *find_command_line(char *name)

static void parse_kargs(void)
{
	kprintf("KCommand Line: %s\n", ihk_mc_get_kernel_args());
	kprintf("KCommand Line: %s\n", ihk_get_kargs());

	if (1) {
		char *key = "osnum=";
@ -254,7 +254,7 @@ static void rest_init(void)
	time_init();
	kmalloc_init();

	ikc_master_init();
	ihk_ikc_master_init();

	proc_init();

@ -336,11 +336,8 @@ static void post_init(void)
	}

	if (find_command_line("hidos")) {
		extern ihk_spinlock_t syscall_lock;

		init_host_syscall_channel();
		init_host_syscall_channel2();
		ihk_mc_spinlock_init(&syscall_lock);
	}

	arch_setup_vdso();
@ -373,6 +370,7 @@ int main(void)

	kputs("IHK/McKernel started.\n");

	ihk_set_kmsg(virt_to_phys(&kmsg_buf), IHK_KMSG_SIZE);
	arch_init();

	/*

276 kernel/mem.c
@ -49,7 +49,7 @@
#endif

static unsigned long pa_start, pa_end;
static struct ihk_mc_numa_node *memory_nodes = NULL;
static struct ihk_mc_numa_node memory_nodes[512];

extern void unhandled_page_fault(struct thread *, void *, void *);
extern int interrupt_from_user(void *);
@ -65,12 +65,12 @@ extern void early_alloc_invalidate(void);

static char *memdebug = NULL;

static void *___kmalloc(int size, enum ihk_mc_ap_flag flag);
static void *___kmalloc(int size, ihk_mc_ap_flag flag);
static void ___kfree(void *ptr);

static void *___ihk_mc_alloc_aligned_pages(int npages,
	int p2align, enum ihk_mc_ap_flag flag);
static void *___ihk_mc_alloc_pages(int npages, enum ihk_mc_ap_flag flag);
static void *___ihk_mc_alloc_aligned_pages_node(int npages,
	int p2align, ihk_mc_ap_flag flag, int node);
static void *___ihk_mc_alloc_pages(int npages, ihk_mc_ap_flag flag);
static void ___ihk_mc_free_pages(void *p, int npages);

/*
@ -151,14 +151,15 @@ struct pagealloc_track_entry *__pagealloc_track_find_entry(
}

/* Top level routines called from macros */
void *_ihk_mc_alloc_aligned_pages(int npages, int p2align,
	enum ihk_mc_ap_flag flag, char *file, int line)
void *_ihk_mc_alloc_aligned_pages_node(int npages, int p2align,
	ihk_mc_ap_flag flag, int node, char *file, int line)
{
	unsigned long irqflags;
	struct pagealloc_track_entry *entry;
	struct pagealloc_track_addr_entry *addr_entry;
	int hash, addr_hash;
	void *r = ___ihk_mc_alloc_aligned_pages(npages, p2align, flag);
	void *r = ___ihk_mc_alloc_aligned_pages_node(npages,
		p2align, flag, node);

	if (!memdebug || !pagealloc_track_initialized)
		return r;
@ -230,12 +231,6 @@ out:
	return r;
}

void *_ihk_mc_alloc_pages(int npages, enum ihk_mc_ap_flag flag,
	char *file, int line)
{
	return _ihk_mc_alloc_aligned_pages(npages, PAGE_P2ALIGN, flag, file, line);
}

void _ihk_mc_free_pages(void *ptr, int npages, char *file, int line)
{
	unsigned long irqflags;
@ -449,18 +444,18 @@ void pagealloc_memcheck(void)


/* Actual allocation routines */
static void *___ihk_mc_alloc_aligned_pages(int npages, int p2align,
	enum ihk_mc_ap_flag flag)
static void *___ihk_mc_alloc_aligned_pages_node(int npages, int p2align,
	ihk_mc_ap_flag flag, int node)
{
	if (pa_ops)
		return pa_ops->alloc_page(npages, p2align, flag);
		return pa_ops->alloc_page(npages, p2align, flag, node);
	else
		return early_alloc_pages(npages);
}

static void *___ihk_mc_alloc_pages(int npages, enum ihk_mc_ap_flag flag)
static void *___ihk_mc_alloc_pages(int npages, ihk_mc_ap_flag flag)
{
	return ___ihk_mc_alloc_aligned_pages(npages, PAGE_P2ALIGN, flag);
	return ___ihk_mc_alloc_aligned_pages_node(npages, PAGE_P2ALIGN, flag, -1);
}

static void ___ihk_mc_free_pages(void *p, int npages)
@ -494,18 +489,117 @@ static void reserve_pages(struct ihk_page_allocator_desc *pa_allocator,
	ihk_pagealloc_reserve(pa_allocator, start, end);
}

static void *allocate_aligned_pages(int npages, int p2align,
	enum ihk_mc_ap_flag flag)
extern int cpu_local_var_initialized;
static void *mckernel_allocate_aligned_pages_node(int npages, int p2align,
	ihk_mc_ap_flag flag, int pref_node)
{
	unsigned long pa;
	int i;
	unsigned long pa = 0;
	int i, node;
	struct ihk_page_allocator_desc *pa_allocator;

	/* Not yet initialized or idle process */
	if (!cpu_local_var_initialized ||
			!cpu_local_var(current) ||
			!cpu_local_var(current)->vm)
		goto distance_based;

	/* User requested policy? */
	if (!(flag & IHK_MC_AP_USER)) {
		goto distance_based;
	}

	node = ihk_mc_get_numa_id();
	if (!memory_nodes[node].nodes_by_distance)
		goto order_based;

	switch (cpu_local_var(current)->vm->numa_mem_policy) {
	case MPOL_BIND:
	case MPOL_PREFERRED:

		/* Look at nodes in the order of distance but consider
		 * only the ones requested in user policy */
		for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {

			/* Not part of user requested policy? */
			if (!test_bit(memory_nodes[node].nodes_by_distance[i].id,
					cpu_local_var(current)->proc->vm->numa_mask)) {
				continue;
			}

			list_for_each_entry(pa_allocator,
					&memory_nodes[memory_nodes[node].
					nodes_by_distance[i].id].allocators, list) {
				pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);

				if (pa) {
					dkprintf("%s: policy: CPU @ node %d allocated "
						"%d pages from node %d\n",
						__FUNCTION__,
						ihk_mc_get_numa_id(),
						npages, node);
					break;
				}
			}

			if (pa) break;
		}
		break;

	case MPOL_INTERLEAVE:
		/* TODO: */
		break;

	default:
		break;
	}

	if (pa) {
		return phys_to_virt(pa);
	}
	else {
		dkprintf("%s: couldn't fulfill user policy for %d pages\n",
			__FUNCTION__, npages);
	}

distance_based:
	node = ihk_mc_get_numa_id();

	/* Look at nodes in the order of distance */
	if (!memory_nodes[node].nodes_by_distance)
		goto order_based;

	/* TODO: match NUMA id and distance matrix with allocating core */
	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
		struct ihk_page_allocator_desc *pa_allocator;

		list_for_each_entry(pa_allocator,
				&memory_nodes[(ihk_mc_get_numa_id() + i) %
				&memory_nodes[memory_nodes[node].
				nodes_by_distance[i].id].allocators, list) {
			pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);

			if (pa) {
				dkprintf("%s: distance: CPU @ node %d allocated "
					"%d pages from node %d\n",
					__FUNCTION__,
					ihk_mc_get_numa_id(),
					npages,
					memory_nodes[node].nodes_by_distance[i].id);
				break;
			}
		}

		if (pa) break;
	}

	if (pa)
		return phys_to_virt(pa);

order_based:
	node = ihk_mc_get_numa_id();

	/* Fall back to regular order */
	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {

		list_for_each_entry(pa_allocator,
				&memory_nodes[(node + i) %
				ihk_mc_get_nr_numa_nodes()].allocators, list) {
			pa = ihk_pagealloc_alloc(pa_allocator, npages, p2align);

@ -524,12 +618,7 @@ static void *allocate_aligned_pages(int npages, int p2align,
	return NULL;
}

static void *allocate_pages(int npages, enum ihk_mc_ap_flag flag)
{
	return allocate_aligned_pages(npages, PAGE_P2ALIGN, flag);
}

static void __free_pages_in_allocator(void *va, int npages)
static void __mckernel_free_pages_in_allocator(void *va, int npages)
{
	int i;
	unsigned long pa_start = virt_to_phys(va);
@ -552,7 +641,7 @@ static void __free_pages_in_allocator(void *va, int npages)
}


static void free_pages(void *va, int npages)
static void mckernel_free_pages(void *va, int npages)
{
	struct list_head *pendings = &cpu_local_var(pending_free_pages);
	struct page *page;
@ -560,7 +649,8 @@ static void free_pages(void *va, int npages)
	page = phys_to_page(virt_to_phys(va));
	if (page) {
		if (page->mode != PM_NONE) {
			panic("free_pages:not PM_NONE");
			kprintf("%s: WARNING: page phys 0x%lx is not PM_NONE",
				__FUNCTION__, page->phys);
		}
		if (pendings->next != NULL) {
			page->mode = PM_PENDING_FREE;
@ -570,7 +660,7 @@ static void free_pages(void *va, int npages)
		}
	}

	__free_pages_in_allocator(va, npages);
	__mckernel_free_pages_in_allocator(va, npages);
}

void begin_free_pages_pending(void) {
@ -599,7 +689,7 @@ void finish_free_pages_pending(void)
		}
		page->mode = PM_NONE;
		list_del(&page->list);
		__free_pages_in_allocator(phys_to_virt(page_to_phys(page)),
		__mckernel_free_pages_in_allocator(phys_to_virt(page_to_phys(page)),
			page->offset);
	}

@ -608,8 +698,8 @@ void finish_free_pages_pending(void)
}

static struct ihk_mc_pa_ops allocator = {
	.alloc_page = allocate_aligned_pages,
	.free_page = free_pages,
	.alloc_page = mckernel_allocate_aligned_pages_node,
	.free_page = mckernel_free_pages,
};

void sbox_write(int offset, unsigned int value);
@ -754,6 +844,8 @@ void remote_flush_tlb_cpumask(struct process_vm *vm,
		flush_tlb();
	}

	/* Flush on this core */
	flush_tlb_single(addr & PAGE_MASK);
	/* Wait for all cores */
	while (ihk_atomic_read(&flush_entry->pending) != 0) {
		cpu_pause();
@ -804,8 +896,8 @@ static void page_fault_handler(void *fault_addr, uint64_t reason, void *regs)
	int error;

	set_cputime(interrupt_from_user(regs)? 1: 2);
	dkprintf("[%d]page_fault_handler(%p,%lx,%p)\n",
		ihk_mc_get_processor_id(), fault_addr, reason, regs);
	dkprintf("%s: addr: %p, reason: %lx, regs: %p\n",
		__FUNCTION__, fault_addr, reason, regs);

	preempt_disable();

@ -860,21 +952,22 @@ static void page_fault_handler(void *fault_addr, uint64_t reason, void *regs)
	error = 0;
	preempt_enable();
out:
	dkprintf("[%d]page_fault_handler(%p,%lx,%p): (%d)\n",
		ihk_mc_get_processor_id(), fault_addr, reason,
		regs, error);
	dkprintf("%s: addr: %p, reason: %lx, regs: %p -> error: %d\n",
		__FUNCTION__, fault_addr, reason, regs, error);
	check_need_resched();
	set_cputime(0);
	return;
}

static struct ihk_page_allocator_desc *page_allocator_init(uint64_t start,
	uint64_t end, int initial)
	uint64_t end)
{
	struct ihk_page_allocator_desc *pa_allocator;
	unsigned long page_map_pa, pages;
	void *page_map;
	unsigned int i;
	extern char _end[];
	unsigned long phys_end = virt_to_phys(_end);

	start &= PAGE_MASK;
	pa_start = (start + PAGE_SIZE - 1) & PAGE_MASK;
@ -887,7 +980,12 @@ static struct ihk_page_allocator_desc *page_allocator_init(uint64_t start,
	 */
	page_map_pa = 0x100000;
#else
	page_map_pa = initial ? virt_to_phys(get_last_early_heap()) : pa_start;
	if (pa_start <= phys_end && phys_end <= pa_end) {
		page_map_pa = virt_to_phys(get_last_early_heap());
	}
	else {
		page_map_pa = pa_start;
	}
#endif

	page_map = phys_to_virt(page_map_pa);
@ -918,18 +1016,21 @@ static struct ihk_page_allocator_desc *page_allocator_init(uint64_t start,
static void numa_init(void)
{
	int i, j;
	memory_nodes = early_alloc_pages((sizeof(*memory_nodes) *
		ihk_mc_get_nr_numa_nodes() + PAGE_SIZE - 1)
		>> PAGE_SHIFT);

	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
		int linux_numa_id, type;

		ihk_mc_get_numa_node(i, &linux_numa_id, &type);
		if (ihk_mc_get_numa_node(i, &linux_numa_id, &type) != 0) {
			kprintf("%s: error: obtaining NUMA info for node %d\n",
				__FUNCTION__, i);
			panic("");
		}

		memory_nodes[i].id = i;
		memory_nodes[i].linux_numa_id = linux_numa_id;
		memory_nodes[i].type = type;
		INIT_LIST_HEAD(&memory_nodes[i].allocators);
		memory_nodes[i].nodes_by_distance = 0;

		kprintf("NUMA: %d, Linux NUMA: %d, type: %d\n",
			i, linux_numa_id, type);
@ -942,7 +1043,7 @@ static void numa_init(void)

		ihk_mc_get_memory_chunk(j, &start, &end, &numa_id);

		allocator = page_allocator_init(start, end, (j == 0));
		allocator = page_allocator_init(start, end);
		list_add_tail(&allocator->list, &memory_nodes[numa_id].allocators);

		kprintf("Physical memory: 0x%lx - 0x%lx, %lu bytes, %d pages available @ NUMA: %d\n",
@ -953,6 +1054,72 @@ static void numa_init(void)
	}
}

static void numa_distances_init()
{
	int i, j, swapped;

	for (i = 0; i < ihk_mc_get_nr_numa_nodes(); ++i) {
		/* TODO: allocate on target node */
		memory_nodes[i].nodes_by_distance =
			ihk_mc_alloc_pages((sizeof(struct node_distance) *
				ihk_mc_get_nr_numa_nodes() + PAGE_SIZE - 1)
				>> PAGE_SHIFT, IHK_MC_AP_NOWAIT);

		if (!memory_nodes[i].nodes_by_distance) {
			kprintf("%s: error: allocating nodes_by_distance\n",
				__FUNCTION__);
			continue;
		}

		for (j = 0; j < ihk_mc_get_nr_numa_nodes(); ++j) {
			memory_nodes[i].nodes_by_distance[j].id = j;
			memory_nodes[i].nodes_by_distance[j].distance =
				ihk_mc_get_numa_distance(i, j);
		}

		/* Sort by distance and node ID */
		swapped = 1;
		while (swapped) {
			swapped = 0;
			for (j = 1; j < ihk_mc_get_nr_numa_nodes(); ++j) {
				if ((memory_nodes[i].nodes_by_distance[j - 1].distance >
						memory_nodes[i].nodes_by_distance[j].distance) ||
						((memory_nodes[i].nodes_by_distance[j - 1].distance ==
						  memory_nodes[i].nodes_by_distance[j].distance) &&
						 (memory_nodes[i].nodes_by_distance[j - 1].id >
						  memory_nodes[i].nodes_by_distance[j].id))) {
					memory_nodes[i].nodes_by_distance[j - 1].id ^=
						memory_nodes[i].nodes_by_distance[j].id;
					memory_nodes[i].nodes_by_distance[j].id ^=
						memory_nodes[i].nodes_by_distance[j - 1].id;
					memory_nodes[i].nodes_by_distance[j - 1].id ^=
						memory_nodes[i].nodes_by_distance[j].id;

					memory_nodes[i].nodes_by_distance[j - 1].distance ^=
						memory_nodes[i].nodes_by_distance[j].distance;
					memory_nodes[i].nodes_by_distance[j].distance ^=
						memory_nodes[i].nodes_by_distance[j - 1].distance;
					memory_nodes[i].nodes_by_distance[j - 1].distance ^=
						memory_nodes[i].nodes_by_distance[j].distance;
					swapped = 1;
				}
			}
		}
		{
			char buf[1024];
			char *pbuf = buf;

			pbuf += sprintf(pbuf, "NUMA %d distances: ", i);
			for (j = 0; j < ihk_mc_get_nr_numa_nodes(); ++j) {
				pbuf += sprintf(pbuf, "%d (%d), ",
					memory_nodes[i].nodes_by_distance[j].id,
					memory_nodes[i].nodes_by_distance[j].distance);
			}
			kprintf("%s\n", buf);
		}
	}
}
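numa_distances_init() above builds, for every node, a list of all nodes sorted by (distance, node ID), and the new allocator walks that list nearest-first before falling back to plain node order. A rough standalone sketch of that selection logic; try_alloc_on_node() is a hypothetical toy stand-in for the per-node page allocators:

#include <stdlib.h>

struct node_dist { int id; int distance; };

static int cmp_dist(const void *a, const void *b)
{
	const struct node_dist *x = a, *y = b;
	if (x->distance != y->distance)
		return x->distance - y->distance;
	return x->id - y->id;	/* tie-break on node ID, as above */
}

/* Toy stand-in for a per-node page allocator: node 0 is "full" here. */
static void *try_alloc_on_node(int node, int npages)
{
	return node == 0 ? NULL : malloc((size_t)npages * 4096);
}

/* Try nodes in ascending distance from the caller's node; the first
 * node with free pages wins. distance[i] is the distance to node i. */
static void *alloc_distance_ordered(int nr_nodes, const int *distance,
		int npages)
{
	struct node_dist *order;
	void *p = NULL;
	int i;

	order = malloc(nr_nodes * sizeof(*order));
	if (!order)
		return NULL;

	for (i = 0; i < nr_nodes; i++) {
		order[i].id = i;
		order[i].distance = distance[i];
	}

	/* Nearest node first; equal distances fall back to lower ID. */
	qsort(order, nr_nodes, sizeof(*order), cmp_dist);

	for (i = 0; i < nr_nodes && !p; i++)
		p = try_alloc_on_node(order[i].id, npages);

	free(order);
	return p;
}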
#define PHYS_PAGE_HASH_SHIFT	(10)
#define PHYS_PAGE_HASH_SIZE	(1 << PHYS_PAGE_HASH_SHIFT)
#define PHYS_PAGE_HASH_MASK	(PHYS_PAGE_HASH_SIZE - 1)
@ -1234,6 +1401,9 @@ void mem_init(void)
		kprintf("Demand paging on ANONYMOUS mappings enabled.\n");
		anon_on_demand = 1;
	}

	/* Init distance vectors */
	numa_distances_init();
}

#define KMALLOC_TRACK_HASH_SHIFT (8)
@ -1323,7 +1493,7 @@ struct kmalloc_track_entry *__kmalloc_track_find_entry(
}

/* Top level routines called from macro */
void *_kmalloc(int size, enum ihk_mc_ap_flag flag, char *file, int line)
void *_kmalloc(int size, ihk_mc_ap_flag flag, char *file, int line)
{
	unsigned long irqflags;
	struct kmalloc_track_entry *entry;
@ -1513,7 +1683,7 @@ void kmalloc_memcheck(void)
}

/* Redirection routines registered in alloc structure */
void *__kmalloc(int size, enum ihk_mc_ap_flag flag)
void *__kmalloc(int size, ihk_mc_ap_flag flag)
{
	return kmalloc(size, flag);
}
@ -1611,7 +1781,7 @@ void kmalloc_consolidate_free_list(void)
#define KMALLOC_MIN_MASK (KMALLOC_MIN_SIZE - 1)

/* Actual low-level allocation routines */
static void *___kmalloc(int size, enum ihk_mc_ap_flag flag)
static void *___kmalloc(int size, ihk_mc_ap_flag flag)
{
	struct kmalloc_header *chunk_iter;
	struct kmalloc_header *chunk = NULL;

@ -21,7 +21,7 @@ static struct ihk_ikc_channel_desc *mchannel;
static int arch_master_channel_packet_handler(struct ihk_ikc_channel_desc *,
	void *__packet, void *arg);

void ikc_master_init(void)
void ihk_ikc_master_init(void)
{
	mchannel = kmalloc(sizeof(struct ihk_ikc_channel_desc) +
		sizeof(struct ihk_ikc_master_packet),

379 kernel/process.c
@ -74,7 +74,6 @@ init_process(struct process *proc, struct process *parent)
{
	/* These will be filled out when changing status */
	proc->pid = -1;
	proc->exit_status = -1;
	proc->status = PS_RUNNING;

	if(parent){
@ -102,6 +101,13 @@ init_process(struct process *proc, struct process *parent)
	waitq_init(&proc->waitpid_q);
	ihk_atomic_set(&proc->refcount, 2);
	proc->monitoring_event = NULL;
#ifdef TRACK_SYSCALLS
	mcs_lock_init(&proc->st_lock);
	proc->syscall_times = NULL;
	proc->syscall_cnts = NULL;
	proc->offload_times = NULL;
	proc->offload_cnts = NULL;
#endif
}

void
@ -210,6 +216,7 @@ init_process_vm(struct process *owner, struct address_space *asp, struct process

	ihk_atomic_set(&vm->refcount, 1);
	INIT_LIST_HEAD(&vm->vm_range_list);
	INIT_LIST_HEAD(&vm->vm_range_numa_policy_list);
	vm->address_space = asp;
	vm->proc = owner;
	vm->exiting = 0;
@ -225,16 +232,23 @@ init_process_vm(struct process *owner, struct address_space *asp, struct process
	}
	vm->numa_mem_policy = MPOL_DEFAULT;

	for (i = 0; i < VM_RANGE_CACHE_SIZE; ++i) {
		vm->range_cache[i] = NULL;
	}
	vm->range_cache_ind = 0;

	return 0;
}

struct thread *
create_thread(unsigned long user_pc)
struct thread *create_thread(unsigned long user_pc,
		unsigned long *__cpu_set, size_t cpu_set_size)
{
	struct thread *thread;
	struct process *proc;
	struct process_vm *vm = NULL;
	struct address_space *asp = NULL;
	int cpu;
	int cpu_set_empty = 1;

	thread = ihk_mc_alloc_pages(KERNEL_STACK_NR_PAGES, IHK_MC_AP_NOWAIT);
	if (!thread)
@ -250,7 +264,22 @@ create_thread(unsigned long user_pc)
	memset(vm, 0, sizeof(struct process_vm));
	init_process(proc, cpu_local_var(resource_set)->pid1);

	if (1) {
		/* Use requested CPU cores */
		for_each_set_bit(cpu, __cpu_set, cpu_set_size * BITS_PER_BYTE) {
			if (cpu >= num_processors) {
				kprintf("%s: invalid CPU requested in initial cpu_set\n",
					__FUNCTION__);
				goto err;
			}

			dkprintf("%s: pid: %d, CPU: %d\n",
				__FUNCTION__, proc->pid, cpu);
			CPU_SET(cpu, &thread->cpu_set);
			cpu_set_empty = 0;
		}

	/* Default allows all cores */
	if (cpu_set_empty) {
		struct ihk_mc_cpu_info *infop;
		int i;

@ -272,10 +301,10 @@ create_thread(unsigned long user_pc)
	dkprintf("fork(): sigshared\n");

	ihk_atomic_set(&thread->sigcommon->use, 1);
	ihk_mc_spinlock_init(&thread->sigcommon->lock);
	mcs_rwlock_init(&thread->sigcommon->lock);
	INIT_LIST_HEAD(&thread->sigcommon->sigpending);

	ihk_mc_spinlock_init(&thread->sigpendinglock);
	mcs_rwlock_init(&thread->sigpendinglock);
	INIT_LIST_HEAD(&thread->sigpending);

	thread->sigstack.ss_sp = NULL;
@ -292,6 +321,7 @@ create_thread(unsigned long user_pc)
	if(init_process_vm(proc, asp, vm) != 0){
		goto err;
	}
	thread->exit_status = -1;

	cpu_set(ihk_mc_get_processor_id(), &thread->vm->address_space->cpu_set,
		&thread->vm->address_space->cpu_set_lock);
@ -435,16 +465,19 @@ clone_thread(struct thread *org, unsigned long pc, unsigned long sp,
		memcpy(thread->sigcommon->action, org->sigcommon->action,
			sizeof(struct k_sigaction) * _NSIG);
		ihk_atomic_set(&thread->sigcommon->use, 1);
		ihk_mc_spinlock_init(&thread->sigcommon->lock);
		mcs_rwlock_init(&thread->sigcommon->lock);
		INIT_LIST_HEAD(&thread->sigcommon->sigpending);
		// TODO: copy signalfd
	}
	ihk_mc_spinlock_init(&thread->sigpendinglock);
	mcs_rwlock_init(&thread->sigpendinglock);
	INIT_LIST_HEAD(&thread->sigpending);
	thread->sigmask = org->sigmask;

	ihk_mc_spinlock_init(&thread->spin_sleep_lock);
	thread->spin_sleep = 0;
#ifdef TRACK_SYSCALLS
	thread->track_syscalls = org->track_syscalls;
#endif

	return thread;

@ -733,6 +766,7 @@ int join_process_memory_range(struct process_vm *vm,
	struct vm_range *surviving, struct vm_range *merging)
{
	int error;
	int i;

	dkprintf("join_process_memory_range(%p,%lx-%lx,%lx-%lx)\n",
		vm, surviving->start, surviving->end,
@ -761,6 +795,10 @@ int join_process_memory_range(struct process_vm *vm,
		memobj_release(merging->memobj);
	}
	list_del(&merging->list);
	for (i = 0; i < VM_RANGE_CACHE_SIZE; ++i) {
		if (vm->range_cache[i] == merging)
			vm->range_cache[i] = surviving;
	}
	kfree(merging);

	error = 0;
@ -774,7 +812,7 @@ int free_process_memory_range(struct process_vm *vm, struct vm_range *range)
{
	const intptr_t start0 = range->start;
	const intptr_t end0 = range->end;
	int error;
	int error, i;
	intptr_t start;
	intptr_t end;
	struct vm_range *neighbor;
@ -859,6 +897,10 @@ int free_process_memory_range(struct process_vm *vm, struct vm_range *range)
	}

	list_del(&range->list);
	for (i = 0; i < VM_RANGE_CACHE_SIZE; ++i) {
		if (vm->range_cache[i] == range)
			vm->range_cache[i] = NULL;
	}
	kfree(range);

	dkprintf("free_process_memory_range(%p,%lx-%lx): 0\n",
@ -986,21 +1028,18 @@ enum ihk_mc_pt_attribute common_vrflag_to_ptattr(unsigned long flag, uint64_t fa
}

int add_process_memory_range(struct process_vm *vm,
	unsigned long start, unsigned long end,
	unsigned long phys, unsigned long flag,
	struct memobj *memobj, off_t offset,
	int pgshift)
	unsigned long start, unsigned long end,
	unsigned long phys, unsigned long flag,
	struct memobj *memobj, off_t offset,
	int pgshift, struct vm_range **rp)
{
	struct vm_range *range;
	int rc;
#if 0
	extern void __host_update_process_range(struct thread *process,
		struct vm_range *range);
#endif

	if ((start < vm->region.user_start)
			|| (vm->region.user_end < end)) {
		kprintf("range(%#lx - %#lx) is not in user avail(%#lx - %#lx)\n",
		kprintf("%s: error: range %lx - %lx is not in user available area\n",
			__FUNCTION__,
			start, end, vm->region.user_start,
			vm->region.user_end);
		return -EINVAL;
@ -1008,9 +1047,10 @@ int add_process_memory_range(struct process_vm *vm,

	range = kmalloc(sizeof(struct vm_range), IHK_MC_AP_NOWAIT);
	if (!range) {
		kprintf("ERROR: allocating pages for range\n");
		kprintf("%s: ERROR: allocating pages for range\n", __FUNCTION__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&range->list);
	range->start = start;
	range->end = end;
@ -1019,48 +1059,34 @@ int add_process_memory_range(struct process_vm *vm,
	range->objoff = offset;
	range->pgshift = pgshift;

	if(range->flag & VR_DEMAND_PAGING) {
		dkprintf("range: 0x%lX - 0x%lX => physicall memory area is allocated on demand (%ld) [%lx]\n",
			range->start, range->end, range->end - range->start,
			range->flag);
	} else {
		dkprintf("range: 0x%lX - 0x%lX (%ld) [%lx]\n",
			range->start, range->end, range->end - range->start,
			range->flag);
	}

	rc = 0;
	if (0) {
		/* dummy */
	}
	else if (phys == NOPHYS) {
		/* nothing to map */
	}
	else if (flag & VR_REMOTE) {
	rc = 0;
	if (phys == NOPHYS) {
		/* Nothing to map */
	}
	else if (flag & VR_REMOTE) {
		rc = update_process_page_table(vm, range, phys, IHK_PTA_REMOTE);
	} else if (flag & VR_IO_NOCACHE) {
	}
	else if (flag & VR_IO_NOCACHE) {
		rc = update_process_page_table(vm, range, phys, PTATTR_UNCACHABLE);
	} else if(flag & VR_DEMAND_PAGING){
		//demand paging no need to update process table now
		dkprintf("demand paging do not update process page table\n");
		rc = 0;
	} else if ((range->flag & VR_PROT_MASK) == VR_PROT_NONE) {
	}
	else if (flag & VR_DEMAND_PAGING) {
		dkprintf("%s: range: 0x%lx - 0x%lx is demand paging\n",
			__FUNCTION__, range->start, range->end);
		rc = 0;
	} else {
	}
	else if ((range->flag & VR_PROT_MASK) == VR_PROT_NONE) {
		rc = 0;
	}
	else {
		rc = update_process_page_table(vm, range, phys, 0);
	}
	if(rc != 0){
		kprintf("ERROR: preparing page tables\n");

	if (rc != 0) {
		kprintf("%s: ERROR: preparing page tables\n", __FUNCTION__);
		kfree(range);
		return rc;
	}

#if 0 // disable __host_update_process_range() in add_process_memory_range(), because it has no effect on the actual mapping on the MICs side.
	if (!(flag & VR_REMOTE)) {
		__host_update_process_range(process, range);
	}
#endif

	insert_vm_range_list(vm, range);

	/* Clear content! */
@ -1069,12 +1095,18 @@ int add_process_memory_range(struct process_vm *vm,
		memset((void*)phys_to_virt(phys), 0, end - start);
	}

	/* Return range object if requested */
	if (rp) {
		*rp = range;
	}

	return 0;
}

struct vm_range *lookup_process_memory_range(
	struct process_vm *vm, uintptr_t start, uintptr_t end)
{
	int i;
	struct vm_range *range = NULL;

	dkprintf("lookup_process_memory_range(%p,%lx,%lx)\n", vm, start, end);
@ -1083,6 +1115,16 @@ struct vm_range *lookup_process_memory_range(
		goto out;
	}

	for (i = 0; i < VM_RANGE_CACHE_SIZE; ++i) {
		int c_i = (i + vm->range_cache_ind) % VM_RANGE_CACHE_SIZE;
		if (!vm->range_cache[c_i])
			continue;

		if (vm->range_cache[c_i]->start <= start &&
				vm->range_cache[c_i]->end >= end)
			return vm->range_cache[c_i];
	}

	list_for_each_entry(range, &vm->vm_range_list, list) {
		if (end <= range->start) {
			break;
@ -1094,6 +1136,12 @@ struct vm_range *lookup_process_memory_range(

	range = NULL;
out:
	if (range) {
		vm->range_cache_ind = (vm->range_cache_ind - 1 + VM_RANGE_CACHE_SIZE)
			% VM_RANGE_CACHE_SIZE;
		vm->range_cache[vm->range_cache_ind] = range;
	}

	dkprintf("lookup_process_memory_range(%p,%lx,%lx): %p %lx-%lx\n",
		vm, start, end, range,
		range? range->start: 0, range? range->end: 0);
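The hunks above put a small most-recently-used cache in front of the linear vm_range list walk: hits skip the list entirely, and every list hit is promoted into the cache's newest slot. A rough standalone sketch of the same idea over a plain singly linked list (all names hypothetical):

#include <stddef.h>

#define RANGE_CACHE_SIZE 4

struct range {
	unsigned long start, end;
	struct range *next;
};

struct space {
	struct range *head;                     /* sorted range list */
	struct range *cache[RANGE_CACHE_SIZE];  /* MRU lookup cache */
	int cache_ind;                          /* index of newest entry */
};

static struct range *lookup(struct space *s, unsigned long start,
		unsigned long end)
{
	struct range *r;
	int i;

	/* Fast path: scan the cache starting from the newest entry. */
	for (i = 0; i < RANGE_CACHE_SIZE; i++) {
		int c = (s->cache_ind + i) % RANGE_CACHE_SIZE;
		r = s->cache[c];
		if (r && r->start <= start && r->end >= end)
			return r;
	}

	/* Slow path: linear walk of the sorted list. */
	for (r = s->head; r; r = r->next) {
		if (end <= r->start)
			return NULL;	/* passed the spot; no match */
		if (r->start <= start && end <= r->end) {
			/* Promote the hit into the cache's newest slot. */
			s->cache_ind = (s->cache_ind - 1 + RANGE_CACHE_SIZE)
				% RANGE_CACHE_SIZE;
			s->cache[s->cache_ind] = r;
			return r;
		}
	}
	return NULL;
}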
@ -1335,6 +1383,11 @@ static int sync_one_page(void *arg0, page_table_t pt, pte_t *ptep,
	flush_tlb_single((uintptr_t)pgaddr);	/* XXX: TLB flush */

	phys = pte_get_phys(ptep);
	if (args->memobj->flags & MF_ZEROFILL) {
		error = 0;
		goto out;
	}

	error = memobj_flush_page(args->memobj, phys, pgsize);
	if (error) {
		ekprintf("sync_one_page(%p,%p,%p %#lx,%p,%d):"
@ -1362,11 +1415,19 @@ int sync_process_memory_range(struct process_vm *vm, struct vm_range *range,
	args.memobj = range->memobj;

	ihk_mc_spinlock_lock_noirq(&vm->page_table_lock);
	memobj_lock(range->memobj);

	if (!(range->memobj->flags & MF_ZEROFILL)) {
		memobj_lock(range->memobj);
	}

	error = visit_pte_range(vm->address_space->page_table, (void *)start,
		(void *)end, range->pgshift, VPTEF_SKIP_NULL,
		&sync_one_page, &args);
	memobj_unlock(range->memobj);
		(void *)end, range->pgshift, VPTEF_SKIP_NULL,
		&sync_one_page, &args);

	if (!(range->memobj->flags & MF_ZEROFILL)) {
		memobj_unlock(range->memobj);
	}

	ihk_mc_spinlock_unlock_noirq(&vm->page_table_lock);
	if (error) {
		ekprintf("sync_process_memory_range(%p,%p,%#lx,%#lx):"
@ -1658,10 +1719,9 @@ static int do_page_fault_process_vm(struct process_vm *vm, void *fault_addr0, ui
	range = lookup_process_memory_range(vm, fault_addr, fault_addr+1);
	if (range == NULL) {
		error = -EFAULT;
		dkprintf("[%d]do_page_fault_process_vm(%p,%lx,%lx):"
			"out of range. %d\n",
			ihk_mc_get_processor_id(), vm,
			fault_addr0, reason, error);
		dkprintf("do_page_fault_process_vm(): vm: %p, addr: %p, reason: %lx):"
			"out of range: %d\n",
			vm, fault_addr0, reason, error);
		goto out;
	}

@ -1691,10 +1751,18 @@ static int do_page_fault_process_vm(struct process_vm *vm, void *fault_addr0, ui
			kprintf("if (((range->flag & VR_PROT_MASK) == VR_PROT_NONE))\n");
		if (((reason & PF_WRITE) && !(reason & PF_PATCH)))
			kprintf("if (((reason & PF_WRITE) && !(reason & PF_PATCH)))\n");
		if (!(range->flag & VR_PROT_WRITE))
		if (!(range->flag & VR_PROT_WRITE)) {
			kprintf("if (!(range->flag & VR_PROT_WRITE))\n");
		if ((reason & PF_INSTR) && !(range->flag & VR_PROT_EXEC))
			//kprintf("setting VR_PROT_WRITE\n");
			//range->flag |= VR_PROT_WRITE;
			//goto cont;
		}
		if ((reason & PF_INSTR) && !(range->flag & VR_PROT_EXEC)) {
			kprintf("if ((reason & PF_INSTR) && !(range->flag & VR_PROT_EXEC))\n");
			//kprintf("setting VR_PROT_EXEC\n");
			//range->flag |= VR_PROT_EXEC;
			//goto cont;
		}
		goto out;
	}

@ -1783,6 +1851,7 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
	unsigned long minsz;
	unsigned long at_rand;
	struct process *proc = thread->proc;
	unsigned long __flag;

	/* create stack range */
	end = STACK_TOP(&thread->vm->region);
@ -1801,12 +1870,15 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
	vrflag |= VR_MAXPROT_READ | VR_MAXPROT_WRITE | VR_MAXPROT_EXEC;
#define	NOPHYS	((uintptr_t)-1)
	if ((rc = add_process_memory_range(thread->vm, start, end, NOPHYS,
			vrflag, NULL, 0, PAGE_SHIFT)) != 0) {
			vrflag, NULL, 0, PAGE_SHIFT, NULL)) != 0) {
		return rc;
	}

	__flag = (size >= 16777216) ? IHK_MC_AP_USER : 0;
	/* map physical pages for initial stack frame */
	stack = ihk_mc_alloc_pages(minsz >> PAGE_SHIFT, IHK_MC_AP_NOWAIT);
	stack = ihk_mc_alloc_pages(minsz >> PAGE_SHIFT,
		IHK_MC_AP_NOWAIT | __flag);

	if (!stack) {
		return -ENOMEM;
	}
@ -1920,7 +1992,7 @@ unsigned long extend_process_region(struct process_vm *vm,
	}
	if((rc = add_process_memory_range(vm, old_aligned_end,
			aligned_end, virt_to_phys(p), flag,
			LARGE_PAGE_SHIFT)) != 0){
			LARGE_PAGE_SHIFT, NULL)) != 0){
		ihk_mc_free_pages(p, (aligned_end - old_aligned_end) >> PAGE_SHIFT);
		return end;
	}
@ -1950,7 +2022,7 @@ unsigned long extend_process_region(struct process_vm *vm,

	if((rc = add_process_memory_range(vm, aligned_end,
			aligned_new_end, virt_to_phys((void *)p_aligned),
			flag, LARGE_PAGE_SHIFT)) != 0){
			flag, LARGE_PAGE_SHIFT, NULL)) != 0){
		ihk_mc_free_pages(p, (aligned_new_end - aligned_end + LARGE_PAGE_SIZE) >> PAGE_SHIFT);
		return end;
	}
@ -1969,15 +2041,16 @@ unsigned long extend_process_region(struct process_vm *vm,
		p=0;
	}else{

		p = ihk_mc_alloc_pages((aligned_new_end - aligned_end) >> PAGE_SHIFT, IHK_MC_AP_NOWAIT);
		p = ihk_mc_alloc_pages((aligned_new_end - aligned_end) >> PAGE_SHIFT,
			IHK_MC_AP_NOWAIT | IHK_MC_AP_USER);

		if (!p) {
			return end;
		}
	}
	if((rc = add_process_memory_range(vm, aligned_end, aligned_new_end,
			(p==0?0:virt_to_phys(p)), flag, NULL, 0,
			PAGE_SHIFT)) != 0){
	if ((rc = add_process_memory_range(vm, aligned_end, aligned_new_end,
			(p == 0 ? 0 : virt_to_phys(p)), flag, NULL, 0,
			PAGE_SHIFT, NULL)) != 0) {
		ihk_mc_free_pages(p, (aligned_new_end - aligned_end) >> PAGE_SHIFT);
		return end;
	}
@ -2092,6 +2165,10 @@ release_process(struct process *proc)
	}

	if (proc->tids) kfree(proc->tids);
#ifdef TRACK_SYSCALLS
	track_syscalls_print_proc_stats(proc);
	track_syscalls_dealloc_proc_counters(proc);
#endif // TRACK_SYSCALLS
	kfree(proc);
}

@ -2109,6 +2186,9 @@ free_all_process_memory_range(struct process_vm *vm)

	ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
	list_for_each_entry_safe(range, next, &vm->vm_range_list, list) {
		if (range->memobj) {
			range->memobj->flags |= MF_HOST_RELEASED;
		}
		error = free_process_memory_range(vm, range);
		if (error) {
			ekprintf("free_process_memory(%p):"
@ -2152,9 +2232,10 @@ int populate_process_memory(struct process_vm *vm, void *start, size_t len)
	for (addr = (uintptr_t)start; addr < end; addr += PAGE_SIZE) {
		error = page_fault_process_vm(vm, (void *)addr, reason);
		if (error) {
			ekprintf("populate_process_range:page_fault_process_vm"
				"(%p,%lx,%lx) failed %d\n",
				vm, addr, reason, error);
			ekprintf("%s: WARNING: page_fault_process_vm(): vm: %p, "
				"addr: %lx, reason: %lx, off: %lu, len: %lu returns %d\n",
				__FUNCTION__, vm, addr, reason,
				((void *)addr - start), len, error);
			goto out;
		}
	}
@ -2274,6 +2355,11 @@ void release_thread(struct thread *thread)

	vm = thread->vm;

#ifdef TRACK_SYSCALLS
	track_syscalls_accumulate_counters(thread, thread->proc);
	//track_syscalls_print_thread_stats(thread);
	track_syscalls_dealloc_thread_counters(thread);
#endif // TRACK_SYSCALLS
	procfs_delete_thread(thread);
	destroy_thread(thread);

@ -2282,7 +2368,7 @@ void release_thread(struct thread *thread)

void cpu_set(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock)
{
	unsigned int flags;
	unsigned long flags;
	flags = ihk_mc_spinlock_lock(lock);
	CPU_SET(cpu, cpu_set);
	ihk_mc_spinlock_unlock(lock, flags);
@ -2290,7 +2376,7 @@ void cpu_set(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock)

void cpu_clear(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock)
{
	unsigned int flags;
	unsigned long flags;
	flags = ihk_mc_spinlock_lock(lock);
	CPU_CLR(cpu, cpu_set);
	ihk_mc_spinlock_unlock(lock, flags);
@ -2299,7 +2385,7 @@ void cpu_clear(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock)
void cpu_clear_and_set(int c_cpu, int s_cpu,
	cpu_set_t *cpu_set, ihk_spinlock_t *lock)
{
	unsigned int flags;
	unsigned long flags;
	flags = ihk_mc_spinlock_lock(lock);
	CPU_CLR(c_cpu, cpu_set);
	CPU_SET(s_cpu, cpu_set);
@ -2324,7 +2410,9 @@ static void idle(void)
	cpu_enable_interrupt();

	while (1) {
		cpu_local_var(current)->status = PS_STOPPED;
		schedule();
		cpu_local_var(current)->status = PS_RUNNING;
		cpu_disable_interrupt();

		/* See if we need to migrate a process somewhere */
@ -2370,7 +2458,9 @@ static void idle(void)
				v->status == CPU_STATUS_RESERVED) {
			/* No work to do? Consolidate the kmalloc free list */
			kmalloc_consolidate_free_list();
			cpu_local_var(current)->status = PS_INTERRUPTIBLE;
			cpu_safe_halt();
			cpu_local_var(current)->status = PS_RUNNING;
		}
		else {
			cpu_enable_interrupt();
@ -2483,6 +2573,7 @@ void sched_init(void)
	ihk_mc_init_context(&idle_thread->ctx, NULL, idle);
	ihk_mc_spinlock_init(&idle_thread->vm->memory_range_lock);
	INIT_LIST_HEAD(&idle_thread->vm->vm_range_list);
	INIT_LIST_HEAD(&idle_thread->vm->vm_range_numa_policy_list);
	idle_thread->proc->pid = 0;
	idle_thread->tid = ihk_mc_get_processor_id();

@ -2566,13 +2657,13 @@ static void do_migrate(void)
			&req->thread->vm->address_space->cpu_set,
			&req->thread->vm->address_space->cpu_set_lock);

		dkprintf("do_migrate(): migrated TID %d from CPU %d to CPU %d\n",
			req->thread->tid, old_cpu_id, cpu_id);
		dkprintf("%s: migrated TID %d from CPU %d to CPU %d\n",
			__FUNCTION__, req->thread->tid, old_cpu_id, cpu_id);

		v->flags |= CPU_FLAG_NEED_RESCHED;
		ihk_mc_interrupt_cpu(get_x86_cpu_local_variable(cpu_id)->apic_id, 0xd1);
		waitq_wakeup(&req->wq);
		double_rq_unlock(cur_v, v, irqstate);

		continue;
ack:
		waitq_wakeup(&req->wq);
	}
@ -2599,6 +2690,65 @@ set_timer()
	}
}

/*
 * NOTE: it is assumed that a wait-queue (or futex queue) is
 * set before calling this function.
 * NOTE: one must set thread->spin_sleep to 1 before evaluating
 * the wait condition to avoid lost wake-ups.
 */
void spin_sleep_or_schedule(void)
{
	struct thread *thread = cpu_local_var(current);
	struct cpu_local_var *v;
	int do_schedule = 0;
	int woken = 0;
	long irqstate;

	/* Try to spin sleep */
	irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
	if (thread->spin_sleep == 0) {
		dkprintf("%s: caught a lost wake-up!\n", __FUNCTION__);
	}
	ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);

	for (;;) {
		/* Check if we need to reschedule */
		irqstate =
			ihk_mc_spinlock_lock(&(get_this_cpu_local_var()->runq_lock));
		v = get_this_cpu_local_var();

		if (v->flags & CPU_FLAG_NEED_RESCHED || v->runq_len > 1) {
			do_schedule = 1;
		}

		ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);

		/* Check if we were woken up */
		irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
		if (thread->spin_sleep == 0) {
			woken = 1;
		}

		/* Indicate that we are not spinning any more */
		if (do_schedule) {
			thread->spin_sleep = 0;
		}
		ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);

		if (woken) {
			return;
		}

		if (do_schedule) {
			break;
		}

		cpu_pause();
	}

	schedule();
}
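spin_sleep_or_schedule() above avoids lost wake-ups by having the waiter set thread->spin_sleep = 1 before re-checking its wait condition, while wakers clear the flag under the same lock; whichever side runs second is guaranteed to observe the other's write, so a wake-up that lands between "check condition" and "sleep" is never dropped. A compressed pthread-based sketch of that handshake (all names hypothetical; the caller initializes the mutex and sets spin_sleep = 1 before publishing the waiter):

#include <pthread.h>
#include <sched.h>

struct waiter {
	pthread_mutex_t lock;
	int spin_sleep;    /* 1 while the waiter may still be spinning */
};

/* Waker side: clear the flag under the lock; the spinning waiter
 * observes it on its next iteration and returns immediately. */
void wake(struct waiter *w)
{
	pthread_mutex_lock(&w->lock);
	w->spin_sleep = 0;
	pthread_mutex_unlock(&w->lock);
}

/* Waiter side: spin_sleep must already be 1 BEFORE the wait
 * condition was evaluated; then spin until a waker clears it. */
void spin_wait(struct waiter *w)
{
	for (;;) {
		int woken;

		pthread_mutex_lock(&w->lock);
		woken = (w->spin_sleep == 0);
		pthread_mutex_unlock(&w->lock);

		if (woken)
			return;

		sched_yield();	/* stands in for cpu_pause() */
	}
}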
void schedule(void)
{
	struct cpu_local_var *v;
@ -2684,7 +2834,9 @@ redo:
		restore_fp_regs(next);
	}

	ihk_mc_load_page_table(next->vm->address_space->page_table);
	if (prev && prev->vm->address_space->page_table !=
			next->vm->address_space->page_table)
		ihk_mc_load_page_table(next->vm->address_space->page_table);

	dkprintf("[%d] schedule: tlsblock_base: 0x%lX\n",
		ihk_mc_get_processor_id(), next->tlsblock_base);
@ -2759,39 +2911,38 @@ void check_need_resched(void)
	}
}

int
sched_wakeup_thread(struct thread *thread, int valid_states)
int __sched_wakeup_thread(struct thread *thread,
		int valid_states, int runq_locked)
{
	int status;
	int spin_slept = 0;
	unsigned long irqstate;
	struct cpu_local_var *v = get_cpu_local_var(thread->cpu_id);
	struct process *proc = thread->proc;
	struct mcs_rwlock_node updatelock;

	dkprintf("sched_wakeup_process,proc->pid=%d,valid_states=%08x,proc->status=%08x,proc->cpu_id=%d,my cpu_id=%d\n",
		proc->pid, valid_states, thread->status, thread->cpu_id, ihk_mc_get_processor_id());
	dkprintf("%s: proc->pid=%d, valid_states=%08x, "
		"proc->status=%08x, proc->cpu_id=%d,my cpu_id=%d\n",
		__FUNCTION__,
		proc->pid, valid_states, thread->status,
		thread->cpu_id, ihk_mc_get_processor_id());

	irqstate = ihk_mc_spinlock_lock(&(thread->spin_sleep_lock));
	if (thread->spin_sleep > 0) {
		dkprintf("sched_wakeup_process() spin wakeup: cpu_id: %d\n",
			thread->cpu_id);
	if (thread->spin_sleep == 1) {
		dkprintf("%s: spin wakeup: cpu_id: %d\n",
			__FUNCTION__, thread->cpu_id);

		spin_slept = 1;
		status = 0;
	}
	--thread->spin_sleep;
	thread->spin_sleep = 0;
	ihk_mc_spinlock_unlock(&(thread->spin_sleep_lock), irqstate);

	if (spin_slept) {
		return status;
	if (!runq_locked) {
		irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
	}

	irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));

	if (thread->status & valid_states) {
		mcs_rwlock_writer_lock_noirq(&proc->update_lock, &updatelock);
		if(proc->status != PS_EXITED)
		if (proc->status != PS_EXITED)
			proc->status = PS_RUNNING;
		mcs_rwlock_writer_unlock_noirq(&proc->update_lock, &updatelock);
		xchg4((int *)(&thread->status), PS_RUNNING);
@ -2801,18 +2952,32 @@ sched_wakeup_thread(struct thread *thread, int valid_states)
		status = -EINVAL;
	}

	ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
	if (!runq_locked) {
		ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
	}

	if (!status && (thread->cpu_id != ihk_mc_get_processor_id())) {
		dkprintf("sched_wakeup_process,issuing IPI,thread->cpu_id=%d\n",
			thread->cpu_id);
		ihk_mc_interrupt_cpu(get_x86_cpu_local_variable(thread->cpu_id)->apic_id,
			0xd1);
		dkprintf("%s: issuing IPI, thread->cpu_id=%d\n",
			__FUNCTION__, thread->cpu_id);
		ihk_mc_interrupt_cpu(
			get_x86_cpu_local_variable(thread->cpu_id)->apic_id,
			0xd1);
	}

	return status;
}

int sched_wakeup_thread_locked(struct thread *thread, int valid_states)
{
	return __sched_wakeup_thread(thread, valid_states, 1);
}

int sched_wakeup_thread(struct thread *thread, int valid_states)
{
	return __sched_wakeup_thread(thread, valid_states, 0);
}
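__sched_wakeup_thread() folds the two entry points into one body parameterized on whether the caller already holds the runq lock, so wake functions invoked from inside the scheduler do not self-deadlock by re-taking it. The wrapper shape, reduced to a skeleton with hypothetical names:

#include <pthread.h>

static pthread_mutex_t runq_lock = PTHREAD_MUTEX_INITIALIZER;

static int __wakeup(int *state, int locked)
{
	int ret;

	if (!locked)
		pthread_mutex_lock(&runq_lock);

	/* ... the real wake-up work happens here, under the lock ... */
	ret = (*state == 0) ? (*state = 1, 0) : -1;

	if (!locked)
		pthread_mutex_unlock(&runq_lock);
	return ret;
}

/* For callers that already hold runq_lock (e.g. wake functions run
 * from inside the scheduler). */
int wakeup_locked(int *state) { return __wakeup(state, 1); }

/* For everyone else: takes and releases the lock itself. */
int wakeup(int *state)        { return __wakeup(state, 0); }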

/*
 * 1. Add current process to waitq
 * 2. Queue migration request into the target CPU's queue
@ -2836,7 +3001,7 @@ void sched_request_migrate(int cpu_id, struct thread *thread)
	struct cpu_local_var *v = get_cpu_local_var(cpu_id);
	struct migrate_request req = { .thread = thread };
	unsigned long irqstate;
	DECLARE_WAITQ_ENTRY(entry, cpu_local_var(current));
	DECLARE_WAITQ_ENTRY_LOCKED(entry, cpu_local_var(current));

	waitq_init(&req.wq);
	waitq_prepare_to_wait(&req.wq, &entry, PS_UNINTERRUPTIBLE);
@ -2853,6 +3018,8 @@ void sched_request_migrate(int cpu_id, struct thread *thread)
	if (cpu_id != ihk_mc_get_processor_id())
		ihk_mc_interrupt_cpu(/* Kick scheduler */
			get_x86_cpu_local_variable(cpu_id)->apic_id, 0xd1);
	dkprintf("%s: tid: %d -> cpu: %d\n",
		__FUNCTION__, thread->tid, cpu_id);

	schedule();
	waitq_finish_wait(&req.wq, &entry);
@ -2916,6 +3083,7 @@ find_thread(int pid, int tid, struct mcs_rwlock_node_irqsave *lock)
	if(tid <= 0)
		return NULL;
	mcs_rwlock_reader_lock(&thash->lock[hash], lock);
retry:
	list_for_each_entry(thread, &thash->list[hash], hash_list){
		if(thread->tid == tid){
			if(pid <= 0)
@ -2924,6 +3092,13 @@ find_thread(int pid, int tid, struct mcs_rwlock_node_irqsave *lock)
			return thread;
		}
	}
	/* If no thread with pid == tid was found, then we may be looking for a
	 * specific thread (not the main thread of the process), try to find it
	 * based on tid only */
	if (pid > 0 && pid == tid) {
		pid = 0;
		goto retry;
	}
	mcs_rwlock_reader_unlock(&thash->lock[hash], lock);
	return NULL;
}
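find_thread() above first looks for an exact (pid, tid) match in the hash bucket; only when the caller asked for a "main thread" (pid == tid) and none exists does it retry matching on tid alone. A sketch of that two-pass lookup over a bucket list, with hypothetical types and no locking:

#include <stddef.h>

struct thr {
	int pid, tid;
	struct thr *next;
};

struct thr *find_thread_sketch(struct thr *bucket, int pid, int tid)
{
	struct thr *t;

retry:
	for (t = bucket; t; t = t->next) {
		if (t->tid != tid)
			continue;
		if (pid <= 0 || t->pid == pid)
			return t;
	}

	/* No (pid, tid) match: if the caller asked for a main thread
	 * (pid == tid), fall back to matching by tid only. */
	if (pid > 0 && pid == tid) {
		pid = 0;
		goto retry;
	}
	return NULL;
}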
@ -24,6 +24,7 @@
#include <page.h>
#include <mman.h>
#include <bitmap.h>
#include <init.h>

//#define DEBUG_PRINT_PROCFS

@ -75,11 +76,11 @@ procfs_delete_thread(struct thread *thread)
 *
 * \param rarg returned argument
 */
void
process_procfs_request(unsigned long rarg)
void process_procfs_request(struct ikc_scd_packet *rpacket)
{
	unsigned long rarg = rpacket->arg;
	unsigned long parg, pbuf;
	struct thread *thread = NULL;
	struct thread *thread = NULL;
	struct process *proc = NULL;
	struct process_vm *vm = NULL;
	struct procfs_read *r;
@ -160,7 +161,7 @@ process_procfs_request(unsigned long rarg)
	 */
	ret = sscanf(p, "%d/", &pid);
	if (ret == 1) {
		struct mcs_rwlock_node tlock;
		struct mcs_rwlock_node_irqsave tlock;
		int tids;
		struct thread *thread1 = NULL;

@ -177,7 +178,7 @@ process_procfs_request(unsigned long rarg)
		else
			tid = pid;

		mcs_rwlock_reader_lock_noirq(&proc->threads_lock, &tlock);
		mcs_rwlock_reader_lock(&proc->threads_lock, &tlock);
		list_for_each_entry(thread, &proc->threads_list, siblings_list){
			if(thread->tid == tid)
				break;
@ -187,15 +188,15 @@ process_procfs_request(unsigned long rarg)
		if(thread == NULL){
			kprintf("process_procfs_request: no such tid %d-%d\n", pid, tid);
			if(tids){
				mcs_rwlock_reader_unlock(&proc->threads_lock, &tlock);
				process_unlock(proc, &lock);
				mcs_rwlock_reader_unlock_noirq(&proc->threads_lock, &tlock);
				goto end;
			}
			thread = thread1;
		}
		if(thread)
			hold_thread(thread);
		mcs_rwlock_reader_unlock_noirq(&proc->threads_lock, &tlock);
		mcs_rwlock_reader_unlock(&proc->threads_lock, &tlock);
		hold_process(proc);
		vm = proc->vm;
		if(vm)
@ -408,6 +409,7 @@ process_procfs_request(unsigned long rarg)
	 */
#define BITMASKS_BUF_SIZE 2048
	if (strcmp(p, "status") == 0) {
		extern int num_processors;	/* kernel/ap.c */
		struct vm_range *range;
		unsigned long lockedsize = 0;
		char *tmp;
@ -443,7 +445,7 @@ process_procfs_request(unsigned long rarg)
		cpu_bitmask = &bitmasks[bitmasks_offset];
		bitmasks_offset += bitmap_scnprintf(cpu_bitmask,
			BITMASKS_BUF_SIZE - bitmasks_offset,
			thread->cpu_set.__bits, __CPU_SETSIZE);
			thread->cpu_set.__bits, num_processors);
		bitmasks_offset++;

		cpu_list = &bitmasks[bitmasks_offset];
@ -631,6 +633,7 @@ dataunavail:

	packet.msg = SCD_MSG_PROCFS_ANSWER;
	packet.arg = rarg;
	packet.pid = rpacket->pid;

	ret = ihk_ikc_send(syscall_channel, &packet, 0);
	if (ret < 0) {

@ -179,6 +179,7 @@ int shmobj_create(struct shmid_ds *ds, struct memobj **objp)

	memset(obj, 0, sizeof(*obj));
	obj->memobj.ops = &shmobj_ops;
	obj->memobj.size = ds->shm_segsz;
	obj->ds = *ds;
	obj->ds.shm_perm.seq = the_seq++;
	obj->ds.shm_nattch = 1;

1214 kernel/syscall.c
(File diff suppressed because it is too large.)
139 kernel/timer.c
@ -54,136 +54,75 @@ void init_timers(void)
|
||||
}
|
||||
|
||||
uint64_t schedule_timeout(uint64_t timeout)
|
||||
{
|
||||
struct waitq_entry my_wait;
|
||||
struct timer my_timer;
|
||||
{
|
||||
struct thread *thread = cpu_local_var(current);
|
||||
int irqstate;
|
||||
int spin_sleep;
|
||||
|
||||
irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
|
||||
dkprintf("schedule_timeout() spin sleep timeout: %lu\n", timeout);
|
||||
spin_sleep = ++thread->spin_sleep;
|
||||
ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);
|
||||
long irqstate;
|
||||
|
||||
/* Spin sleep.. */
|
||||
for (;;) {
|
||||
int need_schedule;
|
||||
struct cpu_local_var *v = get_this_cpu_local_var();
|
||||
uint64_t t_s = rdtsc();
|
||||
uint64_t t_e;
|
||||
int spin_over = 0;
|
||||
|
||||
|
||||
irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);

/* Woken up by someone? */
if (thread->spin_sleep < 1) {
if (thread->spin_sleep == 0) {
t_e = rdtsc();

spin_over = 1;
if ((t_e - t_s) < timeout) {
timeout -= (t_e - t_s);
}
else {
timeout = 1;
}

ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);
break;
}

ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);

if (!spin_over) {
t_s = rdtsc();
int need_schedule;
struct cpu_local_var *v = get_this_cpu_local_var();
int irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
need_schedule = v->runq_len > 1 ? 1 : 0;
/* Give a chance to another thread (if any) in case the core is
 * oversubscribed, but make sure we will be re-scheduled */
irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
need_schedule = v->runq_len > 1 ? 1 : 0;

if (need_schedule) {
xchg4(&(cpu_local_var(current)->status), PS_RUNNING);
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
schedule();

/* Give a chance to another thread (if any) in case the core is
 * oversubscribed, but make sure we will be re-scheduled */
if (need_schedule) {
xchg4(&(cpu_local_var(current)->status), PS_RUNNING);
schedule();
xchg4(&(cpu_local_var(current)->status),
PS_INTERRUPTIBLE);
}
else {
/* Spin wait */
while ((rdtsc() - t_s) < LOOP_TIMEOUT) {
cpu_pause();
}

if (timeout < LOOP_TIMEOUT) {
timeout = 0;
spin_over = 1;
}
else {
timeout -= LOOP_TIMEOUT;
}
}
/* Recheck if woken */
continue;
}
else {
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
}

if (spin_over) {
dkprintf("schedule_timeout() spin woken up, timeout: %lu\n",
timeout);

/* Give a chance to another thread (if any) in case we timed out,
 * but make sure we will be re-scheduled */
if (timeout == 0) {
int need_schedule;
struct cpu_local_var *v = get_this_cpu_local_var();

int irqstate =
ihk_mc_spinlock_lock(&(v->runq_lock));
need_schedule = v->runq_len > 1 ? 1 : 0;
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
/* Spin wait */
while ((rdtsc() - t_s) < LOOP_TIMEOUT) {
cpu_pause();
}

if (need_schedule) {
xchg4(&(cpu_local_var(current)->status), PS_RUNNING);
schedule();
xchg4(&(cpu_local_var(current)->status),
PS_INTERRUPTIBLE);
}
}

/* Timed out? */
if (timeout < LOOP_TIMEOUT) {
timeout = 0;

/* We are not sleeping any more */
irqstate = ihk_mc_spinlock_lock(&thread->spin_sleep_lock);
if (spin_sleep == thread->spin_sleep) {
--thread->spin_sleep;
}
thread->spin_sleep = 0;
ihk_mc_spinlock_unlock(&thread->spin_sleep_lock, irqstate);

return timeout;
break;
}
else {
timeout -= LOOP_TIMEOUT;
}
}

/* Init waitq and wait entry for this timer */
my_timer.timeout = (timeout < LOOP_TIMEOUT) ? LOOP_TIMEOUT : timeout;
my_timer.thread = cpu_local_var(current);
waitq_init(&my_timer.processes);
waitq_init_entry(&my_wait, cpu_local_var(current));

/* Add ourselves to the timer queue */
ihk_mc_spinlock_lock_noirq(&timers_lock);
list_add_tail(&my_timer.list, &timers);

dkprintf("schedule_timeout() sleep timeout: %lu\n", my_timer.timeout);

/* Add ourselves to the waitqueue and sleep */
waitq_prepare_to_wait(&my_timer.processes, &my_wait, PS_INTERRUPTIBLE);
ihk_mc_spinlock_unlock_noirq(&timers_lock);
schedule();
waitq_finish_wait(&my_timer.processes, &my_wait);

ihk_mc_spinlock_lock_noirq(&timers_lock);

/* Woken up by someone other than the timeout? */
if (my_timer.timeout) {
list_del(&my_timer.list);
}
ihk_mc_spinlock_unlock_noirq(&timers_lock);

dkprintf("schedule_timeout() woken up, timeout: %lu\n",
my_timer.timeout);

return my_timer.timeout;
return timeout;
}
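Editor's note: the schedule_timeout() hunk above implements a spin-then-sleep timeout. The thread burns its budget in LOOP_TIMEOUT-sized spin slices, yields via schedule() whenever the run queue holds more than one thread, and only parks itself on the timer wait queue once the spin budget is gone, so short waits wake with spin latency instead of timer latency. A minimal user-space sketch of the same shape, assuming a C11 environment; SPIN_SLICE_NS and the atomic wakeup flag are illustrative, not McKernel APIs.

#include <stdatomic.h>
#include <stdint.h>
#include <time.h>

#define SPIN_SLICE_NS 200000ull    /* analogue of LOOP_TIMEOUT */

static uint64_t now_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Returns the unused part of the timeout, like schedule_timeout(). */
static uint64_t wait_with_spin(atomic_int *woken, uint64_t timeout_ns)
{
    struct timespec rest;

    while (timeout_ns >= SPIN_SLICE_NS) {
        uint64_t t_s = now_ns();

        /* Spin for one slice, polling the wakeup flag. */
        while (now_ns() - t_s < SPIN_SLICE_NS) {
            if (atomic_load(woken))
                return timeout_ns;
        }
        timeout_ns -= SPIN_SLICE_NS;
    }

    /* Spin budget exhausted: block for the remainder instead. */
    rest.tv_sec = timeout_ns / 1000000000ull;
    rest.tv_nsec = timeout_ns % 1000000000ull;
    nanosleep(&rest, NULL);
    return 0;
}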
@ -22,6 +22,13 @@ default_wake_function(waitq_entry_t *entry, unsigned mode,
    return sched_wakeup_thread(entry->private, PS_NORMAL);
}

int
locked_wake_function(waitq_entry_t *entry, unsigned mode,
        int flags, void *key)
{
    return sched_wakeup_thread_locked(entry->private, PS_NORMAL);
}

void
waitq_init(waitq_t *waitq)
{
kernel/xpmem.c (new file, 739 lines)
@ -0,0 +1,739 @@
/**
 * \file xpmem.c
 * License details are found in the file LICENSE.
 * \brief
 * Cross Partition Memory (XPMEM) support.
 */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
 * Copyright 2010, 2014 Cray Inc. All Rights Reserved
 * Copyright 2015-2016 Los Alamos National Security, LLC. All rights reserved.
 */
/*
 * HISTORY
 */

#include <errno.h>
#include <kmalloc.h>
#include <limits.h>
#include <memobj.h>
#include <mman.h>
#include <string.h>
#include <types.h>
#include <vsprintf.h>
#include <ihk/lock.h>
#include <ihk/mm.h>
#include <xpmem_private.h>

struct xpmem_partition *xpmem_my_part = NULL; /* pointer to this partition */

int xpmem_open(
    ihk_mc_user_context_t *ctx)
{
    const char *pathname = (const char *)ihk_mc_syscall_arg0(ctx);
    int flags = (int)ihk_mc_syscall_arg1(ctx);
    int ret;
    struct thread *thread = cpu_local_var(current);
    struct process *proc = thread->proc;
    struct syscall_request request IHK_DMA_ALIGN;
    int fd;
    struct mckfd *mckfd;
    long irqstate;

    XPMEM_DEBUG("call: pathname=%s, flags=%d", pathname, flags);

    if (!xpmem_my_part) {
        ret = xpmem_init();
        if (ret) {
            return ret;
        }
    }

    request.number = __NR_open;
    request.args[0] = (unsigned long)pathname;
    request.args[1] = flags;
    fd = do_syscall(&request, ihk_mc_get_processor_id(), 0);
    if (fd < 0) {
        XPMEM_DEBUG("__NR_open error: fd=%d", fd);
        return fd;
    }

    ret = __xpmem_open();
    if (ret) {
        XPMEM_DEBUG("return: ret=%d", ret);
        return ret;
    }

    mckfd = kmalloc(sizeof(struct mckfd), IHK_MC_AP_NOWAIT);
    if (!mckfd) {
        return -ENOMEM;
    }
    XPMEM_DEBUG("kmalloc(): mckfd=0x%p", mckfd);
    memset(mckfd, 0, sizeof(struct mckfd));
    mckfd->fd = fd;
    mckfd->sig_no = -1;
    mckfd->ioctl_cb = xpmem_ioctl;
    mckfd->close_cb = xpmem_close;
    irqstate = ihk_mc_spinlock_lock(&proc->mckfd_lock);

    if (proc->mckfd == NULL) {
        proc->mckfd = mckfd;
        mckfd->next = NULL;
    } else {
        mckfd->next = proc->mckfd;
        proc->mckfd = mckfd;
    }

    ihk_mc_spinlock_unlock(&proc->mckfd_lock, irqstate);

    ihk_atomic_inc_return(&xpmem_my_part->n_opened);

    XPMEM_DEBUG("return: ret=%d", mckfd->fd);

    return mckfd->fd;
}

static int xpmem_ioctl(
    struct mckfd *mckfd,
    ihk_mc_user_context_t *ctx)
{
    int ret;
    unsigned int cmd = ihk_mc_syscall_arg1(ctx);
    unsigned long arg = ihk_mc_syscall_arg2(ctx);

    XPMEM_DEBUG("call: cmd=0x%x, arg=0x%lx", cmd, arg);

    switch (cmd) {
    case XPMEM_CMD_VERSION: {
        ret = XPMEM_CURRENT_VERSION;

        XPMEM_DEBUG("return: cmd=0x%x, ret=0x%lx", cmd, ret);

        return ret;
    }
    case XPMEM_CMD_MAKE: {
        struct xpmem_cmd_make make_info;
        xpmem_segid_t segid = 0;

        if (copy_from_user(&make_info, (void __user *)arg,
                sizeof(struct xpmem_cmd_make)))
            return -EFAULT;

        ret = xpmem_make(make_info.vaddr, make_info.size,
                make_info.permit_type,
                (void *)make_info.permit_value, &segid);
        if (ret != 0) {
            XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
            return ret;
        }

        if (copy_to_user(&((struct xpmem_cmd_make __user *)arg)->segid,
                (void *)&segid, sizeof(xpmem_segid_t))) {
            (void)xpmem_remove(segid);
            return -EFAULT;
        }

        XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);

        return ret;
    }
    case XPMEM_CMD_REMOVE: {
        struct xpmem_cmd_remove remove_info;

        if (copy_from_user(&remove_info, (void __user *)arg,
                sizeof(struct xpmem_cmd_remove)))
            return -EFAULT;

        ret = xpmem_remove(remove_info.segid);

        XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);

        return ret;
    }
    case XPMEM_CMD_GET: {
        struct xpmem_cmd_get get_info;
        // xpmem_apid_t apid = 0;

        if (copy_from_user(&get_info, (void __user *)arg,
                sizeof(struct xpmem_cmd_get)))
            return -EFAULT;

        // ret = xpmem_get(get_info.segid, get_info.flags,
        //        get_info.permit_type,
        //        (void *)get_info.permit_value, &apid); // TODO
        ret = -EINVAL;
        if (ret != 0) {
            XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
            return ret;
        }

        // if (copy_to_user(&((struct xpmem_cmd_get __user *)arg)->apid,
        //        (void *)&apid, sizeof(xpmem_apid_t))) {
        //    (void)xpmem_release(apid);
        //    return -EFAULT;
        // }

        XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);

        return ret;
    }
    case XPMEM_CMD_RELEASE: {
        struct xpmem_cmd_release release_info;

        if (copy_from_user(&release_info, (void __user *)arg,
                sizeof(struct xpmem_cmd_release)))
            return -EFAULT;

        // ret = xpmem_release(release_info.apid); // TODO
        ret = -EINVAL;

        XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);

        return ret;
    }
    case XPMEM_CMD_ATTACH: {
        struct xpmem_cmd_attach attach_info;
        // unsigned long at_vaddr = 0;

        if (copy_from_user(&attach_info, (void __user *)arg,
                sizeof(struct xpmem_cmd_attach)))
            return -EFAULT;

        // ret = xpmem_attach(mckfd, attach_info.apid, attach_info.offset,
        //        attach_info.size, attach_info.vaddr,
        //        attach_info.fd, attach_info.flags,
        //        &at_vaddr); // TODO
        ret = -EINVAL;
        if (ret != 0) {
            XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);
            return ret;
        }

        // if (copy_to_user(
        //        &((struct xpmem_cmd_attach __user *)arg)->vaddr,
        //        (void *)&at_vaddr, sizeof(unsigned long))) {
        //    (void)xpmem_detach(at_vaddr);
        //    return -EFAULT;
        // }

        XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);

        return ret;
    }
    case XPMEM_CMD_DETACH: {
        struct xpmem_cmd_detach detach_info;

        if (copy_from_user(&detach_info, (void __user *)arg,
                sizeof(struct xpmem_cmd_detach)))
            return -EFAULT;

        // ret = xpmem_detach(detach_info.vaddr); // TODO
        ret = -EINVAL;

        XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, ret);

        return ret;
    }
    default:
        break;
    }

    XPMEM_DEBUG("return: cmd=0x%x, ret=%d", cmd, -EINVAL);

    return -EINVAL;
}
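Editor's note: every XPMEM_CMD_* arm above follows the same convention: copy a fixed-size argument struct in with copy_from_user(), do the work, copy result fields back with copy_to_user(), and undo the visible side effect if the write-back faults (XPMEM_CMD_MAKE rolls back with xpmem_remove() when the segid copy-out fails). A hedged sketch of that shape against the same uaccess helpers; demo_cmd and the doubling "work" are hypothetical.

/* Shape of an ioctl command handler in the style used above. */
struct demo_cmd {
    long input;
    long result;    /* returned on success */
};

static int demo_ioctl(void *user_arg)
{
    struct demo_cmd info;

    if (copy_from_user(&info, user_arg, sizeof(info)))
        return -EFAULT;

    info.result = info.input * 2;    /* stand-in for the real work */

    if (copy_to_user(user_arg, &info, sizeof(info))) {
        /* Roll back any side effect here, as XPMEM_CMD_MAKE
         * does with xpmem_remove() on a failed write-back. */
        return -EFAULT;
    }
    return 0;
}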
static int xpmem_close(
    struct mckfd *mckfd,
    ihk_mc_user_context_t *ctx)
{
    struct xpmem_thread_group *tg;
    int index;
    struct mcs_rwlock_node_irqsave lock;
    int n_opened;

    XPMEM_DEBUG("call: fd=%d", mckfd->fd);

    n_opened = ihk_atomic_dec_return(&xpmem_my_part->n_opened);
    if (n_opened) {
        XPMEM_DEBUG("return: ret=%d, n_opened=%d", 0, n_opened);
        return 0;
    }
    XPMEM_DEBUG("n_opened=%d", n_opened);

    index = xpmem_tg_hashtable_index(cpu_local_var(current)->proc->pid);

    mcs_rwlock_writer_lock(&xpmem_my_part->tg_hashtable[index].lock, &lock);

    tg = xpmem_tg_ref_by_tgid_all_nolock(
        cpu_local_var(current)->proc->pid);
    if (!tg) {
        mcs_rwlock_writer_unlock(
            &xpmem_my_part->tg_hashtable[index].lock, &lock);
        return 0;
    }

    list_del_init(&tg->tg_hashlist);

    mcs_rwlock_writer_unlock(&xpmem_my_part->tg_hashtable[index].lock,
            &lock);

    XPMEM_DEBUG("tg->vm=0x%p", tg->vm);

    xpmem_destroy_tg(tg);

    if (!n_opened) {
        xpmem_exit();
    }

    XPMEM_DEBUG("return: ret=%d", 0);

    return 0;
}

static int xpmem_init(void)
{
    int i;

    XPMEM_DEBUG("call: ");

    xpmem_my_part = kmalloc(sizeof(struct xpmem_partition) +
        sizeof(struct xpmem_hashlist) * XPMEM_TG_HASHTABLE_SIZE,
        IHK_MC_AP_NOWAIT);
    if (xpmem_my_part == NULL) {
        return -ENOMEM;
    }
    XPMEM_DEBUG("kmalloc(): xpmem_my_part=0x%p", xpmem_my_part);
    memset(xpmem_my_part, 0, sizeof(struct xpmem_partition) +
        sizeof(struct xpmem_hashlist) * XPMEM_TG_HASHTABLE_SIZE);

    for (i = 0; i < XPMEM_TG_HASHTABLE_SIZE; i++) {
        mcs_rwlock_init(&xpmem_my_part->tg_hashtable[i].lock);
        INIT_LIST_HEAD(&xpmem_my_part->tg_hashtable[i].list);
    }

    ihk_atomic_set(&xpmem_my_part->n_opened, 0);

    XPMEM_DEBUG("return: ret=%d", 0);

    return 0;
}

static void xpmem_exit(void)
{
    XPMEM_DEBUG("call: ");

    if (xpmem_my_part) {
        XPMEM_DEBUG("kfree(): 0x%p", xpmem_my_part);
        kfree(xpmem_my_part);
        xpmem_my_part = NULL;
    }

    XPMEM_DEBUG("return: ");
}

static int __xpmem_open(void)
{
    struct xpmem_thread_group *tg;
    int index;
    struct mcs_rwlock_node_irqsave lock;

    XPMEM_DEBUG("call: ");

    tg = xpmem_tg_ref_by_tgid(cpu_local_var(current)->proc->pid);
    if (!IS_ERR(tg)) {
        xpmem_tg_deref(tg);
        XPMEM_DEBUG("return: ret=%d, tg=0x%p", 0, tg);
        return 0;
    }

    tg = kmalloc(sizeof(struct xpmem_thread_group) +
        sizeof(struct xpmem_hashlist) * XPMEM_AP_HASHTABLE_SIZE,
        IHK_MC_AP_NOWAIT);
    if (tg == NULL) {
        return -ENOMEM;
    }
    XPMEM_DEBUG("kmalloc(): tg=0x%p", tg);
    memset(tg, 0, sizeof(struct xpmem_thread_group) +
        sizeof(struct xpmem_hashlist) * XPMEM_AP_HASHTABLE_SIZE);

    ihk_mc_spinlock_init(&tg->lock);
    tg->tgid = cpu_local_var(current)->proc->pid;
    tg->uid = cpu_local_var(current)->proc->ruid;
    tg->gid = cpu_local_var(current)->proc->rgid;
    ihk_atomic_set(&tg->uniq_segid, 0);
    ihk_atomic_set(&tg->uniq_apid, 0);
    mcs_rwlock_init(&tg->seg_list_lock);
    INIT_LIST_HEAD(&tg->seg_list);
    ihk_atomic_set(&tg->n_pinned, 0);
    INIT_LIST_HEAD(&tg->tg_hashlist);
    tg->vm = cpu_local_var(current)->vm;
    ihk_atomic_set(&tg->n_recall_PFNs, 0);

    for (index = 0; index < XPMEM_AP_HASHTABLE_SIZE; index++) {
        mcs_rwlock_init(&tg->ap_hashtable[index].lock);
        INIT_LIST_HEAD(&tg->ap_hashtable[index].list);
    }

    xpmem_tg_not_destroyable(tg);

    index = xpmem_tg_hashtable_index(tg->tgid);
    mcs_rwlock_writer_lock(&xpmem_my_part->tg_hashtable[index].lock, &lock);

    list_add_tail(&tg->tg_hashlist,
        &xpmem_my_part->tg_hashtable[index].list);

    mcs_rwlock_writer_unlock(&xpmem_my_part->tg_hashtable[index].lock,
            &lock);

    tg->group_leader = cpu_local_var(current);

    XPMEM_DEBUG("return: ret=%d", 0);

    return 0;
}

static void xpmem_destroy_tg(
    struct xpmem_thread_group *tg)
{
    XPMEM_DEBUG("call: tg=0x%p", tg);

    XPMEM_DEBUG("tg->vm=0x%p", tg->vm);

    xpmem_tg_destroyable(tg);
    xpmem_tg_deref(tg);

    XPMEM_DEBUG("return: ");
}

static int xpmem_make(
    unsigned long vaddr,
    size_t size,
    int permit_type,
    void *permit_value,
    xpmem_segid_t *segid_p)
{
    xpmem_segid_t segid;
    struct xpmem_thread_group *seg_tg;
    struct xpmem_segment *seg;
    struct mcs_rwlock_node_irqsave lock;

    XPMEM_DEBUG("call: vaddr=0x%lx, size=%lu, permit_type=%d, "
        "permit_value=0%04lo",
        vaddr, size, permit_type,
        (unsigned long)(uintptr_t)permit_value);

    if (permit_type != XPMEM_PERMIT_MODE ||
        ((unsigned long)(uintptr_t)permit_value & ~00777) ||
        size == 0) {
        XPMEM_DEBUG("return: ret=%d", -EINVAL);
        return -EINVAL;
    }

    seg_tg = xpmem_tg_ref_by_tgid(cpu_local_var(current)->proc->pid);
    if (IS_ERR(seg_tg)) {
        DBUG_ON(PTR_ERR(seg_tg) != -ENOENT);
        return -XPMEM_ERRNO_NOPROC;
    }

    /*
     * The start of the segment must be page aligned and it must be a
     * multiple of pages in size.
     */
    if (offset_in_page(vaddr) != 0 || offset_in_page(size) != 0) {
        xpmem_tg_deref(seg_tg);
        XPMEM_DEBUG("return: ret=%d", -EINVAL);
        return -EINVAL;
    }

    segid = xpmem_make_segid(seg_tg);
    if (segid < 0) {
        xpmem_tg_deref(seg_tg);
        return segid;
    }

    /* create a new struct xpmem_segment structure with a unique segid */
    seg = kmalloc(sizeof(struct xpmem_segment), IHK_MC_AP_NOWAIT);
    if (seg == NULL) {
        xpmem_tg_deref(seg_tg);
        return -ENOMEM;
    }
    XPMEM_DEBUG("kmalloc(): seg=0x%p", seg);
    memset(seg, 0, sizeof(struct xpmem_segment));

    ihk_mc_spinlock_init(&seg->lock);
    mcs_rwlock_init(&seg->seg_lock);
    seg->segid = segid;
    seg->vaddr = vaddr;
    seg->size = size;
    seg->permit_type = permit_type;
    seg->permit_value = permit_value;
    seg->tg = seg_tg;
    INIT_LIST_HEAD(&seg->ap_list);
    INIT_LIST_HEAD(&seg->seg_list);

    xpmem_seg_not_destroyable(seg);

    /* add seg to its tg's list of segs */
    mcs_rwlock_writer_lock(&seg_tg->seg_list_lock, &lock);
    list_add_tail(&seg->seg_list, &seg_tg->seg_list);
    mcs_rwlock_writer_unlock(&seg_tg->seg_list_lock, &lock);

    xpmem_tg_deref(seg_tg);

    *segid_p = segid;

    XPMEM_DEBUG("return: ret=%d, segid=0x%lx", 0, *segid_p);

    return 0;
}

static xpmem_segid_t xpmem_make_segid(
    struct xpmem_thread_group *seg_tg)
{
    struct xpmem_id segid;
    xpmem_segid_t *segid_p = (xpmem_segid_t *)&segid;
    int uniq;

    XPMEM_DEBUG("call: seg_tg=0x%p, uniq_segid=%d",
        seg_tg, ihk_atomic_read(&seg_tg->uniq_segid));

    DBUG_ON(sizeof(struct xpmem_id) != sizeof(xpmem_segid_t));

    uniq = ihk_atomic_inc_return(&seg_tg->uniq_segid);
    if (uniq > XPMEM_MAX_UNIQ_ID) {
        ihk_atomic_dec(&seg_tg->uniq_segid);
        return -EBUSY;
    }

    *segid_p = 0;
    segid.tgid = seg_tg->tgid;
    segid.uniq = (unsigned long)uniq;

    DBUG_ON(*segid_p <= 0);

    XPMEM_DEBUG("return: segid=0x%lx, segid.tgid=%d, segid.uniq=%d",
        segid, segid.tgid, segid.uniq);

    return *segid_p;
}
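Editor's note: xpmem_make_segid() relies on struct xpmem_id and xpmem_segid_t being the same 64 bits, so a segid is just the pair (tgid, uniq) reinterpreted as one signed integer; that is also what lets xpmem_tg_ref_by_segid() below recover the owning thread group via xpmem_segid_to_tgid(). The real field layout lives in xpmem_private.h, which is not part of this diff, so this shift-based sketch merely assumes a 32/32 split.

#include <stdint.h>

/* Pack/unpack sketch: low 32 bits tgid, high 32 bits the per-group
 * counter. The authoritative layout is struct xpmem_id. */
static int64_t pack_segid(int32_t tgid, uint32_t uniq)
{
    return ((int64_t)uniq << 32) | (uint32_t)tgid;
}

static int32_t segid_to_tgid(int64_t segid)
{
    return (int32_t)(segid & 0xffffffff);
}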
static int xpmem_remove(
    xpmem_segid_t segid)
{
    struct xpmem_thread_group *seg_tg;
    struct xpmem_segment *seg;

    XPMEM_DEBUG("call: segid=0x%lx", segid);

    if (segid <= 0) {
        XPMEM_DEBUG("return: ret=%d", -EINVAL);
        return -EINVAL;
    }

    seg_tg = xpmem_tg_ref_by_segid(segid);
    if (IS_ERR(seg_tg))
        return PTR_ERR(seg_tg);

    if (cpu_local_var(current)->proc->pid != seg_tg->tgid) {
        xpmem_tg_deref(seg_tg);
        XPMEM_DEBUG("return: ret=%d", -EACCES);
        return -EACCES;
    }

    seg = xpmem_seg_ref_by_segid(seg_tg, segid);
    if (IS_ERR(seg)) {
        xpmem_tg_deref(seg_tg);
        return PTR_ERR(seg);
    }
    DBUG_ON(seg->tg != seg_tg);

    xpmem_remove_seg(seg_tg, seg);
    xpmem_seg_deref(seg);
    xpmem_tg_deref(seg_tg);

    XPMEM_DEBUG("return: ret=%d", 0);

    return 0;
}

static void xpmem_remove_seg(
    struct xpmem_thread_group *seg_tg,
    struct xpmem_segment *seg)
{
    DBUG_ON(ihk_atomic_read(&seg->refcnt) <= 0);
    struct mcs_rwlock_node_irqsave seg_lock;
    struct mcs_rwlock_node_irqsave lock;

    XPMEM_DEBUG("call: tgid=%d, segid=0x%lx", seg_tg->tgid, seg->segid);

    ihk_mc_spinlock_lock(&seg->lock);
    if (seg->flags & XPMEM_FLAG_DESTROYING) {
        ihk_mc_spinlock_unlock_noirq(&seg->lock);
        schedule();
        return;
    }
    seg->flags |= XPMEM_FLAG_DESTROYING;
    ihk_mc_spinlock_unlock_noirq(&seg->lock);

    mcs_rwlock_writer_lock(&seg->seg_lock, &seg_lock);

    /* unpin pages and clear PTEs for each attachment to this segment */
    xpmem_clear_PTEs(seg);

    /* indicate that the segment has been destroyed */
    ihk_mc_spinlock_lock(&seg->lock);
    seg->flags |= XPMEM_FLAG_DESTROYED;
    ihk_mc_spinlock_unlock_noirq(&seg->lock);

    /* Remove segment structure from its tg's list of segs */
    mcs_rwlock_writer_lock(&seg_tg->seg_list_lock, &lock);
    list_del_init(&seg->seg_list);
    mcs_rwlock_writer_unlock(&seg_tg->seg_list_lock, &lock);

    mcs_rwlock_writer_unlock(&seg->seg_lock, &seg_lock);

    xpmem_seg_destroyable(seg);

    XPMEM_DEBUG("return: ");
}

static void xpmem_clear_PTEs(
    struct xpmem_segment *seg)
{
    XPMEM_DEBUG("call: seg=0x%p", seg);

    // xpmem_clear_PTEs_range(seg, seg->vaddr, seg->vaddr + seg->size, 0); // TODO

    XPMEM_DEBUG("return: ");
}

static struct xpmem_thread_group * __xpmem_tg_ref_by_tgid_nolock_internal(
    pid_t tgid,
    int index,
    int return_destroying)
{
    struct xpmem_thread_group *tg;

    XPMEM_DEBUG("call: tgid=%d, index=%d, return_destroying=%d",
        tgid, index, return_destroying);

    list_for_each_entry(tg, &xpmem_my_part->tg_hashtable[index].list,
            tg_hashlist) {
        if (tg->tgid == tgid) {
            if ((tg->flags & XPMEM_FLAG_DESTROYING) &&
                    !return_destroying) {
                continue;
            }

            xpmem_tg_ref(tg);

            XPMEM_DEBUG("return: tg=0x%p", tg);
            return tg;
        }
    }

    XPMEM_DEBUG("return: tg=0x%p", ERR_PTR(-ENOENT));

    return ERR_PTR(-ENOENT);
}

static struct xpmem_thread_group * xpmem_tg_ref_by_segid(
    xpmem_segid_t segid)
{
    struct xpmem_thread_group *tg;

    XPMEM_DEBUG("call: segid=0x%lx", segid);

    tg = xpmem_tg_ref_by_tgid(xpmem_segid_to_tgid(segid));

    XPMEM_DEBUG("return: tg=0x%p", tg);

    return tg;
}

static void xpmem_tg_deref(
    struct xpmem_thread_group *tg)
{
    XPMEM_DEBUG("call: tg=0x%p", tg);

    DBUG_ON(ihk_atomic_read(&tg->refcnt) <= 0);
    if (ihk_atomic_dec_return(&tg->refcnt) != 0) {
        XPMEM_DEBUG("return: tg->refcnt=%d", tg->refcnt);
        return;
    }

    XPMEM_DEBUG("kfree(): tg=0x%p", tg);
    kfree(tg);

    XPMEM_DEBUG("return: ");
}

static struct xpmem_segment * xpmem_seg_ref_by_segid(
    struct xpmem_thread_group *seg_tg,
    xpmem_segid_t segid)
{
    struct xpmem_segment *seg;
    struct mcs_rwlock_node_irqsave lock;

    XPMEM_DEBUG("call: seg_tg=0x%p, segid=0x%lx", seg_tg, segid);

    mcs_rwlock_reader_lock(&seg_tg->seg_list_lock, &lock);

    list_for_each_entry(seg, &seg_tg->seg_list, seg_list) {
        if (seg->segid == segid) {
            if (seg->flags & XPMEM_FLAG_DESTROYING)
                continue;

            xpmem_seg_ref(seg);
            mcs_rwlock_reader_unlock(&seg_tg->seg_list_lock, &lock);
            return seg;
        }
    }

    mcs_rwlock_reader_unlock(&seg_tg->seg_list_lock, &lock);

    return ERR_PTR(-ENOENT);
}

static void xpmem_seg_deref(
    struct xpmem_segment *seg)
{
    XPMEM_DEBUG("call: seg=0x%p", seg);

    DBUG_ON(ihk_atomic_read(&seg->refcnt) <= 0);
    if (ihk_atomic_dec_return(&seg->refcnt) != 0) {
        XPMEM_DEBUG("return: seg->refcnt=%d", seg->refcnt);
        return;
    }

    DBUG_ON(!(seg->flags & XPMEM_FLAG_DESTROYING));

    XPMEM_DEBUG("kfree(): seg=0x%p", seg);
    kfree(seg);

    XPMEM_DEBUG("return: ");
}
@ -102,6 +102,7 @@ static int alloc_zeroobj(void)

    memset(obj, 0, sizeof(*obj));
    obj->memobj.ops = &zeroobj_ops;
    obj->memobj.size = 0;
    page_list_init(obj);
    ihk_mc_spinlock_init(&obj->memobj.lock);

@ -49,6 +49,7 @@ struct ihk_mc_cpu_info {
    int ncpus;
    int *hw_ids;
    int *nodes;
    int *linux_cpu_ids;
};

struct ihk_mc_cpu_info *ihk_mc_get_cpu_info(void);
@ -56,6 +57,9 @@ void ihk_mc_boot_cpu(int cpuid, unsigned long pc);
int ihk_mc_get_processor_id(void);
int ihk_mc_get_hardware_processor_id(void);
int ihk_mc_get_numa_id(void);
int ihk_mc_get_nr_cores();
int ihk_mc_get_core(int id, unsigned long *linux_core_id, unsigned long *apic_id,
        int *numa_id);

void ihk_mc_delay_us(int us);
void ihk_mc_set_syscall_handler(long (*handler)(int, ihk_mc_user_context_t *));
@ -34,18 +34,25 @@ enum ihk_mc_gma_type {
    IHK_MC_RESERVED_AREA_END,
};

extern unsigned long bootstrap_mem_end;

enum ihk_mc_ma_type {
    IHK_MC_MA_AVAILABLE,
    IHK_MC_MA_RESERVED,
    IHK_MC_MA_SPECIAL,
};

enum ihk_mc_ap_flag {
    IHK_MC_AP_FLAG,
    IHK_MC_AP_CRITICAL, /* panic on no memory space */
    IHK_MC_AP_NOWAIT, /* error return on no memory space */
    IHK_MC_AP_WAIT /* wait on no memory space */
};
typedef unsigned long ihk_mc_ap_flag;
/* Panic on no memory space */
#define IHK_MC_AP_CRITICAL 0x000001
/* Error return on no memory space */
#define IHK_MC_AP_NOWAIT 0x000002
/* Wait on no memory space */
#define IHK_MC_AP_WAIT 0x000004
#define IHK_MC_AP_USER 0x001000

#define IHK_MC_AP_BANDWIDTH 0x010000
#define IHK_MC_AP_LATENCY 0x020000

enum ihk_mc_pt_prepare_flag {
    IHK_MC_PT_FIRST_LEVEL,
@ -79,10 +86,10 @@ void ihk_mc_reserve_arch_pages(struct ihk_page_allocator_desc *pa_allocator,
        unsigned long, unsigned long, int));

struct ihk_mc_pa_ops {
    void *(*alloc_page)(int, int, enum ihk_mc_ap_flag);
    void *(*alloc_page)(int, int, ihk_mc_ap_flag, int node);
    void (*free_page)(void *, int);

    void *(*alloc)(int, enum ihk_mc_ap_flag);
    void *(*alloc)(int, ihk_mc_ap_flag);
    void (*free)(void *);
};
@ -103,17 +110,20 @@ void ihk_mc_map_micpa(unsigned long host_pa, unsigned long* mic_pa);
int ihk_mc_free_micpa(unsigned long mic_pa);
void ihk_mc_clean_micpa(void);

void *_ihk_mc_alloc_aligned_pages(int npages, int p2align,
        enum ihk_mc_ap_flag flag, char *file, int line);
#define ihk_mc_alloc_aligned_pages(npages, p2align, flag) ({\
    void *r = _ihk_mc_alloc_aligned_pages(npages, p2align, flag, __FILE__, __LINE__);\
void *_ihk_mc_alloc_aligned_pages_node(int npages, int p2align,
        ihk_mc_ap_flag flag, int node, char *file, int line);
#define ihk_mc_alloc_aligned_pages_node(npages, p2align, flag, node) ({\
    void *r = _ihk_mc_alloc_aligned_pages_node(npages, p2align, flag, node, __FILE__, __LINE__);\
    r;\
})

#define ihk_mc_alloc_aligned_pages(npages, p2align, flag) ({\
    void *r = _ihk_mc_alloc_aligned_pages_node(npages, p2align, flag, -1, __FILE__, __LINE__);\
    r;\
})

void *_ihk_mc_alloc_pages(int npages, enum ihk_mc_ap_flag flag,
        char *file, int line);
#define ihk_mc_alloc_pages(npages, flag) ({\
    void *r = _ihk_mc_alloc_pages(npages, flag, __FILE__, __LINE__);\
    void *r = _ihk_mc_alloc_aligned_pages_node(npages, PAGE_P2ALIGN, flag, -1, __FILE__, __LINE__);\
    r;\
})
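Editor's note: this hunk funnels all page allocation through the new NUMA-aware _ihk_mc_alloc_aligned_pages_node() entry point; the old macro names survive as wrappers that pass node = -1, which from the wrappers' own use appears to mean "no node preference". A hypothetical call site under that assumption:

static void *alloc_near_or_anywhere(int numa_id)
{
    /* Prefer a specific NUMA node when locality matters... */
    void *near = ihk_mc_alloc_aligned_pages_node(4, PAGE_P2ALIGN,
            IHK_MC_AP_NOWAIT, numa_id);

    /* ...and fall back to the node-agnostic wrapper when not. */
    return near ? near : ihk_mc_alloc_pages(4, IHK_MC_AP_NOWAIT);
}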
@ -160,10 +170,14 @@ int visit_pte_range(page_table_t pt, void *start, void *end, int pgshift,
int move_pte_range(page_table_t pt, struct process_vm *vm,
        void *src, void *dest, size_t size);

struct page_table *ihk_mc_pt_create(enum ihk_mc_ap_flag ap_flag);
struct page_table *ihk_mc_pt_create(ihk_mc_ap_flag ap_flag);
/* XXX: proper use of struct page_table and page_table_t is unknown */
void ihk_mc_pt_destroy(struct page_table *pt);
void ihk_mc_load_page_table(struct page_table *pt);
int ihk_mc_pt_virt_to_phys_size(struct page_table *pt,
        const void *virt,
        unsigned long *phys,
        unsigned long *size);
int ihk_mc_pt_virt_to_phys(struct page_table *pt,
        const void *virt, unsigned long *phys);
uint64_t ihk_mc_pt_virt_to_pagemap(struct page_table *pt, unsigned long virt);
@ -181,6 +195,9 @@ int ihk_mc_get_memory_chunk(int id,
void remote_flush_tlb_cpumask(struct process_vm *vm,
        unsigned long addr, int cpu_id);

int ihk_set_kmsg(unsigned long addr, unsigned long size);
char *ihk_get_kargs();

extern void (*__tlb_flush_handler)(int vector);

struct tlb_flush_entry {
@ -17,11 +17,17 @@
#include <list.h>

/* XXX: Physical memory management shouldn't be part of IHK */
struct node_distance {
    int id;
    int distance;
};

struct ihk_mc_numa_node {
    int id;
    int linux_numa_id;
    int type;
    struct list_head allocators;
    struct node_distance *nodes_by_distance;
};

struct ihk_page_allocator_desc {
@ -30,7 +36,7 @@ struct ihk_page_allocator_desc {
    unsigned int count;
    unsigned int flag;
    unsigned int shift;
    ihk_spinlock_t lock;
    mcs_lock_node_t lock;
    struct list_head list;

    unsigned long map[0];
lib/include/mc_xpmem.h (new file, 153 lines)
@ -0,0 +1,153 @@
/**
 * \file mc_xpmem.h
 * License details are found in the file LICENSE.
 * \brief
 * Cross Partition Memory (XPMEM) structures and macros.
 */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2007 Silicon Graphics, Inc. All Rights Reserved.
 */
/*
 * HISTORY
 */

#ifndef _MC_XPMEM_H
#define _MC_XPMEM_H

#ifndef __KERNEL__
#include <sys/types.h>
#endif

/*
 * _IOC definitions for McKernel
 */
#define _IOC_NRBITS 8
#define _IOC_TYPEBITS 8

#define _IOC_SIZEBITS 14

#define _IOC_DIRBITS 2

#define _IOC_NRSHIFT 0
#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)

#define _IOC_NONE 0U

#define _IOC(dir,type,nr,size) \
    (((dir) << _IOC_DIRSHIFT) | \
    ((type) << _IOC_TYPESHIFT) | \
    ((nr) << _IOC_NRSHIFT) | \
    ((size) << _IOC_SIZESHIFT))

#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)

/*
 * basic argument type definitions for McKernel
 */
typedef uint64_t u64;
typedef uint64_t __u64;
typedef int64_t __s64;

/*
 * basic argument type definitions
 */
typedef __s64 xpmem_segid_t; /* segid returned from xpmem_make() */
typedef __s64 xpmem_apid_t; /* apid returned from xpmem_get() */

struct xpmem_addr {
    xpmem_apid_t apid; /* apid that represents memory */
    off_t offset; /* offset into apid's memory */
};

#define XPMEM_MAXADDR_SIZE (size_t)(-1L)

/*
 * path to XPMEM device
 */
#define XPMEM_DEV_PATH "/dev/xpmem"

/*
 * The following are the possible XPMEM related errors.
 */
#define XPMEM_ERRNO_NOPROC 2004 /* unknown thread due to fork() */

/*
 * flags for segment permissions
 */
#define XPMEM_RDONLY 0x1
#define XPMEM_RDWR 0x2

/*
 * Valid permit_type values for xpmem_make().
 */
#define XPMEM_PERMIT_MODE 0x1

/*
 * ioctl() commands used to interface to the kernel module.
 */
#define XPMEM_IOC_MAGIC 'x'
#define XPMEM_CMD_VERSION _IO(XPMEM_IOC_MAGIC, 0)
#define XPMEM_CMD_MAKE _IO(XPMEM_IOC_MAGIC, 1)
#define XPMEM_CMD_REMOVE _IO(XPMEM_IOC_MAGIC, 2)
#define XPMEM_CMD_GET _IO(XPMEM_IOC_MAGIC, 3)
#define XPMEM_CMD_RELEASE _IO(XPMEM_IOC_MAGIC, 4)
#define XPMEM_CMD_ATTACH _IO(XPMEM_IOC_MAGIC, 5)
#define XPMEM_CMD_DETACH _IO(XPMEM_IOC_MAGIC, 6)

/*
 * Structures used with the preceding ioctl() commands to pass data.
 */
struct xpmem_cmd_make {
    __u64 vaddr;
    size_t size;
    int permit_type;
    __u64 permit_value;
    xpmem_segid_t segid; /* returned on success */
};

struct xpmem_cmd_remove {
    xpmem_segid_t segid;
};

struct xpmem_cmd_get {
    xpmem_segid_t segid;
    int flags;
    int permit_type;
    __u64 permit_value;
    xpmem_apid_t apid; /* returned on success */
};

struct xpmem_cmd_release {
    xpmem_apid_t apid;
};

struct xpmem_cmd_attach {
    xpmem_apid_t apid;
    off_t offset;
    size_t size;
    __u64 vaddr;
    int fd;
    int flags;
};

struct xpmem_cmd_detach {
    __u64 vaddr;
};

#ifndef __KERNEL__
extern int xpmem_version(void);
extern xpmem_segid_t xpmem_make(void *, size_t, int, void *);
extern int xpmem_remove(xpmem_segid_t);
extern xpmem_apid_t xpmem_get(xpmem_segid_t, int, int, void *);
extern int xpmem_release(xpmem_apid_t);
extern void *xpmem_attach(struct xpmem_addr, size_t, void *);
extern int xpmem_detach(void *);
#endif

#endif /* _MC_XPMEM_H */
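Editor's note: a user-space sketch of driving this interface directly through the device file, assuming mc_xpmem.h is on the include path. In this commit only XPMEM_CMD_VERSION, XPMEM_CMD_MAKE and XPMEM_CMD_REMOVE are wired up in kernel/xpmem.c; GET/ATTACH/RELEASE/DETACH still return -EINVAL.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "mc_xpmem.h"

int main(void)
{
    void *buf;
    struct xpmem_cmd_make make;
    int fd = open(XPMEM_DEV_PATH, O_RDWR);

    if (fd < 0 || posix_memalign(&buf, 4096, 4096) != 0)
        return 1;

    make.vaddr = (__u64)(uintptr_t)buf;    /* must be page aligned */
    make.size = 4096;                      /* must be a page multiple */
    make.permit_type = XPMEM_PERMIT_MODE;
    make.permit_value = 0600;

    if (ioctl(fd, XPMEM_CMD_MAKE, &make) == 0)
        printf("exported segid 0x%llx\n",
            (unsigned long long)make.segid);

    close(fd);
    return 0;
}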
@ -35,8 +35,8 @@ void *phys_to_virt(unsigned long p);
int copy_from_user(void *dst, const void *src, size_t siz);
int strlen_user(const char *s);
int strcpy_from_user(char *dst, const char *src);
long getlong_user(const long *p);
int getint_user(const int *p);
long getlong_user(long *dest, const long *p);
int getint_user(int *dest, const int *p);
int read_process_vm(struct process_vm *vm, void *kdst, const void *usrc, size_t siz);
int copy_to_user(void *dst, const void *src, size_t siz);
int setlong_user(long *dst, long data);
@ -14,6 +14,7 @@
#define __STRING_H

#include <types.h>
#include <arch-string.h>

size_t strlen(const char *p);
size_t strnlen(const char *p, size_t maxlen);
@ -29,6 +30,12 @@ void *memcpy_long(void *dest, const void *src, size_t n);
int memcmp(const void *s1, const void *s2, size_t n);
void *memset(void *s, int n, size_t l);

#ifdef ARCH_FAST_MEMCPY
#define fast_memcpy __inline_memcpy
#else
#define fast_memcpy memcpy
#endif

extern int snprintf(char * buf, size_t size, const char *fmt, ...);
extern int sprintf(char * buf, const char *fmt, ...);
extern int sscanf(const char * buf, const char * fmt, ...);
@ -19,7 +19,6 @@
#include <memory.h>
#include <bitops.h>

void *allocate_pages(int npages, enum ihk_mc_ap_flag flag);
void free_pages(void *, int npages);

#define MAP_INDEX(n) ((n) >> 6)
@ -73,7 +72,7 @@ void *__ihk_pagealloc_init(unsigned long start, unsigned long size,
    //kprintf("page allocator @ %lx - %lx (%d)\n", start, start + size,
    //    page_shift);

    ihk_mc_spinlock_init(&desc->lock);
    mcs_lock_init(&desc->lock);

    /* Reserve align padding area */
    for (i = mapsize; i < mapaligned * 8; i++) {
@ -99,12 +98,12 @@ void ihk_pagealloc_destroy(void *__desc)
static unsigned long __ihk_pagealloc_large(struct ihk_page_allocator_desc *desc,
        int npages, int p2align)
{
    unsigned long flags;
    unsigned int i, j, mi;
    int nblocks;
    int nfrags;
    unsigned long mask;
    int mialign;
    unsigned long align_mask = ((PAGE_SIZE << p2align) - 1);
    mcs_lock_node_t node;

    nblocks = (npages / 64);
    mask = -1;
@ -113,14 +112,13 @@ static unsigned long __ihk_pagealloc_large(struct ihk_page_allocator_desc *desc,
        ++nblocks;
        mask = (1UL << nfrags) - 1;
    }
    mialign = (p2align <= 6)? 1: (1 << (p2align - 6));

    flags = ihk_mc_spinlock_lock(&desc->lock);
    mcs_lock_lock(&desc->lock, &node);
    for (i = 0, mi = desc->last; i < desc->count; i++, mi++) {
        if (mi >= desc->count) {
            mi = 0;
        }
        if ((mi + nblocks >= desc->count) || (mi % mialign)) {
        if ((mi + nblocks >= desc->count) || (ADDRESS(desc, mi, 0) & align_mask)) {
            continue;
        }
        for (j = mi; j < mi + nblocks - 1; j++) {
@ -133,11 +131,11 @@ static unsigned long __ihk_pagealloc_large(struct ihk_page_allocator_desc *desc,
            desc->map[j] = (unsigned long)-1;
        }
        desc->map[j] |= mask;
        ihk_mc_spinlock_unlock(&desc->lock, flags);
        mcs_lock_unlock(&desc->lock, &node);
        return ADDRESS(desc, mi, 0);
        }
    }
    ihk_mc_spinlock_unlock(&desc->lock, flags);
    mcs_lock_unlock(&desc->lock, &node);

    return 0;
}
@ -147,8 +145,9 @@ unsigned long ihk_pagealloc_alloc(void *__desc, int npages, int p2align)
    struct ihk_page_allocator_desc *desc = __desc;
    unsigned int i, mi;
    int j;
    unsigned long v, mask, flags;
    unsigned long v, mask;
    int jalign;
    mcs_lock_node_t node;

    if ((npages >= 32) || (p2align >= 5)) {
        return __ihk_pagealloc_large(desc, npages, p2align);
@ -157,7 +156,7 @@ unsigned long ihk_pagealloc_alloc(void *__desc, int npages, int p2align)
    mask = (1UL << npages) - 1;
    jalign = (p2align <= 0)? 1: (1 << p2align);

    flags = ihk_mc_spinlock_lock(&desc->lock);
    mcs_lock_lock(&desc->lock, &node);
    for (i = 0, mi = desc->last; i < desc->count; i++, mi++) {
        if (mi >= desc->count) {
            mi = 0;
@ -174,12 +173,12 @@ unsigned long ihk_pagealloc_alloc(void *__desc, int npages, int p2align)
            if (!(v & (mask << j))) { /* free */
                desc->map[mi] |= (mask << j);

                ihk_mc_spinlock_unlock(&desc->lock, flags);
                mcs_lock_unlock(&desc->lock, &node);
                return ADDRESS(desc, mi, j);
            }
        }
    }
    ihk_mc_spinlock_unlock(&desc->lock, flags);
    mcs_lock_unlock(&desc->lock, &node);

    /* We use null pointer for failure */
    return 0;
@ -189,7 +188,7 @@ void ihk_pagealloc_reserve(void *__desc, unsigned long start, unsigned long end)
{
    int i, n;
    struct ihk_page_allocator_desc *desc = __desc;
    unsigned long flags;
    mcs_lock_node_t node;

    n = (end + (1 << desc->shift) - 1 - desc->start) >> desc->shift;
    i = ((start - desc->start) >> desc->shift);
@ -197,7 +196,7 @@ void ihk_pagealloc_reserve(void *__desc, unsigned long start, unsigned long end)
        return;
    }

    flags = ihk_mc_spinlock_lock(&desc->lock);
    mcs_lock_lock(&desc->lock, &node);
    for (; i < n; i++) {
        if (!(i & 63) && i + 63 < n) {
            desc->map[MAP_INDEX(i)] = (unsigned long)-1L;
@ -206,7 +205,7 @@ void ihk_pagealloc_reserve(void *__desc, unsigned long start, unsigned long end)
            desc->map[MAP_INDEX(i)] |= (1UL << MAP_BIT(i));
        }
    }
    ihk_mc_spinlock_unlock(&desc->lock, flags);
    mcs_lock_unlock(&desc->lock, &node);
}

void ihk_pagealloc_free(void *__desc, unsigned long address, int npages)
@ -214,24 +213,24 @@ void ihk_pagealloc_free(void *__desc, unsigned long address, int npages)
    struct ihk_page_allocator_desc *desc = __desc;
    int i;
    unsigned mi;
    unsigned long flags;
    mcs_lock_node_t node;

    /* XXX: Parameter check */
    flags = ihk_mc_spinlock_lock(&desc->lock);
    mcs_lock_lock(&desc->lock, &node);
    mi = (address - desc->start) >> desc->shift;
    for (i = 0; i < npages; i++, mi++) {
        desc->map[MAP_INDEX(mi)] &= ~(1UL << MAP_BIT(mi));
    }
    ihk_mc_spinlock_unlock(&desc->lock, flags);
    mcs_lock_unlock(&desc->lock, &node);
}

unsigned long ihk_pagealloc_count(void *__desc)
{
    struct ihk_page_allocator_desc *desc = __desc;
    unsigned long i, j, n = 0;
    unsigned long flags;
    mcs_lock_node_t node;

    flags = ihk_mc_spinlock_lock(&desc->lock);
    mcs_lock_lock(&desc->lock, &node);
    /* XXX: Very silly counting */
    for (i = 0; i < desc->count; i++) {
        for (j = 0; j < 64; j++) {
@ -240,7 +239,7 @@ unsigned long ihk_pagealloc_count(void *__desc)
            }
        }
    }
    ihk_mc_spinlock_unlock(&desc->lock, flags);
    mcs_lock_unlock(&desc->lock, &node);

    return n;
}
@ -250,10 +249,11 @@ int ihk_pagealloc_query_free(void *__desc)
    struct ihk_page_allocator_desc *desc = __desc;
    unsigned int mi;
    int j;
    unsigned long v, flags;
    unsigned long v;
    int npages = 0;
    mcs_lock_node_t node;

    flags = ihk_mc_spinlock_lock(&desc->lock);
    mcs_lock_lock(&desc->lock, &node);
    for (mi = 0; mi < desc->count; mi++) {

        v = desc->map[mi];
@ -266,7 +266,7 @@ int ihk_pagealloc_query_free(void *__desc)
            }
        }
    }
    ihk_mc_spinlock_unlock(&desc->lock, flags);
    mcs_lock_unlock(&desc->lock, &node);

    return npages;
}
@ -276,11 +276,12 @@ void __ihk_pagealloc_zero_free_pages(void *__desc)
{
    struct ihk_page_allocator_desc *desc = __desc;
    unsigned int mi;
    int j;
    unsigned long v, flags;
    unsigned long v;
    mcs_lock_node_t node;

kprintf("zeroing free memory... ");

    flags = ihk_mc_spinlock_lock(&desc->lock);
    mcs_lock_lock(&desc->lock, &node);
    for (mi = 0; mi < desc->count; mi++) {

        v = desc->map[mi];
@ -294,7 +295,7 @@ kprintf("zeroing free memory... ");
            }
        }
    }
    ihk_mc_spinlock_unlock(&desc->lock, flags);
    mcs_lock_unlock(&desc->lock, &node);

    kprintf("\nzeroing done\n");
}
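Editor's note: the conversion above replaces the allocator's IRQ-saving spinlock with an MCS queue lock; every critical section now supplies a stack-local mcs_lock_node_t, so each contending CPU spins on its own queue node instead of hammering one shared cache line. The IHK implementation itself is not part of this diff; the following generic C11 sketch of the MCS algorithm is for reference only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
    struct mcs_node *_Atomic next;
    atomic_bool locked;
};

struct mcs_lock {
    struct mcs_node *_Atomic tail;
};

static void mcs_acquire(struct mcs_lock *lk, struct mcs_node *self)
{
    struct mcs_node *prev;

    atomic_store(&self->next, NULL);
    atomic_store(&self->locked, true);
    prev = atomic_exchange(&lk->tail, self);    /* join the queue */
    if (prev) {
        atomic_store(&prev->next, self);
        while (atomic_load(&self->locked))
            ;    /* spin on our own node only */
    }
}

static void mcs_release(struct mcs_lock *lk, struct mcs_node *self)
{
    struct mcs_node *succ = atomic_load(&self->next);

    if (!succ) {
        struct mcs_node *expect = self;

        /* No visible successor: try to close the queue. */
        if (atomic_compare_exchange_strong(&lk->tail, &expect, NULL))
            return;
        while (!(succ = atomic_load(&self->next)))
            ;    /* a successor is mid-enqueue */
    }
    atomic_store(&succ->locked, false);    /* hand the lock off */
}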
lib/string.c (35 lines changed)
@ -275,13 +275,21 @@ int flatten_strings_from_user(int nr_strings, char *first, char **strings, char
    long *_flat;
    char *p;
    long r;
    int n;
    int n, ret;

    /* How many strings do we have? */
    if (nr_strings == -1) {
        for (nr_strings = 0; (r = getlong_user((void *)(strings + nr_strings))) > 0; ++nr_strings);
        if (r < 0)
            return r;
        nr_strings = 0;
        for (;;) {
            ret = getlong_user(&r, (void *)(strings + nr_strings));
            if (ret < 0)
                return ret;

            if (r == 0)
                break;

            ++nr_strings;
        }
    }

    /* Count full length */
@ -295,13 +303,19 @@ int flatten_strings_from_user(int nr_strings, char *first, char **strings, char
    }

    for (string_i = 0; string_i < nr_strings; ++string_i) {
        char *userp = (char *)getlong_user((void *)(strings + string_i));
        int len = strlen_user(userp);
        char *userp;
        int len;

        ret = getlong_user((long *)&userp, (void *)(strings + string_i));
        if (ret < 0)
            return ret;

        len = strlen_user(userp);

        if (len < 0)
            return len;
        // Pointer + actual value
        full_len += sizeof(char *) + len + 1;
        full_len += sizeof(char *) + len + 1;
    }

    full_len = (full_len + sizeof(long) - 1) & ~(sizeof(long) - 1);
@ -326,8 +340,13 @@ int flatten_strings_from_user(int nr_strings, char *first, char **strings, char
    }

    for (string_i = 0; string_i < nr_strings; ++string_i) {
        char *userp = (char *)getlong_user((void *)(strings + string_i));
        char *userp;
        _flat[n++] = p - (char *)_flat;

        ret = getlong_user((long *)&userp, (void *)(strings + string_i));
        if (ret < 0)
            return ret;

        strcpy_from_user(p, userp);
        p = strchr(p, '\0') + 1;
    }
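Editor's note: the rewrite above tracks getlong_user()'s new contract (status in the return value, the fetched long delivered through *dest), so a user pointer whose numeric value happens to be negative is no longer misread as an error, and a NULL vector terminator can be told apart from a fault. A self-contained sketch of the contract and the caller pattern; fetch_ok() stands in for the real user-memory check.

#include <errno.h>
#include <stddef.h>

static int fetch_ok(const long *p) { return p != NULL; }

/* New-style accessor: status and data are separate channels. */
static int getlong_user_sketch(long *dest, const long *p)
{
    if (!fetch_ok(p))
        return -EFAULT;
    *dest = *p;
    return 0;
}

/* Caller pattern mirroring flatten_strings_from_user() above. */
static int count_strings(char **strings)
{
    int n = 0;
    long v;

    for (;;) {
        int ret = getlong_user_sketch(&v, (long *)(strings + n));

        if (ret < 0)
            return ret;
        if (v == 0)
            break;    /* NULL pointer ends the vector */
        ++n;
    }
    return n;
}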