Merge remote branch 'origin/master' into gdb

This commit is contained in:
Naoki Hamada
2014-07-23 10:29:07 +09:00
11 changed files with 435 additions and 33 deletions

View File

@ -14,8 +14,6 @@
* machines where multiplications are slow.
*/
#define BITS_PER_LONG 64
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */

View File

@ -208,6 +208,9 @@ struct process_vm {
// 2. addition of process page table (allocate_pages, update_process_page_table)
// note that physical memory allocator (ihk_mc_alloc_pages, ihk_pagealloc_alloc)
// is protected by its own lock (see ihk/manycore/generic/page_alloc.c)
cpu_set_t cpu_set;
ihk_spinlock_t cpu_set_lock;
};
@ -267,4 +270,7 @@ int sched_wakeup_process(struct process *proc, int valid_states);
void sched_request_migrate(int cpu_id, struct process *proc);
void check_need_resched(void);
void cpu_set(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock);
void cpu_clear(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock);
#endif

View File

@ -33,6 +33,8 @@
#endif
#include <cls.h>
#include <page.h>
#include <bitops.h>
#include <cpulocal.h>
//#define DEBUG_PRINT_MEM
@ -50,6 +52,8 @@ static struct page *pa_pages;
extern int ihk_mc_pt_print_pte(struct page_table *pt, void *virt);
struct tlb_flush_entry tlb_flush_vector[IHK_TLB_FLUSH_IRQ_VECTOR_SIZE];
static void reserve_pages(unsigned long start, unsigned long end, int type)
{
if (start < pa_start) {
@ -257,6 +261,106 @@ static void unhandled_page_fault(struct process *proc, void *fault_addr, void *r
return;
}
/*
 * remote_flush_tlb_cpumask(): request a TLB flush on all other CPUs that
 * currently run in @vm's address space, and wait until every one of them
 * has acknowledged the request.
 *
 * @vm:     address space whose mappings changed; its cpu_set tells which
 *          CPUs must be interrupted.
 * @addr:   virtual address to flush; 0 requests a full TLB flush on the
 *          remote cores.
 * @cpu_id: NOTE(review): appears unused in this function body — confirm
 *          whether it is needed or can be dropped from the signature.
 *
 * Mechanism: a tlb_flush_entry slot is picked from tlb_flush_vector[] (by
 * page number for a single-address flush, pseudo-randomly via rdtsc() for
 * a full flush), filled in under its lock, and each target CPU is sent an
 * IPI whose vector encodes the slot index. Each target decrements
 * ->pending from tlb_flush_handler(); this function spins until the
 * counter reaches zero before releasing the slot lock.
 */
void remote_flush_tlb_cpumask(struct process_vm *vm,
		unsigned long addr, int cpu_id)
{
	unsigned long cpu;
	int flush_ind;
	struct tlb_flush_entry *flush_entry;
	cpu_set_t _cpu_set;

	if (addr) {
		/* Hash the page number into a flush vector slot */
		flush_ind = (addr >> PAGE_SHIFT) % IHK_TLB_FLUSH_IRQ_VECTOR_SIZE;
	}
	/* Zero address denotes full TLB flush */
	else {
		/* Random.. */
		flush_ind = (rdtsc()) % IHK_TLB_FLUSH_IRQ_VECTOR_SIZE;
	}

	flush_entry = &tlb_flush_vector[flush_ind];

	/* Take a copy of the cpu set so that we don't hold the lock
	 * all the way while interrupting other cores */
	ihk_mc_spinlock_lock_noirq(&vm->cpu_set_lock);
	memcpy(&_cpu_set, &vm->cpu_set, sizeof(cpu_set_t));
	ihk_mc_spinlock_unlock_noirq(&vm->cpu_set_lock);

	dkprintf("trying to aquire flush_entry->lock flush_ind: %d\n", flush_ind);

	/* The slot lock serializes concurrent flush requests that hash to
	 * the same vector entry; held until all targets have acknowledged */
	ihk_mc_spinlock_lock_noirq(&flush_entry->lock);

	flush_entry->vm = vm;
	flush_entry->addr = addr;
	ihk_atomic_set(&flush_entry->pending, 0);

	dkprintf("lock aquired, iterating cpu mask.. flush_ind: %d\n", flush_ind);

	/* Loop through CPUs in this address space and interrupt them for
	 * TLB flush on the specified address */
	for_each_set_bit(cpu, (const unsigned long*)&_cpu_set.__bits, CPU_SETSIZE) {

		/* The local core flushes its own TLB elsewhere; skip it */
		if (ihk_mc_get_processor_id() == cpu)
			continue;

		/* Increment before sending the IPI so the handler's decrement
		 * can never drive the counter negative */
		ihk_atomic_inc(&flush_entry->pending);
		dkprintf("remote_flush_tlb_cpumask: flush_ind: %d, addr: 0x%lX, interrupting cpu: %d\n",
		         flush_ind, addr, cpu);

		ihk_mc_interrupt_cpu(get_x86_cpu_local_variable(cpu)->apic_id,
		                     flush_ind + IHK_TLB_FLUSH_IRQ_VECTOR_START);
	}

#ifdef DEBUG_IC_TLB
	{
		unsigned long tsc;
		tsc = rdtsc() + 12884901888; /* 1.2GHz =>10 sec */
#endif
		/* Wait for all cores */
		while (ihk_atomic_read(&flush_entry->pending) != 0) {
			cpu_pause();

#ifdef DEBUG_IC_TLB
			if (rdtsc() > tsc) {
				kprintf("waited 10 secs for remote TLB!! -> panic_all()\n");
				panic_all_cores("waited 10 secs for remote TLB!!\n");
			}
#endif
		}
#ifdef DEBUG_IC_TLB
	}
#endif

	ihk_mc_spinlock_unlock_noirq(&flush_entry->lock);
}
/*
 * tlb_flush_handler(): IPI handler for remote TLB flush requests.
 *
 * @vector: interrupt vector; (vector - IHK_TLB_FLUSH_IRQ_VECTOR_START)
 *          selects the tlb_flush_vector[] slot that the initiator
 *          (remote_flush_tlb_cpumask()) filled in.
 *
 * Fix: perform the flush BEFORE decrementing ->pending. The initiator
 * spins on pending reaching zero and then releases flush_entry->lock,
 * after which the slot (including ->addr) may be overwritten by the next
 * request. Decrementing first therefore (a) let this core read a stale
 * or reused ->addr, and (b) let the initiator return before the remote
 * flush had actually been performed. Reading ->addr into a local and
 * decrementing last closes both windows.
 */
void tlb_flush_handler(int vector)
{
	unsigned long addr;
	int flags = cpu_disable_interrupt_save();

	struct tlb_flush_entry *flush_entry = &tlb_flush_vector[vector -
		IHK_TLB_FLUSH_IRQ_VECTOR_START];

	/* Snapshot the target address while the slot is still guaranteed
	 * valid (the initiator holds the slot lock until pending == 0) */
	addr = flush_entry->addr;

	dkprintf("flusing TLB for addr: 0x%lX\n", addr);
	if (addr) {
		flush_tlb_single(addr & PAGE_MASK);
	}
	/* Zero address denotes full TLB flush */
	else {
		flush_tlb();
	}

	dkprintf("decreasing pending cnt for %d\n",
		vector - IHK_TLB_FLUSH_IRQ_VECTOR_START);

	/* Signal completion only after the flush has been carried out */
	ihk_atomic_dec(&flush_entry->pending);

	cpu_restore_interrupt(flags);
}
static void page_fault_handler(void *fault_addr, uint64_t reason, void *regs)
{
struct process *proc = cpu_local_var(current);

View File

@ -116,6 +116,8 @@ static int init_process_vm(struct process *owner, struct process_vm *vm)
vm->page_table = pt;
hold_process(owner);
vm->owner_process = owner;
memset(&vm->cpu_set, 0, sizeof(cpu_set_t));
ihk_mc_spinlock_init(&vm->cpu_set_lock);
return 0;
}
@ -169,6 +171,9 @@ struct process *create_process(unsigned long user_pc)
goto err_free_sigshared;
}
cpu_set(ihk_mc_get_processor_id(), &proc->vm->cpu_set,
&proc->vm->cpu_set_lock);
ihk_mc_spinlock_init(&proc->spin_sleep_lock);
proc->spin_sleep = 0;
@ -389,7 +394,7 @@ static int copy_user_ranges(struct process *proc, struct process *org)
/* Set up new PTE */
attr = arch_vrflag_to_ptattr(range->flag, PF_POPULATE, NULL);
if (ihk_mc_pt_set_range(proc->vm->page_table, vaddr,
if (ihk_mc_pt_set_range(proc->vm->page_table, proc->vm, vaddr,
vaddr + pgsize, virt_to_phys(pg_vaddr), attr)) {
kprintf("ERROR: copy_user_ranges() "
"(%p,%lx-%lx %lx,%lx):"
@ -634,7 +639,7 @@ int free_process_memory_range(struct process_vm *vm, struct vm_range *range)
if (range->memobj) {
memobj_lock(range->memobj);
}
error = ihk_mc_pt_free_range(vm->page_table,
error = ihk_mc_pt_free_range(vm->page_table, vm,
(void *)start, (void *)end,
(range->flag & VR_PRIVATE)? NULL: range->memobj);
if (range->memobj) {
@ -650,7 +655,7 @@ int free_process_memory_range(struct process_vm *vm, struct vm_range *range)
}
else {
ihk_mc_spinlock_lock_noirq(&vm->page_table_lock);
error = ihk_mc_pt_clear_range(vm->page_table,
error = ihk_mc_pt_clear_range(vm->page_table, vm,
(void *)start, (void *)end);
ihk_mc_spinlock_unlock_noirq(&vm->page_table_lock);
if (error && (error != -ENOENT)) {
@ -1227,7 +1232,8 @@ static int page_fault_process_memory_range(struct process_vm *vm, struct vm_rang
}
}
else {
error = ihk_mc_pt_set_range(vm->page_table, pgaddr, pgaddr+pgsize, phys, attr);
error = ihk_mc_pt_set_range(vm->page_table, vm, pgaddr, pgaddr + pgsize,
phys, attr);
if (error) {
kprintf("page_fault_process_memory_range(%p,%lx-%lx %lx,%lx,%lx):set_range failed. %d\n", vm, range->start, range->end, range->flag, fault_addr, reason, error);
goto out;
@ -1366,7 +1372,7 @@ int init_process_stack(struct process *process, struct program_load_desc *pn,
return -ENOMEM;
}
memset(stack, 0, minsz);
error = ihk_mc_pt_set_range(process->vm->page_table,
error = ihk_mc_pt_set_range(process->vm->page_table, process->vm,
(void *)(end-minsz), (void *)end,
virt_to_phys(stack),
arch_vrflag_to_ptattr(vrflag, PF_POPULATE, NULL));
@ -1532,7 +1538,8 @@ int remove_process_region(struct process *proc,
ihk_mc_spinlock_lock_noirq(&proc->vm->page_table_lock);
/* We defer freeing to the time of exit */
// XXX: check error
ihk_mc_pt_clear_range(proc->vm->page_table, (void *)start, (void *)end);
ihk_mc_pt_clear_range(proc->vm->page_table, proc->vm,
(void *)start, (void *)end);
ihk_mc_spinlock_unlock_noirq(&proc->vm->page_table_lock);
return 0;
@ -1656,6 +1663,10 @@ void destroy_process(struct process *proc)
struct sig_pending *pending;
struct sig_pending *next;
if (proc->vm) {
cpu_clear(proc->cpu_id, &proc->vm->cpu_set, &proc->vm->cpu_set_lock);
}
free_process_memory(proc);
if(ihk_atomic_dec_and_test(&proc->sighandler->use)){
@ -1685,6 +1696,22 @@ void release_process(struct process *proc)
destroy_process(proc);
}
/*
 * cpu_set(): add @cpu to @cpu_set under @lock.
 * IRQ-saving spinlock variant so it is safe from any context that
 * ihk_mc_spinlock_lock() supports.
 */
void cpu_set(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock)
{
	unsigned int irqstate = ihk_mc_spinlock_lock(lock);

	CPU_SET(cpu, cpu_set);
	ihk_mc_spinlock_unlock(lock, irqstate);
}
/*
 * cpu_clear(): remove @cpu from @cpu_set under @lock.
 * Mirror of cpu_set(); same IRQ-saving locking discipline.
 */
void cpu_clear(int cpu, cpu_set_t *cpu_set, ihk_spinlock_t *lock)
{
	unsigned int irqstate = ihk_mc_spinlock_lock(lock);

	CPU_CLR(cpu, cpu_set);
	ihk_mc_spinlock_unlock(lock, irqstate);
}
static void idle(void)
{
cpu_local_var(status) = CPU_STATUS_IDLE;

View File

@ -1278,6 +1278,8 @@ SYSCALL_DECLARE(clone)
return -ENOMEM;
}
cpu_set(cpuid, &new->vm->cpu_set, &new->vm->cpu_set_lock);
if (clone_flags & CLONE_VM) {
new->pid = cpu_local_var(current)->pid;
@ -2592,7 +2594,7 @@ SYSCALL_DECLARE(mremap)
if (oldsize > 0) {
size = (oldsize < newsize)? oldsize: newsize;
ihk_mc_spinlock_lock_noirq(&vm->page_table_lock);
error = move_pte_range(vm->page_table,
error = move_pte_range(vm->page_table, vm,
(void *)oldstart, (void *)newstart,
size);
ihk_mc_spinlock_unlock_noirq(&vm->page_table_lock);