Release the resources of the process at exit(2)/exit_group(2).

NAKAMURA Gou
2013-05-28 10:49:33 +09:00
parent 9354c82ee7
commit fa1be382c7
8 changed files with 140 additions and 27 deletions
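
For orientation, the following is a minimal, self-contained sketch of the reference-counting lifecycle this commit introduces, with C11 atomics standing in for ihk_atomic_t and the VM/page-table teardown stubbed out; the PS_* values and the calloc-based allocation are illustrative assumptions, not the kernel's definitions.

#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative status bits; the real PS_* values live in the kernel headers. */
enum { PS_RUNNING = 0x01, PS_ZOMBIE = 0x10, PS_EXITED = 0x20 };

struct process {
	atomic_int refcount;
	int status;
};

static struct process *create_process(void)
{
	struct process *proc = calloc(1, sizeof(*proc));
	if (proc == NULL)
		return NULL;
	/* one reference for exit, another for wait -- as in the diff below */
	atomic_init(&proc->refcount, 2);
	proc->status = PS_RUNNING;
	return proc;
}

static void hold_process(struct process *proc)
{
	/* e.g. the process_vm taking a reference on its owner_process */
	atomic_fetch_add(&proc->refcount, 1);
}

static void free_process(struct process *proc)
{
	/* whoever drops the last reference frees the process (kernel stack pages) */
	if (atomic_fetch_sub(&proc->refcount, 1) == 1)
		free(proc);
}

int main(void)
{
	struct process *proc = create_process();
	if (proc == NULL)
		return 1;
	hold_process(proc);   /* init_process_vm(): the vm holds its owner */
	proc->status = PS_EXITED;
	free_process(proc);   /* exit(2)/exit_group(2): detached, drop the wait ref */
	free_process(proc);   /* free_process_memory(): vm teardown drops its hold */
	free_process(proc);   /* schedule(): the next process drops the exit ref */
	return 0;
}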

View File

@@ -4,8 +4,14 @@
(X86_CPU_LOCAL_OFFSET_TSS + X86_TSS_OFFSET_SP0)
.text
.globl ihk_mc_switch_context
ihk_mc_switch_context:
/*
* rdi - ihk_mc_kernel_context_t *old_ctx
* rsi - ihk_mc_kernel_context_t *new_ctx
* rdx - void *prev
*/
pushfq
popq %rax
testq %rdi, %rdi
@@ -35,4 +41,5 @@ ihk_mc_switch_context:
popfq
movq 8(%rsi), %rbp
movq 24(%rsi), %rsi
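/* hand the caller's 'prev' (%rdx) to the resumed context as the return value */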
movq %rdx,%rax
retq

View File

@@ -42,6 +42,12 @@
#define PT_ENTRIES 512
/* mask for the physical address field of a page-table entry */
#define PT_PHYSMASK (((1UL << 52) - 1) & PAGE_MASK)
#define PF_PRESENT 0x01 /* entry is valid */
#define PF_SIZE 0x80 /* entry points to a large page */
#define PFL4_PRESENT 0x01
#define PFL4_WRITABLE 0x02
#define PFL4_USER 0x04

View File

@@ -557,6 +557,50 @@ struct page_table *ihk_mc_pt_create(void)
return pt;
}
static void destroy_page_table(int level, struct page_table *pt)
{
int ix;
unsigned long entry;
struct page_table *lower;
if ((level < 1) || (4 < level)) {
panic("destroy_page_table: level is out of range");
}
if (pt == NULL) {
panic("destroy_page_table: pt is NULL");
}
if (level > 1) {
for (ix = 0; ix < PT_ENTRIES; ++ix) {
entry = pt->entry[ix];
if (!(entry & PF_PRESENT)) {
/* entry is not valid */
continue;
}
if (entry & PF_SIZE) {
/* not a page table */
continue;
}
lower = (struct page_table *)phys_to_virt(entry & PT_PHYSMASK);
destroy_page_table(level-1, lower);
}
}
arch_free_page(pt);
return;
}
void ihk_mc_pt_destroy(struct page_table *pt)
{
const int level = 4; /* PML4 */
/* clear the shared kernel-space entries (upper half of the PML4) so they are not freed below */
memset(pt->entry + PT_ENTRIES / 2, 0, sizeof(pt->entry[0]) * PT_ENTRIES / 2);
destroy_page_table(level, pt);
return;
}
int ihk_mc_pt_clear_page(page_table_t pt, void *virt)
{
return __clear_pt_page(pt, virt, 0);
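
The memset in ihk_mc_pt_destroy above zeroes the upper half of the PML4 before the recursive free: those 256 entries cover the kernel half of the canonical x86-64 address space, which every process shares and which therefore must not be freed per-process. A small illustrative calculation of that split, assuming the usual 4-level layout in which one PML4 entry spans 512 GiB:

#include <stdio.h>

int main(void)
{
	unsigned long span = 1UL << 39;	/* bytes mapped by one PML4 entry: 512 GiB */
	/* entries 0..255 map user space and are freed by destroy_page_table() */
	printf("user half  : entries 0..255   -> 0x0 .. 0x%lx\n", 256 * span - 1);
	/* entries 256..511 map shared kernel space: zeroed, never freed */
	printf("kernel half: entries 256..511 -> cleared by ihk_mc_pt_destroy()\n");
	return 0;
}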

View File

@@ -50,6 +50,7 @@ struct process {
int status;
int cpu_id;
ihk_atomic_t refcount;
struct process_vm *vm;
ihk_mc_kernel_context_t ctx;
@@ -73,6 +74,7 @@ struct process_vm {
struct page_table *page_table;
struct list_head vm_range_list;
struct vm_regions region;
struct process *owner_process; /* the process that resides on the same pages as this vm */
// Address space private futexes
struct futex_queue futex_queues[1 << FUTEX_HASHBITS];
@@ -90,7 +92,8 @@ struct process_vm {
struct process *create_process(unsigned long user_pc);
struct process *clone_process(struct process *org,
unsigned long pc, unsigned long sp);
void destroy_process(struct process *proc);
void hold_process(struct process *proc);
void free_process(struct process *proc);
void free_process_memory(struct process *proc);
int add_process_memory_range(struct process *process,

View File

@@ -23,7 +23,7 @@
extern long do_arch_prctl(unsigned long code, unsigned long address);
void init_process_vm(struct process_vm *vm)
static void init_process_vm(struct process *owner, struct process_vm *vm)
{
int i;
@@ -33,6 +33,8 @@ void init_process_vm(struct process_vm *vm)
ihk_atomic_set(&vm->refcount, 1);
INIT_LIST_HEAD(&vm->vm_range_list);
vm->page_table = ihk_mc_pt_create();
hold_process(owner);
vm->owner_process = owner;
/* Initialize futex queues */
for (i = 0; i < (1 << FUTEX_HASHBITS); ++i)
@@ -49,6 +51,7 @@ struct process *create_process(unsigned long user_pc)
return NULL;
memset(proc, 0, sizeof(struct process));
ihk_atomic_set(&proc->refcount, 2); /* one for exit, another for wait */
ihk_mc_init_user_process(&proc->ctx, &proc->uctx,
((char *)proc) +
@@ -56,7 +59,7 @@ struct process *create_process(unsigned long user_pc)
proc->vm = (struct process_vm *)(proc + 1);
init_process_vm(proc->vm);
init_process_vm(proc, proc->vm);
ihk_mc_spinlock_init(&proc->spin_sleep_lock);
proc->spin_sleep = 0;
@@ -72,6 +75,7 @@ struct process *clone_process(struct process *org, unsigned long pc,
proc = ihk_mc_alloc_pages(KERNEL_STACK_NR_PAGES, 0);
memset(proc, 0, KERNEL_STACK_NR_PAGES);
ihk_atomic_set(&proc->refcount, 2); /* one for exit, another for wait */
/* NOTE: sp is the user mode stack! */
ihk_mc_init_user_process(&proc->ctx, &proc->uctx,
@@ -394,12 +398,18 @@ extern void print_free_list(void);
void free_process_memory(struct process *proc)
{
struct vm_range *range, *next;
struct process_vm *vm = proc->vm;
if (!ihk_atomic_dec_and_test(&proc->vm->refcount)) {
if (vm == NULL) {
return;
}
list_for_each_entry_safe(range, next, &proc->vm->vm_range_list,
proc->vm = NULL;
if (!ihk_atomic_dec_and_test(&vm->refcount)) {
return;
}
list_for_each_entry_safe(range, next, &vm->vm_range_list,
list) {
if (!(range->flag & VR_REMOTE) &&
!(range->flag & VR_IO_NOCACHE) &&
@@ -411,13 +421,28 @@ void free_process_memory(struct process *proc)
list_del(&range->list);
ihk_mc_free(range);
}
/* TODO: Free page tables */
proc->status = PS_ZOMBIE;
ihk_mc_pt_destroy(vm->page_table);
free_process(vm->owner_process);
}
void destroy_process(struct process *proc)
void hold_process(struct process *proc)
{
ihk_mc_free_pages(proc, 1);
if (proc->status & (PS_ZOMBIE | PS_EXITED)) {
panic("hold_process: already exited process");
}
ihk_atomic_inc(&proc->refcount);
return;
}
void free_process(struct process *proc)
{
if (!ihk_atomic_dec_and_test(&proc->refcount)) {
return;
}
ihk_mc_free_pages(proc, KERNEL_STACK_NR_PAGES);
}
static void idle(void)
@@ -465,6 +490,7 @@ void schedule(void)
struct process *next, *prev, *proc, *tmp = NULL;
int switch_ctx = 0;
unsigned long irqstate;
struct process *last;
irqstate = ihk_mc_spinlock_lock(&(v->runq_lock));
@@ -477,10 +503,14 @@ void schedule(void)
--v->runq_len;
/* Round-robin if not exited yet */
if (prev->status != PS_EXITED) {
if (!(prev->status & (PS_ZOMBIE | PS_EXITED))) {
list_add_tail(&prev->sched_list, &(v->runq));
++v->runq_len;
}
if (!v->runq_len) {
v->status = CPU_STATUS_IDLE;
}
}
/* Pick a new running process */
@@ -501,7 +531,6 @@ void schedule(void)
v->current = next;
}
if (switch_ctx) {
dkprintf("[%d] schedule: %d => %d \n",
ihk_mc_get_processor_id(),
@@ -518,10 +547,15 @@ void schedule(void)
ihk_mc_spinlock_unlock(&(v->runq_lock), irqstate);
if (prev) {
ihk_mc_switch_context(&prev->ctx, &next->ctx);
last = ihk_mc_switch_context(&prev->ctx, &next->ctx, prev);
}
else {
ihk_mc_switch_context(NULL, &next->ctx);
last = ihk_mc_switch_context(NULL, &next->ctx, prev);
}
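/* 'last' is the process we just switched away from; an exited process cannot free its own kernel stack while still running on it, so the next process releases those resources here on its behalf */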
if ((last != NULL) && (last->status & (PS_ZOMBIE | PS_EXITED))) {
free_process_memory(last);
free_process(last);
}
}
else {

View File

@@ -349,18 +349,24 @@ SYSCALL_DECLARE(lseek)
SYSCALL_DECLARE(exit_group)
{
SYSCALL_HEADER;
struct process *proc = cpu_local_var(current);
#ifdef DCFA_KMOD
do_mod_exit((int)ihk_mc_syscall_arg0(ctx));
#endif
do_syscall(&request, ctx);
runq_del_proc(cpu_local_var(current), ihk_mc_get_processor_id());
free_process_memory(cpu_local_var(current));
/* XXX: send SIGKILL to all threads in this process */
do_syscall(&request, ctx);
#define IS_DETACHED_PROCESS(proc) (1) /* should be implemented in the future */
proc->status = PS_ZOMBIE;
if (IS_DETACHED_PROCESS(proc)) {
/* release the reference held for wait(2) */
proc->status = PS_EXITED;
free_process(proc);
}
//cpu_local_var(next) = &cpu_local_var(idle);
cpu_local_var(current) = NULL;
schedule();
return 0;
@@ -962,26 +968,34 @@ SYSCALL_DECLARE(futex)
SYSCALL_DECLARE(exit)
{
struct process *proc = cpu_local_var(current);
#ifdef DCFA_KMOD
do_mod_exit((int)ihk_mc_syscall_arg0(ctx));
#endif
/* XXX: if all threads have issued exit(2) rather than exit_group(2),
 * the last exit(2) should also delegate.
 */
/* If there is a clear_child_tid address set, clear it and wake it.
* This unblocks any pthread_join() waiters. */
if (cpu_local_var(current)->thread.clear_child_tid) {
if (proc->thread.clear_child_tid) {
kprintf("exit clear_child!\n");
*cpu_local_var(current)->thread.clear_child_tid = 0;
*proc->thread.clear_child_tid = 0;
barrier();
futex((uint32_t *)cpu_local_var(current)->thread.clear_child_tid,
futex((uint32_t *)proc->thread.clear_child_tid,
FUTEX_WAKE, 1, 0, NULL, 0, 0);
}
runq_del_proc(cpu_local_var(current), cpu_local_var(current)->cpu_id);
free_process_memory(cpu_local_var(current));
proc->status = PS_ZOMBIE;
if (IS_DETACHED_PROCESS(proc)) {
/* release the reference held for wait(2) */
proc->status = PS_EXITED;
free_process(proc);
}
cpu_local_var(current) = NULL;
schedule();
return 0;
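
Because IS_DETACHED_PROCESS is hard-wired to 1, both exit paths above currently drop the wait(2) reference immediately. Reusing the types from the sketch after the commit header, a hedged sketch of how the two cases would diverge once detach state is tracked (the is_detached field is invented for illustration and does not exist in this commit):

/* Hypothetical: 'is_detached' is not a real field in this commit. */
static void release_wait_reference(struct process *proc)
{
	proc->status = PS_ZOMBIE;
	if (proc->is_detached) {
		/* nobody will wait(2) on a detached process; drop that reference now */
		proc->status = PS_EXITED;
		free_process(proc);
	}
	/* otherwise the parent's wait(2) reaps the zombie and drops the reference */
}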

View File

@@ -50,8 +50,11 @@ void ihk_mc_init_ap(void);
void ihk_mc_init_context(ihk_mc_kernel_context_t *new_ctx,
void *stack_pointer,
void (*next_function)(void));
void ihk_mc_switch_context(ihk_mc_kernel_context_t *old_ctx,
ihk_mc_kernel_context_t *new_ctx);
/* Returns the 'prev' argument of the ihk_mc_switch_context() call that switched back to the resumed context, i.e. the process that was running just before it. */
void *ihk_mc_switch_context(ihk_mc_kernel_context_t *old_ctx,
ihk_mc_kernel_context_t *new_ctx,
void *prev);
int ihk_mc_interrupt_cpu(int cpu, int vector);
void ihk_mc_init_user_process(ihk_mc_kernel_context_t *ctx,

View File

@@ -91,7 +91,9 @@ int ihk_mc_pt_clear_page(page_table_t pt, void *virt);
int ihk_mc_pt_prepare_map(page_table_t pt, void *virt, unsigned long size,
enum ihk_mc_pt_prepare_flag);
/* XXX: proper use of struct page_table and page_table_t is unknown */
struct page_table *ihk_mc_pt_create(void);
void ihk_mc_pt_destroy(struct page_table *pt);
void ihk_mc_load_page_table(struct page_table *pt);
int ihk_mc_pt_virt_to_phys(struct page_table *pt,
void *virt, unsigned long *phys);
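
These declarations pair the existing ihk_mc_pt_create() with the destructor added by this commit; a minimal usage sketch:

struct page_table *pt = ihk_mc_pt_create();
if (pt != NULL) {
	/* ... install user mappings, run the process ... */
	ihk_mc_pt_destroy(pt);	/* clears the shared kernel entries, then frees recursively */
}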