memory_range_lock: Enable interrupts when trylock fails

Also use a read-write lock

Change-Id: I03150b7208325ec1fe422dcd5f931e4e41c8e40e
Refs: #452
This commit is contained in:
Tomoki Shirasawa
2019-07-29 14:12:50 +09:00
committed by Masamichi Takagi
parent 258156b57e
commit 0d3ef65092
13 changed files with 444 additions and 101 deletions

View File

@@ -239,7 +239,7 @@ static int
init_process_vm(struct process *owner, struct address_space *asp, struct process_vm *vm)
{
int i;
ihk_mc_spinlock_init(&vm->memory_range_lock);
ihk_mc_rwlock_init(&vm->memory_range_lock);
ihk_mc_spinlock_init(&vm->page_table_lock);
ihk_atomic_set(&vm->refcount, 1);
@@ -740,8 +740,9 @@ static int copy_user_ranges(struct process_vm *vm, struct process_vm *orgvm)
struct vm_range *src_range;
struct vm_range *range;
struct copy_args args;
unsigned long irqflags;
ihk_mc_spinlock_lock_noirq(&orgvm->memory_range_lock);
memory_range_write_lock(orgvm, &irqflags);
/* Iterate original process' vm_range list and take a copy one-by-one */
src_range = NULL;
@@ -801,7 +802,7 @@ static int copy_user_ranges(struct process_vm *vm, struct process_vm *orgvm)
vm_range_insert(vm, range);
}
ihk_mc_spinlock_unlock_noirq(&orgvm->memory_range_lock);
memory_range_write_unlock(orgvm, &irqflags);
return 0;
@@ -813,7 +814,7 @@ err_rollback:
/* TODO: implement rollback */
ihk_mc_spinlock_unlock_noirq(&orgvm->memory_range_lock);
memory_range_write_unlock(orgvm, &irqflags);
return -1;
}
@@ -2102,6 +2103,7 @@ static int do_page_fault_process_vm(struct process_vm *vm, void *fault_addr0, ui
struct vm_range *range;
struct thread *thread = cpu_local_var(current);
int locked = 0;
unsigned long irqflags;
dkprintf("[%d]do_page_fault_process_vm(%p,%lx,%lx)\n",
ihk_mc_get_processor_id(), vm, fault_addr0, reason);
@@ -2112,10 +2114,12 @@ static int do_page_fault_process_vm(struct process_vm *vm, void *fault_addr0, ui
if (thread->vm->is_memory_range_lock_taken) {
goto skip;
}
if (ihk_mc_spinlock_trylock_noirq(&vm->memory_range_lock)) {
irqflags = cpu_disable_interrupt_save();
if (ihk_mc_read_trylock(&vm->memory_range_lock)) {
locked = 1;
break;
}
cpu_restore_interrupt(irqflags);
}
} else {
skip:;
@@ -2231,7 +2235,8 @@ skip:;
error = 0;
out:
if (locked) {
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
ihk_mc_read_unlock(&vm->memory_range_lock);
cpu_restore_interrupt(irqflags);
}
dkprintf("[%d]do_page_fault_process_vm(%p,%lx,%lx): %d\n",
ihk_mc_get_processor_id(), vm, fault_addr0,
@@ -2532,9 +2537,10 @@ void flush_process_memory(struct process_vm *vm)
struct vm_range *range;
struct rb_node *node, *next = rb_first(&vm->vm_range_tree);
int error;
unsigned long irqflags;
dkprintf("flush_process_memory(%p)\n", vm);
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
memory_range_write_lock(vm, &irqflags);
/* Let concurrent page faults know the VM will be gone */
vm->exiting = 1;
while ((node = next)) {
@@ -2552,7 +2558,7 @@ void flush_process_memory(struct process_vm *vm)
}
}
}
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
memory_range_write_unlock(vm, &irqflags);
dkprintf("flush_process_memory(%p):\n", vm);
return;
}
@@ -2562,12 +2568,13 @@ void free_process_memory_ranges(struct process_vm *vm)
int error;
struct vm_range *range;
struct rb_node *node, *next = rb_first(&vm->vm_range_tree);
unsigned long irqflags;
if (vm == NULL) {
return;
}
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
memory_range_write_lock(vm, &irqflags);
while ((node = next)) {
range = rb_entry(node, struct vm_range, vm_rb_node);
next = rb_next(node);
@@ -2580,7 +2587,7 @@ void free_process_memory_ranges(struct process_vm *vm)
/* through */
}
}
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
memory_range_write_unlock(vm, &irqflags);
}
static void free_thread_pages(struct thread *thread)
@@ -2655,8 +2662,9 @@ free_all_process_memory_range(struct process_vm *vm)
struct vm_range *range;
struct rb_node *node, *next = rb_first(&vm->vm_range_tree);
int error;
unsigned long irqflags;
ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
memory_range_write_lock(vm, &irqflags);
while ((node = next)) {
range = rb_entry(node, struct vm_range, vm_rb_node);
next = rb_next(node);
@@ -2669,7 +2677,7 @@ free_all_process_memory_range(struct process_vm *vm)
/* through */
}
}
ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
memory_range_write_unlock(vm, &irqflags);
}
void
@@ -3084,7 +3092,7 @@ void sched_init(void)
&idle_thread->proc->children_list);
ihk_mc_init_context(&idle_thread->ctx, NULL, idle);
ihk_mc_spinlock_init(&idle_thread->vm->memory_range_lock);
ihk_mc_rwlock_init(&idle_thread->vm->memory_range_lock);
idle_thread->vm->vm_range_tree = RB_ROOT;
idle_thread->vm->vm_range_numa_policy_tree = RB_ROOT;
idle_thread->proc->pid = 0;