mempolicy: Support MPOL_INTERLEAVE
Change-Id: I6357892d792b2de8ea859a0a6799250f05066713
Refs: #959
commit aef50d710c (parent 7f0594d784), committed by Masamichi Takagi
kernel/mem.c | 135 lines changed
@@ -523,6 +523,18 @@ static void reserve_pages(struct ihk_page_allocator_desc *pa_allocator,
 	ihk_pagealloc_reserve(pa_allocator, start, end);
 }
 
+static int interleave_nodes(int off, unsigned long *numa_mask)
+{
+	int next;
+
+	next = find_next_bit(numa_mask, PROCESS_NUMA_MASK_BITS, off + 1);
+	if (next >= PROCESS_NUMA_MASK_BITS) {
+		next = find_first_bit(numa_mask, PROCESS_NUMA_MASK_BITS);
+	}
+
+	return next;
+}
+
 extern int cpu_local_var_initialized;
 static void *mckernel_allocate_aligned_pages_node(int npages, int p2align,
 	ihk_mc_ap_flag flag, int pref_node, int is_user, uintptr_t virt_addr)
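The interleave_nodes() helper added above is a circular scan: find the next set bit in the NUMA mask after the previously used node, and wrap back to the first set bit when the scan runs off the end of the mask. Below is a minimal userspace sketch of the same logic, assuming the mask fits in a single 64-bit unsigned long; find_next_set() is a hypothetical stand-in for the kernel's find_next_bit()/find_first_bit():

    #include <stdio.h>

    #define MASK_BITS 64 /* assuming 64-bit unsigned long */

    /* stand-in for find_next_bit(): first set bit at index >= from */
    static int find_next_set(unsigned long mask, int from)
    {
        for (int i = from; i < MASK_BITS; i++)
            if (mask & (1UL << i))
                return i;
        return MASK_BITS; /* not found */
    }

    /* same shape as the kernel helper: next allowed node after off,
     * wrapping around to the first allowed node */
    static int interleave_nodes(int off, unsigned long mask)
    {
        int next = find_next_set(mask, off + 1);

        if (next >= MASK_BITS)
            next = find_next_set(mask, 0);
        return next;
    }

    int main(void)
    {
        unsigned long mask = 0x2C; /* nodes 2, 3 and 5 allowed */
        int node = 5;              /* il_prev: last node used */

        for (int i = 0; i < 6; i++) {
            node = interleave_nodes(node, mask);
            printf("%d ", node);   /* prints: 2 3 5 2 3 5 */
        }
        printf("\n");
        return 0;
    }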
@@ -538,7 +550,9 @@ static void *mckernel_allocate_aligned_pages_node(int npages, int p2align,
 	int numa_mem_policy = -1;
 	struct process_vm *vm;
 	struct vm_range *range = NULL;
-	int chk_shm = 0;
+	int chk_shm = 0, il_start, looping;
+	int *il_prev = NULL;
+	unsigned long *numa_mask = NULL;
 
 	if(npages <= 0)
 		return NULL;
@@ -549,31 +563,39 @@ static void *mckernel_allocate_aligned_pages_node(int npages, int p2align,
 	    !cpu_local_var(current)->vm)
 		goto distance_based;
 
-	/* No explicitly requested NUMA or user policy? */
-	if ((pref_node == -1) && (!(flag & IHK_MC_AP_USER) ||
-		cpu_local_var(current)->vm->numa_mem_policy == MPOL_DEFAULT)) {
-
-		if (virt_addr != -1) {
-			vm = cpu_local_var(current)->vm;
-			range_policy_iter = vm_range_policy_search(vm, virt_addr);
-			if (range_policy_iter) {
-				range = lookup_process_memory_range(vm, (uintptr_t)virt_addr, ((uintptr_t)virt_addr) + 1);
-				if (range) {
-					if( (range->memobj) && (range->memobj->flags == MF_SHM)) {
-						chk_shm = 1;
-					}
-				}
-			}
-		}
-
-		if ((!((range_policy_iter) && (range_policy_iter->numa_mem_policy != MPOL_DEFAULT))) && (chk_shm == 0))
-			goto distance_based;
-	}
-
-	node = ihk_mc_get_numa_id();
+	vm = cpu_local_var(current)->vm;
+	node = ihk_mc_get_numa_id();
+
+	/* Get mempolicy user requested */
+	if (virt_addr != -1) {
+		range_policy_iter = vm_range_policy_search(vm, virt_addr);
+
+		if (range_policy_iter) {
+			range = lookup_process_memory_range(vm,
+					(uintptr_t)virt_addr,
+					((uintptr_t)virt_addr) + 1);
+			if ((range && (range->memobj->flags == MF_SHM))) {
+				chk_shm = 1;
+			}
+
+			/* Use range policy */
+			numa_mem_policy = range_policy_iter->numa_mem_policy;
+			numa_mask = range_policy_iter->numa_mask;
+			il_prev = &range_policy_iter->il_prev;
+		} else {
+			/* Use process policy */
+			numa_mem_policy = vm->numa_mem_policy;
+			numa_mask = vm->numa_mask;
+			il_prev = &vm->il_prev;
+		}
+	}
+
 	if (!memory_nodes[node].nodes_by_distance)
 		goto order_based;
+
+	/* No explicitly requested NUMA or user policy? */
+	if ((pref_node == -1) && !(flag & IHK_MC_AP_USER)) {
+		if ((numa_mem_policy == MPOL_DEFAULT) && (chk_shm == 0)) {
+			goto distance_based;
+		}
+	}
 
 	/* Explicit valid node? */
 	if (pref_node > -1 && pref_node < ihk_mc_get_nr_numa_nodes()) {
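The hunk above hoists the policy lookup to the top of the allocator and records, for whichever policy applies, the node mask and a pointer to that policy's interleave cursor (il_prev). A per-range policy, when one covers the faulting address, takes precedence over the process-wide policy; because the cursor lives inside the policy object, each interleaved region round-robins independently. A condensed sketch of that selection, using simplified stand-in types rather than McKernel's real structures:

    #include <stdio.h>

    /* simplified stand-ins for the range/process policy state */
    struct mem_policy {
        int numa_mem_policy;     /* MPOL_DEFAULT, MPOL_INTERLEAVE, ... */
        unsigned long numa_mask; /* allowed NUMA nodes */
        int il_prev;             /* this policy's interleave cursor */
    };

    /* mirror of the if/else above: the per-range policy, if one covers
     * the faulting address, wins over the process-wide policy */
    static struct mem_policy *select_policy(struct mem_policy *range_policy,
                                            struct mem_policy *proc_policy)
    {
        return range_policy ? range_policy : proc_policy;
    }

    int main(void)
    {
        struct mem_policy proc  = { 0, 0x3UL, 0 }; /* process-wide policy */
        struct mem_policy range = { 3, 0xCUL, 2 }; /* per-range policy */

        /* address covered by a ranged policy vs. one that is not */
        printf("in range: cursor at %d\n", select_policy(&range, &proc)->il_prev);
        printf("no range: cursor at %d\n", select_policy(NULL, &proc)->il_prev);
        return 0;
    }

Since il_prev is taken by address, the MPOL_INTERLEAVE case later advances whichever cursor was selected here, and the position persists across allocations.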
@@ -615,27 +637,6 @@ static void *mckernel_allocate_aligned_pages_node(int npages, int p2align,
 		}
 	}
 
-	if ((virt_addr != -1) && (chk_shm == 0)) {
-
-		vm = cpu_local_var(current)->vm;
-
-		if (!(range_policy_iter)) {
-			range_policy_iter = vm_range_policy_search(vm, virt_addr);
-		}
-
-		if (range_policy_iter) {
-			range = lookup_process_memory_range(vm, (uintptr_t)virt_addr, ((uintptr_t)virt_addr) + 1);
-			if ((range && (range->memobj->flags == MF_SHM))) {
-				chk_shm = 1;
-			} else {
-				numa_mem_policy = range_policy_iter->numa_mem_policy;
-			}
-		}
-	}
-
-	if (numa_mem_policy == -1)
-		numa_mem_policy = cpu_local_var(current)->vm->numa_mem_policy;
-
 	switch (numa_mem_policy) {
 
 	case MPOL_BIND:
 	case MPOL_PREFERRED:
@@ -687,7 +688,55 @@ static void *mckernel_allocate_aligned_pages_node(int npages, int p2align,
 		break;
 
 	case MPOL_INTERLEAVE:
-		/* TODO: */
+		/* Initialize interleave */
+		il_start = *il_prev;
+		looping = 0;
+
+retry_interleave:
+		/* Find next node */
+		numa_id = interleave_nodes(*il_prev, numa_mask);
+		*il_prev = numa_id;
+
+		if (il_start == *il_prev && looping) {
+			/* All interleave nodes are full */
+			pa = 0;
+			break;
+		}
+		looping = 1;
+#ifdef IHK_RBTREE_ALLOCATOR
+		{
+			if (rusage_check_oom(numa_id, npages, is_user)
+					== -ENOMEM) {
+				goto retry_interleave;
+			} else {
+				pa = ihk_numa_alloc_pages(
+					&memory_nodes[numa_id],
+					npages, p2align);
+			}
+#else
+		list_for_each_entry(pa_allocator,
+				&memory_nodes[numa_id].allocators,
+				list) {
+			if (rusage_check_oom(numa_id, npages, is_user)
+					== -ENOMEM) {
+				goto retry_interleave;
+			} else {
+				pa = ihk_pagealloc_alloc(pa_allocator,
+						npages, p2align);
+			}
+#endif
+
+			if (pa) {
+				rusage_page_add(numa_id, npages,
+						is_user);
+				dkprintf("%s: policy: CPU @ node %d allocated "
+						"%d pages from node %d\n",
+						__func__,
+						ihk_mc_get_numa_id(),
+						npages, node);
+
+			}
+		}
+		break;
 
 	default:
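The MPOL_INTERLEAVE case walks the allowed nodes round-robin and retries on the next node whenever rusage_check_oom() reports the current one exhausted; il_start plus the looping flag detect a complete wrap around the mask, at which point the allocation fails (pa = 0) instead of spinning forever. A userspace sketch of that termination logic, assuming a single-word mask and a hypothetical node_is_full[] array in place of the real OOM check and allocator calls:

    #include <stdio.h>
    #include <stdbool.h>

    #define MASK_BITS 64 /* assuming 64-bit unsigned long */

    static bool node_is_full[MASK_BITS]; /* stand-in for rusage_check_oom() */

    /* circular next-set-bit scan, as in interleave_nodes() */
    static int next_node(int off, unsigned long mask)
    {
        for (int i = off + 1; i < MASK_BITS; i++)
            if (mask & (1UL << i))
                return i;
        for (int i = 0; i <= off; i++)
            if (mask & (1UL << i))
                return i;
        return -1; /* empty mask */
    }

    /* mirrors the retry_interleave loop: try nodes round-robin, give up
     * only after wrapping back to the starting cursor position */
    static int interleave_alloc(int *il_prev, unsigned long mask)
    {
        int il_start = *il_prev, looping = 0;

        for (;;) {
            int numa_id = next_node(*il_prev, mask);

            if (numa_id < 0)
                return -1; /* no allowed nodes at all */
            *il_prev = numa_id;

            if (il_start == *il_prev && looping)
                return -1; /* all interleave nodes are full */
            looping = 1;

            if (!node_is_full[numa_id])
                return numa_id; /* allocation would succeed here */
        }
    }

    int main(void)
    {
        unsigned long mask = 0x2C; /* nodes 2, 3 and 5 */
        int il_prev = 2;

        node_is_full[3] = true; /* node 3 is exhausted */
        printf("got node %d\n", interleave_alloc(&il_prev, mask)); /* 5 */

        node_is_full[2] = node_is_full[5] = true;
        printf("got node %d\n", interleave_alloc(&il_prev, mask)); /* -1 */
        return 0;
    }

Note that, as in the committed code, the sketch gives up when the cursor returns to il_start without re-testing il_start itself; the cursor position is preserved in *il_prev across calls, which is what makes successive allocations continue the rotation.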