move rusage into kernel ELF image (avoid dynamic alloc before NUMA init)

Change-Id: I7fe86244c8707694b379e567b31de65ee2c56887
This commit is contained in:
Balazs Gerofi
2018-12-24 01:32:55 +09:00
committed by Dominique Martinet
parent 4d215de641
commit 60dcd0e798
8 changed files with 56 additions and 58 deletions

View File

@ -9,7 +9,7 @@
#define IHK_OS_PGSIZE_2MB 1
#define IHK_OS_PGSIZE_1GB 2
extern struct rusage_global *rusage;
extern struct rusage_global rusage;
static inline int rusage_pgsize_to_pgtype(size_t pgsize)
{

View File

@ -7,7 +7,7 @@
#define IHK_OS_PGSIZE_2MB 1
#define IHK_OS_PGSIZE_1GB 2
extern struct rusage_global *rusage;
extern struct rusage_global rusage;
static inline int rusage_pgsize_to_pgtype(size_t pgsize)
{

View File

@ -40,7 +40,7 @@ void cpu_local_var_init(void)
for (i = 0; i < num_processors; i++) {
clv[i].monitor = monitor->cpu + i;
clv[i].rusage = rusage->cpu + i;
clv[i].rusage = rusage.cpu + i;
INIT_LIST_HEAD(&clv[i].smp_func_req_list);
}

View File

@ -22,11 +22,11 @@ static inline void
rusage_total_memory_add(unsigned long size)
{
#ifdef RUSAGE_DEBUG
kprintf("%s: total_memory=%ld,size=%ld\n", __FUNCTION__, rusage->total_memory, size);
kprintf("%s: total_memory=%ld,size=%ld\n", __FUNCTION__, rusage.total_memory, size);
#endif
rusage->total_memory += size;
rusage.total_memory += size;
#ifdef RUSAGE_DEBUG
kprintf("%s: total_memory=%ld\n", __FUNCTION__, rusage->total_memory);
kprintf("%s: total_memory=%ld\n", __FUNCTION__, rusage.total_memory);
#endif
}
@ -38,10 +38,10 @@ rusage_rss_add(unsigned long size)
unsigned long retval;
struct process_vm *vm;
newval = __sync_add_and_fetch(&rusage->rss_current, size);
oldval = rusage->memory_max_usage;
newval = __sync_add_and_fetch(&rusage.rss_current, size);
oldval = rusage.memory_max_usage;
while (newval > oldval) {
retval = __sync_val_compare_and_swap(&rusage->memory_max_usage,
retval = __sync_val_compare_and_swap(&rusage.memory_max_usage,
oldval, newval);
if (retval == oldval) {
break;
@ -66,7 +66,7 @@ rusage_rss_sub(unsigned long size)
{
struct process_vm *vm = cpu_local_var(current)->vm;
__sync_sub_and_fetch(&rusage->rss_current, size);
__sync_sub_and_fetch(&rusage.rss_current, size);
/* process rss */
vm->currss -= size;
@ -74,22 +74,22 @@ rusage_rss_sub(unsigned long size)
/* Atomically add size bytes to the RSS counter for the page-size class
 * corresponding to pgsize. */
static inline void memory_stat_rss_add(unsigned long size, int pgsize)
{
	ihk_atomic_add_long(size, &rusage.memory_stat_rss[rusage_pgsize_to_pgtype(pgsize)]);
}
/* Atomically subtract size bytes from the RSS counter for the page-size
 * class corresponding to pgsize (implemented as adding -size). */
static inline void memory_stat_rss_sub(unsigned long size, int pgsize)
{
	ihk_atomic_add_long(-size, &rusage.memory_stat_rss[rusage_pgsize_to_pgtype(pgsize)]);
}
/* Atomically add size bytes to the mapped-file statistic for the
 * page-size class corresponding to pgsize. */
static inline void rusage_memory_stat_mapped_file_add(unsigned long size, int pgsize)
{
	ihk_atomic_add_long(size, &rusage.memory_stat_mapped_file[rusage_pgsize_to_pgtype(pgsize)]);
}
/* Atomically subtract size bytes from the mapped-file statistic for the
 * page-size class corresponding to pgsize. */
static inline void rusage_memory_stat_mapped_file_sub(unsigned long size, int pgsize)
{
	ihk_atomic_add_long(-size, &rusage.memory_stat_mapped_file[rusage_pgsize_to_pgtype(pgsize)]);
}
static inline int rusage_memory_stat_add(struct vm_range *range, uintptr_t phys, unsigned long size, int pgsize)
@ -213,11 +213,11 @@ rusage_kmem_add(unsigned long size)
unsigned long oldval;
unsigned long retval;
newval = __sync_add_and_fetch(&rusage->memory_kmem_usage, size);
oldval = rusage->memory_kmem_max_usage;
newval = __sync_add_and_fetch(&rusage.memory_kmem_usage, size);
oldval = rusage.memory_kmem_max_usage;
while (newval > oldval) {
retval = __sync_val_compare_and_swap(
&rusage->memory_kmem_max_usage,
&rusage.memory_kmem_max_usage,
oldval, newval);
if (retval == oldval) {
break;
@ -229,13 +229,13 @@ rusage_kmem_add(unsigned long size)
/* Atomically subtract size bytes from the kernel-memory usage counter. */
static inline void
rusage_kmem_sub(unsigned long size)
{
	__sync_sub_and_fetch(&rusage.memory_kmem_usage, size);
}
/* Atomically add size bytes to the per-NUMA-node statistic and fold the
 * same amount into the global RSS counters via rusage_rss_add(). */
static inline void
rusage_numa_add(int numa_id, unsigned long size)
{
	__sync_add_and_fetch(rusage.memory_numa_stat + numa_id, size);
	rusage_rss_add(size);
}
@ -243,7 +243,7 @@ static inline void
rusage_numa_sub(int numa_id, unsigned long size)
{
rusage_rss_sub(size);
__sync_sub_and_fetch(rusage->memory_numa_stat + numa_id, size);
__sync_sub_and_fetch(rusage.memory_numa_stat + numa_id, size);
}
static inline int
@ -251,8 +251,8 @@ rusage_check_oom(int numa_id, unsigned long pages, int is_user)
{
unsigned long size = pages * PAGE_SIZE;
if (rusage->total_memory_usage + size > rusage->total_memory - RUSAGE_OOM_MARGIN) {
kprintf("%s: memory used:%ld available:%ld\n", __FUNCTION__, rusage->total_memory_usage, rusage->total_memory);
if (rusage.total_memory_usage + size > rusage.total_memory - RUSAGE_OOM_MARGIN) {
kprintf("%s: memory used:%ld available:%ld\n", __FUNCTION__, rusage.total_memory_usage, rusage.total_memory);
eventfd(IHK_OS_EVENTFD_TYPE_OOM);
if (is_user) {
return -ENOMEM;
@ -271,7 +271,7 @@ rusage_page_add(int numa_id, unsigned long pages, int is_user)
unsigned long retval;
#ifdef RUSAGE_DEBUG
if (numa_id < 0 || numa_id >= rusage->num_numa_nodes) {
if (numa_id < 0 || numa_id >= rusage.num_numa_nodes) {
kprintf("%s: Error: invalid numa_id=%d\n", __FUNCTION__, numa_id);
return;
}
@ -281,16 +281,16 @@ rusage_page_add(int numa_id, unsigned long pages, int is_user)
else
rusage_kmem_add(size);
newval = __sync_add_and_fetch(&rusage->total_memory_usage, size);
oldval = rusage->total_memory_max_usage;
newval = __sync_add_and_fetch(&rusage.total_memory_usage, size);
oldval = rusage.total_memory_max_usage;
while (newval > oldval) {
retval = __sync_val_compare_and_swap(&rusage->total_memory_max_usage,
retval = __sync_val_compare_and_swap(&rusage.total_memory_max_usage,
oldval, newval);
if (retval == oldval) {
#ifdef RUSAGE_DEBUG
if (rusage->total_memory_max_usage > rusage->total_memory_max_usage_old + (1 * (1ULL << 30))) {
kprintf("%s: max(%ld) > old + 1GB,numa_id=%d\n", __FUNCTION__, rusage->total_memory_max_usage, numa_id);
rusage->total_memory_max_usage_old = rusage->total_memory_max_usage;
if (rusage.total_memory_max_usage > rusage.total_memory_max_usage_old + (1 * (1ULL << 30))) {
kprintf("%s: max(%ld) > old + 1GB,numa_id=%d\n", __FUNCTION__, rusage.total_memory_max_usage, numa_id);
rusage.total_memory_max_usage_old = rusage.total_memory_max_usage;
}
#endif
break;
@ -304,15 +304,15 @@ rusage_page_sub(int numa_id, unsigned long pages, int is_user)
{
unsigned long size = pages * PAGE_SIZE;
#ifdef RUSAGE_DEBUG
if (numa_id < 0 || numa_id >= rusage->num_numa_nodes) {
if (numa_id < 0 || numa_id >= rusage.num_numa_nodes) {
kprintf("%s: Error: invalid numa_id=%d\n", __FUNCTION__, numa_id);
return;
}
if (rusage->total_memory_usage < size) {
kprintf("%s: Error, total_memory_usage=%ld,size=%ld\n", __FUNCTION__, rusage->total_memory_max_usage, size);
if (rusage.total_memory_usage < size) {
kprintf("%s: Error, total_memory_usage=%ld,size=%ld\n", __FUNCTION__, rusage.total_memory_max_usage, size);
}
#endif
__sync_sub_and_fetch(&rusage->total_memory_usage, size);
__sync_sub_and_fetch(&rusage.total_memory_usage, size);
if (is_user)
rusage_numa_sub(numa_id, size);
@ -327,10 +327,10 @@ rusage_num_threads_inc()
unsigned long oldval;
unsigned long retval;
newval = __sync_add_and_fetch(&rusage->num_threads, 1);
oldval = rusage->max_num_threads;
newval = __sync_add_and_fetch(&rusage.num_threads, 1);
oldval = rusage.max_num_threads;
while (newval > oldval) {
retval = __sync_val_compare_and_swap(&rusage->
retval = __sync_val_compare_and_swap(&rusage.
max_num_threads,
oldval, newval);
if (retval == oldval) {
@ -343,7 +343,7 @@ rusage_num_threads_inc()
/* Atomically decrement the global thread count. */
static inline void
rusage_num_threads_dec()
{
	__sync_sub_and_fetch(&rusage.num_threads, 1);
}
#else
static inline void
@ -428,6 +428,6 @@ rusage_num_threads_dec()
}
#endif // ENABLE_RUSAGE
extern struct rusage_global *rusage;
extern struct rusage_global rusage;
#endif /* !defined(RUSAGE_PRIVATE_H_INCLUDED) */

View File

@ -33,6 +33,7 @@
#include <sysfs.h>
#include <ihk/monitor.h>
#include <debug.h>
#include <rusage.h>
//#define IOCTL_FUNC_EXTENSION
#ifdef IOCTL_FUNC_EXTENSION
@ -52,7 +53,7 @@ extern unsigned long ihk_mc_get_ns_per_tsc(void);
extern long syscall(int, ihk_mc_user_context_t *);
struct ihk_os_monitor *monitor;
struct rusage_global *rusage;
struct rusage_global rusage;
static void handler_init(void)
{

View File

@ -1734,7 +1734,6 @@ void ihk_mc_clean_micpa(void){
static void rusage_init()
{
int npages;
unsigned long phys;
const struct ihk_mc_cpu_info *cpu_info = ihk_mc_get_cpu_info();
@ -1742,15 +1741,13 @@ static void rusage_init()
panic("rusage_init: PANIC: ihk_mc_get_cpu_info returned NULL");
}
npages = (sizeof(struct rusage_global) + PAGE_SIZE -1) >> PAGE_SHIFT;
rusage = ihk_mc_alloc_pages(npages, IHK_MC_AP_CRITICAL);
memset(rusage, 0, npages * PAGE_SIZE);
rusage->num_processors = cpu_info->ncpus;
rusage->num_numa_nodes = ihk_mc_get_nr_numa_nodes();
rusage->ns_per_tsc = ihk_mc_get_ns_per_tsc();
phys = virt_to_phys(rusage);
memset(&rusage, 0, sizeof(rusage));
rusage.num_processors = cpu_info->ncpus;
rusage.num_numa_nodes = ihk_mc_get_nr_numa_nodes();
rusage.ns_per_tsc = ihk_mc_get_ns_per_tsc();
phys = virt_to_phys(&rusage);
ihk_set_rusage(phys, sizeof(struct rusage_global));
dkprintf("%s: rusage->total_memory=%ld\n", __FUNCTION__, rusage->total_memory);
dkprintf("%s: rusage.total_memory=%ld\n", __FUNCTION__, rusage.total_memory);
}
extern void monitor_init(void);
@ -1758,7 +1755,7 @@ void mem_init(void)
{
monitor_init();
/* It must precede numa_init() because rusage->total_memory is initialized in numa_init() */
/* It must precede numa_init() because rusage.total_memory is initialized in numa_init() */
rusage_init();
/* Initialize NUMA information and memory allocator bitmaps */

View File

@ -3380,20 +3380,20 @@ void schedule(void)
release_thread(last);
rusage_num_threads_dec();
#ifdef RUSAGE_DEBUG
if (rusage->num_threads == 0) {
if (rusage.num_threads == 0) {
int i;
kprintf("total_memory_usage=%ld\n",
rusage->total_memory_usage);
rusage.total_memory_usage);
for (i = 0; i < IHK_MAX_NUM_PGSIZES; i++) {
kprintf("memory_stat_rss[%d]=%ld\n", i,
rusage->memory_stat_rss[i]);
rusage.memory_stat_rss[i]);
}
for (i = 0; i < IHK_MAX_NUM_PGSIZES; i++) {
kprintf(
"memory_stat_mapped_file[%d]=%ld\n",
i,
rusage->memory_stat_mapped_file[i]);
rusage.memory_stat_mapped_file[i]);
}
}
#endif
@ -3604,14 +3604,14 @@ void runq_add_thread(struct thread *thread, int cpu_id)
dkprintf("%s: clone_count is %d\n", __FUNCTION__, thread->proc->clone_count);
rusage_num_threads_inc();
#ifdef RUSAGE_DEBUG
if (rusage->num_threads == 1) {
if (rusage.num_threads == 1) {
int i;
kprintf("total_memory_usage=%ld\n", rusage->total_memory_usage);
kprintf("total_memory_usage=%ld\n", rusage.total_memory_usage);
for(i = 0; i < IHK_MAX_NUM_PGSIZES; i++) {
kprintf("memory_stat_rss[%d]=%ld\n", i, rusage->memory_stat_rss[i]);
kprintf("memory_stat_rss[%d]=%ld\n", i, rusage.memory_stat_rss[i]);
}
for(i = 0; i < IHK_MAX_NUM_PGSIZES; i++) {
kprintf("memory_stat_mapped_file[%d]=%ld\n", i, rusage->memory_stat_mapped_file[i]);
kprintf("memory_stat_mapped_file[%d]=%ld\n", i, rusage.memory_stat_mapped_file[i]);
}
}
#endif

View File

@ -2733,7 +2733,7 @@ unsigned long do_fork(int clone_flags, unsigned long newsp,
return -EINVAL;
}
if (!allow_oversubscribe && rusage->num_threads >= cpu_info->ncpus) {
if (!allow_oversubscribe && rusage.num_threads >= cpu_info->ncpus) {
kprintf("%s: ERROR: CPU oversubscription is not allowed. Specify -O option in mcreboot.sh to allow it.\n", __FUNCTION__);
return -EINVAL;
}