MM: generic lockless kmalloc and page cache

Change-Id: I71ad498fdd10136d9c72ffe2b16b9122d1bc9673
Balazs Gerofi
2020-09-06 11:51:52 +09:00
committed by Masamichi Takagi
parent 41f5c0bdde
commit 10c09aa10e
3 changed files with 191 additions and 1 deletions

@@ -20,10 +20,17 @@
 * CPU Local Storage (cls)
 */

struct kmalloc_cache_header {
        struct kmalloc_cache_header *next;
};

struct kmalloc_header {
        unsigned int front_magic;
        int cpu_id;
        union {
                struct list_head list;
                struct kmalloc_cache_header *cache;
        };
        int size; /* The size of this chunk without the header */
        unsigned int end_magic;
        /* 32 bytes */
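The union overlays two mutually exclusive roles of a chunk: while it sits on the regular free list, list links it there; once it belongs to a lockless cache, the same storage holds a back-pointer to the owning cache, so kmalloc_cache_free() can return the chunk without any lookup. A minimal sketch of that tagging step, assuming the layout above (chunk_header() and chunk_set_cache() are illustrative helpers, not part of this commit):

/* Illustrative helpers, not part of this commit: recover the header
 * that sits immediately in front of each kmalloc'ed chunk, and tag
 * the chunk with its owning lockless cache. */
static inline struct kmalloc_header *chunk_header(void *elem)
{
        return (struct kmalloc_header *)((char *)elem -
                        sizeof(struct kmalloc_header));
}

static inline void chunk_set_cache(void *elem,
                struct kmalloc_cache_header *cache)
{
        /* Overwrites the list linkage; the chunk must not be on the
         * regular free list when this is called. */
        chunk_header(elem)->cache = cache;
}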

@@ -36,4 +36,98 @@ int memcheckall(void);
int freecheck(int runcount);
void kmalloc_consolidate_free_list(void);

#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif

/*
 * Generic lockless kmalloc cache.
 */
static inline void kmalloc_cache_free(void *elem)
{
        struct kmalloc_cache_header *current = NULL;
        struct kmalloc_cache_header *new =
                (struct kmalloc_cache_header *)elem;
        struct kmalloc_header *header;
        register struct kmalloc_cache_header *cache;

        if (unlikely(!elem))
                return;

        /* Get cache pointer from kmalloc header */
        header = (struct kmalloc_header *)((void *)elem -
                        sizeof(struct kmalloc_header));

        if (unlikely(!header->cache)) {
                kprintf("%s: WARNING: no cache for 0x%lx\n",
                        __func__, (unsigned long)elem);
                return;
        }

        cache = header->cache;

retry:
        /* Lockless push: link in the current head, then try to swing
         * the head to the new element; retry if another CPU raced us. */
        current = cache->next;
        new->next = current;

        if (!__sync_bool_compare_and_swap(&cache->next, current, new)) {
                goto retry;
        }
}
static inline void kmalloc_cache_prealloc(struct kmalloc_cache_header *cache,
                size_t size, int nr_elem)
{
        struct kmalloc_cache_header *elem;
        int i;

        /* Already populated? */
        if (unlikely(cache->next))
                return;

        for (i = 0; i < nr_elem; ++i) {
                struct kmalloc_header *header;

                elem = (struct kmalloc_cache_header *)
                        kmalloc(size, IHK_MC_AP_NOWAIT);
                if (!elem) {
                        kprintf("%s: ERROR: allocating cache element\n",
                                __func__);
                        continue;
                }

                /* Store cache pointer in kmalloc_header */
                header = (struct kmalloc_header *)((void *)elem -
                                sizeof(struct kmalloc_header));
                header->cache = cache;

                /* Push the element onto the cache's lockless free list */
                kmalloc_cache_free(elem);
        }
}
static inline void *kmalloc_cache_alloc(struct kmalloc_cache_header *cache,
                size_t size)
{
        register struct kmalloc_cache_header *first, *next;

retry:
        next = NULL;
        first = cache->next;

        if (first) {
                /* Lockless pop: retry if the head moved under us */
                next = first->next;
                if (!__sync_bool_compare_and_swap(&cache->next,
                                        first, next)) {
                        goto retry;
                }
        } else {
                kprintf("%s: calling pre-alloc for 0x%lx...\n",
                        __func__, (unsigned long)cache);
                kmalloc_cache_prealloc(cache, size, 384);

                /* Avoid spinning forever if pre-allocation failed */
                if (unlikely(!cache->next))
                        return NULL;

                goto retry;
        }

        return (void *)first;
}
#endif
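As a usage sketch: each cache serves one fixed allocation size, since the size argument is only consulted when the cache is refilled. The names below (struct my_obj, my_obj_cache and the two wrappers) are hypothetical, not part of this commit:

struct my_obj {                 /* hypothetical fixed-size object */
        int id;
        char payload[56];
};

static struct kmalloc_cache_header my_obj_cache = { .next = NULL };

static struct my_obj *my_obj_alloc(void)
{
        /* First caller pre-allocates 384 chunks of this size */
        return (struct my_obj *)kmalloc_cache_alloc(&my_obj_cache,
                        sizeof(struct my_obj));
}

static void my_obj_release(struct my_obj *obj)
{
        /* Returns the chunk to my_obj_cache, not to the general pool;
         * the owning cache is found via the header back-pointer. */
        kmalloc_cache_free(obj);
}

Because kmalloc_cache_alloc() never re-checks the element size, all allocations from a given cache must request the same size; mixing sizes would hand out undersized chunks.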

@@ -20,6 +20,7 @@
#include <ihk/lock.h>
#include <ihk/atomic.h>
#include <arch/mm.h>
#include <ihk/debug.h>

struct memobj;
struct process_vm;

@@ -269,4 +270,92 @@ void ihk_mc_query_mem_free_page(void *dump_page_set);
int ihk_mc_chk_page_address(pte_t mem_addr);
int ihk_mc_get_mem_user_page(void *arg0, page_table_t pt, pte_t *ptep, void *pgaddr, int pgshift);

#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif

extern int zero_at_free;

/*
 * Generic lockless page cache.
 * TODO: Store nr of pages in header and double-check at alloc time.
 */
struct ihk_mc_page_cache_header {
        struct ihk_mc_page_cache_header *next;
};
static inline void ihk_mc_page_cache_free(
                struct ihk_mc_page_cache_header *cache, void *page)
{
        struct ihk_mc_page_cache_header *current = NULL;
        struct ihk_mc_page_cache_header *new =
                (struct ihk_mc_page_cache_header *)page;

        if (unlikely(!page))
                return;

retry:
        /* Lockless push, same scheme as kmalloc_cache_free() */
        current = cache->next;
        new->next = current;

        if (!__sync_bool_compare_and_swap(&cache->next, current, new)) {
                goto retry;
        }
}
static inline void ihk_mc_page_cache_prealloc(
                struct ihk_mc_page_cache_header *cache,
                int nr_pages, int nr_elem)
{
        int i;

        /* Already populated? */
        if (unlikely(cache->next))
                return;

        for (i = 0; i < nr_elem; ++i) {
                void *pages;

                pages = ihk_mc_alloc_pages(nr_pages, IHK_MC_AP_NOWAIT);
                if (!pages) {
                        kprintf("%s: ERROR: allocating pages\n", __func__);
                        continue;
                }

                ihk_mc_page_cache_free(cache, pages);
        }
}
static inline void *ihk_mc_page_cache_alloc(
                struct ihk_mc_page_cache_header *cache,
                int nr_pages)
{
        register struct ihk_mc_page_cache_header *first, *next;

retry:
        next = NULL;
        first = cache->next;

        if (first) {
                /* Lockless pop: retry if the head moved under us */
                next = first->next;
                if (!__sync_bool_compare_and_swap(&cache->next,
                                        first, next)) {
                        goto retry;
                }
        } else {
                kprintf("%s: calling pre-alloc for 0x%lx...\n",
                        __func__, (unsigned long)cache);
                ihk_mc_page_cache_prealloc(cache, nr_pages, 256);

                /* Avoid spinning forever if pre-allocation failed */
                if (unlikely(!cache->next))
                        return NULL;

                goto retry;
        }

        return (void *)first;
}
#endif
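The page cache follows the same pattern, keyed by page count rather than byte size. A sketch with hypothetical names (pt_cache, alloc_page_table()), assuming an allocator that recycles single pages:

static struct ihk_mc_page_cache_header pt_cache = { .next = NULL };

static void *alloc_page_table(void)
{
        /* First caller pre-allocates 256 single-page elements */
        return ihk_mc_page_cache_alloc(&pt_cache, 1);
}

static void free_page_table(void *pt)
{
        ihk_mc_page_cache_free(&pt_cache, pt);
}

One caveat worth noting: both caches are Treiber stacks, and the pop path reads first->next before the CAS on the head. A concurrent pop-and-repush of the same element between those two steps can make the CAS succeed with a stale next pointer (the classic ABA hazard), so the pattern should be reused elsewhere only where that interleaving is tolerable.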