init_process_stack: change premapped stack size based on arch

Avoid consuming a large 512MB page on 64K base page arch

Change-Id: Ice491d43fd998b375ddc24f4eff7faf5d36d9f42
Fujitsu: POSTK_DEBUG_ARCH_DEP_104
This commit is contained in:
Dominique Martinet
2019-01-28 17:22:37 +09:00
parent 960a6f5f90
commit 2b254f02f8
3 changed files with 30 additions and 17 deletions

View File

@ -330,6 +330,18 @@ static const unsigned int PTL1_CONT_COUNT = __PTL1_CONT_COUNT;
#define PTE_FILEOFF PTE_SPECIAL
/*
 * Page-size parameters used when premapping the initial user stack.
 * On a 64K base-page arm64 kernel the "large" page is so big (512MB,
 * per the commit note) that premapping one would waste enormous amounts
 * of memory, so fall back to the base page size there; on other page
 * granules keep using large pages for the stack premap.
 */
#ifdef CONFIG_ARM64_64K_PAGES
# define USER_STACK_PREPAGE_SIZE PAGE_SIZE
# define USER_STACK_PAGE_MASK PAGE_MASK
# define USER_STACK_PAGE_P2ALIGN PAGE_P2ALIGN
# define USER_STACK_PAGE_SHIFT PAGE_SHIFT
#else
# define USER_STACK_PREPAGE_SIZE LARGE_PAGE_SIZE
# define USER_STACK_PAGE_MASK LARGE_PAGE_MASK
# define USER_STACK_PAGE_P2ALIGN LARGE_PAGE_P2ALIGN
# define USER_STACK_PAGE_SHIFT LARGE_PAGE_SHIFT
#endif
#define PT_ENTRIES (PAGE_SIZE >> 3)
#ifndef __ASSEMBLY__

View File

@ -160,6 +160,10 @@ typedef unsigned long pte_t;
#define PM_PRESENT PM_STATUS(4LL)
#define PM_SWAP PM_STATUS(2LL)
/*
 * User-stack premap parameters for this arch (presumably the x86 header,
 * where large pages are small enough to premap unconditionally -- TODO
 * confirm against the file this hunk belongs to). The arm64 header
 * defines the same macro set conditionally on the page granule.
 */
#define USER_STACK_PREPAGE_SIZE LARGE_PAGE_SIZE
#define USER_STACK_PAGE_MASK LARGE_PAGE_MASK
#define USER_STACK_PAGE_P2ALIGN LARGE_PAGE_P2ALIGN
#define USER_STACK_PAGE_SHIFT LARGE_PAGE_SHIFT
/* For easy conversion, it is better to be the same as architecture's ones */
enum ihk_mc_pt_attribute {

View File

@ -2236,13 +2236,9 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
int stack_align_padding = 0;
/* Create stack range */
end = STACK_TOP(&thread->vm->region) & LARGE_PAGE_MASK;
#ifdef POSTK_DEBUG_ARCH_DEP_80 /* user stack prepage size fix */
minsz = LARGE_PAGE_SIZE;
#else /* POSTK_DEBUG_ARCH_DEP_80 */
minsz = (pn->stack_premap
+ LARGE_PAGE_SIZE - 1) & LARGE_PAGE_MASK;
#endif /* POSTK_DEBUG_ARCH_DEP_80 */
end = STACK_TOP(&thread->vm->region) & USER_STACK_PAGE_MASK;
minsz = (pn->stack_premap + USER_STACK_PREPAGE_SIZE - 1) &
USER_STACK_PAGE_MASK;
maxsz = (end - thread->vm->region.map_start) / 2;
size = proc->rlimit[MCK_RLIMIT_STACK].rlim_cur;
if (size > maxsz) {
@ -2251,13 +2247,13 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
else if (size < minsz) {
size = minsz;
}
size = (size + LARGE_PAGE_SIZE - 1) & LARGE_PAGE_MASK;
size = (size + USER_STACK_PREPAGE_SIZE - 1) & USER_STACK_PAGE_MASK;
dkprintf("%s: stack_premap: %lu, rlim_cur: %lu, minsz: %lu, size: %lu\n",
__FUNCTION__,
pn->stack_premap,
proc->rlimit[MCK_RLIMIT_STACK].rlim_cur,
minsz, size);
start = (end - size) & LARGE_PAGE_MASK;
start = (end - size) & USER_STACK_PAGE_MASK;
/* Apply user allocation policy to stacks */
/* TODO: make threshold kernel or mcexec argument */
@ -2268,7 +2264,9 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
ap_flag ? "(IHK_MC_AP_USER)" : "");
stack = ihk_mc_alloc_aligned_pages_user(minsz >> PAGE_SHIFT,
LARGE_PAGE_P2ALIGN, IHK_MC_AP_NOWAIT | ap_flag, start);
USER_STACK_PAGE_P2ALIGN,
IHK_MC_AP_NOWAIT | ap_flag,
start);
if (!stack) {
kprintf("%s: error: couldn't allocate initial stack\n",
@ -2284,7 +2282,7 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
vrflag |= VR_MAXPROT_READ | VR_MAXPROT_WRITE | VR_MAXPROT_EXEC;
#define NOPHYS ((uintptr_t)-1)
if ((rc = add_process_memory_range(thread->vm, start, end, NOPHYS,
vrflag, NULL, 0, LARGE_PAGE_SHIFT, &range)) != 0) {
vrflag, NULL, 0, USER_STACK_PAGE_SHIFT, &range)) != 0) {
ihk_mc_free_pages_user(stack, minsz >> PAGE_SHIFT);
kprintf("%s: error addding process memory range: %d\n", rc);
return rc;
@ -2294,10 +2292,9 @@ int init_process_stack(struct thread *thread, struct program_load_desc *pn,
error = ihk_mc_pt_set_range(thread->vm->address_space->page_table,
thread->vm, (void *)(end - minsz),
(void *)end, virt_to_phys(stack),
arch_vrflag_to_ptattr(vrflag, PF_POPULATE, NULL),
LARGE_PAGE_SHIFT, range, 0
);
arch_vrflag_to_ptattr(vrflag, PF_POPULATE,
NULL),
USER_STACK_PAGE_SHIFT, range, 0);
if (error) {
kprintf("init_process_stack:"
"set range %lx-%lx %lx failed. %d\n",