Separate mmap area from program loading (relocation) area
We need to separate the two because the heap of a PIE is created in the area to which it is mapped. Related commits: b1309a5d ("PIE is mapped at map_end instead of at user_start") and c4219655 ("Interpreter is mapped to map_start to make a system call that dereferences a NULL pointer fail"). [dominique.martinet@cea.fr: Also add ULONG_MAX and friend macros, used for data_min] [ken.sato.ty@hitachi-solutions.com: fix execve] Change-Id: I8ecaf22b7965090ab67bebece57c68283ba23664
This commit is contained in:
committed by
Dominique Martinet
parent
97e0219f50
commit
4d215de641
@ -57,6 +57,7 @@ extern char _end[];
|
||||
|
||||
#if (VA_BITS == 39 && GRANULE_SIZE == _SZ4KB)
|
||||
#
|
||||
# define LD_TASK_UNMAPPED_BASE UL(0x0000000400000000)
|
||||
# define TASK_UNMAPPED_BASE UL(0x0000000800000000)
|
||||
# define USER_END UL(0x0000002000000000)
|
||||
# define MAP_VMAP_START UL(0xffffffbdc0000000)
|
||||
@ -67,6 +68,7 @@ extern char _end[];
|
||||
#
|
||||
#elif (VA_BITS == 42 && GRANULE_SIZE == _SZ64KB)
|
||||
#
|
||||
# define LD_TASK_UNMAPPED_BASE UL(0x0000002000000000)
|
||||
# define TASK_UNMAPPED_BASE UL(0x0000004000000000)
|
||||
# define USER_END UL(0x0000010000000000)
|
||||
# define MAP_VMAP_START UL(0xfffffdfee0000000)
|
||||
@ -77,6 +79,7 @@ extern char _end[];
|
||||
#
|
||||
#elif (VA_BITS == 48 && GRANULE_SIZE == _SZ4KB)
|
||||
#
|
||||
# define LD_TASK_UNMAPPED_BASE UL(0x0000080000000000)
|
||||
# define TASK_UNMAPPED_BASE UL(0x0000100000000000)
|
||||
# define USER_END UL(0x0000400000000000)
|
||||
# define MAP_VMAP_START UL(0xffff7bffc0000000)
|
||||
@ -87,6 +90,7 @@ extern char _end[];
|
||||
#
|
||||
#elif (VA_BITS == 48 && GRANULE_SIZE == _SZ64KB)
|
||||
#
|
||||
# define LD_TASK_UNMAPPED_BASE UL(0x0000080000000000)
|
||||
# define TASK_UNMAPPED_BASE UL(0x0000100000000000)
|
||||
# define USER_END UL(0x0000400000000000)
|
||||
# define MAP_VMAP_START UL(0xffff780000000000)
|
||||
|
||||
@ -41,8 +41,9 @@
|
||||
#define LARGE_PAGE_MASK (~((unsigned long)LARGE_PAGE_SIZE - 1))
|
||||
#define LARGE_PAGE_P2ALIGN (LARGE_PAGE_SHIFT - PAGE_SHIFT)
|
||||
|
||||
#define USER_END 0x0000800000000000UL
|
||||
#define TASK_UNMAPPED_BASE 0x00002AAAAAA00000UL
|
||||
#define USER_END 0x0000800000000000UL
|
||||
#define LD_TASK_UNMAPPED_BASE 0x0000155555500000UL
|
||||
#define TASK_UNMAPPED_BASE 0x00002AAAAAA00000UL
|
||||
|
||||
/*
|
||||
* Canonical negative addresses (i.e., the smallest kernel virtual address)
|
||||
|
||||
@ -98,6 +98,7 @@ int prepare_process_ranges_args_envs(struct thread *thread,
|
||||
|
||||
n = p->num_sections;
|
||||
|
||||
vm->region.data_start = ULONG_MAX;
|
||||
aout_base = (pn->reloc)? vm->region.map_end: 0;
|
||||
for (i = 0; i < n; i++) {
|
||||
ap_flags = 0;
|
||||
@ -172,18 +173,13 @@ int prepare_process_ranges_args_envs(struct thread *thread,
|
||||
|
||||
p->sections[i].remote_pa = up;
|
||||
|
||||
/* TODO: Maybe we need flag */
|
||||
if (pn->sections[i].interp) {
|
||||
vm->region.map_end = e;
|
||||
}
|
||||
else if (i == 0) {
|
||||
else if (pn->sections[i].prot & PROT_EXEC) {
|
||||
vm->region.text_start = s;
|
||||
vm->region.text_end = e;
|
||||
}
|
||||
else if (i == 1) {
|
||||
vm->region.data_start = s;
|
||||
vm->region.data_end = e;
|
||||
}
|
||||
else {
|
||||
vm->region.data_start =
|
||||
(s < vm->region.data_start ?
|
||||
@ -212,9 +208,16 @@ int prepare_process_ranges_args_envs(struct thread *thread,
|
||||
pn->at_entry += aout_base;
|
||||
}
|
||||
|
||||
vm->region.map_start = vm->region.map_end = TASK_UNMAPPED_BASE;
|
||||
|
||||
vm->region.brk_start = vm->region.brk_end =
|
||||
(vm->region.data_end + LARGE_PAGE_SIZE - 1) & LARGE_PAGE_MASK;
|
||||
|
||||
if (vm->region.brk_start >= vm->region.map_start) {
|
||||
kprintf("%s: ERROR: data section is too large (end addr: %lx)\n",
|
||||
__func__, vm->region.data_end);
|
||||
}
|
||||
|
||||
#if 0
|
||||
{
|
||||
void *heap;
|
||||
@ -528,17 +531,12 @@ static int process_msg_prepare_process(unsigned long rphys)
|
||||
vm->region.user_end = pn->user_end;
|
||||
if(vm->region.user_end > USER_END)
|
||||
vm->region.user_end = USER_END;
|
||||
if(vm->region.user_start != 0UL ||
|
||||
vm->region.user_end < TASK_UNMAPPED_BASE){
|
||||
vm->region.map_start =
|
||||
(vm->region.user_start +
|
||||
(vm->region.user_end - vm->region.user_start) / 3) &
|
||||
LARGE_PAGE_MASK;
|
||||
}
|
||||
else{
|
||||
vm->region.map_start = TASK_UNMAPPED_BASE;
|
||||
}
|
||||
vm->region.map_end = vm->region.map_start;
|
||||
|
||||
/* map_start / map_end is used to track memory area
|
||||
* to which the program is loaded
|
||||
*/
|
||||
vm->region.map_start = vm->region.map_end = LD_TASK_UNMAPPED_BASE;
|
||||
|
||||
memcpy(proc->rlimit, pn->rlimit, sizeof(struct rlimit) * MCK_RLIM_MAX);
|
||||
dkprintf("%s: rlim_cur: %ld, rlim_max: %ld, stack_premap: %ld\n",
|
||||
__FUNCTION__,
|
||||
|
||||
@ -2598,6 +2598,11 @@ SYSCALL_DECLARE(execve)
|
||||
((char *)thread) +
|
||||
KERNEL_STACK_NR_PAGES * PAGE_SIZE, desc->entry, 0);
|
||||
|
||||
/* map_start / map_end is used to track memory area
|
||||
* to which the program is loaded
|
||||
*/
|
||||
vm->region.map_start = vm->region.map_end = LD_TASK_UNMAPPED_BASE;
|
||||
|
||||
/* Create virtual memory ranges and update args/envs */
|
||||
if (prepare_process_ranges_args_envs(thread, desc, desc,
|
||||
PTATTR_NO_EXECUTE | PTATTR_WRITABLE | PTATTR_FOR_USER,
|
||||
|
||||
@ -15,6 +15,10 @@
|
||||
|
||||
#define INT_MAX 0x7fffffff
|
||||
#define INT_MIN -0x80000000
|
||||
#define UINT_MAX 0xffffffff
|
||||
#define LONG_MAX 0x7fffffffffffffffL
|
||||
#define LONG_MIN -0x8000000000000000L
|
||||
#define ULONG_MAX 0xffffffffffffffffL
|
||||
#define IOV_MAX 1024
|
||||
|
||||
#ifndef PATH_MAX
|
||||
|
||||
Reference in New Issue
Block a user