x86: disable zero mapping and add a boot pt for ap trampoline
The application processor trampoline needs the trampoline physical address to be mapped for the few instructions between loading the page table and jumping into the normal memory area; set up a new page table for them. Also make the trampoline use its stack directly where it needs to be. With that, x86 can finally remove the zero page from its init mapping.

Change-Id: Iab3f33a2ed22570eeb47b5ab6e068c9a17c25413
This commit is contained in:
Committed by: Dominique Martinet
Parent: c59d8db1b3
Commit: 4f66d1be0f
@@ -824,11 +824,14 @@ void call_ap_func(void (*next_func)(void))
|
||||
next_func();
|
||||
}
|
||||
|
||||
struct page_table *get_init_page_table(void);
|
||||
void setup_x86_ap(void (*next_func)(void))
|
||||
{
|
||||
unsigned long rsp;
|
||||
cpu_disable_interrupt();
|
||||
|
||||
ihk_mc_load_page_table(get_init_page_table());
|
||||
|
||||
assign_processor_id();
|
||||
|
||||
init_smp_processor();
|
||||
@@ -1246,7 +1249,7 @@ void ihk_mc_set_page_fault_handler(void (*h)(void *, uint64_t, void *))
|
||||
}
|
||||
|
||||
extern char trampoline_code_data[], trampoline_code_data_end[];
|
||||
struct page_table *get_init_page_table(void);
|
||||
struct page_table *get_boot_page_table(void);
|
||||
unsigned long get_transit_page_table(void);
|
||||
|
||||
/* reusable, but not reentrant */
|
||||
@@ -1270,9 +1273,10 @@ void ihk_mc_boot_cpu(int cpuid, unsigned long pc)
|
||||
memcpy(p, trampoline_code_data,
|
||||
trampoline_code_data_end - trampoline_code_data);
|
||||
|
||||
p[1] = (unsigned long)virt_to_phys(get_init_page_table());
|
||||
p[1] = (unsigned long)virt_to_phys(get_boot_page_table());
|
||||
p[2] = (unsigned long)setup_x86_ap;
|
||||
p[3] = pc;
|
||||
p[4] = (unsigned long)get_x86_cpu_local_kstack(cpuid);
|
||||
p[6] = (unsigned long)get_transit_page_table();
|
||||
if (!p[6]) {
|
||||
p[6] = p[1];
|
||||
|
||||
@@ -50,6 +50,7 @@ struct x86_cpu_local_variables {
|
||||
|
||||
struct x86_cpu_local_variables *get_x86_cpu_local_variable(int id);
|
||||
struct x86_cpu_local_variables *get_x86_this_cpu_local(void);
|
||||
void *get_x86_cpu_local_kstack(int id);
|
||||
void *get_x86_this_cpu_kstack(void);
|
||||
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@ struct x86_cpu_local_variables *get_x86_cpu_local_variable(int id)
|
||||
((char *)locals + (LOCALS_SPAN * id));
|
||||
}
|
||||
|
||||
static void *get_x86_cpu_local_kstack(int id)
|
||||
void *get_x86_cpu_local_kstack(int id)
|
||||
{
|
||||
return ((char *)locals + (LOCALS_SPAN * (id + 1)));
|
||||
}
|
||||
|
||||
@@ -108,6 +108,7 @@ struct page_table {
|
||||
};
|
||||
|
||||
static struct page_table *init_pt;
|
||||
static struct page_table *boot_pt;
|
||||
static int init_pt_loaded = 0;
|
||||
static ihk_spinlock_t init_pt_lock;
|
||||
|
||||
@@ -2519,6 +2520,11 @@ struct page_table *get_init_page_table(void)
|
||||
return init_pt;
|
||||
}
|
||||
|
||||
struct page_table *get_boot_page_table(void)
|
||||
{
|
||||
return boot_pt;
|
||||
}
|
||||
|
||||
static unsigned long fixed_virt;
|
||||
static void init_fixed_area(struct page_table *pt)
|
||||
{
|
||||
@@ -2695,10 +2701,18 @@ void init_page_table(void)
|
||||
init_normal_area(init_pt);
|
||||
init_linux_kernel_mapping(init_pt);
|
||||
init_fixed_area(init_pt);
|
||||
init_low_area(init_pt);
|
||||
init_text_area(init_pt);
|
||||
init_vsyscall_area(init_pt);
|
||||
|
||||
/* boot page table: needs zero mapping in order to execute the next
|
||||
* instruction that jumps into regular regions
|
||||
*/
|
||||
boot_pt = ihk_mc_alloc_pages(1, IHK_MC_AP_CRITICAL);
|
||||
memcpy(boot_pt, init_pt, sizeof(*boot_pt));
|
||||
init_low_area(boot_pt);
|
||||
if (memcmp(init_pt, boot_pt, sizeof(*init_pt)) == 0)
|
||||
panic("init low area for boot pt did not affect toplevel entry");
|
||||
|
||||
load_page_table(init_pt);
|
||||
init_pt_loaded = 1;
|
||||
kprintf("Page table is now at 0x%lx\n", init_pt);
|
||||
|
||||
Reference in New Issue
Block a user