Merge branch 'postk_master' into development

* Merge 53e436ae7db1ed457692dbe16ccb15511aa6bc64
* Only arm64 stuff is left

Change-Id: I6b79de1f659fa61e75f44811b639d41f9a37d6cc
Author: Masamichi Takagi, 2019-01-09 12:01:04 +09:00
Committed by: Dominique Martinet
parent d4d78e9c61
commit 25ef4e9261
12 changed files with 219 additions and 59 deletions

View File

@ -24,6 +24,7 @@ STATIC_ASSERT(offsetof(struct pt_regs, sp) == S_SP);
STATIC_ASSERT(offsetof(struct pt_regs, pc) == S_PC);
STATIC_ASSERT(offsetof(struct pt_regs, pstate) == S_PSTATE);
STATIC_ASSERT(offsetof(struct pt_regs, orig_x0) == S_ORIG_X0);
STATIC_ASSERT(offsetof(struct pt_regs, orig_pc) == S_ORIG_PC);
STATIC_ASSERT(offsetof(struct pt_regs, syscallno) == S_SYSCALLNO);
STATIC_ASSERT(sizeof(struct pt_regs) == S_FRAME_SIZE);

View File

@ -5,6 +5,7 @@
#include <asm-offsets.h>
#include <esr.h>
#include <thread_info.h>
#include <asm-syscall.h>
/*
* Bad Abort numbers
@ -77,6 +78,7 @@
.macro kernel_exit, el, need_enable_step = 0
.if \el == 0
bl check_sig_pending
bl check_need_resched // check whether rescheduling is needed
mov x0, #0
mov x1, sp
@ -87,6 +89,9 @@
mov x2, #0
bl check_signal_irq_disabled // check whether the signal is delivered (for kernel_exit)
.endif
.if \el == 1
bl check_sig_pending
.endif
disable_irq x1 // disable interrupts
.if \need_enable_step == 1
ldr x1, [tsk, #TI_FLAGS]
@ -367,7 +372,12 @@ el0_sync:
b el0_inv
el0_svc:
uxtw scno, w8 // syscall number in w8
stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
cmp scno, #__NR_rt_sigreturn
b.eq 1f
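// for rt_sigreturn, leave orig_x0/orig_pc untouched so they still hold
// the values recorded when the interrupted syscall entered the kernel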
str x0, [sp, #S_ORIG_X0] // save the original x0
ldr x16, [sp, #S_PC]
str x16, [sp, #S_ORIG_PC] // save the original pc
1: str scno, [sp, #S_SYSCALLNO] // save syscall number
enable_nmi
enable_dbg_and_irq x0
adrp x16, __arm64_syscall_handler

View File

@ -15,8 +15,9 @@
#define S_PC 0x100 /* offsetof(struct pt_regs, pc) */
#define S_PSTATE 0x108 /* offsetof(struct pt_regs, pstate) */
#define S_ORIG_X0 0x110 /* offsetof(struct pt_regs, orig_x0) */
#define S_SYSCALLNO 0x118 /* offsetof(struct pt_regs, syscallno) */
#define S_FRAME_SIZE 0x120 /* sizeof(struct pt_regs) */
#define S_ORIG_PC 0x118 /* offsetof(struct pt_regs, orig_pc) */
#define S_SYSCALLNO 0x120 /* offsetof(struct pt_regs, syscallno) */
#define S_FRAME_SIZE 0x130 /* sizeof(struct pt_regs), must be 16-byte aligned */
#define CPU_INFO_SETUP 0x10 /* offsetof(struct cpu_info, cpu_setup) */
#define CPU_INFO_SZ 0x18 /* sizeof(struct cpu_info) */

View File

@ -0,0 +1,19 @@
/* asm-syscall.h COPYRIGHT FUJITSU LIMITED 2018 */
#ifndef __HEADER_ARM64_ASM_SYSCALL_H
#define __HEADER_ARM64_ASM_SYSCALL_H
#ifdef __ASSEMBLY__
#define DECLARATOR(number, name) .equ __NR_##name, number
#define SYSCALL_HANDLED(number, name) DECLARATOR(number, name)
#define SYSCALL_DELEGATED(number, name) DECLARATOR(number, name)
#include <syscall_list.h>
#undef DECLARATOR
#undef SYSCALL_HANDLED
#undef SYSCALL_DELEGATED
#endif /* __ASSEMBLY__ */
#endif /* !__HEADER_ARM64_ASM_SYSCALL_H */
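The new header lets assembly sources resolve syscall numbers by name. As a rough illustration of the expansion (the rt_sigreturn number 139 is the usual arm64 value and is shown here only as an example, not taken from this diff):

/* If syscall_list.h contains, for instance:
 *     SYSCALL_HANDLED(139, rt_sigreturn)
 * then, under __ASSEMBLY__, DECLARATOR turns it into the assembler symbol
 *     .equ __NR_rt_sigreturn, 139
 * which is what allows entry.S above to compare the syscall number with
 * "cmp scno, #__NR_rt_sigreturn".
 */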

View File

@ -27,7 +27,9 @@ struct pt_regs {
};
};
unsigned long orig_x0;
unsigned long orig_pc;
unsigned long syscallno;
unsigned long __padding;
};
typedef struct pt_regs ihk_mc_user_context_t;
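As a quick sanity check of the new offsets, here is a minimal compile-time sketch (not part of the diff; it assumes the union above flattens to 31 general-purpose registers plus sp, pc and pstate on an LP64 target, matching asm-offsets.h):

#include <stddef.h>

/* illustrative stand-in for the layout, not the real pt_regs */
struct pt_regs_sketch {
	unsigned long regs[31];  /* x0..x30, 0x000..0x0f0 */
	unsigned long sp;        /* 0x0f8 */
	unsigned long pc;        /* 0x100 */
	unsigned long pstate;    /* 0x108 */
	unsigned long orig_x0;   /* 0x110 */
	unsigned long orig_pc;   /* 0x118, new in this commit */
	unsigned long syscallno; /* 0x120 */
	unsigned long __padding; /* 0x128, keeps the frame a multiple of 16 */
};

_Static_assert(offsetof(struct pt_regs_sketch, orig_pc) == 0x118, "S_ORIG_PC");
_Static_assert(offsetof(struct pt_regs_sketch, syscallno) == 0x120, "S_SYSCALLNO");
_Static_assert(sizeof(struct pt_regs_sketch) == 0x130, "S_FRAME_SIZE");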

View File

@ -824,7 +824,7 @@ static void setup_l2(translation_table_t *tt,
eidx = PTL2_ENTRIES - 1;
} else {
// if base_end falls within the range covered by this table, compute the end index
virt_end = (unsigned long)phys_to_virt(base_end);
virt_end = (unsigned long)phys_to_virt(base_end - 1);
eidx = ptl2_index(virt_end);
}
@ -847,7 +847,6 @@ static void setup_l2(translation_table_t *tt,
}
}
static inline void setup_middle_level(translation_table_t *tt, unsigned long base_start, unsigned long base_end,
setup_normal_area_t setup, int shift, unsigned long pgsize, int entries, int level)
{
@ -870,7 +869,7 @@ static inline void setup_middle_level(translation_table_t *tt, unsigned long bas
eidx = entries - 1;
} else {
// if base_end falls within the range covered by this table, compute the end index
virt_end = (unsigned long)phys_to_virt(base_end);
virt_end = (unsigned long)phys_to_virt(base_end - 1);
eidx = ptl_index(virt_end, level);
}
@ -2357,6 +2356,9 @@ static int clear_range(struct page_table *pt, struct process_vm *vm,
dkprintf("%s: %p,%lx,%lx,%d,%p\n",
__func__, pt, start, end, free_physical, memobj);
dkprintf("%s: %p,%lx,%lx,%d,%p\n",
__func__, pt, start, end, free_physical, memobj);
if ((start < vm->region.user_start)
|| (vm->region.user_end < end)
|| (end <= start)) {

View File

@ -17,6 +17,7 @@
#include <syscall.h>
#include <debug.h>
void terminate_mcexec(int, int);
extern void ptrace_report_signal(struct thread *thread, int sig);
extern void clear_single_step(struct thread *thread);
void terminate(int, int);
@ -50,56 +51,54 @@ uintptr_t debug_constants[] = {
-1,
};
static ihk_spinlock_t cpuid_head_lock = SPIN_LOCK_UNLOCKED;
static int cpuid_head = 1;
extern int num_processors;
int obtain_clone_cpuid(cpu_set_t *cpu_set, int use_last)
{
int min_queue_len = -1;
int i, min_cpu = -1;
int cpu, min_cpu = -1, uti_cpu = -1;
unsigned long irqstate;
irqstate = ihk_mc_spinlock_lock(&runq_reservation_lock);
/* cpu_head lock */
ihk_mc_spinlock_lock_noirq(&cpuid_head_lock);
/* Find the first allowed core with the shortest run queue */
for (i = 0; i < num_processors; cpuid_head++, i++) {
for (cpu = 0; cpu < num_processors; ++cpu) {
struct cpu_local_var *v;
/* when cpuid_head reaches num_processors, wrap back to the BSP */
if (cpuid_head >= num_processors) {
cpuid_head = 0;
}
if (!CPU_ISSET(cpu, cpu_set))
continue;
if (!CPU_ISSET(cpuid_head, cpu_set)) continue;
v = get_cpu_local_var(cpuid_head);
v = get_cpu_local_var(cpu);
ihk_mc_spinlock_lock_noirq(&v->runq_lock);
dkprintf("%s: cpu=%d,runq_len=%d,runq_reserved=%d\n",
__func__, cpuid_head, v->runq_len, v->runq_reserved);
__func__, cpu, v->runq_len, v->runq_reserved);
if (min_queue_len == -1 ||
v->runq_len + v->runq_reserved < min_queue_len) {
min_queue_len = v->runq_len + v->runq_reserved;
min_cpu = cpuid_head;
min_cpu = cpu;
}
ihk_mc_spinlock_unlock_noirq(&v->runq_lock);
if (min_queue_len == 0) {
cpuid_head++;
break;
/* Record the last CPU tied with the current minimum */
if (min_cpu != cpu &&
v->runq_len + v->runq_reserved == min_queue_len) {
uti_cpu = cpu;
}
dkprintf("%s: cpu=%d,runq_len=%d,runq_reserved=%d,min_cpu=%d,uti_cpu=%d\n",
__func__, cpu, v->runq_len, v->runq_reserved,
min_cpu, uti_cpu);
ihk_mc_spinlock_unlock_noirq(&v->runq_lock);
#if 0
if (min_queue_len == 0)
break;
#endif
}
/* cpu_head unlock */
ihk_mc_spinlock_unlock_noirq(&cpuid_head_lock);
min_cpu = use_last ? uti_cpu : min_cpu;
if (min_cpu != -1) {
if (get_cpu_local_var(min_cpu)->status != CPU_STATUS_RESERVED)
get_cpu_local_var(min_cpu)->status = CPU_STATUS_RESERVED;
get_cpu_local_var(min_cpu)->status =
CPU_STATUS_RESERVED;
__sync_fetch_and_add(&get_cpu_local_var(min_cpu)->runq_reserved,
1);
}
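In short, the rewritten loop scans every allowed CPU, keeps the one with the shortest run queue (min_cpu), and also remembers the last CPU whose queue length equals the minimum seen so far (uti_cpu), which use_last then prefers. A minimal standalone sketch of that policy (the name pick_clone_cpu is made up for illustration; locking, cpu_set filtering and runq_reserved accounting are omitted):

#include <stdio.h>

/* pick the CPU with the shortest queue, or the last tie when use_last */
static int pick_clone_cpu(const int *queue_len, int ncpu, int use_last)
{
	int cpu, min_cpu = -1, uti_cpu = -1, min_len = -1;

	for (cpu = 0; cpu < ncpu; cpu++) {
		if (min_len == -1 || queue_len[cpu] < min_len) {
			min_len = queue_len[cpu];
			min_cpu = cpu;
		}
		/* record the last CPU tied with the current minimum */
		if (min_cpu != cpu && queue_len[cpu] == min_len)
			uti_cpu = cpu;
	}
	return use_last ? uti_cpu : min_cpu;
}

int main(void)
{
	int q[] = { 2, 0, 1, 0 };

	/* prints "1 3": CPU 1 is the first minimum, CPU 3 the last tie */
	printf("%d %d\n", pick_clone_cpu(q, 4, 0), pick_clone_cpu(q, 4, 1));
	return 0;
}

As in the diff, uti_cpu can remain -1 (for instance when only one CPU is allowed), in which case the use_last path selects no CPU.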
@ -535,6 +534,8 @@ SYSCALL_DECLARE(rt_sigreturn)
thread->sigmask.__val[0] = ksigsp.uc.uc_sigmask.__val[0];
thread->sigstack.ss_flags = ksigsp.uc.uc_stack.ss_flags;
if(ksigsp.restart){
regs->orig_x0 = regs->regs[0];
regs->orig_pc = regs->pc;
return syscall(ksigsp.syscallno, regs);
}
@ -1109,6 +1110,17 @@ do_signal(unsigned long rc, void *regs0, struct thread *thread, struct sig_pendi
if(regs == NULL){ /* call from syscall */
regs = thread->uctx;
/*
 * When do_signal() is called directly from a syscall,
 * the return value needs to be saved here.
 */
if (rc == -EINTR) {
if (regs->syscallno == __NR_rt_sigtimedwait ||
regs->syscallno == __NR_rt_sigsuspend) {
regs->regs[0] = rc;
}
}
}
else{
rc = regs->regs[0];
@ -1371,12 +1383,16 @@ interrupt_from_user(void *regs0)
void save_syscall_return_value(int num, unsigned long rc)
{
const struct thread *thread = cpu_local_var(current);
/*
 * Save the syscall return value, but only while x0 and pc still hold
 * the values captured at syscall entry (orig_x0/orig_pc).
 */
if (cpu_local_var(current) && cpu_local_var(current)->uctx &&
num != __NR_rt_sigsuspend) {
ihk_mc_syscall_arg0(cpu_local_var(current)->uctx) = rc;
if (thread &&
thread->uctx &&
((thread->uctx->regs[0] == thread->uctx->orig_x0) &&
(thread->uctx->pc == thread->uctx->orig_pc))) {
thread->uctx->regs[0] = rc;
}
}
@ -1454,6 +1470,111 @@ out:
return;
}
static int
check_sig_pending_thread(struct thread *thread)
{
int found = 0;
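/* found == 1: a deliverable signal is pending, interrupt the offloaded
 * syscall; found == 2: a pending signal has no handler, terminate mcexec */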
struct list_head *head;
mcs_rwlock_lock_t *lock;
struct mcs_rwlock_node_irqsave mcs_rw_node;
struct sig_pending *next;
struct sig_pending *pending;
__sigset_t w;
__sigset_t x;
int sig = 0;
struct k_sigaction *k;
struct cpu_local_var *v;
v = get_this_cpu_local_var();
w = thread->sigmask.__val[0];
lock = &thread->sigcommon->lock;
head = &thread->sigcommon->sigpending;
for (;;) {
mcs_rwlock_reader_lock(lock, &mcs_rw_node);
list_for_each_entry_safe(pending, next, head, list) {
for (x = pending->sigmask.__val[0], sig = 0; x;
sig++, x >>= 1)
;
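/* sig is now the 1-based index of the highest set bit of the pending
 * mask, i.e. the signal number when a single signal is pending */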
k = thread->sigcommon->action + sig - 1;
if ((sig != SIGCHLD && sig != SIGURG) ||
(k->sa.sa_handler != SIG_IGN &&
k->sa.sa_handler != NULL)) {
if (!(pending->sigmask.__val[0] & w)) {
if (pending->interrupted == 0) {
pending->interrupted = 1;
found = 1;
if (sig != SIGCHLD &&
sig != SIGURG &&
!k->sa.sa_handler) {
found = 2;
break;
}
}
}
}
}
mcs_rwlock_reader_unlock(lock, &mcs_rw_node);
if (found == 2) {
break;
}
if (lock == &thread->sigpendinglock) {
break;
}
lock = &thread->sigpendinglock;
head = &thread->sigpending;
}
if (found == 2) {
ihk_mc_spinlock_unlock(&v->runq_lock, v->runq_irqstate);
terminate_mcexec(0, sig);
return 1;
}
else if (found == 1) {
ihk_mc_spinlock_unlock(&v->runq_lock, v->runq_irqstate);
interrupt_syscall(thread, 0);
return 1;
}
return 0;
}
void
check_sig_pending(void)
{
struct thread *thread;
struct cpu_local_var *v;
if (clv == NULL)
return;
v = get_this_cpu_local_var();
repeat:
v->runq_irqstate = ihk_mc_spinlock_lock(&v->runq_lock);
list_for_each_entry(thread, &(v->runq), sched_list) {
if (thread == NULL || thread == &cpu_local_var(idle)) {
continue;
}
if (thread->in_syscall_offload == 0) {
continue;
}
if (thread->proc->group_exit_status & 0x0000000100000000L) {
continue;
}
if (check_sig_pending_thread(thread))
goto repeat;
}
ihk_mc_spinlock_unlock(&v->runq_lock, v->runq_irqstate);
}
unsigned long
do_kill(struct thread * thread, int pid, int tid, int sig, siginfo_t *info, int ptracecont)
{
@ -1709,28 +1830,12 @@ done:
if (doint && !(mask & tthread->sigmask.__val[0])) {
int status = tthread->status;
#ifdef POSTK_DEBUG_TEMP_FIX_74 /* interrupt_syscall() timing change */
#ifdef POSTK_DEBUG_TEMP_FIX_48 /* nohost flag missed fix */
if(tthread->proc->status != PS_EXITED)
interrupt_syscall(tthread, 0);
#else /* POSTK_DEBUG_TEMP_FIX_48 */
if(!tthread->proc->nohost)
interrupt_syscall(tthread, 0);
#endif /* POSTK_DEBUG_TEMP_FIX_48 */
#endif /* POSTK_DEBUG_TEMP_FIX_74 */
if (thread != tthread) {
dkprintf("do_kill,ipi,pid=%d,cpu_id=%d\n",
tproc->pid, tthread->cpu_id);
#define IPI_CPU_NOTIFY 0
ihk_mc_interrupt_cpu(tthread->cpu_id, INTRID_CPU_NOTIFY);
}
#ifndef POSTK_DEBUG_TEMP_FIX_74 /* interrupt_syscall() timing change */
if(!tthread->proc->nohost)
interrupt_syscall(tthread, 0);
#endif /* !POSTK_DEBUG_TEMP_FIX_74 */
if (status != PS_RUNNING) {
if(sig == SIGKILL){
/* Wake up the target only when stopped by ptrace-reporting */

View File

@ -8,15 +8,13 @@
#include <cputype.h>
#include <irq.h>
#include <arch-timer.h>
#include <debug.h>
//#define DEBUG_PRINT_TIMER
#ifdef DEBUG_PRINT_TIMER
#define dkprintf kprintf
#define ekprintf kprintf
#else
#define dkprintf(...) do { if (0) kprintf(__VA_ARGS__); } while (0)
#define ekprintf kprintf
#undef DDEBUG_DEFAULT
#define DDEBUG_DEFAULT DDEBUG_PRINT
#endif
static unsigned int per_cpu_timer_val[NR_CPUS] = { 0 };

View File

@ -22,6 +22,29 @@ void vdso_gettimeofday_unused_funcs(void)
UNUSED(xos_is_tchip);
}
extern int __kernel_gettimeofday(struct timeval *tv, void *tz);
static inline void cpu_pause_for_vsyscall(void)
{
asm volatile ("yield" ::: "memory");
}
static inline void vdso_calculate_time_from_tsc(struct timespec *ts,
struct tod_data_s *tod_data)
{
UNUSED(xgetbv);
UNUSED(xsetbv);
UNUSED(rdpmc);
UNUSED(rdmsr);
UNUSED(set_perfctl);
UNUSED(start_perfctr);
UNUSED(stop_perfctr);
UNUSED(clear_perfctl);
UNUSED(set_perfctr);
UNUSED(read_perfctr);
UNUSED(xos_is_tchip);
}
static inline struct tod_data_s *get_tod_data_addr(void)
{
unsigned long addr;
@ -49,7 +72,7 @@ int __kernel_gettimeofday(struct timeval *tv, void *tz)
/* DO it locally if supported */
if (!tz && tod_data->do_local) {
calculate_time_from_tsc(&ats);
vdso_calculate_time_from_tsc(&ats, tod_data);
tv->tv_sec = ats.tv_sec;
tv->tv_usec = ats.tv_nsec / 1000;
@ -112,7 +135,7 @@ int __kernel_clock_gettime(clockid_t clk_id, struct timespec *tp)
/* DO it locally if supported */
if (tod_data->do_local && clk_id == CLOCK_REALTIME) {
calculate_time_from_tsc(&ats);
vdso_calculate_time_from_tsc(&ats, tod_data);
tp->tv_sec = ats.tv_sec;
tp->tv_nsec = ats.tv_nsec;

View File

@ -22,7 +22,7 @@
*/
#include <linkage.h>
#include "asm_syscall.h"
#include <asm-syscall.h>
.text