arm64: Scalable Vector Extension (SVE) support.

Change-Id: I3568687913f583edfaa297d5cf5ac91d319d97e9

Committed by: Masamichi Takagi
Parent: dac99f708c
Commit: 07aa96ef95
@@ -1,4 +1,4 @@
/* assert.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* assert.c COPYRIGHT FUJITSU LIMITED 2015-2019 */

#include <process.h>
#include <list.h>
@@ -53,4 +53,4 @@ STATIC_ASSERT(SVE_PT_FPSIMD_OFFSET == sizeof(struct user_sve_header));
STATIC_ASSERT(SVE_PT_SVE_OFFSET == sizeof(struct user_sve_header));

/* assert for struct arm64_cpu_local_thread member offset define */
STATIC_ASSERT(offsetof(struct arm64_cpu_local_thread, panic_regs) == 160);
STATIC_ASSERT(offsetof(struct arm64_cpu_local_thread, panic_regs) == 168);

@@ -1,7 +1,11 @@
/* coredump.c COPYRIGHT FUJITSU LIMITED 2015-2016 */
/* coredump.c COPYRIGHT FUJITSU LIMITED 2015-2019 */
#include <process.h>
#include <elfcore.h>
#include <string.h>
#include <ptrace.h>
#include <cls.h>

#define align32(x) ((((x) + 3) / 4) * 4)

void arch_fill_prstatus(struct elf_prstatus64 *prstatus, struct thread *thread, void *regs0)
{
@@ -30,3 +34,43 @@ void arch_fill_prstatus(struct elf_prstatus64 *prstatus, struct thread *thread,
/* copy unaligned prstatus addr */
memcpy(prstatus, &tmp_prstatus, sizeof(*prstatus));
}

int arch_get_thread_core_info_size(void)
{
const struct user_regset_view *view = current_user_regset_view();
const struct user_regset *regset = find_regset(view, NT_ARM_SVE);

return sizeof(struct note) + align32(sizeof("LINUX"))
+ regset_size(cpu_local_var(current), regset);
}

void arch_fill_thread_core_info(struct note *head,
struct thread *thread, void *regs)
{
const struct user_regset_view *view = current_user_regset_view();
const struct user_regset *regset = find_regset(view, NT_ARM_SVE);

/* pre saved registers */
save_fp_regs(thread);

if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(thread, regset))) {
int ret;
size_t size = regset_size(thread, regset);
void *namep;
void *descp;

namep = (void *) (head + 1);
descp = namep + align32(sizeof("LINUX"));

ret = regset->get(thread, regset, 0, size, descp, NULL);
if (ret) {
return;
}

head->namesz = sizeof("LINUX");
head->descsz = size;
head->type = NT_ARM_SVE;
memcpy(namep, "LINUX", sizeof("LINUX"));
}
}

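The record emitted above is a standard ELF note: a three-word header (namesz/descsz/type), the name "LINUX" padded to a 4-byte boundary, then descsz bytes of NT_ARM_SVE register data. A minimal user-space sketch of that layout and of the size arithmetic used by arch_get_thread_core_info_size(); the struct note layout and the regset size below are assumptions for illustration only.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Assumed to mirror McKernel's struct note: a plain ELF note header. */
    struct note {
        uint32_t namesz;
        uint32_t descsz;
        uint32_t type;
    };

    #define NT_ARM_SVE 0x405                 /* Linux UAPI note type */
    #define align32(x) ((((x) + 3) / 4) * 4)

    int main(void)
    {
        uint32_t buf[1024];
        size_t regset_bytes = 2320;          /* hypothetical regset_size() result */
        struct note *head = (struct note *)buf;
        char *namep = (char *)(head + 1);
        char *descp = namep + align32(sizeof("LINUX"));

        head->namesz = sizeof("LINUX");
        head->descsz = regset_bytes;
        head->type = NT_ARM_SVE;
        memcpy(namep, "LINUX", sizeof("LINUX"));
        memset(descp, 0, regset_bytes);      /* regset->get() would fill this */

        printf("note size = %zu bytes\n",
               sizeof(struct note) + align32(sizeof("LINUX")) + regset_bytes);
        return 0;
    }
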
@@ -951,7 +951,7 @@ void ihk_mc_boot_cpu(int cpuid, unsigned long pc)
setup_cpu_features();
}

init_sve_vl();
sve_setup();
}

/* for ihk_mc_init_context() */
@@ -1001,9 +1001,10 @@ void ihk_mc_init_context(ihk_mc_kernel_context_t *new_ctx,
const int lcpuid = ihk_mc_get_processor_id();
const unsigned long syscallno = current_pt_regs()->syscallno;
#ifdef CONFIG_ARM64_SVE
const uint16_t orig_sve_vl = current_thread_info()->sve_vl;
const uint16_t orig_sve_vl_onexec = current_thread_info()->sve_vl_onexec;
const uint16_t orig_sve_flags = current_thread_info()->sve_flags;
struct thread_info *ti = current_thread_info();
const unsigned int orig_sve_vl = ti->sve_vl;
const unsigned int orig_sve_vl_onexec = ti->sve_vl_onexec;
const unsigned long orig_sve_flags = ti->sve_flags;
#endif /* CONFIG_ARM64_SVE */

/* get kernel stack address */

@@ -1,4 +1,4 @@
/* fpsimd.c COPYRIGHT FUJITSU LIMITED 2016-2018 */
/* fpsimd.c COPYRIGHT FUJITSU LIMITED 2016-2019 */
#include <thread_info.h>
#include <fpsimd.h>
#include <cpuinfo.h>
@@ -11,6 +11,7 @@
#include <kmalloc.h>
#include <debug.h>
#include <process.h>
#include <bitmap.h>

//#define DEBUG_PRINT_FPSIMD

@@ -21,11 +22,87 @@

#ifdef CONFIG_ARM64_SVE

/* Set of available vector lengths, as vq_to_bit(vq): */
static DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);

/* Maximum supported vector length across all CPUs (initially poisoned) */
int sve_max_vl = -1;

/* Default VL for tasks that don't set it explicitly: */
int sve_default_vl = -1;

/*
* Helpers to translate bit indices in sve_vq_map to VQ values (and
* vice versa). This allows find_next_bit() to be used to find the
* _maximum_ VQ not exceeding a certain value.
*/

static unsigned int vq_to_bit(unsigned int vq)
{
return SVE_VQ_MAX - vq;
}

static unsigned int bit_to_vq(unsigned int bit)
{
if (bit >= SVE_VQ_MAX) {
bit = SVE_VQ_MAX - 1;
}
return SVE_VQ_MAX - bit;
}

/*
* All vector length selection from userspace comes through here.
* We're on a slow path, so some sanity-checks are included.
* If things go wrong there's a bug somewhere, but try to fall back to a
* safe choice.
*/
static unsigned int find_supported_vector_length(unsigned int vl)
{
int bit;
int max_vl = sve_max_vl;

if (!sve_vl_valid(vl)) {
vl = SVE_VL_MIN;
}

if (!sve_vl_valid(max_vl)) {
max_vl = SVE_VL_MIN;
}

if (vl > max_vl) {
vl = max_vl;
}

bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
vq_to_bit(sve_vq_from_vl(vl)));
return sve_vl_from_vq(bit_to_vq(bit));
}

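The reversed bit numbering above is what makes find_next_bit() usable for a "largest supported VQ not exceeding the request" search: a supported VQ is recorded at bit (SVE_VQ_MAX - vq), so scanning upward from vq_to_bit(vq) visits vq, vq-1, vq-2, ... in that order and stops at the first one present. A self-contained sketch of the same idea, with a plain array standing in for the kernel bitmap helpers:

    #include <stdbool.h>
    #include <stdio.h>

    #define SVE_VQ_MAX 512

    static bool vq_map[SVE_VQ_MAX];                 /* indexed by vq_to_bit(vq) */

    static unsigned int vq_to_bit(unsigned int vq)  { return SVE_VQ_MAX - vq; }
    static unsigned int bit_to_vq(unsigned int bit) { return SVE_VQ_MAX - bit; }

    /* stand-in for find_next_bit(): first set index >= start */
    static unsigned int next_set(unsigned int start)
    {
        unsigned int i;

        for (i = start; i < SVE_VQ_MAX; i++)
            if (vq_map[i])
                return i;
        return SVE_VQ_MAX;
    }

    int main(void)
    {
        /* pretend the hardware supports VQ 1, 2 and 4 (VL 16, 32, 64 bytes) */
        vq_map[vq_to_bit(1)] = vq_map[vq_to_bit(2)] = vq_map[vq_to_bit(4)] = true;

        /* ask for VQ 3: the scan lands on VQ 2, the largest supported VQ <= 3 */
        printf("VQ %u\n", bit_to_vq(next_set(vq_to_bit(3))));
        return 0;
    }
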
static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
{
unsigned int vq, vl;
unsigned long zcr;

bitmap_zero(map, SVE_VQ_MAX);

zcr = ZCR_EL1_LEN_MASK;
zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;

for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
/* self-syncing */
write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1);
vl = sve_get_vl();
/* skip intervening lengths */
vq = sve_vq_from_vl(vl);
set_bit(vq_to_bit(vq), map);
}
}

void sve_init_vq_map(void)
{
sve_probe_vqs(sve_vq_map);
}

size_t sve_state_size(struct thread const *thread)
{
unsigned int vl = thread->ctx.thread->sve_vl;
@@ -75,19 +152,7 @@ int sve_set_vector_length(struct thread *thread,
{
struct thread_info *ti = thread->ctx.thread;

BUG_ON(thread == cpu_local_var(current) && cpu_local_var(no_preempt) == 0);

/*
* To avoid accidents, forbid setting for individual threads of a
* multithreaded process. User code that knows what it's doing can
* pass PR_SVE_SET_VL_THREAD to override this restriction:
*/
if (!(flags & PR_SVE_SET_VL_THREAD) && get_nr_threads(thread->proc) != 1) {
return -EINVAL;
}
flags &= ~(unsigned long)PR_SVE_SET_VL_THREAD;

if (flags & ~(unsigned long)(PR_SVE_SET_VL_INHERIT |
if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
PR_SVE_SET_VL_ONEXEC)) {
return -EINVAL;
}
@@ -96,13 +161,19 @@ int sve_set_vector_length(struct thread *thread,
return -EINVAL;
}

if (vl > sve_max_vl) {
BUG_ON(!sve_vl_valid(sve_max_vl));
vl = sve_max_vl;
/*
* Clamp to the maximum vector length that VL-agnostic SVE code can
* work with. A flag may be assigned in the future to allow setting
* of larger vector lengths without confusing older software.
*/
if (vl > SVE_VL_ARCH_MAX) {
vl = SVE_VL_ARCH_MAX;
}

if (flags & (PR_SVE_SET_VL_ONEXEC |
PR_SVE_SET_VL_INHERIT)) {
vl = find_supported_vector_length(vl);

if (flags & (PR_SVE_VL_INHERIT |
PR_SVE_SET_VL_ONEXEC)) {
ti->sve_vl_onexec = vl;
} else {
/* Reset VL to system default on next exec: */
@@ -114,39 +185,42 @@ int sve_set_vector_length(struct thread *thread,
goto out;
}

if (vl != ti->sve_vl) {
if ((elf_hwcap & HWCAP_SVE)) {
fp_regs_struct fp_regs;
memset(&fp_regs, 0, sizeof(fp_regs));
if (vl == ti->sve_vl) {
goto out;
}

/* for self at prctl syscall */
if (thread == cpu_local_var(current)) {
save_fp_regs(thread);
clear_fp_regs();
thread_sve_to_fpsimd(thread, &fp_regs);
sve_free(thread);
if ((elf_hwcap & HWCAP_SVE)) {
fp_regs_struct fp_regs;

ti->sve_vl = vl;
memset(&fp_regs, 0, sizeof(fp_regs));

sve_alloc(thread);
thread_fpsimd_to_sve(thread, &fp_regs);
restore_fp_regs(thread);
/* for target thread at ptrace */
} else {
thread_sve_to_fpsimd(thread, &fp_regs);
sve_free(thread);
/* for self at prctl syscall */
if (thread == cpu_local_var(current)) {
save_fp_regs(thread);
clear_fp_regs();
thread_sve_to_fpsimd(thread, &fp_regs);
sve_free(thread);

ti->sve_vl = vl;
ti->sve_vl = vl;

sve_alloc(thread);
thread_fpsimd_to_sve(thread, &fp_regs);
}
sve_alloc(thread);
thread_fpsimd_to_sve(thread, &fp_regs);
restore_fp_regs(thread);
/* for target thread at ptrace */
} else {
thread_sve_to_fpsimd(thread, &fp_regs);
sve_free(thread);

ti->sve_vl = vl;

sve_alloc(thread);
thread_fpsimd_to_sve(thread, &fp_regs);
}
}
ti->sve_vl = vl;

out:
ti->sve_flags = flags & PR_SVE_SET_VL_INHERIT;
ti->sve_flags = flags & PR_SVE_VL_INHERIT;

return 0;
}
@@ -156,44 +230,53 @@ out:
* Encode the current vector length and flags for return.
* This is only required for prctl(): ptrace has separate fields
*/
static int sve_prctl_status(const struct thread_info *ti)
static int sve_prctl_status(unsigned long flags)
{
int ret = ti->sve_vl;
int ret;
struct thread_info *ti = cpu_local_var(current)->ctx.thread;

ret |= ti->sve_flags << 16;
if (flags & PR_SVE_SET_VL_ONEXEC) {
ret = ti->sve_vl_onexec;
}
else {
ret = ti->sve_vl;
}

if (ti->sve_flags & PR_SVE_VL_INHERIT) {
ret |= PR_SVE_VL_INHERIT;
}
return ret;
}

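The return value therefore uses the same encoding as the prctl flags themselves: the selected vector length occupies the low bits covered by PR_SVE_VL_LEN_MASK and PR_SVE_VL_INHERIT (bit 17) is ORed in when inheritance is active. A caller-side sketch of decoding such a value, using the constants from prctl.h in this patch:

    #include <stdio.h>

    #define PR_SVE_VL_LEN_MASK  0xffff
    #define PR_SVE_VL_INHERIT   (1 << 17)

    int main(void)
    {
        /* e.g. what PR_SVE_GET_VL could return for VL = 64 bytes with inherit set */
        int ret = 64 | PR_SVE_VL_INHERIT;

        printf("vl = %d bytes, inherit = %s\n",
               ret & PR_SVE_VL_LEN_MASK,
               (ret & PR_SVE_VL_INHERIT) ? "yes" : "no");
        return 0;
    }
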
/* @ref.impl arch/arm64/kernel/fpsimd.c::sve_set_task_vl */
int sve_set_thread_vl(struct thread *thread, const unsigned long vector_length,
const unsigned long flags)
int sve_set_thread_vl(unsigned long arg)
{
unsigned long vl, flags;
int ret;

if (!(elf_hwcap & HWCAP_SVE)) {
vl = arg & PR_SVE_VL_LEN_MASK;
flags = arg & ~vl;

/* Instead of system_supports_sve() */
if (unlikely(!(elf_hwcap & HWCAP_SVE))) {
return -EINVAL;
}

BUG_ON(thread != cpu_local_var(current));

preempt_disable();
ret = sve_set_vector_length(thread, vector_length, flags);
preempt_enable();

ret = sve_set_vector_length(cpu_local_var(current), vl, flags);
if (ret) {
return ret;
}
return sve_prctl_status(thread->ctx.thread);
return sve_prctl_status(flags);
}

/* @ref.impl arch/arm64/kernel/fpsimd.c::sve_get_ti_vl */
int sve_get_thread_vl(const struct thread *thread)
int sve_get_thread_vl(void)
{
if (!(elf_hwcap & HWCAP_SVE)) {
/* Instead of system_supports_sve() */
if (unlikely(!(elf_hwcap & HWCAP_SVE))) {
return -EINVAL;
}
return sve_prctl_status(thread->ctx.thread);
return sve_prctl_status(0);
}

void do_sve_acc(unsigned int esr, struct pt_regs *regs)
@@ -203,25 +286,48 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
panic("");
}

void init_sve_vl(void)
void sve_setup(void)
{
extern unsigned long ihk_param_default_vl;
uint64_t zcr;

/* Instead of system_supports_sve() */
if (unlikely(!(elf_hwcap & HWCAP_SVE))) {
return;
}

zcr = read_system_reg(SYS_ZCR_EL1);
BUG_ON(((zcr & ZCR_EL1_LEN_MASK) + 1) * 16 > sve_max_vl);
/* init sve_vq_map bitmap */
sve_init_vq_map();

/*
* The SVE architecture mandates support for 128-bit vectors,
* so sve_vq_map must have at least SVE_VQ_MIN set.
* If something went wrong, at least try to patch it up:
*/
if (!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)) {
set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
}

zcr = read_system_reg(SYS_ZCR_EL1);
sve_max_vl = sve_vl_from_vq((zcr & ZCR_EL1_LEN_MASK) + 1);

/*
* Sanity-check that the max VL we determined through CPU features
* corresponds properly to sve_vq_map. If not, do our best:
*/
if (sve_max_vl != find_supported_vector_length(sve_max_vl)) {
sve_max_vl = find_supported_vector_length(sve_max_vl);
}

sve_max_vl = ((zcr & ZCR_EL1_LEN_MASK) + 1) * 16;
sve_default_vl = ihk_param_default_vl;

if (sve_default_vl == 0) {
kprintf("SVE: Getting default VL = 0 from HOST-Linux.\n");
sve_default_vl = sve_max_vl > 64 ? 64 : sve_max_vl;
kprintf("SVE: Using default vl(%d byte).\n", sve_default_vl);
if (ihk_param_default_vl !=
find_supported_vector_length(ihk_param_default_vl)) {
kprintf("SVE: Getting unsupported default VL = %d "
"from HOST-Linux.\n", sve_default_vl);
sve_default_vl = find_supported_vector_length(64);
kprintf("SVE: Using default vl(%d byte).\n",
sve_default_vl);
}

kprintf("SVE: maximum available vector length %u bytes per vector\n",
@@ -232,7 +338,7 @@ void init_sve_vl(void)

#else /* CONFIG_ARM64_SVE */

void init_sve_vl(void)
void sve_setup(void)
{
/* nothing to do. */
}

@@ -1,4 +1,4 @@
/* fpsimd.h COPYRIGHT FUJITSU LIMITED 2016-2017 */
/* fpsimd.h COPYRIGHT FUJITSU LIMITED 2016-2019 */
#ifndef __HEADER_ARM64_COMMON_FPSIMD_H
#define __HEADER_ARM64_COMMON_FPSIMD_H

@@ -46,12 +46,15 @@ extern void sve_alloc(struct thread *thread);
extern void sve_save_state(void *state, unsigned int *pfpsr);
extern void sve_load_state(void const *state, unsigned int const *pfpsr, unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
extern int sve_set_thread_vl(struct thread *thread, const unsigned long vector_length, const unsigned long flags);
extern int sve_get_thread_vl(const struct thread *thread);
extern int sve_set_thread_vl(unsigned long arg);
extern int sve_get_thread_vl(void);
extern int sve_set_vector_length(struct thread *thread, unsigned long vl, unsigned long flags);

#define SVE_SET_VL(thread, vector_length, flags) sve_set_thread_vl(thread, vector_length, flags)
#define SVE_GET_VL(thread) sve_get_thread_vl(thread)
#define SVE_SET_VL(arg) sve_set_thread_vl(arg)
#define SVE_GET_VL() sve_get_thread_vl()

/* Maximum VL that SVE VL-agnostic software can transparently support */
#define SVE_VL_ARCH_MAX 0x100

#else /* CONFIG_ARM64_SVE */

@@ -80,12 +83,12 @@ static int sve_set_vector_length(struct thread *thread, unsigned long vl, unsign
}

/* for prctl syscall */
#define SVE_SET_VL(a,b,c) (-EINVAL)
#define SVE_GET_VL(a) (-EINVAL)
#define SVE_SET_VL(a) (-EINVAL)
#define SVE_GET_VL() (-EINVAL)

#endif /* CONFIG_ARM64_SVE */

extern void init_sve_vl(void);
extern void sve_setup(void);
extern void fpsimd_save_state(struct fpsimd_state *state);
extern void fpsimd_load_state(struct fpsimd_state *state);
extern void thread_fpsimd_save(struct thread *thread);

@@ -1,4 +1,4 @@
/* prctl.h COPYRIGHT FUJITSU LIMITED 2017 */
/* prctl.h COPYRIGHT FUJITSU LIMITED 2017-2019 */
#ifndef __HEADER_ARM64_COMMON_PRCTL_H
#define __HEADER_ARM64_COMMON_PRCTL_H

@@ -6,15 +6,12 @@
#define PR_GET_THP_DISABLE 42

/* arm64 Scalable Vector Extension controls */
#define PR_SVE_SET_VL 48 /* set task vector length */
#define PR_SVE_SET_VL_THREAD (1 << 1) /* set just this thread */
#define PR_SVE_SET_VL_INHERIT (1 << 2) /* inherit across exec */
#define PR_SVE_SET_VL_ONEXEC (1 << 3) /* defer effect until exec */

#define PR_SVE_GET_VL 49 /* get task vector length */
/* Decode helpers for the return value from PR_SVE_GET_VL: */
#define PR_SVE_GET_VL_LEN(ret) ((ret) & 0x3fff) /* vector length */
#define PR_SVE_GET_VL_INHERIT (PR_SVE_SET_VL_INHERIT << 16)
/* For convenience, PR_SVE_SET_VL returns the result in the same encoding */
/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */
#define PR_SVE_SET_VL 50 /* set task vector length */
# define PR_SVE_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
#define PR_SVE_GET_VL 51 /* get task vector length */
/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */
# define PR_SVE_VL_LEN_MASK 0xffff
# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */

#endif /* !__HEADER_ARM64_COMMON_PRCTL_H */

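These numbers line up with the upstream Linux prctl interface (PR_SVE_SET_VL = 50, PR_SVE_GET_VL = 51, VL in the low 16 bits, flags above), so an application can drive the McKernel implementation with an ordinary prctl() call. A hedged user-space sketch; the 32-byte VL is only an example and error handling is minimal:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SVE_SET_VL
    #define PR_SVE_SET_VL        50
    #define PR_SVE_GET_VL        51
    #define PR_SVE_VL_LEN_MASK   0xffff
    #define PR_SVE_VL_INHERIT    (1 << 17)
    #define PR_SVE_SET_VL_ONEXEC (1 << 18)
    #endif

    int main(void)
    {
        /* request a 32-byte (256-bit) vector length, inherited across exec */
        int ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT, 0, 0, 0);

        if (ret < 0) {
            perror("PR_SVE_SET_VL");
            return 1;
        }

        ret = prctl(PR_SVE_GET_VL, 0, 0, 0, 0);
        printf("current VL: %d bytes\n", ret & PR_SVE_VL_LEN_MASK);
        return 0;
    }
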
@@ -1,4 +1,4 @@
/* ptrace.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
/* ptrace.h COPYRIGHT FUJITSU LIMITED 2015-2019 */
#ifndef __HEADER_ARM64_COMMON_PTRACE_H
#define __HEADER_ARM64_COMMON_PTRACE_H

@@ -46,6 +46,7 @@

#ifndef __ASSEMBLY__

#include <lwk/compiler.h>
#include <ihk/types.h>

struct user_hwdebug_state {
@@ -78,6 +79,70 @@ struct user_sve_header {
uint16_t __reserved;
};

enum aarch64_regset {
REGSET_GPR,
REGSET_FPR,
REGSET_TLS,
REGSET_HW_BREAK,
REGSET_HW_WATCH,
REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
REGSET_SVE,
#endif /* CONFIG_ARM64_SVE */
};

struct thread;
struct user_regset;

typedef int user_regset_active_fn(struct thread *target,
const struct user_regset *regset);

typedef long user_regset_get_fn(struct thread *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf);

typedef long user_regset_set_fn(struct thread *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf);

typedef int user_regset_writeback_fn(struct thread *target,
const struct user_regset *regset,
int immediate);

typedef unsigned int user_regset_get_size_fn(struct thread *target,
const struct user_regset *regset);

struct user_regset {
user_regset_get_fn *get;
user_regset_set_fn *set;
user_regset_active_fn *active;
user_regset_writeback_fn *writeback;
user_regset_get_size_fn *get_size;
unsigned int n;
unsigned int size;
unsigned int align;
unsigned int bias;
unsigned int core_note_type;
};

struct user_regset_view {
const char *name;
const struct user_regset *regsets;
unsigned int n;
uint32_t e_flags;
uint16_t e_machine;
uint8_t ei_osabi;
};

extern const struct user_regset_view *current_user_regset_view(void);
extern const struct user_regset *find_regset(
const struct user_regset_view *view,
unsigned int type);
extern unsigned int regset_size(struct thread *target,
const struct user_regset *regset);

/* Definitions for user_sve_header.flags: */
#define SVE_PT_REGS_MASK (1 << 0)

@@ -85,7 +150,7 @@ struct user_sve_header {
#define SVE_PT_REGS_SVE SVE_PT_REGS_MASK

#define SVE_PT_VL_THREAD PR_SVE_SET_VL_THREAD
#define SVE_PT_VL_INHERIT PR_SVE_SET_VL_INHERIT
#define SVE_PT_VL_INHERIT PR_SVE_VL_INHERIT
#define SVE_PT_VL_ONEXEC PR_SVE_SET_VL_ONEXEC

/*
@@ -99,7 +164,9 @@ struct user_sve_header {
*/

/* Offset from the start of struct user_sve_header to the register data */
#define SVE_PT_REGS_OFFSET ((sizeof(struct sve_context) + 15) / 16 * 16)
#define SVE_PT_REGS_OFFSET \
((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
/ SVE_VQ_BYTES * SVE_VQ_BYTES)

/*
* The register data content and layout depends on the value of the
@@ -174,8 +241,10 @@ struct user_sve_header {
#define SVE_PT_SVE_FFR_OFFSET(vq) \
__SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq))

#define SVE_PT_SVE_FPSR_OFFSET(vq) \
((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + 15) / 16 * 16)
#define SVE_PT_SVE_FPSR_OFFSET(vq) \
((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + \
(SVE_VQ_BYTES - 1)) \
/ SVE_VQ_BYTES * SVE_VQ_BYTES)
#define SVE_PT_SVE_FPCR_OFFSET(vq) \
(SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE)

@@ -184,9 +253,10 @@ struct user_sve_header {
* 128-bit boundary.
*/

#define SVE_PT_SVE_SIZE(vq, flags) \
((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE - \
SVE_PT_SVE_OFFSET + 15) / 16 * 16)
#define SVE_PT_SVE_SIZE(vq, flags) \
((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE \
- SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1)) \
/ SVE_VQ_BYTES * SVE_VQ_BYTES)

#define SVE_PT_SIZE(vq, flags) \
(((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \

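A ptrace consumer is expected to read struct user_sve_header first and branch on its flags: with SVE_PT_REGS_SVE set the payload uses the full SVE layout described by the SVE_PT_SVE_* macros, otherwise only the FPSIMD view (which, per assert.c in this patch, starts right after the header) is present, while size and max_size tell it how much data this thread provides and how large a reusable buffer must be. A minimal sketch of that branch; the field layout is spelled out locally and assumed to match the Linux UAPI ordering:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed to match the layout used here (same as the Linux UAPI header). */
    struct user_sve_header {
        uint32_t size;       /* bytes of valid data for this thread */
        uint32_t max_size;   /* upper bound for any VL on this system */
        uint16_t vl;         /* current vector length in bytes */
        uint16_t max_vl;     /* maximum supported vector length */
        uint16_t flags;
        uint16_t __reserved;
    };

    #define SVE_PT_REGS_MASK (1 << 0)
    #define SVE_PT_REGS_SVE  SVE_PT_REGS_MASK

    static void describe(const struct user_sve_header *h)
    {
        if ((h->flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE)
            printf("full SVE dump, vl=%u, %u bytes\n", (unsigned)h->vl, h->size);
        else
            printf("FPSIMD-only dump, vl=%u, %u bytes\n", (unsigned)h->vl, h->size);
    }

    int main(void)
    {
        struct user_sve_header h = {
            .size = 2336, .max_size = 8752,   /* illustrative values only */
            .vl = 64, .max_vl = 256, .flags = SVE_PT_REGS_SVE,
        };

        describe(&h);
        return 0;
    }
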
@@ -1,4 +1,4 @@
/* signal.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* signal.h COPYRIGHT FUJITSU LIMITED 2015-2019 */
#ifndef __HEADER_ARM64_COMMON_SIGNAL_H
#define __HEADER_ARM64_COMMON_SIGNAL_H

@@ -298,6 +298,7 @@ struct extra_context {
struct _aarch64_ctx head;
void *data; /* 16-byte aligned pointer to the extra space */
uint32_t size; /* size in bytes of the extra space */
uint32_t __reserved[3];
};

#define SVE_MAGIC 0x53564501
@@ -318,19 +319,25 @@ struct sve_context {
* The SVE architecture leaves space for future expansion of the
* vector length beyond its initial architectural limit of 2048 bits
* (16 quadwords).
*
* See linux/Documentation/arm64/sve.txt for a description of the VL/VQ
* terminology.
*/
#define SVE_VQ_MIN 1
#define SVE_VQ_MAX 0x200
#define SVE_VQ_BYTES 16 /* number of bytes per quadword */

#define SVE_VL_MIN (SVE_VQ_MIN * 0x10)
#define SVE_VL_MAX (SVE_VQ_MAX * 0x10)
#define SVE_VQ_MIN 1
#define SVE_VQ_MAX 512

#define SVE_VL_MIN (SVE_VQ_MIN * SVE_VQ_BYTES)
#define SVE_VL_MAX (SVE_VQ_MAX * SVE_VQ_BYTES)

#define SVE_NUM_ZREGS 32
#define SVE_NUM_PREGS 16

#define sve_vl_valid(vl) \
((vl) % 0x10 == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
#define sve_vq_from_vl(vl) ((vl) / 0x10)
((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
#define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES)
#define sve_vl_from_vq(vq) ((vq) * SVE_VQ_BYTES)

/*
* The total size of meaningful data in the SVE context in bytes,
@@ -365,11 +372,13 @@ struct sve_context {
* Additional data might be appended in the future.
*/

#define SVE_SIG_ZREG_SIZE(vq) ((uint32_t)(vq) * 16)
#define SVE_SIG_PREG_SIZE(vq) ((uint32_t)(vq) * 2)
#define SVE_SIG_ZREG_SIZE(vq) ((uint32_t)(vq) * SVE_VQ_BYTES)
#define SVE_SIG_PREG_SIZE(vq) ((uint32_t)(vq) * (SVE_VQ_BYTES / 8))
#define SVE_SIG_FFR_SIZE(vq) SVE_SIG_PREG_SIZE(vq)

#define SVE_SIG_REGS_OFFSET ((sizeof(struct sve_context) + 15) / 16 * 16)
#define SVE_SIG_REGS_OFFSET \
((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
/ SVE_VQ_BYTES * SVE_VQ_BYTES)

#define SVE_SIG_ZREGS_OFFSET SVE_SIG_REGS_OFFSET
#define SVE_SIG_ZREG_OFFSET(vq, n) \

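These macros are the whole VL/VQ model: a quadword is 16 bytes, a valid vector length is a whole number of quadwords, a Z register holds VQ quadwords, and a P register or the FFR holds VQ*2 bytes (SVE_VQ_BYTES / 8 predicate bytes per quadword). A small self-contained check of those relations for a few vector lengths, reusing the definitions above:

    #include <stdio.h>

    #define SVE_VQ_BYTES 16
    #define SVE_VQ_MIN   1
    #define SVE_VQ_MAX   512

    #define SVE_VL_MIN (SVE_VQ_MIN * SVE_VQ_BYTES)
    #define SVE_VL_MAX (SVE_VQ_MAX * SVE_VQ_BYTES)

    #define sve_vl_valid(vl) \
        ((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
    #define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES)
    #define sve_vl_from_vq(vq) ((vq) * SVE_VQ_BYTES)

    int main(void)
    {
        int vls[] = { 16, 32, 64, 24, 8192 };
        unsigned int i;

        for (i = 0; i < sizeof(vls) / sizeof(vls[0]); i++) {
            int vl = vls[i];

            if (!sve_vl_valid(vl)) {
                printf("VL %4d bytes: invalid\n", vl);
                continue;
            }
            printf("VL %4d bytes: VQ=%d, Zreg=%d bytes, Preg/FFR=%d bytes\n",
                   vl, sve_vq_from_vl(vl),
                   sve_vq_from_vl(vl) * SVE_VQ_BYTES,
                   sve_vq_from_vl(vl) * 2);
        }
        return 0;
    }
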
@@ -1,4 +1,4 @@
/* thread_info.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* thread_info.h COPYRIGHT FUJITSU LIMITED 2015-2019 */
#ifndef __HEADER_ARM64_COMMON_THREAD_INFO_H
#define __HEADER_ARM64_COMMON_THREAD_INFO_H

@@ -46,9 +46,9 @@ struct thread_info {
int cpu; /* cpu */
struct cpu_context cpu_context; /* kernel_context */
void *sve_state; /* SVE registers, if any */
uint16_t sve_vl; /* SVE vector length */
uint16_t sve_vl_onexec; /* SVE vl after next exec */
uint16_t sve_flags; /* SVE related flags */
unsigned int sve_vl; /* SVE vector length */
unsigned int sve_vl_onexec; /* SVE vl after next exec */
unsigned long sve_flags; /* SVE related flags */
unsigned long fault_address; /* fault info */
unsigned long fault_code; /* ESR_EL1 value */
};
@@ -56,7 +56,7 @@ struct thread_info {
/* Flags for sve_flags (intentionally defined to match the prctl flags) */

/* Inherit sve_vl and sve_flags across execve(): */
#define THREAD_VL_INHERIT PR_SVE_SET_VL_INHERIT
#define THREAD_VL_INHERIT PR_SVE_VL_INHERIT

struct arm64_cpu_local_thread {
struct thread_info thread_info;

@@ -1,4 +1,4 @@
/* ptrace.c COPYRIGHT FUJITSU LIMITED 2016-2018 */
/* ptrace.c COPYRIGHT FUJITSU LIMITED 2016-2019 */
#include <errno.h>
#include <debug-monitors.h>
#include <hw_breakpoint.h>
@@ -12,6 +12,7 @@
#include <string.h>
#include <thread_info.h>
#include <debug.h>
#include <ptrace.h>

//#define DEBUG_PRINT_SC

@@ -25,37 +26,6 @@
extern void save_debugreg(unsigned long *debugreg);
extern int interrupt_from_user(void *);

enum aarch64_regset {
REGSET_GPR,
REGSET_FPR,
REGSET_TLS,
REGSET_HW_BREAK,
REGSET_HW_WATCH,
REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
REGSET_SVE,
#endif /* CONFIG_ARM64_SVE */
};

struct user_regset;
typedef long user_regset_get_fn(struct thread *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf);

typedef long user_regset_set_fn(struct thread *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf);

struct user_regset {
user_regset_get_fn *get;
user_regset_set_fn *set;
unsigned int n;
unsigned int size;
unsigned int core_note_type;
};

long ptrace_read_user(struct thread *thread, long addr, unsigned long *value)
{
return -EIO;
@@ -273,6 +243,17 @@ static inline long copy_regset_from_user(struct thread *target,
return regset->set(target, regset, offset, size, NULL, data);
}

unsigned int regset_size(struct thread *target,
const struct user_regset *regset)
{
if (!regset->get_size) {
return regset->n * regset->size;
}
else {
return regset->get_size(target, regset);
}
}

/*
* Bits which are always architecturally RES0 per ARM DDI 0487A.h
* Userspace cannot use these until they have an architectural meaning.
@@ -624,6 +605,48 @@ out:

#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_thread(struct user_sve_header *header,
struct thread *target)
{
unsigned int vq;

memset(header, 0, sizeof(*header));

/* McKernel processes always enable SVE. */
header->flags = SVE_PT_REGS_SVE;

if (target->ctx.thread->sve_flags & SVE_PT_VL_INHERIT) {
header->flags |= SVE_PT_VL_INHERIT;
}

header->vl = target->ctx.thread->sve_vl;
vq = sve_vq_from_vl(header->vl);

header->max_vl = sve_max_vl;
header->size = SVE_PT_SIZE(vq, header->flags);
header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
return ALIGN(header->size, SVE_VQ_BYTES);
}

static unsigned int sve_get_size(struct thread *target,
const struct user_regset *regset)
{
struct user_sve_header header;

/* Instead of system_supports_sve() */
if (unlikely(!(elf_hwcap & HWCAP_SVE))) {
return 0;
}

sve_init_header_from_thread(&header, target);
return sve_size_from_header(&header);
}

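From the tracer's side this regset is reached through the standard PTRACE_GETREGSET request with an iovec, which is what ptrace_regset() further down unpacks; header.max_size (or a generous bound) tells the debugger how big a buffer to pass. A hedged host-side sketch against the ordinary Linux ptrace API, to be called with the pid of a stopped tracee:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    #ifndef NT_ARM_SVE
    #define NT_ARM_SVE 0x405
    #endif

    int read_sve(pid_t pid)
    {
        uint8_t buf[8192];                  /* generous upper bound for this sketch */
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

        if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_SVE, &iov) == -1) {
            perror("PTRACE_GETREGSET(NT_ARM_SVE)");
            return -1;
        }

        /* iov.iov_len is updated to the number of bytes actually written */
        printf("NT_ARM_SVE dump: %zu bytes\n", iov.iov_len);
        return 0;
    }
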
/* read NT_ARM_SVE */
static long sve_get(struct thread *target,
const struct user_regset *regset,
@@ -646,23 +669,9 @@ static long sve_get(struct thread *target,
}

/* Header */
memset(&header, 0, sizeof(header));

header.vl = target->ctx.thread->sve_vl;

BUG_ON(!sve_vl_valid(header.vl));
sve_init_header_from_thread(&header, target);
vq = sve_vq_from_vl(header.vl);

BUG_ON(!sve_vl_valid(sve_max_vl));
header.max_vl = sve_max_vl;

/* McKernel processes always enable SVE. */
header.flags = SVE_PT_REGS_SVE;

header.size = SVE_PT_SIZE(vq, header.flags);
header.max_size = SVE_PT_SIZE(sve_vq_from_vl(header.max_vl),
SVE_PT_REGS_SVE);

ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
0, sizeof(header));
if (ret) {
@@ -676,11 +685,9 @@ static long sve_get(struct thread *target,
*/

/* Otherwise: full SVE case */

start = SVE_PT_SVE_OFFSET;
end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);

BUG_ON(end < start);
BUG_ON(end - start > sve_state_size(target));
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
target->ctx.thread->sve_state,
start, end);
@@ -690,24 +697,18 @@ static long sve_get(struct thread *target,

start = end;
end = SVE_PT_SVE_FPSR_OFFSET(vq);

BUG_ON(end < start);
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
start, end);
if (ret) {
goto out;
}

/*
* Copy fpsr, and fpcr which must follow contiguously in
* struct fpsimd_state:
*/
start = end;
end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;

BUG_ON((char *)(&target->fp_regs->fpcr + 1) <
(char *)&target->fp_regs->fpsr);
BUG_ON(end < start);
BUG_ON((char *)(&target->fp_regs->fpcr + 1) -
(char *)&target->fp_regs->fpsr !=
end - start);

ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->fp_regs->fpsr,
start, end);
@@ -716,9 +717,7 @@ static long sve_get(struct thread *target,
}

start = end;
end = (SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE) + 15) / 16 * 16;

BUG_ON(end < start);
end = sve_size_from_header(&header);
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
start, end);
out:
@@ -762,13 +761,12 @@ static long sve_set(struct thread *target,
* sve_set_vector_length(), which will also validate them for us:
*/
ret = sve_set_vector_length(target, header.vl,
header.flags & ~SVE_PT_REGS_MASK);
((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
if (ret) {
goto out;
}

/* Actual VL set may be less than the user asked for: */
BUG_ON(!sve_vl_valid(target->ctx.thread->sve_vl));
vq = sve_vq_from_vl(target->ctx.thread->sve_vl);

/* Registers: FPSIMD-only case */
@@ -779,11 +777,19 @@ static long sve_set(struct thread *target,
}

/* Otherwise: full SVE case */

/*
* If setting a different VL from the requested VL and there is
* register data, the data layout will be wrong: don't even
* try to set the registers in this case.
*/
if (count && vq != sve_vq_from_vl(header.vl)) {
ret = -EIO;
goto out;
}

start = SVE_PT_SVE_OFFSET;
end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);

BUG_ON(end < start);
BUG_ON(end - start > sve_state_size(target));
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
target->ctx.thread->sve_state,
start, end);
@@ -793,27 +799,21 @@ static long sve_set(struct thread *target,

start = end;
end = SVE_PT_SVE_FPSR_OFFSET(vq);

BUG_ON(end < start);
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
start, end);
if (ret) {
goto out;
}

/*
* Copy fpsr, and fpcr which must follow contiguously in
* struct fpsimd_state:
*/
start = end;
end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;

BUG_ON((char *)(&target->fp_regs->fpcr + 1) <
(char *)&target->fp_regs->fpsr);
BUG_ON(end < start);
BUG_ON((char *)(&target->fp_regs->fpcr + 1) -
(char *)&target->fp_regs->fpsr !=
end - start);

user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->fp_regs->fpsr,
start, end);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->fp_regs->fpsr,
start, end);
out:
return ret;
}
@@ -825,8 +825,9 @@ static const struct user_regset aarch64_regsets[] = {
.core_note_type = NT_PRSTATUS,
.n = sizeof(struct user_pt_regs) / sizeof(uint64_t),
.size = sizeof(uint64_t),
.align = sizeof(uint64_t),
.get = gpr_get,
.set = gpr_set
.set = gpr_set,
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG,
@@ -836,56 +837,75 @@ static const struct user_regset aarch64_regsets[] = {
* fpcr are 32-bits wide.
*/
.size = sizeof(uint32_t),
.align = sizeof(uint32_t),
.get = fpr_get,
.set = fpr_set
.set = fpr_set,
},
[REGSET_TLS] = {
.core_note_type = NT_ARM_TLS,
.n = 1,
.size = sizeof(void *),
.align = sizeof(void *),
.get = tls_get,
.set = tls_set
.set = tls_set,
},
[REGSET_HW_BREAK] = {
.core_note_type = NT_ARM_HW_BREAK,
.n = sizeof(struct user_hwdebug_state) / sizeof(uint32_t),
.size = sizeof(uint32_t),
.align = sizeof(uint32_t),
.get = hw_break_get,
.set = hw_break_set
.set = hw_break_set,
},
[REGSET_HW_WATCH] = {
.core_note_type = NT_ARM_HW_WATCH,
.n = sizeof(struct user_hwdebug_state) / sizeof(uint32_t),
.size = sizeof(uint32_t),
.align = sizeof(uint32_t),
.get = hw_break_get,
.set = hw_break_set
.set = hw_break_set,
},
[REGSET_SYSTEM_CALL] = {
.core_note_type = NT_ARM_SYSTEM_CALL,
.n = 1,
.size = sizeof(int),
.align = sizeof(int),
.get = system_call_get,
.set = system_call_set
.set = system_call_set,
},
#ifdef CONFIG_ARM64_SVE
[REGSET_SVE] = { /* Scalable Vector Extension */
.core_note_type = NT_ARM_SVE,
.n = (SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE) + 15) / 16,
.size = 16,
.n = (SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE) +
(SVE_VQ_BYTES - 1)) / SVE_VQ_BYTES,
.size = SVE_VQ_BYTES,
.align = SVE_VQ_BYTES,
.get = sve_get,
.set = sve_set
.set = sve_set,
.get_size = sve_get_size,
},
#endif /* CONFIG_ARM64_SVE */
};

static const struct user_regset *
find_regset(const struct user_regset *regset, unsigned int type, int n)
static const struct user_regset_view user_aarch64_view = {
.name = "aarch64", .e_machine = EM_AARCH64,
.regsets = aarch64_regsets,
.n = sizeof(aarch64_regsets) / sizeof(aarch64_regsets[0])
};

const struct user_regset_view *current_user_regset_view(void)
{
return &user_aarch64_view;
}

const struct user_regset *find_regset(const struct user_regset_view *view,
unsigned int type)
{
int i = 0;

for (i = 0; i < n; i++) {
if (regset[i].core_note_type == type) {
return &regset[i];
for (i = 0; i < view->n; i++) {
if (view->regsets[i].core_note_type == type) {
return &view->regsets[i];
}
}
return NULL;

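The view indirection mirrors the Linux regset API and is what keeps the coredump path earlier in this patch generic: it asks for the current view, looks up NT_ARM_SVE by note type, and sizes the note via regset_size(), which prefers the per-thread get_size() hook (sve_get_size() here) over the static n * size bound. A toy, self-contained sketch of that lookup pattern; the types and numbers are illustrative stand-ins, not the McKernel definitions:

    #include <stdio.h>

    struct regset;
    typedef unsigned int get_size_fn(const struct regset *rs);

    struct regset {
        unsigned int core_note_type;
        unsigned int n, size;
        get_size_fn *get_size;              /* optional per-thread size hook */
    };

    struct regset_view {
        const struct regset *regsets;
        unsigned int n;
    };

    static unsigned int regset_size(const struct regset *rs)
    {
        return rs->get_size ? rs->get_size(rs) : rs->n * rs->size;
    }

    static const struct regset *find_regset(const struct regset_view *view,
                                            unsigned int type)
    {
        unsigned int i;

        for (i = 0; i < view->n; i++)
            if (view->regsets[i].core_note_type == type)
                return &view->regsets[i];
        return NULL;
    }

    #define NT_PRSTATUS 1
    #define NT_ARM_SVE  0x405

    static unsigned int sve_size(const struct regset *rs)
    {
        (void)rs;
        return 2336;                        /* illustrative: header plus registers */
    }

    static const struct regset toy_regsets[] = {
        { .core_note_type = NT_PRSTATUS, .n = 34, .size = 8 },
        { .core_note_type = NT_ARM_SVE, .n = 547, .size = 16, .get_size = sve_size },
    };

    static const struct regset_view toy_view = { toy_regsets, 2 };

    int main(void)
    {
        const struct regset *rs = find_regset(&toy_view, NT_ARM_SVE);

        printf("NT_ARM_SVE note payload: %u bytes\n", rs ? regset_size(rs) : 0);
        return 0;
    }
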
@@ -894,8 +914,8 @@ find_regset(const struct user_regset *regset, unsigned int type, int n)
static long ptrace_regset(struct thread *thread, int req, long type, struct iovec *iov)
{
long rc = -EINVAL;
const struct user_regset *regset = find_regset(aarch64_regsets, type,
sizeof(aarch64_regsets) / sizeof(aarch64_regsets[0]));
const struct user_regset *regset =
find_regset(&user_aarch64_view, type);

if (!regset) {
kprintf("%s: not supported type 0x%x\n", __FUNCTION__, type);

@@ -1,4 +1,4 @@
/* syscall.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* syscall.c COPYRIGHT FUJITSU LIMITED 2015-2019 */
#include <cpulocal.h>
#include <string.h>
#include <kmalloc.h>
@@ -178,11 +178,10 @@ SYSCALL_DECLARE(prctl)

switch (option) {
case PR_SVE_SET_VL:
error = SVE_SET_VL(cpu_local_var(current),
ihk_mc_syscall_arg1(ctx), ihk_mc_syscall_arg2(ctx));
error = SVE_SET_VL(ihk_mc_syscall_arg1(ctx));
break;
case PR_SVE_GET_VL:
error = SVE_GET_VL(cpu_local_var(current));
error = SVE_GET_VL();
break;
case PR_SET_THP_DISABLE:
if (arg3 || arg4 || arg5) {

@@ -1,4 +1,4 @@
/* coredump.c COPYRIGHT FUJITSU LIMITED 2018 */
/* coredump.c COPYRIGHT FUJITSU LIMITED 2018-2019 */
#include <process.h>
#include <elfcore.h>

@@ -55,3 +55,13 @@ void arch_fill_prstatus(struct elf_prstatus64 *prstatus, struct thread *thread,

prstatus->pr_fpvalid = 0; /* We assume no fp */
}

void arch_fill_thread_core_info(struct note *head,
struct thread *thread, void *regs)
{
}

int arch_get_thread_core_info_size(void)
{
return 0;
}

@@ -1,4 +1,4 @@
/* arch-eclair.h COPYRIGHT FUJITSU LIMITED 2016-2018 */
/* arch-eclair.h COPYRIGHT FUJITSU LIMITED 2016-2019 */
#ifndef HEADER_USER_ARM64_ECLAIR_H
#define HEADER_USER_ARM64_ECLAIR_H

@@ -50,7 +50,7 @@

#define ARCH_REGS 34

#define PANIC_REGS_OFFSET 160
#define PANIC_REGS_OFFSET 168

struct arch_kregs {
unsigned long x19, x20, x21, x22, x23;

@@ -183,8 +183,8 @@ void fill_auxv(struct note *head, struct thread *thread, void *regs)

int get_note_size(void)
{
return get_prstatus_size() + get_prpsinfo_size()
+ get_auxv_size();
return get_prstatus_size() + arch_get_thread_core_info_size()
+ get_prpsinfo_size() + get_auxv_size();
}

/**
@@ -199,8 +199,13 @@ void fill_note(void *note, struct thread *thread, void *regs)
{
fill_prstatus(note, thread, regs);
note += get_prstatus_size();

arch_fill_thread_core_info(note, thread, regs);
note += arch_get_thread_core_info_size();

fill_prpsinfo(note, thread, regs);
note += get_prpsinfo_size();

fill_auxv(note, thread, regs);
}

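The per-thread note buffer is assembled in a fixed order, each writer advancing the cursor by its own reported size, which is what lets the new arch hook slot in between prstatus and prpsinfo without the generic code knowing how large the NT_ARM_SVE note is (it is simply zero bytes on targets without the hook). A compact sketch of that contract with stubbed sizes; all values are illustrative:

    #include <stdio.h>
    #include <string.h>

    /* Stub sizes/writers, standing in for the real fill_* helpers. */
    static int get_prstatus_size(void) { return 392; }
    static int arch_get_thread_core_info_size(void) { return 2360; }
    static int get_prpsinfo_size(void) { return 136; }
    static int get_auxv_size(void) { return 80; }

    static void fill_bytes(char *p, int n, char tag) { memset(p, tag, n); }

    int main(void)
    {
        char buf[4096];
        char *note = buf;

        fill_bytes(note, get_prstatus_size(), 'S');
        note += get_prstatus_size();

        fill_bytes(note, arch_get_thread_core_info_size(), 'V');   /* NT_ARM_SVE */
        note += arch_get_thread_core_info_size();

        fill_bytes(note, get_prpsinfo_size(), 'P');
        note += get_prpsinfo_size();

        fill_bytes(note, get_auxv_size(), 'A');

        printf("total note size = %d bytes\n",
               get_prstatus_size() + arch_get_thread_core_info_size()
               + get_prpsinfo_size() + get_auxv_size());
        return 0;
    }
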
@@ -1,4 +1,4 @@
/* elfcore.h COPYRIGHT FUJITSU LIMITED 2015-2016 */
/* elfcore.h COPYRIGHT FUJITSU LIMITED 2015-2019 */
#ifndef __HEADER_ELFCORE_H
#define __HEADER_ELFCORE_H

@@ -109,5 +109,8 @@ struct note {
/* functions */
struct thread;
extern void arch_fill_prstatus(struct elf_prstatus64 *prstatus, struct thread *thread, void *regs0);
extern int arch_get_thread_core_info_size(void);
extern void arch_fill_thread_core_info(struct note *head,
struct thread *thread, void *regs);

#endif /* __HEADER_ELFCORE_H */