/*
 * Imported change: fix build of arch/arm64/kernel/vdso
 * Change-Id: I73b05034d29f7f8731ac17f9736edbba4fb2c639
 */
/* sysreg.h COPYRIGHT FUJITSU LIMITED 2016-2018 */
/*
 * Macros for accessing system registers with older binutils.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SYSREG_H
#define __ASM_SYSREG_H

#include <types.h>
#include <stringify.h>
#include <ihk/types.h>

/*
 * ARMv8 ARM reserves the following encoding for system registers:
 * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
 * C5.2, version:ARM DDI 0487A.f)
 *	[20-19] : Op0
 *	[18-16] : Op1
 *	[15-12] : CRn
 *	[11-8]  : CRm
 *	[7-5]   : Op2
 */
#define Op0_shift	19
#define Op0_mask	0x3
#define Op1_shift	16
#define Op1_mask	0x7
#define CRn_shift	12
#define CRn_mask	0xf
#define CRm_shift	8
#define CRm_mask	0xf
#define Op2_shift	5
#define Op2_mask	0x7

/* Pack the five encoding fields into a single register identifier. */
#define sys_reg(op0, op1, crn, crm, op2) \
	(((op0) << Op0_shift) | ((op1) << Op1_shift) | \
	 ((crn) << CRn_shift) | ((crm) << CRm_shift) | \
	 ((op2) << Op2_shift))

/* Extract the individual encoding fields back out of a sys_reg() id. */
#define sys_reg_Op0(id)	(((id) >> Op0_shift) & Op0_mask)
#define sys_reg_Op1(id)	(((id) >> Op1_shift) & Op1_mask)
#define sys_reg_CRn(id)	(((id) >> CRn_shift) & CRn_mask)
#define sys_reg_CRm(id)	(((id) >> CRm_shift) & CRm_mask)
#define sys_reg_Op2(id)	(((id) >> Op2_shift) & Op2_mask)
/* Identification registers (op0=3, op1=0, CRn=0). */
#define SYS_MIDR_EL1			sys_reg(3, 0, 0, 0, 0)
#define SYS_MPIDR_EL1			sys_reg(3, 0, 0, 0, 5)
#define SYS_REVIDR_EL1			sys_reg(3, 0, 0, 0, 6)

#define SYS_ID_PFR0_EL1			sys_reg(3, 0, 0, 1, 0)
#define SYS_ID_PFR1_EL1			sys_reg(3, 0, 0, 1, 1)
#define SYS_ID_DFR0_EL1			sys_reg(3, 0, 0, 1, 2)
#define SYS_ID_MMFR0_EL1		sys_reg(3, 0, 0, 1, 4)
#define SYS_ID_MMFR1_EL1		sys_reg(3, 0, 0, 1, 5)
#define SYS_ID_MMFR2_EL1		sys_reg(3, 0, 0, 1, 6)
#define SYS_ID_MMFR3_EL1		sys_reg(3, 0, 0, 1, 7)

#define SYS_ID_ISAR0_EL1		sys_reg(3, 0, 0, 2, 0)
#define SYS_ID_ISAR1_EL1		sys_reg(3, 0, 0, 2, 1)
#define SYS_ID_ISAR2_EL1		sys_reg(3, 0, 0, 2, 2)
#define SYS_ID_ISAR3_EL1		sys_reg(3, 0, 0, 2, 3)
#define SYS_ID_ISAR4_EL1		sys_reg(3, 0, 0, 2, 4)
#define SYS_ID_ISAR5_EL1		sys_reg(3, 0, 0, 2, 5)
#define SYS_ID_MMFR4_EL1		sys_reg(3, 0, 0, 2, 6)

#define SYS_MVFR0_EL1			sys_reg(3, 0, 0, 3, 0)
#define SYS_MVFR1_EL1			sys_reg(3, 0, 0, 3, 1)
#define SYS_MVFR2_EL1			sys_reg(3, 0, 0, 3, 2)

/* AArch64 feature registers. */
#define SYS_ID_AA64PFR0_EL1		sys_reg(3, 0, 0, 4, 0)
#define SYS_ID_AA64PFR1_EL1		sys_reg(3, 0, 0, 4, 1)
#define SYS_ID_AA64ZFR0_EL1		sys_reg(3, 0, 0, 4, 4)

#define SYS_ID_AA64DFR0_EL1		sys_reg(3, 0, 0, 5, 0)
#define SYS_ID_AA64DFR1_EL1		sys_reg(3, 0, 0, 5, 1)

#define SYS_ID_AA64ISAR0_EL1		sys_reg(3, 0, 0, 6, 0)
#define SYS_ID_AA64ISAR1_EL1		sys_reg(3, 0, 0, 6, 1)

#define SYS_ID_AA64MMFR0_EL1		sys_reg(3, 0, 0, 7, 0)
#define SYS_ID_AA64MMFR1_EL1		sys_reg(3, 0, 0, 7, 1)
#define SYS_ID_AA64MMFR2_EL1		sys_reg(3, 0, 0, 7, 2)

/* SVE vector-length control registers. */
#define SYS_ZCR_EL1			sys_reg(3, 0, 1, 2, 0)
#define SYS_ZCR_EL2			sys_reg(3, 4, 1, 2, 0)

/* EL0-accessible registers. */
#define SYS_CNTFRQ_EL0			sys_reg(3, 3, 14, 0, 0)
#define SYS_CTR_EL0			sys_reg(3, 3, 0, 0, 1)
#define SYS_DCZID_EL0			sys_reg(3, 3, 0, 0, 7)

/* PSTATE-immediate encodings used by MSR (immediate) forms. */
#define REG_PSTATE_PAN_IMM		sys_reg(0, 0, 4, 0, 4)
#define REG_PSTATE_UAO_IMM		sys_reg(0, 0, 4, 0, 3)
/*
 * SET_PSTATE helpers are intentionally disabled in this port
 * (kept for reference against the upstream source):
 *
#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM | \
				     (!!x)<<8 | 0x1f)
#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM | \
				     (!!x)<<8 | 0x1f)
*/

/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_EE	(1 << 25)
#define SCTLR_ELx_I	(1 << 12)
#define SCTLR_ELx_SA	(1 << 3)
#define SCTLR_ELx_C	(1 << 2)
#define SCTLR_ELx_A	(1 << 1)
#define SCTLR_ELx_M	1

/* Baseline SCTLR_ELx configuration used at init. */
#define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
			 SCTLR_ELx_SA | SCTLR_ELx_I)

/* SCTLR_EL1 specific flags. */
#define SCTLR_EL1_UCI		(1 << 26)
#define SCTLR_EL1_SPAN		(1 << 23)
#define SCTLR_EL1_UCT		(1 << 15)
#define SCTLR_EL1_SED		(1 << 8)
#define SCTLR_EL1_CP15BEN	(1 << 5)
/* id_aa64isar0: field bit positions. */
#define ID_AA64ISAR0_RDM_SHIFT		28
#define ID_AA64ISAR0_ATOMICS_SHIFT	20
#define ID_AA64ISAR0_CRC32_SHIFT	16
#define ID_AA64ISAR0_SHA2_SHIFT		12
#define ID_AA64ISAR0_SHA1_SHIFT		8
#define ID_AA64ISAR0_AES_SHIFT		4

/* id_aa64isar1: field bit positions. */
#define ID_AA64ISAR1_LRCPC_SHIFT	20
#define ID_AA64ISAR1_FCMA_SHIFT		16
#define ID_AA64ISAR1_JSCVT_SHIFT	12
#define ID_AA64ISAR1_DPB_SHIFT		0

/* id_aa64pfr0: field bit positions. */
#define ID_AA64PFR0_SVE_SHIFT		32
#define ID_AA64PFR0_GIC_SHIFT		24
#define ID_AA64PFR0_ASIMD_SHIFT		20
#define ID_AA64PFR0_FP_SHIFT		16
#define ID_AA64PFR0_EL3_SHIFT		12
#define ID_AA64PFR0_EL2_SHIFT		8
#define ID_AA64PFR0_EL1_SHIFT		4
#define ID_AA64PFR0_EL0_SHIFT		0

/* id_aa64pfr0: field values (0xf means "not implemented"). */
#define ID_AA64PFR0_SVE			0x1
#define ID_AA64PFR0_FP_NI		0xf
#define ID_AA64PFR0_FP_SUPPORTED	0x0
#define ID_AA64PFR0_ASIMD_NI		0xf
#define ID_AA64PFR0_ASIMD_SUPPORTED	0x0
#define ID_AA64PFR0_EL1_64BIT_ONLY	0x1
#define ID_AA64PFR0_EL0_64BIT_ONLY	0x1
#define ID_AA64PFR0_EL0_32BIT_64BIT	0x2
/* id_aa64mmfr0: field bit positions. */
#define ID_AA64MMFR0_TGRAN4_SHIFT	28
#define ID_AA64MMFR0_TGRAN64_SHIFT	24
#define ID_AA64MMFR0_TGRAN16_SHIFT	20
#define ID_AA64MMFR0_BIGENDEL0_SHIFT	16
#define ID_AA64MMFR0_SNSMEM_SHIFT	12
#define ID_AA64MMFR0_BIGENDEL_SHIFT	8
#define ID_AA64MMFR0_ASID_SHIFT		4
#define ID_AA64MMFR0_PARANGE_SHIFT	0

/*
 * id_aa64mmfr0 field values.  Note the TGRAN16 convention is inverted
 * relative to TGRAN4/TGRAN64: 0x0 means "not implemented" for 16K.
 */
#define ID_AA64MMFR0_TGRAN4_NI		0xf
#define ID_AA64MMFR0_TGRAN4_SUPPORTED	0x0
#define ID_AA64MMFR0_TGRAN64_NI		0xf
#define ID_AA64MMFR0_TGRAN64_SUPPORTED	0x0
#define ID_AA64MMFR0_TGRAN16_NI		0x0
#define ID_AA64MMFR0_TGRAN16_SUPPORTED	0x1
#define ID_AA64MMFR0_PARANGE_48		0x5
#define ID_AA64MMFR0_PARANGE_52		0x6

/* Largest physical address range this build can use. */
#ifdef CONFIG_ARM64_PA_BITS_52
#define ID_AA64MMFR0_PARANGE_MAX	ID_AA64MMFR0_PARANGE_52
#else
#define ID_AA64MMFR0_PARANGE_MAX	ID_AA64MMFR0_PARANGE_48
#endif

/* id_aa64mmfr1: field bit positions. */
#define ID_AA64MMFR1_PAN_SHIFT		20
#define ID_AA64MMFR1_LOR_SHIFT		16
#define ID_AA64MMFR1_HPD_SHIFT		12
#define ID_AA64MMFR1_VHE_SHIFT		8
#define ID_AA64MMFR1_VMIDBITS_SHIFT	4
#define ID_AA64MMFR1_HADBS_SHIFT	0

/* id_aa64mmfr1 VMIDBits field values. */
#define ID_AA64MMFR1_VMIDBITS_8		0
#define ID_AA64MMFR1_VMIDBITS_16	2
/* id_aa64mmfr2: field bit positions. */
#define ID_AA64MMFR2_LVA_SHIFT		16
#define ID_AA64MMFR2_IESB_SHIFT		12
#define ID_AA64MMFR2_LSM_SHIFT		8
#define ID_AA64MMFR2_UAO_SHIFT		4
#define ID_AA64MMFR2_CNP_SHIFT		0

/* id_aa64dfr0: field bit positions. */
#define ID_AA64DFR0_PMSVER_SHIFT	32
#define ID_AA64DFR0_CTX_CMPS_SHIFT	28
#define ID_AA64DFR0_WRPS_SHIFT		20
#define ID_AA64DFR0_BRPS_SHIFT		12
#define ID_AA64DFR0_PMUVER_SHIFT	8
#define ID_AA64DFR0_TRACEVER_SHIFT	4
#define ID_AA64DFR0_DEBUGVER_SHIFT	0

/* id_isar5 (AArch32 ISA feature register): field bit positions. */
#define ID_ISAR5_RDM_SHIFT		24
#define ID_ISAR5_CRC32_SHIFT		16
#define ID_ISAR5_SHA2_SHIFT		12
#define ID_ISAR5_SHA1_SHIFT		8
#define ID_ISAR5_AES_SHIFT		4
#define ID_ISAR5_SEVL_SHIFT		0

/* mvfr0 (AArch32 media/VFP feature register 0): field bit positions. */
#define MVFR0_FPROUND_SHIFT		28
#define MVFR0_FPSHVEC_SHIFT		24
#define MVFR0_FPSQRT_SHIFT		20
#define MVFR0_FPDIVIDE_SHIFT		16
#define MVFR0_FPTRAP_SHIFT		12
#define MVFR0_FPDP_SHIFT		8
#define MVFR0_FPSP_SHIFT		4
#define MVFR0_SIMD_SHIFT		0

/* mvfr1 (AArch32 media/VFP feature register 1): field bit positions. */
#define MVFR1_SIMDFMAC_SHIFT		28
#define MVFR1_FPHP_SHIFT		24
#define MVFR1_SIMDHP_SHIFT		20
#define MVFR1_SIMDSP_SHIFT		16
#define MVFR1_SIMDINT_SHIFT		12
#define MVFR1_SIMDLS_SHIFT		8
#define MVFR1_FPDNAN_SHIFT		4
#define MVFR1_FPFTZ_SHIFT		0
/*
 * Select the translation-granule field matching the configured kernel
 * page size.
 *
 * Note: the ID_AA64MMFR0_TGRAN{4,16,64}_SHIFT and *_NI/*_SUPPORTED
 * constants this block relies on are defined once above, in the
 * "id_aa64mmfr0" section; the previous byte-identical duplicates that
 * used to sit here have been removed (identical #define redefinitions
 * are legal but redundant).
 */
#if defined(CONFIG_ARM64_4K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT	ID_AA64MMFR0_TGRAN4_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED	ID_AA64MMFR0_TGRAN4_SUPPORTED
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT	ID_AA64MMFR0_TGRAN16_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED	ID_AA64MMFR0_TGRAN16_SUPPORTED
#elif defined(CONFIG_ARM64_64K_PAGES)
#define ID_AA64MMFR0_TGRAN_SHIFT	ID_AA64MMFR0_TGRAN64_SHIFT
#define ID_AA64MMFR0_TGRAN_SUPPORTED	ID_AA64MMFR0_TGRAN64_SUPPORTED
#endif
/* ZCR_EL1: SVE vector-length (LEN) field. */
#define ZCR_EL1_LEN_SHIFT	0
#define ZCR_EL1_LEN_SIZE	9
#define ZCR_EL1_LEN_MASK	0x1ff

/* CPACR_EL1 ZEN bits: enable SVE access at EL1/EL0. */
#define CPACR_EL1_ZEN_EL1EN	(1 << 16)
#define CPACR_EL1_ZEN_EL0EN	(1 << 17)
#define CPACR_EL1_ZEN		(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)

/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL	(1UL << 31)

/* SYS_MIDR_EL1 field extraction (implementer / primary part number). */
//mask
#define SYS_MIDR_EL1_IMPLEMENTER_MASK	(0xFFUL)
#define SYS_MIDR_EL1_PPNUM_MASK		(0xFFFUL)
//shift
#define SYS_MIDR_EL1_IMPLEMENTER_SHIFT	(24)
#define SYS_MIDR_EL1_PPNUM_SHIFT	(0x4)
//val: Fujitsu implementer code and "T-chip" part number
#define SYS_MIDR_EL1_IMPLEMENTER_FJ	(0x46)
#define SYS_MIDR_EL1_PPNUM_TCHIP	(0x1)

/* Direction selector for the xos_access_* helpers generated below. */
#define READ_ACCESS	(0)
#define WRITE_ACCESS	(1)
/*
 * ACCESS_REG_FUNC(name, reg) defines
 *   static void xos_access_<name>(uint8_t flag, uint64_t *reg_value)
 * which reads (flag == READ_ACCESS) or writes (flag == WRITE_ACCESS)
 * system register 'reg' through the mrs_s/msr_s assembler macros
 * defined later in this header, so registers unknown to older
 * binutils can still be accessed.  Any other flag value is a no-op.
 *
 * Fix: the msr_s mnemonic was previously emitted without a trailing
 * space ("msr_s" __stringify(reg)), gluing the stringified encoding
 * onto the macro name (e.g. "msr_sS3_0_...") so the assembler could
 * not resolve the msr_s macro.  A space was also added after the
 * mrs_s operand comma for readability.
 */
#define ACCESS_REG_FUNC(name, reg) \
static void xos_access_##name(uint8_t flag, uint64_t *reg_value) \
{ \
	if (flag == READ_ACCESS) { \
		__asm__ __volatile__("mrs_s %0, " __stringify(reg) "\n\t" \
				     :"=&r"(*reg_value)::); \
	} \
	else if (flag == WRITE_ACCESS) { \
		__asm__ __volatile__("msr_s " __stringify(reg) ", %0\n\t" \
				     ::"r"(*reg_value):); \
	} else { \
		; \
	} \
}

#define XOS_FALSE	(0)
#define XOS_TRUE	(1)
#ifdef __ASSEMBLY__
/*
 * Assembler-side definitions: encode mrs/msr for arbitrary sys_reg()
 * encodings as raw opcode words via .inst, bypassing binutils' list of
 * known register names.
 *
 * Fix: restored the space between the macro parameter list and the
 * replacement list ("__emit_inst(x) .inst (x)"), matching the upstream
 * form and making the replacement text unambiguous to read.
 */
#define __emit_inst(x)	.inst (x)

	/* Map register names x0..x30 to their 5-bit encoding numbers. */
	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
	.equ	.L__reg_num_x\num, \num
	.endr
	.equ	.L__reg_num_xzr, 31

	/* mrs_s: read system register \sreg (sys_reg() encoding) into \rt. */
	.macro	mrs_s, rt, sreg
	__emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt))
	.endm

	/* msr_s: write \rt into system register \sreg. */
	.macro	msr_s, sreg, rt
	__emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt))
	.endm
#else  /* !__ASSEMBLY__ */
/* C side: expand to an ".inst" fragment usable inside inline asm strings. */
#define __emit_inst(x)	".inst " __stringify((x)) "\n\t"

/*
 * Install the same mrs_s/msr_s assembler macros for this translation
 * unit so that inline asm (read_sysreg_s and friends below) can use
 * them.  The text mirrors the __ASSEMBLY__ branch above.
 */
asm(
"	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
"	.equ	.L__reg_num_x\\num, \\num\n"
"	.endr\n"
"	.equ	.L__reg_num_xzr, 31\n"
"\n"
"	.macro	mrs_s, rt, sreg\n"
	__emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt))
"	.endm\n"
"\n"
"	.macro	msr_s, sreg, rt\n"
	__emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt))
"	.endm\n"
);
ACCESS_REG_FUNC(midr_el1, SYS_MIDR_EL1);
|
|
static int xos_is_tchip(void)
|
|
{
|
|
uint64_t reg = 0;
|
|
int ret = 0, impl = 0, part = 0;
|
|
|
|
xos_access_midr_el1(READ_ACCESS, ®);
|
|
|
|
impl = (reg >> SYS_MIDR_EL1_IMPLEMENTER_SHIFT) &
|
|
SYS_MIDR_EL1_IMPLEMENTER_MASK;
|
|
part = (reg >> SYS_MIDR_EL1_PPNUM_SHIFT) & SYS_MIDR_EL1_PPNUM_MASK;
|
|
|
|
if ((impl == SYS_MIDR_EL1_IMPLEMENTER_FJ) &&
|
|
(part == SYS_MIDR_EL1_PPNUM_TCHIP)) {
|
|
ret = XOS_TRUE;
|
|
}
|
|
else {
|
|
ret = XOS_FALSE;
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
#endif
|
|
|
|
/*
 * read_sysreg(r) - read architecturally-named system register 'r'.
 *
 * Unlike read_cpuid, calls to read_sysreg are never expected to be
 * optimized away or replaced with synthetic values.
 */
#define read_sysreg(r) ({ \
	uint64_t __val; \
	asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
	__val; \
})

/*
 * write_sysreg(v, r) - write value 'v' to system register 'r'.
 *
 * The "Z" constraint normally means a zero immediate, but when combined with
 * the "%x0" template means XZR.
 */
#define write_sysreg(v, r) do { \
	uint64_t __val = (uint64_t)v; \
	asm volatile("msr " __stringify(r) ", %x0" \
		     : : "rZ" (__val)); \
} while (0)
/*
 * For registers without architectural names, or simply unsupported by
 * GAS.  'r' is a sys_reg() encoding; these rely on the mrs_s/msr_s
 * assembler macros installed by the asm() block above.
 */
#define read_sysreg_s(r) ({ \
	uint64_t __val; \
	asm volatile("mrs_s %0, " __stringify(r) : "=r" (__val)); \
	__val; \
})

#define write_sysreg_s(v, r) do { \
	uint64_t __val = (uint64_t)v; \
	asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
} while (0)
/* @ref.impl arch/arm64/include/asm/kvm_arm.h */
/* CPTR_EL2 TZ bit — presumably traps SVE accesses to EL2; confirm vs Arm ARM. */
#define CPTR_EL2_TZ	(1 << 8)

/* Implementation-specific register definitions. */
#include "imp-sysreg.h"

#endif /* __ASM_SYSREG_H */
|