mckernel/arch/arm64/kernel/irq-gic-v3.c
Dominique Martinet 3185334c1c debug messages: implement dynamic debug
Heavily inspired by the Linux kernel's dynamic debug:
 * add a /sys/kernel/debug/dynamic_debug/control file
 (accessible from linux side in /sys/class/mcos/mcos0/sys/kernel/debug/dynamic_debug/control)
 * read from the file to list debug statements (output currently limited to 4 KiB)
 * write to file with '[file foo ][func bar ][line [x][-[y]]] [+-]p' to change values
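 (e.g. writing 'file irq-gic-v3.c +p' enables all debug statements in this file,
 and 'func handle_interrupt_gicv3 -p' disables them for that one function)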

Side effects:
 * reindented all linker scripts; there is a new __verbose section
 * added the string function strpbrk

Change-Id: I36d7707274dcc3ecaf200075a31a2f0f76021059
2018-07-26 14:16:31 +09:00

/* irq-gic-v3.c COPYRIGHT FUJITSU LIMITED 2015-2017 */
#include <irq.h>
#include <arm-gic-v2.h>
#include <arm-gic-v3.h>
#include <io.h>
#include <cputype.h>
#include <process.h>
#include <syscall.h>
#include <debug.h>
//#define DEBUG_GICV3
#define USE_CAVIUM_THUNDER_X
#ifdef DEBUG_GICV3
#undef DDEBUG_DEFAULT
#define DDEBUG_DEFAULT DDEBUG_PRINT
#endif
#ifdef USE_CAVIUM_THUNDER_X
static char is_cavium_thunderx = 0;
#endif
void *dist_base;
void *rdist_base[NR_CPUS];
extern uint64_t ihk_param_cpu_logical_map;
static uint64_t *__cpu_logical_map = &ihk_param_cpu_logical_map;
extern uint64_t ihk_param_gic_rdist_base_pa[NR_CPUS];
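/*
 * MPIDR affinity values handed over from the host kernel via IHK; judging
 * from the uses below they are indexed by hardware CPU id (assumption).
 */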
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE 0xf0
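/*
 * Note: GIC priorities are "lower value = higher priority"; the PMR masks
 * every interrupt whose priority value is numerically >= the PMR, so 0xf0
 * leaves all but the lowest-priority interrupts unmasked.
 */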
/**
 * Low level accessors
 * @ref.impl host-kernel/drivers/irqchip/irq-gic-v3.c
 */
static uint64_t gic_read_iar_common(void)
{
	uint64_t irqstat;

#ifdef CONFIG_HAS_NMI
	uint64_t daif;
	uint64_t pmr;
	uint64_t default_pmr_value = DEFAULT_PMR_VALUE;

	/*
	 * The PMR may be configured to mask interrupts when this code is
	 * called, thus in order to acknowledge interrupts we must set the
	 * PMR to its default value before reading from the IAR.
	 *
	 * To do this without taking an interrupt we also ensure the I bit
	 * is set whilst we are interfering with the value of the PMR.
	 */
	asm volatile(
		"mrs %1, daif\n\t"				/* save I bit */
		"msr daifset, #2\n\t"				/* set I bit */
		"mrs_s %2, " __stringify(ICC_PMR_EL1) "\n\t"	/* save PMR */
		"msr_s " __stringify(ICC_PMR_EL1) ",%3\n\t"	/* set PMR */
		"mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t"	/* ack int */
		"msr_s " __stringify(ICC_PMR_EL1) ",%2\n\t"	/* restore PMR */
		"isb\n\t"
		"msr daif, %1"					/* restore I */
		: "=r" (irqstat), "=&r" (daif), "=&r" (pmr)
		: "r" (default_pmr_value));
#else /* CONFIG_HAS_NMI */
	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
#endif /* CONFIG_HAS_NMI */

	return irqstat;
}

#ifdef USE_CAVIUM_THUNDER_X
/* Cavium ThunderX erratum 23154 */
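/*
 * Per the erratum workaround (mirrored from the Linux driver), the
 * ICC_IAR1_EL1 read is padded with NOPs before and after and followed by
 * a barrier, so that the CPU interface returns a stable value.
 */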
static uint64_t gic_read_iar_cavium_thunderx(void)
{
	uint64_t irqstat;

#ifdef CONFIG_HAS_NMI
	uint64_t daif;
	uint64_t pmr;
	uint64_t default_pmr_value = DEFAULT_PMR_VALUE;

	/*
	 * The PMR may be configured to mask interrupts when this code is
	 * called, thus in order to acknowledge interrupts we must set the
	 * PMR to its default value before reading from the IAR.
	 *
	 * To do this without taking an interrupt we also ensure the I bit
	 * is set whilst we are interfering with the value of the PMR.
	 */
	asm volatile(
		"mrs %1, daif\n\t"				/* save I bit */
		"msr daifset, #2\n\t"				/* set I bit */
		"mrs_s %2, " __stringify(ICC_PMR_EL1) "\n\t"	/* save PMR */
		"msr_s " __stringify(ICC_PMR_EL1) ",%3\n\t"	/* set PMR */
		"nop;nop;nop;nop\n\t"
		"nop;nop;nop;nop\n\t"
		"mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t"	/* ack int */
		"nop;nop;nop;nop\n\t"
		"msr_s " __stringify(ICC_PMR_EL1) ",%2\n\t"	/* restore PMR */
		"isb\n\t"
		"msr daif, %1"					/* restore I */
		: "=r" (irqstat), "=&r" (daif), "=&r" (pmr)
		: "r" (default_pmr_value));
#else /* CONFIG_HAS_NMI */
	asm volatile("nop;nop;nop;nop;");
	asm volatile("nop;nop;nop;nop;");
	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	asm volatile("nop;nop;nop;nop;");
#endif /* CONFIG_HAS_NMI */

	mb();
	return irqstat;
}
#endif
static uint64_t gic_read_iar(void)
{
#ifdef USE_CAVIUM_THUNDER_X
	if (is_cavium_thunderx)
		return gic_read_iar_cavium_thunderx();
	else
#endif
		return gic_read_iar_common();
}
static void gic_write_pmr(uint64_t val)
{
	asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
}

static void gic_write_ctlr(uint64_t val)
{
	asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
	isb();
}

static void gic_write_grpen1(uint64_t val)
{
	asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
	isb();
}

static inline void gic_write_eoir(uint64_t irq)
{
	asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
	isb();
}

static void gic_write_sgi1r(uint64_t val)
{
	asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
}

static inline uint32_t gic_read_sre(void)
{
	uint64_t val;

	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	return val;
}

static inline void gic_write_sre(uint32_t val)
{
	asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" ((uint64_t)val));
	isb();
}
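
/*
 * The SRE bit selects the system-register CPU interface (the ICC_*_EL1
 * accessors above) over the legacy memory-mapped one; everything in this
 * file relies on it, so report failure if the bit will not stick.
 */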
static uint32_t gic_enable_sre(void)
{
	uint32_t val;

	val = gic_read_sre();
	if (val & ICC_SRE_EL1_SRE)
		return 1; /* ok */

	val |= ICC_SRE_EL1_SRE;
	gic_write_sre(val);
	val = gic_read_sre();

	return !!(val & ICC_SRE_EL1_SRE);
}
#ifdef CONFIG_HAS_NMI
static inline void gic_write_bpr1(uint32_t val)
{
	asm volatile("msr_s " __stringify(ICC_BPR1_EL1) ", %0" : : "r" (val));
}
#endif
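
/*
 * Send SGI `vector` to `cpuid` via ICC_SGI1R_EL1. The register routes by
 * affinity: Aff3/Aff2/Aff1 select a cluster, and a 16-bit TargetList holds
 * one bit per Aff0 within that cluster; e.g. MPIDR Aff1.Aff0 = 1.3 becomes
 * Aff1 = 1 with TargetList bit 3 set. Hence the "Aff0 < 16" check below.
 */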
static void arm64_raise_sgi_gicv3(uint32_t cpuid, uint32_t vector)
{
	uint64_t mpidr, cluster_id;
	uint16_t tlist;
	uint64_t val;
	/* Build interrupt destination of the target cpu */
	uint32_t hw_cpuid = ihk_mc_get_cpu_info()->hw_ids[cpuid];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	smp_wmb();

	mpidr = cpu_logical_map(hw_cpuid);
	if ((mpidr & 0xffUL) < 16) {
		cluster_id = cpu_logical_map(hw_cpuid) & ~0xffUL;
		tlist = (uint16_t)(1 << (mpidr & 0xf));

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)

		val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
		       MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
		       vector << ICC_SGI1R_SGI_ID_SHIFT |
		       MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
		       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

		dkprintf("CPU%d: ICC_SGI1R_EL1 %llx\n",
			 ihk_mc_get_processor_id(), val);
		gic_write_sgi1r(val);
		/* Force the above write to ICC_SGI1R_EL1 to be executed */
		isb();
	} else {
		/*
		 * If we ever get a cluster of more than 16 CPUs, just
		 * scream and skip that CPU.
		 */
		ekprintf("GICv3 can't send SGI for TargetList=%d\n",
			 (int)(mpidr & 0xffUL));
	}
}
static void arm64_raise_spi_gicv3(uint32_t cpuid, uint32_t vector)
{
	uint64_t spi_reg_offset;
	uint32_t spi_set_pending_bitpos;

	/**
	 * calculate the register offset and bit position for this vector.
	 *
	 * For interrupt vector m,
	 * - the corresponding GICD_ISPENDR number, n, is given by n = m / 32
	 * - the offset of the required GICD_ISPENDR is (0x200 + (4*n))
	 * - the bit number of the required Set-pending bit in this register
	 *   is m % 32.
	 */
	spi_reg_offset = vector / 32 * 4;
	spi_set_pending_bitpos = vector % 32;
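	/*
	 * Worked example: vector 35 gives n = 1, so the write below lands
	 * at dist_base + 0x200 + 4 (GICD_ISPENDR1), bit 35 % 32 = 3.
	 */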
	/* write to GICD_ISPENDR */
	writel_relaxed(1 << spi_set_pending_bitpos,
		       (void *)(dist_base + GICD_ISPENDR + spi_reg_offset));
}
static void arm64_raise_lpi_gicv3(uint32_t cpuid, uint32_t vector)
{
	// @todo.impl
}
void arm64_issue_ipi_gicv3(uint32_t cpuid, uint32_t vector)
{
	dkprintf("Send irq#%d to cpuid=%d\n", vector, cpuid);
	barrier();

	if (vector < 16) {
		// send SGI
		arm64_raise_sgi_gicv3(cpuid, vector);
	} else if (32 <= vector && vector < 1020) {
		// send SPI (allow only to host)
		arm64_raise_spi_gicv3(cpuid, vector);
	} else if (8192 <= vector) {
		// send LPI (allow only to host)
		arm64_raise_lpi_gicv3(cpuid, vector);
	} else {
		ekprintf("#%d is a bad irq number\n", vector);
	}
}
extern int interrupt_from_user(void *);
void handle_interrupt_gicv3(struct pt_regs *regs)
{
	uint64_t irqnr;

	irqnr = gic_read_iar();
	cpu_enable_nmi();
	set_cputime(interrupt_from_user(regs) ? 1 : 2);
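	/*
	 * Drain the CPU interface: IAR reads 1023 (ICC_IAR1_EL1_SPURIOUS)
	 * once nothing is pending. INTIDs 1020-1022 are reserved/special in
	 * GICv3, so only ordinary IDs (< 1020) and LPIs (>= 8192) are EOIed
	 * and dispatched.
	 */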
	while (irqnr != ICC_IAR1_EL1_SPURIOUS) {
		if ((irqnr < 1020) || (irqnr >= 8192)) {
			gic_write_eoir(irqnr);
			handle_IPI(irqnr, regs);
		}
		irqnr = gic_read_iar();
	}
	set_cputime(0);
}
void gic_dist_init_gicv3(unsigned long dist_base_pa, unsigned long size)
{
	dist_base = map_fixed_area(dist_base_pa, size, 1 /* non cacheable */);

#ifdef USE_CAVIUM_THUNDER_X
	/* Cavium ThunderX erratum 23154 */
	if (MIDR_IMPLEMENTOR(read_cpuid_id()) == ARM_CPU_IMP_CAVIUM) {
		is_cavium_thunderx = 1;
	}
#endif
}
void gic_cpu_init_gicv3(unsigned long cpu_base_pa, unsigned long size)
{
	int32_t cpuid, hw_cpuid;
	struct ihk_mc_cpu_info *cpu_info = ihk_mc_get_cpu_info();

	for (cpuid = 0; cpuid < cpu_info->ncpus; cpuid++) {
		hw_cpuid = cpu_info->hw_ids[cpuid];
		if (ihk_param_gic_rdist_base_pa[hw_cpuid] != 0) {
			rdist_base[hw_cpuid] =
				map_fixed_area(ihk_param_gic_rdist_base_pa[hw_cpuid],
					       size, 1 /* non cacheable */);
		}
	}
}
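
/*
 * GICD_CTLR.RWP ("register write pending") stays set while the distributor
 * is still propagating a previous register write; spin until it clears,
 * with a generous timeout so a wedged GIC cannot hang the kernel.
 */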
static void gic_do_wait_for_rwp(void *base)
{
	uint32_t count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
		count--;
		if (!count) {
			ekprintf("RWP timeout, gone fishing\n");
			return;
		}
		cpu_pause();
	}
}
void gic_enable_gicv3(void)
{
	void *rbase = rdist_base[ihk_mc_get_hardware_processor_id()];
	void *rd_sgi_base = rbase + 0x10000 /* SZ_64K */;
	int i;
	unsigned int enable_ppi_sgi = GICD_INT_EN_SET_SGI;

	if (is_use_virt_timer()) {
		enable_ppi_sgi |= GICD_ENABLE << get_virt_timer_intrid();
	} else {
		enable_ppi_sgi |= GICD_ENABLE << get_phys_timer_intrid();
	}

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	writel_relaxed(~enable_ppi_sgi, rd_sgi_base + GIC_DIST_ENABLE_CLEAR);
	writel_relaxed(enable_ppi_sgi, rd_sgi_base + GIC_DIST_ENABLE_SET);

	/*
	 * Set priority on PPI and SGI interrupts
	 */
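	/*
	 * Each 32-bit GIC_DIST_PRI register packs four byte-wide priority
	 * fields, so the 32 SGIs/PPIs take eight GICD_INT_DEF_PRI_X4 writes.
	 * The "i * 4 / 4" offset is kept as-is from the Linux driver.
	 */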
	for (i = 0; i < 32; i += 4)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			       rd_sgi_base + GIC_DIST_PRI + i * 4 / 4);

	/* sync wait */
	gic_do_wait_for_rwp(rbase);

	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		panic("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
#ifndef CONFIG_HAS_NMI
	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);
#endif

	/* EOI deactivates interrupt too (mode 0) */
	gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);

	/* ... and let's hit the road... */
	gic_write_grpen1(1);

#ifdef CONFIG_HAS_NMI
	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
	gic_write_bpr1(0);
	/* Set specific IPI to NMI */
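	/*
	 * Priority registers are byte-accessible with one byte per INTID,
	 * so the interrupt number itself serves as the byte offset into
	 * GIC_DIST_PRI; these three SGIs are raised to NMI priority.
	 */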
	writeb_relaxed(GICD_INT_NMI_PRI,
		       rd_sgi_base + GIC_DIST_PRI + INTRID_CPU_STOP);
	writeb_relaxed(GICD_INT_NMI_PRI,
		       rd_sgi_base + GIC_DIST_PRI + INTRID_MEMDUMP);
	writeb_relaxed(GICD_INT_NMI_PRI,
		       rd_sgi_base + GIC_DIST_PRI + INTRID_STACK_TRACE);

	/* sync wait */
	gic_do_wait_for_rwp(rbase);
#endif /* CONFIG_HAS_NMI */
}