RISC-V: Add mechanism to provide custom IPI operations

We add a mechanism to set custom IPI operations so that the CLINT driver
in the drivers directory can provide its own IPI operations.
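
For illustration only, below is a minimal sketch of how a driver could plug its
own IPI operations into the interface added by this patch (struct riscv_ipi_ops
and riscv_set_ipi_ops()). The my_ipi_* names and the per-hart MMIO doorbell
layout are hypothetical, loosely modelled on the CLINT changes further down; a
real driver would map its doorbell region before registering.

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <asm/smp.h>

/* Hypothetical per-hart IPI doorbell registers, mapped by the driver. */
static u32 __iomem *my_ipi_base;

static void my_ipi_inject(const struct cpumask *target)
{
	unsigned int cpu;

	/* Ring the doorbell of every hart in the target cpumask. */
	for_each_cpu(cpu, target)
		writel(1, my_ipi_base + cpuid_to_hartid_map(cpu));
}

static void my_ipi_clear(void)
{
	/* Acknowledge the IPI pending on the local hart. */
	writel(0, my_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
}

static struct riscv_ipi_ops my_ipi_ops = {
	.ipi_inject	= my_ipi_inject,
	.ipi_clear	= my_ipi_clear,
};

/* Called once from the driver's init path, after my_ipi_base is mapped. */
static void __init my_ipi_driver_init(void)
{
	riscv_set_ipi_ops(&my_ipi_ops);
}

Once registered, send_ipi_mask()/send_ipi_single() in the generic SMP code
dispatch through ipi_ops->ipi_inject(), and riscv_clear_ipi() calls
ipi_ops->ipi_clear() before clearing the software interrupt pending bit.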

Signed-off-by: Anup Patel <anup.patel@wdc.com>
Tested-by: Emil Renner Berthing <kernel@esmil.dk>
Reviewed-by: Atish Patra <atish.patra@wdc.com>
Reviewed-by: Palmer Dabbelt <palmerdabbelt@google.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
Author: Anup Patel, 2020-08-17 18:12:48 +05:30 (committed by Palmer Dabbelt)
parent 9123e3a74e
commit cc7f3f72dc
6 changed files with 79 additions and 48 deletions

arch/riscv/include/asm/clint.h

@@ -6,34 +6,9 @@
#include <linux/smp.h>
#ifdef CONFIG_RISCV_M_MODE
extern u32 __iomem *clint_ipi_base;
void clint_init_boot_cpu(void);
static inline void clint_send_ipi_single(unsigned long hartid)
{
writel(1, clint_ipi_base + hartid);
}
static inline void clint_send_ipi_mask(const struct cpumask *mask)
{
int cpu;
for_each_cpu(cpu, mask)
clint_send_ipi_single(cpuid_to_hartid_map(cpu));
}
static inline void clint_clear_ipi(unsigned long hartid)
{
writel(0, clint_ipi_base + hartid);
}
#else /* CONFIG_RISCV_M_MODE */
#define clint_init_boot_cpu() do { } while (0)
/* stubs: code calling these is only reachable under IS_ENABLED(CONFIG_RISCV_M_MODE) */
void clint_send_ipi_single(unsigned long hartid);
void clint_send_ipi_mask(const struct cpumask *hartid_mask);
void clint_clear_ipi(unsigned long hartid);
#endif /* CONFIG_RISCV_M_MODE */
#endif /* _ASM_RISCV_CLINT_H */

arch/riscv/include/asm/smp.h

@@ -15,6 +15,11 @@
struct seq_file;
extern unsigned long boot_cpu_hartid;
struct riscv_ipi_ops {
void (*ipi_inject)(const struct cpumask *target);
void (*ipi_clear)(void);
};
#ifdef CONFIG_SMP
/*
* Mapping between linux logical cpu index and hartid.
@@ -40,6 +45,12 @@ void arch_send_call_function_single_ipi(int cpu);
int riscv_hartid_to_cpuid(int hartid);
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
/* Set custom IPI operations */
void riscv_set_ipi_ops(struct riscv_ipi_ops *ops);
/* Clear IPI for current CPU */
void riscv_clear_ipi(void);
/* Secondary hart entry */
asmlinkage void smp_callin(void);
@@ -81,6 +92,14 @@ static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
cpumask_set_cpu(boot_cpu_hartid, out);
}
static inline void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
{
}
static inline void riscv_clear_ipi(void)
{
}
#endif /* CONFIG_SMP */
#if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)

arch/riscv/kernel/clint.c

@@ -5,11 +5,11 @@
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <asm/clint.h>
#include <asm/csr.h>
#include <asm/timex.h>
#include <asm/smp.h>
/*
* This is the layout used by the SiFive clint, which is also shared by the qemu
@@ -21,6 +21,24 @@
u32 __iomem *clint_ipi_base;
static void clint_send_ipi(const struct cpumask *target)
{
unsigned int cpu;
for_each_cpu(cpu, target)
writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
}
static void clint_clear_ipi(void)
{
writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
}
static struct riscv_ipi_ops clint_ipi_ops = {
.ipi_inject = clint_send_ipi,
.ipi_clear = clint_clear_ipi,
};
void clint_init_boot_cpu(void)
{
struct device_node *np;
@@ -40,5 +58,6 @@ void clint_init_boot_cpu(void)
riscv_time_cmp = base + CLINT_TIME_CMP_OFF;
riscv_time_val = base + CLINT_TIME_VAL_OFF;
clint_clear_ipi(boot_cpu_hartid);
clint_clear_ipi();
riscv_set_ipi_ops(&clint_ipi_ops);
}

arch/riscv/kernel/sbi.c

@@ -547,6 +547,18 @@ static inline long sbi_get_firmware_version(void)
return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}
static void sbi_send_cpumask_ipi(const struct cpumask *target)
{
struct cpumask hartid_mask;
riscv_cpuid_to_hartid_mask(target, &hartid_mask);
sbi_send_ipi(cpumask_bits(&hartid_mask));
}
static struct riscv_ipi_ops sbi_ipi_ops = {
.ipi_inject = sbi_send_cpumask_ipi
};
int __init sbi_init(void)
{
@@ -587,5 +599,7 @@ int __init sbi_init(void)
__sbi_rfence = __sbi_rfence_v01;
}
riscv_set_ipi_ops(&sbi_ipi_ops);
return 0;
}

arch/riscv/kernel/smp.c

@@ -86,9 +86,25 @@ static void ipi_stop(void)
wait_for_interrupt();
}
static struct riscv_ipi_ops *ipi_ops;
void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
{
ipi_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
void riscv_clear_ipi(void)
{
if (ipi_ops && ipi_ops->ipi_clear)
ipi_ops->ipi_clear();
csr_clear(CSR_IP, IE_SIE);
}
EXPORT_SYMBOL_GPL(riscv_clear_ipi);
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
struct cpumask hartid_mask;
int cpu;
smp_mb__before_atomic();
@@ -96,33 +112,22 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
set_bit(op, &ipi_data[cpu].bits);
smp_mb__after_atomic();
riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
if (IS_ENABLED(CONFIG_RISCV_SBI))
sbi_send_ipi(cpumask_bits(&hartid_mask));
if (ipi_ops && ipi_ops->ipi_inject)
ipi_ops->ipi_inject(mask);
else
clint_send_ipi_mask(mask);
pr_warn("SMP: IPI inject method not available\n");
}
static void send_ipi_single(int cpu, enum ipi_message_type op)
{
int hartid = cpuid_to_hartid_map(cpu);
smp_mb__before_atomic();
set_bit(op, &ipi_data[cpu].bits);
smp_mb__after_atomic();
if (IS_ENABLED(CONFIG_RISCV_SBI))
sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
if (ipi_ops && ipi_ops->ipi_inject)
ipi_ops->ipi_inject(cpumask_of(cpu));
else
clint_send_ipi_single(hartid);
}
static inline void clear_ipi(void)
{
if (IS_ENABLED(CONFIG_RISCV_SBI))
csr_clear(CSR_IP, IE_SIE);
else
clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
pr_warn("SMP: IPI inject method not available\n");
}
#ifdef CONFIG_IRQ_WORK
@@ -140,7 +145,7 @@ void handle_IPI(struct pt_regs *regs)
irq_enter();
clear_ipi();
riscv_clear_ipi();
while (true) {
unsigned long ops;

arch/riscv/kernel/smpboot.c

@@ -147,8 +147,7 @@ asmlinkage __visible void smp_callin(void)
struct mm_struct *mm = &init_mm;
unsigned int curr_cpuid = smp_processor_id();
if (!IS_ENABLED(CONFIG_RISCV_SBI))
clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
riscv_clear_ipi();
/* All kernel threads share the same mm context. */
mmgrab(mm);