2019-05-27 06:55:21 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2017-07-11 01:00:26 +00:00
|
|
|
/*
|
|
|
|
* SMP initialisation and IPI support
|
|
|
|
* Based on arch/arm64/kernel/smp.c
|
|
|
|
*
|
|
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
|
|
* Copyright (C) 2015 Regents of the University of California
|
|
|
|
* Copyright (C) 2017 SiFive
|
|
|
|
*/
|
|
|
|
|
2019-06-27 19:53:00 +00:00
|
|
|
#include <linux/arch_topology.h>
|
2017-07-11 01:00:26 +00:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/kernel_stat.h>
|
|
|
|
#include <linux/notifier.h>
|
|
|
|
#include <linux/cpu.h>
|
|
|
|
#include <linux/percpu.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/irq.h>
|
|
|
|
#include <linux/of.h>
|
|
|
|
#include <linux/sched/task_stack.h>
|
2018-10-02 19:15:02 +00:00
|
|
|
#include <linux/sched/mm.h>
|
2020-03-18 01:11:40 +00:00
|
|
|
#include <asm/cpu_ops.h>
|
2017-07-11 01:00:26 +00:00
|
|
|
#include <asm/irq.h>
|
|
|
|
#include <asm/mmu_context.h>
|
2020-11-19 00:38:29 +00:00
|
|
|
#include <asm/numa.h>
|
2017-07-11 01:00:26 +00:00
|
|
|
#include <asm/tlbflush.h>
|
|
|
|
#include <asm/sections.h>
|
|
|
|
#include <asm/sbi.h>
|
2019-10-17 22:21:28 +00:00
|
|
|
#include <asm/smp.h>
|
riscv: Introduce alternative mechanism to apply errata solution
Introduce the "alternative" mechanism from ARM64 and x86 to apply the CPU
vendors' errata solution at runtime. The main purpose of this patch is
to provide a framework. Therefore, the implementation is quite basic for
now so that some scenarios could not use this scheme, such as patching
code to a module, relocating the patching code and heterogeneous CPU
topology.
Users could use the macro ALTERNATIVE to apply an errata to the existing
code flow. In the macro ALTERNATIVE, users need to specify the manufacturer
information(vendorid, archid, and impid) for this errata. Therefore, kernel
will know this errata is suitable for which CPU core. During the booting
procedure, kernel will select the errata required by the CPU core and then
patch it. It means that the kernel only applies the errata to the specified
CPU core. In this case, the vendor's errata does not affect each other at
runtime. The above patching procedure only occurs during the booting phase,
so we only take the overhead of the "alternative" mechanism once.
This "alternative" mechanism is enabled by default to ensure that all
required errata will be applied. However, users can disable this feature by
the Kconfig "CONFIG_RISCV_ERRATA_ALTERNATIVE".
Signed-off-by: Vincent Chen <vincent.chen@sifive.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2021-03-22 14:26:03 +00:00
|
|
|
#include <asm/alternative.h>
|
2017-07-11 01:00:26 +00:00
|
|
|
|
2019-10-17 22:00:17 +00:00
|
|
|
#include "head.h"
|
|
|
|
|
2019-02-22 19:41:35 +00:00
|
|
|
/* Signalled by a secondary CPU in smp_callin(); __cpu_up() waits on it. */
static DECLARE_COMPLETION(cpu_running);
|
2017-07-11 01:00:26 +00:00
|
|
|
|
|
|
|
/*
 * Early SMP setup running on the boot CPU: initialise topology
 * bookkeeping and, when enabled, apply the boot-time errata
 * alternatives selected for this hart.
 */
void __init smp_prepare_boot_cpu(void)
{
	init_cpu_topology();
#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE
	/* Patch in vendor errata fixups before other CPUs come up */
	apply_boot_alternatives();
#endif
}
|
|
|
|
|
|
|
|
/*
 * Called by the generic SMP code before secondary CPUs are booted.
 * Records NUMA info for the boot CPU, then runs the cpu_prepare hook
 * for every other possible CPU and marks those that succeed as present.
 *
 * @max_cpus: upper bound requested by the core code; 0 means "nosmp".
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpuid;
	int ret;
	unsigned int curr_cpuid;

	curr_cpuid = smp_processor_id();
	numa_store_cpu_info(curr_cpuid);
	numa_add_cpu(curr_cpuid);

	/* This covers non-smp usecase mandated by "nosmp" option */
	if (max_cpus == 0)
		return;

	for_each_possible_cpu(cpuid) {
		/* The boot CPU is already up; skip it */
		if (cpuid == curr_cpuid)
			continue;
		if (cpu_ops[cpuid]->cpu_prepare) {
			ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
			/* A failed cpu_prepare leaves this CPU !present */
			if (ret)
				continue;
		}
		set_cpu_present(cpuid, true);
		numa_store_cpu_info(cpuid);
	}
}
|
|
|
|
|
|
|
|
/*
 * Walk the devicetree CPU nodes, assign a logical cpuid to every usable
 * hart, record early NUMA affinity, and mark harts as possible CPUs.
 * Logical cpuid 0 is reserved for the boot hart (whose hartid was
 * recorded in cpuid_to_hartid_map(0) during early boot).
 */
void __init setup_smp(void)
{
	struct device_node *dn;
	int hart;
	bool found_boot_cpu = false;
	int cpuid = 1;		/* 0 is reserved for the boot CPU */

	cpu_set_ops(0);

	for_each_of_cpu_node(dn) {
		hart = riscv_of_processor_hartid(dn);
		/* Node is disabled or malformed; not a usable hart */
		if (hart < 0)
			continue;

		if (hart == cpuid_to_hartid_map(0)) {
			BUG_ON(found_boot_cpu);
			found_boot_cpu = true;
			early_map_cpu_to_node(0, of_node_to_nid(dn));
			continue;
		}
		if (cpuid >= NR_CPUS) {
			pr_warn("Invalid cpuid [%d] for hartid [%d]\n",
				cpuid, hart);
			/*
			 * Breaking out of for_each_of_cpu_node() early:
			 * drop the reference the iterator holds on dn,
			 * otherwise the node refcount is leaked.
			 */
			of_node_put(dn);
			break;
		}

		cpuid_to_hartid_map(cpuid) = hart;
		early_map_cpu_to_node(cpuid, of_node_to_nid(dn));
		cpuid++;
	}

	/* The DT must describe the hart we are currently running on */
	BUG_ON(!found_boot_cpu);

	if (cpuid > nr_cpu_ids)
		pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
			cpuid, nr_cpu_ids);

	/* Only harts with a valid mapping become possible CPUs */
	for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
		if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
			cpu_set_ops(cpuid);
			set_cpu_possible(cpuid, true);
		}
	}
}
|
|
|
|
|
2020-07-30 00:25:35 +00:00
|
|
|
static int start_secondary_cpu(int cpu, struct task_struct *tidle)
|
2020-03-18 01:11:40 +00:00
|
|
|
{
|
|
|
|
if (cpu_ops[cpu]->cpu_start)
|
|
|
|
return cpu_ops[cpu]->cpu_start(cpu, tidle);
|
|
|
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
2017-07-11 01:00:26 +00:00
|
|
|
/*
 * Bring up secondary CPU @cpu, running the idle thread @tidle.
 *
 * Kicks the hart via start_secondary_cpu() and then waits (bounded to
 * one second) for it to signal cpu_running from smp_callin().
 *
 * Returns 0 on success, the error from the start hook if the kick
 * failed, or -EIO if the CPU was started but never came online.
 */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = 0;
	tidle->thread_info.cpu = cpu;

	ret = start_secondary_cpu(cpu, tidle);
	if (!ret) {
		/* Bounded wait: a wedged hart must not hang the boot CPU */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_crit("CPU%u: failed to start\n", cpu);
	}

	return ret;
}
|
|
|
|
|
|
|
|
/* Nothing to do once all CPUs are up; hook required by generic SMP code. */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
|
|
|
|
|
|
|
|
/*
 * C entry point for a secondary processor.
 *
 * Runs on the freshly started hart: adopts init_mm, announces itself to
 * the CPU-hotplug machinery, marks itself online, completes the
 * cpu_running handshake with __cpu_up(), then enters the idle loop.
 */
asmlinkage __visible void smp_callin(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int curr_cpuid = smp_processor_id();

	/* Discard any IPI that was pending from before we came up */
	riscv_clear_ipi();

	/* All kernel threads share the same mm context. */
	mmgrab(mm);
	current->active_mm = mm;

	notify_cpu_starting(curr_cpuid);
	numa_add_cpu(curr_cpuid);
	update_siblings_masks(curr_cpuid);
	set_cpu_online(curr_cpuid, 1);

	/*
	 * Remote TLB flushes are ignored while the CPU is offline, so emit
	 * a local TLB flush right now just in case.
	 */
	local_flush_tlb_all();
	complete(&cpu_running);
	/*
	 * Disable preemption before enabling interrupts, so we don't try to
	 * schedule a CPU that hasn't actually started yet.
	 * NOTE(review): no preempt_disable() is visible here — presumably
	 * preemption is already off on this early boot path; confirm before
	 * relying on this comment.
	 */
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
|