/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include "efi-header.S"

#ifdef CONFIG_XIP_KERNEL
.macro XIP_FIXUP_OFFSET reg
	REG_L t0, _xip_fixup
	add \reg, \reg, t0
.endm
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
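/*
 * Note: the fixup above turns a link-time (XIP flash) address into the
 * corresponding address in the writable RAM copy, assuming the usual XIP
 * layout where everything past XIP_OFFSET is copied to CONFIG_PHYS_RAM_BASE.
 */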
#else
.macro XIP_FIXUP_OFFSET reg
.endm
#endif /* CONFIG_XIP_KERNEL */

__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
	 * structure is described in asm/image.h.
	 * Do not modify it without modifying the structure and all bootloaders
	 * that expect this header format!!
	 */
#ifdef CONFIG_EFI
	/*
	 * This instruction decodes to "MZ" ASCII required by UEFI.
	 */
	c.li s4,-13
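	/*
	 * (c.li s4,-13 encodes to the 16-bit value 0x5a4d, i.e. the
	 * little-endian byte pair "MZ", while remaining a harmless
	 * load-immediate into a register the early boot code does not rely on.)
	 */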
	j _start_kernel
#else
	/* jump to start kernel */
	j _start_kernel
	/* reserved */
	.word 0
#endif
	.balign 8
#ifdef CONFIG_RISCV_M_MODE
	/* Image load offset (0MB) from start of RAM for M-mode */
	.dword 0
#else
#if __riscv_xlen == 64
	/* Image load offset (2MB) from start of RAM */
	.dword 0x200000
#else
	/* Image load offset (4MB) from start of RAM */
	.dword 0x400000
#endif
#endif
	/* Effective size of kernel image */
	.dword _end - _start
	.dword __HEAD_FLAGS
	.word RISCV_HEADER_VERSION
	.word 0
	.dword 0
	.ascii RISCV_IMAGE_MAGIC
	.balign 4
	.ascii RISCV_IMAGE_MAGIC2
#ifdef CONFIG_EFI
	.word pe_head_start - _start
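	/*
	 * Assuming the standard image header layout above, this word lands at
	 * offset 0x3c of the image, where a PE/COFF loader expects the
	 * e_lfanew pointer, so UEFI firmware can locate the PE header below.
	 */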
pe_head_start:

	__EFI_PE_HEADER
#else
	.word 0
#endif

.align 2
#ifdef CONFIG_MMU
relocate:
	/* Relocate return address */
	la a1, kernel_map
	XIP_FIXUP_OFFSET a1
	REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1
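	/*
	 * a1 now holds the kernel's virtual base minus the physical address we
	 * were loaded at, so adding it to ra makes the eventual "ret" land in
	 * the virtual mapping.
	 */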

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	li a1, SATP_MODE
	or a2, a2, a1
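	/*
	 * a0 is the physical address of the page directory passed in by the
	 * caller; satp wants its page frame number in the low bits and the
	 * translation mode in the high bits, hence the shift and the OR.
	 */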

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	XIP_FIXUP_OFFSET a0
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
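	/*
	 * If VA != PA, the fetch of the next physical instruction faults under
	 * the trampoline mapping and traps to stvec, which points at the
	 * virtual alias of this label, so execution resumes here already
	 * running at virtual addresses.
	 */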
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables. A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage. Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */

#ifdef CONFIG_SMP
	.global secondary_start_sbi
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	slli a3, a0, LGREG
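	/*
	 * a0 holds this hart's id (per the SBI HSM start calling convention);
	 * the shift above scales it by the register size to index the per-hart
	 * pointer arrays that follow.
	 */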
	la a4, __cpu_up_stack_pointer
	XIP_FIXUP_OFFSET a4
	la a5, __cpu_up_task_pointer
	XIP_FIXUP_OFFSET a5
	add a4, a3, a4
	add a5, a3, a5
	REG_L sp, (a4)
	REG_L tp, (a5)

	.global secondary_start_common
secondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate
#endif
	call setup_trap_vector
	tail smp_callin
#endif /* CONFIG_SMP */

.align 2
setup_trap_vector:
	/* Set trap vector to exception handler */
	la a0, handle_exception
	csrw CSR_TVEC, a0

	/*
	 * Set sup0 scratch register to 0, indicating to exception vector that
	 * we are presently executing in kernel.
	 */
	csrw CSR_SCRATCH, zero
	ret

.align 2
.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)

ENTRY(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
	/* flush the instruction cache */
	fence.i

	/* Reset all registers except ra, a0, a1 */
	call reset_regs

	/*
	 * Setup a PMP to permit access to all of memory. Some machines may
	 * not implement PMPs, so we set up a quick trap handler to just skip
	 * touching the PMPs on any trap.
	 */
	la a0, pmp_done
	csrw CSR_TVEC, a0

	li a0, -1
	csrw CSR_PMPADDR0, a0
	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
	csrw CSR_PMPCFG0, a0
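	/*
	 * An all-ones pmpaddr0 combined with NAPOT addressing makes entry 0
	 * cover the entire physical address space, and R/W/X grants full
	 * access through it, effectively disabling PMP restrictions.
	 */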
.align 2
pmp_done:

	/*
	 * The hartid in a0 is expected later on, and we have no firmware
	 * to hand it to us.
	 */
	csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

#ifdef CONFIG_SMP
	li t0, CONFIG_NR_CPUS
	blt a0, t0, .Lgood_cores
	tail .Lsecondary_park
.Lgood_cores:
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Pick one hart to run the main boot sequence */
	la a3, hart_lottery
	li a2, 1
	amoadd.w a3, a2, (a3)
	bnez a3, .Lsecondary_start
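	/*
	 * amoadd.w returns the old value of hart_lottery: the first hart to
	 * get here sees 0 and falls through as the boot hart; every later hart
	 * sees a non-zero value and branches off to wait.
	 */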

#else
	/* hart_lottery in flash contains a magic number */
	la a3, hart_lottery
	mv a2, a3
	XIP_FIXUP_OFFSET a2
	lw t1, (a3)
	amoswap.w t0, t1, (a2)
	/* first time here if hart_lottery in RAM is not set */
	beq t0, t1, .Lsecondary_start
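	/*
	 * The swap publishes the flash magic value into the RAM copy: the
	 * winning hart reads back the stale RAM contents (not yet the magic)
	 * and continues, while later harts read the magic back and take the
	 * branch above.
	 */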

	la sp, _end + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
	mv s0, a0
	call __copy_data

	/* Restore a0 copy */
	mv a0, s0
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Clear BSS for flat non-ELF images */
	la a3, __bss_start
	la a4, __bss_stop
	ble a4, a3, clear_bss_done
clear_bss:
	REG_S zero, (a3)
	add a3, a3, RISCV_SZPTR
	blt a3, a4, clear_bss
clear_bss_done:
#endif
	/* Save hart ID and DTB physical address */
	mv s0, a0
	mv s1, a1

	la a2, boot_cpu_hartid
	XIP_FIXUP_OFFSET a2
	REG_S a0, (a2)

	/* Initialize page tables and relocate to virtual addresses */
	la sp, init_thread_union + THREAD_SIZE
	XIP_FIXUP_OFFSET sp
#ifdef CONFIG_BUILTIN_DTB
	la a0, __dtb_start
#else
	mv a0, s1
#endif /* CONFIG_BUILTIN_DTB */
	call setup_vm
#ifdef CONFIG_MMU
	la a0, early_pg_dir
	XIP_FIXUP_OFFSET a0
	call relocate
#endif /* CONFIG_MMU */

	call setup_trap_vector
	/* Restore C environment */
	la tp, init_task
	la sp, init_thread_union + THREAD_SIZE

#ifdef CONFIG_KASAN
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	tail start_kernel

.Lsecondary_start:
#ifdef CONFIG_SMP
	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	slli a3, a0, LGREG
	la a1, __cpu_up_stack_pointer
	XIP_FIXUP_OFFSET a1
	la a2, __cpu_up_task_pointer
	XIP_FIXUP_OFFSET a2
	add a1, a3, a1
	add a2, a3, a2

	/*
	 * This hart didn't win the lottery, so we wait for the winning hart to
	 * get far enough along the boot process that it should continue.
	 */
.Lwait_for_cpu_up:
	/* FIXME: We should WFI to save some energy here. */
	REG_L sp, (a1)
	REG_L tp, (a2)
	beqz sp, .Lwait_for_cpu_up
	beqz tp, .Lwait_for_cpu_up
	fence
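	/*
	 * The fence ensures everything the boot hart wrote before publishing
	 * our stack and task pointers is visible here before we start using
	 * them.
	 */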

	tail secondary_start_common
#endif

END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
ENTRY(reset_regs)
	li sp, 0
	li gp, 0
	li tp, 0
	li t0, 0
	li t1, 0
	li t2, 0
	li s0, 0
	li s1, 0
	li a2, 0
	li a3, 0
	li a4, 0
	li a5, 0
	li a6, 0
	li a7, 0
	li s2, 0
	li s3, 0
	li s4, 0
	li s5, 0
	li s6, 0
	li s7, 0
	li s8, 0
	li s9, 0
	li s10, 0
	li s11, 0
	li t3, 0
	li t4, 0
	li t5, 0
	li t6, 0
	csrw CSR_SCRATCH, 0

#ifdef CONFIG_FPU
	csrr t0, CSR_MISA
	andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
	beqz t0, .Lreset_regs_done

	li t1, SR_FS
	csrs CSR_STATUS, t1
	fmv.s.x f0, zero
	fmv.s.x f1, zero
	fmv.s.x f2, zero
	fmv.s.x f3, zero
	fmv.s.x f4, zero
	fmv.s.x f5, zero
	fmv.s.x f6, zero
	fmv.s.x f7, zero
	fmv.s.x f8, zero
	fmv.s.x f9, zero
	fmv.s.x f10, zero
	fmv.s.x f11, zero
	fmv.s.x f12, zero
	fmv.s.x f13, zero
	fmv.s.x f14, zero
	fmv.s.x f15, zero
	fmv.s.x f16, zero
	fmv.s.x f17, zero
	fmv.s.x f18, zero
	fmv.s.x f19, zero
	fmv.s.x f20, zero
	fmv.s.x f21, zero
	fmv.s.x f22, zero
	fmv.s.x f23, zero
	fmv.s.x f24, zero
	fmv.s.x f25, zero
	fmv.s.x f26, zero
	fmv.s.x f27, zero
	fmv.s.x f28, zero
	fmv.s.x f29, zero
	fmv.s.x f30, zero
	fmv.s.x f31, zero
	csrw fcsr, 0
	/* note that the caller must clear SR_FS */
#endif /* CONFIG_FPU */
.Lreset_regs_done:
	ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */

__PAGE_ALIGNED_BSS
	/* Empty zero page */
	.balign PAGE_SIZE