diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index 41034745d6a2..d1ce49119da8 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -1,5 +1,5 @@
 /*
- * Glue Code for the AVX assembler implemention of the Cast5 Cipher
+ * Glue Code for the AVX assembler implementation of the Cast5 Cipher
  *
  * Copyright (C) 2012 Johannes Goetzfried
  *
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 9fb66b5e94b2..18965c39305e 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -1,5 +1,5 @@
 /*
- * Glue Code for the AVX assembler implemention of the Cast6 Cipher
+ * Glue Code for the AVX assembler implementation of the Cast6 Cipher
  *
  * Copyright (C) 2012 Johannes Goetzfried
  *
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 3b2490b81918..7bc105f47d21 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -140,7 +140,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
 	/*
 	 * In order to return to user mode, we need to have IRQs off with
 	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
-	 * can be set at any time on preemptable kernels if we have IRQs on,
+	 * can be set at any time on preemptible kernels if we have IRQs on,
 	 * so we need to loop. Disabling preemption wouldn't help: doing the
 	 * work to clear some of the flags can sleep.
 	 */
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 7eb878561910..babc4e7a519c 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -261,7 +261,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 	 * abusing from userspace install_speciall_mapping, which may
 	 * not do accounting and rlimit right.
 	 * We could search vma near context.vdso, but it's a slowpath,
-	 * so let's explicitely check all VMAs to be completely sure.
+	 * so let's explicitly check all VMAs to be completely sure.
 	 */
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (vma_is_special_mapping(vma, &vdso_mapping) ||
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 24ffa1e88cf9..a01ef1b0f883 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -589,7 +589,7 @@ static __init int bts_init(void)
 	 * the AUX buffer.
 	 *
 	 * However, since this driver supports per-CPU and per-task inherit
-	 * we cannot use the user mapping since it will not be availble
+	 * we cannot use the user mapping since it will not be available
 	 * if we're not running the owning process.
 	 *
 	 * With PTI we can't use the kernal map either, because its not
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index ecc3e34ca955..40e12cfc87f6 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1930,7 +1930,7 @@ static void intel_pmu_enable_all(int added)
  * in sequence on the same PMC or on different PMCs.
  *
  * In practise it appears some of these events do in fact count, and
- * we need to programm all 4 events.
+ * we need to program all 4 events.
  */
 static void intel_pmu_nhm_workaround(void)
 {
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index b7b01d762d32..e9acf1d2e7b2 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1199,7 +1199,7 @@ static void setup_pebs_sample_data(struct perf_event *event,
 	/*
 	 * We must however always use iregs for the unwinder to stay sane; the
 	 * record BP,SP,IP can point into thin air when the record is from a
-	 * previous PMI context or an (I)RET happend between the record and
+	 * previous PMI context or an (I)RET happened between the record and
 	 * PMI.
 	 */
 	if (sample_type & PERF_SAMPLE_CALLCHAIN)
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index d32c0eed38ca..dee579efb2b2 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -1259,7 +1259,7 @@ again:
 	}
 	/*
 	 * Perf does test runs to see if a whole group can be assigned
-	 * together succesfully. There can be multiple rounds of this.
+	 * together successfully. There can be multiple rounds of this.
 	 * Unfortunately, p4_pmu_swap_config_ts touches the hwc->config
 	 * bits, such that the next round of group assignments will
 	 * cause the above p4_should_swap_ts to pass instead of fail.
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 4cd6a3b71824..0660e14690c8 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -174,7 +174,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
 /*
  * Alternative inline assembly with input.
  *
- * Pecularities:
+ * Peculiarities:
  * No memory clobber here.
  * Argument numbers start with 1.
  * Best is to use constraints that are fixed size (like (%1) ... "r")
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index bfb85e5844ab..a8bfac131256 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -7,7 +7,7 @@
 #include /* Provides LOCK_PREFIX */
 
 /*
- * Non-existant functions to indicate usage errors at link time
+ * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error().
 */
 extern void __xchg_wrong_size(void)
diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
index a7adb2bfbf0b..0acf5ee45a21 100644
--- a/arch/x86/include/asm/crash.h
+++ b/arch/x86/include/asm/crash.h
@@ -6,5 +6,6 @@ int crash_load_segments(struct kimage *image);
 int crash_copy_backup_region(struct kimage *image);
 int crash_setup_memmap_entries(struct kimage *image,
 		struct boot_params *params);
+void crash_smp_send_stop(void);
 
 #endif /* _ASM_X86_CRASH_H */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index d1e64ac80b9c..42982a6cc6cf 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -19,7 +19,7 @@
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
- * This flag is used in conjuction with a chicken bit called
+ * This flag is used in conjunction with a chicken bit called
 * "efi=old_map" which can be used as a fallback to the old runtime
 * services mapping method in case there's some b0rkage with a
 * particular EFI implementation (haha, it is hard to hold up the
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 2395bb794c7b..fbb16e6b6c18 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -30,6 +30,9 @@ extern void fixup_irqs(void);
 
 #ifdef CONFIG_HAVE_KVM
 extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
+extern __visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs);
+extern __visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs);
+extern __visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs);
 #endif
 
 extern void (*x86_platform_ipi_callback)(void);
@@ -41,9 +44,13 @@ extern __visible unsigned int do_IRQ(struct pt_regs *regs);
 
 extern void init_ISA_irqs(void);
 
+extern void __init init_IRQ(void);
+
 #ifdef CONFIG_X86_LOCAL_APIC
 void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
 				    bool exclude_self);
+
+extern __visible void smp_x86_platform_ipi(struct pt_regs *regs);
 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
 #endif
diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h
index 800ffce0db29..80b35e3adf03 100644
--- a/arch/x86/include/asm/irq_work.h
+++ b/arch/x86/include/asm/irq_work.h
@@ -10,6 +10,7 @@ static inline bool arch_irq_work_has_interrupt(void)
 	return boot_cpu_has(X86_FEATURE_APIC);
 }
 extern void arch_irq_work_raise(void);
+extern __visible void smp_irq_work_interrupt(struct pt_regs *regs);
 #else
 static inline bool arch_irq_work_has_interrupt(void)
 {
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 4c723632c036..5ed3cf1c3934 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -92,6 +92,7 @@ void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
 void kvm_async_pf_task_wake(u32 token);
 u32 kvm_read_and_reset_pf_reason(void);
 extern void kvm_disable_steal_time(void);
+void do_async_page_fault(struct pt_regs *regs, unsigned long error_code);
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 void __init kvm_spinlock_init(void);
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 4bf42f9e4eea..a97f28d914d5 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -26,6 +26,11 @@ struct static_key;
 extern struct static_key paravirt_steal_enabled;
 extern struct static_key paravirt_steal_rq_enabled;
 
+__visible void __native_queued_spin_unlock(struct qspinlock *lock);
+bool pv_is_native_spin_unlock(void);
+__visible bool __native_vcpu_is_preempted(long cpu);
+bool pv_is_native_vcpu_is_preempted(void);
+
 static inline u64 paravirt_steal_clock(int cpu)
 {
 	return PVOP_CALL1(u64, time.steal_clock, cpu);
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index a671a1145906..04c17be9b5fd 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -26,6 +26,7 @@ void __noreturn machine_real_restart(unsigned int type);
 #define MRR_APM 1
 
 typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
+void nmi_panic_self_stop(struct pt_regs *regs);
 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
 void run_crash_ipi_callback(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ae13bc974416..ed8ec011a9fd 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -46,6 +46,9 @@ extern unsigned long saved_video_mode;
 
 extern void reserve_standard_io_resources(void);
 extern void i386_reserve_resources(void);
+extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp);
+extern unsigned long __startup_secondary_64(void);
+extern int early_make_pgtable(unsigned long address);
 
 #ifdef CONFIG_X86_INTEL_MID
 extern void x86_intel_mid_early_setup(void);
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
index bd26834724e5..2fcbd6f33ef7 100644
--- a/arch/x86/include/asm/sighandling.h
+++ b/arch/x86/include/asm/sighandling.h
@@ -17,4 +17,9 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
 int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 		struct pt_regs *regs, unsigned long mask);
+
+#ifdef CONFIG_X86_X32_ABI
+asmlinkage long sys32_x32_rt_sigreturn(void);
+#endif
+
 #endif /* _ASM_X86_SIGHANDLING_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 547c4fe50711..2e95b6c1bca3 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -148,6 +148,12 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
 void smp_store_boot_cpu_info(void);
 void smp_store_cpu_info(int id);
+
+asmlinkage __visible void smp_reboot_interrupt(void);
+__visible void smp_reschedule_interrupt(struct pt_regs *regs);
+__visible void smp_call_function_interrupt(struct pt_regs *regs);
+__visible void smp_call_function_single_interrupt(struct pt_regs *r);
+
 #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
 #define cpu_acpi_id(cpu) per_cpu(x86_cpu_to_acpiid, cpu)
diff --git a/arch/x86/include/asm/trace/exceptions.h b/arch/x86/include/asm/trace/exceptions.h
index 69615e387973..e0e6d7f21399 100644
--- a/arch/x86/include/asm/trace/exceptions.h
+++ b/arch/x86/include/asm/trace/exceptions.h
@@ -45,6 +45,7 @@ DEFINE_PAGE_FAULT_EVENT(page_fault_user);
 DEFINE_PAGE_FAULT_EVENT(page_fault_kernel);
 
 #undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_PATH .
 #define TRACE_INCLUDE_FILE exceptions
 #endif /* _TRACE_PAGE_FAULT_H */
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 0af81b590a0c..33b9d0f0aafe 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -389,6 +389,7 @@ TRACE_EVENT(vector_free_moved,
 #endif /* CONFIG_X86_LOCAL_APIC */
 
 #undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_PATH .
 #define TRACE_INCLUDE_FILE irq_vectors
 #endif /* _TRACE_IRQ_VECTORS_H */
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 3de69330e6c5..7d6f3f3fad78 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -61,34 +61,38 @@ asmlinkage void xen_machine_check(void);
 asmlinkage void xen_simd_coprocessor_error(void);
 #endif
 
-dotraplinkage void do_divide_error(struct pt_regs *, long);
-dotraplinkage void do_debug(struct pt_regs *, long);
-dotraplinkage void do_nmi(struct pt_regs *, long);
-dotraplinkage void do_int3(struct pt_regs *, long);
-dotraplinkage void do_overflow(struct pt_regs *, long);
-dotraplinkage void do_bounds(struct pt_regs *, long);
-dotraplinkage void do_invalid_op(struct pt_regs *, long);
-dotraplinkage void do_device_not_available(struct pt_regs *, long);
-dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
-dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
-dotraplinkage void do_segment_not_present(struct pt_regs *, long);
-dotraplinkage void do_stack_segment(struct pt_regs *, long);
+dotraplinkage void do_divide_error(struct pt_regs *regs, long error_code);
+dotraplinkage void do_debug(struct pt_regs *regs, long error_code);
+dotraplinkage void do_nmi(struct pt_regs *regs, long error_code);
+dotraplinkage void do_int3(struct pt_regs *regs, long error_code);
+dotraplinkage void do_overflow(struct pt_regs *regs, long error_code);
+dotraplinkage void do_bounds(struct pt_regs *regs, long error_code);
+dotraplinkage void do_invalid_op(struct pt_regs *regs, long error_code);
+dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code);
+dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *regs, long error_code);
+dotraplinkage void do_invalid_TSS(struct pt_regs *regs, long error_code);
+dotraplinkage void do_segment_not_present(struct pt_regs *regs, long error_code);
+dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code);
 #ifdef CONFIG_X86_64
-dotraplinkage void do_double_fault(struct pt_regs *, long);
+dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code);
+asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs);
+asmlinkage __visible notrace
+struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s);
+void __init trap_init(void);
 #endif
-dotraplinkage void do_general_protection(struct pt_regs *, long);
-dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
-dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long);
-dotraplinkage void do_coprocessor_error(struct pt_regs *, long);
-dotraplinkage void do_alignment_check(struct pt_regs *, long);
+dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code);
+dotraplinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code);
+dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code);
+dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code);
+dotraplinkage void do_alignment_check(struct pt_regs *regs, long error_code);
 #ifdef CONFIG_X86_MCE
-dotraplinkage void do_machine_check(struct pt_regs *, long);
+dotraplinkage void do_machine_check(struct pt_regs *regs, long error_code);
 #endif
-dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
+dotraplinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code);
 #ifdef CONFIG_X86_32
-dotraplinkage void do_iret_error(struct pt_regs *, long);
+dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code);
 #endif
-dotraplinkage void do_mce(struct pt_regs *, long);
+dotraplinkage void do_mce(struct pt_regs *regs, long error_code);
 
 static inline int get_si_code(unsigned long condition)
 {
@@ -104,11 +108,16 @@ extern int panic_on_unrecovered_nmi;
 
 void math_emulate(struct math_emu_info *);
 #ifndef CONFIG_X86_32
-asmlinkage void smp_thermal_interrupt(void);
-asmlinkage void smp_threshold_interrupt(void);
-asmlinkage void smp_deferred_error_interrupt(void);
+asmlinkage void smp_thermal_interrupt(struct pt_regs *regs);
+asmlinkage void smp_threshold_interrupt(struct pt_regs *regs);
+asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs);
 #endif
 
+void smp_apic_timer_interrupt(struct pt_regs *regs);
+void smp_spurious_interrupt(struct pt_regs *regs);
+void smp_error_interrupt(struct pt_regs *regs);
+asmlinkage void smp_irq_move_cleanup_interrupt(void);
+
 extern void ist_enter(struct pt_regs *regs);
 extern void ist_exit(struct pt_regs *regs);
 extern void ist_begin_non_atomic(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index eb5bbfeccb66..8a0c25c6bf09 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -35,6 +35,7 @@ extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
 
 extern void tsc_early_init(void);
 extern void tsc_init(void);
+extern unsigned long calibrate_delay_is_known(void);
 extern void mark_tsc_unstable(char *reason);
 extern int unsynchronized_tsc(void);
 extern int check_tsc_unstable(void);
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 06635fbca81c..2624de16cd7a 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -848,7 +848,7 @@ EXPORT_SYMBOL(acpi_unregister_ioapic);
 /**
  * acpi_ioapic_registered - Check whether IOAPIC assoicatied with @gsi_base
  *			    has been registered
- * @handle:	ACPI handle of the IOAPIC deivce
+ * @handle:	ACPI handle of the IOAPIC device
  * @gsi_base:	GSI base associated with the IOAPIC
  *
  * Assume caller holds some type of lock to serialize acpi_ioapic_registered()
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 2c4d5ece7456..58176b56354e 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -264,18 +264,23 @@ static int __init parse_gart_mem(char *p)
 }
 early_param("gart_fix_e820", parse_gart_mem);
 
+/*
+ * With kexec/kdump, if the first kernel doesn't shut down the GART and the
+ * second kernel allocates a different GART region, there might be two
+ * overlapping GART regions present:
+ *
+ * - the first still used by the GART initialized in the first kernel.
+ * - (sub-)set of it used as normal RAM by the second kernel.
+ *
+ * which leads to memory corruptions and a kernel panic eventually.
+ *
+ * This can also happen if the BIOS has forgotten to mark the GART region
+ * as reserved.
+ *
+ * Try to update the e820 map to mark that new region as reserved.
+ */
 void __init early_gart_iommu_check(void)
 {
-	/*
-	 * in case it is enabled before, esp for kexec/kdump,
-	 * previous kernel already enable that. memset called
-	 * by allocate_aperture/__alloc_bootmem_nopanic cause restart.
-	 * or second kernel have different position for GART hole. and new
-	 * kernel could use hole as RAM that is still used by GART set by
-	 * first kernel
-	 * or BIOS forget to put that in reserved.
-	 * try to update e820 to make that region as reserved.
-	 */
 	u32 agp_aper_order = 0;
 	int i, fix, slot, valid_agp = 0;
 	u32 ctl;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 32b2b7a41ef5..b7bcdd781651 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -44,6 +44,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index e84c9eb4e5b4..0005c284a5c5 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -8,6 +8,7 @@
  * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
  * James Cleverdon.
  */
+#include
 #include
 #include
 #include
@@ -16,12 +17,12 @@
 #include
 #include
 #include
-#include
-#include
-#include
-#include
-#include
+#include
+#include
+#include
+#include
+#include
 
 static struct apic apic_physflat;
 static struct apic apic_flat;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 652e7ffa9b9d..3173e07d3791 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 391f358ebb4c..a555da094157 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -1079,7 +1079,7 @@ late_initcall(uv_init_heartbeat);
 #endif /* !CONFIG_HOTPLUG_CPU */
 
 /* Direct Legacy VGA I/O traffic to designated IOH */
-int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags)
+static int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags)
 {
 	int domain, bus, rc;
 
@@ -1148,7 +1148,7 @@ static void get_mn(struct mn *mnp)
 	mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0;
 }
 
-void __init uv_init_hub_info(struct uv_hub_info_s *hi)
+static void __init uv_init_hub_info(struct uv_hub_info_s *hi)
 {
 	union uvh_node_id_u node_id;
 	struct mn mn;
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 72adf6c335dc..168543d077d7 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -29,7 +29,8 @@
 # include "asm-offsets_64.c"
 #endif
 
-void common(void) {
+static void __used common(void)
+{
 	BLANK();
 	OFFSET(TASK_threadsp, task_struct, thread.sp);
 #ifdef CONFIG_STACKPROTECTOR
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 1979a76bfadd..5136e6818da8 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -9,6 +9,7 @@
 #include
 #include
+#include
 
 /*
  * Some BIOSes seem to corrupt the low 64k of memory during events
@@ -136,7 +137,7 @@ void __init setup_bios_corruption_check(void)
 }
 
-void check_for_bios_corruption(void)
+static void check_for_bios_corruption(void)
 {
 	int i;
 	int corruption = 0;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index eeea634bee0a..69f6bbb41be0 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 
 #ifdef CONFIG_X86_64
 # include
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 7eba34df54c3..804c49493938 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
 
 #include "cpu.h"
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 77bf22546ddd..8654b8b0c848 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -32,6 +32,8 @@
 #include
 #include
 
+#include "cpu.h"
+
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index dc1b9342e9c4..c4d1023fb0ab 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -17,6 +17,7 @@
 #include
 #include
 
+#include
 #include
 #include
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 4a2fb59a372e..89298c83de53 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -23,6 +23,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -99,7 +100,7 @@ static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
 	[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
 };
 
-const char *smca_get_name(enum smca_bank_types t)
+static const char *smca_get_name(enum smca_bank_types t)
 {
 	if (t >= N_SMCA_BANK_TYPES)
 		return NULL;
@@ -824,7 +825,7 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
 	mce_log(&m);
 }
 
-asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
+asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index ad8f62a0c706..672c7225cb1b 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -684,7 +684,7 @@ DEFINE_PER_CPU(unsigned, mce_poll_count);
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between exception handler
- * and poll hander -- * so we skip this for now.
+ * and poll handler -- * so we skip this for now.
 * These cases should not happen anyways, or only when the CPU
 * is already totally * confused. In this case it's likely it will
 * not fully execute the machine check handler either.
diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c
index df01ff8513a5..10a3b0599300 100644
--- a/arch/x86/kernel/cpu/mce/therm_throt.c
+++ b/arch/x86/kernel/cpu/mce/therm_throt.c
@@ -25,6 +25,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -392,7 +393,7 @@ static void unexpected_thermal_interrupt(void)
 
 static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
 
-asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r)
+asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/mce/threshold.c b/arch/x86/kernel/cpu/mce/threshold.c
index 10586a85c23f..28812cc15300 100644
--- a/arch/x86/kernel/cpu/mce/threshold.c
+++ b/arch/x86/kernel/cpu/mce/threshold.c
@@ -6,6 +6,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -20,7 +21,7 @@ static void default_threshold_interrupt(void)
 
 void (*mce_threshold_vector)(void) = default_threshold_interrupt;
 
-asmlinkage __visible void __irq_entry smp_threshold_interrupt(void)
+asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
 	trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index a4d74d616222..0277267239f2 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -5,9 +5,10 @@
 #include
 #include
 
+#include
 #include
 
-#include
+#include "cpu.h"
 
 struct cpuid_bit {
 	u16 feature;
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 71ca064e3794..8f6c784141d1 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -10,6 +10,8 @@
 #include
 #include
 
+#include "cpu.h"
+
 /* leaf 0xb SMT level */
 #define SMT_LEVEL 0
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index f631a3f15587..c8b07d8ea5a2 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include
 
 /* Used while preparing memory map entries for second kernel */
 struct crash_memmap_data {
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index eb8ab3915268..22369dd5de3b 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -62,7 +62,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
 /**
  * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the
- * memory with the encryption mask set to accomodate kdump on SME-enabled
+ * memory with the encryption mask set to accommodate kdump on SME-enabled
 * machines.
 */
 ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 7299dcbf8e85..8d85e00bb40a 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include
 
 __initdata u64 initial_dtb;
 char __initdata cmd_line[COMMAND_LINE_SIZE];
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 87a57b7642d3..cd3956fc8158 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -811,7 +811,7 @@ void fpu__resume_cpu(void)
 *
 * Note: does not work for compacted buffers.
 */
-void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
+static void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
 {
 	int feature_nr = fls64(xstate_feature_mask) - 1;
diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c
index 108c48d0d40e..1b2ee55a2dfb 100644
--- a/arch/x86/kernel/jailhouse.c
+++ b/arch/x86/kernel/jailhouse.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 
 static __initdata struct jailhouse_setup_data setup_data;
 static unsigned int precalibrated_tsc_khz;
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index d5f88fe57064..4ba75afba527 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -751,7 +751,7 @@ STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
 /*
  * Called from kretprobe_trampoline
  */
-__visible __used void *trampoline_handler(struct pt_regs *regs)
+static __used void *trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 7d31192296a8..90ae0ca51083 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -22,6 +22,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -39,6 +41,7 @@
 #include
 #include
 #include
+#include
 
 #include "process.h"
 
@@ -793,7 +796,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
 	int count = 0;
 
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (p == current || p->state == TASK_RUNNING)
 		return 0;
 
 	if (!try_get_task_stack(p))
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 60783b318936..721d02bd2d0d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -646,7 +646,7 @@ void set_personality_64bit(void)
 	/* TBD: overwrites user setup. Should have two bits.
 	   But 64bit processes have always behaved this way,
 	   so it's not too bad. The main problem is just that
-	   32bit childs are affected again. */
+	   32bit children are affected again. */
 	current->personality &= ~READ_IMPLIES_EXEC;
 }
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 736348ead421..8451f38ad399 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -7,6 +7,7 @@
 #include
 #include
+#include
 
 #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
index 623965e86b65..fa51723571c8 100644
--- a/arch/x86/kernel/sysfb_efi.c
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -19,12 +19,15 @@
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
 #include