kernel-ark/arch/mips/mm/tlb-r3k.c
Mike Rapoport e31cf2f4ca mm: don't include asm/pgtable.h if linux/mm.h is already included
Patch series "mm: consolidate definitions of page table accessors", v2.

The low level page table accessors (pXY_index(), pXY_offset()) are
duplicated across all architectures and sometimes more than once.  For
instance, we have 31 definitions of pgd_offset() for 25 supported
architectures.

Most of these definitions are actually identical and typically boil
down to, e.g.

static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

These definitions can be shared among 90% of the arches provided
XYZ_SHIFT, PTRS_PER_XYZ and xyz_page_vaddr() are defined.

For architectures that really need a custom version there is always the
possibility to override the generic version with the usual ifdef magic,
as sketched below.
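
For example, the override boils down to something like this in the
architecture's <asm/pgtable.h> (a sketch only; arch_pmd_lookup() is a
hypothetical helper, not a real kernel function):

#define pmd_offset pmd_offset   /* suppresses the generic definition */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return arch_pmd_lookup(pud, address);   /* hypothetical */
}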

These patches introduce include/linux/pgtable.h that replaces
include/asm-generic/pgtable.h and add the definitions of the page table
accessors to the new header.
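
For instance, the shared pmd_offset() from the example above would sit in
the new header wrapped so that an architecture can opt out (the exact
guard style is an assumption based on the usual kernel pattern, not a
quote from the series):

#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}
#define pmd_offset pmd_offset
#endif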

This patch (of 12):

The linux/mm.h header includes <asm/pgtable.h> to allow inlining of the
functions involving page table manipulations, e.g.  pte_alloc() and
pmd_alloc().  So, there is no point in explicitly including <asm/pgtable.h>
in files that include <linux/mm.h>.

The include statements in such cases are removed with a simple loop:

	for f in $(git grep -l "include <linux/mm.h>") ; do
		sed -i -e '/include <asm\/pgtable.h>/ d' $f
	done
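
For the file shown below, this is the commit that dropped its explicit
#include <asm/pgtable.h> line from the asm include block.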

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200514170327.31389-1-rppt@kernel.org
Link: http://lkml.kernel.org/r/20200514170327.31389-2-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2020-06-09 09:39:13 -07:00

// SPDX-License-Identifier: GPL-2.0
/*
 * tlb-r3k.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 * Copyright (C) 2002 Ralf Baechle
 * Copyright (C) 2002 Maciej W. Rozycki
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/tlbmisc.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

#undef DEBUG_TLB

extern void build_tlb_refill_handler(void);

/* CP0 hazard avoidance. */
#define BARRIER				\
	__asm__ __volatile__(		\
		".set push\n\t"		\
		".set noreorder\n\t"	\
		"nop\n\t"		\
		".set pop\n\t")

int r3k_have_wired_reg;	/* Should be in cpu_data? */
/* TLB operations. */
static void local_flush_tlb_from(int entry)
{
	unsigned long old_ctx;

	old_ctx = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
	write_c0_entrylo0(0);
	while (entry < current_cpu_data.tlbsize) {
		/* The R3000 Index register holds the entry number in bits 13:8 */
		write_c0_index(entry << 8);
		/* Load a unique VPN in unmapped space so the entry can never match */
		write_c0_entryhi((entry | 0x80000) << 12);
		entry++;			/* BARRIER */
		tlb_write_indexed();
	}
	write_c0_entryhi(old_ctx);
}
void local_flush_tlb_all(void)
{
	unsigned long flags;

#ifdef DEBUG_TLB
	printk("[tlball]");
#endif
	local_irq_save(flags);
	local_flush_tlb_from(r3k_have_wired_reg ? read_c0_wired() : 8);
	local_irq_restore(flags);
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

#ifdef DEBUG_TLB
		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
			cpu_context(cpu, mm) & asid_mask, start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= current_cpu_data.tlbsize) {
			int oldpid = read_c0_entryhi() & asid_mask;
			int newpid = cpu_context(cpu, mm) & asid_mask;

			start &= PAGE_MASK;
			end += PAGE_SIZE - 1;
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += PAGE_SIZE;	/* BARRIER */
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entryhi(KSEG0);
				if (idx < 0)		/* BARRIER */
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm);
		}
		local_irq_restore(flags);
	}
}
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

#ifdef DEBUG_TLB
	printk("[tlbrange<0x%08lx,0x%08lx>]", start, end);
#endif
	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size <= current_cpu_data.tlbsize) {
		int pid = read_c0_entryhi();

		start &= PAGE_MASK;
		end += PAGE_SIZE - 1;
		end &= PAGE_MASK;
		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += PAGE_SIZE;	/* BARRIER */
			tlb_probe();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entryhi(KSEG0);
			if (idx < 0)		/* BARRIER */
				continue;
			tlb_write_indexed();
		}
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

#ifdef DEBUG_TLB
		printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
#endif
		newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
		page &= PAGE_MASK;
		local_irq_save(flags);
		oldpid = read_c0_entryhi() & asid_mask;
		write_c0_entryhi(page | newpid);
		BARRIER;
		tlb_probe();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entryhi(KSEG0);
		if (idx < 0)			/* BARRIER */
			goto finish;
		tlb_write_indexed();

finish:
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
	}
}
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
	unsigned long flags;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & asid_mask;

#ifdef DEBUG_TLB
	if ((pid != (cpu_context(smp_processor_id(), vma->vm_mm) & asid_mask)) ||
	    (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
		       cpu_context(smp_processor_id(), vma->vm_mm), pid);
	}
#endif

	local_irq_save(flags);
	address &= PAGE_MASK;
	write_c0_entryhi(address | pid);
	BARRIER;
	tlb_probe();
	idx = read_c0_index();
	write_c0_entrylo0(pte_val(pte));
	write_c0_entryhi(address | pid);
	if (idx < 0) {			/* BARRIER */
		tlb_write_random();
	} else {
		tlb_write_indexed();
	}
	write_c0_entryhi(pid);
	local_irq_restore(flags);
}
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
	unsigned long flags;
	unsigned long old_ctx;
	static unsigned long wired = 0;

	if (r3k_have_wired_reg) {	/* TX39XX */
		unsigned long old_pagemask;
		unsigned long w;

#ifdef DEBUG_TLB
		printk("[tlbwired<entry lo0 %8lx, hi %8lx, pagemask %8lx>]\n",
		       entrylo0, entryhi, pagemask);
#endif
		local_irq_save(flags);
		/* Save old context and create impossible VPN2 value */
		old_ctx = read_c0_entryhi() & asid_mask;
		old_pagemask = read_c0_pagemask();
		w = read_c0_wired();
		write_c0_wired(w + 1);
		write_c0_index(w << 8);
		write_c0_pagemask(pagemask);
		write_c0_entryhi(entryhi);
		write_c0_entrylo0(entrylo0);
		BARRIER;
		tlb_write_indexed();
		write_c0_entryhi(old_ctx);
		write_c0_pagemask(old_pagemask);
		local_flush_tlb_all();
		local_irq_restore(flags);
	} else if (wired < 8) {
#ifdef DEBUG_TLB
		printk("[tlbwired<entry lo0 %8lx, hi %8lx>]\n",
		       entrylo0, entryhi);
#endif
		local_irq_save(flags);
		old_ctx = read_c0_entryhi() & asid_mask;
		write_c0_entrylo0(entrylo0);
		write_c0_entryhi(entryhi);
		write_c0_index(wired);
		wired++;			/* BARRIER */
		tlb_write_indexed();
		write_c0_entryhi(old_ctx);
		local_flush_tlb_all();
		local_irq_restore(flags);
	}
}
void tlb_init(void)
{
	switch (current_cpu_type()) {
	case CPU_TX3922:
	case CPU_TX3927:
		r3k_have_wired_reg = 1;
		write_c0_wired(0);	/* Set to 8 on reset... */
		break;
	}
	local_flush_tlb_from(0);
	build_tlb_refill_handler();
}