2277ab4a1d
This inverts the delayed dcache flush a bit to be more in line with other platforms. At the same time this also gives us the ability to do some more optimizations and cleanup. Now that the update_mmu_cache() callsite only tests for the bit, the implementation can gradually be split out and made generic, rather than relying on special implementations for each of the peculiar CPU types. SH7705 in 32kB mode and SH-4 still need slightly different handling, but this is something that can remain isolated in the varying page copy/clear routines. On top of that, SH-X3 is dcache coherent, so there is no need to bother with any of these tests in the PTEAEX version of update_mmu_cache(), so we kill that off too. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
98 lines
2.4 KiB
C
98 lines
2.4 KiB
C
/*
|
|
* arch/sh/mm/tlb-sh4.c
|
|
*
|
|
* SH-4 specific TLB operations
|
|
*
|
|
* Copyright (C) 1999 Niibe Yutaka
|
|
* Copyright (C) 2002 - 2007 Paul Mundt
|
|
*
|
|
* Released under the terms of the GNU GPL v2.0.
|
|
*/
|
|
#include <linux/kernel.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/io.h>
|
|
#include <asm/system.h>
|
|
#include <asm/mmu_context.h>
|
|
#include <asm/cacheflush.h>
|
|
|
|
/*
 * Pre-load the hardware TLB with the translation for @address after the
 * page tables have been updated (e.g. on fault resolution), by writing
 * the PTE out to the SH-4 MMU registers and issuing "ldtlb".
 *
 * @vma:     the VMA the faulting address belongs to (may be NULL for
 *           kernel-initiated updates)
 * @address: the virtual address being mapped
 * @pte:     the PTE value to load into the TLB
 */
void update_mmu_cache(struct vm_area_struct * vma,
		      unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned long pteval;
	unsigned long vpn;
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	/*
	 * Ptrace may call this routine for a foreign mm; only load the
	 * TLB when the update targets the currently active address space.
	 */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn) && page_mapping(page)) {
#ifndef CONFIG_SMP
		/*
		 * Delayed dcache flush: if the page was marked dirty in
		 * the dcache, write back the kernel-side alias before the
		 * new user mapping becomes reachable through the TLB, but
		 * only when the two addresses actually alias in the cache.
		 */
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {

			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_wback_region((void *)addr, PAGE_SIZE);
		}
#endif
	}

	/*
	 * NOTE(review): the PTEH/PTEA/PTEL writes and the trailing ldtlb
	 * look like they must execute as an uninterrupted sequence
	 * (presumably an intervening TLB miss would clobber these
	 * registers) — hence the IRQ disable around the whole block.
	 */
	local_irq_save(flags);

	/* Set PTEH register: VPN of the faulting address plus current ASID */
	vpn = (address & MMU_VPN_MASK) | get_asid();
	ctrl_outl(vpn, MMU_PTEH);

	pteval = pte.pte_low;

	/* Set PTEA register */
#ifdef CONFIG_X2TLB
	/*
	 * For the extended mode TLB this is trivial, only the ESZ and
	 * EPR bits need to be written out to PTEA, with the remainder of
	 * the protection bits (with the exception of the compat-mode SZ
	 * and PR bits, which are cleared) being written out in PTEL.
	 */
	ctrl_outl(pte.pte_high, MMU_PTEA);
#else
	if (cpu_data->flags & CPU_HAS_PTEA)
		/* TODO: make this look less hacky */
		/* Pack bits 31:28 (as 3:1) and bit 0 of the PTE into PTEA */
		ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
#endif

	/* Set PTEL register */
	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
#ifdef CONFIG_CACHE_WRITETHROUGH
	/* Force write-through caching on this entry when configured */
	pteval |= _PAGE_WT;
#endif
	/* conveniently, we want all the software flags to be 0 anyway */
	ctrl_outl(pteval, MMU_PTEL);

	/* Load the TLB: latch PTEH/PTEL(/PTEA) into the selected TLB entry */
	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
	local_irq_restore(flags);
}
|
|
|
|
/*
 * Invalidate a single UTLB entry on the local CPU via an associative
 * write to the UTLB address array: writing (page | asid) with the VALID
 * bit clear knocks out any matching entry.
 *
 * @asid: address space identifier of the mapping to flush
 * @page: page-aligned virtual address of the mapping to flush
 *
 * NOTE(review): the array write is performed between jump_to_uncached()
 * and back_to_cached(), so it presumably must execute from the uncached
 * (P2) segment — confirm against the SH-4 hardware manual.
 */
void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
						 unsigned long page)
{
	unsigned long addr, data;

	/*
	 * NOTE: PTEH.ASID should be set to this MM
	 * _AND_ we need to write ASID to the array.
	 *
	 * It would be simple if we didn't need to set PTEH.ASID...
	 */
	/* Associative-access address within the UTLB address array */
	addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT;
	data = page | asid; /* VALID bit is off */
	jump_to_uncached();
	ctrl_outl(data, addr);
	back_to_cached();
}
|