Diffstat (limited to 'arch/mips')
-rw-r--r--   arch/mips/Kconfig                    1
-rw-r--r--   arch/mips/include/asm/highmem.h     11
-rw-r--r--   arch/mips/include/asm/hugetlb.h     11
-rw-r--r--   arch/mips/include/asm/pgtable.h      2
-rw-r--r--   arch/mips/kernel/sysrq.c             2
-rw-r--r--   arch/mips/loongson64/numa.c          2
-rw-r--r--   arch/mips/mm/cache.c                 6
-rw-r--r--   arch/mips/mm/highmem.c              54
-rw-r--r--   arch/mips/mm/init.c                  2
-rw-r--r--   arch/mips/sgi-ip27/ip27-memory.c     2
10 files changed, 19 insertions, 74 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index b6338e806a4b..9dc08ee3d6b9 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -72,7 +72,6 @@ config MIPS
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
- select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OPROFILE
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 9d84aafc33d0..f1f788b57166 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -46,21 +46,14 @@ extern pte_t *pkmap_page_table;
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-extern void * kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-
-extern void *kmap(struct page *page);
-extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
+#define ARCH_HAS_KMAP_FLUSH_TLB
+extern void kmap_flush_tlb(unsigned long addr);
extern void *kmap_atomic_pfn(unsigned long pfn);
#define flush_cache_kmaps() BUG_ON(cpu_has_dc_aliases)
extern void kmap_init(void);
-#define kmap_prot PAGE_KERNEL
-
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
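
The kmap()/kunmap()/kmap_atomic() prototypes disappear from the arch header because generic implementations in include/linux/highmem.h now provide them; MIPS only opts into a post-map TLB flush via ARCH_HAS_KMAP_FLUSH_TLB. A minimal sketch of how the generic kmap() is assumed to consume that hook (illustrative, not part of this diff):

/*
 * Illustrative sketch of the consolidated kmap() in
 * include/linux/highmem.h (not part of this patch; shown to explain
 * how ARCH_HAS_KMAP_FLUSH_TLB is consumed, details may differ).
 */
#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}

With the hook in place, only the arch-specific TLB flush stays in arch/mips; everything else is shared code.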
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index 425bb6fc3bda..10e3be870df7 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -11,13 +11,6 @@
#include <asm/page.h>
-static inline int is_hugepage_only_range(struct mm_struct *mm,
- unsigned long addr,
- unsigned long len)
-{
- return 0;
-}
-
#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr,
@@ -82,10 +75,6 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
return changed;
}
-static inline void arch_clear_hugepage_flags(struct page *page)
-{
-}
-
#include <asm-generic/hugetlb.h>
#endif /* __ASM_HUGETLB_H */
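
The two empty stubs can go because the core hugetlb headers now supply identical no-op defaults when an architecture does not override them. A minimal sketch of that fallback pattern, assuming the usual #ifndef-guard convention (guard names are illustrative):

/*
 * Illustrative sketch of the generic fallbacks that replace the
 * deleted stubs (assumed to live in the core hugetlb headers; the
 * exact guard macros may differ from the real tree).
 */
#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif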
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 85b39c9fd09e..32760b41aa31 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -705,7 +705,7 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
return pmd;
}
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);
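
The rename from pmd_mknotpresent() to pmd_mkinvalid() is purely mechanical here (the body is unchanged); the new name better matches its real caller, which invalidates a huge PMD for hardware walks while keeping the entry's contents for the THP code. A hedged sketch of that caller, loosely modelled on pmdp_invalidate() in mm/pgtable-generic.c:

/*
 * Loosely modelled on pmdp_invalidate() in mm/pgtable-generic.c;
 * shown for illustration only, not verbatim from this patch.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	/* Swap in an entry that faults but is not simply "not present". */
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));

	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}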
diff --git a/arch/mips/kernel/sysrq.c b/arch/mips/kernel/sysrq.c
index e5a2a6ab71ac..9c1a2019113b 100644
--- a/arch/mips/kernel/sysrq.c
+++ b/arch/mips/kernel/sysrq.c
@@ -52,7 +52,7 @@ static void sysrq_handle_tlbdump(int key)
#endif
}
-static struct sysrq_key_op sysrq_tlbdump_op = {
+static const struct sysrq_key_op sysrq_tlbdump_op = {
.handler = sysrq_handle_tlbdump,
.help_msg = "show-tlbs(x)",
.action_msg = "Show TLB entries",
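
Constifying the key-op table lets it live in rodata; the registration API is assumed to take a const pointer after the tree-wide sysrq constification. A small, hypothetical registration example (handler, key and messages are invented for illustration):

/*
 * Hypothetical driver-side example; the handler, key and messages are
 * invented, and register_sysrq_key() taking a const op pointer is an
 * assumption based on the tree-wide sysrq constification.
 */
static void my_sysrq_handler(int key)
{
	pr_info("sysrq example triggered\n");
}

static const struct sysrq_key_op my_sysrq_op = {
	.handler	= my_sysrq_handler,
	.help_msg	= "do-thing(y)",
	.action_msg	= "Do the thing",
};

static int __init my_sysrq_init(void)
{
	return register_sysrq_key('y', &my_sysrq_op);
}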
diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
index 1ae072df4831..901f5be5ee76 100644
--- a/arch/mips/loongson64/numa.c
+++ b/arch/mips/loongson64/numa.c
@@ -247,7 +247,7 @@ void __init paging_init(void)
zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
zones_size[ZONE_NORMAL] = max_low_pfn;
- free_area_init_nodes(zones_size);
+ free_area_init(zones_size);
}
void __init mem_init(void)
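
This hunk, the Kconfig hunk, and the two paging_init() hunks further down are the same conversion: with memblock node data always available, HAVE_MEMBLOCK_NODE_MAP is gone and free_area_init() takes the per-zone PFN limits directly, deriving per-node ranges from memblock itself. The resulting caller shape (mirroring the code in this diff, shown only for context):

/*
 * Shape of a typical caller after the conversion; mirrors the hunks in
 * this diff rather than adding new behaviour.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	/* Node ranges are taken from memblock internally. */
	free_area_init(max_zone_pfns);
}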
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index ad6df1cea866..3e81ba000096 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -14,9 +14,9 @@
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
+#include <linux/highmem.h>
#include <asm/cacheflush.h>
-#include <asm/highmem.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
@@ -103,7 +103,7 @@ void __flush_dcache_page(struct page *page)
flush_data_cache_page(addr);
if (PageHighMem(page))
- __kunmap_atomic((void *)addr);
+ kunmap_atomic((void *)addr);
}
EXPORT_SYMBOL(__flush_dcache_page);
@@ -146,7 +146,7 @@ void __update_cache(unsigned long address, pte_t pte)
flush_data_cache_page(addr);
if (PageHighMem(page))
- __kunmap_atomic((void *)addr);
+ kunmap_atomic((void *)addr);
ClearPageDcacheDirty(page);
}
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index d08e6d7d533b..8e8726992720 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -12,71 +12,37 @@ static pte_t *kmap_pte;
unsigned long highstart_pfn, highend_pfn;
-void *kmap(struct page *page)
+void kmap_flush_tlb(unsigned long addr)
{
- void *addr;
-
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- addr = kmap_high(page);
- flush_tlb_one((unsigned long)addr);
-
- return addr;
+ flush_tlb_one(addr);
}
-EXPORT_SYMBOL(kmap);
+EXPORT_SYMBOL(kmap_flush_tlb);
-void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap is is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
int idx, type;
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
- set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
local_flush_tlb_one((unsigned long)vaddr);
return (void*) vaddr;
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int type __maybe_unused;
- if (vaddr < FIXADDR_START) { // FIXME
- pagefault_enable();
- preempt_enable();
+ if (vaddr < FIXADDR_START)
return;
- }
type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -94,10 +60,8 @@ void __kunmap_atomic(void *kvaddr)
}
#endif
kmap_atomic_idx_pop();
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
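
What remains in the MIPS file is only the fixmap PTE handling; the preempt/pagefault bracketing and the !PageHighMem fast path move into generic wrappers in include/linux/highmem.h. A rough sketch of those wrappers as assumed at the time of this series (simplified, details may differ):

/*
 * Rough sketch of the generic wrappers in include/linux/highmem.h at
 * the time of this series; simplified, details may differ.
 */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_atomic_high_prot(page, prot);
}
#define kmap_atomic(page)	kmap_atomic_prot(page, kmap_prot)

#define kunmap_atomic(addr)					\
do {								\
	kunmap_atomic_high(addr);				\
	pagefault_enable();					\
	preempt_enable();					\
} while (0)

With that split, each architecture stops duplicating the bracketing code and only supplies kmap_atomic_high_prot()/kunmap_atomic_high().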
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 620ebfa45ec1..7c9f0c0a6cd3 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -424,7 +424,7 @@ void __init paging_init(void)
}
#endif
- free_area_init_nodes(max_zone_pfns);
+ free_area_init(max_zone_pfns);
}
#ifdef CONFIG_64BIT
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index a45691e6ab90..1213215ea965 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -419,7 +419,7 @@ void __init paging_init(void)
pagetable_init();
zones_size[ZONE_NORMAL] = max_low_pfn;
- free_area_init_nodes(zones_size);
+ free_area_init(zones_size);
}
void __init mem_init(void)