From 42789dfb077bb7b640ee19d0e3f7808dc5318adf Mon Sep 17 00:00:00 2001
From: Lars-Peter Clausen <lars@metafoo.de>
Date: Mon, 11 Jan 2010 04:29:35 +0100
Subject: [PATCH] /opt/Projects/openwrt/target/linux/xburst/patches-2.6.31/001-core.patch
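
Add core support for the Ingenic JZ4720/JZ4740 (XBurst) SoCs: a MACH_JZ
Kconfig entry, platform hooks and uImage/zImage targets in the MIPS
Makefiles, machtype and PRID constants, XBurst-specific cache handling in
r4kcache.h and c-r4k.c, CPU probing for the JZRISC core, and a TLB refill
tweak in tlbex.c.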
---
 arch/mips/Kconfig                        |   29 ++++
 arch/mips/Makefile                       |   18 +++
 arch/mips/boot/Makefile                  |   23 +++-
 arch/mips/include/asm/bootinfo.h         |    6 +
 arch/mips/include/asm/cpu.h              |   13 ++-
 arch/mips/include/asm/mach-generic/irq.h |    2 +-
 arch/mips/include/asm/r4kcache.h         |  231 ++++++++++++++++++++++++++++++
 arch/mips/include/asm/suspend.h          |    3 +
 arch/mips/kernel/cpu-probe.c             |   21 +++
 arch/mips/mm/c-r4k.c                     |   30 ++++
 arch/mips/mm/cache.c                     |    2 +
 arch/mips/mm/tlbex.c                     |    5 +
 12 files changed, 379 insertions(+), 4 deletions(-)
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -174,6 +174,9 @@ config MACH_JAZZ
Members include the Acer PICA, MIPS Magnum 4000, MIPS Millennium and
Olivetti M700-10 workstations.
+config MACH_JZ
+ bool "Ingenic JZ4720/JZ4740 based machines"
+
config LASAT
bool "LASAT Networks platforms"
select CEVT_R4K
@@ -677,6 +680,7 @@ source "arch/mips/alchemy/Kconfig"
source "arch/mips/basler/excite/Kconfig"
source "arch/mips/bcm63xx/Kconfig"
source "arch/mips/jazz/Kconfig"
+source "arch/mips/jz4740/Kconfig"
source "arch/mips/lasat/Kconfig"
source "arch/mips/pmc-sierra/Kconfig"
source "arch/mips/sgi-ip27/Kconfig"
@@ -1913,6 +1917,14 @@ config NR_CPUS
source "kernel/time/Kconfig"
+# the value of (max order + 1)
+config FORCE_MAX_ZONEORDER
+ prompt "MAX_ZONEORDER"
+ int
+ default "12"
+ help
+ The maximum amount of contiguous memory that can be allocated in a single request is 4KB * 2^(CONFIG_FORCE_MAX_ZONEORDER - 1).
+
#
# Timer Interrupt Frequency Configuration
#
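Note: with the default of 12, MAX_ORDER becomes 12, so the largest single buddy allocation is 2^11 pages, i.e. 4KB * 2^11 = 8MB of physically contiguous memory (one step above the usual kernel default of 11, which tops out at 4MB).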
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -184,6 +184,14 @@ cflags-$(CONFIG_AR7) += -I$(srctree)/ar
load-$(CONFIG_AR7) += 0xffffffff94100000
#
+# Common Ingenic JZ4740 series
+#
+
+core-$(CONFIG_SOC_JZ4740) += arch/mips/jz4740/
+cflags-$(CONFIG_SOC_JZ4740) += -I$(srctree)/arch/mips/include/asm/mach-jz4740
+load-$(CONFIG_SOC_JZ4740) += 0xffffffff80010000
+
+#
# Acer PICA 61, Mips Magnum 4000 and Olivetti M700.
#
core-$(CONFIG_MACH_JAZZ) += arch/mips/jazz/
@@ -705,6 +713,12 @@ makeboot =$(Q)$(MAKE) $(build)=arch/mips
all: $(all-y)
+uImage: $(vmlinux-32)
+ +@$(call makeboot,$@)
+
+zImage: $(vmlinux-32)
+ +@$(call makeboot,$@)
+
vmlinux.bin: $(vmlinux-32)
+@$(call makeboot,$@)
@@ -734,6 +748,7 @@ install:
archclean:
@$(MAKE) $(clean)=arch/mips/boot
+ @$(MAKE) $(clean)=arch/mips/boot/compressed
@$(MAKE) $(clean)=arch/mips/lasat
define archhelp
@@ -741,6 +756,9 @@ define archhelp
echo ' vmlinux.ecoff - ECOFF boot image'
echo ' vmlinux.bin - Raw binary boot image'
echo ' vmlinux.srec - SREC boot image'
+ echo ' uImage - u-boot format image (arch/$(ARCH)/boot/uImage)'
+ echo ' zImage - Compressed binary image (arch/$(ARCH)/boot/compressed/zImage)'
+ echo ' vmlinux.bin - Uncompressed binary image (arch/$(ARCH)/boot/vmlinux.bin)'
echo
echo ' These will be default as apropriate for a configured platform.'
endef
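Note: with these hooks in place the new images build like any other MIPS boot target, e.g. "make ARCH=mips CROSS_COMPILE=mipsel-openwrt-linux- uImage" (the toolchain prefix here is only an example). The uImage rule added to arch/mips/boot/Makefile below gzips vmlinux.bin and wraps it with the host mkimage tool, while zImage descends into arch/mips/boot/compressed/, a directory this patch itself does not add.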
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -7,6 +7,9 @@
# Copyright (C) 2004 Maciej W. Rozycki
#
+# This one must match the LOADADDR in arch/mips/Makefile!
+LOADADDR=0x80010000
+
#
# Some DECstations need all possible sections of an ECOFF executable
#
@@ -25,7 +28,7 @@ strip-flags = $(addprefix --remove-secti
VMLINUX = vmlinux
-all: vmlinux.ecoff vmlinux.srec addinitrd
+all: vmlinux.ecoff vmlinux.srec addinitrd uImage zImage
vmlinux.ecoff: $(obj)/elf2ecoff $(VMLINUX)
$(obj)/elf2ecoff $(VMLINUX) vmlinux.ecoff $(E2EFLAGS)
@@ -42,8 +45,24 @@ vmlinux.srec: $(VMLINUX)
$(obj)/addinitrd: $(obj)/addinitrd.c
$(HOSTCC) -o $@ $^
+uImage: $(VMLINUX) vmlinux.bin
+ rm -f $(obj)/vmlinux.bin.gz
+ gzip -9 $(obj)/vmlinux.bin
+ mkimage -A mips -O linux -T kernel -C gzip \
+ -a $(LOADADDR) -e $(shell sh ./$(obj)/tools/entry $(NM) $(VMLINUX) ) \
+ -n 'Linux-$(KERNELRELEASE)' \
+ -d $(obj)/vmlinux.bin.gz $(obj)/uImage
+ @echo ' Kernel: arch/mips/boot/$@ is ready'
+
+zImage:
+ $(Q)$(MAKE) $(build)=$(obj)/compressed loadaddr=$(LOADADDR) $@
+ @echo ' Kernel: arch/mips/boot/compressed/$@ is ready'
+
clean-files += addinitrd \
elf2ecoff \
vmlinux.bin \
vmlinux.ecoff \
- vmlinux.srec
+ vmlinux.srec \
+ vmlinux.bin.gz \
+ uImage \
+ zImage
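Note: the LOADADDR of 0x80010000 set here is the 32-bit form of the load-$(CONFIG_SOC_JZ4740) address 0xffffffff80010000 above (the same address sign-extended to 64 bits), which is why the comment insists the two stay in sync. The entry point handed to mkimage with -e is extracted from the vmlinux symbol table by the tools/entry helper (it is invoked with $(NM) and the vmlinux image); that helper is not added by this patch.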
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -69,6 +69,12 @@
#define MACH_DEXXON_GDIUM2F10 5
#define MACH_LOONGSON_END 6
+/*
+ * Valid machtype for group INGENIC
+ */
+#define MACH_INGENIC_JZ4720 0 /* JZ4720 SOC */
+#define MACH_INGENIC_JZ4740 1 /* JZ4740 SOC */
+
#define CL_SIZE COMMAND_LINE_SIZE
extern char *system_type;
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -34,7 +34,7 @@
#define PRID_COMP_LSI 0x080000
#define PRID_COMP_LEXRA 0x0b0000
#define PRID_COMP_CAVIUM 0x0d0000
-
+#define PRID_COMP_INGENIC 0xd00000
/*
* Assigned values for the product ID register. In order to detect a
@@ -133,6 +133,12 @@
#define PRID_IMP_CAVIUM_CN52XX 0x0700
/*
+ * These are the PRID's for when 23:16 == PRID_COMP_INGENIC
+ */
+
+#define PRID_IMP_JZRISC 0x0200
+
+/*
* Definitions for 7:0 on legacy processors
*/
@@ -224,6 +230,11 @@ enum cpu_type_enum {
CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
CPU_CAVIUM_OCTEON,
+ /*
+ * Ingenic class processors
+ */
+ CPU_JZRISC, CPU_XBURST,
+
CPU_LAST
};
--- a/arch/mips/include/asm/mach-generic/irq.h
+++ b/arch/mips/include/asm/mach-generic/irq.h
@@ -9,7 +9,7 @@
#define __ASM_MACH_GENERIC_IRQ_H
#ifndef NR_IRQS
-#define NR_IRQS 128
+#define NR_IRQS 256
#endif
#ifdef CONFIG_I8259
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -17,6 +17,58 @@
#include <asm/cpu-features.h>
#include <asm/mipsmtregs.h>
+#ifdef CONFIG_JZRISC
+
+#define K0_TO_K1() \
+do { \
+ unsigned long __k0_addr; \
+ \
+ __asm__ __volatile__( \
+ "la %0, 1f\n\t" \
+ "or %0, %0, %1\n\t" \
+ "jr %0\n\t" \
+ "nop\n\t" \
+ "1: nop\n" \
+ : "=&r"(__k0_addr) \
+ : "r" (0x20000000) ); \
+} while(0)
+
+#define K1_TO_K0() \
+do { \
+ unsigned long __k0_addr; \
+ __asm__ __volatile__( \
+ "nop;nop;nop;nop;nop;nop;nop\n\t" \
+ "la %0, 1f\n\t" \
+ "jr %0\n\t" \
+ "nop\n\t" \
+ "1: nop\n" \
+ : "=&r" (__k0_addr)); \
+} while (0)
+
+#define INVALIDATE_BTB() \
+do { \
+ unsigned long tmp; \
+ __asm__ __volatile__( \
+ ".set mips32\n\t" \
+ "mfc0 %0, $16, 7\n\t" \
+ "nop\n\t" \
+ "ori %0, 2\n\t" \
+ "mtc0 %0, $16, 7\n\t" \
+ "nop\n\t" \
+ : "=&r" (tmp)); \
+} while (0)
+
+#define SYNC_WB() __asm__ __volatile__ ("sync")
+
+#else /* CONFIG_JZRISC */
+
+#define K0_TO_K1() do { } while (0)
+#define K1_TO_K0() do { } while (0)
+#define INVALIDATE_BTB() do { } while (0)
+#define SYNC_WB() do { } while (0)
+
+#endif /* CONFIG_JZRISC */
+
/*
* This macro return a properly sign-extended address suitable as base address
* for indexed cache operations. Two issues here:
@@ -144,6 +196,7 @@ static inline void flush_icache_line_ind
{
__iflush_prologue
cache_op(Index_Invalidate_I, addr);
+ INVALIDATE_BTB();
__iflush_epilogue
}
@@ -151,6 +204,7 @@ static inline void flush_dcache_line_ind
{
__dflush_prologue
cache_op(Index_Writeback_Inv_D, addr);
+ SYNC_WB();
__dflush_epilogue
}
@@ -163,6 +217,7 @@ static inline void flush_icache_line(uns
{
__iflush_prologue
cache_op(Hit_Invalidate_I, addr);
+ INVALIDATE_BTB();
__iflush_epilogue
}
@@ -170,6 +225,7 @@ static inline void flush_dcache_line(uns
{
__dflush_prologue
cache_op(Hit_Writeback_Inv_D, addr);
+ SYNC_WB();
__dflush_epilogue
}
@@ -177,6 +233,7 @@ static inline void invalidate_dcache_lin
{
__dflush_prologue
cache_op(Hit_Invalidate_D, addr);
+ SYNC_WB();
__dflush_epilogue
}
@@ -209,6 +266,7 @@ static inline void flush_scache_line(uns
static inline void protected_flush_icache_line(unsigned long addr)
{
protected_cache_op(Hit_Invalidate_I, addr);
+ INVALIDATE_BTB();
}
/*
@@ -220,6 +278,7 @@ static inline void protected_flush_icach
static inline void protected_writeback_dcache_line(unsigned long addr)
{
protected_cache_op(Hit_Writeback_Inv_D, addr);
+ SYNC_WB();
}
static inline void protected_writeback_scache_line(unsigned long addr)
@@ -396,8 +455,10 @@ static inline void blast_##pfx##cache##l
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
+#ifndef CONFIG_JZRISC
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
+#endif
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
@@ -405,12 +466,122 @@ __BUILD_BLAST_CACHE(s, scache, Index_Wri
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
+#ifndef CONFIG_JZRISC
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
+#endif
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
+#ifdef CONFIG_JZRISC
+
+static inline void blast_dcache32(void)
+{
+ unsigned long start = INDEX_BASE;
+ unsigned long end = start + current_cpu_data.dcache.waysize;
+ unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+ unsigned long ws_end = current_cpu_data.dcache.ways <<
+ current_cpu_data.dcache.waybit;
+ unsigned long ws, addr;
+
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start; addr < end; addr += 0x400)
+ cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
+
+ SYNC_WB();
+}
+
+static inline void blast_dcache32_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = page + PAGE_SIZE;
+
+ do {
+ cache32_unroll32(start,Hit_Writeback_Inv_D);
+ start += 0x400;
+ } while (start < end);
+
+ SYNC_WB();
+}
+
+static inline void blast_dcache32_page_indexed(unsigned long page)
+{
+ unsigned long indexmask = current_cpu_data.dcache.waysize - 1;
+ unsigned long start = INDEX_BASE + (page & indexmask);
+ unsigned long end = start + PAGE_SIZE;
+ unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+ unsigned long ws_end = current_cpu_data.dcache.ways <<
+ current_cpu_data.dcache.waybit;
+ unsigned long ws, addr;
+
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start; addr < end; addr += 0x400)
+ cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
+
+ SYNC_WB();
+}
+
+static inline void blast_icache32(void)
+{
+ unsigned long start = INDEX_BASE;
+ unsigned long end = start + current_cpu_data.icache.waysize;
+ unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+ unsigned long ws_end = current_cpu_data.icache.ways <<
+ current_cpu_data.icache.waybit;
+ unsigned long ws, addr;
+
+ K0_TO_K1();
+
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start; addr < end; addr += 0x400)
+ cache32_unroll32(addr|ws,Index_Invalidate_I);
+
+ INVALIDATE_BTB();
+
+ K1_TO_K0();
+}
+
+static inline void blast_icache32_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = page + PAGE_SIZE;
+
+ K0_TO_K1();
+
+ do {
+ cache32_unroll32(start,Hit_Invalidate_I);
+ start += 0x400;
+ } while (start < end);
+
+ INVALIDATE_BTB();
+
+ K1_TO_K0();
+}
+
+static inline void blast_icache32_page_indexed(unsigned long page)
+{
+ unsigned long indexmask = current_cpu_data.icache.waysize - 1;
+ unsigned long start = INDEX_BASE + (page & indexmask);
+ unsigned long end = start + PAGE_SIZE;
+ unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+ unsigned long ws_end = current_cpu_data.icache.ways <<
+ current_cpu_data.icache.waybit;
+ unsigned long ws, addr;
+
+ K0_TO_K1();
+
+ for (ws = 0; ws < ws_end; ws += ws_inc)
+ for (addr = start; addr < end; addr += 0x400)
+ cache32_unroll32(addr|ws,Index_Invalidate_I);
+
+ INVALIDATE_BTB();
+
+ K1_TO_K0();
+}
+
+#endif /* CONFIG_JZRISC */
+
/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
@@ -432,13 +603,73 @@ static inline void prot##blast_##pfx##ca
__##pfx##flush_epilogue \
}
+#ifndef CONFIG_JZRISC
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
+#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
+#ifndef CONFIG_JZRISC
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
+#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
+#ifdef CONFIG_JZRISC
+
+static inline void protected_blast_dcache_range(unsigned long start,
+ unsigned long end)
+{
+ unsigned long lsize = cpu_dcache_line_size();
+ unsigned long addr = start & ~(lsize - 1);
+ unsigned long aend = (end - 1) & ~(lsize - 1);
+
+ while (1) {
+ protected_cache_op(Hit_Writeback_Inv_D, addr);
+ if (addr == aend)
+ break;
+ addr += lsize;
+ }
+ SYNC_WB();
+}
+
+static inline void protected_blast_icache_range(unsigned long start,
+ unsigned long end)
+{
+ unsigned long lsize = cpu_icache_line_size();
+ unsigned long addr = start & ~(lsize - 1);
+ unsigned long aend = (end - 1) & ~(lsize - 1);
+
+ K0_TO_K1();
+
+ while (1) {
+ protected_cache_op(Hit_Invalidate_I, addr);
+ if (addr == aend)
+ break;
+ addr += lsize;
+ }
+ INVALIDATE_BTB();
+
+ K1_TO_K0();
+}
+
+static inline void blast_dcache_range(unsigned long start,
+ unsigned long end)
+{
+ unsigned long lsize = cpu_dcache_line_size();
+ unsigned long addr = start & ~(lsize - 1);
+ unsigned long aend = (end - 1) & ~(lsize - 1);
+
+ while (1) {
+ cache_op(Hit_Writeback_Inv_D, addr);
+ if (addr == aend)
+ break;
+ addr += lsize;
+ }
+ SYNC_WB();
+}
+
+#endif /* CONFIG_JZRISC */
+
#endif /* _ASM_R4KCACHE_H */
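Note: K0_TO_K1() loads the address of a local label, ORs it with 0x20000000 and jumps there, i.e. it re-enters the same code through its uncached KSEG1 alias (0x8xxxxxxx -> 0xaxxxxxxx) so instruction fetches bypass the I-cache while it is being invalidated; K1_TO_K0() jumps back to the cached alias. INVALIDATE_BTB() sets bit 1 of CP0 register 16, select 7 (Config7), which on these cores flushes the branch target buffer, and SYNC_WB() drains the write buffer with a sync. As a C-level illustration only (not part of the patch; it assumes the generic read_c0_config7()/write_c0_config7() accessors from <asm/mipsregs.h>, and the name jz_invalidate_btb is made up for this sketch):

#include <asm/mipsregs.h>
#include <asm/hazards.h>

/* Rough equivalent of INVALIDATE_BTB(): set Config7 bit 1 to flush the BTB.
 * The patch uses explicit nops after the mtc0; a generic hazard barrier is
 * used here instead (an assumption, not taken from the patch). */
static inline void jz_invalidate_btb(void)
{
	write_c0_config7(read_c0_config7() | 0x2);
	back_to_back_c0_hazard();
}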
--- a/arch/mips/include/asm/suspend.h
+++ b/arch/mips/include/asm/suspend.h
@@ -2,6 +2,9 @@
#define __ASM_SUSPEND_H
static inline int arch_prepare_suspend(void) { return 0; }
+#if defined(CONFIG_PM) && defined(CONFIG_JZSOC)
+extern int jz_pm_init(void);
+#endif
/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -160,6 +160,7 @@ void __init check_wait(void)
case CPU_BCM6348:
case CPU_BCM6358:
case CPU_CAVIUM_OCTEON:
+ case CPU_JZRISC:
cpu_wait = r4k_wait;
break;
@@ -902,6 +903,23 @@ static inline void cpu_probe_cavium(stru
}
}
+static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ c->options &= ~MIPS_CPU_COUNTER; /* JZRISC does not implement the CP0 counter. */
+ switch (c->processor_id & 0xff00) {
+ case PRID_IMP_JZRISC:
+ c->cputype = CPU_JZRISC;
+ c->isa_level = MIPS_CPU_ISA_M32R1;
+ c->tlbsize = 32;
+ __cpu_name[cpu] = "Ingenic JZRISC";
+ break;
+ default:
+ panic("Unknown Ingenic Processor ID!");
+ break;
+ }
+}
+
const char *__cpu_name[NR_CPUS];
__cpuinit void cpu_probe(void)
@@ -939,6 +957,9 @@ __cpuinit void cpu_probe(void)
case PRID_COMP_CAVIUM:
cpu_probe_cavium(c, cpu);
break;
+ case PRID_COMP_INGENIC:
+ cpu_probe_ingenic(c, cpu);
+ break;
}
BUG_ON(!__cpu_name[cpu]);
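Note: cpu_probe() dispatches on PRID bits 23:16, so a company ID of 0xd0 (PRID_COMP_INGENIC = 0xd00000) routes probing to cpu_probe_ingenic(), which then matches bits 15:8 against PRID_IMP_JZRISC (0x02), registers the core as CPU_JZRISC with a MIPS32R1 ISA and a 32-entry TLB, and clears MIPS_CPU_COUNTER because, as the comment above notes, JZRISC does not implement the CP0 counter.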
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -928,6 +928,36 @@ static void __cpuinit probe_pcache(void)
c->dcache.waybit = 0;
break;
+ case CPU_JZRISC:
+ config1 = read_c0_config1();
+ config1 = (config1 >> 22) & 0x07;
+ if (config1 == 0x07)
+ config1 = 10;
+ else
+ config1 = config1 + 11;
+ config1 += 2;
+ icache_size = (1 << config1);
+ c->icache.linesz = 32;
+ c->icache.ways = 4;
+ c->icache.waybit = __ffs(icache_size / c->icache.ways);
+
+ config1 = read_c0_config1();
+ config1 = (config1 >> 13) & 0x07;
+ if (config1 == 0x07)
+ config1 = 10;
+ else
+ config1 = config1 + 11;
+ config1 += 2;
+ dcache_size = (1 << config1);
+ c->dcache.linesz = 32;
+ c->dcache.ways = 4;
+ c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
+
+ c->dcache.flags = 0;
+ c->options |= MIPS_CPU_PREFETCH;
+
+ break;
+
default:
if (!(config & MIPS_CONF_M))
panic("Don't know how to probe P-caches on this cpu.");
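Note: the CPU_JZRISC case derives the cache sizes from the Config1 IS (bits 24:22) and DS (bits 15:13) sets-per-way fields alone and hard-codes 4 ways with 32-byte lines. Since size = sets * ways * line size = (64 << field) * 4 * 32 = 1 << (field + 13), a field value of 1 (128 sets per way) yields 1 << (1 + 11 + 2) = 16KB, and the special encoding 7 (32 sets per way) yields 1 << (10 + 2) = 4KB, which is exactly what the shift arithmetic above computes.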
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -52,6 +52,8 @@ void (*_dma_cache_wback)(unsigned long s
void (*_dma_cache_inv)(unsigned long start, unsigned long size);
EXPORT_SYMBOL(_dma_cache_wback_inv);
+EXPORT_SYMBOL(_dma_cache_wback);
+EXPORT_SYMBOL(_dma_cache_inv);
#endif /* CONFIG_DMA_NONCOHERENT */
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -389,6 +389,11 @@ static void __cpuinit build_tlb_write_en
tlbw(p);
break;
+ case CPU_JZRISC:
+ tlbw(p);
+ uasm_i_nop(p);
+ break;
+
default:
panic("No TLB refill handler yet (CPU type: %d)",
current_cpu_data.cputype);
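Note: in build_tlb_write_entry() the new CPU_JZRISC case emits the tlbw and pads it with a single nop, presumably to cover a pipeline hazard after the TLB write, much like the nop padding other cases in this switch apply around tlbw.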