[xburst] refresh patches
[openwrt.git] target/linux/xburst/patches-2.6.32/001-core.patch
From 42789dfb077bb7b640ee19d0e3f7808dc5318adf Mon Sep 17 00:00:00 2001
From: Lars-Peter Clausen <lars@metafoo.de>
Date: Mon, 11 Jan 2010 04:29:35 +0100
Subject: [PATCH] /opt/Projects/openwrt/target/linux/xburst/patches-2.6.31/001-core.patch

---
 arch/mips/Kconfig                        |   29 ++++
 arch/mips/Makefile                       |   18 +++
 arch/mips/boot/Makefile                  |   23 +++-
 arch/mips/include/asm/bootinfo.h         |    6 +
 arch/mips/include/asm/cpu.h              |   13 ++-
 arch/mips/include/asm/mach-generic/irq.h |    2 +-
 arch/mips/include/asm/r4kcache.h         |  231 ++++++++++++++++++++++++++++++
 arch/mips/include/asm/suspend.h          |    3 +
 arch/mips/kernel/cpu-probe.c             |   21 +++
 arch/mips/mm/c-r4k.c                     |   30 ++++
 arch/mips/mm/cache.c                     |    2 +
 arch/mips/mm/tlbex.c                     |    5 +
 12 files changed, 379 insertions(+), 4 deletions(-)

--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -174,6 +174,9 @@ config MACH_JAZZ
         Members include the Acer PICA, MIPS Magnum 4000, MIPS Millennium and
         Olivetti M700-10 workstations.
 
+config MACH_JZ
+       bool "Ingenic JZ4720/JZ4740 based machines"
+
 config LASAT
        bool "LASAT Networks platforms"
        select CEVT_R4K
@@ -677,6 +680,7 @@ source "arch/mips/alchemy/Kconfig"
 source "arch/mips/basler/excite/Kconfig"
 source "arch/mips/bcm63xx/Kconfig"
 source "arch/mips/jazz/Kconfig"
+source "arch/mips/jz4740/Kconfig"
 source "arch/mips/lasat/Kconfig"
 source "arch/mips/pmc-sierra/Kconfig"
 source "arch/mips/sgi-ip27/Kconfig"
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -186,6 +186,14 @@ cflags-$(CONFIG_AR7)               += -I$(srctree)/ar
 load-$(CONFIG_AR7)             += 0xffffffff94100000
 
 #
+# Common Ingenic JZ4740 series
+#
+
+core-$(CONFIG_SOC_JZ4740)      += arch/mips/jz4740/
+cflags-$(CONFIG_SOC_JZ4740)    += -I$(srctree)/arch/mips/include/asm/mach-jz4740
+load-$(CONFIG_SOC_JZ4740)      += 0xffffffff80010000
+
+#
 # Acer PICA 61, Mips Magnum 4000 and Olivetti M700.
 #
 core-$(CONFIG_MACH_JAZZ)       += arch/mips/jazz/
@@ -704,6 +712,12 @@ makeboot =$(Q)$(MAKE) $(build)=arch/mips
 
 all:   $(all-y)
 
+uImage: $(vmlinux-32)
+       +@$(call makeboot,$@)
+
+zImage: $(vmlinux-32)
+       +@$(call makeboot,$@)
+
 vmlinux.bin: $(vmlinux-32)
        +@$(call makeboot,$@)
 
@@ -733,6 +747,7 @@ install:
 
 archclean:
        @$(MAKE) $(clean)=arch/mips/boot
+       @$(MAKE) $(clean)=arch/mips/boot/compressed
       @$(MAKE) $(clean)=arch/mips/lasat
 
 define archhelp
@@ -740,6 +755,9 @@ define archhelp
        echo '  vmlinux.ecoff        - ECOFF boot image'
        echo '  vmlinux.bin          - Raw binary boot image'
        echo '  vmlinux.srec         - SREC boot image'
+       echo '  uImage               - u-boot format image (arch/$(ARCH)/boot/uImage)'
+       echo '  zImage               - Compressed binary image (arch/$(ARCH)/boot/compressed/zImage)'
+       echo '  vmlinux.bin          - Uncompressed binary image (arch/$(ARCH)/boot/vmlinux.bin)'
        echo
        echo '  These will be default as apropriate for a configured platform.'
 endef
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -7,6 +7,9 @@
 # Copyright (C) 2004  Maciej W. Rozycki
 #
 
+# This one must match the LOADADDR in arch/mips/Makefile!
+LOADADDR=0x80010000
+
 #
 # Some DECstations need all possible sections of an ECOFF executable
 #
@@ -25,7 +28,7 @@ strip-flags   = $(addprefix --remove-secti
 
 VMLINUX = vmlinux
 
-all: vmlinux.ecoff vmlinux.srec addinitrd
+all: vmlinux.ecoff vmlinux.srec addinitrd uImage zImage
 
 vmlinux.ecoff: $(obj)/elf2ecoff $(VMLINUX)
        $(obj)/elf2ecoff $(VMLINUX) vmlinux.ecoff $(E2EFLAGS)
@@ -42,8 +45,24 @@ vmlinux.srec: $(VMLINUX)
 $(obj)/addinitrd: $(obj)/addinitrd.c
        $(HOSTCC) -o $@ $^
 
+uImage: $(VMLINUX) vmlinux.bin
+       rm -f $(obj)/vmlinux.bin.gz
+       gzip -9 $(obj)/vmlinux.bin
+       mkimage -A mips -O linux -T kernel -C gzip \
+               -a $(LOADADDR) -e $(shell sh ./$(obj)/tools/entry $(NM) $(VMLINUX) ) \
+               -n 'Linux-$(KERNELRELEASE)' \
+               -d $(obj)/vmlinux.bin.gz $(obj)/uImage
+       @echo '  Kernel: arch/mips/boot/$@ is ready'
+
+zImage:
+       $(Q)$(MAKE) $(build)=$(obj)/compressed loadaddr=$(LOADADDR) $@
+       @echo '  Kernel: arch/mips/boot/compressed/$@ is ready'
+
 clean-files += addinitrd \
               elf2ecoff \
               vmlinux.bin \
               vmlinux.ecoff \
-              vmlinux.srec
+              vmlinux.srec \
+              vmlinux.bin.gz \
+              uImage \
+              zImage
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -69,6 +69,12 @@
 #define MACH_DEXXON_GDIUM2F10  5
 #define MACH_LOONGSON_END      6
 
+/*
+ * Valid machtype for group INGENIC
+ */
+#define  MACH_INGENIC_JZ4730   0       /* JZ4730 SOC           */
+#define  MACH_INGENIC_JZ4740   1       /* JZ4740 SOC           */
+
 #define CL_SIZE                        COMMAND_LINE_SIZE
 
 extern char *system_type;
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -34,7 +34,7 @@
 #define PRID_COMP_LSI          0x080000
 #define PRID_COMP_LEXRA                0x0b0000
 #define PRID_COMP_CAVIUM       0x0d0000
-
+#define PRID_COMP_INGENIC      0xd00000
 
 /*
  * Assigned values for the product ID register.  In order to detect a
@@ -133,6 +133,12 @@
 #define PRID_IMP_CAVIUM_CN52XX 0x0700
 
 /*
+ * These are the PRID's for when 23:16 == PRID_COMP_INGENIC
+ */
+
+#define PRID_IMP_JZRISC        0x0200
+
+/*
  * Definitions for 7:0 on legacy processors
  */
 
@@ -224,6 +230,11 @@ enum cpu_type_enum {
        CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
        CPU_CAVIUM_OCTEON,
 
+       /*
+        * Ingenic class processors
+        */
+       CPU_JZRISC, CPU_XBURST,
+
        CPU_LAST
 };
 
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -17,6 +17,58 @@
 #include <asm/cpu-features.h>
 #include <asm/mipsmtregs.h>
 
+#ifdef CONFIG_JZRISC
+
+#define K0_TO_K1()                             \
+do {                                           \
+       unsigned long __k0_addr;                \
+                                               \
+       __asm__ __volatile__(                   \
+       "la %0, 1f\n\t"                         \
+       "or     %0, %0, %1\n\t"                 \
+       "jr     %0\n\t"                         \
+       "nop\n\t"                               \
+       "1: nop\n"                              \
+       : "=&r"(__k0_addr)                      \
+       : "r" (0x20000000) );                   \
+} while(0)
+
+#define K1_TO_K0()                             \
+do {                                           \
+       unsigned long __k0_addr;                \
+       __asm__ __volatile__(                   \
+       "nop;nop;nop;nop;nop;nop;nop\n\t"       \
+       "la %0, 1f\n\t"                         \
+       "jr     %0\n\t"                         \
+       "nop\n\t"                               \
+       "1:     nop\n"                          \
+       : "=&r" (__k0_addr));                   \
+} while (0)
+
+#define INVALIDATE_BTB()                       \
+do {                                           \
+       unsigned long tmp;                      \
+       __asm__ __volatile__(                   \
+       ".set mips32\n\t"                       \
+       "mfc0 %0, $16, 7\n\t"                   \
+       "nop\n\t"                               \
+       "ori %0, 2\n\t"                         \
+       "mtc0 %0, $16, 7\n\t"                   \
+       "nop\n\t"                               \
+       : "=&r" (tmp));                         \
+} while (0)
+
+#define SYNC_WB() __asm__ __volatile__ ("sync")
+
+#else /* CONFIG_JZRISC */
+
+#define K0_TO_K1() do { } while (0)
+#define K1_TO_K0() do { } while (0)
+#define INVALIDATE_BTB() do { } while (0)
+#define SYNC_WB() do { } while (0)
+
+#endif /* CONFIG_JZRISC */
+
 /*
  * This macro return a properly sign-extended address suitable as base address
  * for indexed cache operations.  Two issues here:
@@ -144,6 +196,7 @@ static inline void flush_icache_line_ind
 {
        __iflush_prologue
        cache_op(Index_Invalidate_I, addr);
+       INVALIDATE_BTB();
        __iflush_epilogue
 }
 
@@ -151,6 +204,7 @@ static inline void flush_dcache_line_ind
 {
        __dflush_prologue
        cache_op(Index_Writeback_Inv_D, addr);
+       SYNC_WB();
        __dflush_epilogue
 }
 
@@ -163,6 +217,7 @@ static inline void flush_icache_line(uns
 {
        __iflush_prologue
        cache_op(Hit_Invalidate_I, addr);
+       INVALIDATE_BTB();
        __iflush_epilogue
 }
 
@@ -170,6 +225,7 @@ static inline void flush_dcache_line(uns
 {
        __dflush_prologue
        cache_op(Hit_Writeback_Inv_D, addr);
+       SYNC_WB();
        __dflush_epilogue
 }
 
@@ -177,6 +233,7 @@ static inline void invalidate_dcache_lin
 {
        __dflush_prologue
        cache_op(Hit_Invalidate_D, addr);
+       SYNC_WB();
        __dflush_epilogue
 }
 
@@ -209,6 +266,7 @@ static inline void flush_scache_line(uns
 static inline void protected_flush_icache_line(unsigned long addr)
 {
        protected_cache_op(Hit_Invalidate_I, addr);
+       INVALIDATE_BTB();
 }
 
 /*
@@ -220,6 +278,7 @@ static inline void protected_flush_icach
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
        protected_cache_op(Hit_Writeback_Inv_D, addr);
+       SYNC_WB();
 }
 
 static inline void protected_writeback_scache_line(unsigned long addr)
@@ -396,8 +455,10 @@ static inline void blast_##pfx##cache##l
 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
+#ifndef CONFIG_JZRISC
 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
+#endif
 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
 __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
@@ -405,12 +466,122 @@ __BUILD_BLAST_CACHE(s, scache, Index_Wri
 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
 
 __BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
+#ifndef CONFIG_JZRISC
 __BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
+#endif
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
 
+#ifdef CONFIG_JZRISC
+
+static inline void blast_dcache32(void)
+{
+       unsigned long start = INDEX_BASE;
+       unsigned long end = start + current_cpu_data.dcache.waysize;
+       unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+       unsigned long ws_end = current_cpu_data.dcache.ways <<
+                              current_cpu_data.dcache.waybit;
+       unsigned long ws, addr;
+
+       for (ws = 0; ws < ws_end; ws += ws_inc)
+               for (addr = start; addr < end; addr += 0x400)
+                       cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
+
+       SYNC_WB();
+}
+
+static inline void blast_dcache32_page(unsigned long page)
+{
+       unsigned long start = page;
+       unsigned long end = page + PAGE_SIZE;
+
+       do {
+               cache32_unroll32(start,Hit_Writeback_Inv_D);
+               start += 0x400;
+       } while (start < end);
+
+       SYNC_WB();
+}
+
+static inline void blast_dcache32_page_indexed(unsigned long page)
+{
+       unsigned long indexmask = current_cpu_data.dcache.waysize - 1;
+       unsigned long start = INDEX_BASE + (page & indexmask);
+       unsigned long end = start + PAGE_SIZE;
+       unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+       unsigned long ws_end = current_cpu_data.dcache.ways <<
+                              current_cpu_data.dcache.waybit;
+       unsigned long ws, addr;
+
+       for (ws = 0; ws < ws_end; ws += ws_inc)
+               for (addr = start; addr < end; addr += 0x400)
+                       cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
+
+       SYNC_WB();
+}
+
+static inline void blast_icache32(void)
+{
+       unsigned long start = INDEX_BASE;
+       unsigned long end = start + current_cpu_data.icache.waysize;
+       unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+       unsigned long ws_end = current_cpu_data.icache.ways <<
+                              current_cpu_data.icache.waybit;
+       unsigned long ws, addr;
+
+       K0_TO_K1();
+
+       for (ws = 0; ws < ws_end; ws += ws_inc)
+               for (addr = start; addr < end; addr += 0x400)
+                       cache32_unroll32(addr|ws,Index_Invalidate_I);
+
+       INVALIDATE_BTB();
+
+       K1_TO_K0();
+}
+
+static inline void blast_icache32_page(unsigned long page)
+{
+       unsigned long start = page;
+       unsigned long end = page + PAGE_SIZE;
+
+       K0_TO_K1();
+
+       do {
+               cache32_unroll32(start,Hit_Invalidate_I);
+               start += 0x400;
+       } while (start < end);
+
+       INVALIDATE_BTB();
+
+       K1_TO_K0();
+}
+
+static inline void blast_icache32_page_indexed(unsigned long page)
+{
+       unsigned long indexmask = current_cpu_data.icache.waysize - 1;
+       unsigned long start = INDEX_BASE + (page & indexmask);
+       unsigned long end = start + PAGE_SIZE;
+       unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
+       unsigned long ws_end = current_cpu_data.icache.ways <<
+                              current_cpu_data.icache.waybit;
+       unsigned long ws, addr;
+
+       K0_TO_K1();
+
+       for (ws = 0; ws < ws_end; ws += ws_inc)
+               for (addr = start; addr < end; addr += 0x400)
+                       cache32_unroll32(addr|ws,Index_Invalidate_I);
+
+       INVALIDATE_BTB();
+
+       K1_TO_K0();
+}
+
+#endif /* CONFIG_JZRISC */
+
 /* build blast_xxx_range, protected_blast_xxx_range */
 #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
 static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
@@ -432,13 +603,73 @@ static inline void prot##blast_##pfx##ca
        __##pfx##flush_epilogue                                         \
 }
 
+#ifndef CONFIG_JZRISC
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
+#endif
 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
+#ifndef CONFIG_JZRISC
 __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
+#endif
 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
 /* blast_inv_dcache_range */
 __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
 __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
 
+#ifdef CONFIG_JZRISC
+
+static inline void protected_blast_dcache_range(unsigned long start,
+                                               unsigned long end)
+{
+       unsigned long lsize = cpu_dcache_line_size();
+       unsigned long addr = start & ~(lsize - 1);
+       unsigned long aend = (end - 1) & ~(lsize - 1);
+
+       while (1) {
+               protected_cache_op(Hit_Writeback_Inv_D, addr);
+               if (addr == aend)
+                       break;
+               addr += lsize;
+       }
+       SYNC_WB();
+}
+
+static inline void protected_blast_icache_range(unsigned long start,
+                                               unsigned long end)
+{
+       unsigned long lsize = cpu_icache_line_size();
+       unsigned long addr = start & ~(lsize - 1);
+       unsigned long aend = (end - 1) & ~(lsize - 1);
+
+       K0_TO_K1();
+
+       while (1) {
+               protected_cache_op(Hit_Invalidate_I, addr);
+               if (addr == aend)
+                       break;
+               addr += lsize;
+       }
+       INVALIDATE_BTB();
+
+       K1_TO_K0();
+}
+
+static inline void blast_dcache_range(unsigned long start,
+                                     unsigned long end)
+{
+       unsigned long lsize = cpu_dcache_line_size();
+       unsigned long addr = start & ~(lsize - 1);
+       unsigned long aend = (end - 1) & ~(lsize - 1);
+
+       while (1) {
+               cache_op(Hit_Writeback_Inv_D, addr);
+               if (addr == aend)
+                       break;
+               addr += lsize;
+       }
+       SYNC_WB();
+}
+
+#endif /* CONFIG_JZRISC */
+
 #endif /* _ASM_R4KCACHE_H */
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -160,6 +160,7 @@ void __init check_wait(void)
        case CPU_BCM6348:
        case CPU_BCM6358:
        case CPU_CAVIUM_OCTEON:
+       case CPU_JZRISC:
                cpu_wait = r4k_wait;
                break;
 
@@ -902,6 +903,21 @@ static inline void cpu_probe_cavium(stru
        }
 }
 
+static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
+{
+       decode_configs(c);
+       c->options &= ~MIPS_CPU_COUNTER; /* JZRISC does not implement the CP0 counter. */
+       switch (c->processor_id & 0xff00) {
+       case PRID_IMP_JZRISC:
+               c->cputype = CPU_JZRISC;
+               __cpu_name[cpu] = "Ingenic JZRISC";
+               break;
+       default:
+               panic("Unknown Ingenic Processor ID!");
+               break;
+       }
+}
+
 const char *__cpu_name[NR_CPUS];
 
 __cpuinit void cpu_probe(void)
@@ -939,6 +955,9 @@ __cpuinit void cpu_probe(void)
        case PRID_COMP_CAVIUM:
                cpu_probe_cavium(c, cpu);
                break;
+       case PRID_COMP_INGENIC:
+               cpu_probe_ingenic(c, cpu);
+               break;
        }
 
        BUG_ON(!__cpu_name[cpu]);
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -389,6 +389,11 @@ static void __cpuinit build_tlb_write_en
                tlbw(p);
                break;
 
+       case CPU_JZRISC:
+               tlbw(p);
+               uasm_i_nop(p);
+               break;
+
        default:
                panic("No TLB refill handler yet (CPU type: %d)",
                      current_cpu_data.cputype);