Enable start-stop-daemon by default; I want to use this to clean up a few init scripts...
[openwrt.git] / target / linux / brcm47xx-2.6 / patches-2.6.22 / 150-cpu_fixes.patch
1 Index: linux-2.6.22/arch/mips/kernel/genex.S
2 ===================================================================
3 --- linux-2.6.22.orig/arch/mips/kernel/genex.S  2007-07-26 06:29:25.057170943 +0200
4 +++ linux-2.6.22/arch/mips/kernel/genex.S       2007-07-26 06:29:40.890073208 +0200
5 @@ -51,6 +51,10 @@
6  NESTED(except_vec3_generic, 0, sp)
7         .set    push
8         .set    noat
9 +#ifdef CONFIG_BCM947XX
10 +       nop
11 +       nop
12 +#endif
13  #if R5432_CP0_INTERRUPT_WAR
14         mfc0    k0, CP0_INDEX
15  #endif
16 Index: linux-2.6.22/arch/mips/mm/c-r4k.c
17 ===================================================================
18 --- linux-2.6.22.orig/arch/mips/mm/c-r4k.c      2007-07-26 06:29:40.826069560 +0200
19 +++ linux-2.6.22/arch/mips/mm/c-r4k.c   2007-07-26 06:32:45.956619550 +0200
20 @@ -29,6 +29,9 @@
21  #include <asm/cacheflush.h> /* for run_uncached() */
22  
23  
24 +/* For enabling BCM4710 cache workarounds */
25 +int bcm4710 = 0;
26 +
27  /*
28   * Special Variant of smp_call_function for use by cache functions:
29   *
30 @@ -85,14 +88,21 @@
31  
32  static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
33  {
34 +       unsigned long flags;
35 +
36 +       local_irq_save(flags);
37         R4600_HIT_CACHEOP_WAR_IMPL;
38         blast_dcache32_page(addr);
39 +       local_irq_restore(flags);
40  }
41  
42  static void __init r4k_blast_dcache_page_setup(void)
43  {
44         unsigned long  dc_lsize = cpu_dcache_line_size();
45  
46 +       if (bcm4710)
47 +               r4k_blast_dcache_page = blast_dcache_page;
48 +       else
49         if (dc_lsize == 0)
50                 r4k_blast_dcache_page = (void *)cache_noop;
51         else if (dc_lsize == 16)
52 @@ -107,6 +117,9 @@
53  {
54         unsigned long dc_lsize = cpu_dcache_line_size();
55  
56 +       if (bcm4710)
57 +               r4k_blast_dcache_page_indexed = blast_dcache_page_indexed;
58 +       else
59         if (dc_lsize == 0)
60                 r4k_blast_dcache_page_indexed = (void *)cache_noop;
61         else if (dc_lsize == 16)
62 @@ -121,6 +134,9 @@
63  {
64         unsigned long dc_lsize = cpu_dcache_line_size();
65  
66 +       if (bcm4710)
67 +               r4k_blast_dcache = blast_dcache;
68 +       else
69         if (dc_lsize == 0)
70                 r4k_blast_dcache = (void *)cache_noop;
71         else if (dc_lsize == 16)
72 @@ -202,8 +218,12 @@
73  
74  static void (* r4k_blast_icache_page)(unsigned long addr);
75  
76 +static void r4k_flush_cache_all(void);
77  static void __init r4k_blast_icache_page_setup(void)
78  {
79 +#ifdef CONFIG_BCM947XX
80 +       r4k_blast_icache_page = (void *)r4k_flush_cache_all;
81 +#else
82         unsigned long ic_lsize = cpu_icache_line_size();
83  
84         if (ic_lsize == 0)
85 @@ -214,6 +234,7 @@
86                 r4k_blast_icache_page = blast_icache32_page;
87         else if (ic_lsize == 64)
88                 r4k_blast_icache_page = blast_icache64_page;
89 +#endif
90  }
91  
92  
93 @@ -221,6 +242,9 @@
94  
95  static void __init r4k_blast_icache_page_indexed_setup(void)
96  {
97 +#ifdef CONFIG_BCM947XX
98 +       r4k_blast_icache_page_indexed = (void *)r4k_flush_cache_all;
99 +#else
100         unsigned long ic_lsize = cpu_icache_line_size();
101  
102         if (ic_lsize == 0)
103 @@ -239,6 +263,7 @@
104                                 blast_icache32_page_indexed;
105         } else if (ic_lsize == 64)
106                 r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
107 +#endif
108  }
109  
110  static void (* r4k_blast_icache)(void);
111 @@ -322,12 +347,17 @@
112   */
113  static inline void local_r4k_flush_cache_all(void * args)
114  {
115 +       unsigned long flags;
116 +
117 +       local_irq_save(flags);
118         r4k_blast_dcache();
119 +       r4k_blast_icache();
120 +       local_irq_restore(flags);
121  }
122  
123  static void r4k_flush_cache_all(void)
124  {
125 -       if (!cpu_has_dc_aliases)
126 +       if (!cpu_has_dc_aliases && cpu_use_kmap_coherent)
127                 return;
128  
129         r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
130 @@ -335,6 +365,9 @@
131  
132  static inline void local_r4k___flush_cache_all(void * args)
133  {
134 +       unsigned long flags;
135 +
136 +       local_irq_save(flags);
137         r4k_blast_dcache();
138         r4k_blast_icache();
139  
140 @@ -348,6 +381,7 @@
141         case CPU_R14000:
142                 r4k_blast_scache();
143         }
144 +       local_irq_restore(flags);
145  }
146  
147  static void r4k___flush_cache_all(void)
148 @@ -358,17 +392,21 @@
149  static inline void local_r4k_flush_cache_range(void * args)
150  {
151         struct vm_area_struct *vma = args;
152 +       unsigned long flags;
153  
154         if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
155                 return;
156  
157 +       local_irq_save(flags);
158         r4k_blast_dcache();
159 +       r4k_blast_icache();
160 +       local_irq_restore(flags);
161  }
162  
163  static void r4k_flush_cache_range(struct vm_area_struct *vma,
164         unsigned long start, unsigned long end)
165  {
166 -       if (!cpu_has_dc_aliases)
167 +       if (!cpu_has_dc_aliases && cpu_use_kmap_coherent)
168                 return;
169  
170         r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
171 @@ -377,6 +415,7 @@
172  static inline void local_r4k_flush_cache_mm(void * args)
173  {
174         struct mm_struct *mm = args;
175 +       unsigned long flags;
176  
177         if (!cpu_context(smp_processor_id(), mm))
178                 return;
179 @@ -395,12 +434,15 @@
180                 return;
181         }
182  
183 +       local_irq_save(flags);
184         r4k_blast_dcache();
185 +       r4k_blast_icache();
186 +       local_irq_restore(flags);
187  }
188  
189  static void r4k_flush_cache_mm(struct mm_struct *mm)
190  {
191 -       if (!cpu_has_dc_aliases)
192 +       if (!cpu_has_dc_aliases && cpu_use_kmap_coherent)
193                 return;
194  
195         r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
196 @@ -420,6 +462,7 @@
197         unsigned long paddr = fcp_args->pfn << PAGE_SHIFT;
198         int exec = vma->vm_flags & VM_EXEC;
199         struct mm_struct *mm = vma->vm_mm;
200 +       unsigned long flags;
201         pgd_t *pgdp;
202         pud_t *pudp;
203         pmd_t *pmdp;
204 @@ -451,8 +494,9 @@
205          * for every cache flush operation.  So we do indexed flushes
206          * in that case, which doesn't overly flush the cache too much.
207          */
208 +       local_irq_save(flags);
209         if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
210 -               if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
211 +               if (!cpu_use_kmap_coherent || cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
212                         r4k_blast_dcache_page(addr);
213                         if (exec && !cpu_icache_snoops_remote_store)
214                                 r4k_blast_scache_page(addr);
215 @@ -460,14 +504,14 @@
216                 if (exec)
217                         r4k_blast_icache_page(addr);
218  
219 -               return;
220 +               goto done;
221         }
222  
223         /*
224          * Do indexed flush, too much work to get the (possible) TLB refills
225          * to work correctly.
226          */
227 -       if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
228 +       if (!cpu_use_kmap_coherent || cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
229                 r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
230                                               paddr : addr);
231                 if (exec && !cpu_icache_snoops_remote_store) {
232 @@ -483,6 +527,8 @@
233                 } else
234                         r4k_blast_icache_page_indexed(addr);
235         }
236 +done:
237 +       local_irq_restore(flags);
238  }
239  
240  static void r4k_flush_cache_page(struct vm_area_struct *vma,
241 @@ -499,7 +545,11 @@
242  
243  static inline void local_r4k_flush_data_cache_page(void * addr)
244  {
245 +       unsigned long flags;
246 +
247 +       local_irq_save(flags);
248         r4k_blast_dcache_page((unsigned long) addr);
249 +       local_irq_restore(flags);
250  }
251  
252  static void r4k_flush_data_cache_page(unsigned long addr)
253 @@ -542,6 +592,9 @@
254  
255  static void r4k_flush_icache_range(unsigned long start, unsigned long end)
256  {
257 +#ifdef CONFIG_BCM947XX
258 +       r4k_flush_cache_all();
259 +#else
260         struct flush_icache_range_args args;
261  
262         args.start = start;
263 @@ -549,12 +602,15 @@
264  
265         r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
266         instruction_hazard();
267 +#endif
268  }
269  
270  #ifdef CONFIG_DMA_NONCOHERENT
271  
272  static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
273  {
274 +       unsigned long flags;
275 +
276         /* Catch bad driver code */
277         BUG_ON(size == 0);
278  
279 @@ -571,18 +627,21 @@
280          * subset property so we have to flush the primary caches
281          * explicitly
282          */
283 +       local_irq_save(flags);
284         if (size >= dcache_size) {
285                 r4k_blast_dcache();
286         } else {
287                 R4600_HIT_CACHEOP_WAR_IMPL;
288                 blast_dcache_range(addr, addr + size);
289         }
290 -
291         bc_wback_inv(addr, size);
292 +       local_irq_restore(flags);
293  }
294  
295  static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
296  {
297 +       unsigned long flags;
298 +
299         /* Catch bad driver code */
300         BUG_ON(size == 0);
301  
302 @@ -594,6 +653,7 @@
303                 return;
304         }
305  
306 +       local_irq_save(flags);
307         if (size >= dcache_size) {
308                 r4k_blast_dcache();
309         } else {
310 @@ -602,6 +662,7 @@
311         }
312  
313         bc_inv(addr, size);
314 +       local_irq_restore(flags);
315  }
316  #endif /* CONFIG_DMA_NONCOHERENT */
317  
318 @@ -616,8 +677,12 @@
319         unsigned long dc_lsize = cpu_dcache_line_size();
320         unsigned long sc_lsize = cpu_scache_line_size();
321         unsigned long addr = (unsigned long) arg;
322 +       unsigned long flags;
323  
324 +       local_irq_save(flags);
325         R4600_HIT_CACHEOP_WAR_IMPL;
326 +       BCM4710_PROTECTED_FILL_TLB(addr);
327 +       BCM4710_PROTECTED_FILL_TLB(addr + 4);
328         if (dc_lsize)
329                 protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
330         if (!cpu_icache_snoops_remote_store && scache_size)
331 @@ -644,6 +709,7 @@
332         }
333         if (MIPS_CACHE_SYNC_WAR)
334                 __asm__ __volatile__ ("sync");
335 +       local_irq_restore(flags);
336  }
337  
338  static void r4k_flush_cache_sigtramp(unsigned long addr)
339 @@ -1144,6 +1210,17 @@
340          * silly idea of putting something else there ...
341          */
342         switch (current_cpu_data.cputype) {
343 +       case CPU_BCM3302:
344 +               {
345 +                       u32 cm;
346 +                       cm = read_c0_diag();
347 +                       /* Enable icache */
348 +                       cm |= (1 << 31);
349 +                       /* Enable dcache */
350 +                       cm |= (1 << 30);
351 +                       write_c0_diag(cm);
352 +               }
353 +               break;
354         case CPU_R4000PC:
355         case CPU_R4000SC:
356         case CPU_R4000MC:
357 @@ -1174,6 +1251,15 @@
358         /* Default cache error handler for R4000 and R5000 family */
359         set_uncached_handler (0x100, &except_vec2_generic, 0x80);
360  
361 +       /* Check if special workarounds are required */
362 +#ifdef CONFIG_BCM947XX
363 +       if (current_cpu_data.cputype == CPU_BCM4710 && (current_cpu_data.processor_id & 0xff) == 0) {
364 +               printk("Enabling BCM4710A0 cache workarounds.\n");
365 +               bcm4710 = 1;
366 +       } else
367 +#endif
368 +               bcm4710 = 0;
369 +
370         probe_pcache();
371         setup_scache();
372  
373 @@ -1219,5 +1305,13 @@
374         build_clear_page();
375         build_copy_page();
376         local_r4k___flush_cache_all(NULL);
377 +#ifdef CONFIG_BCM947XX
378 +       {
379 +               static void (*_coherency_setup)(void);
380 +               _coherency_setup = (void (*)(void)) KSEG1ADDR(coherency_setup);
381 +               _coherency_setup();
382 +       }
383 +#else
384         coherency_setup();
385 +#endif
386  }
387 Index: linux-2.6.22/arch/mips/mm/tlbex.c
388 ===================================================================
389 --- linux-2.6.22.orig/arch/mips/mm/tlbex.c      2007-07-26 06:29:40.582055658 +0200
390 +++ linux-2.6.22/arch/mips/mm/tlbex.c   2007-07-26 06:32:45.964620005 +0200
391 @@ -1229,6 +1229,10 @@
392  #endif
393  }
394  
395 +#ifdef CONFIG_BCM947XX
396 +extern int bcm4710;
397 +#endif
398 +
399  static void __init build_r4000_tlb_refill_handler(void)
400  {
401         u32 *p = tlb_handler;
402 @@ -1243,6 +1247,10 @@
403         memset(relocs, 0, sizeof(relocs));
404         memset(final_handler, 0, sizeof(final_handler));
405  
406 +#ifdef CONFIG_BCM947XX
407 +       i_nop(&p);
408 +#endif
409 +
410         /*
411          * create the plain linear handler
412          */
413 @@ -1736,6 +1744,9 @@
414         memset(labels, 0, sizeof(labels));
415         memset(relocs, 0, sizeof(relocs));
416  
417 +#ifdef CONFIG_BCM947XX
418 +       i_nop(&p);
419 +#endif
420         if (bcm1250_m3_war()) {
421                 i_MFC0(&p, K0, C0_BADVADDR);
422                 i_MFC0(&p, K1, C0_ENTRYHI);
423 Index: linux-2.6.22/include/asm-mips/r4kcache.h
424 ===================================================================
425 --- linux-2.6.22.orig/include/asm-mips/r4kcache.h       2007-07-26 06:29:25.085172538 +0200
426 +++ linux-2.6.22/include/asm-mips/r4kcache.h    2007-07-26 06:29:40.938075943 +0200
427 @@ -17,6 +17,20 @@
428  #include <asm/cpu-features.h>
429  #include <asm/mipsmtregs.h>
430  
431 +#ifdef CONFIG_BCM947XX
432 +#include <asm/paccess.h>
433 +#include <linux/ssb/ssb.h>
434 +#define BCM4710_DUMMY_RREG() ((void) *((u8 *) KSEG1ADDR(SSB_ENUM_BASE + SSB_IMSTATE)))
435 +
436 +#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr))
437 +#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); })
438 +#else
439 +#define BCM4710_DUMMY_RREG()
440 +
441 +#define BCM4710_FILL_TLB(addr)
442 +#define BCM4710_PROTECTED_FILL_TLB(addr)
443 +#endif
444 +
445  /*
446   * This macro return a properly sign-extended address suitable as base address
447   * for indexed cache operations.  Two issues here:
448 @@ -150,6 +164,7 @@
449  static inline void flush_dcache_line_indexed(unsigned long addr)
450  {
451         __dflush_prologue
452 +       BCM4710_DUMMY_RREG();
453         cache_op(Index_Writeback_Inv_D, addr);
454         __dflush_epilogue
455  }
456 @@ -169,6 +184,7 @@
457  static inline void flush_dcache_line(unsigned long addr)
458  {
459         __dflush_prologue
460 +       BCM4710_DUMMY_RREG();
461         cache_op(Hit_Writeback_Inv_D, addr);
462         __dflush_epilogue
463  }
464 @@ -176,6 +192,7 @@
465  static inline void invalidate_dcache_line(unsigned long addr)
466  {
467         __dflush_prologue
468 +       BCM4710_DUMMY_RREG();
469         cache_op(Hit_Invalidate_D, addr);
470         __dflush_epilogue
471  }
472 @@ -208,6 +225,7 @@
473   */
474  static inline void protected_flush_icache_line(unsigned long addr)
475  {
476 +       BCM4710_DUMMY_RREG();
477         protected_cache_op(Hit_Invalidate_I, addr);
478  }
479  
480 @@ -219,6 +237,7 @@
481   */
482  static inline void protected_writeback_dcache_line(unsigned long addr)
483  {
484 +       BCM4710_DUMMY_RREG();
485         protected_cache_op(Hit_Writeback_Inv_D, addr);
486  }
487  
488 @@ -339,8 +358,52 @@
489                 : "r" (base),                                           \
490                   "i" (op));
491  
492 +static inline void blast_dcache(void)
493 +{
494 +       unsigned long start = KSEG0;
495 +       unsigned long dcache_size = current_cpu_data.dcache.waysize * current_cpu_data.dcache.ways;
496 +       unsigned long end = (start + dcache_size);
497 +
498 +       do {
499 +               BCM4710_DUMMY_RREG();
500 +               cache_op(Index_Writeback_Inv_D, start);
501 +               start += current_cpu_data.dcache.linesz;
502 +       } while(start < end);
503 +}
504 +
505 +static inline void blast_dcache_page(unsigned long page)
506 +{
507 +       unsigned long start = page;
508 +       unsigned long end = start + PAGE_SIZE;
509 +
510 +       BCM4710_FILL_TLB(start);
511 +       do {
512 +               BCM4710_DUMMY_RREG();
513 +               cache_op(Hit_Writeback_Inv_D, start);
514 +               start += current_cpu_data.dcache.linesz;
515 +       } while(start < end);
516 +}
517 +
518 +static inline void blast_dcache_page_indexed(unsigned long page)
519 +{
520 +       unsigned long start = page;
521 +       unsigned long end = start + PAGE_SIZE;
522 +       unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
523 +       unsigned long ws_end = current_cpu_data.dcache.ways <<
524 +                              current_cpu_data.dcache.waybit;
525 +       unsigned long ws, addr;
526 +       for (ws = 0; ws < ws_end; ws += ws_inc) {
527 +               start = page + ws;
528 +               for (addr = start; addr < end; addr += current_cpu_data.dcache.linesz) {
529 +                       BCM4710_DUMMY_RREG();
530 +                       cache_op(Index_Writeback_Inv_D, addr);
531 +               }
532 +       }
533 +}
534 +
535 +
536  /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
537 -#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
538 +#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, war) \
539  static inline void blast_##pfx##cache##lsize(void)                     \
540  {                                                                      \
541         unsigned long start = INDEX_BASE;                               \
542 @@ -352,6 +415,7 @@
543                                                                         \
544         __##pfx##flush_prologue                                         \
545                                                                         \
546 +       war                                                             \
547         for (ws = 0; ws < ws_end; ws += ws_inc)                         \
548                 for (addr = start; addr < end; addr += lsize * 32)      \
549                         cache##lsize##_unroll32(addr|ws,indexop);       \
550 @@ -366,6 +430,7 @@
551                                                                         \
552         __##pfx##flush_prologue                                         \
553                                                                         \
554 +       war                                                             \
555         do {                                                            \
556                 cache##lsize##_unroll32(start,hitop);                   \
557                 start += lsize * 32;                                    \
558 @@ -384,6 +449,8 @@
559                                current_cpu_data.desc.waybit;            \
560         unsigned long ws, addr;                                         \
561                                                                         \
562 +       war                                                             \
563 +                                                                       \
564         __##pfx##flush_prologue                                         \
565                                                                         \
566         for (ws = 0; ws < ws_end; ws += ws_inc)                         \
567 @@ -393,28 +460,30 @@
568         __##pfx##flush_epilogue                                         \
569  }
570  
571 -__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
572 -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
573 -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
574 -__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
575 -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
576 -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
577 -__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
578 -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
579 -__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
580 +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
581 +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, BCM4710_FILL_TLB(start);)
582 +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
583 +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
584 +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, BCM4710_FILL_TLB(start);)
585 +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
586 +__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, BCM4710_FILL_TLB(start);)
587 +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
588 +__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
589  
590  /* build blast_xxx_range, protected_blast_xxx_range */
591 -#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
592 +#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, war, war2) \
593  static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
594                                                     unsigned long end)  \
595  {                                                                      \
596         unsigned long lsize = cpu_##desc##_line_size();                 \
597         unsigned long addr = start & ~(lsize - 1);                      \
598         unsigned long aend = (end - 1) & ~(lsize - 1);                  \
599 +       war                                                             \
600                                                                         \
601         __##pfx##flush_prologue                                         \
602                                                                         \
603         while (1) {                                                     \
604 +               war2                                            \
605                 prot##cache_op(hitop, addr);                            \
606                 if (addr == aend)                                       \
607                         break;                                          \
608 @@ -424,13 +493,13 @@
609         __##pfx##flush_epilogue                                         \
610  }
611  
612 -__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
613 -__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
614 -__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
615 -__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
616 -__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
617 +__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, BCM4710_PROTECTED_FILL_TLB(addr); BCM4710_PROTECTED_FILL_TLB(aend);, BCM4710_DUMMY_RREG();)
618 +__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_,, )
619 +__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_,, )
620 +__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D,, BCM4710_FILL_TLB(addr); BCM4710_FILL_TLB(aend);, BCM4710_DUMMY_RREG();)
621 +__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD,,, )
622  /* blast_inv_dcache_range */
623 -__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
624 -__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
625 +__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D,,,BCM4710_DUMMY_RREG();)
626 +__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD,,, )
627  
628  #endif /* _ASM_R4KCACHE_H */
629 Index: linux-2.6.22/include/asm-mips/stackframe.h
630 ===================================================================
631 --- linux-2.6.22.orig/include/asm-mips/stackframe.h     2007-07-26 06:29:25.093172994 +0200
632 +++ linux-2.6.22/include/asm-mips/stackframe.h  2007-07-26 06:29:40.962077312 +0200
633 @@ -350,6 +350,10 @@
634                 .macro  RESTORE_SP_AND_RET
635                 LONG_L  sp, PT_R29(sp)
636                 .set    mips3
637 +#ifdef CONFIG_BCM947XX
638 +               nop
639 +               nop
640 +#endif
641                 eret
642                 .set    mips0
643                 .endm