openwrt.git: target/linux/lantiq/patches/0021-MIPS-lantiq-adds-cache-split.patch
From 0f85e79f6f01f50cb703866a555085a9c65bad2f Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Thu, 29 Sep 2011 20:31:54 +0200
Subject: [PATCH 21/24] MIPS: lantiq: adds cache split

---
 arch/mips/Kconfig        |   22 ++++++
 arch/mips/kernel/vpe.c   |   66 ++++++++++++++++++
 arch/mips/lantiq/setup.c |   11 ++++++-----
 arch/mips/mm/c-r4k.c     |  172 ++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 266 insertions(+), 5 deletions(-)

--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1922,6 +1922,28 @@ config IFX_VPE_EXT
        help
          IFX included extensions in APRP
 
+config IFX_VPE_CACHE_SPLIT
+       bool "IFX Cache Split Ways"
+       depends on IFX_VPE_EXT
+       help
+         IFX extension for reserving (splitting) cache ways among VPEs. You must
+         pass the kernel command line arguments vpe_icache_shared=0 or
+         vpe_dcache_shared=0 to enable splitting of the icache or dcache
+         respectively. You can then specify which cache ways should be
+         assigned to which VPE. There are 8 cache ways in total, 4 each
+         for the dcache and icache: dcache_way0, dcache_way1, dcache_way2,
+         dcache_way3 and icache_way0, icache_way1, icache_way2, icache_way3.
+
+         For example, if you specify vpe_icache_shared=0 and icache_way2=1,
+         then the 3rd icache way is assigned to VPE0 and denied to VPE1.
+
+         For the icache, software must keep at least one cache way available
+         to each VPE at all times, i.e. one cannot assign all the icache ways
+         to one VPE.
+
+         By default, vpe_dcache_shared and vpe_icache_shared are set to 1
+         (i.e. both the icache and dcache are shared among the VPEs).
+
 config PERFCTRS
        bool "34K Performance counters"
        depends on MIPS_MT && PROC_FS
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -128,6 +128,13 @@ __setup("vpe1_wdog_timeout=", wdog_timeo
 EXPORT_SYMBOL(vpe1_wdog_timeout);
 
 #endif
+
+#ifdef CONFIG_IFX_VPE_CACHE_SPLIT /* Code for splitting the cache ways among VPEs. */
+extern int vpe_icache_shared,vpe_dcache_shared;
+extern int icache_way0,icache_way1,icache_way2,icache_way3;
+extern int dcache_way0,dcache_way1,dcache_way2,dcache_way3;
+#endif
+
 /* grab the likely amount of memory we will need. */
 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
 #define P_SIZE (2 * 1024 * 1024)
@@ -866,6 +873,65 @@ static int vpe_run(struct vpe * v)
        /* enable this VPE */
        write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
 
+#ifdef CONFIG_IFX_VPE_CACHE_SPLIT
+       if ( (!vpe_icache_shared) || (!vpe_dcache_shared) ) {
+
+               /* PCP bit must be 1 to split the cache */
+               if(read_c0_mvpconf0() & MVPCONF0_PCP) {
+
+                       if ( !vpe_icache_shared ){
+                               write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0()) & ~VPECONF0_ICS);
+
+                               /*
+                                * If any cache way is 1, then that way is denied
+                                * in VPE1. Otherwise assign that way to VPE1.
+                                */
+                               if (!icache_way0)
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX0 );
+                               else
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX0 );
+                               if (!icache_way1)
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX1 );
+                               else
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX1 );
+                               if (!icache_way2)
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX2 );
+                               else
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX2 );
+                               if (!icache_way3)
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX3 );
+                               else
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX3 );
+                       }
+
+                       if ( !vpe_dcache_shared ) {
+                               write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0()) & ~VPECONF0_DCS);
+
+                               /*
+                                * If any cache way is 1, then that way is denied
+                                * in VPE1. Otherwise assign that way to VPE1.
+                                */
+                               if (!dcache_way0)
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX0 );
+                               else
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX0 );
+                               if (!dcache_way1)
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX1 );
+                               else
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX1 );
+                               if (!dcache_way2)
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX2 );
+                               else
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX2 );
+                               if (!dcache_way3)
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX3 );
+                               else
+                                       write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX3 );
+                       }
+               }
+       }
+#endif /* endif CONFIG_IFX_VPE_CACHE_SPLIT */
+
        /* clear out any left overs from a previous program */
        write_vpe_c0_status(0);
        write_vpe_c0_cause(0);
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1350,6 +1350,106 @@ static int __init setcoherentio(char *st
 __setup("coherentio", setcoherentio);
 #endif
 
+#ifdef CONFIG_IFX_VPE_CACHE_SPLIT /* Code for splitting the cache ways among VPEs. */
+
+#include <asm/mipsmtregs.h>
+
+/*
+ * By default, vpe_icache_shared and vpe_dcache_shared
+ * values are 1 i.e., both icache and dcache are shared
+ * among the VPEs.
+ */
+
+int vpe_icache_shared = 1;
+static int __init vpe_icache_shared_val(char *str)
+{
+       get_option(&str, &vpe_icache_shared);
+       return 1;
+}
+__setup("vpe_icache_shared=", vpe_icache_shared_val);
+EXPORT_SYMBOL(vpe_icache_shared);
+
+int vpe_dcache_shared = 1;
+static int __init vpe_dcache_shared_val(char *str)
+{
+       get_option(&str, &vpe_dcache_shared);
+       return 1;
+}
+__setup("vpe_dcache_shared=", vpe_dcache_shared_val);
+EXPORT_SYMBOL(vpe_dcache_shared);
+
+/*
+ * Software is required to make at least one icache
+ * way available for a VPE at all times, i.e. one
+ * can't assign all the icache ways to one VPE.
+ */
+
+int icache_way0 = 0;
+static int __init icache_way0_val(char *str)
+{
+       get_option(&str, &icache_way0);
+       return 1;
+}
+__setup("icache_way0=", icache_way0_val);
+
+int icache_way1 = 0;
+static int __init icache_way1_val(char *str)
+{
+       get_option(&str, &icache_way1);
+       return 1;
+}
+__setup("icache_way1=", icache_way1_val);
+
+int icache_way2 = 0;
+static int __init icache_way2_val(char *str)
+{
+       get_option(&str, &icache_way2);
+       return 1;
+}
+__setup("icache_way2=", icache_way2_val);
+
+int icache_way3 = 0;
+static int __init icache_way3_val(char *str)
+{
+       get_option(&str, &icache_way3);
+       return 1;
+}
+__setup("icache_way3=", icache_way3_val);
+
+int dcache_way0 = 0;
+static int __init dcache_way0_val(char *str)
+{
+       get_option(&str, &dcache_way0);
+       return 1;
+}
+__setup("dcache_way0=", dcache_way0_val);
+
+int dcache_way1 = 0;
+static int __init dcache_way1_val(char *str)
+{
+       get_option(&str, &dcache_way1);
+       return 1;
+}
+__setup("dcache_way1=", dcache_way1_val);
+
+int dcache_way2 = 0;
+static int __init dcache_way2_val(char *str)
+{
+       get_option(&str, &dcache_way2);
+       return 1;
+}
+__setup("dcache_way2=", dcache_way2_val);
+
+int dcache_way3 = 0;
+static int __init dcache_way3_val(char *str)
+{
+       get_option(&str, &dcache_way3);
+       return 1;
+}
+__setup("dcache_way3=", dcache_way3_val);
+
+#endif /* endif CONFIG_IFX_VPE_CACHE_SPLIT */
+
 void __cpuinit r4k_cache_init(void)
 {
        extern void build_clear_page(void);
@@ -1369,6 +1469,78 @@ void __cpuinit r4k_cache_init(void)
                break;
        }
 
+#ifdef CONFIG_IFX_VPE_CACHE_SPLIT
+       /*
+        * We split the cache ways appropriately among the VPEs
+        * based on cache ways values we received as command line
+        * arguments
+        */
+       if ( (!vpe_icache_shared) || (!vpe_dcache_shared) ){
+
+               /* PCP bit must be 1 to split the cache */
+               if(read_c0_mvpconf0() & MVPCONF0_PCP) {
+
+                       /* Set CPA bit which enables us to modify VPEOpt register */
+                       write_c0_mvpcontrol((read_c0_mvpcontrol()) | MVPCONTROL_CPA);
+
+                       if ( !vpe_icache_shared ){
+                               write_c0_vpeconf0((read_c0_vpeconf0()) & ~VPECONF0_ICS);
+                               /*
+                                * If any cache way is 1, then that way is denied
+                                * in VPE0. Otherwise assign that way to VPE0.
+                                */
+                               printk(KERN_DEBUG "icache is split\n");
+                               printk(KERN_DEBUG "icache_way0=%d icache_way1=%d icache_way2=%d icache_way3=%d\n",
+                                       icache_way0, icache_way1,icache_way2, icache_way3);
+                               if (icache_way0)
+                                       write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX0 );
+                               else
+                                       write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX0 );
+                               if (icache_way1)
+                                       write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX1 );
+                               else
+                                       write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX1 );
+                               if (icache_way2)
+                                       write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX2 );
+                               else
+                                       write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX2 );
+                               if (icache_way3)
+                                       write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX3 );
+                               else
+                                       write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX3 );
+                       }
+
+                       if ( !vpe_dcache_shared ) {
+                               /*
+                                * If any cache way is 1, then that way is denied
+                                * in VPE0. Otherwise assign that way to VPE0.
+                                */
+                               printk(KERN_DEBUG "dcache is split\n");
+                               printk(KERN_DEBUG "dcache_way0=%d dcache_way1=%d dcache_way2=%d dcache_way3=%d\n",
+                                       dcache_way0, dcache_way1, dcache_way2, dcache_way3);
+                               write_c0_vpeconf0((read_c0_vpeconf0()) & ~VPECONF0_DCS);
+                               if (dcache_way0)
+                                       write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX0 );
+                               else
+                                       write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX0 );
+                               if (dcache_way1)
+                                       write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX1 );
+                               else
+                                       write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX1 );
+                               if (dcache_way2)
+                                       write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX2 );
+                               else
+                                       write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX2 );
+                               if (dcache_way3)
+                                       write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX3 );
+                               else
+                                       write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX3 );
+                       }
+               }
+       }
+
+#endif /* endif CONFIG_IFX_VPE_CACHE_SPLIT */
+
        probe_pcache();
        setup_scache();
 
--- a/arch/mips/lantiq/setup.c
+++ b/arch/mips/lantiq/setup.c
@@ -18,10 +18,11 @@
 #include "devices.h"
 #include "prom.h"
 
+/* assume 16M as default in case uboot fails to pass proper ramsize */
+unsigned long physical_memsize = 16L;
+
 void __init plat_mem_setup(void)
 {
-       /* assume 16M as default incase uboot fails to pass proper ramsize */
-       unsigned long memsize = 16;
        char **envp = (char **) KSEG1ADDR(fw_arg2);
 
        ioport_resource.start = IOPORT_RESOURCE_START;
@@ -35,13 +36,13 @@ void __init plat_mem_setup(void)
                char *e = (char *)KSEG1ADDR(*envp);
                if (!strncmp(e, "memsize=", 8)) {
                        e += 8;
-                       if (strict_strtoul(e, 0, &memsize))
+                       if (strict_strtoul(e, 0, &physical_memsize))
                                pr_warn("bad memsize specified\n");
                }
                envp++;
        }
-       memsize *= 1024 * 1024;
-       add_memory_region(0x00000000, memsize, BOOT_MEM_RAM);
+       physical_memsize *= 1024 * 1024;
+       add_memory_region(0x00000000, physical_memsize, BOOT_MEM_RAM);
 }
 
 static int __init
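
Usage sketch (not part of the patch itself): the Kconfig help above describes the boot-time interface this patch adds. As a minimal example, with the particular way assignment being only an illustrative assumption, a system that should reserve icache and dcache ways 0 and 1 for VPE0 and leave ways 2 and 3 to VPE1 would, per that help text, boot with kernel command line arguments along these lines:

    vpe_icache_shared=0 icache_way0=1 icache_way1=1 vpe_dcache_shared=0 dcache_way0=1 dcache_way1=1

According to the help text and the comments in the vpe.c hunk, ways flagged with 1 are reserved for VPE0 and denied to VPE1, the remaining ways go to VPE1, and at least one icache way must stay available to each VPE.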