1 --- a/arch/mips/Kconfig
2 +++ b/arch/mips/Kconfig
5 IFX included extensions in APRP
7 +config IFX_VPE_CACHE_SPLIT
8 + bool "IFX Cache Split Ways"
9 + depends on IFX_VPE_EXT
11 + IFX extension for reserving (splitting) cache ways among VPEs. You must
12 + give kernel command line arguments vpe_icache_shared=0 or
13 + vpe_dcache_shared=0 to enable splitting of icache or dcache
14 + respectively. Then you can specify which cache ways should be
15 + assigned to which VPE. There are a total of 8 cache ways, 4 each
16 + for dcache and icache: dcache_way0, dcache_way1, dcache_way2,
17 + dcache_way3 and icache_way0, icache_way1, icache_way2, icache_way3.
19 + For example, if you specify vpe_icache_shared=0 and icache_way2=1,
20 + then the 3rd icache way will be assigned to VPE0 and denied in VPE1.
22 + For icache, software is required to make at least one cache way available
23 + for a VPE at all times i.e., one can't assign all the icache ways to one
26 + By default, vpe_dcache_shared and vpe_icache_shared are set to 1
27 + (i.e., both icache and dcache are shared among VPEs)
30 bool "34K Performance counters"
31 depends on MIPS_MT && PROC_FS
32 --- a/arch/mips/kernel/vpe.c
33 +++ b/arch/mips/kernel/vpe.c
35 EXPORT_SYMBOL(vpe1_wdog_timeout);
39 +#ifdef CONFIG_IFX_VPE_CACHE_SPLIT /* Code for splitting the cache ways among VPEs. */
40 +extern int vpe_icache_shared,vpe_dcache_shared;
41 +extern int icache_way0,icache_way1,icache_way2,icache_way3;
42 +extern int dcache_way0,dcache_way1,dcache_way2,dcache_way3;
45 /* grab the likely amount of memory we will need. */
46 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
47 #define P_SIZE (2 * 1024 * 1024)
50 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
52 +#ifdef CONFIG_IFX_VPE_CACHE_SPLIT
53 + if ( (!vpe_icache_shared) || (!vpe_dcache_shared) ) {
55 + /* PCP bit must be 1 to split the cache */
56 + if(read_c0_mvpconf0() & MVPCONF0_PCP) {
58 + if ( !vpe_icache_shared ){
59 + write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0()) & ~VPECONF0_ICS);
62 + * If any cache way is 1, then that way is denied
63 + * in VPE1. Otherwise assign that way to VPE1.
66 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX0 );
68 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX0 );
70 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX1 );
72 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX1 );
74 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX2 );
76 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX2 );
78 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_IWX3 );
80 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_IWX3 );
83 + if ( !vpe_dcache_shared ) {
84 + write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0()) & ~VPECONF0_DCS);
87 + * If any cache way is 1, then that way is denied
88 + * in VPE1. Otherwise assign that way to VPE1.
91 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX0 );
93 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX0 );
95 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX1 );
97 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX1 );
99 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX2 );
101 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX2 );
103 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() | VPEOPT_DWX3 );
105 + write_vpe_c0_vpeopt(read_vpe_c0_vpeopt() & ~VPEOPT_DWX3 );
109 +#endif /* CONFIG_IFX_VPE_CACHE_SPLIT */
111 /* clear out any left overs from a previous program */
112 write_vpe_c0_status(0);
113 write_vpe_c0_cause(0);
114 --- a/arch/mips/mm/c-r4k.c
115 +++ b/arch/mips/mm/c-r4k.c
116 @@ -1348,6 +1348,106 @@
117 __setup("coherentio", setcoherentio);
120 +#ifdef CONFIG_IFX_VPE_CACHE_SPLIT /* Code for splitting the cache ways among VPEs. */
122 +#include <asm/mipsmtregs.h>
125 + * By default, vpe_icache_shared and vpe_dcache_shared
126 + * values are 1 i.e., both icache and dcache are shared
130 +int vpe_icache_shared = 1;
131 +static int __init vpe_icache_shared_val(char *str)
133 + get_option(&str, &vpe_icache_shared);
136 +__setup("vpe_icache_shared=", vpe_icache_shared_val);
137 +EXPORT_SYMBOL(vpe_icache_shared);
139 +int vpe_dcache_shared = 1;
140 +static int __init vpe_dcache_shared_val(char *str)
142 + get_option(&str, &vpe_dcache_shared);
145 +__setup("vpe_dcache_shared=", vpe_dcache_shared_val);
146 +EXPORT_SYMBOL(vpe_dcache_shared);
149 + * Software is required to make at least one icache
150 + * way available for a VPE at all times i.e., one
151 + * can't assign all the icache ways to one VPE.
154 +int icache_way0 = 0;
155 +static int __init icache_way0_val(char *str)
157 + get_option(&str, &icache_way0);
160 +__setup("icache_way0=", icache_way0_val);
162 +int icache_way1 = 0;
163 +static int __init icache_way1_val(char *str)
165 + get_option(&str, &icache_way1);
168 +__setup("icache_way1=", icache_way1_val);
170 +int icache_way2 = 0;
171 +static int __init icache_way2_val(char *str)
173 + get_option(&str, &icache_way2);
176 +__setup("icache_way2=", icache_way2_val);
178 +int icache_way3 = 0;
179 +static int __init icache_way3_val(char *str)
181 + get_option(&str, &icache_way3);
184 +__setup("icache_way3=", icache_way3_val);
186 +int dcache_way0 = 0;
187 +static int __init dcache_way0_val(char *str)
189 + get_option(&str, &dcache_way0);
192 +__setup("dcache_way0=", dcache_way0_val);
194 +int dcache_way1 = 0;
195 +static int __init dcache_way1_val(char *str)
197 + get_option(&str, &dcache_way1);
200 +__setup("dcache_way1=", dcache_way1_val);
202 +int dcache_way2 = 0;
203 +static int __init dcache_way2_val(char *str)
205 + get_option(&str, &dcache_way2);
208 +__setup("dcache_way2=", dcache_way2_val);
210 +int dcache_way3 = 0;
211 +static int __init dcache_way3_val(char *str)
213 + get_option(&str, &dcache_way3);
216 +__setup("dcache_way3=", dcache_way3_val);
218 +#endif /* CONFIG_IFX_VPE_CACHE_SPLIT */
220 void __cpuinit r4k_cache_init(void)
222 extern void build_clear_page(void);
223 @@ -1367,6 +1467,78 @@
227 +#ifdef CONFIG_IFX_VPE_CACHE_SPLIT
229 + * We split the cache ways appropriately among the VPEs
230 + * based on cache ways values we received as command line
233 + if ( (!vpe_icache_shared) || (!vpe_dcache_shared) ){
235 + /* PCP bit must be 1 to split the cache */
236 + if(read_c0_mvpconf0() & MVPCONF0_PCP) {
238 + /* Set CPA bit which enables us to modify VPEOpt register */
239 + write_c0_mvpcontrol((read_c0_mvpcontrol()) | MVPCONTROL_CPA);
241 + if ( !vpe_icache_shared ){
242 + write_c0_vpeconf0((read_c0_vpeconf0()) & ~VPECONF0_ICS);
244 + * If any cache way is 1, then that way is denied
245 + * in VPE0. Otherwise assign that way to VPE0.
247 + printk(KERN_DEBUG "icache is split\n");
248 + printk(KERN_DEBUG "icache_way0=%d icache_way1=%d icache_way2=%d icache_way3=%d\n",
249 + icache_way0, icache_way1,icache_way2, icache_way3);
251 + write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX0 );
253 + write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX0 );
255 + write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX1 );
257 + write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX1 );
259 + write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX2 );
261 + write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX2 );
263 + write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_IWX3 );
265 + write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_IWX3 );
268 + if ( !vpe_dcache_shared ) {
270 + * If any cache way is 1, then that way is denied
271 + * in VPE0. Otherwise assign that way to VPE0.
273 + printk(KERN_DEBUG "dcache is split\n");
274 + printk(KERN_DEBUG "dcache_way0=%d dcache_way1=%d dcache_way2=%d dcache_way3=%d\n",
275 + dcache_way0, dcache_way1, dcache_way2, dcache_way3);
276 + write_c0_vpeconf0((read_c0_vpeconf0()) & ~VPECONF0_DCS);
278 + write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX0 );
280 + write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX0 );
282 + write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX1 );
284 + write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX1 );
286 + write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX2 );
288 + write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX2 );
290 + write_c0_vpeopt(read_c0_vpeopt() | VPEOPT_DWX3 );
292 + write_c0_vpeopt(read_c0_vpeopt() & ~VPEOPT_DWX3 );
297 +#endif /* CONFIG_IFX_VPE_CACHE_SPLIT */