openwrt.git: target/linux/lantiq/patches-3.0/0019-MIPS-lantiq-adds-VPE-extensions.patch
1 From c6c810d83f0d95f54c3a6b338d219cec7ccef4c9 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Thu, 29 Sep 2011 20:30:40 +0200
4 Subject: [PATCH 19/24] MIPS: lantiq: adds VPE extensions
5
6 ---
7  arch/mips/Kconfig                  |   22 +++
8  arch/mips/include/asm/mipsmtregs.h |   54 +++++++
9  arch/mips/kernel/Makefile          |    3 +-
10  arch/mips/kernel/mips-mt.c         |   97 +++++++++++--
11  arch/mips/kernel/mtsched_proc.c    |  279 ++++++++++++++++++++++++++++++++++++
12  arch/mips/kernel/perf_proc.c       |  191 ++++++++++++++++++++++++
13  arch/mips/kernel/proc.c            |   17 +++
14  arch/mips/kernel/smtc.c            |    7 +
15  arch/mips/kernel/vpe.c             |  250 ++++++++++++++++++++++++++++++++-
16  9 files changed, 905 insertions(+), 15 deletions(-)
17  create mode 100644 arch/mips/kernel/mtsched_proc.c
18  create mode 100644 arch/mips/kernel/perf_proc.c
19
20 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
21 index 0cf5bbd..bf1b76d 100644
22 --- a/arch/mips/Kconfig
23 +++ b/arch/mips/Kconfig
24 @@ -1903,6 +1903,28 @@ config MIPS_VPE_LOADER
25           Includes a loader for loading an elf relocatable object
26           onto another VPE and running it.
27  
28 +config IFX_VPE_EXT
29 +       bool "IFX APRP Extensions"
30 +       depends on MIPS_VPE_LOADER
31 +       default y
32 +       help
33 +         Includes the IFX extensions to APRP.
34 +
35 +config PERFCTRS
36 +       bool "34K Performance counters"
37 +       depends on MIPS_MT && PROC_FS
38 +       default n
39 +       help
40 +         34K performance counters exposed through /proc
41 +
42 +config MTSCHED
43 +       bool "Support mtsched priority configuration for TCs"
44 +       depends on MIPS_MT && PROC_FS
45 +       default y
46 +       help
47 +         Support for mtsched priority configuration for TCs through
48 +         /proc/mips/mtsched
49 +
50  config MIPS_MT_SMTC_IM_BACKSTOP
51         bool "Use per-TC register bits as backstop for inhibited IM bits"
52         depends on MIPS_MT_SMTC
53 diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
54 index c9420aa..04bfb4b 100644
55 --- a/arch/mips/include/asm/mipsmtregs.h
56 +++ b/arch/mips/include/asm/mipsmtregs.h
57 @@ -28,14 +28,34 @@
58  #define read_c0_vpeconf0()             __read_32bit_c0_register($1, 2)
59  #define write_c0_vpeconf0(val)         __write_32bit_c0_register($1, 2, val)
60  
61 +#define read_c0_vpeconf1()              __read_32bit_c0_register($1, 3)
62 +#define write_c0_vpeconf1(val)          __write_32bit_c0_register($1, 3, val)
63 +
64 +#define read_c0_vpeschedule()           __read_32bit_c0_register($1, 5)
65 +#define write_c0_vpeschedule(val)       __write_32bit_c0_register($1, 5, val)
66 +
67 +#define read_c0_vpeschefback()         __read_32bit_c0_register($1, 6)
68 +#define write_c0_vpeschefback(val)     __write_32bit_c0_register($1, 6, val)
69 +
70 +#define read_c0_vpeopt()              __read_32bit_c0_register($1, 7)
71 +#define write_c0_vpeopt(val)          __write_32bit_c0_register($1, 7, val)
72 +
73  #define read_c0_tcstatus()             __read_32bit_c0_register($2, 1)
74  #define write_c0_tcstatus(val)         __write_32bit_c0_register($2, 1, val)
75  
76  #define read_c0_tcbind()               __read_32bit_c0_register($2, 2)
77 +#define write_c0_tcbind(val)           __write_32bit_c0_register($2, 2, val)
78  
79  #define read_c0_tccontext()            __read_32bit_c0_register($2, 5)
80  #define write_c0_tccontext(val)                __write_32bit_c0_register($2, 5, val)
81  
82 +#define read_c0_tcschedule()           __read_32bit_c0_register($2, 6)
83 +#define write_c0_tcschedule(val)       __write_32bit_c0_register($2, 6, val)
84 +
85 +#define read_c0_tcschefback()          __read_32bit_c0_register($2, 7)
86 +#define write_c0_tcschefback(val)      __write_32bit_c0_register($2, 7, val)
87 +
88 +
89  #else /* Assembly */
90  /*
91   * Macros for use in assembly language code
92 @@ -74,6 +94,8 @@
93  #define MVPCONTROL_STLB_SHIFT  2
94  #define MVPCONTROL_STLB                (_ULCAST_(1) << MVPCONTROL_STLB_SHIFT)
95  
96 +#define MVPCONTROL_CPA_SHIFT   3
97 +#define MVPCONTROL_CPA         (_ULCAST_(1) << MVPCONTROL_CPA_SHIFT)
98  
99  /* MVPConf0 fields */
100  #define MVPCONF0_PTC_SHIFT     0
101 @@ -84,6 +106,8 @@
102  #define MVPCONF0_TCA           ( _ULCAST_(1) << MVPCONF0_TCA_SHIFT)
103  #define MVPCONF0_PTLBE_SHIFT   16
104  #define MVPCONF0_PTLBE         (_ULCAST_(0x3ff) << MVPCONF0_PTLBE_SHIFT)
105 +#define MVPCONF0_PCP_SHIFT     27
106 +#define MVPCONF0_PCP           (_ULCAST_(1) << MVPCONF0_PCP_SHIFT)
107  #define MVPCONF0_TLBS_SHIFT    29
108  #define MVPCONF0_TLBS          (_ULCAST_(1) << MVPCONF0_TLBS_SHIFT)
109  #define MVPCONF0_M_SHIFT       31
110 @@ -121,9 +145,25 @@
111  #define VPECONF0_VPA           (_ULCAST_(1) << VPECONF0_VPA_SHIFT)
112  #define VPECONF0_MVP_SHIFT     1
113  #define VPECONF0_MVP           (_ULCAST_(1) << VPECONF0_MVP_SHIFT)
114 +#define VPECONF0_ICS_SHIFT      16
115 +#define VPECONF0_ICS           (_ULCAST_(1) << VPECONF0_ICS_SHIFT)
116 +#define VPECONF0_DCS_SHIFT      17
117 +#define VPECONF0_DCS            (_ULCAST_(1) << VPECONF0_DCS_SHIFT)
118  #define VPECONF0_XTC_SHIFT     21
119  #define VPECONF0_XTC           (_ULCAST_(0xff) << VPECONF0_XTC_SHIFT)
120  
121 +/* VPEOpt fields */
122 +#define VPEOPT_DWX_SHIFT       0
123 +#define VPEOPT_IWX_SHIFT       8
124 +#define VPEOPT_IWX0            ( _ULCAST_(0x1) << VPEOPT_IWX_SHIFT)
125 +#define VPEOPT_IWX1            ( _ULCAST_(0x2) << VPEOPT_IWX_SHIFT)
126 +#define VPEOPT_IWX2            ( _ULCAST_(0x4) << VPEOPT_IWX_SHIFT)
127 +#define VPEOPT_IWX3            ( _ULCAST_(0x8) << VPEOPT_IWX_SHIFT)
128 +#define VPEOPT_DWX0            ( _ULCAST_(0x1) << VPEOPT_DWX_SHIFT)
129 +#define VPEOPT_DWX1            ( _ULCAST_(0x2) << VPEOPT_DWX_SHIFT)
130 +#define VPEOPT_DWX2            ( _ULCAST_(0x4) << VPEOPT_DWX_SHIFT)
131 +#define VPEOPT_DWX3            ( _ULCAST_(0x8) << VPEOPT_DWX_SHIFT)
132 +
133  /* TCStatus fields (per TC) */
134  #define TCSTATUS_TASID         (_ULCAST_(0xff))
135  #define TCSTATUS_IXMT_SHIFT    10
136 @@ -350,6 +390,14 @@ do {                                                                       \
137  #define write_vpe_c0_vpecontrol(val)   mttc0(1, 1, val)
138  #define read_vpe_c0_vpeconf0()         mftc0(1, 2)
139  #define write_vpe_c0_vpeconf0(val)     mttc0(1, 2, val)
140 +#define read_vpe_c0_vpeschedule()      mftc0(1, 5)
141 +#define write_vpe_c0_vpeschedule(val)  mttc0(1, 5, val)
142 +#define read_vpe_c0_vpeschefback()     mftc0(1, 6)
143 +#define write_vpe_c0_vpeschefback(val) mttc0(1, 6, val)
144 +#define read_vpe_c0_vpeopt()            mftc0(1, 7)
145 +#define write_vpe_c0_vpeopt(val)        mttc0(1, 7, val)
146 +#define read_vpe_c0_wired()            mftc0(6, 0)
147 +#define write_vpe_c0_wired(val)                mttc0(6, 0, val)
148  #define read_vpe_c0_count()            mftc0(9, 0)
149  #define write_vpe_c0_count(val)                mttc0(9, 0, val)
150  #define read_vpe_c0_status()           mftc0(12, 0)
151 @@ -381,6 +429,12 @@ do {                                                                       \
152  #define write_tc_c0_tchalt(val)                mttc0(2, 4, val)
153  #define read_tc_c0_tccontext()         mftc0(2, 5)
154  #define write_tc_c0_tccontext(val)     mttc0(2, 5, val)
155 +#define read_tc_c0_tcschedule()                mftc0(2, 6)
156 +#define write_tc_c0_tcschedule(val)    mttc0(2, 6, val)
157 +#define read_tc_c0_tcschefback()       mftc0(2, 7)
158 +#define write_tc_c0_tcschefback(val)   mttc0(2, 7, val)
159 +#define read_tc_c0_entryhi()            mftc0(10, 0)
160 +#define write_tc_c0_entryhi(val)        mttc0(10, 0, val)
161  
162  /* GPR */
163  #define read_tc_gpr_sp()               mftgpr(29)
164 diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
165 index 83bba33..53a9a0a 100644
166 --- a/arch/mips/kernel/Makefile
167 +++ b/arch/mips/kernel/Makefile
168 @@ -86,7 +86,8 @@ obj-$(CONFIG_MIPS32_O32)      += binfmt_elfo32.o scall64-o32.o
169  
170  obj-$(CONFIG_KGDB)             += kgdb.o
171  obj-$(CONFIG_PROC_FS)          += proc.o
172 -
173 +obj-$(CONFIG_MTSCHED)          += mtsched_proc.o
174 +obj-$(CONFIG_PERFCTRS)         += perf_proc.o
175  obj-$(CONFIG_64BIT)            += cpu-bugs64.o
176  
177  obj-$(CONFIG_I8253)            += i8253.o
178 diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
179 index 594ca69..ad912fc 100644
180 --- a/arch/mips/kernel/mips-mt.c
181 +++ b/arch/mips/kernel/mips-mt.c
182 @@ -21,26 +21,96 @@
183  #include <asm/cacheflush.h>
184  
185  int vpelimit;
186 -
187  static int __init maxvpes(char *str)
188  {
189         get_option(&str, &vpelimit);
190 -
191         return 1;
192  }
193 -
194  __setup("maxvpes=", maxvpes);
195  
196  int tclimit;
197 -
198  static int __init maxtcs(char *str)
199  {
200         get_option(&str, &tclimit);
201 +       return 1;
202 +}
203 +__setup("maxtcs=", maxtcs);
204  
205 +#ifdef CONFIG_IFX_VPE_EXT
206 +int stlb;
207 +static int __init istlbshared(char *str)
208 +{
209 +       get_option(&str, &stlb);
210         return 1;
211  }
212 +__setup("vpe_tlb_shared=", istlbshared);
213  
214 -__setup("maxtcs=", maxtcs);
215 +int vpe0_wired;
216 +static int __init vpe0wired(char *str)
217 +{
218 +       get_option(&str, &vpe0_wired);
219 +       return 1;
220 +}
221 +__setup("vpe0_wired_tlb_entries=", vpe0wired);
222 +
223 +int vpe1_wired;
224 +static int __init vpe1wired(char *str)
225 +{
226 +       get_option(&str, &vpe1_wired);
227 +       return 1;
228 +}
229 +__setup("vpe1_wired_tlb_entries=", vpe1wired);
230 +
231 +#ifdef CONFIG_MIPS_MT_SMTC
232 +extern int nostlb;
233 +#endif
234 +void configure_tlb(void)
235 +{
236 +       int vpeflags, tcflags, tlbsiz;
237 +       unsigned int config1val;
238 +       vpeflags = dvpe();
239 +       tcflags = dmt();
240 +       write_c0_vpeconf0((read_c0_vpeconf0() | VPECONF0_MVP));
241 +       write_c0_mvpcontrol((read_c0_mvpcontrol() | MVPCONTROL_VPC));
242 +       mips_ihb();
243 +       //printk("stlb = %d, vpe0_wired = %d vpe1_wired=%d\n", stlb,vpe0_wired, vpe1_wired);
244 +       if (stlb) {
245 +               if (!(read_c0_mvpconf0() & MVPCONF0_TLBS)) {
246 +                       emt(tcflags);
247 +                       evpe(vpeflags);
248 +                       return;
249 +               }
250 +
251 +               write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
252 +               write_c0_wired(vpe0_wired + vpe1_wired);
253 +               if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
254 +                       config1val = read_vpe_c0_config1();
255 +                       tlbsiz = (((config1val >> 25) & 0x3f) + 1);
256 +                       if (tlbsiz > 64)
257 +                               tlbsiz = 64;
258 +                       cpu_data[0].tlbsize = tlbsiz;
259 +                       current_cpu_data.tlbsize = tlbsiz;
260 +               }
261 +
262 +       }
263 +       else {
264 +               write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_STLB);
265 +               write_c0_wired(vpe0_wired);
266 +       }
267 +
268 +       ehb();
269 +       write_c0_mvpcontrol((read_c0_mvpcontrol() & ~MVPCONTROL_VPC));
270 +       ehb();
271 +       local_flush_tlb_all();
272 +
273 +       printk("Wired TLB entries for Linux read_c0_wired() = %d\n", read_c0_wired());
274 +#ifdef CONFIG_MIPS_MT_SMTC
275 +       nostlb = !stlb;
276 +#endif
277 +       emt(tcflags);
278 +       evpe(vpeflags);
279 +}
280 +#endif
281  
282  /*
283   * Dump new MIPS MT state for the core. Does not leave TCs halted.
284 @@ -78,18 +148,18 @@ void mips_mt_regdump(unsigned long mvpctl)
285                         if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
286                                 printk("  VPE %d\n", i);
287                                 printk("   VPEControl : %08lx\n",
288 -                                      read_vpe_c0_vpecontrol());
289 +                                       read_vpe_c0_vpecontrol());
290                                 printk("   VPEConf0 : %08lx\n",
291 -                                      read_vpe_c0_vpeconf0());
292 +                                       read_vpe_c0_vpeconf0());
293                                 printk("   VPE%d.Status : %08lx\n",
294 -                                      i, read_vpe_c0_status());
295 +                                       i, read_vpe_c0_status());
296                                 printk("   VPE%d.EPC : %08lx %pS\n",
297 -                                      i, read_vpe_c0_epc(),
298 -                                      (void *) read_vpe_c0_epc());
299 +                                       i, read_vpe_c0_epc(),
300 +                                       (void *) read_vpe_c0_epc());
301                                 printk("   VPE%d.Cause : %08lx\n",
302 -                                      i, read_vpe_c0_cause());
303 +                                       i, read_vpe_c0_cause());
304                                 printk("   VPE%d.Config7 : %08lx\n",
305 -                                      i, read_vpe_c0_config7());
306 +                                       i, read_vpe_c0_config7());
307                                 break; /* Next VPE */
308                         }
309                 }
310 @@ -287,6 +357,9 @@ void mips_mt_set_cpuoptions(void)
311                 printk("Mapped %ld ITC cells starting at 0x%08x\n",
312                         ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
313         }
314 +#ifdef CONFIG_IFX_VPE_EXT
315 +       configure_tlb();
316 +#endif
317  }
318  
319  /*
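
The __setup() handlers added above mean the TLB layout of the two VPEs is chosen from the kernel command line: vpe_tlb_shared= selects the shared TLB, while vpe0_wired_tlb_entries= and vpe1_wired_tlb_entries= reserve wired entries for VPE0 and VPE1 respectively. As a purely illustrative example (values are board-specific and not taken from this patch), a bootloader might append:

    vpe_tlb_shared=1 vpe0_wired_tlb_entries=4 vpe1_wired_tlb_entries=4 maxvpes=1 maxtcs=1

configure_tlb() then either sets MVPCONTROL_STLB and wires vpe0_wired + vpe1_wired entries into the shared TLB, or keeps the TLBs private and wires only vpe0_wired.
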
320 diff --git a/arch/mips/kernel/mtsched_proc.c b/arch/mips/kernel/mtsched_proc.c
321 new file mode 100644
322 index 0000000..4dafded
323 --- /dev/null
324 +++ b/arch/mips/kernel/mtsched_proc.c
325 @@ -0,0 +1,279 @@
326 +/*
327 + * /proc hooks for MIPS MT scheduling policy management for 34K cores
328 + *
329 + *  This program is free software; you can distribute it and/or modify it
330 + *  under the terms of the GNU General Public License (Version 2) as
331 + *  published by the Free Software Foundation.
332 + *
333 + *  This program is distributed in the hope it will be useful, but WITHOUT
334 + *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
335 + *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
336 + *  for more details.
337 + *
338 + *  You should have received a copy of the GNU General Public License along
339 + *  with this program; if not, write to the Free Software Foundation, Inc.,
340 + *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
341 + *
342 + * Copyright (C) 2006 Mips Technologies, Inc
343 + */
344 +
345 +#include <linux/kernel.h>
346 +
347 +#include <asm/cpu.h>
348 +#include <asm/processor.h>
349 +#include <asm/system.h>
350 +#include <asm/mipsregs.h>
351 +#include <asm/mipsmtregs.h>
352 +#include <asm/uaccess.h>
353 +#include <linux/proc_fs.h>
354 +
355 +static struct proc_dir_entry *mtsched_proc;
356 +
357 +#ifndef CONFIG_MIPS_MT_SMTC
358 +#define NTCS 2
359 +#else
360 +#define NTCS NR_CPUS
361 +#endif
362 +#define NVPES 2
363 +
364 +int lastvpe = 1;
365 +int lasttc = 8;
366 +
367 +static int proc_read_mtsched(char *page, char **start, off_t off,
368 +                       int count, int *eof, void *data)
369 +{
370 +       int totalen = 0;
371 +       int len;
372 +
373 +       int i;
374 +       int vpe;
375 +       int mytc;
376 +       unsigned long flags;
377 +       unsigned int mtflags;
378 +       unsigned int haltstate;
379 +       unsigned int vpes_checked[NVPES];
380 +       unsigned int vpeschedule[NVPES];
381 +       unsigned int vpeschefback[NVPES];
382 +       unsigned int tcschedule[NTCS];
383 +       unsigned int tcschefback[NTCS];
384 +
385 +       /* Dump the state of the MIPS MT scheduling policy manager */
386 +       /* Initialize control state */
387 +       for(i = 0; i < NVPES; i++) {
388 +               vpes_checked[i] = 0;
389 +               vpeschedule[i] = 0;
390 +               vpeschefback[i] = 0;
391 +       }
392 +       for(i = 0; i < NTCS; i++) {
393 +               tcschedule[i] = 0;
394 +               tcschefback[i] = 0;
395 +       }
396 +
397 +       /* Disable interrupts and multithreaded issue */
398 +       local_irq_save(flags);
399 +       mtflags = dvpe();
400 +
401 +       /* Then go through the TCs, halt 'em, and extract the values */
402 +       mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
403 +       for(i = 0; i < NTCS; i++) {
404 +               if(i == mytc) {
405 +                       /* No need to halt ourselves! */
406 +                       tcschedule[i] = read_c0_tcschedule();
407 +                       tcschefback[i] = read_c0_tcschefback();
408 +                       /* If VPE bound to TC hasn't been checked, do it */
409 +                       vpe = read_c0_tcbind() & TCBIND_CURVPE;
410 +                       if(!vpes_checked[vpe]) {
411 +                               vpeschedule[vpe] = read_c0_vpeschedule();
412 +                               vpeschefback[vpe] = read_c0_vpeschefback();
413 +                               vpes_checked[vpe] = 1;
414 +                       }
415 +               } else {
416 +                       settc(i);
417 +                       haltstate = read_tc_c0_tchalt();
418 +                       write_tc_c0_tchalt(TCHALT_H);
419 +                       mips_ihb();
420 +                       tcschedule[i] = read_tc_c0_tcschedule();
421 +                       tcschefback[i] = read_tc_c0_tcschefback();
422 +                       /* If VPE bound to TC hasn't been checked, do it */
423 +                       vpe = read_tc_c0_tcbind() & TCBIND_CURVPE;
424 +                       if(!vpes_checked[vpe]) {
425 +                           vpeschedule[vpe] = read_vpe_c0_vpeschedule();
426 +                           vpeschefback[vpe] = read_vpe_c0_vpeschefback();
427 +                           vpes_checked[vpe] = 1;
428 +                       }
429 +                       if(!haltstate) write_tc_c0_tchalt(0);
430 +               }
431 +       }
432 +       /* Re-enable MT and interrupts */
433 +       evpe(mtflags);
434 +       local_irq_restore(flags);
435 +
436 +       for(vpe=0; vpe < NVPES; vpe++) {
437 +               len = sprintf(page, "VPE[%d].VPEschedule  = 0x%08x\n",
438 +                       vpe, vpeschedule[vpe]);
439 +               totalen += len;
440 +               page += len;
441 +               len = sprintf(page, "VPE[%d].VPEschefback = 0x%08x\n",
442 +                       vpe, vpeschefback[vpe]);
443 +               totalen += len;
444 +               page += len;
445 +       }
446 +       for(i=0; i < NTCS; i++) {
447 +               len = sprintf(page, "TC[%d].TCschedule    = 0x%08x\n",
448 +                       i, tcschedule[i]);
449 +               totalen += len;
450 +               page += len;
451 +               len = sprintf(page, "TC[%d].TCschefback   = 0x%08x\n",
452 +                       i, tcschefback[i]);
453 +               totalen += len;
454 +               page += len;
455 +       }
456 +       return totalen;
457 +}
458 +
459 +/*
460 + * Write to MT scheduling policy registers based on text input
461 + */
462 +
463 +#define TXTBUFSZ 100
464 +
465 +static int proc_write_mtsched(struct file *file, const char *buffer,
466 +                               unsigned long count, void *data)
467 +{
468 +       int len = 0;
469 +       char mybuf[TXTBUFSZ];
470 +       /* At most, we will set up 9 TCs and 2 VPEs, 11 entries in all */
471 +       char entity[1];   //, entity1[1];
472 +       int number[1];
473 +       unsigned long value[1];
474 +       int nparsed = 0 , index = 0;
475 +       unsigned long flags;
476 +       unsigned int mtflags;
477 +       unsigned int haltstate;
478 +       unsigned int tcbindval;
479 +
480 +       if(count >= TXTBUFSZ) len = TXTBUFSZ-1;
481 +       else len = count;
482 +       memset(mybuf,0,TXTBUFSZ);
483 +       if(copy_from_user(mybuf, buffer, len)) return -EFAULT;
484 +
485 +       nparsed = sscanf(mybuf, "%c%d %lx",
486 +                &entity[0] ,&number[0], &value[0]);
487 +
488 +       /*
489 +        * Having acquired the inputs, which might have
490 +        * generated exceptions and preemptions,
491 +        * program the registers.
492 +        */
493 +       /* Disable interrupts and multithreaded issue */
494 +       local_irq_save(flags);
495 +       mtflags = dvpe();
496 +
497 +       if(entity[index] == 't' ) {
498 +               /* Set TCSchedule or TCScheFBack of specified TC */
499 +               if(number[index] > NTCS) goto skip;
500 +               /* If it's our own TC, do it direct */
501 +               if(number[index] ==
502 +                               ((read_c0_tcbind() & TCBIND_CURTC)
503 +                               >> TCBIND_CURTC_SHIFT)) {
504 +                       if(entity[index] == 't')
505 +                                write_c0_tcschedule(value[index]);
506 +                       else
507 +                               write_c0_tcschefback(value[index]);
508 +               } else {
509 +               /* Otherwise, we do it via MTTR */
510 +                       settc(number[index]);
511 +                       haltstate = read_tc_c0_tchalt();
512 +                       write_tc_c0_tchalt(TCHALT_H);
513 +                       mips_ihb();
514 +                       if(entity[index] == 't')
515 +                                write_tc_c0_tcschedule(value[index]);
516 +                       else
517 +                               write_tc_c0_tcschefback(value[index]);
518 +                       mips_ihb();
519 +                       if(!haltstate) write_tc_c0_tchalt(0);
520 +               }
521 +       } else if(entity[index] == 'v') {
522 +               /* Set VPESchedule of specified VPE */
523 +               if(number[index] > NVPES) goto skip;
524 +               tcbindval = read_c0_tcbind();
525 +               /* Are we doing this to our current VPE? */
526 +               if((tcbindval & TCBIND_CURVPE) == number[index]) {
527 +                       /* Then life is simple */
528 +                       write_c0_vpeschedule(value[index]);
529 +               } else {
530 +                       /*
531 +                        * Bind ourselves to the other VPE long enough
532 +                        * to program the bind value.
533 +                        */
534 +                       write_c0_tcbind((tcbindval & ~TCBIND_CURVPE)
535 +                                          | number[index]);
536 +                       mips_ihb();
537 +                       write_c0_vpeschedule(value[index]);
538 +                       mips_ihb();
539 +                       /* Restore previous binding */
540 +                       write_c0_tcbind(tcbindval);
541 +                       mips_ihb();
542 +               }
543 +       }
544 +
545 +       else if(entity[index] == 'r') {
546 +               unsigned int vpes_checked[2], vpe ,i , mytc;
547 +               vpes_checked[0] = vpes_checked[1] = 0;
548 +
549 +               /* Then go through the TCs, halt 'em, and extract the values */
550 +               mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
551 +
552 +               for(i = 0; i < NTCS; i++) {
553 +                       if(i == mytc) {
554 +                               /* No need to halt ourselves! */
555 +                               write_c0_vpeschefback(0);
556 +                               write_c0_tcschefback(0);
557 +                       } else {
558 +                               settc(i);
559 +                               haltstate = read_tc_c0_tchalt();
560 +                               write_tc_c0_tchalt(TCHALT_H);
561 +                               mips_ihb();
562 +                               write_tc_c0_tcschefback(0);
563 +                               /* If VPE bound to TC hasn't been checked, do it */
564 +                               vpe = read_tc_c0_tcbind() & TCBIND_CURVPE;
565 +                               if(!vpes_checked[vpe]) {
566 +                                   write_vpe_c0_vpeschefback(0);
567 +                                   vpes_checked[vpe] = 1;
568 +                               }
569 +                               if(!haltstate) write_tc_c0_tchalt(0);
570 +                       }
571 +               }
572 +       }
573 +       else {
574 +               printk ("\n Usage : <t/v><0/1> <Hex Value>\n Example : t0 0x01\n");
575 +       }
576 +
577 +skip:
578 +       /* Re-enable MT and interrupts */
579 +       evpe(mtflags);
580 +       local_irq_restore(flags);
581 +       return (len);
582 +}
583 +
584 +static int __init init_mtsched_proc(void)
585 +{
586 +       extern struct proc_dir_entry *get_mips_proc_dir(void);
587 +       struct proc_dir_entry *mips_proc_dir;
588 +
589 +       if (!cpu_has_mipsmt) {
590 +               printk("mtsched: not a MIPS MT capable processor\n");
591 +               return -ENODEV;
592 +       }
593 +
594 +       mips_proc_dir = get_mips_proc_dir();
595 +
596 +       mtsched_proc = create_proc_entry("mtsched", 0644, mips_proc_dir);
597 +       mtsched_proc->read_proc = proc_read_mtsched;
598 +       mtsched_proc->write_proc = proc_write_mtsched;
599 +
600 +       return 0;
601 +}
602 +
603 +/* Automagically create the entry */
604 +module_init(init_mtsched_proc);
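
For reference, the new /proc/mips/mtsched entry can be driven from user space with a few lines of C. The sketch below is only illustrative: the write format follows the "<t/v><0/1> <Hex Value>" usage string in proc_write_mtsched above, and the register value written here carries no particular meaning.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/mips/mtsched", "w");
	char line[128];

	if (!f) {
		perror("/proc/mips/mtsched");
		return EXIT_FAILURE;
	}
	/* Program the TCSchedule register of TC0 (illustrative value). */
	fprintf(f, "t0 0x01\n");
	fclose(f);

	/* Read back the per-VPE and per-TC scheduling state. */
	f = fopen("/proc/mips/mtsched", "r");
	if (!f) {
		perror("/proc/mips/mtsched");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return EXIT_SUCCESS;
}
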
605 diff --git a/arch/mips/kernel/perf_proc.c b/arch/mips/kernel/perf_proc.c
606 new file mode 100644
607 index 0000000..7eec015
608 --- /dev/null
609 +++ b/arch/mips/kernel/perf_proc.c
610 @@ -0,0 +1,191 @@
611 +/*
612 + * /proc hooks for CPU performance counter support for SMTC kernel
613 + * (and ultimately others)
614 + * Copyright (C) 2006 Mips Technologies, Inc
615 + */
616 +
617 +#include <linux/kernel.h>
618 +
619 +#include <asm/cpu.h>
620 +#include <asm/processor.h>
621 +#include <asm/system.h>
622 +#include <asm/mipsregs.h>
623 +#include <asm/uaccess.h>
624 +#include <linux/proc_fs.h>
625 +
626 +/*
627 + * /proc diagnostic and statistics hooks
628 + */
629 +
630 +
631 +/* Internal software-extended event counters */
632 +
633 +static unsigned long long extencount[4] = {0,0,0,0};
634 +
635 +static struct proc_dir_entry *perf_proc;
636 +
637 +static int proc_read_perf(char *page, char **start, off_t off,
638 +                               int count, int *eof, void *data)
639 +{
640 +       int totalen = 0;
641 +       int len;
642 +
643 +       len = sprintf(page, "PerfCnt[0].Ctl : 0x%08x\n", read_c0_perfctrl0());
644 +       totalen += len;
645 +       page += len;
646 +       len = sprintf(page, "PerfCnt[0].Cnt : %Lu\n",
647 +               extencount[0] + (unsigned long long)((unsigned)read_c0_perfcntr0()));
648 +       totalen += len;
649 +       page += len;
650 +       len = sprintf(page, "PerfCnt[1].Ctl : 0x%08x\n", read_c0_perfctrl1());
651 +       totalen += len;
652 +       page += len;
653 +       len = sprintf(page, "PerfCnt[1].Cnt : %Lu\n",
654 +               extencount[1] + (unsigned long long)((unsigned)read_c0_perfcntr1()));
655 +       totalen += len;
656 +       page += len;
657 +       len = sprintf(page, "PerfCnt[2].Ctl : 0x%08x\n", read_c0_perfctrl2());
658 +       totalen += len;
659 +       page += len;
660 +       len = sprintf(page, "PerfCnt[2].Cnt : %Lu\n",
661 +               extencount[2] + (unsigned long long)((unsigned)read_c0_perfcntr2()));
662 +       totalen += len;
663 +       page += len;
664 +       len = sprintf(page, "PerfCnt[3].Ctl : 0x%08x\n", read_c0_perfctrl3());
665 +       totalen += len;
666 +       page += len;
667 +       len = sprintf(page, "PerfCnt[3].Cnt : %Lu\n",
668 +               extencount[3] + (unsigned long long)((unsigned)read_c0_perfcntr3()));
669 +       totalen += len;
670 +       page += len;
671 +
672 +       return totalen;
673 +}
674 +
675 +/*
676 + * Write to perf counter registers based on text input
677 + */
678 +
679 +#define TXTBUFSZ 100
680 +
681 +static int proc_write_perf(struct file *file, const char *buffer,
682 +                               unsigned long count, void *data)
683 +{
684 +       int len;
685 +       int nparsed;
686 +       int index;
687 +       char mybuf[TXTBUFSZ];
688 +
689 +       int which[4];
690 +       unsigned long control[4];
691 +       long long ctrdata[4];
692 +
693 +       if(count >= TXTBUFSZ) len = TXTBUFSZ-1;
694 +       else len = count;
695 +       memset(mybuf,0,TXTBUFSZ);
696 +       if(copy_from_user(mybuf, buffer, len)) return -EFAULT;
697 +
698 +       nparsed = sscanf(mybuf,
699 +                       "%d %lx %Ld %d %lx %Ld %d %lx %Ld %d %lx %Ld",
700 +                               &which[0], &control[0], &ctrdata[0],
701 +                               &which[1], &control[1], &ctrdata[1],
702 +                               &which[2], &control[2], &ctrdata[2],
703 +                               &which[3], &control[3], &ctrdata[3]);
704 +
705 +       for(index = 0; nparsed >= 3; index++) {
706 +               switch (which[index]) {
707 +               case 0:
708 +                       write_c0_perfctrl0(control[index]);
709 +                       if(ctrdata[index] != -1) {
710 +                           extencount[0] = (unsigned long long)ctrdata[index];
711 +                           write_c0_perfcntr0((unsigned long)0);
712 +                       }
713 +                       break;
714 +               case 1:
715 +                       write_c0_perfctrl1(control[index]);
716 +                       if(ctrdata[index] != -1) {
717 +                           extencount[1] = (unsigned long long)ctrdata[index];
718 +                           write_c0_perfcntr1((unsigned long)0);
719 +                       }
720 +                       break;
721 +               case 2:
722 +                       write_c0_perfctrl2(control[index]);
723 +                       if(ctrdata[index] != -1) {
724 +                           extencount[2] = (unsigned long long)ctrdata[index];
725 +                           write_c0_perfcntr2((unsigned long)0);
726 +                       }
727 +                       break;
728 +               case 3:
729 +                       write_c0_perfctrl3(control[index]);
730 +                       if(ctrdata[index] != -1) {
731 +                           extencount[3] = (unsigned long long)ctrdata[index];
732 +                           write_c0_perfcntr3((unsigned long)0);
733 +                       }
734 +                       break;
735 +               }
736 +               nparsed -= 3;
737 +       }
738 +       return (len);
739 +}
740 +
741 +extern int (*perf_irq)(void);
742 +
743 +/*
744 + * Invoked when timer interrupt vector picks up a perf counter overflow
745 + */
746 +
747 +static int perf_proc_irq(void)
748 +{
749 +       unsigned long snapshot;
750 +
751 +       /*
752 +        * It would be nice to do this as a loop, but we don't have
753 +        * indirect access to CP0 registers.
754 +        */
755 +       snapshot = read_c0_perfcntr0();
756 +       if ((long)snapshot < 0) {
757 +               extencount[0] +=
758 +                       (unsigned long long)((unsigned)read_c0_perfcntr0());
759 +               write_c0_perfcntr0(0);
760 +       }
761 +       snapshot = read_c0_perfcntr1();
762 +       if ((long)snapshot < 0) {
763 +               extencount[1] +=
764 +                       (unsigned long long)((unsigned)read_c0_perfcntr1());
765 +               write_c0_perfcntr1(0);
766 +       }
767 +       snapshot = read_c0_perfcntr2();
768 +       if ((long)snapshot < 0) {
769 +               extencount[2] +=
770 +                       (unsigned long long)((unsigned)read_c0_perfcntr2());
771 +               write_c0_perfcntr2(0);
772 +       }
773 +       snapshot = read_c0_perfcntr3();
774 +       if ((long)snapshot < 0) {
775 +               extencount[3] +=
776 +                       (unsigned long long)((unsigned)read_c0_perfcntr3());
777 +               write_c0_perfcntr3(0);
778 +       }
779 +       return 0;
780 +}
781 +
782 +static int __init init_perf_proc(void)
783 +{
784 +       extern struct proc_dir_entry *get_mips_proc_dir(void);
785 +
786 +       struct proc_dir_entry *mips_proc_dir = get_mips_proc_dir();
787 +
788 +       write_c0_perfcntr0(0);
789 +       write_c0_perfcntr1(0);
790 +       write_c0_perfcntr2(0);
791 +       write_c0_perfcntr3(0);
792 +       perf_proc = create_proc_entry("perf", 0644, mips_proc_dir);
793 +       perf_proc->read_proc = proc_read_perf;
794 +       perf_proc->write_proc = proc_write_perf;
795 +       perf_irq = perf_proc_irq;
796 +
797 +       return 0;
798 +}
799 +
800 +/* Automagically create the entry */
801 +module_init(init_perf_proc);
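
The /proc/mips/perf entry accepts up to four "<counter> <control> <count>" triples per write (matching the "%d %lx %Ld" sscanf above); passing -1 as the count leaves the software-extended counter untouched. A minimal user-space sketch follows; the control word is a placeholder, since real event encodings come from the 34K PerfCtl register description, not from this patch.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/mips/perf", "w");
	char line[128];

	if (!f) {
		perror("/proc/mips/perf");
		return EXIT_FAILURE;
	}
	/* Program counter 0 with a placeholder control word and clear it. */
	fprintf(f, "0 0x8f 0\n");
	fclose(f);

	/* Print PerfCnt[n].Ctl and the 64-bit software-extended counts. */
	f = fopen("/proc/mips/perf", "r");
	if (!f) {
		perror("/proc/mips/perf");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return EXIT_SUCCESS;
}
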
802 diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
803 index e309665..2de204f 100644
804 --- a/arch/mips/kernel/proc.c
805 +++ b/arch/mips/kernel/proc.c
806 @@ -7,6 +7,7 @@
807  #include <linux/kernel.h>
808  #include <linux/sched.h>
809  #include <linux/seq_file.h>
810 +#include <linux/proc_fs.h>
811  #include <asm/bootinfo.h>
812  #include <asm/cpu.h>
813  #include <asm/cpu-features.h>
814 @@ -110,3 +111,19 @@ const struct seq_operations cpuinfo_op = {
815         .stop   = c_stop,
816         .show   = show_cpuinfo,
817  };
818 +
819 +/*
820 + * Support for MIPS/local /proc hooks in /proc/mips/
821 + */
822 +
823 +static struct proc_dir_entry *mips_proc = NULL;
824 +
825 +struct proc_dir_entry *get_mips_proc_dir(void)
826 +{
827 +       /*
828 +        * This ought not to be preemptable.
829 +        */
830 +       if(mips_proc == NULL)
831 +               mips_proc = proc_mkdir("mips", NULL);
832 +       return(mips_proc);
833 +}
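
get_mips_proc_dir() gives other MIPS-specific code a common /proc/mips/ parent. A hedged sketch of a hypothetical user, written against the same legacy create_proc_entry()/read_proc API this 3.0-era patch uses (the entry name and handler below are made up):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>

/* Helper added to arch/mips/kernel/proc.c above. */
extern struct proc_dir_entry *get_mips_proc_dir(void);

static int example_read_proc(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	*eof = 1;
	return sprintf(page, "hello from /proc/mips/example\n");
}

static int __init example_proc_init(void)
{
	struct proc_dir_entry *ent;

	ent = create_proc_entry("example", 0444, get_mips_proc_dir());
	if (!ent)
		return -ENOMEM;
	ent->read_proc = example_read_proc;
	return 0;
}
module_init(example_proc_init);
MODULE_LICENSE("GPL");
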
834 diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
835 index f0895e7..199e853 100644
836 --- a/arch/mips/kernel/smtc.c
837 +++ b/arch/mips/kernel/smtc.c
838 @@ -1334,6 +1334,13 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
839         asid = asid_cache(cpu);
840  
841         do {
842 +#ifdef CONFIG_IFX_VPE_EXT
843 +               /* If TLB is shared between AP and RP (AP is running SMTC),
844 +                  leave out max ASID i.e., ASID_MASK for RP
845 +                */
846 +               if (!nostlb && ((asid & ASID_MASK) == (ASID_MASK - 1)))
847 +                       asid++;
848 +#endif
849                 if (!((asid += ASID_INC) & ASID_MASK) ) {
850                         if (cpu_has_vtag_icache)
851                                 flush_icache_all();
852 diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
853 index 3efcb06..742f24b 100644
854 --- a/arch/mips/kernel/vpe.c
855 +++ b/arch/mips/kernel/vpe.c
856 @@ -76,6 +76,58 @@ static struct kspd_notifications kspd_events;
857  static int kspd_events_reqd;
858  #endif
859  
860 +#ifdef CONFIG_IFX_VPE_EXT
861 +static int is_sdepgm;
862 +extern int stlb;
863 +extern int vpe0_wired;
864 +extern int vpe1_wired;
865 +unsigned int vpe1_load_addr;
866 +
867 +static int __init load_address(char *str)
868 +{
869 +       get_option(&str, &vpe1_load_addr);
870 +       return 1;
871 +}
872 +__setup("vpe1_load_addr=", load_address);
873 +
874 +#include <asm/mipsmtregs.h>
875 +#define write_vpe_c0_wired(val)                mttc0(6, 0, val)
876 +
877 +#ifndef COMMAND_LINE_SIZE
878 +#      define COMMAND_LINE_SIZE        512
879 +#endif
880 +
881 +char command_line[COMMAND_LINE_SIZE * 2];
882 +
883 +static unsigned int vpe1_mem;
884 +static int __init vpe1mem(char *str)
885 +{
886 +       vpe1_mem = memparse(str, &str);
887 +       return 1;
888 +}
889 +__setup("vpe1_mem=", vpe1mem);
890 +
891 +uint32_t vpe1_wdog_ctr;
892 +static int __init wdog_ctr(char *str)
893 +{
894 +       get_option(&str, &vpe1_wdog_ctr);
895 +       return 1;
896 +}
897 +
898 +__setup("vpe1_wdog_ctr_addr=", wdog_ctr);
899 +EXPORT_SYMBOL(vpe1_wdog_ctr);
900 +
901 +uint32_t vpe1_wdog_timeout;
902 +static int __init wdog_timeout(char *str)
903 +{
904 +        get_option(&str, &vpe1_wdog_timeout);
905 +        return 1;
906 +}
907 +
908 +__setup("vpe1_wdog_timeout=", wdog_timeout);
909 +EXPORT_SYMBOL(vpe1_wdog_timeout);
910 +
911 +#endif
912  /* grab the likely amount of memory we will need. */
913  #ifdef CONFIG_MIPS_VPE_LOADER_TOM
914  #define P_SIZE (2 * 1024 * 1024)
915 @@ -268,6 +320,13 @@ static void *alloc_progmem(unsigned long len)
916         void *addr;
917  
918  #ifdef CONFIG_MIPS_VPE_LOADER_TOM
919 +#ifdef CONFIG_IFX_VPE_EXT
920 +       if (vpe1_load_addr) {
921 +               memset((void *)vpe1_load_addr, 0, len);
922 +               return (void *)vpe1_load_addr;
923 +       }
924 +#endif
925 +
926         /*
927          * This means you must tell Linux to use less memory than you
928          * physically have, for example by passing a mem= boot argument.
929 @@ -746,6 +805,12 @@ static int vpe_run(struct vpe * v)
930         }
931  
932         /* Write the address we want it to start running from in the TCPC register. */
933 +#if defined(CONFIG_IFX_VPE_EXT) && 0
934 +       if (stlb)
935 +               write_vpe_c0_wired(vpe0_wired + vpe1_wired);
936 +       else
937 +               write_vpe_c0_wired(vpe1_wired);
938 +#endif
939         write_tc_c0_tcrestart((unsigned long)v->__start);
940         write_tc_c0_tccontext((unsigned long)0);
941  
942 @@ -759,6 +824,20 @@ static int vpe_run(struct vpe * v)
943  
944         write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
945  
946 +#if defined(CONFIG_IFX_VPE_EXT) && 0
947 +       /*
948 +        * $a2 & $a3 are used to pass command line parameters to VPE1. $a2
949 +        * points to the start of the command line string and $a3 points to
950 +        * the end of the string. This convention is identical to the Linux
951 +        * kernel boot parameter passing mechanism. Please note that $a3 is
952 +        * used to pass physical memory size or 0 in SDE tool kit. So, if you
953 +        * are passing command line parameters through $a2 & $a3, SDE programs
954 +        * don't work as desired.
955 +        */
956 +       mttgpr(6, command_line);
957 +       mttgpr(7, (command_line + strlen(command_line)));
958 +       if (is_sdepgm)
959 +#endif
960         /*
961          * The sde-kit passes 'memsize' to __start in $a3, so set something
962          * here...  Or set $a3 to zero and define DFLT_STACK_SIZE and
963 @@ -833,6 +912,9 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
964         if ( (v->__start == 0) || (v->shared_ptr == NULL))
965                 return -1;
966  
967 +#ifdef CONFIG_IFX_VPE_EXT
968 +       is_sdepgm = 1;
969 +#endif
970         return 0;
971  }
972  
973 @@ -994,6 +1076,15 @@ static int vpe_elfload(struct vpe * v)
974                            (unsigned long)v->load_addr + v->len);
975  
976         if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
977 +#ifdef CONFIG_IFX_VPE_EXT
978 +               if (vpe1_load_addr) {
979 +                       /* Conversion to KSEG1 is required ??? */
980 +                       v->__start = KSEG1ADDR(vpe1_load_addr);
981 +                       is_sdepgm = 0;
982 +                       return 0;
983 +               }
984 +#endif
985 +
986                 if (v->__start == 0) {
987                         printk(KERN_WARNING "VPE loader: program does not contain "
988                                "a __start symbol\n");
989 @@ -1064,6 +1155,9 @@ static int vpe_open(struct inode *inode, struct file *filp)
990         struct vpe_notifications *not;
991         struct vpe *v;
992         int ret;
993 +#ifdef CONFIG_IFX_VPE_EXT
994 +   int progsize;
995 +#endif
996  
997         if (minor != iminor(inode)) {
998                 /* assume only 1 device at the moment. */
999 @@ -1089,7 +1183,12 @@ static int vpe_open(struct inode *inode, struct file *filp)
1000                 release_progmem(v->load_addr);
1001                 cleanup_tc(get_tc(tclimit));
1002         }
1003 -
1004 +#ifdef CONFIG_IFX_VPE_EXT
1005 +       progsize = (vpe1_mem  != 0) ? vpe1_mem : P_SIZE;
1006 +       //printk("progsize = %x\n", progsize);
1007 +       v->pbuffer = vmalloc(progsize);
1008 +       v->plen = progsize;
1009 +#else
1010         /* this of-course trashes what was there before... */
1011         v->pbuffer = vmalloc(P_SIZE);
1012         if (!v->pbuffer) {
1013 @@ -1097,11 +1196,14 @@ static int vpe_open(struct inode *inode, struct file *filp)
1014                 return -ENOMEM;
1015         }
1016         v->plen = P_SIZE;
1017 +#endif
1018         v->load_addr = NULL;
1019         v->len = 0;
1020  
1021 +#if 0
1022         v->uid = filp->f_cred->fsuid;
1023         v->gid = filp->f_cred->fsgid;
1024 +#endif
1025  
1026  #ifdef CONFIG_MIPS_APSP_KSPD
1027         /* get kspd to tell us when a syscall_exit happens */
1028 @@ -1349,6 +1451,133 @@ static void kspd_sp_exit( int sp_id)
1029         cleanup_tc(get_tc(sp_id));
1030  }
1031  #endif
1032 +#ifdef CONFIG_IFX_VPE_EXT
1033 +int32_t vpe1_sw_start(void* sw_start_addr, uint32_t tcmask, uint32_t flags)
1034 +{
1035 +       enum vpe_state state;
1036 +       struct vpe *v = get_vpe(tclimit);
1037 +       struct vpe_notifications *not;
1038 +
1039 +       if (tcmask || flags) {
1040 +               printk(KERN_WARNING "Currently tcmask and flags should be 0.\
1041 +                               other values not supported\n");
1042 +               return -1;
1043 +       }
1044 +
1045 +       state = xchg(&v->state, VPE_STATE_INUSE);
1046 +       if (state != VPE_STATE_UNUSED) {
1047 +               vpe_stop(v);
1048 +
1049 +               list_for_each_entry(not, &v->notify, list) {
1050 +                       not->stop(tclimit);
1051 +               }
1052 +       }
1053 +
1054 +       v->__start = (unsigned long)sw_start_addr;
1055 +       is_sdepgm = 0;
1056 +
1057 +       if (!vpe_run(v)) {
1058 +               printk(KERN_DEBUG "VPE loader: VPE1 running successfully\n");
1059 +               return 0;
1060 +       }
1061 +       return -1;
1062 +}
1063 +
1064 +EXPORT_SYMBOL(vpe1_sw_start);
1065 +
1066 +int32_t vpe1_sw_stop(uint32_t flags)
1067 +{
1068 +       struct vpe *v = get_vpe(tclimit);
1069 +
1070 +       if (!vpe_free(v)) {
1071 +               printk(KERN_DEBUG "RP Stopped\n");
1072 +               return 0;
1073 +       }
1074 +       else
1075 +               return -1;
1076 +}
1077 +
1078 +EXPORT_SYMBOL(vpe1_sw_stop);
1079 +
1080 +uint32_t vpe1_get_load_addr (uint32_t flags)
1081 +{
1082 +       return vpe1_load_addr;
1083 +}
1084 +
1085 +EXPORT_SYMBOL(vpe1_get_load_addr);
1086 +
1087 +uint32_t vpe1_get_max_mem (uint32_t flags)
1088 +{
1089 +       if (!vpe1_mem)
1090 +               return P_SIZE;
1091 +       else
1092 +               return vpe1_mem;
1093 +}
1094 +
1095 +EXPORT_SYMBOL(vpe1_get_max_mem);
1096 +
1097 +void* vpe1_get_cmdline_argument(void)
1098 +{
1099 +       return saved_command_line;
1100 +}
1101 +
1102 +EXPORT_SYMBOL(vpe1_get_cmdline_argument);
1103 +
1104 +int32_t vpe1_set_boot_param(char *field, char *value, char flags)
1105 +{
1106 +       char *ptr, string[64];
1107 +       int start_off, end_off;
1108 +       if (!field)
1109 +               return -1;
1110 +       strcpy(string, field);
1111 +       if (value) {
1112 +               strcat(string, "=");
1113 +               strcat(string, value);
1114 +               strcat(command_line, " ");
1115 +               strcat(command_line, string);
1116 +       }
1117 +       else {
1118 +               ptr = strstr(command_line, string);
1119 +               if (ptr) {
1120 +                       start_off = ptr - command_line;
1121 +                       ptr += strlen(string);
1122 +                       while ((*ptr != ' ') && (*ptr != '\0'))
1123 +                               ptr++;
1124 +                       end_off = ptr - command_line;
1125 +                       command_line[start_off] = '\0';
1126 +                       strcat (command_line, command_line+end_off);
1127 +               }
1128 +       }
1129 +       return 0;
1130 +}
1131 +
1132 +EXPORT_SYMBOL(vpe1_set_boot_param);
1133 +
1134 +int32_t vpe1_get_boot_param(char *field, char **value, char flags)
1135 +{
1136 +       char *ptr, string[64];
1137 +       int i = 0;
1138 +       if (!field)
1139 +               return -1;
1140 +       if ((ptr = strstr(command_line, field))) {
1141 +               ptr += strlen(field) + 1; /* including = */
1142 +               while ((*ptr != ' ') && (*ptr != '\0'))
1143 +                       string[i++] = *ptr++;
1144 +               string[i] = '\0';
1145 +               *value = kmalloc((strlen(string) + 1), GFP_KERNEL);
1146 +               if (*value != NULL)
1147 +                       strcpy(*value, string);
1148 +       }
1149 +       else
1150 +               *value = NULL;
1151 +
1152 +       return 0;
1153 +}
1154 +
1155 +EXPORT_SYMBOL(vpe1_get_boot_param);
1156 +
1157 +extern void configure_tlb(void);
1158 +#endif
1159  
1160  static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
1161                           const char *buf, size_t len)
1162 @@ -1430,6 +1659,18 @@ static int __init vpe_module_init(void)
1163                 printk("VPE loader: not a MIPS MT capable processor\n");
1164                 return -ENODEV;
1165         }
1166 +#ifdef CONFIG_IFX_VPE_EXT
1167 +#ifndef CONFIG_MIPS_MT_SMTC
1168 +       configure_tlb();
1169 +#endif
1170 +#endif
1171 +
1172 +#ifndef CONFIG_MIPS_MT_SMTC
1173 +       if (!vpelimit)
1174 +               vpelimit = 1;
1175 +       if (!tclimit)
1176 +               tclimit = 1;
1177 +#endif
1178  
1179         if (vpelimit == 0) {
1180                 printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
1181 @@ -1474,10 +1715,12 @@ static int __init vpe_module_init(void)
1182         mtflags = dmt();
1183         vpflags = dvpe();
1184  
1185 +       back_to_back_c0_hazard();
1186 +
1187         /* Put MVPE's into 'configuration state' */
1188         set_c0_mvpcontrol(MVPCONTROL_VPC);
1189  
1190 -       /* dump_mtregs(); */
1191 +       dump_mtregs();
1192  
1193         val = read_c0_mvpconf0();
1194         hw_tcs = (val & MVPCONF0_PTC) + 1;
1195 @@ -1489,6 +1732,7 @@ static int __init vpe_module_init(void)
1196                  * reschedule send IPIs or similar we might hang.
1197                  */
1198                 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1199 +               back_to_back_c0_hazard();
1200                 evpe(vpflags);
1201                 emt(mtflags);
1202                 local_irq_restore(flags);
1203 @@ -1514,6 +1758,7 @@ static int __init vpe_module_init(void)
1204                         }
1205  
1206                         v->ntcs = hw_tcs - tclimit;
1207 +                        write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
1208  
1209                         /* add the tc to the list of this vpe's tc's. */
1210                         list_add(&t->tc, &v->tc);
1211 @@ -1582,6 +1827,7 @@ static int __init vpe_module_init(void)
1212  out_reenable:
1213         /* release config state */
1214         clear_c0_mvpcontrol(MVPCONTROL_VPC);
1215 +       back_to_back_c0_hazard();
1216  
1217         evpe(vpflags);
1218         emt(mtflags);
1219 -- 
1220 1.7.5.4
1221
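Beyond the /dev/vpe loader, the EXPORT_SYMBOLs added to vpe.c (vpe1_sw_start, vpe1_sw_stop, vpe1_get_load_addr, vpe1_get_max_mem, vpe1_set_boot_param, vpe1_get_boot_param) form a small in-kernel firmware-control API, with vpe1_load_addr=, vpe1_mem=, vpe1_wdog_ctr_addr= and vpe1_wdog_timeout= supplying the matching boot parameters. A hedged sketch of how a hypothetical platform driver might use it, assuming the firmware image has already been copied to the vpe1_load_addr region and that tcmask/flags stay 0 as vpe1_sw_start() requires:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/addrspace.h>

/* Exported by the CONFIG_IFX_VPE_EXT code in arch/mips/kernel/vpe.c above. */
extern int32_t vpe1_sw_start(void *sw_start_addr, uint32_t tcmask, uint32_t flags);
extern int32_t vpe1_sw_stop(uint32_t flags);
extern uint32_t vpe1_get_load_addr(uint32_t flags);
extern uint32_t vpe1_get_max_mem(uint32_t flags);
extern int32_t vpe1_set_boot_param(char *field, char *value, char flags);

static int __init vpe1_demo_init(void)
{
	uint32_t load_addr = vpe1_get_load_addr(0);

	if (!load_addr)
		return -ENODEV;	/* no vpe1_load_addr= given on the command line */

	pr_info("VPE1 firmware window: 0x%08x, %u bytes max\n",
		load_addr, vpe1_get_max_mem(0));

	/* Append a (hypothetical) option to the firmware command line. */
	vpe1_set_boot_param("fw_option", "1", 0);

	/*
	 * The image itself must already sit at vpe1_load_addr (copied there
	 * by the caller or through the /dev/vpe loader); tcmask and flags
	 * must currently be 0, as checked by vpe1_sw_start().
	 */
	return vpe1_sw_start((void *)KSEG1ADDR(load_addr), 0, 0);
}

static void __exit vpe1_demo_exit(void)
{
	vpe1_sw_stop(0);
}

module_init(vpe1_demo_init);
module_exit(vpe1_demo_exit);
MODULE_LICENSE("GPL");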