target/linux/lantiq/patches-3.0/250-mt-vpe.patch
1 --- a/arch/mips/Kconfig
2 +++ b/arch/mips/Kconfig
3 @@ -1905,6 +1905,28 @@ config MIPS_VPE_LOADER
4           Includes a loader for loading an elf relocatable object
5           onto another VPE and running it.
6  
7 +config IFX_VPE_EXT
8 +       bool "IFX APRP Extensions"
9 +       depends on MIPS_VPE_LOADER
10 +       default y
11 +       help
12 +         Includes IFX extensions for APRP
13 +
14 +config PERFCTRS
15 +       bool "34K Performance counters"
16 +       depends on MIPS_MT && PROC_FS
17 +       default n
18 +       help
19 +         34K Performance counter through /proc
20 +
21 +config MTSCHED
22 +       bool "Support mtsched priority configuration for TCs"
23 +       depends on MIPS_MT && PROC_FS
24 +       default y
25 +       help
26 +         Support for mtsched priority configuration for TCs through
27 +         /proc/mips/mtsched
28 +
29  config MIPS_MT_SMTC_IM_BACKSTOP
30         bool "Use per-TC register bits as backstop for inhibited IM bits"
31         depends on MIPS_MT_SMTC
32 --- a/arch/mips/include/asm/mipsmtregs.h
33 +++ b/arch/mips/include/asm/mipsmtregs.h
34 @@ -28,14 +28,34 @@
35  #define read_c0_vpeconf0()             __read_32bit_c0_register($1, 2)
36  #define write_c0_vpeconf0(val)         __write_32bit_c0_register($1, 2, val)
37  
38 +#define read_c0_vpeconf1()              __read_32bit_c0_register($1, 3)
39 +#define write_c0_vpeconf1(val)          __write_32bit_c0_register($1, 3, val)
40 +
41 +#define read_c0_vpeschedule()           __read_32bit_c0_register($1, 5)
42 +#define write_c0_vpeschedule(val)       __write_32bit_c0_register($1, 5, val)
43 +
44 +#define read_c0_vpeschefback()         __read_32bit_c0_register($1, 6)
45 +#define write_c0_vpeschefback(val)     __write_32bit_c0_register($1, 6, val)
46 +
47 +#define read_c0_vpeopt()              __read_32bit_c0_register($1, 7)
48 +#define write_c0_vpeopt(val)          __write_32bit_c0_register($1, 7, val)
49 +
50  #define read_c0_tcstatus()             __read_32bit_c0_register($2, 1)
51  #define write_c0_tcstatus(val)         __write_32bit_c0_register($2, 1, val)
52  
53  #define read_c0_tcbind()               __read_32bit_c0_register($2, 2)
54 +#define write_c0_tcbind(val)           __write_32bit_c0_register($2, 2, val)
55  
56  #define read_c0_tccontext()            __read_32bit_c0_register($2, 5)
57  #define write_c0_tccontext(val)                __write_32bit_c0_register($2, 5, val)
58  
59 +#define read_c0_tcschedule()           __read_32bit_c0_register($2, 6)
60 +#define write_c0_tcschedule(val)       __write_32bit_c0_register($2, 6, val)
61 +
62 +#define read_c0_tcschefback()          __read_32bit_c0_register($2, 7)
63 +#define write_c0_tcschefback(val)      __write_32bit_c0_register($2, 7, val)
64 +
65 +
66  #else /* Assembly */
67  /*
68   * Macros for use in assembly language code
69 @@ -74,6 +94,8 @@
70  #define MVPCONTROL_STLB_SHIFT  2
71  #define MVPCONTROL_STLB                (_ULCAST_(1) << MVPCONTROL_STLB_SHIFT)
72  
73 +#define MVPCONTROL_CPA_SHIFT   3
74 +#define MVPCONTROL_CPA         (_ULCAST_(1) << MVPCONTROL_CPA_SHIFT)
75  
76  /* MVPConf0 fields */
77  #define MVPCONF0_PTC_SHIFT     0
78 @@ -84,6 +106,8 @@
79  #define MVPCONF0_TCA           ( _ULCAST_(1) << MVPCONF0_TCA_SHIFT)
80  #define MVPCONF0_PTLBE_SHIFT   16
81  #define MVPCONF0_PTLBE         (_ULCAST_(0x3ff) << MVPCONF0_PTLBE_SHIFT)
82 +#define MVPCONF0_PCP_SHIFT     27
83 +#define MVPCONF0_PCP           (_ULCAST_(1) << MVPCONF0_PCP_SHIFT)
84  #define MVPCONF0_TLBS_SHIFT    29
85  #define MVPCONF0_TLBS          (_ULCAST_(1) << MVPCONF0_TLBS_SHIFT)
86  #define MVPCONF0_M_SHIFT       31
87 @@ -121,9 +145,25 @@
88  #define VPECONF0_VPA           (_ULCAST_(1) << VPECONF0_VPA_SHIFT)
89  #define VPECONF0_MVP_SHIFT     1
90  #define VPECONF0_MVP           (_ULCAST_(1) << VPECONF0_MVP_SHIFT)
91 +#define VPECONF0_ICS_SHIFT      16
92 +#define VPECONF0_ICS           (_ULCAST_(1) << VPECONF0_ICS_SHIFT)
93 +#define VPECONF0_DCS_SHIFT      17
94 +#define VPECONF0_DCS            (_ULCAST_(1) << VPECONF0_DCS_SHIFT)
95  #define VPECONF0_XTC_SHIFT     21
96  #define VPECONF0_XTC           (_ULCAST_(0xff) << VPECONF0_XTC_SHIFT)
97  
98 +/* VPEOpt fields */
99 +#define VPEOPT_DWX_SHIFT       0
100 +#define VPEOPT_IWX_SHIFT       8
101 +#define VPEOPT_IWX0            ( _ULCAST_(0x1) << VPEOPT_IWX_SHIFT)
102 +#define VPEOPT_IWX1            ( _ULCAST_(0x2) << VPEOPT_IWX_SHIFT)
103 +#define VPEOPT_IWX2            ( _ULCAST_(0x4) << VPEOPT_IWX_SHIFT)
104 +#define VPEOPT_IWX3            ( _ULCAST_(0x8) << VPEOPT_IWX_SHIFT)
105 +#define VPEOPT_DWX0            ( _ULCAST_(0x1) << VPEOPT_DWX_SHIFT)
106 +#define VPEOPT_DWX1            ( _ULCAST_(0x2) << VPEOPT_DWX_SHIFT)
107 +#define VPEOPT_DWX2            ( _ULCAST_(0x4) << VPEOPT_DWX_SHIFT)
108 +#define VPEOPT_DWX3            ( _ULCAST_(0x8) << VPEOPT_DWX_SHIFT)
109 +
110  /* TCStatus fields (per TC) */
111  #define TCSTATUS_TASID         (_ULCAST_(0xff))
112  #define TCSTATUS_IXMT_SHIFT    10
113 @@ -350,6 +390,14 @@ do {                                                                       \
114  #define write_vpe_c0_vpecontrol(val)   mttc0(1, 1, val)
115  #define read_vpe_c0_vpeconf0()         mftc0(1, 2)
116  #define write_vpe_c0_vpeconf0(val)     mttc0(1, 2, val)
117 +#define read_vpe_c0_vpeschedule()      mftc0(1, 5)
118 +#define write_vpe_c0_vpeschedule(val)  mttc0(1, 5, val)
119 +#define read_vpe_c0_vpeschefback()     mftc0(1, 6)
120 +#define write_vpe_c0_vpeschefback(val) mttc0(1, 6, val)
121 +#define read_vpe_c0_vpeopt()            mftc0(1, 7)
122 +#define write_vpe_c0_vpeopt(val)        mttc0(1, 7, val)
123 +#define read_vpe_c0_wired()            mftc0(6, 0)
124 +#define write_vpe_c0_wired(val)                mttc0(6, 0, val)
125  #define read_vpe_c0_count()            mftc0(9, 0)
126  #define write_vpe_c0_count(val)                mttc0(9, 0, val)
127  #define read_vpe_c0_status()           mftc0(12, 0)
128 @@ -381,6 +429,12 @@ do {                                                                       \
129  #define write_tc_c0_tchalt(val)                mttc0(2, 4, val)
130  #define read_tc_c0_tccontext()         mftc0(2, 5)
131  #define write_tc_c0_tccontext(val)     mttc0(2, 5, val)
132 +#define read_tc_c0_tcschedule()                mftc0(2, 6)
133 +#define write_tc_c0_tcschedule(val)    mttc0(2, 6, val)
134 +#define read_tc_c0_tcschefback()       mftc0(2, 7)
135 +#define write_tc_c0_tcschefback(val)   mttc0(2, 7, val)
136 +#define read_tc_c0_entryhi()            mftc0(10, 0)
137 +#define write_tc_c0_entryhi(val)        mttc0(10, 0, val)
138  
139  /* GPR */
140  #define read_tc_gpr_sp()               mftgpr(29)
141 --- a/arch/mips/kernel/Makefile
142 +++ b/arch/mips/kernel/Makefile
143 @@ -86,7 +86,8 @@ obj-$(CONFIG_MIPS32_O32)      += binfmt_elfo3
144  
145  obj-$(CONFIG_KGDB)             += kgdb.o
146  obj-$(CONFIG_PROC_FS)          += proc.o
147 -
148 +obj-$(CONFIG_MTSCHED)          += mtsched_proc.o
149 +obj-$(CONFIG_PERFCTRS)         += perf_proc.o
150  obj-$(CONFIG_64BIT)            += cpu-bugs64.o
151  
152  obj-$(CONFIG_I8253)            += i8253.o
153 --- a/arch/mips/kernel/mips-mt.c
154 +++ b/arch/mips/kernel/mips-mt.c
155 @@ -21,26 +21,96 @@
156  #include <asm/cacheflush.h>
157  
158  int vpelimit;
159 -
160  static int __init maxvpes(char *str)
161  {
162         get_option(&str, &vpelimit);
163 -
164         return 1;
165  }
166 -
167  __setup("maxvpes=", maxvpes);
168  
169  int tclimit;
170 -
171  static int __init maxtcs(char *str)
172  {
173         get_option(&str, &tclimit);
174 +       return 1;
175 +}
176 +__setup("maxtcs=", maxtcs);
177  
178 +#ifdef CONFIG_IFX_VPE_EXT
179 +int stlb;
180 +static int __init istlbshared(char *str)
181 +{
182 +       get_option(&str, &stlb);
183         return 1;
184  }
185 +__setup("vpe_tlb_shared=", istlbshared);
186  
187 -__setup("maxtcs=", maxtcs);
188 +int vpe0_wired;
189 +static int __init vpe0wired(char *str)
190 +{
191 +       get_option(&str, &vpe0_wired);
192 +       return 1;
193 +}
194 +__setup("vpe0_wired_tlb_entries=", vpe0wired);
195 +
196 +int vpe1_wired;
197 +static int __init vpe1wired(char *str)
198 +{
199 +       get_option(&str, &vpe1_wired);
200 +       return 1;
201 +}
202 +__setup("vpe1_wired_tlb_entries=", vpe1wired);
203 +
204 +#ifdef CONFIG_MIPS_MT_SMTC
205 +extern int nostlb;
206 +#endif
207 +void configure_tlb(void)
208 +{
209 +       int vpeflags, tcflags, tlbsiz;
210 +       unsigned int config1val;
211 +       vpeflags = dvpe();
212 +       tcflags = dmt();
213 +       write_c0_vpeconf0((read_c0_vpeconf0() | VPECONF0_MVP));
214 +       write_c0_mvpcontrol((read_c0_mvpcontrol() | MVPCONTROL_VPC));
215 +       mips_ihb();
216 +       //printk("stlb = %d, vpe0_wired = %d vpe1_wired=%d\n", stlb,vpe0_wired, vpe1_wired);
217 +       if (stlb) {
218 +               if (!(read_c0_mvpconf0() & MVPCONF0_TLBS)) {
219 +                       emt(tcflags);
220 +                       evpe(vpeflags);
221 +                       return;
222 +               }
223 +
224 +               write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
225 +               write_c0_wired(vpe0_wired + vpe1_wired);
226 +               if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
227 +                       config1val = read_vpe_c0_config1();
228 +                       tlbsiz = (((config1val >> 25) & 0x3f) + 1);
229 +                       if (tlbsiz > 64)
230 +                               tlbsiz = 64;
231 +                       cpu_data[0].tlbsize = tlbsiz;
232 +                       current_cpu_data.tlbsize = tlbsiz;
233 +               }
234 +
235 +       }
236 +       else {
237 +               write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_STLB);
238 +               write_c0_wired(vpe0_wired);
239 +       }
240 +
241 +       ehb();
242 +       write_c0_mvpcontrol((read_c0_mvpcontrol() & ~MVPCONTROL_VPC));
243 +       ehb();
244 +       local_flush_tlb_all();
245 +
246 +       printk("Wired TLB entries for Linux read_c0_wired() = %d\n", read_c0_wired());
247 +#ifdef CONFIG_MIPS_MT_SMTC
248 +       nostlb = !stlb;
249 +#endif
250 +       emt(tcflags);
251 +       evpe(vpeflags);
252 +}
253 +#endif
254  
255  /*
256   * Dump new MIPS MT state for the core. Does not leave TCs halted.
257 @@ -78,18 +148,18 @@ void mips_mt_regdump(unsigned long mvpct
258                         if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
259                                 printk("  VPE %d\n", i);
260                                 printk("   VPEControl : %08lx\n",
261 -                                      read_vpe_c0_vpecontrol());
262 +                                       read_vpe_c0_vpecontrol());
263                                 printk("   VPEConf0 : %08lx\n",
264 -                                      read_vpe_c0_vpeconf0());
265 +                                       read_vpe_c0_vpeconf0());
266                                 printk("   VPE%d.Status : %08lx\n",
267 -                                      i, read_vpe_c0_status());
268 +                                       i, read_vpe_c0_status());
269                                 printk("   VPE%d.EPC : %08lx %pS\n",
270 -                                      i, read_vpe_c0_epc(),
271 -                                      (void *) read_vpe_c0_epc());
272 +                                       i, read_vpe_c0_epc(),
273 +                                       (void *) read_vpe_c0_epc());
274                                 printk("   VPE%d.Cause : %08lx\n",
275 -                                      i, read_vpe_c0_cause());
276 +                                       i, read_vpe_c0_cause());
277                                 printk("   VPE%d.Config7 : %08lx\n",
278 -                                      i, read_vpe_c0_config7());
279 +                                       i, read_vpe_c0_config7());
280                                 break; /* Next VPE */
281                         }
282                 }
283 @@ -287,6 +357,9 @@ void mips_mt_set_cpuoptions(void)
284                 printk("Mapped %ld ITC cells starting at 0x%08x\n",
285                         ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
286         }
287 +#ifdef CONFIG_IFX_VPE_EXT
288 +       configure_tlb();
289 +#endif
290  }
291  
292  /*
293 --- a/arch/mips/kernel/proc.c
294 +++ b/arch/mips/kernel/proc.c
295 @@ -7,6 +7,7 @@
296  #include <linux/kernel.h>
297  #include <linux/sched.h>
298  #include <linux/seq_file.h>
299 +#include <linux/proc_fs.h>
300  #include <asm/bootinfo.h>
301  #include <asm/cpu.h>
302  #include <asm/cpu-features.h>
303 @@ -110,3 +111,19 @@ const struct seq_operations cpuinfo_op =
304         .stop   = c_stop,
305         .show   = show_cpuinfo,
306  };
307 +
308 +/*
309 + * Support for MIPS/local /proc hooks in /proc/mips/
310 + */
311 +
312 +static struct proc_dir_entry *mips_proc = NULL;
313 +
314 +struct proc_dir_entry *get_mips_proc_dir(void)
315 +{
316 +       /*
317 +        * This ought not to be preemptable.
318 +        */
319 +       if(mips_proc == NULL)
320 +               mips_proc = proc_mkdir("mips", NULL);
321 +       return(mips_proc);
322 +}
323 --- a/arch/mips/kernel/smtc.c
324 +++ b/arch/mips/kernel/smtc.c
325 @@ -1334,6 +1334,13 @@ void smtc_get_new_mmu_context(struct mm_
326         asid = asid_cache(cpu);
327  
328         do {
329 +#ifdef CONFIG_IFX_VPE_EXT
330 +               /* If TLB is shared between AP and RP (AP is running SMTC),
331 +                  leave out max ASID i.e., ASID_MASK for RP
332 +                */
333 +               if (!nostlb && ((asid & ASID_MASK) == (ASID_MASK - 1)))
334 +                       asid++;
335 +#endif
336                 if (!((asid += ASID_INC) & ASID_MASK) ) {
337                         if (cpu_has_vtag_icache)
338                                 flush_icache_all();
339 --- a/arch/mips/kernel/vpe.c
340 +++ b/arch/mips/kernel/vpe.c
341 @@ -76,6 +76,58 @@ static struct kspd_notifications kspd_ev
342  static int kspd_events_reqd;
343  #endif
344  
345 +#ifdef CONFIG_IFX_VPE_EXT
346 +static int is_sdepgm;
347 +extern int stlb;
348 +extern int vpe0_wired;
349 +extern int vpe1_wired;
350 +unsigned int vpe1_load_addr;
351 +
352 +static int __init load_address(char *str)
353 +{
354 +       get_option(&str, &vpe1_load_addr);
355 +       return 1;
356 +}
357 +__setup("vpe1_load_addr=", load_address);
358 +
359 +#include <asm/mipsmtregs.h>
360 +#define write_vpe_c0_wired(val)                mttc0(6, 0, val)
361 +
362 +#ifndef COMMAND_LINE_SIZE
363 +#      define COMMAND_LINE_SIZE        512
364 +#endif
365 +
366 +char command_line[COMMAND_LINE_SIZE * 2];
367 +
368 +static unsigned int vpe1_mem;
369 +static int __init vpe1mem(char *str)
370 +{
371 +       vpe1_mem = memparse(str, &str);
372 +       return 1;
373 +}
374 +__setup("vpe1_mem=", vpe1mem);
375 +
376 +uint32_t vpe1_wdog_ctr;
377 +static int __init wdog_ctr(char *str)
378 +{
379 +       get_option(&str, &vpe1_wdog_ctr);
380 +       return 1;
381 +}
382 +
383 +__setup("vpe1_wdog_ctr_addr=", wdog_ctr);
384 +EXPORT_SYMBOL(vpe1_wdog_ctr);
385 +
386 +uint32_t vpe1_wdog_timeout;
387 +static int __init wdog_timeout(char *str)
388 +{
389 +        get_option(&str, &vpe1_wdog_timeout);
390 +        return 1;
391 +}
392 +
393 +__setup("vpe1_wdog_timeout=", wdog_timeout);
394 +EXPORT_SYMBOL(vpe1_wdog_timeout);
395 +
396 +#endif
397  /* grab the likely amount of memory we will need. */
398  #ifdef CONFIG_MIPS_VPE_LOADER_TOM
399  #define P_SIZE (2 * 1024 * 1024)
400 @@ -268,6 +320,13 @@ static void *alloc_progmem(unsigned long
401         void *addr;
402  
403  #ifdef CONFIG_MIPS_VPE_LOADER_TOM
404 +#ifdef CONFIG_IFX_VPE_EXT
405 +       if (vpe1_load_addr) {
406 +               memset((void *)vpe1_load_addr, 0, len);
407 +               return (void *)vpe1_load_addr;
408 +       }
409 +#endif
410 +
411         /*
412          * This means you must tell Linux to use less memory than you
413          * physically have, for example by passing a mem= boot argument.
414 @@ -746,6 +805,12 @@ static int vpe_run(struct vpe * v)
415         }
416  
417         /* Write the address we want it to start running from in the TCPC register. */
418 +#if defined(CONFIG_IFX_VPE_EXT) && 0
419 +       if (stlb)
420 +               write_vpe_c0_wired(vpe0_wired + vpe1_wired);
421 +       else
422 +               write_vpe_c0_wired(vpe1_wired);
423 +#endif
424         write_tc_c0_tcrestart((unsigned long)v->__start);
425         write_tc_c0_tccontext((unsigned long)0);
426  
427 @@ -759,6 +824,20 @@ static int vpe_run(struct vpe * v)
428  
429         write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
430  
431 +#if defined(CONFIG_IFX_VPE_EXT) && 0
432 +       /*
433 +        * $a2 & $a3 are used to pass command line parameters to VPE1. $a2
434 +        * points to the start of the command line string and $a3 points to
435 +        * the end of the string. This convention is identical to the Linux
436 +        * kernel boot parameter passing mechanism. Please note that $a3 is
437 +        * used to pass physical memory size or 0 in SDE tool kit. So, if you
438 +        * are passing comand line parameters through $a2 & $a3 SDE programs
439 +        * don't work as desired.
440 +        */
441 +       mttgpr(6, command_line);
442 +       mttgpr(7, (command_line + strlen(command_line)));
443 +       if (is_sdepgm)
444 +#endif
445         /*
446          * The sde-kit passes 'memsize' to __start in $a3, so set something
447          * here...  Or set $a3 to zero and define DFLT_STACK_SIZE and
448 @@ -833,6 +912,9 @@ static int find_vpe_symbols(struct vpe *
449         if ( (v->__start == 0) || (v->shared_ptr == NULL))
450                 return -1;
451  
452 +#ifdef CONFIG_IFX_VPE_EXT
453 +       is_sdepgm = 1;
454 +#endif
455         return 0;
456  }
457  
458 @@ -994,6 +1076,15 @@ static int vpe_elfload(struct vpe * v)
459                            (unsigned long)v->load_addr + v->len);
460  
461         if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
462 +#ifdef CONFIG_IFX_VPE_EXT
463 +               if (vpe1_load_addr) {
464 +                       /* Conversion to KSEG1 is required ??? */
465 +                       v->__start = KSEG1ADDR(vpe1_load_addr);
466 +                       is_sdepgm = 0;
467 +                       return 0;
468 +               }
469 +#endif
470 +
471                 if (v->__start == 0) {
472                         printk(KERN_WARNING "VPE loader: program does not contain "
473                                "a __start symbol\n");
474 @@ -1064,6 +1155,9 @@ static int vpe_open(struct inode *inode,
475         struct vpe_notifications *not;
476         struct vpe *v;
477         int ret;
478 +#ifdef CONFIG_IFX_VPE_EXT
479 +   int progsize;
480 +#endif
481  
482         if (minor != iminor(inode)) {
483                 /* assume only 1 device at the moment. */
484 @@ -1089,7 +1183,12 @@ static int vpe_open(struct inode *inode,
485                 release_progmem(v->load_addr);
486                 cleanup_tc(get_tc(tclimit));
487         }
488 -
489 +#ifdef CONFIG_IFX_VPE_EXT
490 +       progsize = (vpe1_mem  != 0) ? vpe1_mem : P_SIZE;
491 +       //printk("progsize = %x\n", progsize);
492 +       v->pbuffer = vmalloc(progsize);
493 +       v->plen = progsize;
494 +#else
495         /* this of-course trashes what was there before... */
496         v->pbuffer = vmalloc(P_SIZE);
497         if (!v->pbuffer) {
498 @@ -1097,11 +1196,14 @@ static int vpe_open(struct inode *inode,
499                 return -ENOMEM;
500         }
501         v->plen = P_SIZE;
502 +#endif
503         v->load_addr = NULL;
504         v->len = 0;
505  
506 +#if 0
507         v->uid = filp->f_cred->fsuid;
508         v->gid = filp->f_cred->fsgid;
509 +#endif
510  
511  #ifdef CONFIG_MIPS_APSP_KSPD
512         /* get kspd to tell us when a syscall_exit happens */
513 @@ -1349,6 +1451,133 @@ static void kspd_sp_exit( int sp_id)
514         cleanup_tc(get_tc(sp_id));
515  }
516  #endif
517 +#ifdef CONFIG_IFX_VPE_EXT
518 +int32_t vpe1_sw_start(void* sw_start_addr, uint32_t tcmask, uint32_t flags)
519 +{
520 +       enum vpe_state state;
521 +       struct vpe *v = get_vpe(tclimit);
522 +       struct vpe_notifications *not;
523 +
524 +       if (tcmask || flags) {
525 +               printk(KERN_WARNING "Currently tcmask and flags should be 0.\
526 +                               other values not supported\n");
527 +               return -1;
528 +       }
529 +
530 +       state = xchg(&v->state, VPE_STATE_INUSE);
531 +       if (state != VPE_STATE_UNUSED) {
532 +               vpe_stop(v);
533 +
534 +               list_for_each_entry(not, &v->notify, list) {
535 +                       not->stop(tclimit);
536 +               }
537 +       }
538 +
539 +       v->__start = (unsigned long)sw_start_addr;
540 +       is_sdepgm = 0;
541 +
542 +       if (!vpe_run(v)) {
543 +               printk(KERN_DEBUG "VPE loader: VPE1 running successfully\n");
544 +               return 0;
545 +       }
546 +       return -1;
547 +}
548 +
549 +EXPORT_SYMBOL(vpe1_sw_start);
550 +
551 +int32_t vpe1_sw_stop(uint32_t flags)
552 +{
553 +       struct vpe *v = get_vpe(tclimit);
554 +
555 +       if (!vpe_free(v)) {
556 +               printk(KERN_DEBUG "RP Stopped\n");
557 +               return 0;
558 +       }
559 +       else
560 +               return -1;
561 +}
562 +
563 +EXPORT_SYMBOL(vpe1_sw_stop);
564 +
565 +uint32_t vpe1_get_load_addr (uint32_t flags)
566 +{
567 +       return vpe1_load_addr;
568 +}
569 +
570 +EXPORT_SYMBOL(vpe1_get_load_addr);
571 +
572 +uint32_t vpe1_get_max_mem (uint32_t flags)
573 +{
574 +       if (!vpe1_mem)
575 +               return P_SIZE;
576 +       else
577 +               return vpe1_mem;
578 +}
579 +
580 +EXPORT_SYMBOL(vpe1_get_max_mem);
581 +
582 +void* vpe1_get_cmdline_argument(void)
583 +{
584 +       return saved_command_line;
585 +}
586 +
587 +EXPORT_SYMBOL(vpe1_get_cmdline_argument);
588 +
589 +int32_t vpe1_set_boot_param(char *field, char *value, char flags)
590 +{
591 +       char *ptr, string[64];
592 +       int start_off, end_off;
593 +       if (!field)
594 +               return -1;
595 +       strcpy(string, field);
596 +       if (value) {
597 +               strcat(string, "=");
598 +               strcat(string, value);
599 +               strcat(command_line, " ");
600 +               strcat(command_line, string);
601 +       }
602 +       else {
603 +               ptr = strstr(command_line, string);
604 +               if (ptr) {
605 +                       start_off = ptr - command_line;
606 +                       ptr += strlen(string);
607 +                       while ((*ptr != ' ') && (*ptr != '\0'))
608 +                               ptr++;
609 +                       end_off = ptr - command_line;
610 +                       command_line[start_off] = '\0';
611 +                       strcat (command_line, command_line+end_off);
612 +               }
613 +       }
614 +       return 0;
615 +}
616 +
617 +EXPORT_SYMBOL(vpe1_set_boot_param);
618 +
619 +int32_t vpe1_get_boot_param(char *field, char **value, char flags)
620 +{
621 +       char *ptr, string[64];
622 +       int i = 0;
623 +       if (!field)
624 +               return -1;
625 +       if ((ptr = strstr(command_line, field))) {
626 +               ptr += strlen(field) + 1; /* including = */
627 +               while ((*ptr != ' ') && (*ptr != '\0'))
628 +                       string[i++] = *ptr++;
629 +               string[i] = '\0';
630 +               *value = kmalloc((strlen(string) + 1), GFP_KERNEL);
631 +               if (*value != NULL)
632 +                       strcpy(*value, string);
633 +       }
634 +       else
635 +               *value = NULL;
636 +
637 +       return 0;
638 +}
639 +
640 +EXPORT_SYMBOL(vpe1_get_boot_param);
641 +
642 +extern void configure_tlb(void);
643 +#endif
644  
645  static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
646                           const char *buf, size_t len)
647 @@ -1430,6 +1659,18 @@ static int __init vpe_module_init(void)
648                 printk("VPE loader: not a MIPS MT capable processor\n");
649                 return -ENODEV;
650         }
651 +#ifdef CONFIG_IFX_VPE_EXT
652 +#ifndef CONFIG_MIPS_MT_SMTC
653 +       configure_tlb();
654 +#endif
655 +#endif
656 +
657 +#ifndef CONFIG_MIPS_MT_SMTC
658 +       if (!vpelimit)
659 +               vpelimit = 1;
660 +       if (!tclimit)
661 +               tclimit = 1;
662 +#endif
663  
664         if (vpelimit == 0) {
665                 printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
666 @@ -1474,10 +1715,12 @@ static int __init vpe_module_init(void)
667         mtflags = dmt();
668         vpflags = dvpe();
669  
670 +       back_to_back_c0_hazard();
671 +
672         /* Put MVPE's into 'configuration state' */
673         set_c0_mvpcontrol(MVPCONTROL_VPC);
674  
675 -       /* dump_mtregs(); */
676 +       dump_mtregs();
677  
678         val = read_c0_mvpconf0();
679         hw_tcs = (val & MVPCONF0_PTC) + 1;
680 @@ -1489,6 +1732,7 @@ static int __init vpe_module_init(void)
681                  * reschedule send IPIs or similar we might hang.
682                  */
683                 clear_c0_mvpcontrol(MVPCONTROL_VPC);
684 +               back_to_back_c0_hazard();
685                 evpe(vpflags);
686                 emt(mtflags);
687                 local_irq_restore(flags);
688 @@ -1514,6 +1758,7 @@ static int __init vpe_module_init(void)
689                         }
690  
691                         v->ntcs = hw_tcs - tclimit;
692 +                        write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
693  
694                         /* add the tc to the list of this vpe's tc's. */
695                         list_add(&t->tc, &v->tc);
696 @@ -1582,6 +1827,7 @@ static int __init vpe_module_init(void)
697  out_reenable:
698         /* release config state */
699         clear_c0_mvpcontrol(MVPCONTROL_VPC);
700 +       back_to_back_c0_hazard();
701  
702         evpe(vpflags);
703         emt(mtflags);
704 --- /dev/null
705 +++ b/arch/mips/kernel/mtsched_proc.c
706 @@ -0,0 +1,279 @@
707 +/*
708 + * /proc hooks for MIPS MT scheduling policy management for 34K cores
709 + *
710 + *  This program is free software; you can distribute it and/or modify it
711 + *  under the terms of the GNU General Public License (Version 2) as
712 + *  published by the Free Software Foundation.
713 + *
714 + *  This program is distributed in the hope it will be useful, but WITHOUT
715 + *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
716 + *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
717 + *  for more details.
718 + *
719 + *  You should have received a copy of the GNU General Public License along
720 + *  with this program; if not, write to the Free Software Foundation, Inc.,
721 + *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
722 + *
723 + * Copyright (C) 2006 Mips Technologies, Inc
724 + */
725 +
726 +#include <linux/kernel.h>
727 +
728 +#include <asm/cpu.h>
729 +#include <asm/processor.h>
730 +#include <asm/system.h>
731 +#include <asm/mipsregs.h>
732 +#include <asm/mipsmtregs.h>
733 +#include <asm/uaccess.h>
734 +#include <linux/proc_fs.h>
735 +
736 +static struct proc_dir_entry *mtsched_proc;
737 +
738 +#ifndef CONFIG_MIPS_MT_SMTC
739 +#define NTCS 2
740 +#else
741 +#define NTCS NR_CPUS
742 +#endif
743 +#define NVPES 2
744 +
745 +int lastvpe = 1;
746 +int lasttc = 8;
747 +
748 +static int proc_read_mtsched(char *page, char **start, off_t off,
749 +                       int count, int *eof, void *data)
750 +{
751 +       int totalen = 0;
752 +       int len;
753 +
754 +       int i;
755 +       int vpe;
756 +       int mytc;
757 +       unsigned long flags;
758 +       unsigned int mtflags;
759 +       unsigned int haltstate;
760 +       unsigned int vpes_checked[NVPES];
761 +       unsigned int vpeschedule[NVPES];
762 +       unsigned int vpeschefback[NVPES];
763 +       unsigned int tcschedule[NTCS];
764 +       unsigned int tcschefback[NTCS];
765 +
766 +       /* Dump the state of the MIPS MT scheduling policy manager */
767 +       /* Initialize control state */
768 +       for(i = 0; i < NVPES; i++) {
769 +               vpes_checked[i] = 0;
770 +               vpeschedule[i] = 0;
771 +               vpeschefback[i] = 0;
772 +       }
773 +       for(i = 0; i < NTCS; i++) {
774 +               tcschedule[i] = 0;
775 +               tcschefback[i] = 0;
776 +       }
777 +
778 +       /* Disable interrupts and multithreaded issue */
779 +       local_irq_save(flags);
780 +       mtflags = dvpe();
781 +
782 +       /* Then go through the TCs, halt 'em, and extract the values */
783 +       mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
784 +       for(i = 0; i < NTCS; i++) {
785 +               if(i == mytc) {
786 +                       /* No need to halt ourselves! */
787 +                       tcschedule[i] = read_c0_tcschedule();
788 +                       tcschefback[i] = read_c0_tcschefback();
789 +                       /* If VPE bound to TC hasn't been checked, do it */
790 +                       vpe = read_c0_tcbind() & TCBIND_CURVPE;
791 +                       if(!vpes_checked[vpe]) {
792 +                               vpeschedule[vpe] = read_c0_vpeschedule();
793 +                               vpeschefback[vpe] = read_c0_vpeschefback();
794 +                               vpes_checked[vpe] = 1;
795 +                       }
796 +               } else {
797 +                       settc(i);
798 +                       haltstate = read_tc_c0_tchalt();
799 +                       write_tc_c0_tchalt(TCHALT_H);
800 +                       mips_ihb();
801 +                       tcschedule[i] = read_tc_c0_tcschedule();
802 +                       tcschefback[i] = read_tc_c0_tcschefback();
803 +                       /* If VPE bound to TC hasn't been checked, do it */
804 +                       vpe = read_tc_c0_tcbind() & TCBIND_CURVPE;
805 +                       if(!vpes_checked[vpe]) {
806 +                           vpeschedule[vpe] = read_vpe_c0_vpeschedule();
807 +                           vpeschefback[vpe] = read_vpe_c0_vpeschefback();
808 +                           vpes_checked[vpe] = 1;
809 +                       }
810 +                       if(!haltstate) write_tc_c0_tchalt(0);
811 +               }
812 +       }
813 +       /* Re-enable MT and interrupts */
814 +       evpe(mtflags);
815 +       local_irq_restore(flags);
816 +
817 +       for(vpe=0; vpe < NVPES; vpe++) {
818 +               len = sprintf(page, "VPE[%d].VPEschedule  = 0x%08x\n",
819 +                       vpe, vpeschedule[vpe]);
820 +               totalen += len;
821 +               page += len;
822 +               len = sprintf(page, "VPE[%d].VPEschefback = 0x%08x\n",
823 +                       vpe, vpeschefback[vpe]);
824 +               totalen += len;
825 +               page += len;
826 +       }
827 +       for(i=0; i < NTCS; i++) {
828 +               len = sprintf(page, "TC[%d].TCschedule    = 0x%08x\n",
829 +                       i, tcschedule[i]);
830 +               totalen += len;
831 +               page += len;
832 +               len = sprintf(page, "TC[%d].TCschefback   = 0x%08x\n",
833 +                       i, tcschefback[i]);
834 +               totalen += len;
835 +               page += len;
836 +       }
837 +       return totalen;
838 +}
839 +
840 +/*
841 + * Write to perf counter registers based on text input
842 + */
843 +
844 +#define TXTBUFSZ 100
845 +
846 +static int proc_write_mtsched(struct file *file, const char *buffer,
847 +                               unsigned long count, void *data)
848 +{
849 +       int len = 0;
850 +       char mybuf[TXTBUFSZ];
851 +       /* At most, we will set up 9 TCs and 2 VPEs, 11 entries in all */
852 +       char entity[1];   //, entity1[1];
853 +       int number[1];
854 +       unsigned long value[1];
855 +       int nparsed = 0 , index = 0;
856 +       unsigned long flags;
857 +       unsigned int mtflags;
858 +       unsigned int haltstate;
859 +       unsigned int tcbindval;
860 +
861 +       if(count >= TXTBUFSZ) len = TXTBUFSZ-1;
862 +       else len = count;
863 +       memset(mybuf,0,TXTBUFSZ);
864 +       if(copy_from_user(mybuf, buffer, len)) return -EFAULT;
865 +
866 +       nparsed = sscanf(mybuf, "%c%d %lx",
867 +                &entity[0] ,&number[0], &value[0]);
868 +
869 +       /*
870 +        * Having acquired the inputs, which might have
871 +        * generated exceptions and preemptions,
872 +        * program the registers.
873 +        */
874 +       /* Disable interrupts and multithreaded issue */
875 +       local_irq_save(flags);
876 +       mtflags = dvpe();
877 +
878 +       if(entity[index] == 't' ) {
879 +               /* Set TCSchedule or TCScheFBack of specified TC */
880 +               if(number[index] > NTCS) goto skip;
881 +               /* If it's our own TC, do it direct */
882 +               if(number[index] ==
883 +                               ((read_c0_tcbind() & TCBIND_CURTC)
884 +                               >> TCBIND_CURTC_SHIFT)) {
885 +                       if(entity[index] == 't')
886 +                                write_c0_tcschedule(value[index]);
887 +                       else
888 +                               write_c0_tcschefback(value[index]);
889 +               } else {
890 +               /* Otherwise, we do it via MTTR */
891 +                       settc(number[index]);
892 +                       haltstate = read_tc_c0_tchalt();
893 +                       write_tc_c0_tchalt(TCHALT_H);
894 +                       mips_ihb();
895 +                       if(entity[index] == 't')
896 +                                write_tc_c0_tcschedule(value[index]);
897 +                       else
898 +                               write_tc_c0_tcschefback(value[index]);
899 +                       mips_ihb();
900 +                       if(!haltstate) write_tc_c0_tchalt(0);
901 +               }
902 +       } else if(entity[index] == 'v') {
903 +               /* Set VPESchedule of specified VPE */
904 +               if(number[index] > NVPES) goto skip;
905 +               tcbindval = read_c0_tcbind();
906 +               /* Are we doing this to our current VPE? */
907 +               if((tcbindval & TCBIND_CURVPE) == number[index]) {
908 +                       /* Then life is simple */
909 +                       write_c0_vpeschedule(value[index]);
910 +               } else {
911 +                       /*
912 +                        * Bind ourselves to the other VPE long enough
913 +                        * to program the bind value.
914 +                        */
915 +                       write_c0_tcbind((tcbindval & ~TCBIND_CURVPE)
916 +                                          | number[index]);
917 +                       mips_ihb();
918 +                       write_c0_vpeschedule(value[index]);
919 +                       mips_ihb();
920 +                       /* Restore previous binding */
921 +                       write_c0_tcbind(tcbindval);
922 +                       mips_ihb();
923 +               }
924 +       }
925 +
926 +       else if(entity[index] == 'r') {
927 +               unsigned int vpes_checked[2], vpe ,i , mytc;
928 +               vpes_checked[0] = vpes_checked[1] = 0;
929 +
930 +               /* Then go through the TCs, halt 'em, and extract the values */
931 +               mytc = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
932 +
933 +               for(i = 0; i < NTCS; i++) {
934 +                       if(i == mytc) {
935 +                               /* No need to halt ourselves! */
936 +                               write_c0_vpeschefback(0);
937 +                               write_c0_tcschefback(0);
938 +                       } else {
939 +                               settc(i);
940 +                               haltstate = read_tc_c0_tchalt();
941 +                               write_tc_c0_tchalt(TCHALT_H);
942 +                               mips_ihb();
943 +                               write_tc_c0_tcschefback(0);
944 +                               /* If VPE bound to TC hasn't been checked, do it */
945 +                               vpe = read_tc_c0_tcbind() & TCBIND_CURVPE;
946 +                               if(!vpes_checked[vpe]) {
947 +                                   write_vpe_c0_vpeschefback(0);
948 +                                   vpes_checked[vpe] = 1;
949 +                               }
950 +                               if(!haltstate) write_tc_c0_tchalt(0);
951 +                       }
952 +               }
953 +       }
954 +       else {
955 +               printk ("\n Usage : <t/v><0/1> <Hex Value>\n Example : t0 0x01\n");
956 +       }
957 +
958 +skip:
959 +       /* Re-enable MT and interrupts */
960 +       evpe(mtflags);
961 +       local_irq_restore(flags);
962 +       return (len);
963 +}
964 +
965 +static int __init init_mtsched_proc(void)
966 +{
967 +       extern struct proc_dir_entry *get_mips_proc_dir(void);
968 +       struct proc_dir_entry *mips_proc_dir;
969 +
970 +       if (!cpu_has_mipsmt) {
971 +               printk("mtsched: not a MIPS MT capable processor\n");
972 +               return -ENODEV;
973 +       }
974 +
975 +       mips_proc_dir = get_mips_proc_dir();
976 +
977 +       mtsched_proc = create_proc_entry("mtsched", 0644, mips_proc_dir);
978 +       mtsched_proc->read_proc = proc_read_mtsched;
979 +       mtsched_proc->write_proc = proc_write_mtsched;
980 +
981 +       return 0;
982 +}
983 +
984 +/* Automagically create the entry */
985 +module_init(init_mtsched_proc);
986 --- /dev/null
987 +++ b/arch/mips/kernel/perf_proc.c
988 @@ -0,0 +1,191 @@
989 +/*
990 + * /proc hooks for CPU performance counter support for SMTC kernel
991 + * (and ultimately others)
992 + * Copyright (C) 2006 Mips Technologies, Inc
993 + */
994 +
995 +#include <linux/kernel.h>
996 +
997 +#include <asm/cpu.h>
998 +#include <asm/processor.h>
999 +#include <asm/system.h>
1000 +#include <asm/mipsregs.h>
1001 +#include <asm/uaccess.h>
1002 +#include <linux/proc_fs.h>
1003 +
1004 +/*
1005 + * /proc diagnostic and statistics hooks
1006 + */
1007 +
1008 +
1009 +/* Internal software-extended event counters */
1010 +
1011 +static unsigned long long extencount[4] = {0,0,0,0};
1012 +
1013 +static struct proc_dir_entry *perf_proc;
1014 +
1015 +static int proc_read_perf(char *page, char **start, off_t off,
1016 +                               int count, int *eof, void *data)
1017 +{
1018 +       int totalen = 0;
1019 +       int len;
1020 +
1021 +       len = sprintf(page, "PerfCnt[0].Ctl : 0x%08x\n", read_c0_perfctrl0());
1022 +       totalen += len;
1023 +       page += len;
1024 +       len = sprintf(page, "PerfCnt[0].Cnt : %Lu\n",
1025 +               extencount[0] + (unsigned long long)((unsigned)read_c0_perfcntr0()));
1026 +       totalen += len;
1027 +       page += len;
1028 +       len = sprintf(page, "PerfCnt[1].Ctl : 0x%08x\n", read_c0_perfctrl1());
1029 +       totalen += len;
1030 +       page += len;
1031 +       len = sprintf(page, "PerfCnt[1].Cnt : %Lu\n",
1032 +               extencount[1] + (unsigned long long)((unsigned)read_c0_perfcntr1()));
1033 +       totalen += len;
1034 +       page += len;
1035 +       len = sprintf(page, "PerfCnt[2].Ctl : 0x%08x\n", read_c0_perfctrl2());
1036 +       totalen += len;
1037 +       page += len;
1038 +       len = sprintf(page, "PerfCnt[2].Cnt : %Lu\n",
1039 +               extencount[2] + (unsigned long long)((unsigned)read_c0_perfcntr2()));
1040 +       totalen += len;
1041 +       page += len;
1042 +       len = sprintf(page, "PerfCnt[3].Ctl : 0x%08x\n", read_c0_perfctrl3());
1043 +       totalen += len;
1044 +       page += len;
1045 +       len = sprintf(page, "PerfCnt[3].Cnt : %Lu\n",
1046 +               extencount[3] + (unsigned long long)((unsigned)read_c0_perfcntr3()));
1047 +       totalen += len;
1048 +       page += len;
1049 +
1050 +       return totalen;
1051 +}
1052 +
1053 +/*
1054 + * Write to perf counter registers based on text input
1055 + */
1056 +
1057 +#define TXTBUFSZ 100
1058 +
1059 +static int proc_write_perf(struct file *file, const char *buffer,
1060 +                               unsigned long count, void *data)
1061 +{
1062 +       int len;
1063 +       int nparsed;
1064 +       int index;
1065 +       char mybuf[TXTBUFSZ];
1066 +
1067 +       int which[4];
1068 +       unsigned long control[4];
1069 +       long long ctrdata[4];
1070 +
1071 +       if(count >= TXTBUFSZ) len = TXTBUFSZ-1;
1072 +       else len = count;
1073 +       memset(mybuf,0,TXTBUFSZ);
1074 +       if(copy_from_user(mybuf, buffer, len)) return -EFAULT;
1075 +
1076 +       nparsed = sscanf(mybuf,
1077 +                       "%d %lx %Ld %d %lx %Ld %d %lx %Ld %d %lx %Ld",
1078 +                               &which[0], &control[0], &ctrdata[0],
1079 +                               &which[1], &control[1], &ctrdata[1],
1080 +                               &which[2], &control[2], &ctrdata[2],
1081 +                               &which[3], &control[3], &ctrdata[3]);
1082 +
1083 +       for(index = 0; nparsed >= 3; index++) {
1084 +               switch (which[index]) {
1085 +               case 0:
1086 +                       write_c0_perfctrl0(control[index]);
1087 +                       if(ctrdata[index] != -1) {
1088 +                           extencount[0] = (unsigned long long)ctrdata[index];
1089 +                           write_c0_perfcntr0((unsigned long)0);
1090 +                       }
1091 +                       break;
1092 +               case 1:
1093 +                       write_c0_perfctrl1(control[index]);
1094 +                       if(ctrdata[index] != -1) {
1095 +                           extencount[1] = (unsigned long long)ctrdata[index];
1096 +                           write_c0_perfcntr1((unsigned long)0);
1097 +                       }
1098 +                       break;
1099 +               case 2:
1100 +                       write_c0_perfctrl2(control[index]);
1101 +                       if(ctrdata[index] != -1) {
1102 +                           extencount[2] = (unsigned long long)ctrdata[index];
1103 +                           write_c0_perfcntr2((unsigned long)0);
1104 +                       }
1105 +                       break;
1106 +               case 3:
1107 +                       write_c0_perfctrl3(control[index]);
1108 +                       if(ctrdata[index] != -1) {
1109 +                           extencount[3] = (unsigned long long)ctrdata[index];
1110 +                           write_c0_perfcntr3((unsigned long)0);
1111 +                       }
1112 +                       break;
1113 +               }
1114 +               nparsed -= 3;
1115 +       }
1116 +       return (len);
1117 +}
1118 +
1119 +extern int (*perf_irq)(void);
1120 +
1121 +/*
1122 + * Invoked when timer interrupt vector picks up a perf counter overflow
1123 + */
1124 +
1125 +static int perf_proc_irq(void)
1126 +{
1127 +       unsigned long snapshot;
1128 +
1129 +       /*
1130 +        * It would be nice to do this as a loop, but we don't have
1131 +        * indirect access to CP0 registers.
1132 +        */
1133 +       snapshot = read_c0_perfcntr0();
1134 +       if ((long)snapshot < 0) {
1135 +               extencount[0] +=
1136 +                       (unsigned long long)((unsigned)read_c0_perfcntr0());
1137 +               write_c0_perfcntr0(0);
1138 +       }
1139 +       snapshot = read_c0_perfcntr1();
1140 +       if ((long)snapshot < 0) {
1141 +               extencount[1] +=
1142 +                       (unsigned long long)((unsigned)read_c0_perfcntr1());
1143 +               write_c0_perfcntr1(0);
1144 +       }
1145 +       snapshot = read_c0_perfcntr2();
1146 +       if ((long)snapshot < 0) {
1147 +               extencount[2] +=
1148 +                       (unsigned long long)((unsigned)read_c0_perfcntr2());
1149 +               write_c0_perfcntr2(0);
1150 +       }
1151 +       snapshot = read_c0_perfcntr3();
1152 +       if ((long)snapshot < 0) {
1153 +               extencount[3] +=
1154 +                       (unsigned long long)((unsigned)read_c0_perfcntr3());
1155 +               write_c0_perfcntr3(0);
1156 +       }
1157 +       return 0;
1158 +}
1159 +
1160 +static int __init init_perf_proc(void)
1161 +{
1162 +       extern struct proc_dir_entry *get_mips_proc_dir(void);
1163 +
1164 +       struct proc_dir_entry *mips_proc_dir = get_mips_proc_dir();
1165 +
1166 +       write_c0_perfcntr0(0);
1167 +       write_c0_perfcntr1(0);
1168 +       write_c0_perfcntr2(0);
1169 +       write_c0_perfcntr3(0);
1170 +       perf_proc = create_proc_entry("perf", 0644, mips_proc_dir);
1171 +       perf_proc->read_proc = proc_read_perf;
1172 +       perf_proc->write_proc = proc_write_perf;
1173 +       perf_irq = perf_proc_irq;
1174 +
1175 +       return 0;
1176 +}
1177 +
1178 +/* Automagically create the entry */
1179 +module_init(init_perf_proc);