[coldfire] use the proper serial device, change memory layout
target/linux/coldfire/patches/008-mcfv4e_coldfire_code.patch
1 From 70b2bd01829b38a1a79caeda05d436b2e5fecf82 Mon Sep 17 00:00:00 2001
2 From: Kurt Mahan <kmahan@freescale.com>
3 Date: Wed, 31 Oct 2007 17:00:18 -0600
4 Subject: [PATCH] Core Coldfire/MCF5445x specific code.
5
6 LTIBName: mcfv4e-coldfire-code
7 Signed-off-by: Kurt Mahan <kmahan@freescale.com>
8 ---
9  arch/m68k/coldfire/Makefile       |   11 +
10  arch/m68k/coldfire/cache.c        |  215 +++++++++
11  arch/m68k/coldfire/config.c       |  420 ++++++++++++++++++
12  arch/m68k/coldfire/entry.S        |  701 ++++++++++++++++++++++++++++++
13  arch/m68k/coldfire/head.S         |  474 ++++++++++++++++++++
14  arch/m68k/coldfire/ints.c         |  384 ++++++++++++++++
15  arch/m68k/coldfire/iomap.c        |   54 +++
16  arch/m68k/coldfire/mcf5445x-pci.c |  427 ++++++++++++++++++
17  arch/m68k/coldfire/muldi3.S       |   64 +++
18  arch/m68k/coldfire/pci.c          |  245 +++++++++++
19  arch/m68k/coldfire/signal.c       |  868 +++++++++++++++++++++++++++++++++++++
20  arch/m68k/coldfire/traps.c        |  454 +++++++++++++++++++
21  arch/m68k/coldfire/vmlinux-cf.lds |   92 ++++
22  13 files changed, 4409 insertions(+), 0 deletions(-)
23  create mode 100644 arch/m68k/coldfire/Makefile
24  create mode 100644 arch/m68k/coldfire/cache.c
25  create mode 100644 arch/m68k/coldfire/config.c
26  create mode 100644 arch/m68k/coldfire/entry.S
27  create mode 100644 arch/m68k/coldfire/head.S
28  create mode 100644 arch/m68k/coldfire/ints.c
29  create mode 100644 arch/m68k/coldfire/iomap.c
30  create mode 100644 arch/m68k/coldfire/mcf5445x-pci.c
31  create mode 100644 arch/m68k/coldfire/muldi3.S
32  create mode 100644 arch/m68k/coldfire/pci.c
33  create mode 100644 arch/m68k/coldfire/signal.c
34  create mode 100644 arch/m68k/coldfire/traps.c
35  create mode 100644 arch/m68k/coldfire/vmlinux-cf.lds
36
37 --- /dev/null
38 +++ b/arch/m68k/coldfire/Makefile
39 @@ -0,0 +1,11 @@
40 +#
41 +# Makefile for Linux arch/m68k/coldfire source directory
42 +#
43 +
44 +obj-y:= entry.o config.o cache.o signal.o muldi3.o traps.o ints.o
45 +
46 +ifneq ($(strip $(CONFIG_USB) $(CONFIG_USB_GADGET_MCF5445X)),)
47 +       obj-y   += usb.o usb/
48 +endif
49 +
50 +obj-$(CONFIG_PCI)      += pci.o mcf5445x-pci.o iomap.o
51 --- /dev/null
52 +++ b/arch/m68k/coldfire/cache.c
53 @@ -0,0 +1,215 @@
54 +/*
55 + *  linux/arch/m68k/coldfire/cache.c
56 + *
57 + *  Matt Waddel Matt.Waddel@freescale.com
58 + *  Copyright Freescale Semiconductor, Inc. 2007
59 + *
60 + *  This program is free software; you can redistribute it and/or modify
61 + *  it under the terms of the GNU General Public License as published by
62 + *  the Free Software Foundation; either version 2 of the License, or
63 + *  (at your option) any later version.
64 + */
65 +
66 +#include <linux/interrupt.h>
67 +#include <asm/cfcache.h>
68 +#include <asm/coldfire.h>
69 +#include <asm/system.h>
70 +
71 +#define _DCACHE_SIZE (2*16384)
72 +#define _ICACHE_SIZE (2*16384)
73 +
74 +#define _SET_SHIFT 4
75 +
76 +/*
77 + * Masks for cache sizes.  Programming note: because the set size is a
78 + * power of two, the mask is also the last address in the set.
79 + */
80 +
81 +#define _DCACHE_SET_MASK ((_DCACHE_SIZE/64-1)<<_SET_SHIFT)
82 +#define _ICACHE_SET_MASK ((_ICACHE_SIZE/64-1)<<_SET_SHIFT)
83 +#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
84 +#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
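A worked example of the constants above, assuming the 32 KB, 4-way, 16-byte-line geometry these defines imply (illustrative note, not part of the patch):

    /* _DCACHE_SIZE      = 2 * 16384      = 32768 bytes
     * sets              = 32768 / 64     = 512   (64 = 16-byte line * 4 ways)
     * _DCACHE_SET_MASK  = (512 - 1) << 4 = 0x1FF0
     * Because 512 is a power of two, 0x1FF0 is both the set-index mask and
     * LAST_DCACHE_ADDR, the highest set address the flush loops visit. */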
85 +
86 +/************************************************************
87 + *  Routine to cleanly flush the cache, pushing all lines and
88 + *  invalidating them.
89 + *
90 + *  This is the flash-resident version, used after copying the .text
91 + *  segment from flash to RAM.
92 + *************************************************************/
93 +void FLASHDcacheFlushInvalidate(void)
94 +       __attribute__ ((section (".text_loader")));
95 +
96 +void FLASHDcacheFlushInvalidate()
97 +{
98 +       unsigned long set;
99 +       unsigned long start_set;
100 +       unsigned long end_set;
101 +
102 +       start_set = 0;
103 +       end_set = (unsigned long)LAST_DCACHE_ADDR;
104 +
105 +       for (set = start_set; set < end_set; set += (0x10 - 3))
106 +               asm volatile("cpushl %%dc,(%0)\n"
107 +                            "\taddq%.l #1,%0\n"
108 +                            "\tcpushl %%dc,(%0)\n"
109 +                            "\taddq%.l #1,%0\n"
110 +                            "\tcpushl %%dc,(%0)\n"
111 +                            "\taddq%.l #1,%0\n"
112 +                            "\tcpushl %%dc,(%0)" : : "a" (set));
113 +}
114 +
115 +/************************************************************
116 + *  Routine to cleanly flush the cache, pushing all lines and
117 + *  invalidating them.
118 + *
119 + *************************************************************/
120 +void DcacheFlushInvalidate()
121 +{
122 +       unsigned long set;
123 +       unsigned long start_set;
124 +       unsigned long end_set;
125 +
126 +       start_set = 0;
127 +       end_set = (unsigned long)LAST_DCACHE_ADDR;
128 +
129 +       for (set = start_set; set < end_set; set += (0x10 - 3))
130 +               asm volatile("cpushl %%dc,(%0)\n"
131 +                            "\taddq%.l #1,%0\n"
132 +                            "\tcpushl %%dc,(%0)\n"
133 +                            "\taddq%.l #1,%0\n"
134 +                            "\tcpushl %%dc,(%0)\n"
135 +                            "\taddq%.l #1,%0\n"
136 +                            "\tcpushl %%dc,(%0)" : : "a" (set));
137 +}
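The inline assembly above walks the cache by set and way; a rough C equivalent is sketched below, where cpushl_dc() is a hypothetical stand-in for a single "cpushl %dc,(a)" instruction rather than anything defined in this patch:

    unsigned long set, way;

    for (set = 0; set < LAST_DCACHE_ADDR; set += 0x10)   /* one 16-byte set */
            for (way = 0; way < 4; way++)
                    cpushl_dc(set + way);        /* push + invalidate one line */

    /* The real loop steps by (0x10 - 3) because the three "addq #1"
     * instructions inside the asm block have already advanced the address
     * register by 3 before the next iteration. */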
138 +
139 +
140 +
141 +/******************************************************************************
142 + *  Routine to cleanly flush a block of the cache, pushing all relevant lines
143 + *  and invalidating them.
144 + *
145 + ******************************************************************************/
146 +void DcacheFlushInvalidateCacheBlock(void *start, unsigned long size)
147 +{
148 +       unsigned long set;
149 +       unsigned long start_set;
150 +       unsigned long end_set;
151 +
152 +       /* if size is bigger than the cache can store
153 +        * set the size to the maximum amount
154 +        */
155 +
156 +       if (size > LAST_DCACHE_ADDR)
157 +               size = LAST_DCACHE_ADDR;
158 +
159 +       start_set = ((unsigned long)start) & _DCACHE_SET_MASK;
160 +       end_set = ((unsigned long)(start+size-1)) & _DCACHE_SET_MASK;
161 +
162 +       if (start_set > end_set) {
163 +               /* from the beginning (set 0) up to the wrapped end address */
164 +               for (set = 0; set <= end_set; set += (0x10 - 3))
165 +                       asm volatile("cpushl %%dc,(%0)\n"
166 +                                    "\taddq%.l #1,%0\n"
167 +                                    "\tcpushl %%dc,(%0)\n"
168 +                                    "\taddq%.l #1,%0\n"
169 +                                    "\tcpushl %%dc,(%0)\n"
170 +                                    "\taddq%.l #1,%0\n"
171 +                                    "\tcpushl %%dc,(%0)" : : "a" (set));
172 +
173 +               /* the next loop finishes the rest of the cache, i.e. past the wrap */
174 +               end_set = LAST_DCACHE_ADDR;
175 +       }
176 +       for (set = start_set; set <= end_set; set += (0x10 - 3))
177 +               asm volatile("cpushl %%dc,(%0)\n"
178 +                            "\taddq%.l #1,%0\n"
179 +                            "\tcpushl %%dc,(%0)\n"
180 +                            "\taddq%.l #1,%0\n"
181 +                            "\tcpushl %%dc,(%0)\n"
182 +                            "\taddq%.l #1,%0\n"
183 +                            "\tcpushl %%dc,(%0)" : : "a" (set));
184 +}
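A worked example of the set-index arithmetic used by the block flush above, with an illustrative start address and size:

    /* start = (void *)0x40001FE0, size = 0x40, _DCACHE_SET_MASK = 0x1FF0:
     *   start_set = 0x40001FE0 & 0x1FF0 = 0x1FE0
     *   end_set   = 0x4000201F & 0x1FF0 = 0x0010
     * start_set > end_set, so the range wraps past the last set: the first
     * loop flushes sets 0x0000..0x0010, then end_set is reset to
     * LAST_DCACHE_ADDR and the second loop flushes 0x1FE0..0x1FF0. */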
185 +
186 +
187 +void IcacheInvalidateCacheBlock(void *start, unsigned long size)
188 +{
189 +       unsigned long set;
190 +       unsigned long start_set;
191 +       unsigned long end_set;
192 +
193 +       /* if size is bigger than the cache can store
194 +        * set the size to the maximum amount
195 +        */
196 +
197 +       if (size > LAST_ICACHE_ADDR)
198 +               size = LAST_ICACHE_ADDR;
199 +
200 +       start_set = ((unsigned long)start) & _ICACHE_SET_MASK;
201 +       end_set = ((unsigned long)(start+size-1)) & _ICACHE_SET_MASK;
202 +
203 +       if (start_set > end_set) {
204 +               /* from the beginning (set 0) up to the wrapped end address */
205 +               for (set = 0; set <= end_set; set += (0x10 - 3))
206 +                       asm volatile("cpushl %%ic,(%0)\n"
207 +                                    "\taddq%.l #1,%0\n"
208 +                                    "\tcpushl %%ic,(%0)\n"
209 +                                    "\taddq%.l #1,%0\n"
210 +                                    "\tcpushl %%ic,(%0)\n"
211 +                                    "\taddq%.l #1,%0\n"
212 +                                    "\tcpushl %%ic,(%0)" : : "a" (set));
213 +
214 +               /* the next loop finishes the rest of the cache, i.e. past the wrap */
215 +               end_set = LAST_ICACHE_ADDR;
216 +       }
217 +       for (set = start_set; set <= end_set; set += (0x10 - 3))
218 +               asm volatile("cpushl %%ic,(%0)\n"
219 +                            "\taddq%.l #1,%0\n"
220 +                            "\tcpushl %%ic,(%0)\n"
221 +                            "\taddq%.l #1,%0\n"
222 +                            "\tcpushl %%ic,(%0)\n"
223 +                            "\taddq%.l #1,%0\n"
224 +                            "\tcpushl %%ic,(%0)" : : "a" (set));
225 +}
226 +
227 +
228 +/********************************************************************
229 + *  Disable the data cache completely
230 + ********************************************************************/
231 +void DcacheDisable(void)
232 +{
233 +       int newValue;
234 +       unsigned long flags;
235 +
236 +       local_save_flags(flags);
237 +       local_irq_disable();
238 +
239 +       DcacheFlushInvalidate();      /* begin by flushing the cache */
240 +       newValue = CACHE_DISABLE_MODE; /* disable it */
241 +       cacr_set(newValue);
242 +       local_irq_restore(flags);
243 +}
244 +
245 +/********************************************************************
246 + *  Unconditionally enable the data cache
247 + ********************************************************************/
248 +void DcacheEnable(void)
249 +{
250 +       cacr_set(CACHE_INITIAL_MODE);
251 +}
252 +
253 +
254 +unsigned long shadow_cacr;
255 +
256 +void cacr_set(unsigned long x)
257 +{
258 +       shadow_cacr = x;
259 +
260 +       __asm__ __volatile__ ("movec %0, %%cacr"
261 +                             : /* no outputs */
262 +                             : "r" (shadow_cacr));
263 +}
264 +
265 +unsigned long cacr_get(void)
266 +{
267 +       return shadow_cacr;
268 +}
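The ColdFire CACR cannot be read back with movec, which is why the code keeps shadow_cacr and routes every update through cacr_set(); cacr_get() just returns the shadow. A minimal read-modify-write sketch built on these helpers, where CACHE_EXTRA_BIT is a placeholder and not a definition from cfcache.h:

    static inline void cacr_or(unsigned long bits)
    {
            /* safe only because shadow_cacr tracks the last value written */
            cacr_set(cacr_get() | bits);
    }

    /* e.g. cacr_or(CACHE_EXTRA_BIT); */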
269 --- /dev/null
270 +++ b/arch/m68k/coldfire/config.c
271 @@ -0,0 +1,420 @@
272 +/*
273 + *  linux/arch/m68k/coldfire/config.c
274 + *
275 + *  Matt Waddel Matt.Waddel@freescale.com
276 + *  Copyright Freescale Semiconductor, Inc. 2007
277 + *
278 + *  This program is free software; you can redistribute it and/or modify
279 + *  it under the terms of the GNU General Public License as published by
280 + *  the Free Software Foundation; either version 2 of the License, or
281 + *  (at your option) any later version.
282 + */
283 +
284 +#include <linux/module.h>
285 +#include <linux/init.h>
286 +#include <linux/string.h>
287 +#include <linux/kernel.h>
288 +#include <linux/console.h>
289 +#include <linux/bootmem.h>
290 +#include <linux/mm.h>
291 +#include <asm/bootinfo.h>
292 +#include <asm/machdep.h>
293 +#include <asm/coldfire.h>
294 +#include <asm/cfcache.h>
295 +#include <asm/bootinfo.h>
296 +#include <asm/io.h>
297 +#include <asm/cfmmu.h>
298 +#include <asm/setup.h>
299 +#include <asm/irq.h>
300 +#include <asm/traps.h>
301 +#include <asm/movs.h>
302 +#include <asm/movs.h>
303 +#include <asm/page.h>
304 +#include <asm/pgalloc.h>
305 +#include <asm/mcf5445x_intc.h>
306 +#include <asm/mcf5445x_sdramc.h>
307 +#include <asm/mcf5445x_fbcs.h>
308 +#include <asm/mcf5445x_dtim.h>
309 +
310 +/* JKM -- testing */
311 +#include <linux/pfn.h>
312 +/* JKM */
313 +
314 +extern int get_irq_list(struct seq_file *p, void *v);
315 +extern char _text, _end;
316 +extern char _etext, _edata, __init_begin, __init_end;
317 +extern struct console mcfrs_console;
318 +extern char m68k_command_line[CL_SIZE];
319 +extern unsigned long availmem;
320 +
321 +static int irq_enable[NR_IRQS];
322 +unsigned long num_pages;
323 +
324 +void coldfire_sort_memrec(void)
325 +{
326 +       int i, j;
327 +
328 +       /* Sort the m68k_memory records by address */
329 +       for (i = 0; i < m68k_num_memory; ++i) {
330 +               for (j = i + 1; j < m68k_num_memory; ++j) {
331 +                       if (m68k_memory[i].addr > m68k_memory[j].addr) {
332 +                               struct mem_info tmp;
333 +                               tmp = m68k_memory[i];
334 +                               m68k_memory[i] = m68k_memory[j];
335 +                               m68k_memory[j] = tmp;
336 +                       }
337 +               }
338 +       }
339 +       /* Trim off discontiguous bits */
340 +       for (i = 1; i < m68k_num_memory; ++i) {
341 +               if ((m68k_memory[i-1].addr + m68k_memory[i-1].size) !=
342 +                       m68k_memory[i].addr) {
343 +                       printk(KERN_DEBUG "m68k_parse_bootinfo: addr gap between \
344 +                               0x%lx & 0x%lx\n",
345 +                               m68k_memory[i-1].addr+m68k_memory[i-1].size,
346 +                               m68k_memory[i].addr);
347 +                       m68k_num_memory = i;
348 +                       break;
349 +               }
350 +       }
351 +}
352 +
353 +int __init uboot_commandline(char *bootargs)
354 +{
355 +       int len = 0, cmd_line_len;
356 +       static struct uboot_record uboot_info;
357 +
358 +       extern unsigned long uboot_info_stk;
359 +
360 +       /* Add 0x80000000 to get post-remapped kernel memory location */
361 +       uboot_info.bd_info = (*(u32 *)(uboot_info_stk)) + 0x80000000;
362 +       uboot_info.initrd_start = (*(u32 *)(uboot_info_stk+4)) + 0x80000000;
363 +       uboot_info.initrd_end = (*(u32 *)(uboot_info_stk+8)) + 0x80000000;
364 +       uboot_info.cmd_line_start = (*(u32 *)(uboot_info_stk+12)) + 0x80000000;
365 +       uboot_info.cmd_line_stop = (*(u32 *)(uboot_info_stk+16)) + 0x80000000;
366 +
367 +       cmd_line_len = uboot_info.cmd_line_stop - uboot_info.cmd_line_start;
368 +       if ((cmd_line_len > 0) && (cmd_line_len < CL_SIZE-1))
369 +               len = (int)strncpy(bootargs, (char *)uboot_info.cmd_line_start,\
370 +                                  cmd_line_len);
371 +
372 +       return len;
373 +}
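Note that strncpy() returns a char *, so the (int) cast above yields a pointer value rather than the copied length; the only caller, cf_early_init(), merely tests the result for zero, so this works in practice. A clearer variant, sketched under the same assumptions:

    if ((cmd_line_len > 0) && (cmd_line_len < CL_SIZE-1)) {
            strncpy(bootargs, (char *)uboot_info.cmd_line_start, cmd_line_len);
            bootargs[cmd_line_len] = '\0';
            len = cmd_line_len;
    }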
374 +
375 +/*
376 + * This routine does things not done in the bootloader.
377 + */
378 +#define DEFAULT_COMMAND_LINE "root=/dev/mtdblock1 rw rootfstype=jffs2 ip=none mtdparts=physmap-flash.0:5M(kernel)ro,-(jffs2)"
379 +asmlinkage void __init cf_early_init(void)
380 +{
381 +       struct bi_record *record = (struct bi_record *) &_end;
382 +
383 +       extern char _end;
384 +
385 +       SET_VBR((void *)MCF_RAMBAR1);
386 +
387 +       /* Mask all interrupts */
388 +       MCF_INTC0_IMRL = 0xFFFFFFFF;
389 +       MCF_INTC0_IMRH = 0xFFFFFFFF;
390 +       MCF_INTC1_IMRL = 0xFFFFFFFF;
391 +       MCF_INTC1_IMRH = 0xFFFFFFFF;
392 +
393 +#if defined(CONFIG_NOR_FLASH_BASE)
394 +       MCF_FBCS_CSAR(1) = CONFIG_NOR_FLASH_BASE;
395 +#else
396 +       MCF_FBCS_CSAR(1) = 0x00000000;
397 +#endif
398 +
399 +#if CONFIG_SDRAM_SIZE > (256*1024*1024)
400 +       /* Init optional SDRAM chip select */
401 +       MCF_SDRAMC_SDCS(1) = (256*1024*1024) | 0x1B;
402 +#endif
403 +
404 +       m68k_machtype = MACH_CFMMU;
405 +       m68k_fputype = FPU_CFV4E;
406 +       m68k_mmutype = MMU_CFV4E;
407 +       m68k_cputype = CPU_CFV4E;
408 +
409 +       m68k_num_memory = 0;
410 +       m68k_memory[m68k_num_memory].addr = CONFIG_SDRAM_BASE;
411 +       m68k_memory[m68k_num_memory++].size = CONFIG_SDRAM_SIZE;
412 +
413 +       if (!uboot_commandline(m68k_command_line)) {
414 +#if defined(CONFIG_BOOTPARAM)
415 +               strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE-1);
416 +#else
417 +               strcpy(m68k_command_line, DEFAULT_COMMAND_LINE);
418 +#endif
419 +       }
420 +
421 +
422 +#if defined(CONFIG_BLK_DEV_INITRD)
423 +       /* add initrd image */
424 +       record = (struct bi_record *) ((void *)record + record->size);
425 +       record->tag = BI_RAMDISK;
426 +       record->size =  sizeof(record->tag) + sizeof(record->size)
427 +               + sizeof(record->data[0]) + sizeof(record->data[1]);
428 +#endif
429 +
430 +       /* Mark end of tags. */
431 +       record = (struct bi_record *) ((void *) record + record->size);
432 +       record->tag = 0;
433 +       record->data[0] = 0;
434 +       record->data[1] = 0;
435 +       record->size = sizeof(record->tag) + sizeof(record->size)
436 +               + sizeof(record->data[0]) + sizeof(record->data[1]);
437 +
438 +       /* Invalidate caches via CACR */
439 +       cacr_set(CACHE_DISABLE_MODE);
440 +
441 +       /* Turn on caches via CACR, enable EUSP */
442 +       cacr_set(CACHE_INITIAL_MODE);
443 +}
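cf_early_init() appends boot-info records directly after the kernel image at _end; the size arithmetic above matches the classic m68k bi_record layout, roughly as sketched below (see asm/bootinfo.h in this tree for the authoritative definition):

    struct bi_record {
            unsigned short tag;     /* BI_* identifier, 0 terminates the list */
            unsigned short size;    /* total record size in bytes */
            unsigned long data[0];  /* tag-specific payload (two words here) */
    };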
444 +
445 +void settimericr(unsigned int timer, unsigned int level)
446 +{
447 +       volatile unsigned char *icrp;
448 +       unsigned int icr;
449 +       unsigned char irq;
450 +
451 +       if (timer <= 2) {
452 +               switch (timer) {
453 +               case 2:  irq = 33; icr = MCFSIM_ICR_TIMER2; break;
454 +               default: irq = 32; icr = MCFSIM_ICR_TIMER1; break;
455 +               }
456 +
457 +               icrp = (volatile unsigned char *) (icr);
458 +               *icrp = level;
459 +               coldfire_enable_irq0(irq);
460 +       }
461 +}
462 +
463 +/* Assembler routines */
464 +asmlinkage void buserr(void);
465 +asmlinkage void trap(void);
466 +asmlinkage void system_call(void);
467 +asmlinkage void inthandler(void);
468 +
469 +void __init coldfire_trap_init(void)
470 +{
471 +       int i = 0;
472 +       e_vector *vectors;
473 +
474 +       vectors = (e_vector *)MCF_RAMBAR1;
475 +       /*
476 +        * There is a common trap handler and common interrupt
477 +        * handler that handle almost every vector. We treat
478 +        * the system call and bus error special, they get their
479 +        * own first level handlers.
480 +        */
481 +       for (i = 3; (i <= 23); i++)
482 +               vectors[i] = trap;
483 +       for (i = 33; (i <= 63); i++)
484 +               vectors[i] = trap;
485 +       for (i = 24; (i <= 31); i++)
486 +               vectors[i] = inthandler;
487 +       for (i = 64; (i < 255); i++)
488 +               vectors[i] = inthandler;
489 +
490 +       vectors[255] = 0;
491 +       vectors[2] = buserr;
492 +       vectors[32] = system_call;
493 +}
494 +
495 +void coldfire_tick(void)
496 +{
497 +       /* Reset the ColdFire timer */
498 +       __raw_writeb(MCF_DTIM_DTER_CAP | MCF_DTIM_DTER_REF, MCF_DTIM0_DTER);
499 +}
500 +
501 +void __init coldfire_sched_init(irq_handler_t handler)
502 +{
503 +       unsigned int    mcf_timerlevel = 5;
504 +       unsigned int    mcf_timervector = 64+32;
505 +
506 +       __raw_writew(MCF_DTIM_DTMR_RST_RST, MCF_DTIM0_DTMR);
507 +       __raw_writel(((MCF_BUSCLK / 16) / HZ), MCF_DTIM0_DTRR);
508 +       __raw_writew(MCF_DTIM_DTMR_ORRI | MCF_DTIM_DTMR_CLK_DIV16 |
509 +                    MCF_DTIM_DTMR_FRR  | MCF_DTIM_DTMR_RST_EN, \
510 +                    MCF_DTIM0_DTMR);
511 +
512 +       request_irq(mcf_timervector, handler, SA_INTERRUPT, \
513 +                   "timer", (void *)MCF_DTIM0_DTMR);
514 +
515 +       settimericr(1, mcf_timerlevel);
516 +}
517 +
518 +int timerirqpending(int timer)
519 +{
520 +       unsigned int imr = 0;
521 +
522 +       switch (timer) {
523 +       case 1:  imr = 0x1; break;
524 +       case 2:  imr = 0x2; break;
525 +       default: break;
526 +       }
527 +
528 +       return (getiprh() & imr);
529 +}
530 +
531 +unsigned long coldfire_gettimeoffset(void)
532 +{
533 +       volatile unsigned long trr, tcn, offset;
534 +
535 +       tcn = __raw_readw(MCF_DTIM0_DTCN);
536 +       trr = __raw_readl(MCF_DTIM0_DTRR);
537 +       offset = (tcn * (1000000 / HZ)) / trr;
538 +
539 +       /* Check if we just wrapped the counters and maybe missed a tick */
540 +       if ((offset < (1000000 / HZ / 2)) && timerirqpending(1))
541 +               offset += 1000000 / HZ;
542 +       return offset;
543 +}
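A worked example of the offset calculation above, assuming HZ = 100 and illustrative register values:

    /* DTRR is loaded with (MCF_BUSCLK / 16) / HZ, so trr counts per jiffy.
     * With HZ = 100 one jiffy is 1000000 / HZ = 10000 us, therefore
     *   offset = tcn * 10000 / trr  microseconds into the current jiffy.
     * If tcn == trr / 2, offset = 5000 us.  When tcn has just wrapped
     * (offset < 5000 us) and the timer interrupt is still pending, a full
     * jiffy (10000 us) is added so time never appears to step backwards. */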
544 +
545 +void coldfire_reboot(void)
546 +{
547 +       /* disable interrupts and do a software reset */
548 +       asm("movew #0x2700, %%sr\n\t"
549 +           "moveb #0x80, %%d0\n\t"
550 +           "moveb %%d0, 0xfc0a0000\n\t"
551 +           : : : "%d0");
552 +}
553 +
554 +/* int coldfire_hwclk(int i, struct rtc_time *t)
555 +{
556 +       printk ("Real time clock needs porting.\n");
557 +       return 0;
558 +}*/
559 +
560 +static void coldfire_get_model(char *model)
561 +{
562 +       sprintf(model, "Version 4 ColdFire");
563 +}
564 +
565 +void coldfire_enable_irq(unsigned int vec)
566 +{
567 +       unsigned long flags;
568 +
569 +       vec -= 64;
570 +
571 +       if (((int)vec < 0) || (vec > 63)) {
572 +               printk(KERN_WARNING "enable_irq %d failed\n", vec);
573 +               return;
574 +       }
575 +
576 +       local_irq_save(flags);
577 +       irq_enable[vec]++;
578 +       if (vec < 32)
579 +               MCF_INTC0_IMRL &= ~(1 << vec);
580 +       else
581 +               MCF_INTC0_IMRH &= ~(1 << (vec - 32));
582 +       local_irq_restore(flags);
583 +}
584 +
585 +void coldfire_disable_irq(unsigned int vec)
586 +{
587 +       unsigned long flags;
588 +
589 +       vec -= 64;
590 +
591 +       if (((int)vec < 0) || (vec > 63)) {
592 +               printk(KERN_WARNING "disable_irq %d failed\n", vec);
593 +               return;
594 +       }
595 +
596 +       local_irq_save(flags);
597 +       if (--irq_enable[vec] == 0) {
598 +               if (vec < 32)
599 +                       MCF_INTC0_IMRL |= (1 << vec);
600 +               else
601 +                       MCF_INTC0_IMRH |= (1 << (vec - 32));
602 +
603 +       }
604 +       local_irq_restore(flags);
605 +}
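coldfire_enable_irq() and coldfire_disable_irq() keep a per-vector reference count in irq_enable[], so a source is only masked again in IMRL/IMRH once every disable has been balanced. A usage sketch with an illustrative vector number:

    coldfire_enable_irq(64 + 17);   /* count 0 -> 1, mask bit cleared */
    coldfire_enable_irq(64 + 17);   /* count 1 -> 2, already unmasked */
    coldfire_disable_irq(64 + 17);  /* count 2 -> 1, stays unmasked */
    coldfire_disable_irq(64 + 17);  /* count 1 -> 0, mask bit set again */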
606 +
607 +static void __init
608 +coldfire_bootmem_alloc(unsigned long memory_start, unsigned long memory_end)
609 +{
610 +       unsigned long base_pfn;
611 +
612 +       /* compute total pages in system */
613 +       num_pages = PAGE_ALIGN(memory_end - PAGE_OFFSET) >> PAGE_SHIFT;
614 +
615 +       /* align start/end to page boundaries */
616 +       memory_start = PAGE_ALIGN(memory_start);
617 +       memory_end = memory_end & PAGE_MASK;
618 +
619 +       /* page numbers */
620 +       base_pfn = __pa(PAGE_OFFSET) >> PAGE_SHIFT;
621 +       min_low_pfn = __pa(memory_start) >> PAGE_SHIFT;
622 +       max_low_pfn = __pa(memory_end) >> PAGE_SHIFT;
623 +
624 +       high_memory = (void *)memory_end;
625 +       availmem = memory_start;
626 +
627 +       /* setup bootmem data */
628 +       m68k_setup_node(0);
629 +       availmem += init_bootmem_node(NODE_DATA(0), min_low_pfn,
630 +               base_pfn, max_low_pfn);
631 +       availmem = PAGE_ALIGN(availmem);
632 +       free_bootmem(__pa(availmem), memory_end - (availmem));
633 +}
634 +
635 +void __init config_coldfire(void)
636 +{
637 +       unsigned long endmem, startmem;
638 +       int i;
639 +
640 +       /*
641 +        * Calculate endmem from m68k_memory, assume all are contiguous
642 +        */
643 +       startmem = ((((int) &_end) + (PAGE_SIZE - 1)) & PAGE_MASK);
644 +       endmem = PAGE_OFFSET;
645 +       for (i = 0; i < m68k_num_memory; ++i)
646 +               endmem += m68k_memory[i].size;
647 +
648 +       printk(KERN_INFO "starting up linux startmem 0x%lx, endmem 0x%lx, \
649 +               size %luMB\n", startmem,  endmem, (endmem - startmem) >> 20);
650 +
651 +       memset(irq_enable, 0, sizeof(irq_enable));
652 +
653 +       /*
654 +        * Setup coldfire mach-specific handlers
655 +        */
656 +       mach_max_dma_address    = 0xffffffff;
657 +       mach_sched_init         = coldfire_sched_init;
658 +       mach_tick               = coldfire_tick;
659 +       mach_gettimeoffset      = coldfire_gettimeoffset;
660 +       mach_reset              = coldfire_reboot;
661 +/*     mach_hwclk              = coldfire_hwclk; to be done */
662 +       mach_get_model          = coldfire_get_model;
663 +
664 +       coldfire_bootmem_alloc(startmem, endmem);
665 +
666 +       /*
667 +        * initrd setup
668 +        */
669 +/* #ifdef CONFIG_BLK_DEV_INITRD
670 +       if (m68k_ramdisk.size)  {
671 +               reserve_bootmem (__pa(m68k_ramdisk.addr), m68k_ramdisk.size);
672 +               initrd_start = (unsigned long) m68k_ramdisk.addr;
673 +               initrd_end = initrd_start + m68k_ramdisk.size;
674 +               printk (KERN_DEBUG "initrd: %08lx - %08lx\n", initrd_start,
675 +                       initrd_end);
676 +       }
677 +#endif */
678 +
679 +#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_FRAMEBUFFER_CONSOLE)
680 +       conswitchp = &dummy_con;
681 +#endif
682 +
683 +#if defined(CONFIG_SERIAL_COLDFIRE)
684 +       /*
685 +        * This causes trouble when it is re-registered later.
686 +        * Currently this is fixed by conditionally commenting
687 +        * out the register_console in mcf_serial.c
688 +        */
689 +       register_console(&mcfrs_console);
690 +#endif
691 +}
692 --- /dev/null
693 +++ b/arch/m68k/coldfire/entry.S
694 @@ -0,0 +1,701 @@
695 +/*
696 + *  arch/m68k/coldfire/entry.S
697 + *
698 + *  Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
699 + *  Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
700 + *                      Kenneth Albanowski <kjahds@kjahds.com>,
701 + *  Copyright (C) 2000  Lineo Inc. (www.lineo.com)
702 + *  Copyright (C) 2004-2006  Macq Electronique SA. (www.macqel.com)
703 + *  Matt Waddel Matt.Waddel@freescale.com
704 + *  Kurt Mahan kmahan@freescale.com
705 + *  Copyright Freescale Semiconductor, Inc. 2007
706 + *
707 + * Based on:
708 + *
709 + *  arch/m68knommu/platform/5307/entry.S &
710 + *  arch/m68k/kernel/entry.S
711 + *
712 + *  Copyright (C) 1991, 1992  Linus Torvalds
713 + *
714 + * This file is subject to the terms and conditions of the GNU General Public
715 + * License.  See the file README.legal in the main directory of this archive
716 + * for more details.
717 + *
718 + * Linux/m68k support by Hamish Macdonald
719 + *
720 + * ColdFire support by Greg Ungerer (gerg@snapgear.com)
721 + * 5307 fixes by David W. Miller
722 + * linux 2.4 support David McCullough <davidm@snapgear.com>
723 + * Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be>
724 + * Ported to mmu Coldfire by Matt Waddel
725 + */
726 +
727 +#include <linux/sys.h>
728 +#include <linux/linkage.h>
729 +#include <asm/cf_entry.h>
730 +#include <asm/errno.h>
731 +#include <asm/setup.h>
732 +#include <asm/segment.h>
733 +#include <asm/traps.h>
734 +#include <asm/unistd.h>
735 +
736 +/*
737 + * TASK_INFO:
738 + *
739 + *  - TINFO_PREEMPT (struct thread_info / preempt_count)
740 + *      Used to keep track of preemptability
741 + *  - TINFO_FLAGS (struct thread_info / flags - include/asm-m68k/thread_info.h)
742 + *      Various bit flags that are checked for scheduling/tracing
743 + *     Bits 0-7  are checked every exception exit
744 + *          8-15 are checked every syscall exit
745 + *
746 + *      TIF_SIGPENDING         6
747 + *      TIF_NEED_RESCHED       7
748 + *      TIF_DELAYED_TRACE      14
749 + *      TIF_SYSCALL_TRACE      15
750 + *      TIF_MEMDIE             16 (never checked here)
751 + */
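The handlers below test single bytes of the 32-bit flags word; since m68k/ColdFire is big-endian, the byte at TINFO_FLAGS+2 holds bits 15..8 (the syscall-exit flags) and the byte at TINFO_FLAGS+3 holds bits 7..0 (the exception-exit flags). A sketch of the layout:

    /*  byte offset:  +0       +1       +2       +3
     *  flag bits:    31..24   23..16   15..8    7..0
     *
     *  "tstb ...TINFO_FLAGS+2" followed by jmi therefore tests bit 15
     *  (TIF_SYSCALL_TRACE) via the sign bit of that byte, and
     *  "moveb ...TINFO_FLAGS+3" fetches TIF_SIGPENDING / TIF_NEED_RESCHED. */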
752 +
753 +.bss
754 +
755 +sw_ksp:
756 +.long  0
757 +
758 +sw_usp:
759 +.long  0
760 +
761 +.text
762 +
763 +.globl system_call
764 +.globl buserr
765 +.globl trap
766 +.globl resume
767 +.globl ret_from_exception
768 +.globl ret_from_signal
769 +.globl sys_call_table
770 +.globl ret_from_interrupt
771 +.globl inthandler
772 +
773 +ENTRY(buserr)
774 +       SAVE_ALL_INT
775 +       GET_CURRENT(%d0)
776 +       movel   %sp,%sp@-               /* stack frame pointer argument */
777 +       jsr     buserr_c
778 +       addql   #4,%sp
779 +       jra     .Lret_from_exception
780 +
781 +ENTRY(trap)
782 +       SAVE_ALL_INT
783 +       GET_CURRENT(%d0)
784 +       movel   %sp,%sp@-               /* stack frame pointer argument */
785 +       jsr     trap_c
786 +       addql   #4,%sp
787 +       jra     .Lret_from_exception
788 +
789 +       /* After a fork we jump here directly from resume,
790 +          %d1 contains the previous task, as required by schedule_tail */
791 +ENTRY(ret_from_fork)
792 +       movel   %d1,%sp@-
793 +       jsr     schedule_tail
794 +       addql   #4,%sp
795 +       jra     .Lret_from_exception
796 +
797 +do_trace_entry:
798 +       movel   #-ENOSYS,%d1            /* needed for strace */
799 +       movel   %d1,%sp@(PT_D0)
800 +       subql   #4,%sp
801 +       SAVE_SWITCH_STACK
802 +       jbsr    syscall_trace
803 +       RESTORE_SWITCH_STACK
804 +       addql   #4,%sp
805 +       movel   %sp@(PT_ORIG_D0),%d0
806 +       cmpl    #NR_syscalls,%d0
807 +       jcs     syscall
808 +badsys:
809 +       movel   #-ENOSYS,%d1
810 +       movel   %d1,%sp@(PT_D0)
811 +       jra     ret_from_exception
812 +
813 +do_trace_exit:
814 +       subql   #4,%sp
815 +       SAVE_SWITCH_STACK
816 +       jbsr    syscall_trace
817 +       RESTORE_SWITCH_STACK
818 +       addql   #4,%sp
819 +       jra     .Lret_from_exception
820 +
821 +ENTRY(ret_from_signal)
822 +       RESTORE_SWITCH_STACK
823 +       addql   #4,%sp
824 +       jra     .Lret_from_exception
825 +
826 +ENTRY(system_call)
827 +       SAVE_ALL_SYS
828 +
829 +       GET_CURRENT(%d1)
830 +       /* save top of frame */
831 +       movel   %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
832 +
833 +       /* syscall trace */
834 +       tstb    %curptr@(TASK_INFO+TINFO_FLAGS+2)
835 +       jmi     do_trace_entry          /* SYSCALL_TRACE is set */
836 +       cmpl    #NR_syscalls,%d0
837 +       jcc     badsys
838 +syscall:
839 +       movel   #sys_call_table,%a0
840 +       asll    #2,%d0
841 +       addl    %d0,%a0
842 +       movel   %a0@,%a0
843 +       jsr     %a0@
844 +       movel   %d0,%sp@(PT_D0)         /* save the return value */
845 +ret_from_syscall:
846 +       movew   %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
847 +       jne     syscall_exit_work       /* flags set so process */
848 +1:     RESTORE_ALL
849 +
850 +syscall_exit_work:
851 +       btst    #5,%sp@(PT_SR)          /* check if returning to kernel */
852 +       bnes    1b                      /* if so, skip resched, signals */
853 +
854 +       btstl   #15,%d0                 /* check if SYSCALL_TRACE */
855 +       jne     do_trace_exit
856 +       btstl   #14,%d0                 /* check if DELAYED_TRACE */
857 +       jne     do_delayed_trace
858 +       btstl   #6,%d0                  /* check if SIGPENDING */
859 +       jne     do_signal_return
860 +       pea     resume_userspace
861 +       jra     schedule
862 +
863 +ENTRY(ret_from_exception)
864 +.Lret_from_exception:
865 +       btst    #5,%sp@(PT_SR)          /* check if returning to kernel */
866 +       bnes    1f                      /* if so, skip resched, signals */
867 +       movel   %d0,%sp@-               /* Only allow interrupts when we are  */
868 +       move    %sr,%d0                 /* last one on the kernel stack,      */
869 +       andl    #ALLOWINT,%d0           /* otherwise stack overflow can occur */
870 +       move    %d0,%sr                 /* during heavy interrupt load.       */
871 +       movel   %sp@+,%d0
872 +
873 +resume_userspace:
874 +       moveb   %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
875 +       jne     exit_work       /* SIGPENDING and/or NEED_RESCHED set */
876 +1:     RESTORE_ALL
877 +
878 +exit_work:
879 +       /* save top of frame */
880 +       movel   %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
881 +       btstl   #6,%d0                  /* check for SIGPENDING in flags */
882 +       jne     do_signal_return
883 +       pea     resume_userspace
884 +       jra     schedule
885 +
886 +do_signal_return:
887 +       subql   #4,%sp                  /* dummy return address */
888 +       SAVE_SWITCH_STACK
889 +       pea     %sp@(SWITCH_STACK_SIZE)
890 +       clrl    %sp@-
891 +       bsrl    do_signal
892 +       addql   #8,%sp
893 +       RESTORE_SWITCH_STACK
894 +       addql   #4,%sp
895 +       jbra    resume_userspace
896 +
897 +do_delayed_trace:
898 +       bclr    #7,%sp@(PT_SR)          /* clear trace bit in SR */
899 +       pea     1                       /* send SIGTRAP */
900 +       movel   %curptr,%sp@-
901 +       pea     LSIGTRAP
902 +       jbsr    send_sig
903 +       addql   #8,%sp
904 +       addql   #4,%sp
905 +       jbra    resume_userspace
906 +
907 +/*
908 + * This is the interrupt handler (for all hardware interrupt
909 + * sources). It figures out the vector number and calls the appropriate
910 + * interrupt service routine directly.
911 + */
912 +ENTRY(inthandler)
913 +       SAVE_ALL_INT
914 +       GET_CURRENT(%d0)
915 +       addql   #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
916 +       /* put exception # in d0 */
917 +       movel   %sp@(PT_VECTOR),%d0
918 +       swap    %d0                     /* extract bits 25:18 */
919 +       lsrl    #2,%d0
920 +       andl    #0x0ff,%d0
921 +
922 +       movel   %sp,%sp@-
923 +       movel   %d0,%sp@-               /* put vector # on stack */
924 +auto_irqhandler_fixup = . + 2
925 +       jbsr    process_int             /* process the IRQ */
926 +       addql   #8,%sp                  /* pop parameters off stack */
927 +
928 +ENTRY(ret_from_interrupt)
929 +ret_from_interrupt:
930 +
931 +       subql   #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
932 +       jeq     ret_from_last_interrupt
933 +2:     RESTORE_ALL
934 +
935 +       ALIGN
936 +ret_from_last_interrupt:
937 +       moveb   %sp@(PT_SR),%d0
938 +       andl    #(~ALLOWINT>>8)&0xff,%d0
939 +       jne     2b
940 +
941 +       /* check if we need to do software interrupts */
942 +       tstl    irq_stat+CPUSTAT_SOFTIRQ_PENDING
943 +       jeq     .Lret_from_exception
944 +       pea     ret_from_exception
945 +       jra     do_softirq
946 +
947 +ENTRY(user_inthandler)
948 +       SAVE_ALL_INT
949 +       GET_CURRENT(%d0)
950 +       addql   #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
951 +       /* put exception # in d0 */
952 +       movel   %sp@(PT_VECTOR),%d0
953 +user_irqvec_fixup = . + 2
954 +       swap    %d0                     /* extract bits 25:18 */
955 +       lsrl    #2,%d0
956 +       andl    #0x0ff,%d0
957 +
958 +       movel   %sp,%sp@-
959 +       movel   %d0,%sp@-               /* put vector # on stack */
960 +user_irqhandler_fixup = . + 2
961 +       jbsr    process_int             /* process the IRQ */
962 +       addql   #8,%sp                  /* pop parameters off stack */
963 +
964 +       subql   #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
965 +       jeq     ret_from_last_interrupt
966 +        RESTORE_ALL
967 +
968 +/* Handler for uninitialized and spurious interrupts */
969 +
970 +ENTRY(bad_inthandler)
971 +       SAVE_ALL_INT
972 +       GET_CURRENT(%d0)
973 +       addql   #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
974 +
975 +       movel   %sp,%sp@-
976 +       jsr     handle_badint
977 +       addql   #4,%sp
978 +
979 +       subql   #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
980 +       jeq     ret_from_last_interrupt
981 +       RESTORE_ALL
982 +
983 +ENTRY(sys_fork)
984 +       SAVE_SWITCH_STACK
985 +       pea     %sp@(SWITCH_STACK_SIZE)
986 +       jbsr    m68k_fork
987 +       addql   #4,%sp
988 +       RESTORE_SWITCH_STACK
989 +       rts
990 +
991 +ENTRY(sys_clone)
992 +       SAVE_SWITCH_STACK
993 +       pea     %sp@(SWITCH_STACK_SIZE)
994 +       jbsr    m68k_clone
995 +       addql   #4,%sp
996 +       RESTORE_SWITCH_STACK
997 +       rts
998 +
999 +ENTRY(sys_vfork)
1000 +       SAVE_SWITCH_STACK
1001 +       pea     %sp@(SWITCH_STACK_SIZE)
1002 +       jbsr    m68k_vfork
1003 +       addql   #4,%sp
1004 +       RESTORE_SWITCH_STACK
1005 +       rts
1006 +
1007 +ENTRY(sys_sigsuspend)
1008 +       SAVE_SWITCH_STACK
1009 +       pea     %sp@(SWITCH_STACK_SIZE)
1010 +       jbsr    do_sigsuspend
1011 +       addql   #4,%sp
1012 +       RESTORE_SWITCH_STACK
1013 +       rts
1014 +
1015 +ENTRY(sys_rt_sigsuspend)
1016 +       SAVE_SWITCH_STACK
1017 +       pea     %sp@(SWITCH_STACK_SIZE)
1018 +       jbsr    do_rt_sigsuspend
1019 +       addql   #4,%sp
1020 +       RESTORE_SWITCH_STACK
1021 +       rts
1022 +
1023 +ENTRY(sys_sigreturn)
1024 +       SAVE_SWITCH_STACK
1025 +       jbsr    do_sigreturn
1026 +       RESTORE_SWITCH_STACK
1027 +       rts
1028 +
1029 +ENTRY(sys_rt_sigreturn)
1030 +       SAVE_SWITCH_STACK
1031 +       jbsr    do_rt_sigreturn
1032 +       RESTORE_SWITCH_STACK
1033 +       rts
1034 +
1035 +resume:
1036 +       /*
1037 +        * Beware - when entering resume, prev (the current task) is
1038 +        * in a0, next (the new task) is in a1, so don't change these
1039 +        * registers until their contents are no longer needed.
1040 +        */
1041 +
1042 +       /* save sr */
1043 +       movew   %sr,%d0
1044 +       movew   %d0,%a0@(TASK_THREAD+THREAD_SR)
1045 +
1046 +       /* save usp */
1047 +       /* Save USP via %a1 (which is saved/restored from %d0) */
1048 +       movel   %a1,%d0
1049 +       movel   %usp,%a1
1050 +       movel   %a1,%a0@(TASK_THREAD+THREAD_USP)
1051 +       movel   %d0,%a1
1052 +
1053 +       /* save non-scratch registers on stack */
1054 +       SAVE_SWITCH_STACK
1055 +
1056 +       /* save current kernel stack pointer */
1057 +       movel   %sp,%a0@(TASK_THREAD+THREAD_KSP)
1058 +
1059 +       /* Return previous task in %d1 */
1060 +       movel   %curptr,%d1
1061 +
1062 +       /* switch to new task (a1 contains new task) */
1063 +       movel   %a1,%curptr
1064 +
1065 +       /* restore the kernel stack pointer */
1066 +       movel   %a1@(TASK_THREAD+THREAD_KSP),%sp
1067 +
1068 +       /* restore non-scratch registers */
1069 +       RESTORE_SWITCH_STACK
1070 +
1071 +       /* restore user stack pointer */
1072 +       movel   %a1@(TASK_THREAD+THREAD_USP),%a0
1073 +       movel   %a0,%usp
1074 +
1075 +       /* restore status register */
1076 +       movew   %a1@(TASK_THREAD+THREAD_SR),%d0
1077 +       movew   %d0,%sr
1078 +
1079 +       rts
1080 +
1081 +.data
1082 +ALIGN
1083 +sys_call_table:
1084 +       .long sys_ni_syscall    /* 0  -  old "setup()" system call*/
1085 +       .long sys_exit
1086 +       .long sys_fork
1087 +       .long sys_read
1088 +       .long sys_write
1089 +       .long sys_open          /* 5 */
1090 +       .long sys_close
1091 +       .long sys_waitpid
1092 +       .long sys_creat
1093 +       .long sys_link
1094 +       .long sys_unlink        /* 10 */
1095 +       .long sys_execve
1096 +       .long sys_chdir
1097 +       .long sys_time
1098 +       .long sys_mknod
1099 +       .long sys_chmod         /* 15 */
1100 +       .long sys_chown16
1101 +       .long sys_ni_syscall                    /* old break syscall holder */
1102 +       .long sys_stat
1103 +       .long sys_lseek
1104 +       .long sys_getpid        /* 20 */
1105 +       .long sys_mount
1106 +       .long sys_oldumount
1107 +       .long sys_setuid16
1108 +       .long sys_getuid16
1109 +       .long sys_stime         /* 25 */
1110 +       .long sys_ptrace
1111 +       .long sys_alarm
1112 +       .long sys_fstat
1113 +       .long sys_pause
1114 +       .long sys_utime         /* 30 */
1115 +       .long sys_ni_syscall                    /* old stty syscall holder */
1116 +       .long sys_ni_syscall                    /* old gtty syscall holder */
1117 +       .long sys_access
1118 +       .long sys_nice
1119 +       .long sys_ni_syscall    /* 35 */        /* old ftime syscall holder */
1120 +       .long sys_sync
1121 +       .long sys_kill
1122 +       .long sys_rename
1123 +       .long sys_mkdir
1124 +       .long sys_rmdir         /* 40 */
1125 +       .long sys_dup
1126 +       .long sys_pipe
1127 +       .long sys_times
1128 +       .long sys_ni_syscall                    /* old prof syscall holder */
1129 +       .long sys_brk           /* 45 */
1130 +       .long sys_setgid16
1131 +       .long sys_getgid16
1132 +       .long sys_signal
1133 +       .long sys_geteuid16
1134 +       .long sys_getegid16     /* 50 */
1135 +       .long sys_acct
1136 +       .long sys_umount                        /* recycled never used phys() */
1137 +       .long sys_ni_syscall                    /* old lock syscall holder */
1138 +       .long sys_ioctl
1139 +       .long sys_fcntl         /* 55 */
1140 +       .long sys_ni_syscall                    /* old mpx syscall holder */
1141 +       .long sys_setpgid
1142 +       .long sys_ni_syscall                    /* old ulimit syscall holder */
1143 +       .long sys_ni_syscall
1144 +       .long sys_umask         /* 60 */
1145 +       .long sys_chroot
1146 +       .long sys_ustat
1147 +       .long sys_dup2
1148 +       .long sys_getppid
1149 +       .long sys_getpgrp       /* 65 */
1150 +       .long sys_setsid
1151 +       .long sys_sigaction
1152 +       .long sys_sgetmask
1153 +       .long sys_ssetmask
1154 +       .long sys_setreuid16    /* 70 */
1155 +       .long sys_setregid16
1156 +       .long sys_sigsuspend
1157 +       .long sys_sigpending
1158 +       .long sys_sethostname
1159 +       .long sys_setrlimit     /* 75 */
1160 +       .long sys_old_getrlimit
1161 +       .long sys_getrusage
1162 +       .long sys_gettimeofday
1163 +       .long sys_settimeofday
1164 +       .long sys_getgroups16   /* 80 */
1165 +       .long sys_setgroups16
1166 +       .long old_select
1167 +       .long sys_symlink
1168 +       .long sys_lstat
1169 +       .long sys_readlink      /* 85 */
1170 +       .long sys_uselib
1171 +       .long sys_swapon
1172 +       .long sys_reboot
1173 +       .long old_readdir
1174 +       .long old_mmap          /* 90 */
1175 +       .long sys_munmap
1176 +       .long sys_truncate
1177 +       .long sys_ftruncate
1178 +       .long sys_fchmod
1179 +       .long sys_fchown16      /* 95 */
1180 +       .long sys_getpriority
1181 +       .long sys_setpriority
1182 +       .long sys_ni_syscall                    /* old profil syscall holder */
1183 +       .long sys_statfs
1184 +       .long sys_fstatfs       /* 100 */
1185 +       .long sys_ni_syscall                    /* ioperm for i386 */
1186 +       .long sys_socketcall
1187 +       .long sys_syslog
1188 +       .long sys_setitimer
1189 +       .long sys_getitimer     /* 105 */
1190 +       .long sys_newstat
1191 +       .long sys_newlstat
1192 +       .long sys_newfstat
1193 +       .long sys_ni_syscall
1194 +       .long sys_ni_syscall    /* 110 */       /* iopl for i386 */
1195 +       .long sys_vhangup
1196 +       .long sys_ni_syscall                    /* obsolete idle() syscall */
1197 +       .long sys_ni_syscall                    /* vm86old for i386 */
1198 +       .long sys_wait4
1199 +       .long sys_swapoff       /* 115 */
1200 +       .long sys_sysinfo
1201 +       .long sys_ipc
1202 +       .long sys_fsync
1203 +       .long sys_sigreturn
1204 +       .long sys_clone         /* 120 */
1205 +       .long sys_setdomainname
1206 +       .long sys_newuname
1207 +       .long sys_cacheflush                    /* modify_ldt for i386 */
1208 +       .long sys_adjtimex
1209 +       .long sys_mprotect      /* 125 */
1210 +       .long sys_sigprocmask
1211 +       .long sys_ni_syscall                    /* old "create_module" */
1212 +       .long sys_init_module
1213 +       .long sys_delete_module
1214 +       .long sys_ni_syscall    /* 130 - old "get_kernel_syms" */
1215 +       .long sys_quotactl
1216 +       .long sys_getpgid
1217 +       .long sys_fchdir
1218 +       .long sys_bdflush
1219 +       .long sys_sysfs         /* 135 */
1220 +       .long sys_personality
1221 +       .long sys_ni_syscall                    /* for afs_syscall */
1222 +       .long sys_setfsuid16
1223 +       .long sys_setfsgid16
1224 +       .long sys_llseek        /* 140 */
1225 +       .long sys_getdents
1226 +       .long sys_select
1227 +       .long sys_flock
1228 +       .long sys_msync
1229 +       .long sys_readv         /* 145 */
1230 +       .long sys_writev
1231 +       .long sys_getsid
1232 +       .long sys_fdatasync
1233 +       .long sys_sysctl
1234 +       .long sys_mlock         /* 150 */
1235 +       .long sys_munlock
1236 +       .long sys_mlockall
1237 +       .long sys_munlockall
1238 +       .long sys_sched_setparam
1239 +       .long sys_sched_getparam        /* 155 */
1240 +       .long sys_sched_setscheduler
1241 +       .long sys_sched_getscheduler
1242 +       .long sys_sched_yield
1243 +       .long sys_sched_get_priority_max
1244 +       .long sys_sched_get_priority_min  /* 160 */
1245 +       .long sys_sched_rr_get_interval
1246 +       .long sys_nanosleep
1247 +       .long sys_mremap
1248 +       .long sys_setresuid16
1249 +       .long sys_getresuid16   /* 165 */
1250 +       .long sys_getpagesize
1251 +       .long sys_ni_syscall                    /* old sys_query_module */
1252 +       .long sys_poll
1253 +       .long sys_nfsservctl
1254 +       .long sys_setresgid16   /* 170 */
1255 +       .long sys_getresgid16
1256 +       .long sys_prctl
1257 +       .long sys_rt_sigreturn
1258 +       .long sys_rt_sigaction
1259 +       .long sys_rt_sigprocmask        /* 175 */
1260 +       .long sys_rt_sigpending
1261 +       .long sys_rt_sigtimedwait
1262 +       .long sys_rt_sigqueueinfo
1263 +       .long sys_rt_sigsuspend
1264 +       .long sys_pread64       /* 180 */
1265 +       .long sys_pwrite64
1266 +       .long sys_lchown16;
1267 +       .long sys_getcwd
1268 +       .long sys_capget
1269 +       .long sys_capset        /* 185 */
1270 +       .long sys_sigaltstack
1271 +       .long sys_sendfile
1272 +       .long sys_ni_syscall                    /* streams1 */
1273 +       .long sys_ni_syscall                    /* streams2 */
1274 +       .long sys_vfork         /* 190 */
1275 +       .long sys_getrlimit
1276 +       .long sys_mmap2
1277 +       .long sys_truncate64
1278 +       .long sys_ftruncate64
1279 +       .long sys_stat64        /* 195 */
1280 +       .long sys_lstat64
1281 +       .long sys_fstat64
1282 +       .long sys_chown
1283 +       .long sys_getuid
1284 +       .long sys_getgid        /* 200 */
1285 +       .long sys_geteuid
1286 +       .long sys_getegid
1287 +       .long sys_setreuid
1288 +       .long sys_setregid
1289 +       .long sys_getgroups     /* 205 */
1290 +       .long sys_setgroups
1291 +       .long sys_fchown
1292 +       .long sys_setresuid
1293 +       .long sys_getresuid
1294 +       .long sys_setresgid     /* 210 */
1295 +       .long sys_getresgid
1296 +       .long sys_lchown
1297 +       .long sys_setuid
1298 +       .long sys_setgid
1299 +       .long sys_setfsuid      /* 215 */
1300 +       .long sys_setfsgid
1301 +       .long sys_pivot_root
1302 +       .long sys_ni_syscall
1303 +       .long sys_ni_syscall
1304 +       .long sys_getdents64    /* 220 */
1305 +       .long sys_gettid
1306 +       .long sys_tkill
1307 +       .long sys_setxattr
1308 +       .long sys_lsetxattr
1309 +       .long sys_fsetxattr     /* 225 */
1310 +       .long sys_getxattr
1311 +       .long sys_lgetxattr
1312 +       .long sys_fgetxattr
1313 +       .long sys_listxattr
1314 +       .long sys_llistxattr    /* 230 */
1315 +       .long sys_flistxattr
1316 +       .long sys_removexattr
1317 +       .long sys_lremovexattr
1318 +       .long sys_fremovexattr
1319 +       .long sys_futex         /* 235 */
1320 +       .long sys_sendfile64
1321 +       .long sys_mincore
1322 +       .long sys_madvise
1323 +       .long sys_fcntl64
1324 +       .long sys_readahead     /* 240 */
1325 +       .long sys_io_setup
1326 +       .long sys_io_destroy
1327 +       .long sys_io_getevents
1328 +       .long sys_io_submit
1329 +       .long sys_io_cancel     /* 245 */
1330 +       .long sys_fadvise64
1331 +       .long sys_exit_group
1332 +       .long sys_lookup_dcookie
1333 +       .long sys_epoll_create
1334 +       .long sys_epoll_ctl     /* 250 */
1335 +       .long sys_epoll_wait
1336 +       .long sys_remap_file_pages
1337 +       .long sys_set_tid_address
1338 +       .long sys_timer_create
1339 +       .long sys_timer_settime /* 255 */
1340 +       .long sys_timer_gettime
1341 +       .long sys_timer_getoverrun
1342 +       .long sys_timer_delete
1343 +       .long sys_clock_settime
1344 +       .long sys_clock_gettime /* 260 */
1345 +       .long sys_clock_getres
1346 +       .long sys_clock_nanosleep
1347 +       .long sys_statfs64
1348 +       .long sys_fstatfs64
1349 +       .long sys_tgkill        /* 265 */
1350 +       .long sys_utimes
1351 +       .long sys_fadvise64_64
1352 +       .long sys_mbind 
1353 +       .long sys_get_mempolicy
1354 +       .long sys_set_mempolicy /* 270 */
1355 +       .long sys_mq_open
1356 +       .long sys_mq_unlink
1357 +       .long sys_mq_timedsend
1358 +       .long sys_mq_timedreceive
1359 +       .long sys_mq_notify     /* 275 */
1360 +       .long sys_mq_getsetattr
1361 +       .long sys_waitid
1362 +       .long sys_ni_syscall                    /* for sys_vserver */
1363 +       .long sys_add_key
1364 +       .long sys_request_key   /* 280 */
1365 +       .long sys_keyctl
1366 +       .long sys_ioprio_set
1367 +       .long sys_ioprio_get
1368 +       .long sys_inotify_init
1369 +       .long sys_inotify_add_watch     /* 285 */
1370 +       .long sys_inotify_rm_watch
1371 +       .long sys_migrate_pages
1372 +       .long sys_openat
1373 +       .long sys_mkdirat
1374 +       .long sys_mknodat               /* 290 */
1375 +       .long sys_fchownat
1376 +       .long sys_futimesat
1377 +       .long sys_fstatat64
1378 +       .long sys_unlinkat
1379 +       .long sys_renameat              /* 295 */
1380 +       .long sys_linkat
1381 +       .long sys_symlinkat
1382 +       .long sys_readlinkat
1383 +       .long sys_fchmodat
1384 +       .long sys_faccessat             /* 300 */
1385 +       .long sys_ni_syscall                    /* Reserved for pselect6 */
1386 +       .long sys_ni_syscall                    /* Reserved for ppoll */
1387 +       .long sys_unshare
1388 +       .long sys_set_robust_list
1389 +       .long sys_get_robust_list       /* 305 */
1390 +       .long sys_splice
1391 +       .long sys_sync_file_range
1392 +       .long sys_tee
1393 +       .long sys_vmsplice
1394 +       .long sys_move_pages            /* 310 */
1395 +
1396 --- /dev/null
1397 +++ b/arch/m68k/coldfire/head.S
1398 @@ -0,0 +1,474 @@
1399 +/*
1400 + *  head.S is the MMU-enabled, ColdFire-specific initial boot code
1401 + *
1402 + *  Ported to ColdFire by
1403 + *  Matt Waddel Matt.Waddel@freescale.com
1404 + *  Kurt Mahan kmahan@freescale.com
1405 + *  Copyright Freescale Semiconductor, Inc. 2007
1406 + *
1407 + *  This program is free software; you can redistribute it and/or modify
1408 + *  it under the terms of the GNU General Public License as published by
1409 + *  the Free Software Foundation; either version 2 of the License, or
1410 + *  (at your option) any later version.
1411 + *
1412 + *  Parts of this code came from arch/m68k/kernel/head.S
1413 + */
1414 +#include <linux/linkage.h>
1415 +#include <linux/init.h>
1416 +#include <asm/bootinfo.h>
1417 +#include <asm/setup.h>
1418 +#include <asm/entry.h>
1419 +#include <asm/pgtable.h>
1420 +#include <asm/page.h>
1421 +#include <asm/coldfire.h>
1422 +#include <asm/mcfuart.h>
1423 +#include <asm/cfcache.h>
1424 +
1425 +#define DEBUG
1426 +
1427 +.globl kernel_pg_dir
1428 +.globl availmem
1429 +.globl set_context
1430 +.globl set_fpga
1431 +
1432 +#ifdef DEBUG
1433 +/* When debugging use readable names for labels */
1434 +#ifdef __STDC__
1435 +#define L(name) .head.S.##name
1436 +#else
1437 +#define L(name) .head.S./**/name
1438 +#endif
1439 +#else
1440 +#ifdef __STDC__
1441 +#define L(name) .L##name
1442 +#else
1443 +#define L(name) .L/**/name
1444 +#endif
1445 +#endif
1446 +
1447 +/* The __INITDATA stuff is a no-op when ftrace or kgdb are turned on */
1448 +#ifndef __INITDATA
1449 +#define __INITDATA     .data
1450 +#define __FINIT                .previous
1451 +#endif
1452 +
1453 +/*
1454 + * Setup ACR mappings to provide the following memory map:
1455 + *   Data
1456 + *     0xA0000000 -> 0xAFFFFFFF [0] NO CACHE / PRECISE / SUPER ONLY
1457 + *     0xFC000000 -> 0xFCFFFFFF [1] NO CACHE / PRECISE / SUPER ONLY
1458 + *   Code
1459 + *     None currently (mapped via TLBs)
1460 + */
1461 +
1462 +#define ACR0_DEFAULT   #0xA00FA048   /* ACR0 default value */
1463 +#define ACR1_DEFAULT   #0xFC00A040   /* ACR1 default value */
1464 +#define ACR2_DEFAULT   #0x00000000   /* ACR2 default value */
1465 +#define ACR3_DEFAULT   #0x00000000   /* ACR3 default value */
1466 +
1467 +/* ACR mapping for FPGA (maps 0) */
1468 +#define ACR0_FPGA      #0x000FA048   /* ACR0 enable FPGA */
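A decode of ACR0_DEFAULT under the usual ColdFire V4e ACR field layout (the bit positions come from the core documentation, not from this patch, so treat them as an assumption):

    /* 0xA00FA048:
     *   bits 31-24 = 0xA0  address base
     *   bits 23-16 = 0x0F  address mask  -> window 0xA0000000..0xAFFFFFFF
     *   bit  15    = 1     enable
     *   bits 14-13 = 01    match supervisor accesses only
     *   bits 6-5   = 10    cache mode: non-cacheable, precise
     *   bit  3     = 1     supervisor protect
     * i.e. the "NO CACHE / PRECISE / SUPER ONLY" mapping described above;
     * ACR1_DEFAULT covers 0xFC000000..0xFCFFFFFF the same way, and
     * ACR0_FPGA keeps the attributes but moves the window base to 0. */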
1469 +
1470 +/* Several macros to make the writing of subroutines easier:
1471 + * - func_start marks the beginning of the routine, which sets up the frame
1472 + *   register and saves the registers; it also defines another macro
1473 + *   to automatically restore the registers again.
1474 + * - func_return marks the end of the routine and simply calls the prepared
1475 + *   macro to restore registers and jump back to the caller.
1476 + * - func_define generates another macro to automatically put arguments
1477 + *   onto the stack, call the subroutine and clean up the stack again.
1478 + */
1479 +
1480 +.macro load_symbol_address     symbol,register
1481 +       movel   #\symbol,\register
1482 +.endm
1483 +       
1484 +.macro func_start      name,saveregs,savesize,stack=0
1485 +L(\name):
1486 +       linkw   %a6,#-\stack
1487 +       subal   #(\savesize),%sp
1488 +       moveml  \saveregs,%sp@
1489 +.set   stackstart,-\stack
1490 +
1491 +.macro func_return_\name
1492 +       moveml  %sp@,\saveregs
1493 +       addal   #(\savesize),%sp
1494 +       unlk    %a6
1495 +       rts
1496 +.endm
1497 +.endm
1498 +
1499 +.macro func_return     name
1500 +       func_return_\name
1501 +.endm
1502 +
1503 +.macro func_call       name
1504 +       jbsr    L(\name)
1505 +.endm
1506 +
1507 +.macro move_stack      nr,arg1,arg2,arg3,arg4
1508 +.if    \nr
1509 +       move_stack      "(\nr-1)",\arg2,\arg3,\arg4
1510 +       movel   \arg1,%sp@-
1511 +.endif
1512 +.endm
1513 +
1514 +.macro func_define     name,nr=0
1515 +.macro \name   arg1,arg2,arg3,arg4
1516 +       move_stack      \nr,\arg1,\arg2,\arg3,\arg4
1517 +       func_call       \name
1518 +.if    \nr
1519 +       lea     %sp@(\nr*4),%sp
1520 +.endif
1521 +.endm
1522 +.endm
1523 +
1524 +func_define    serial_putc,1
1525 +
1526 +.macro putc    ch
1527 +       pea     \ch
1528 +       func_call       serial_putc
1529 +       addql   #4,%sp
1530 +.endm
1531 +
1532 +.macro dputc   ch
1533 +#ifdef DEBUG
1534 +       putc    \ch
1535 +#endif
1536 +.endm
1537 +
1538 +func_define    putn,1
1539 +
1540 +.macro dputn   nr
1541 +#ifdef DEBUG
1542 +       putn    \nr
1543 +#endif
1544 +.endm
1545 +
1546 +/*
1547 +       mmu_map  -  creates a new TLB entry
1548 +
1549 +       virt_addr      Must be on proper boundary
1550 +       phys_addr      Must be on proper boundary
1551 +       itlb           MMUOR_ITLB if instruction TLB or 0
1552 +       asid           address space ID
1553 +       shared_global  MMUTR_SG if shared between different ASIDs or 0
1554 +       size_code      MMUDR_SZ1M  1 MB
1555 +                      MMUDR_SZ4K  4 KB
1556 +                      MMUDR_SZ8K  8 KB
1557 +                      MMUDR_SZ16M 16 MB
1558 +       cache_mode     MMUDR_INC   instruction non-cacheable
1559 +                       MMUDR_IC    instruction cacheable
1560 +                       MMUDR_DWT   data writethrough
1561 +                      MMUDR_DCB   data copyback
1562 +                      MMUDR_DNCP  data non-cacheable, precise
1563 +                      MMUDR_DNCIP data non-cacheable, imprecise
1564 +       super_prot     MMUDR_SP if user mode generates exception or 0
1565 +       readable       MMUDR_R if permits read access (data TLB) or 0
1566 +       writable       MMUDR_W if permits write access (data TLB) or 0
1567 +       executable     MMUDR_X if permits execute access (instruction TLB) or 0
1568 +       locked         MMUDR_LK prevents TLB entry from being replaced or 0
1569 +       temp_data_reg  a data register to use for temporary values
1570 +*/
1571 +.macro mmu_map virt_addr,phys_addr,itlb,asid,shared_global,size_code,cache_mode,super_prot,readable,writable,executable,locked,temp_data_reg
1572 +       /* Set up search of TLB. */
1573 +       movel   #(\virt_addr+1), \temp_data_reg
1574 +       movel   \temp_data_reg, MMUAR
1575 +       /* Search.  */
1576 +       movel   #(MMUOR_STLB + MMUOR_ADR +\itlb), \temp_data_reg
1577 +       movew   \temp_data_reg, (MMUOR)
1578 +       /* Set up tag value.  */
1579 +       movel   #(\virt_addr + \asid + \shared_global + MMUTR_V), \temp_data_reg
1580 +       movel   \temp_data_reg, MMUTR
1581 +       /* Set up data value.  */
1582 +       movel   #(\phys_addr + \size_code + \cache_mode + \super_prot + \readable + \writable + \executable + \locked), \temp_data_reg
1583 +       movel   \temp_data_reg, MMUDR
1584 +       /* Save it.  */
1585 +       movel   #(MMUOR_ACC + MMUOR_UAA + \itlb), \temp_data_reg
1586 +       movew   \temp_data_reg, (MMUOR)
1587 +.endm  /* mmu_map */
1588 +
1589 +.macro mmu_unmap       virt_addr,itlb,temp_data_reg
1590 +       /* Set up search of TLB. */
1591 +       movel   #(\virt_addr+1), \temp_data_reg
1592 +       movel   \temp_data_reg, MMUAR
1593 +       /* Search.  */
1594 +       movel   #(MMUOR_STLB + MMUOR_ADR +\itlb), \temp_data_reg
1595 +       movew   \temp_data_reg, (MMUOR)
1596 +       /* Test for hit.  */
1597 +       movel   MMUSR,\temp_data_reg
1598 +       btst    #MMUSR_HITN,\temp_data_reg
1599 +       beq     1f
1600 +       /* Read the TLB.  */
1601 +       movel   #(MMUOR_RW + MMUOR_ACC +\itlb), \temp_data_reg
1602 +       movew   \temp_data_reg, (MMUOR)
1603 +       movel   MMUSR,\temp_data_reg
1604 +       /* Set up tag value.  */
1605 +       movel   #0, \temp_data_reg
1606 +       movel   \temp_data_reg, MMUTR
1607 +       /* Set up data value.  */
1608 +       movel   #0, \temp_data_reg
1609 +       movel   \temp_data_reg, MMUDR
1610 +       /* Save it.  */
1611 +       movel   #(MMUOR_ACC + MMUOR_UAA + \itlb), \temp_data_reg
1612 +       movew   \temp_data_reg, (MMUOR)
1613 +1:     
1614 +.endm  /* mmu_unmap */
1615 +
1616 +/* .text */
1617 +.section ".text.head","ax"
1618 +ENTRY(_stext)
1619 +/* Version numbers of the bootinfo interface -- if we later pass info
1620 + * from boot ROM we might want to put something real here.
1621 + *
1622 + * The area from _stext to _start will later be used as kernel pointer table
1623 + */
1624 +       bras    1f      /* Jump over bootinfo version numbers */
1625 +
1626 +       .long   BOOTINFOV_MAGIC
1627 +       .long   0
1628 +1:     jmp     __start-0x80000000
1629 +
1630 +.equ   kernel_pg_dir,_stext
1631 +.equ   .,_stext+0x1000
1632 +
1633 +ENTRY(_start)
1634 +       jra     __start
1635 +__INIT
1636 +ENTRY(__start)
1637 +
1638 +/* Save the location of u-boot info - cmd line, bd_info, etc. */
1639 +       movel   %a7,%a4         /* Don't use %a4 before cf_early_init */
1640 +       addl    #0x80000004,%a4 /* 0x80000004= 1 stack push + high mem offset */
1641 +
1642 +/* Setup initial stack pointer */
1643 +       movel   #0x40001000,%sp 
1644 +
1645 +/* Clear usp */
1646 +       subl    %a0,%a0
1647 +       movel   %a0,%usp
1648 +
1649 +       movel  #(MCF_RAMBAR1 + 0x221), %d0
1650 +       movec   %d0, %rambar1
1651 +       movew   #0x2700,%sr
1652 +
1653 +       movel   #(MMU_BASE+1),%d0
1654 +       movecl  %d0,%mmubar
1655 +       movel   #MMUOR_CA,%a0                   /* Clear tlb entries */
1656 +       movew   %a0,(MMUOR)
1657 +       movel   #(MMUOR_CA + MMUOR_ITLB),%a0    /* Use ITLB for searches */
1658 +       movew   %a0,(MMUOR)
1659 +       movel   #0,%a0                          /* Clear Addr Space User ID */
1660 +       movecl  %a0,%asid 
1661 +
1662 +/* setup ACRs */
1663 +       movel   ACR0_DEFAULT, %d0               /* ACR0 (DATA) setup */
1664 +       movec   %d0, %acr0
1665 +       movel   ACR1_DEFAULT, %d0               /* ACR1 (DATA) setup */
1666 +       movec   %d0, %acr1
1667 +       movel   ACR2_DEFAULT, %d0               /* ACR2 (CODE) setup */
1668 +       movec   %d0, %acr2
1669 +       movel   ACR3_DEFAULT, %d0               /* ACR3 (CODE) setup */
1670 +       movec   %d0, %acr3
1671 +
1672 +       /* If you change the memory size to another value, make a matching
1673 +          change to zones_size[] in paging_init() (cf-mmu.c). */
1674 +
1675 +       /* Map 256MB as code */
1676 +       mmu_map (PAGE_OFFSET+0*0x1000000),  (PHYS_OFFSET+0*0x1000000), \
1677 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1678 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1679 +       mmu_map (PAGE_OFFSET+1*0x1000000),  (PHYS_OFFSET+1*0x1000000), \
1680 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1681 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1682 +       mmu_map (PAGE_OFFSET+2*0x1000000),  (PHYS_OFFSET+2*0x1000000), \
1683 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1684 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1685 +       mmu_map (PAGE_OFFSET+3*0x1000000),  (PHYS_OFFSET+3*0x1000000), \
1686 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1687 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1688 +       mmu_map (PAGE_OFFSET+4*0x1000000),  (PHYS_OFFSET+4*0x1000000), \
1689 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1690 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1691 +       mmu_map (PAGE_OFFSET+5*0x1000000),  (PHYS_OFFSET+5*0x1000000), \
1692 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1693 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1694 +       mmu_map (PAGE_OFFSET+6*0x1000000),  (PHYS_OFFSET+6*0x1000000), \
1695 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1696 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1697 +       mmu_map (PAGE_OFFSET+7*0x1000000),  (PHYS_OFFSET+7*0x1000000), \
1698 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1699 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1700 +       mmu_map (PAGE_OFFSET+8*0x1000000),  (PHYS_OFFSET+8*0x1000000), \
1701 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1702 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1703 +       mmu_map (PAGE_OFFSET+9*0x1000000),  (PHYS_OFFSET+9*0x1000000), \
1704 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1705 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1706 +       mmu_map (PAGE_OFFSET+10*0x1000000), (PHYS_OFFSET+10*0x1000000), \
1707 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1708 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1709 +       mmu_map (PAGE_OFFSET+11*0x1000000), (PHYS_OFFSET+11*0x1000000), \
1710 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1711 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1712 +       mmu_map (PAGE_OFFSET+12*0x1000000), (PHYS_OFFSET+12*0x1000000), \
1713 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1714 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1715 +       mmu_map (PAGE_OFFSET+13*0x1000000), (PHYS_OFFSET+13*0x1000000), \
1716 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1717 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1718 +       mmu_map (PAGE_OFFSET+14*0x1000000), (PHYS_OFFSET+14*0x1000000), \
1719 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1720 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1721 +       mmu_map (PAGE_OFFSET+15*0x1000000), (PHYS_OFFSET+15*0x1000000), \
1722 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC,  MMUDR_SP, \
1723 +               0, 0, MMUDR_X, MMUDR_LK, %d0
1724 +
1725 +       /* Map 256MB as data also */
1726 +       mmu_map (PAGE_OFFSET+0*0x1000000),  (PHYS_OFFSET+0*0x1000000), 0, 0, \
1727 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1728 +               0, MMUDR_LK, %d0
1729 +       mmu_map (PAGE_OFFSET+1*0x1000000),  (PHYS_OFFSET+1*0x1000000), 0, 0, \
1730 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1731 +               0, MMUDR_LK, %d0
1732 +       mmu_map (PAGE_OFFSET+2*0x1000000),  (PHYS_OFFSET+2*0x1000000), 0, 0, \
1733 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1734 +               0, MMUDR_LK, %d0
1735 +       mmu_map (PAGE_OFFSET+3*0x1000000),  (PHYS_OFFSET+3*0x1000000), 0, 0, \
1736 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1737 +               0, MMUDR_LK, %d0
1738 +       mmu_map (PAGE_OFFSET+4*0x1000000),  (PHYS_OFFSET+4*0x1000000), 0, 0, \
1739 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1740 +               0, MMUDR_LK, %d0
1741 +       mmu_map (PAGE_OFFSET+5*0x1000000),  (PHYS_OFFSET+5*0x1000000), 0, 0, \
1742 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1743 +               0, MMUDR_LK, %d0
1744 +       mmu_map (PAGE_OFFSET+6*0x1000000),  (PHYS_OFFSET+6*0x1000000), 0, 0, \
1745 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1746 +               0, MMUDR_LK, %d0
1747 +       mmu_map (PAGE_OFFSET+7*0x1000000),  (PHYS_OFFSET+7*0x1000000), 0, 0, \
1748 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1749 +               0, MMUDR_LK, %d0
1750 +       mmu_map (PAGE_OFFSET+8*0x1000000),  (PHYS_OFFSET+8*0x1000000), 0, 0, \
1751 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1752 +               0, MMUDR_LK, %d0
1753 +       mmu_map (PAGE_OFFSET+9*0x1000000),  (PHYS_OFFSET+9*0x1000000), 0, 0, \
1754 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1755 +               0, MMUDR_LK, %d0
1756 +       mmu_map (PAGE_OFFSET+10*0x1000000), (PHYS_OFFSET+10*0x1000000), 0, 0, \
1757 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1758 +               0, MMUDR_LK, %d0
1759 +       mmu_map (PAGE_OFFSET+11*0x1000000), (PHYS_OFFSET+11*0x1000000), 0, 0, \
1760 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1761 +               0, MMUDR_LK, %d0
1762 +       mmu_map (PAGE_OFFSET+12*0x1000000), (PHYS_OFFSET+12*0x1000000), 0, 0, \
1763 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1764 +               0, MMUDR_LK, %d0
1765 +       mmu_map (PAGE_OFFSET+13*0x1000000), (PHYS_OFFSET+13*0x1000000), 0, 0, \
1766 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1767 +               0, MMUDR_LK, %d0
1768 +       mmu_map (PAGE_OFFSET+14*0x1000000), (PHYS_OFFSET+14*0x1000000), 0, 0, \
1769 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1770 +               0, MMUDR_LK, %d0
1771 +       mmu_map (PAGE_OFFSET+15*0x1000000), (PHYS_OFFSET+15*0x1000000), 0, 0, \
1772 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1773 +               0, MMUDR_LK, %d0
1774 +
1775 +       /* Do unity mapping to enable the MMU.  Map first 16 MB in place as 
1776 +          code (delete TLBs after MMU is enabled and we are executing in high 
1777 +          memory). */
1778 +       mmu_map (PHYS_OFFSET+0*0x1000000), (PHYS_OFFSET+0*0x1000000), \
1779 +               MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_INC,  MMUDR_SP, 0, \
1780 +               0, MMUDR_X, 0, %d0
1781 +       /* Map first 16 MB as data too.  */
1782 +       mmu_map (PHYS_OFFSET+0*0x1000000), (PHYS_OFFSET+0*0x1000000), 0, 0, \
1783 +               MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
1784 +               0, 0, %d0
1785 +
1786 +       /* Turn on MMU */
1787 +       movel   #(MMUCR_EN),%a0
1788 +       movel   %a0,MMUCR
1789 +       nop     /* This synchs the pipeline after a write to MMUCR */
1790 +
1791 +       movel   #__running_high,%a0  /* Get around PC-relative addressing. */
1792 +       jmp     %a0@
1793 +
1794 +ENTRY(__running_high)
1795 +       load_symbol_address _stext,%sp
1796 +       movel   L(memory_start),%a0
1797 +       movel   %a0,availmem
1798 +       load_symbol_address L(phys_kernel_start),%a0
1799 +       load_symbol_address _stext,%a1
1800 +       subl    #_stext,%a1
1801 +       addl    #PAGE_OFFSET,%a1
1802 +       movel   %a1,%a0@
1803 +
1804 +       /* Unmap first 16 MB, code and data.  */
1805 +       mmu_unmap (PHYS_OFFSET+0*0x1000000), MMUOR_ITLB, %d0
1806 +       mmu_unmap (PHYS_OFFSET+0*0x1000000), 0, %d0
1807 +
1808 +/* Setup initial stack pointer */
1809 +       lea     init_task,%a2 
1810 +       lea     init_thread_union+THREAD_SIZE,%sp
1811 +       subl    %a6,%a6         /* clear a6 for gdb */
1812 +
1813 +#ifdef CONFIG_MCF_USER_HALT
1814 +/* Setup debug control reg to allow halts from user space */
1815 +       lea     wdbg_uhe,%a0
1816 +       wdebug  (%a0)
1817 +#endif
1818 +
1819 +       movel   %a4,uboot_info_stk /* save uboot info to variable */
1820 +       jsr     cf_early_init
1821 +       jmp     start_kernel
1822 +
1823 +.section ".text.head","ax"
1824 +set_context:
1825 +func_start     set_context,%d0,(1*4)
1826 +       movel   12(%sp),%d0
1827 +       movec   %d0,%asid
1828 +func_return    set_context
1829 +
1830 +/*
1831 + * set_fpga(addr,val)
1832 + *
1833 + * Map in 0x00000000 -> 0x0fffffff and then do the write.
1834 + */
1835 +set_fpga:
1836 +       movew   %sr,%d1
1837 +       movew   #0x2700,%sr
1838 +       movel   ACR0_FPGA, %d0
1839 +       movec   %d0, %acr0
1840 +       nop
1841 +       moveal  4(%sp),%a0
1842 +       movel   8(%sp),%a0@
1843 +       movel   ACR0_DEFAULT, %d0
1844 +       movec   %d0, %acr0
1845 +       nop
1846 +       movew   %d1,%sr
1847 +       rts
1848 +
1849 +       .data
1850 +       .align  4
1851 +
1852 +availmem:
1853 +       .long   0
1854 +L(phys_kernel_start):
1855 +       .long   PAGE_OFFSET
1856 +L(kernel_end):
1857 +       .long   0
1858 +L(memory_start):
1859 +       .long   PAGE_OFFSET_RAW
1860 +
1861 +#ifdef CONFIG_MCF_USER_HALT
1862 +/*
1863 + * Enable User Halt Enable in the debug control register.
1864 + */
1865 +wdbg_uhe:
1866 +       .word   0x2c80  /* DR0 */
1867 +       .word   0x00b0  /* 31:16 */
1868 +       .word   0x0400  /* 15:0 -- enable UHE */
1869 +       .word   0x0000  /* unused */
1870 +#endif
1871 +
1872 +
1873 --- /dev/null
1874 +++ b/arch/m68k/coldfire/ints.c
1875 @@ -0,0 +1,384 @@
1876 +/*
1877 + * linux/arch/m68k/coldfire/ints.c -- General interrupt handling code
1878 + *
1879 + * Copyright (C) 1999-2002  Greg Ungerer (gerg@snapgear.com)
1880 + * Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
1881 + *                     Kenneth Albanowski <kjahds@kjahds.com>,
1882 + * Copyright (C) 2000  Lineo Inc. (www.lineo.com)
1883 + * Matt Waddel Matt.Waddel@freescale.com
1884 + * Copyright Freescale Semiconductor, Inc. 2007
1885 + * Kurt Mahan kmahan@freescale.com
1886 + *
1887 + * Based on:
1888 + * linux/arch/m68k/kernel/ints.c &
1889 + * linux/arch/m68knommu/5307/ints.c
1890 + *
1891 + * This file is subject to the terms and conditions of the GNU General Public
1892 + * License.  See the file COPYING in the main directory of this archive
1893 + * for more details.
1894 + */
1895 +
1896 +#include <linux/module.h>
1897 +#include <linux/types.h>
1898 +#include <linux/init.h>
1899 +#include <linux/sched.h>
1900 +#include <linux/kernel_stat.h>
1901 +#include <linux/errno.h>
1902 +#include <linux/seq_file.h>
1903 +#include <linux/interrupt.h>
1904 +
1905 +#include <asm/system.h>
1906 +#include <asm/irq.h>
1907 +#include <asm/traps.h>
1908 +#include <asm/page.h>
1909 +#include <asm/machdep.h>
1910 +#include <asm/irq_regs.h>
1911 +
1912 +#include <asm/mcfsim.h>
1913 +
1914 +/*
1915 + * IRQ Handler lists.
1916 + */
1917 +static struct irq_node *irq_list[SYS_IRQS];
1918 +static struct irq_controller *irq_controller[SYS_IRQS];
1919 +static int irq_depth[SYS_IRQS];
1920 +
1921 +/*
1922 + * IRQ Controller
1923 + */
1924 +#ifdef CONFIG_M54455
1925 +void m5445x_irq_enable(unsigned int irq);
1926 +void m5445x_irq_disable(unsigned int irq);
1927 +static struct irq_controller m5445x_irq_controller = {
1928 +       .name           = "M5445X",
1929 +       .lock           = SPIN_LOCK_UNLOCKED,
1930 +       .enable         = m5445x_irq_enable,
1931 +       .disable        = m5445x_irq_disable,
1932 +};
1933 +#endif
1934 +
1935 +#define        POOL_SIZE       SYS_IRQS
1936 +static struct irq_node  pool[POOL_SIZE];
1937 +static struct irq_node *get_irq_node(void);
1938 +
1939 +/* The number of spurious interrupts */
1940 +unsigned int num_spurious;
1941 +asmlinkage void handle_badint(struct pt_regs *regs);
1942 +
1943 +/*
1944 + * void init_IRQ(void)
1945 + *
1946 + * This function should be called during kernel startup to initialize
1947 + * the IRQ handling routines.
1948 + */
1949 +void __init init_IRQ(void)
1950 +{
1951 +       int i;
1952 +
1953 +#ifdef CONFIG_M54455
1954 +       for (i = 0; i < SYS_IRQS; i++)
1955 +               irq_controller[i] = &m5445x_irq_controller;
1956 +#endif
1957 +}
1958 +
1959 +/*
1960 + * process_int(unsigned long vec, struct pt_regs *fp)
1961 + *
1962 + * Process an interrupt.  Called from entry.S.
1963 + */
1964 +asmlinkage void process_int(unsigned long vec, struct pt_regs *fp)
1965 +{
1966 +       struct pt_regs *old_regs;
1967 +       struct irq_node *node;
1968 +       old_regs = set_irq_regs(fp);
1969 +       kstat_cpu(0).irqs[vec]++;
1970 +
1971 +       node = irq_list[vec];
1972 +       if (!node)
1973 +               handle_badint(fp);
1974 +       else {
1975 +               do {
1976 +                       node->handler(vec, node->dev_id);
1977 +                       node = node->next;
1978 +               } while (node);
1979 +       }
1980 +
1981 +       set_irq_regs(old_regs);
1982 +}
1983 +
1984 +/*
1985 + * show_interrupts( struct seq_file *p, void *v)
1986 + *
1987 + * Called to show all the current interrupt information.
1988 + */
1989 +int show_interrupts(struct seq_file *p, void *v)
1990 +{
1991 +       struct irq_controller *contr;
1992 +       struct irq_node *node;
1993 +       int i = *(loff_t *) v;
1994 +
1995 +       if ((i < NR_IRQS) && (irq_list[i])) {
1996 +               contr = irq_controller[i];
1997 +               node = irq_list[i];
1998 +               seq_printf(p, "%-8s %3u: %10u %s", contr->name, i,
1999 +                       kstat_cpu(0).irqs[i], node->devname);
2000 +               while ((node = node->next))
2001 +                       seq_printf(p, ", %s", node->devname);
2002 +
2003 +               seq_printf(p, "\n");
2004 +       }
2005 +
2006 +       return 0;
2007 +}
2008 +
2009 +/*
2010 + * get_irq_node(void)
2011 + *
2012 + * Get an irq node from the pool.
2013 + */
2014 +struct irq_node *get_irq_node(void)
2015 +{
2016 +       struct irq_node *p = pool;
2017 +       int i;
2018 +
2019 +       for (i = 0; i < POOL_SIZE; i++, p++) {
2020 +               if (!p->handler) {
2021 +                       memset(p, 0, sizeof(struct irq_node));
2022 +                       return p;
2023 +               }
2024 +       }
2025 +       printk(KERN_INFO "%s(%s:%d): No more irq nodes, I suggest you "
2026 +              "increase POOL_SIZE\n", __FUNCTION__, __FILE__, __LINE__);
2027 +       return NULL;
2028 +}
2029 +
2030 +void init_irq_proc(void)
2031 +{
2032 +       /* Insert /proc/irq driver here */
2033 +}
2034 +
2035 +int setup_irq(unsigned int irq, struct irq_node *node)
2036 +{
2037 +       struct irq_controller *contr;
2038 +       struct irq_node **prev;
2039 +       unsigned long flags;
2040 +
2041 +       if (irq >= NR_IRQS || !irq_controller[irq]) {
2042 +               printk("%s: Incorrect IRQ %d from %s\n",
2043 +                      __FUNCTION__, irq, node->devname);
2044 +               return -ENXIO;
2045 +       }
2046 +
2047 +       contr = irq_controller[irq];
2048 +       spin_lock_irqsave(&contr->lock, flags);
2049 +
2050 +       prev = irq_list + irq;
2051 +       if (*prev) {
2052 +               /* Can't share interrupts unless both agree to */
2053 +               if (!((*prev)->flags & node->flags & IRQF_SHARED)) {
2054 +                       spin_unlock_irqrestore(&contr->lock, flags);
2055 +                       return -EBUSY;
2056 +               }
2057 +               while (*prev)
2058 +                       prev = &(*prev)->next;
2059 +       }
2060 +
2061 +       if (!irq_list[irq]) {
2062 +               if (contr->startup)
2063 +                       contr->startup(irq);
2064 +               else
2065 +                       contr->enable(irq);
2066 +       }
2067 +       node->next = NULL;
2068 +       *prev = node;
2069 +
2070 +       spin_unlock_irqrestore(&contr->lock, flags);
2071 +
2072 +       return 0;
2073 +}
2074 +
2075 +int request_irq(unsigned int irq,
2076 +               irq_handler_t handler,
2077 +               unsigned long flags, const char *devname, void *dev_id)
2078 +{
2079 +       struct irq_node *node = get_irq_node();
2080 +       int res;
2081 +
2082 +       if (!node)
2083 +               return -ENOMEM;
2084 +
2085 +       node->handler = handler;
2086 +       node->flags   = flags;
2087 +       node->dev_id  = dev_id;
2088 +       node->devname = devname;
2089 +
2090 +       res = setup_irq(irq, node);
2091 +       if (res)
2092 +               node->handler = NULL;
2093 +
2094 +       return res;
2095 +}
2096 +EXPORT_SYMBOL(request_irq);
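+
+/*
+ * Sketch of typical use (illustration only, not part of this file): a
+ * driver hooking one of the EPORT lines handled below would do roughly
+ *
+ *	if (request_irq(64 + 3, my_handler, IRQF_SHARED, "mydrv", dev))
+ *		return -EBUSY;
+ *	...
+ *	free_irq(64 + 3, dev);
+ *
+ * "my_handler", "mydrv" and the vector number are hypothetical; the
+ * first 64 vectors are treated as non-hardware interrupts by the
+ * m5445x controller code at the end of this file.
+ */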
2097 +
2098 +void free_irq(unsigned int irq, void *dev_id)
2099 +{
2100 +       struct irq_controller *contr;
2101 +       struct irq_node **p, *node;
2102 +       unsigned long flags;
2103 +
2104 +       if (irq >= NR_IRQS || !irq_controller[irq]) {
2105 +               printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
2106 +               return;
2107 +       }
2108 +
2109 +       contr = irq_controller[irq];
2110 +       spin_lock_irqsave(&contr->lock, flags);
2111 +
2112 +       p = irq_list + irq;
2113 +       while ((node = *p)) {
2114 +               if (node->dev_id == dev_id)
2115 +                       break;
2116 +               p = &node->next;
2117 +       }
2118 +
2119 +       if (node) {
2120 +               *p = node->next;
2121 +               node->handler = NULL;
2122 +       } else
2123 +               printk(KERN_DEBUG "%s: Removing probably wrong IRQ %d\n",
2124 +                      __FUNCTION__, irq);
2125 +
2126 +       if (!irq_list[irq]) {
2127 +               if (contr->shutdown)
2128 +                       contr->shutdown(irq);
2129 +               else
2130 +                       contr->disable(irq);
2131 +       }
2132 +
2133 +       spin_unlock_irqrestore(&contr->lock, flags);
2134 +}
2135 +EXPORT_SYMBOL(free_irq);
2136 +
2137 +void enable_irq(unsigned int irq)
2138 +{
2139 +       struct irq_controller *contr;
2140 +       unsigned long flags;
2141 +
2142 +       if (irq >= NR_IRQS || !irq_controller[irq]) {
2143 +               printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
2144 +               return;
2145 +       }
2146 +
2147 +       contr = irq_controller[irq];
2148 +       spin_lock_irqsave(&contr->lock, flags);
2149 +       if (irq_depth[irq]) {
2150 +               if (!--irq_depth[irq]) {
2151 +                       if (contr->enable)
2152 +                               contr->enable(irq);
2153 +               }
2154 +       } else
2155 +               WARN_ON(1);
2156 +       spin_unlock_irqrestore(&contr->lock, flags);
2157 +}
2158 +EXPORT_SYMBOL(enable_irq);
2159 +
2160 +void disable_irq(unsigned int irq)
2161 +{
2162 +       struct irq_controller *contr;
2163 +       unsigned long flags;
2164 +
2165 +       if (irq >= NR_IRQS || !irq_controller[irq]) {
2166 +               printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
2167 +               return;
2168 +       }
2169 +
2170 +       contr = irq_controller[irq];
2171 +       spin_lock_irqsave(&contr->lock, flags);
2172 +       if (!irq_depth[irq]++) {
2173 +               if (contr->disable)
2174 +                       contr->disable(irq);
2175 +       }
2176 +       spin_unlock_irqrestore(&contr->lock, flags);
2177 +}
2178 +EXPORT_SYMBOL(disable_irq);
2179 +
2180 +unsigned long probe_irq_on(void)
2181 +{
2182 +       return 0;
2183 +}
2184 +EXPORT_SYMBOL(probe_irq_on);
2185 +
2186 +int probe_irq_off(unsigned long irqs)
2187 +{
2188 +       return 0;
2189 +}
2190 +EXPORT_SYMBOL(probe_irq_off);
2191 +
2192 +asmlinkage void handle_badint(struct pt_regs *regs)
2193 +{
2194 +       kstat_cpu(0).irqs[0]++;
2195 +       num_spurious++;
2196 +       printk(KERN_DEBUG "unexpected interrupt from %u\n", regs->vector);
2197 +}
2198 +EXPORT_SYMBOL(handle_badint);
2199 +
2200 +#ifdef CONFIG_M54455
2201 +/*
2202 + * M5445X Implementation
2203 + */
2204 +void m5445x_irq_enable(unsigned int irq)
2205 +{
2206 +       /* enable the interrupt hardware */
2207 +       if (irq < 64)
2208 +               return;
2209 +
2210 +       /* adjust past non-hardware ints */
2211 +       irq -= 64;
2212 +
2213 +       /* check for eport */
2214 +       if ((irq > 0) && (irq < 8)) {
2215 +               /* enable eport */
2216 +               MCF_EPORT_EPPAR &= ~(3 << (irq*2));     /* level */
2217 +               MCF_EPORT_EPDDR &= ~(1 << irq);         /* input */
2218 +               MCF_EPORT_EPIER |= 1 << irq;            /* irq enabled */
2219 +       }
2220 +
2221 +       if (irq < 64) {
2222 +               /* controller 0 */
2223 +               MCF_INTC0_ICR(irq) = 0x02;
2224 +               MCF_INTC0_CIMR = irq;
2225 +       } else {
2226 +               /* controller 1 */
2227 +               irq -= 64;
2228 +               MCF_INTC1_ICR(irq) = 0x02;
2229 +               MCF_INTC1_CIMR = irq;
2230 +       }
2231 +}
2232 +
2233 +void m5445x_irq_disable(unsigned int irq)
2234 +{
2235 +       /* disable the interrupt hardware */
2236 +       if (irq < 64)
2237 +               return;
2238 +
2239 +       /* adjust past non-hardware ints */
2240 +       irq -= 64;
2241 +
2242 +       /* check for eport */
2243 +       if ((irq > 0) && (irq < 8)) {
2244 +               /* disable eport */
2245 +               MCF_EPORT_EPIER &= ~(1 << irq);
2246 +       }
2247 +
2248 +       if (irq < 64) {
2249 +               /* controller 0 */
2250 +               MCF_INTC0_ICR(irq) = 0x00;
2251 +               MCF_INTC0_SIMR = irq;
2252 +       } else {
2253 +               /* controller 1 */
2254 +               irq -= 64;
2255 +               MCF_INTC1_ICR(irq) = 0x00;
2256 +               MCF_INTC1_SIMR = irq;
2257 +       }
2258 +}
2259 +#endif
2260 --- /dev/null
2261 +++ b/arch/m68k/coldfire/iomap.c
2262 @@ -0,0 +1,54 @@
2263 +/*
2264 + * arch/m68k/coldfire/iomap.c
2265 + *
2266 + * Generic coldfire iomap interface
2267 + *
2268 + * Based on the sh64 iomap.c by Paul Mundt.
2269 + *
2270 + * This file is subject to the terms and conditions of the GNU General Public
2271 + * License.  See the file "COPYING" in the main directory of this archive
2272 + * for more details.
2273 + */
2274 +#include <linux/pci.h>
2275 +#include <asm/io.h>
2276 +
2277 +void __iomem *__attribute__ ((weak))
2278 +ioport_map(unsigned long port, unsigned int len)
2279 +{
2280 +       return (void __iomem *)port;
2281 +}
2282 +EXPORT_SYMBOL(ioport_map);
2283 +
2284 +void ioport_unmap(void __iomem *addr)
2285 +{
2286 +       /* Nothing .. */
2287 +}
2288 +EXPORT_SYMBOL(ioport_unmap);
2289 +
2290 +void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
2291 +{
2292 +       unsigned long start = pci_resource_start(dev, bar);
2293 +       unsigned long len = pci_resource_len(dev, bar);
2294 +       unsigned long flags = pci_resource_flags(dev, bar);
2295 +printk(KERN_INFO "PCI_IOMAP: BAR=%d  START=0x%lx  LEN=0x%lx  FLAGS=0x%lx\n",
2296 +       bar, start, len, flags);
2297 +
2298 +       if (!len)
2299 +               return NULL;
2300 +       if (max && len > max)
2301 +               len = max;
2302 +       if (flags & IORESOURCE_IO)
2303 +               return ioport_map(start, len);
2304 +       if (flags & IORESOURCE_MEM)
2305 +               return (void __iomem *)start;
2306 +
2307 +       /* What? */
2308 +       return NULL;
2309 +}
2310 +EXPORT_SYMBOL(pci_iomap);
2311 +
2312 +void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
2313 +{
2314 +       /* Nothing .. */
2315 +}
2316 +EXPORT_SYMBOL(pci_iounmap);
2317 --- /dev/null
2318 +++ b/arch/m68k/coldfire/mcf5445x-pci.c
2319 @@ -0,0 +1,427 @@
2320 +/*
2321 + * arch/m68k/coldfire/mcf5445x-pci.c
2322 + *
2323 + * Coldfire M5445x specific PCI implementation.
2324 + *
2325 + * Copyright (c) 2007 Freescale Semiconductor, Inc.
2326 + *     Kurt Mahan <kmahan@freescale.com>
2327 + */
2328 +
2329 +#include <linux/delay.h>
2330 +#include <linux/pci.h>
2331 +
2332 +#include <asm/mcfsim.h>
2333 +#include <asm/pci.h>
2334 +#include <asm/irq.h>
2335 +
2336 +/*
2337 + * Layout MCF5445x to PCI memory mappings:
2338 + *
2339 + *     WIN         MCF5445x                    PCI            TYPE
2340 + *     ---         --------                    ---            ----
2341 + *     [0] 0xA0000000 -> 0xA7FFFFFF  0xA0000000 -> 0xA7FFFFFF  MEM
2342 + *     [1] 0xA8000000 -> 0xABFFFFFF  0xA8000000 -> 0xABFFFFFF  MEM
2343 + *     [2] 0xAC000000 -> 0xAFFFFFFF  0xAC000000 -> 0xAFFFFFFF  IO
2344 + */
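+/*
+ * The windows are mapped 1:1, so (purely as an illustration) a device
+ * BAR that the PCI core places at 0xA0100000 inside window [0] is
+ * accessed by the CPU at that same 0xA0100000 address; pci_iomap() in
+ * iomap.c simply returns the resource start for MEM BARs.
+ */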
2345 +
2346 +#define MCF5445X_PCI_MEM_BASE          0xA0000000
2347 +#define MCF5445X_PCI_MEM_SIZE          0x0C000000
2348 +
2349 +#define MCF5445X_PCI_CONFIG_BASE       0xAC000000
2350 +#define MCF5445X_PCI_CONFIG_SIZE       0x04000000
2351 +
2352 +#define MCF5445X_PCI_IO_BASE           0xAC000000
2353 +#define MCF5445X_PCI_IO_SIZE           0x04000000
2354 +
2355 +/* PCI Bus memory resource block */
2356 +struct resource pci_iomem_resource = {
2357 +       .name = "PCI memory space",
2358 +       .start = MCF5445X_PCI_MEM_BASE,
2359 +       .flags = IORESOURCE_MEM,
2360 +       .end = MCF5445X_PCI_MEM_BASE + MCF5445X_PCI_MEM_SIZE - 1
2361 +};
2362 +
2363 +/* PCI Bus ioport resource block */
2364 +struct resource pci_ioport_resource = {
2365 +       .name = "PCI I/O space",
2366 +       .start = MCF5445X_PCI_IO_BASE,
2367 +       .flags = IORESOURCE_IO,
2368 +       .end = MCF5445X_PCI_IO_BASE + MCF5445X_PCI_IO_SIZE - 1
2369 +};
2370 +
2371 +/*
2372 + * The M54455EVB multiplexes all the PCI interrupts via
2373 + * the FPGA and routes them to a single interrupt.  The
2374 + * PCI spec requires all PCI interrupt routines be smart
2375 + * enough to sort out their own interrupts.
2376 + * The interrupt source from the FPGA is configured
2377 + * to EPORT 3.
2378 + */
2379 +#define MCF5445X_PCI_IRQ               0x43
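+/*
+ * Note: 0x43 is 64 + 3, i.e. EPORT3 once the 64 vectors reserved for
+ * non-hardware interrupts are skipped (see m5445x_irq_enable() in
+ * ints.c); pcibios_map_irq() in pci.c reports the same value for every
+ * slot.
+ */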
2380 +
2381 +#define PCI_SLOTS                      4
2382 +
2383 +/*
2384 + * FPGA Info
2385 + */
2386 +#define FPGA_PCI_IRQ_ENABLE            (u32 *)0x09000000
2387 +#define FPGA_PCI_IRQ_STATUS            (u32 *)0x09000004
2388 +#define FPGA_PCI_IRQ_ROUTE             (u32 *)0x0900000c
2389 +#define FPGA_SEVEN_LED                 (u32 *)0x09000014
2390 +
2391 +extern void set_fpga(u32 *addr, u32 val);
2392 +
2393 +#ifdef DEBUG
2394 +void mcf5445x_pci_dumpregs(void);
2395 +#endif
2396 +
2397 +/*
2398 + * void mcf5445x_conf_device(struct pci_dev *dev)
2399 + *
2400 + * Machine dependent configuration of the given device.
2401 + *
2402 + * Parameters:
2403 + *
2404 + * dev         - the pci device.
2405 + */
2406 +void __init
2407 +mcf5445x_conf_device(struct pci_dev *dev)
2408 +{
2409 +       set_fpga(FPGA_PCI_IRQ_ENABLE, 0x0f);
2410 +}
2411 +
2412 +/*
2413 + * int mcf5445x_pci_config_read(unsigned int seg, unsigned int bus,
2414 + *                             unsigned int devfn, int reg,
2415 + *                             u32 *value)
2416 + *
2417 + * Read from PCI configuration space.
2418 + *
2419 + */
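+/*
+ * Worked example (for illustration): a 16-bit read of PCI_VENDOR_ID
+ * (reg 0) on bus 0 programs PCICAR with the bus/device/function and
+ * dword 0, then reads the u16 at MCF5445X_PCI_CONFIG_BASE + (0 & 2)
+ * and byte-swaps it from PCI little-endian with le16_to_cpu().
+ */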
2420 +int mcf5445x_pci_config_read(unsigned int seg, unsigned int bus,
2421 +                            unsigned int devfn, int reg, int len, u32 *value)
2422 +{
2423 +       u32 addr = MCF_PCI_PCICAR_BUSNUM(bus) |
2424 +                  MCF_PCI_PCICAR_DEVNUM(PCI_SLOT(devfn)) |
2425 +                  MCF_PCI_PCICAR_FUNCNUM(PCI_FUNC(devfn)) |
2426 +                  MCF_PCI_PCICAR_DWORD(reg) |
2427 +                  MCF_PCI_PCICAR_E;
2428 +
2429 +       if ((bus > 255) || (devfn > 255) || (reg > 255)) {
2430 +               *value = -1;
2431 +               return -EINVAL;
2432 +       }
2433 +
2434 +       /* setup for config mode */
2435 +       MCF_PCI_PCICAR = addr;
2436 +       __asm__ __volatile__("nop");
2437 +
2438 +       switch (len) {
2439 +       case 1:
2440 +               *value = *(volatile u8 *)(MCF5445X_PCI_CONFIG_BASE+(reg&3));
2441 +               break;
2442 +       case 2:
2443 +               *value = le16_to_cpu(*(volatile u16 *)
2444 +                               (MCF5445X_PCI_CONFIG_BASE + (reg&2)));
2445 +               break;
2446 +       case 4:
2447 +               *value = le32_to_cpu(*(volatile u32 *)
2448 +                               (MCF5445X_PCI_CONFIG_BASE));
2449 +               break;
2450 +       }
2451 +
2452 +       /* clear config mode */
2453 +       MCF_PCI_PCICAR = ~MCF_PCI_PCICAR_E;
2454 +       __asm__ __volatile__("nop");
2455 +
2456 +       return 0;
2457 +}
2458 +
2459 +/*
2460 + * int mcf5445x_pci_config_write(unsigned int seg, unsigned int bus,
2461 + *                              unsigned int devfn, int reg,
2462 + *                              u32 *value)
2463 + *
2464 + * Write to PCI configuration space
2465 + */
2466 +int mcf5445x_pci_config_write(unsigned int seg, unsigned int bus,
2467 +                   unsigned int devfn, int reg, int len, u32 value)
2468 +{
2469 +       u32 addr = MCF_PCI_PCICAR_BUSNUM(bus) |
2470 +                  MCF_PCI_PCICAR_DEVNUM(PCI_SLOT(devfn)) |
2471 +                  MCF_PCI_PCICAR_FUNCNUM(PCI_FUNC(devfn)) |
2472 +                  MCF_PCI_PCICAR_DWORD(reg) |
2473 +                  MCF_PCI_PCICAR_E;
2474 +
2475 +       if ((bus > 255) || (devfn > 255) || (reg > 255))
2476 +               return -EINVAL;
2477 +
2478 +       /* setup for config mode */
2479 +       MCF_PCI_PCICAR = addr;
2480 +       __asm__ __volatile__("nop");
2481 +
2482 +       switch (len) {
2483 +       case 1:
2484 +               *(volatile u8 *)(MCF5445X_PCI_CONFIG_BASE+(reg&3)) = (u8)value;
2485 +               break;
2486 +       case 2:
2487 +               *(volatile u16 *)(MCF5445X_PCI_CONFIG_BASE+(reg&2)) =
2488 +                               cpu_to_le16((u16)value);
2489 +               break;
2490 +       case 4:
2491 +               *(volatile u32 *)(MCF5445X_PCI_CONFIG_BASE) =
2492 +                               cpu_to_le32(value);
2493 +               break;
2494 +       }
2495 +
2496 +       /* clear config mode */
2497 +       MCF_PCI_PCICAR = ~MCF_PCI_PCICAR_E;
2498 +       __asm__ __volatile__("nop");
2499 +
2500 +       return 0;
2501 +}
2502 +
2503 +/* hardware operations */
2504 +static struct pci_raw_ops mcf5445x_pci_ops = {
2505 +       .read =         mcf5445x_pci_config_read,
2506 +       .write =        mcf5445x_pci_config_write,
2507 +};
2508 +
2509 +/*
2510 + * irqreturn_t mcf5445x_pci_interrupt( int irq, void *dev)
2511 + *
2512 + * PCI controller interrupt handler.
2513 + */
2514 +static irqreturn_t
2515 +mcf5445x_pci_interrupt(int irq, void *dev)
2516 +{
2517 +       u32 status = MCF_PCI_PCIGSCR;
2518 +#ifdef DEBUG
2519 +       printk(KERN_INFO "PCI: Controller irq status=0x%08x\n", status);
2520 +#endif
2521 +       /* clear */
2522 +       MCF_PCI_PCIGSCR = status;
2523 +
2524 +       return IRQ_HANDLED;
2525 +}
2526 +
2527 +/*
2528 + * irqreturn_t mcf5445x_pci_arb_interrupt( int irq, void *dev)
2529 + *
2530 + * PCI Arbiter interrupt handler.
2531 + */
2532 +static irqreturn_t
2533 +mcf5445x_pci_arb_interrupt(int irq, void *dev)
2534 +{
2535 +       u32 status = MCF_PCIARB_PASR;
2536 +#ifdef DEBUG
2537 +       printk(KERN_INFO "PCI: Arbiter irq status=0x%08x\n", status);
2538 +#endif
2539 +       /* clear */
2540 +       MCF_PCIARB_PASR = status;
2541 +       return IRQ_HANDLED;
2542 +}
2543 +
2544 +/*
2545 + * int init_mcf5445x_pci(void)
2546 + *
2547 + * Machine specific initialisation:
2548 + *
2549 + * - Initialize the PCI controller, arbiter and address windows
2550 + * - Register the controller/arbiter interrupt handlers and bus resources
2551 + *
2552 + * Result: 0; the raw config-space accessors are installed in raw_pci_ops.
2553 + */
2554 +int __init
2555 +init_mcf5445x_pci(void)
2556 +{
2557 +       /*
2558 +        * Initialize the PCI core
2559 +        */
2560 +
2561 +       /* arbitration controller */
2562 +       MCF_PCIARB_PACR = MCF_PCIARB_PACR_INTMPRI |
2563 +                         MCF_PCIARB_PACR_EXTMPRI(0x0f) |
2564 +                         MCF_PCIARB_PACR_INTMINTEN |
2565 +                         MCF_PCIARB_PACR_EXTMINTEN(0x0f);
2566 +
2567 +       /* pci pin assignment regs */
2568 +       MCF_GPIO_PAR_PCI = MCF_GPIO_PAR_PCI_GNT0 |
2569 +                          MCF_GPIO_PAR_PCI_GNT1 |
2570 +                          MCF_GPIO_PAR_PCI_GNT2 |
2571 +                          MCF_GPIO_PAR_PCI_GNT3_GNT3 |
2572 +                          MCF_GPIO_PAR_PCI_REQ0 |
2573 +                          MCF_GPIO_PAR_PCI_REQ1 |
2574 +                          MCF_GPIO_PAR_PCI_REQ2 |
2575 +                          MCF_GPIO_PAR_PCI_REQ3_REQ3;
2576 +
2577 +       /* target control reg */
2578 +       MCF_PCI_PCITCR = MCF_PCI_PCITCR_P |
2579 +                        MCF_PCI_PCITCR_WCT(8);
2580 +
2581 +       /* PCI MEM address */
2582 +       MCF_PCI_PCIIW0BTAR = 0xA007A000;
2583 +
2584 +       /* PCI MEM address */
2585 +       MCF_PCI_PCIIW1BTAR = 0xA803A800;
2586 +
2587 +       /* PCI IO address */
2588 +       MCF_PCI_PCIIW2BTAR = 0xAC03AC00;
2589 +
2590 +       /* window control */
2591 +       MCF_PCI_PCIIWCR = MCF_PCI_PCIIWCR_WINCTRL0_ENABLE |
2592 +                         MCF_PCI_PCIIWCR_WINCTRL0_MEMREAD |
2593 +                         MCF_PCI_PCIIWCR_WINCTRL1_ENABLE |
2594 +                         MCF_PCI_PCIIWCR_WINCTRL1_MEMREAD |
2595 +                         MCF_PCI_PCIIWCR_WINCTRL2_ENABLE |
2596 +                         MCF_PCI_PCIIWCR_WINCTRL2_IO;
2597 +
2598 +       /* initiator control reg */
2599 +       MCF_PCI_PCIICR = 0x00ff;
2600 +
2601 +       /* type 0 - command */
2602 +       MCF_PCI_PCISCR = MCF_PCI_PCISCR_MW |    /* mem write/inval */
2603 +                        MCF_PCI_PCISCR_B |     /* bus master enable */
2604 +                        MCF_PCI_PCISCR_M;      /* mem access enable */
2605 +
2606 +       /* type 0 - config reg */
2607 +       MCF_PCI_PCICR1 = MCF_PCI_PCICR1_CACHELINESIZE(8) |
2608 +                        MCF_PCI_PCICR1_LATTIMER(0xff);
2609 +
2610 +       /* type 0 - config 2 reg */
2611 +       MCF_PCI_PCICR2 = 0;
2612 +
2613 +       /* target control reg */
2614 +       MCF_PCI_PCITCR2 = MCF_PCI_PCITCR2_B0E |
2615 +                         MCF_PCI_PCITCR2_B4E;
2616 +
2617 +       /* translate addresses from PCI[0] to CF[SDRAM] */
2618 +       MCF_PCI_PCITBATR0 = MCF_RAMBAR1 | MCF_PCI_PCITBATR0_EN;
2619 +       MCF_PCI_PCITBATR4 = MCF_RAMBAR1 | MCF_PCI_PCITBATR4_EN;
2620 +
2621 +       /* setup controller interrupt handlers */
2622 +       if (request_irq(55+128, mcf5445x_pci_interrupt, IRQF_SHARED,
2623 +                       "PCI Controller", NULL))
2624 +               printk(KERN_ERR "PCI: Unable to register controller irq\n");
2625 +
2626 +       if (request_irq (56+128, mcf5445x_pci_arb_interrupt, IRQF_SHARED, "PCI Arbiter", NULL))
2627 +               printk(KERN_ERR "PCI: Unable to register arbiter irq\n");
2628 +
2629 +       /* global control - clear reset bit */
2630 +       MCF_PCI_PCIGSCR = MCF_PCI_PCIGSCR_SEE |
2631 +                         MCF_PCI_PCIGSCR_PEE;
2632 +
2633 +       /* let everything settle */
2634 +       udelay(1000);
2635 +
2636 +       /* allocate bus ioport resource */
2637 +       if (request_resource(&ioport_resource, &pci_ioport_resource) < 0)
2638 +               printk(KERN_ERR "PCI: Unable to alloc ioport resource\n");
2639 +
2640 +       /* allocate bus iomem resource */
2641 +       if (request_resource(&iomem_resource, &pci_iomem_resource) < 0)
2642 +               printk(KERN_ERR "PCI: Unable to alloc iomem resource\n");
2643 +
2644 +       /* setup FPGA to route PCI to IRQ3(67), SW7 to IRQ7, SW6 to IRQ4 */
2645 +       set_fpga(FPGA_PCI_IRQ_ENABLE, 0x00000000);
2646 +       set_fpga(FPGA_PCI_IRQ_ROUTE, 0x00000039);
2647 +       set_fpga(FPGA_SEVEN_LED, 0x000000FF);
2648 +
2649 +       raw_pci_ops = &mcf5445x_pci_ops;
2650 +
2651 +       return 0;
2652 +}
2653 +
2654 +/*
2655 + * DEBUGGING
2656 + */
2657 +
2658 +#ifdef DEBUG
2659 +struct regdump {
2660 +       u32 addr;
2661 +       char regname[16];
2662 +};
2663 +
2664 +struct regdump type0regs[] = {
2665 +       { 0xfc0a8000, "PCIIDR" },
2666 +       { 0xfc0a8004, "PCISCR" },
2667 +       { 0xfc0a8008, "PCICCRIR" },
2668 +       { 0xfc0a800c, "PCICR1" },
2669 +       { 0xfc0a8010, "PCIBAR0" },
2670 +       { 0xfc0a8014, "PCIBAR1" },
2671 +       { 0xfc0a8018, "PCIBAR2" },
2672 +       { 0xfc0a801c, "PCIBAR3" },
2673 +       { 0xfc0a8020, "PCIBAR4" },
2674 +       { 0xfc0a8024, "PCIBAR5" },
2675 +       { 0xfc0a8028, "PCICCPR" },
2676 +       { 0xfc0a802c, "PCISID" },
2677 +       { 0xfc0a8030, "PCIERBAR" },
2678 +       { 0xfc0a8034, "PCICPR" },
2679 +       { 0xfc0a803c, "PCICR2" },
2680 +       { 0, "" }
2681 +};
2682 +
2683 +struct regdump genregs[] = {
2684 +       { 0xfc0a8060, "PCIGSCR" },
2685 +       { 0xfc0a8064, "PCITBATR0" },
2686 +       { 0xfc0a8068, "PCITBATR1" },
2687 +       { 0xfc0a806c, "PCITCR1" },
2688 +       { 0xfc0a8070, "PCIIW0BTAR" },
2689 +       { 0xfc0a8074, "PCIIW1BTAR" },
2690 +       { 0xfc0a8078, "PCIIW2BTAR" },
2691 +       { 0xfc0a8080, "PCIIWCR" },
2692 +       { 0xfc0a8084, "PCIICR" },
2693 +       { 0xfc0a8088, "PCIISR" },
2694 +       { 0xfc0a808c, "PCITCR2" },
2695 +       { 0xfc0a8090, "PCITBATR0" },
2696 +       { 0xfc0a8094, "PCITBATR1" },
2697 +       { 0xfc0a8098, "PCITBATR2" },
2698 +       { 0xfc0a809c, "PCITBATR3" },
2699 +       { 0xfc0a80a0, "PCITBATR4" },
2700 +       { 0xfc0a80a4, "PCITBATR5" },
2701 +       { 0xfc0a80a8, "PCIINTR" },
2702 +       { 0xfc0a80f8, "PCICAR" },
2703 +       { 0, "" }
2704 +};
2705 +
2706 +struct regdump arbregs[] = {
2707 +       { 0xfc0ac000, "PACR" },
2708 +       { 0xfc0ac004, "PASR" }, /* documentation error */
2709 +       { 0, "" }
2710 +};
2711 +
2712 +/*
2713 + * void mcf5445x_pci_dumpregs()
2714 + *
2715 + * Dump out all the PCI registers
2716 + */
2717 +void
2718 +mcf5445x_pci_dumpregs(void)
2719 +{
2720 +       struct regdump *reg;
2721 +
2722 +       printk(KERN_INFO "*** MCF5445x PCI TARGET 0 REGISTERS ***\n");
2723 +
2724 +       reg = type0regs;
2725 +       while (reg->addr) {
2726 +               printk(KERN_INFO "0x%08x  0x%08x  %s\n", reg->addr,
2727 +                       *((u32 *)reg->addr), reg->regname);
2728 +               reg++;
2729 +       }
2730 +
2731 +       printk(KERN_INFO "\n*** MCF5445x PCI GENERAL REGISTERS ***\n");
2732 +       reg = genregs;
2733 +       while (reg->addr) {
2734 +               printk(KERN_INFO "0x%08x  0x%08x  %s\n", reg->addr,
2735 +                       *((u32 *)reg->addr), reg->regname);
2736 +               reg++;
2737 +       }
2738 +       printk(KERN_INFO "\n*** MCF5445x PCI ARBITER REGISTERS ***\n");
2739 +       reg = arbregs;
2740 +       while (reg->addr) {
2741 +               printk(KERN_INFO "0x%08x  0x%08x  %s\n", reg->addr,
2742 +                       *((u32 *)reg->addr), reg->regname);
2743 +               reg++;
2744 +       }
2745 +}
2746 +#endif /* DEBUG */
2747 --- /dev/null
2748 +++ b/arch/m68k/coldfire/muldi3.S
2749 @@ -0,0 +1,64 @@
2750 +/*
2751 + * Coldfire muldi3 assembly version
2752 + */
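+/*
+ * Rough sketch of the approach used below: ColdFire's mulsl produces
+ * only the low 32 bits of a product, so the low words of the two
+ * 64-bit operands are split into 16-bit halves, the partial products
+ * are recombined with explicit carry handling, and the cross terms
+ * (high word of one operand times low word of the other) are then
+ * added into the upper 32 bits of the result.
+ */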
2753 +
2754 +#include <linux/linkage.h>
2755 +.globl __muldi3
2756 +
2757 +ENTRY(__muldi3)
2758 +       linkw   %fp,#0
2759 +       lea     %sp@(-32),%sp
2760 +       moveml  %d2-%d7/%a2-%a3,%sp@
2761 +       moveal  %fp@(8), %a2
2762 +       moveal  %fp@(12), %a3
2763 +       moveal  %fp@(16), %a0
2764 +       moveal  %fp@(20),%a1
2765 +       movel   %a3,%d2
2766 +       andil   #65535,%d2
2767 +       movel   %a3,%d3
2768 +       clrw    %d3
2769 +       swap    %d3
2770 +       movel   %a1,%d0
2771 +       andil   #65535,%d0
2772 +       movel   %a1,%d1
2773 +       clrw    %d1
2774 +       swap    %d1
2775 +       movel   %d2,%d7
2776 +       mulsl   %d0,%d7
2777 +       movel   %d2,%d4
2778 +       mulsl   %d1,%d4
2779 +       movel   %d3,%d2
2780 +       mulsl   %d0,%d2
2781 +       mulsl   %d1,%d3
2782 +       movel   %d7,%d0
2783 +       clrw    %d0
2784 +       swap    %d0
2785 +       addl    %d0,%d4
2786 +       addl    %d2,%d4
2787 +       cmpl    %d4,%d2
2788 +       blss    1f
2789 +       addil   #65536,%d3
2790 +1:
2791 +       movel   %d4,%d0
2792 +       clrw    %d0
2793 +       swap    %d0
2794 +       movel   %d3,%d5
2795 +       addl    %d0,%d5
2796 +       movew   %d4,%d6
2797 +       swap    %d6
2798 +       movew   %d7,%d6
2799 +       movel   %d5,%d0
2800 +       movel   %d6,%d1
2801 +       movel   %a3,%d2
2802 +       movel   %a0,%d3
2803 +       mulsl   %d3,%d2
2804 +       movel   %a2,%d3
2805 +       movel   %a1,%d4
2806 +       mulsl   %d4,%d3
2807 +       addl    %d3,%d2
2808 +       movel   %d2,%d0
2809 +       addl    %d5,%d0
2810 +       moveml  %sp@, %d2-%d7/%a2-%a3
2811 +       lea     %sp@(32),%sp
2812 +       unlk    %fp
2813 +       rts
2814 --- /dev/null
2815 +++ b/arch/m68k/coldfire/pci.c
2816 @@ -0,0 +1,245 @@
2817 +/*
2818 + * linux/arch/m68k/coldfire/pci.c
2819 + *
2820 + * PCI initialization for Coldfire architectures.
2821 + *
2822 + * Currently Supported:
2823 + *     M5445x
2824 + *
2825 + * Copyright (c) 2007 Freescale Semiconductor, Inc.
2826 + *     Kurt Mahan <kmahan@freescale.com>
2827 + */
2828 +
2829 +#include <linux/kernel.h>
2830 +#include <linux/init.h>
2831 +#include <linux/pci.h>
2832 +
2833 +#include <asm/mcfsim.h>
2834 +#include <asm/pci.h>
2835 +
2836 +/* pci ops for reading/writing config */
2837 +struct pci_raw_ops *raw_pci_ops;
2838 +
2839 +/* pci debug flag */
2840 +static int debug_pci;
2841 +
2842 +#ifdef CONFIG_M54455
2843 +extern int init_mcf5445x_pci(void);
2844 +extern void mcf5445x_conf_device(struct pci_dev *dev);
2845 +extern void mcf5445x_pci_dumpregs(void);
2846 +
2847 +extern struct resource pci_ioport_resource;
2848 +extern struct resource pci_iomem_resource;
2849 +#endif
2850 +
2851 +static int
2852 +pci_read(struct pci_bus *bus, unsigned int devfn, int where,
2853 +        int size, u32 *value)
2854 +{
2855 +       return raw_pci_ops->read(0, bus->number, devfn, where, size, value);
2856 +}
2857 +
2858 +static int
2859 +pci_write(struct pci_bus *bus, unsigned int devfn, int where,
2860 +         int size, u32 value)
2861 +{
2862 +       return raw_pci_ops->write(0, bus->number, devfn, where, size, value);
2863 +}
2864 +
2865 +struct pci_ops pci_root_ops = {
2866 +       .read = pci_read,
2867 +       .write = pci_write,
2868 +};
2869 +
2870 +/*
2871 + * pcibios_setup(char *)
2872 + *
2873 + * Initialize the pcibios based on cmd line params.
2874 + */
2875 +char * __init
2876 +pcibios_setup(char *str)
2877 +{
2878 +       if (!strcmp(str, "debug")) {
2879 +               debug_pci = 1;
2880 +               return NULL;
2881 +       }
2882 +       return str;
2883 +}
2884 +
2885 +/*
2886 + * We need to avoid collisions with `mirrored' VGA ports
2887 + * and other strange ISA hardware, so we always want the
2888 + * addresses to be allocated in the 0x000-0x0ff region
2889 + * modulo 0x400.
2890 + *
2891 + * Why? Because some silly external IO cards only decode
2892 + * the low 10 bits of the IO address. The 0x00-0xff region
2893 + * is reserved for motherboard devices that decode all 16
2894 + * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
2895 + * but we want to try to avoid allocating at 0x2900-0x2bff
2896 + * which might be mirrored at 0x0100-0x03ff..
2897 + */
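+/*
+ * For example (illustration only): a request that would start at
+ * 0x2950 has bits 0x300 set, so it is bumped to the next 1 KB
+ * boundary: (0x2950 + 0x3ff) & ~0x3ff == 0x2c00, which again lies in
+ * the safe 0x000-0x0ff range modulo 0x400.
+ */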
2898 +void
2899 +pcibios_align_resource(void *data, struct resource *res, resource_size_t size,
2900 +                      resource_size_t align)
2901 +{
2902 +       struct pci_dev *dev = data;
2903 +
2904 +       if (res->flags & IORESOURCE_IO) {
2905 +               resource_size_t start = res->start;
2906 +
2907 +               if (size > 0x100)
2908 +                       printk(KERN_ERR "PCI: I/O Region %s/%d too large"
2909 +                              " (%ld bytes)\n", pci_name(dev),
2910 +                              dev->resource - res, (long int)size);
2911 +
2912 +               if (start & 0x300) {
2913 +                       start = (start + 0x3ff) & ~0x3ff;
2914 +                       res->start = start;
2915 +               }
2916 +       }
2917 +}
2918 +
2919 +/*
2920 + * Swizzle the device pin each time we cross a bridge
2921 + * and return the slot number.
2922 + */
2923 +static u8 __devinit
2924 +pcibios_swizzle(struct pci_dev *dev, u8 *pin)
2925 +{
2926 +       return 0;
2927 +}
2928 +
2929 +/*
2930 + * Map a slot/pin to an IRQ.
2931 + */
2932 +static int
2933 +pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
2934 +{
2935 +       return 0x43;
2936 +}
2937 +
2938 +/*
2939 + * pcibios_update_irq(struct pci_dev *dev, int irq)
2940 + *
2941 + * Update a PCI interrupt.
2942 + */
2943 +void __init
2944 +pcibios_update_irq(struct pci_dev *dev, int irq)
2945 +{
2946 +       pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
2947 +}
2948 +
2949 +/*
2950 + * pcibios_enable_device(struct pci_dev *dev, int mask)
2951 + *
2952 + * Enable a device on the PCI bus.
2953 + */
2954 +int
2955 +pcibios_enable_device(struct pci_dev *dev, int mask)
2956 +{
2957 +       u16 cmd, old_cmd;
2958 +       int idx;
2959 +       struct resource *r;
2960 +
2961 +       pci_read_config_word(dev, PCI_COMMAND, &cmd);
2962 +       old_cmd = cmd;
2963 +       for (idx = 0; idx < 6; idx++) {
2964 +               r = &dev->resource[idx];
2965 +               if (!r->start && r->end) {
2966 +                       printk(KERN_ERR "PCI: Device %s not available because "
2967 +                              "of resource collisions\n", pci_name(dev));
2968 +                       return -EINVAL;
2969 +               }
2970 +               if (r->flags & IORESOURCE_IO)
2971 +                       cmd |= PCI_COMMAND_IO;
2972 +               if (r->flags & IORESOURCE_MEM)
2973 +                       cmd |= PCI_COMMAND_MEMORY;
2974 +       }
2975 +       if (cmd != old_cmd) {
2976 +               printk("PCI: Enabling device %s (%04x -> %04x)\n",
2977 +                      pci_name(dev), old_cmd, cmd);
2978 +               pci_write_config_word(dev, PCI_COMMAND, cmd);
2979 +#ifdef CONFIG_M54455
2980 +               mcf5445x_conf_device(dev);
2981 +#endif
2982 +       }
2983 +
2984 +       return 0;
2985 +}
2986 +
2987 +/*
2988 + * pcibios_fixup_bus(struct pci_bus *bus)
2989 + */
2990 +void __init
2991 +pcibios_fixup_bus(struct pci_bus *bus)
2992 +{
2993 +       struct pci_dev *dev = bus->self;
2994 +
2995 +       if (!dev) {
2996 +               /* Root bus. */
2997 +#ifdef CONFIG_M54455
2998 +               bus->resource[0] = &pci_ioport_resource;
2999 +               bus->resource[1] = &pci_iomem_resource;
3000 +#endif
3001 +       }
3002 +}
3003 +
3004 +/*
3005 + * pcibios_init(void)
3006 + *
3007 + * Allocate/initialize low level pci bus/devices.
3008 + */
3009 +static int __init
3010 +pcibios_init(void)
3011 +{
3012 +       struct pci_bus *bus;
3013 +
3014 +       if (!raw_pci_ops) {
3015 +               printk(KERN_WARNING "PCIBIOS: FATAL: NO PCI Hardware found\n");
3016 +               return 0;
3017 +       }
3018 +
3019 +       /* allocate and scan the (only) bus */
3020 +       bus = pci_scan_bus_parented(NULL, 0, &pci_root_ops, NULL);
3021 +
3022 +       /* setup everything */
3023 +       if (bus) {
3024 +               /* compute the bridge window sizes */
3025 +               pci_bus_size_bridges(bus);
3026 +
3027 +               /* (re)assign device resources */
3028 +               pci_bus_assign_resources(bus);
3029 +
3030 +               /* add the bus to the system */
3031 +               pci_bus_add_devices(bus);
3032 +
3033 +               /* fixup irqs */
3034 +               pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
3035 +       }
3036 +
3037 +       return 0;
3038 +}
3039 +
3040 +/*
3041 + * pci_init(void)
3042 + *
3043 + * Initialize the PCI Hardware.
3044 + */
3045 +static int __init
3046 +pci_init(void)
3047 +{
3048 +#if defined(CONFIG_M54455)
3049 +       init_mcf5445x_pci();
3050 +#endif
3051 +       if (!raw_pci_ops)
3052 +               printk(KERN_ERR "PCI: FATAL: NO PCI Detected\n");
3053 +
3054 +       return 0;
3055 +}
3056 +
3057 +/* low level hardware (first) */
3058 +arch_initcall(pci_init);
3059 +
3060 +/* basic bios init (second) */
3061 +subsys_initcall(pcibios_init);
3062 --- /dev/null
3063 +++ b/arch/m68k/coldfire/signal.c
3064 @@ -0,0 +1,868 @@
3065 +/*
3066 + *  linux/arch/m68k/kernel/signal.c
3067 + *
3068 + *  Copyright (C) 1991, 1992  Linus Torvalds
3069 + *
3070 + * This file is subject to the terms and conditions of the GNU General Public
3071 + * License.  See the file COPYING in the main directory of this archive
3072 + * for more details.
3073 + */
3074 +
3075 +/*
3076 + * Derived from m68k/kernel/signal.c and the original authors are credited
3077 + * there.
3078 + *
3079 + * Coldfire support by:
3080 + * Matt Waddel Matt.Waddel@freescale.com
3081 + * Copyright Freescale Semiconductor, Inc 2007
3082 + */
3083 +
3084 +#include <linux/sched.h>
3085 +#include <linux/mm.h>
3086 +#include <linux/kernel.h>
3087 +#include <linux/signal.h>
3088 +#include <linux/syscalls.h>
3089 +#include <linux/errno.h>
3090 +#include <linux/wait.h>
3091 +#include <linux/ptrace.h>
3092 +#include <linux/unistd.h>
3093 +#include <linux/stddef.h>
3094 +#include <linux/highuid.h>
3095 +#include <linux/personality.h>
3096 +#include <linux/tty.h>
3097 +#include <linux/binfmts.h>
3098 +
3099 +#include <asm/setup.h>
3100 +#include <asm/cf_uaccess.h>
3101 +#include <asm/cf_pgtable.h>
3102 +#include <asm/traps.h>
3103 +#include <asm/ucontext.h>
3104 +
3105 +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
3106 +
3107 +asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
3108 +
3109 +const int frame_extra_sizes[16] = {
3110 +  [1]  = -1,
3111 +  [2]  = sizeof(((struct frame *)0)->un.fmt2),
3112 +  [3]  = sizeof(((struct frame *)0)->un.fmt3),
3113 +  [4]  = 0,
3114 +  [5]  = -1,
3115 +  [6]  = -1,
3116 +  [7]  = sizeof(((struct frame *)0)->un.fmt7),
3117 +  [8]  = -1,
3118 +  [9]  = sizeof(((struct frame *)0)->un.fmt9),
3119 +  [10] = sizeof(((struct frame *)0)->un.fmta),
3120 +  [11] = sizeof(((struct frame *)0)->un.fmtb),
3121 +  [12] = -1,
3122 +  [13] = -1,
3123 +  [14] = -1,
3124 +  [15] = -1,
3125 +};
3126 +
3127 +/*
3128 + * Atomically swap in the new signal mask, and wait for a signal.
3129 + */
3130 +asmlinkage int do_sigsuspend(struct pt_regs *regs)
3131 +{
3132 +       old_sigset_t mask = regs->d3;
3133 +       sigset_t saveset;
3134 +
3135 +       mask &= _BLOCKABLE;
3136 +       spin_lock_irq(&current->sighand->siglock);
3137 +       saveset = current->blocked;
3138 +       siginitset(&current->blocked, mask);
3139 +       recalc_sigpending();
3140 +       spin_unlock_irq(&current->sighand->siglock);
3141 +
3142 +       regs->d0 = -EINTR;
3143 +       while (1) {
3144 +               current->state = TASK_INTERRUPTIBLE;
3145 +               schedule();
3146 +               if (do_signal(&saveset, regs))
3147 +                       return -EINTR;
3148 +       }
3149 +}
3150 +
3151 +asmlinkage int
3152 +do_rt_sigsuspend(struct pt_regs *regs)
3153 +{
3154 +       sigset_t __user *unewset = (sigset_t __user *)regs->d1;
3155 +       size_t sigsetsize = (size_t)regs->d2;
3156 +       sigset_t saveset, newset;
3157 +
3158 +       /* XXX: Don't preclude handling different sized sigset_t's.  */
3159 +       if (sigsetsize != sizeof(sigset_t))
3160 +               return -EINVAL;
3161 +
3162 +       if (copy_from_user(&newset, unewset, sizeof(newset)))
3163 +               return -EFAULT;
3164 +       sigdelsetmask(&newset, ~_BLOCKABLE);
3165 +
3166 +       spin_lock_irq(&current->sighand->siglock);
3167 +       saveset = current->blocked;
3168 +       current->blocked = newset;
3169 +       recalc_sigpending();
3170 +       spin_unlock_irq(&current->sighand->siglock);
3171 +
3172 +       regs->d0 = -EINTR;
3173 +       while (1) {
3174 +               current->state = TASK_INTERRUPTIBLE;
3175 +               schedule();
3176 +               if (do_signal(&saveset, regs))
3177 +                       return -EINTR;
3178 +       }
3179 +}
3180 +
3181 +asmlinkage int
3182 +sys_sigaction(int sig, const struct old_sigaction __user *act,
3183 +             struct old_sigaction __user *oact)
3184 +{
3185 +       struct k_sigaction new_ka, old_ka;
3186 +       int ret;
3187 +
3188 +       if (act) {
3189 +               old_sigset_t mask;
3190 +               if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3191 +                   __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3192 +                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
3193 +                       return -EFAULT;
3194 +               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
3195 +               __get_user(mask, &act->sa_mask);
3196 +               siginitset(&new_ka.sa.sa_mask, mask);
3197 +       }
3198 +
3199 +       ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3200 +
3201 +       if (!ret && oact) {
3202 +               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3203 +                   __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3204 +                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
3205 +                       return -EFAULT;
3206 +               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3207 +               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
3208 +       }
3209 +
3210 +       return ret;
3211 +}
3212 +
3213 +asmlinkage int
3214 +sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
3215 +{
3216 +       return do_sigaltstack(uss, uoss, rdusp());
3217 +}
3218 +
3219 +
3220 +/*
3221 + * Do a signal return; undo the signal stack.
3222 + *
3223 + * Keep the return code on the stack quadword aligned!
3224 + * That makes the cache flush below easier.
3225 + */
3226 +
3227 +struct sigframe
3228 +{
3229 +       char __user *pretcode;
3230 +       int sig;
3231 +       int code;
3232 +       struct sigcontext __user *psc;
3233 +       char retcode[16];
3234 +       unsigned long extramask[_NSIG_WORDS-1];
3235 +       struct sigcontext sc;
3236 +};
3237 +
3238 +struct rt_sigframe
3239 +{
3240 +       char __user *pretcode;
3241 +       int sig;
3242 +       struct siginfo __user *pinfo;
3243 +       void __user *puc;
3244 +       char retcode[16];
3245 +       struct siginfo info;
3246 +       struct ucontext uc;
3247 +};
3248 +
3249 +#define FPCONTEXT_SIZE 216
3250 +#define uc_fpstate     uc_filler[0]
3251 +#define uc_formatvec   uc_filler[FPCONTEXT_SIZE/4]
3252 +#define uc_extra       uc_filler[FPCONTEXT_SIZE/4+1]
3253 +
3254 +#ifdef CONFIG_FPU
3255 +static unsigned char fpu_version; /* version num of fpu, set by setup_frame */
3256 +
3257 +static inline int restore_fpu_state(struct sigcontext *sc)
3258 +{
3259 +       int err = 1;
3260 +
3261 +       if (FPU_IS_EMU) {
3262 +           /* restore registers */
3263 +           memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
3264 +           memcpy(current->thread.fp, sc->sc_fpregs, 24);
3265 +           return 0;
3266 +       }
3267 +
3268 +       if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
3269 +           /* Verify the frame format.  */
3270 +           if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
3271 +               goto out;
3272 +           if (CPU_IS_020_OR_030) {
3273 +               if (m68k_fputype & FPU_68881 &&
3274 +                   !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
3275 +                   goto out;
3276 +               if (m68k_fputype & FPU_68882 &&
3277 +                   !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
3278 +                   goto out;
3279 +           } else if (CPU_IS_040) {
3280 +               if (!(sc->sc_fpstate[1] == 0x00 ||
3281 +                     sc->sc_fpstate[1] == 0x28 ||
3282 +                     sc->sc_fpstate[1] == 0x60))
3283 +                   goto out;
3284 +           } else if (CPU_IS_060) {
3285 +               if (!(sc->sc_fpstate[3] == 0x00 ||
3286 +                     sc->sc_fpstate[3] == 0x60 ||
3287 +                     sc->sc_fpstate[3] == 0xe0))
3288 +                   goto out;
3289 +           } else
3290 +               goto out;
3291 +
3292 +       }
3293 +       err = 0;
3294 +
3295 +out:
3296 +       return err;
3297 +}
3298 +
3299 +static inline int rt_restore_fpu_state(struct ucontext __user *uc)
3300 +{
3301 +       unsigned char fpstate[FPCONTEXT_SIZE];
3302 +       int context_size = CPU_IS_060 ? 8 : 0;
3303 +       fpregset_t fpregs;
3304 +       int err = 1;
3305 +
3306 +       if (FPU_IS_EMU) {
3307 +               /* restore fpu control register */
3308 +               if (__copy_from_user(current->thread.fpcntl,
3309 +                               uc->uc_mcontext.fpregs.f_fpcntl, 12))
3310 +                       goto out;
3311 +               /* restore all other fpu registers */
3312 +               if (__copy_from_user(current->thread.fp,
3313 +                               uc->uc_mcontext.fpregs.f_fpregs, 96))
3314 +                       goto out;
3315 +               return 0;
3316 +       }
3317 +
3318 +       if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
3319 +               goto out;
3320 +       if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
3321 +               if (!CPU_IS_060)
3322 +                       context_size = fpstate[1];
3323 +               /* Verify the frame format.  */
3324 +               if (!CPU_IS_060 && (fpstate[0] != fpu_version))
3325 +                       goto out;
3326 +               if (CPU_IS_020_OR_030) {
3327 +                       if (m68k_fputype & FPU_68881 &&
3328 +                           !(context_size == 0x18 || context_size == 0xb4))
3329 +                               goto out;
3330 +                       if (m68k_fputype & FPU_68882 &&
3331 +                           !(context_size == 0x38 || context_size == 0xd4))
3332 +                               goto out;
3333 +               } else if (CPU_IS_040) {
3334 +                       if (!(context_size == 0x00 ||
3335 +                             context_size == 0x28 ||
3336 +                             context_size == 0x60))
3337 +                               goto out;
3338 +               } else if (CPU_IS_060) {
3339 +                       if (!(fpstate[3] == 0x00 ||
3340 +                             fpstate[3] == 0x60 ||
3341 +                             fpstate[3] == 0xe0))
3342 +                               goto out;
3343 +               } else
3344 +                       goto out;
3345 +               if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
3346 +                                    sizeof(fpregs)))
3347 +                       goto out;
3348 +       }
3349 +       if (context_size &&
3350 +           __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
3351 +                            context_size))
3352 +               goto out;
3353 +       err = 0;
3354 +
3355 +out:
3356 +       return err;
3357 +}
3358 +#endif
3359 +
3360 +static inline int
3361 +restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc,
3362 +       void __user *fp, int *pd0)
3363 +{
3364 +       int fsize, formatvec;
3365 +       struct sigcontext context;
3366 +       int err = 0;
3367 +
3368 +       /* get previous context */
3369 +       if (copy_from_user(&context, usc, sizeof(context)))
3370 +               goto badframe;
3371 +
3372 +       /* restore passed registers */
3373 +       regs->d1 = context.sc_d1;
3374 +       regs->a0 = context.sc_a0;
3375 +       regs->a1 = context.sc_a1;
3376 +       regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
3377 +       regs->pc = context.sc_pc;
3378 +       regs->orig_d0 = -1;             /* disable syscall checks */
3379 +       wrusp(context.sc_usp);
3380 +       formatvec = context.sc_formatvec;
3381 +       regs->format = formatvec >> 12;
3382 +       regs->vector = formatvec & 0xfff;
3383 +
3384 +#ifdef CONFIG_FPU
3385 +       err = restore_fpu_state(&context);
3386 +#endif
3387 +
3388 +       fsize = frame_extra_sizes[regs->format];
3389 +       if (fsize < 0) {
3390 +               /*
3391 +                * user process trying to return with weird frame format
3392 +                */
3393 +#ifdef DEBUG
3394 +               printk(KERN_DEBUG "user process returning with weird "
3395 +                       "frame format\n");
3396 +#endif
3397 +               goto badframe;
3398 +       }
3399 +
3400 +       /* OK.  Make room on the supervisor stack for the extra junk,
3401 +        * if necessary.
3402 +        */
3403 +
3404 +       {
3405 +               struct switch_stack *sw = (struct switch_stack *)regs - 1;
3406 +               regs->d0 = context.sc_d0;
3407 +#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
3408 +               __asm__ __volatile__
3409 +                       ("   movel %0,%/sp\n\t"
3410 +                        "   bra ret_from_signal\n"
3411 +                        "4:\n"
3412 +                        ".section __ex_table,\"a\"\n"
3413 +                        "   .align 4\n"
3414 +                        "   .long 2b,4b\n"
3415 +                        ".previous"
3416 +                        : /* no outputs, it doesn't ever return */
3417 +                        : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
3418 +                          "n" (frame_offset), "a" (fp)
3419 +                        : "a0");
3420 +#undef frame_offset
3421 +               /*
3422 +                * If we ever get here an exception occurred while
3423 +                * building the above stack-frame.
3424 +                */
3425 +               goto badframe;
3426 +       }
3427 +
3428 +       *pd0 = context.sc_d0;
3429 +       return err;
3430 +
3431 +badframe:
3432 +       return 1;
3433 +}
3434 +
3435 +static inline int
3436 +rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
3437 +                   struct ucontext __user *uc, int *pd0)
3438 +{
3439 +       int fsize, temp;
3440 +       greg_t __user *gregs = uc->uc_mcontext.gregs;
3441 +       unsigned long usp;
3442 +       int err;
3443 +
3444 +       err = __get_user(temp, &uc->uc_mcontext.version);
3445 +       if (temp != MCONTEXT_VERSION)
3446 +               goto badframe;
3447 +       /* restore passed registers */
3448 +       err |= __get_user(regs->d0, &gregs[0]);
3449 +       err |= __get_user(regs->d1, &gregs[1]);
3450 +       err |= __get_user(regs->d2, &gregs[2]);
3451 +       err |= __get_user(regs->d3, &gregs[3]);
3452 +       err |= __get_user(regs->d4, &gregs[4]);
3453 +       err |= __get_user(regs->d5, &gregs[5]);
3454 +       err |= __get_user(sw->d6, &gregs[6]);
3455 +       err |= __get_user(sw->d7, &gregs[7]);
3456 +       err |= __get_user(regs->a0, &gregs[8]);
3457 +       err |= __get_user(regs->a1, &gregs[9]);
3458 +       err |= __get_user(regs->a2, &gregs[10]);
3459 +       err |= __get_user(sw->a3, &gregs[11]);
3460 +       err |= __get_user(sw->a4, &gregs[12]);
3461 +       err |= __get_user(sw->a5, &gregs[13]);
3462 +       err |= __get_user(sw->a6, &gregs[14]);
3463 +       err |= __get_user(usp, &gregs[15]);
3464 +       wrusp(usp);
3465 +       err |= __get_user(regs->pc, &gregs[16]);
3466 +       err |= __get_user(temp, &gregs[17]);
3467 +       regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
3468 +       regs->orig_d0 = -1;             /* disable syscall checks */
3469 +       err |= __get_user(temp, &uc->uc_formatvec);
3470 +       regs->format = temp >> 12;
3471 +       regs->vector = temp & 0xfff;
3472 +
3473 +#ifdef CONFIG_FPU
3474 +       err |= rt_restore_fpu_state(uc);
3475 +#endif
3476 +
3477 +       if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
3478 +               goto badframe;
3479 +
3480 +       fsize = frame_extra_sizes[regs->format];
3481 +       if (fsize < 0) {
3482 +               /*
3483 +                * user process trying to return with weird frame format
3484 +                */
3485 +#ifdef DEBUG
3486 +               printk(KERN_DEBUG "user process returning with weird "
3487 +                       "frame format\n");
3488 +#endif
3489 +               goto badframe;
3490 +       }
3491 +
3492 +       /* OK.  Make room on the supervisor stack for the extra junk,
3493 +        * if necessary.
3494 +        */
3495 +
3496 +       {
3497 +#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
3498 +               __asm__ __volatile__
3499 +                       ("   movel %0,%/sp\n\t"
3500 +                        "   bra ret_from_signal\n"
3501 +                        "4:\n"
3502 +                        ".section __ex_table,\"a\"\n"
3503 +                        "   .align 4\n"
3504 +                        "   .long 2b,4b\n"
3505 +                        ".previous"
3506 +                        : /* no outputs, it doesn't ever return */
3507 +                        : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
3508 +                          "n" (frame_offset), "a" (&uc->uc_extra)
3509 +                        : "a0");
3510 +#undef frame_offset
3511 +               /*
3512 +                * If we ever get here an exception occurred while
3513 +                * building the above stack-frame.
3514 +                */
3515 +               goto badframe;
3516 +       }
3517 +
3518 +       *pd0 = regs->d0;
3519 +       return err;
3520 +
3521 +badframe:
3522 +       return 1;
3523 +}
3524 +
3525 +asmlinkage int do_sigreturn(unsigned long __unused)
3526 +{
3527 +       struct switch_stack *sw = (struct switch_stack *) &__unused;
3528 +       struct pt_regs *regs = (struct pt_regs *) (sw + 1);
3529 +       unsigned long usp = rdusp();
3530 +       struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
3531 +       sigset_t set;
3532 +       int d0;
3533 +
3534 +       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
3535 +               goto badframe;
3536 +       if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
3537 +           (_NSIG_WORDS > 1 &&
3538 +            __copy_from_user(&set.sig[1], &frame->extramask,
3539 +                             sizeof(frame->extramask))))
3540 +               goto badframe;
3541 +
3542 +       sigdelsetmask(&set, ~_BLOCKABLE);
3543 +       spin_lock_irq(&current->sighand->siglock);
3544 +       current->blocked = set;
3545 +       recalc_sigpending();
3546 +       spin_unlock_irq(&current->sighand->siglock);
3547 +
3548 +       if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
3549 +               goto badframe;
3550 +       return d0;
3551 +
3552 +badframe:
3553 +       force_sig(SIGSEGV, current);
3554 +       return 0;
3555 +}
3556 +
3557 +asmlinkage int do_rt_sigreturn(unsigned long __unused)
3558 +{
3559 +       struct switch_stack *sw = (struct switch_stack *) &__unused;
3560 +       struct pt_regs *regs = (struct pt_regs *) (sw + 1);
3561 +       unsigned long usp = rdusp();
3562 +       struct rt_sigframe __user *frame =
3563 +               (struct rt_sigframe __user *)(usp - 4);
3564 +       sigset_t set;
3565 +       int d0;
3566 +
3567 +       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
3568 +               goto badframe;
3569 +       if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
3570 +               goto badframe;
3571 +
3572 +       sigdelsetmask(&set, ~_BLOCKABLE);
3573 +       spin_lock_irq(&current->sighand->siglock);
3574 +       current->blocked = set;
3575 +       recalc_sigpending();
3576 +       spin_unlock_irq(&current->sighand->siglock);
3577 +
3578 +       if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
3579 +               goto badframe;
3580 +       return d0;
3581 +
3582 +badframe:
3583 +       force_sig(SIGSEGV, current);
3584 +       return 0;
3585 +}
3586 +
3587 +#ifdef CONFIG_FPU
3588 +/*
3589 + * Set up a signal frame.
3590 + */
3591 +
3592 +static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
3593 +{
3594 +       if (FPU_IS_EMU) {
3595 +               /* save registers */
3596 +               memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
3597 +               memcpy(sc->sc_fpregs, current->thread.fp, 24);
3598 +               return;
3599 +       }
3600 +}
3601 +
3602 +static inline int rt_save_fpu_state(struct ucontext __user *uc,
3603 +       struct pt_regs *regs)
3604 +{
3605 +       int err = 0;
3606 +
3607 +       if (FPU_IS_EMU) {
3608 +               /* save fpu control register */
3609 +               err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
3610 +                               current->thread.fpcntl, 12);
3611 +               /* save all other fpu registers */
3612 +               err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
3613 +                               current->thread.fp, 96);
3614 +               return err;
3615 +       }
3616 +
3617 +       return err;
3618 +}
3619 +#endif
3620 +
3621 +static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
3622 +                            unsigned long mask)
3623 +{
3624 +       sc->sc_mask = mask;
3625 +       sc->sc_usp = rdusp();
3626 +       sc->sc_d0 = regs->d0;
3627 +       sc->sc_d1 = regs->d1;
3628 +       sc->sc_a0 = regs->a0;
3629 +       sc->sc_a1 = regs->a1;
3630 +       sc->sc_sr = regs->sr;
3631 +       sc->sc_pc = regs->pc;
3632 +       sc->sc_formatvec = regs->format << 12 | regs->vector;
3633 +#ifdef CONFIG_FPU
3634 +       save_fpu_state(sc, regs);
3635 +#endif
3636 +}
3637 +
3638 +static inline int rt_setup_ucontext(struct ucontext __user *uc,
3639 +       struct pt_regs *regs)
3640 +{
3641 +       struct switch_stack *sw = (struct switch_stack *)regs - 1;
3642 +       greg_t __user *gregs = uc->uc_mcontext.gregs;
3643 +       int err = 0;
3644 +
3645 +       err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
3646 +       err |= __put_user(regs->d0, &gregs[0]);
3647 +       err |= __put_user(regs->d1, &gregs[1]);
3648 +       err |= __put_user(regs->d2, &gregs[2]);
3649 +       err |= __put_user(regs->d3, &gregs[3]);
3650 +       err |= __put_user(regs->d4, &gregs[4]);
3651 +       err |= __put_user(regs->d5, &gregs[5]);
3652 +       err |= __put_user(sw->d6, &gregs[6]);
3653 +       err |= __put_user(sw->d7, &gregs[7]);
3654 +       err |= __put_user(regs->a0, &gregs[8]);
3655 +       err |= __put_user(regs->a1, &gregs[9]);
3656 +       err |= __put_user(regs->a2, &gregs[10]);
3657 +       err |= __put_user(sw->a3, &gregs[11]);
3658 +       err |= __put_user(sw->a4, &gregs[12]);
3659 +       err |= __put_user(sw->a5, &gregs[13]);
3660 +       err |= __put_user(sw->a6, &gregs[14]);
3661 +       err |= __put_user(rdusp(), &gregs[15]);
3662 +       err |= __put_user(regs->pc, &gregs[16]);
3663 +       err |= __put_user(regs->sr, &gregs[17]);
3664 +       err |= __put_user((regs->format << 12) | regs->vector,
3665 +                         &uc->uc_formatvec);
3666 +#ifdef CONFIG_FPU
3667 +       err |= rt_save_fpu_state(uc, regs);
3668 +#endif
3669 +       return err;
3670 +}
3671 +
3672 +extern void IcacheInvalidateCacheBlock(void *, unsigned long);
3673 +static inline void push_cache(unsigned long vaddr)
3674 +{
3675 +       IcacheInvalidateCacheBlock((void *)vaddr, 8);
3676 +}
3677 +
3678 +static inline void __user *
3679 +get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
3680 +{
3681 +       unsigned long usp;
3682 +
3683 +       /* Default to using normal stack.  */
3684 +       usp = rdusp();
3685 +
3686 +       /* This is the X/Open sanctioned signal stack switching.  */
3687 +       if (ka->sa.sa_flags & SA_ONSTACK) {
3688 +               if (!sas_ss_flags(usp))
3689 +                       usp = current->sas_ss_sp + current->sas_ss_size;
3690 +       }
3691 +       return (void __user *)((usp - frame_size) & -8UL);
3692 +}
3693 +
3694 +static void setup_frame(int sig, struct k_sigaction *ka,
3695 +                        sigset_t *set, struct pt_regs *regs)
3696 +{
3697 +       struct sigframe __user *frame;
3698 +       int fsize = frame_extra_sizes[regs->format];
3699 +       struct sigcontext context;
3700 +       int err = 0;
3701 +
3702 +       if (fsize < 0) {
3703 +#ifdef DEBUG
3704 +               printk(KERN_DEBUG "setup_frame: Unknown frame format %#x\n",
3705 +                       regs->format);
3706 +#endif
3707 +               goto give_sigsegv;
3708 +       }
3709 +
3710 +       frame = get_sigframe(ka, regs, sizeof(*frame));
3711 +
3712 +       err |= __put_user((current_thread_info()->exec_domain
3713 +                       && current_thread_info()->exec_domain->signal_invmap
3714 +                       && sig < 32
3715 +                       ? current_thread_info()->exec_domain->signal_invmap[sig]
3716 +                       : sig),
3717 +                       &frame->sig);
3718 +
3719 +       err |= __put_user(regs->vector, &frame->code);
3720 +       err |= __put_user(&frame->sc, &frame->psc);
3721 +
3722 +       if (_NSIG_WORDS > 1)
3723 +               err |= copy_to_user(frame->extramask, &set->sig[1],
3724 +                                   sizeof(frame->extramask));
3725 +
3726 +       setup_sigcontext(&context, regs, set->sig[0]);
3727 +       err |= copy_to_user(&frame->sc, &context, sizeof(context));
3728 +
3729 +       /* Set up to return from userspace.  */
3730 +       err |= __put_user(frame->retcode, &frame->pretcode);
3731 +       /* moveq #,d0; trap #0 */
3732 +       err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
3733 +                         (long __user *)(frame->retcode));
3734 +
3735 +       if (err)
3736 +               goto give_sigsegv;
3737 +
3738 +       push_cache((unsigned long) &frame->retcode);
3739 +
3740 +       /* Set up registers for signal handler */
3741 +       wrusp((unsigned long) frame);
3742 +       regs->pc = (unsigned long) ka->sa.sa_handler;
3743 +
3744 +adjust_stack:
3745 +       /* Prepare to skip over the extra stuff in the exception frame.  */
3746 +       if (regs->stkadj) {
3747 +               struct pt_regs *tregs =
3748 +                       (struct pt_regs *)((ulong)regs + regs->stkadj);
3749 +#ifdef DEBUG
3750 +               printk(KERN_DEBUG "Performing stackadjust=%04x\n",
3751 +                       regs->stkadj);
3752 +#endif
3753 +               /* This must be copied with decreasing addresses to
3754 +                  handle overlaps.  */
3755 +               tregs->vector = 0;
3756 +               tregs->format = 0;
3757 +               tregs->pc = regs->pc;
3758 +               tregs->sr = regs->sr;
3759 +       }
3760 +       return;
3761 +
3762 +give_sigsegv:
3763 +       force_sigsegv(sig, current);
3764 +       goto adjust_stack;
3765 +}
3766 +
3767 +static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
3768 +       sigset_t *set, struct pt_regs *regs)
3769 +{
3770 +       struct rt_sigframe __user *frame;
3771 +       int fsize = frame_extra_sizes[regs->format];
3772 +       int err = 0;
3773 +
3774 +       if (fsize < 0) {
3775 +#ifdef DEBUG
3776 +               printk(KERN_DEBUG "setup_rt_frame: Unknown frame format %#x\n",
3777 +                       regs->format);
3778 +#endif
3779 +               goto give_sigsegv;
3780 +       }
3781 +
3782 +       frame = get_sigframe(ka, regs, sizeof(*frame));
3783 +
3784 +       if (fsize) {
3785 +               err |= copy_to_user(&frame->uc.uc_extra, regs + 1, fsize);
3786 +               regs->stkadj = fsize;
3787 +       }
3788 +
3789 +       err |= __put_user((current_thread_info()->exec_domain
3790 +                       && current_thread_info()->exec_domain->signal_invmap
3791 +                       && sig < 32
3792 +                       ? current_thread_info()->exec_domain->signal_invmap[sig]
3793 +                       : sig),
3794 +                       &frame->sig);
3795 +       err |= __put_user(&frame->info, &frame->pinfo);
3796 +       err |= __put_user(&frame->uc, &frame->puc);
3797 +       err |= copy_siginfo_to_user(&frame->info, info);
3798 +
3799 +       /* Create the ucontext.  */
3800 +       err |= __put_user(0, &frame->uc.uc_flags);
3801 +       err |= __put_user(NULL, &frame->uc.uc_link);
3802 +       err |= __put_user((void __user *)current->sas_ss_sp,
3803 +                         &frame->uc.uc_stack.ss_sp);
3804 +       err |= __put_user(sas_ss_flags(rdusp()),
3805 +                         &frame->uc.uc_stack.ss_flags);
3806 +       err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
3807 +       err |= rt_setup_ucontext(&frame->uc, regs);
3808 +       err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
3809 +
3810 +       /* Set up to return from userspace.  */
3811 +       err |= __put_user(frame->retcode, &frame->pretcode);
3812 +
3813 +       /* moveq #,d0; andi.l #,D0; trap #0 */
3814 +       err |= __put_user(0x70AD0280, (long *)(frame->retcode + 0));
3815 +       err |= __put_user(0x000000ff, (long *)(frame->retcode + 4));
3816 +       err |= __put_user(0x4e400000, (long *)(frame->retcode + 8));
3817 +
3818 +       if (err)
3819 +               goto give_sigsegv;
3820 +
3821 +       push_cache((unsigned long) &frame->retcode);
3822 +
3823 +       /* Set up registers for signal handler */
3824 +       wrusp((unsigned long) frame);
3825 +       regs->pc = (unsigned long) ka->sa.sa_handler;
3826 +
3827 +adjust_stack:
3828 +       /* Prepare to skip over the extra stuff in the exception frame.  */
3829 +       if (regs->stkadj) {
3830 +               struct pt_regs *tregs =
3831 +                       (struct pt_regs *)((ulong)regs + regs->stkadj);
3832 +#ifdef DEBUG
3833 +               printk(KERN_DEBUG "Performing stackadjust=%04x\n",
3834 +                       regs->stkadj);
3835 +#endif
3836 +               /* This must be copied with decreasing addresses to
3837 +                  handle overlaps.  */
3838 +               tregs->vector = 0;
3839 +               tregs->format = 0;
3840 +               tregs->pc = regs->pc;
3841 +               tregs->sr = regs->sr;
3842 +       }
3843 +       return;
3844 +
3845 +give_sigsegv:
3846 +       force_sigsegv(sig, current);
3847 +       goto adjust_stack;
3848 +}
3849 +
3850 +static inline void
3851 +handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
3852 +{
3853 +       switch (regs->d0) {
3854 +       case -ERESTARTNOHAND:
3855 +               if (!has_handler)
3856 +                       goto do_restart;
3857 +               regs->d0 = -EINTR;
3858 +               break;
3859 +
3860 +       case -ERESTARTSYS:
3861 +               if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
3862 +                       regs->d0 = -EINTR;
3863 +                       break;
3864 +               }
3865 +       /* fallthrough */
3866 +       case -ERESTARTNOINTR:
3867 +do_restart:
3868 +               regs->d0 = regs->orig_d0;
3869 +               regs->pc -= 2;
3870 +               break;
3871 +       }
3872 +}
3873 +
3874 +/*
3875 + * OK, we're invoking a handler
3876 + */
3877 +static void
3878 +handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
3879 +             sigset_t *oldset, struct pt_regs *regs)
3880 +{
3881 +       /* are we from a system call? */
3882 +       if (regs->orig_d0 >= 0)
3883 +               /* If so, check system call restarting.. */
3884 +               handle_restart(regs, ka, 1);
3885 +
3886 +       /* set up the stack frame */
3887 +       if (ka->sa.sa_flags & SA_SIGINFO)
3888 +               setup_rt_frame(sig, ka, info, oldset, regs);
3889 +       else
3890 +               setup_frame(sig, ka, oldset, regs);
3891 +
3892 +       if (ka->sa.sa_flags & SA_ONESHOT)
3893 +               ka->sa.sa_handler = SIG_DFL;
3894 +
3895 +       spin_lock_irq(&current->sighand->siglock);
3896 +       sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
3897 +       if (!(ka->sa.sa_flags & SA_NODEFER))
3898 +               sigaddset(&current->blocked, sig);
3899 +       recalc_sigpending();
3900 +       spin_unlock_irq(&current->sighand->siglock);
3901 +}
3902 +
3903 +/*
3904 + * Note that 'init' is a special process: it doesn't get signals it doesn't
3905 + * want to handle. Thus you cannot kill init even with a SIGKILL even by
3906 + * mistake.
3907 + */
3908 +asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
3909 +{
3910 +       siginfo_t info;
3911 +       struct k_sigaction ka;
3912 +       int signr;
3913 +
3914 +       current->thread.esp0 = (unsigned long) regs;
3915 +
3916 +       if (!oldset)
3917 +               oldset = &current->blocked;
3918 +
3919 +       signr = get_signal_to_deliver(&info, &ka, regs, NULL);
3920 +       if (signr > 0) {
3921 +               /* Whee!  Actually deliver the signal.  */
3922 +               handle_signal(signr, &ka, &info, oldset, regs);
3923 +               return 1;
3924 +       }
3925 +
3926 +       /* Did we come from a system call? */
3927 +       if (regs->orig_d0 >= 0)
3928 +               /* Restart the system call - no handlers present */
3929 +               handle_restart(regs, NULL, 0);
3930 +
3931 +       return 0;
3932 +}
3933 --- /dev/null
3934 +++ b/arch/m68k/coldfire/traps.c
3935 @@ -0,0 +1,454 @@
3936 +/*
3937 + *  linux/arch/m68k/coldfire/traps.c
3938 + *
3939 + *  Copyright (C) 1993, 1994 by Hamish Macdonald
3940 + *
3941 + *  68040 fixes by Michael Rausch
3942 + *  68040 fixes by Martin Apel
3943 + *  68060 fixes by Roman Hodek
3944 + *  68060 fixes by Jesper Skov
3945 + *
3946 + * This file is subject to the terms and conditions of the GNU General Public
3947 + * License.  See the file COPYING in the main directory of this archive
3948 + * for more details.
3949 + */
3950 +
3951 +/*
3952 + * Sets up all exception vectors
3953 + */
3954 +#include <linux/sched.h>
3955 +#include <linux/signal.h>
3956 +#include <linux/kernel.h>
3957 +#include <linux/mm.h>
3958 +#include <linux/module.h>
3959 +#include <linux/types.h>
3960 +#include <linux/a.out.h>
3961 +#include <linux/user.h>
3962 +#include <linux/string.h>
3963 +#include <linux/linkage.h>
3964 +#include <linux/init.h>
3965 +#include <linux/ptrace.h>
3966 +#include <linux/kallsyms.h>
3967 +
3968 +#include <asm/setup.h>
3969 +#include <asm/fpu.h>
3970 +#include <asm/system.h>
3971 +#include <asm/uaccess.h>
3972 +#include <asm/traps.h>
3973 +#include <asm/pgtable.h>
3974 +#include <asm/machdep.h>
3975 +#include <asm/siginfo.h>
3976 +
3977 +static char const * const vec_names[] = {
3978 +       "RESET SP", "RESET PC", "BUS ERROR", "ADDRESS ERROR",
3979 +       "ILLEGAL INSTRUCTION", "ZERO DIVIDE", "CHK", "TRAPcc",
3980 +       "PRIVILEGE VIOLATION", "TRACE", "LINE 1010", "LINE 1111",
3981 +       "UNASSIGNED RESERVED 12", "COPROCESSOR PROTOCOL VIOLATION",
3982 +       "FORMAT ERROR", "UNINITIALIZED INTERRUPT",
3983 +       "UNASSIGNED RESERVED 16", "UNASSIGNED RESERVED 17",
3984 +       "UNASSIGNED RESERVED 18", "UNASSIGNED RESERVED 19",
3985 +       "UNASSIGNED RESERVED 20", "UNASSIGNED RESERVED 21",
3986 +       "UNASSIGNED RESERVED 22", "UNASSIGNED RESERVED 23",
3987 +       "SPURIOUS INTERRUPT", "LEVEL 1 INT", "LEVEL 2 INT", "LEVEL 3 INT",
3988 +       "LEVEL 4 INT", "LEVEL 5 INT", "LEVEL 6 INT", "LEVEL 7 INT",
3989 +       "SYSCALL", "TRAP #1", "TRAP #2", "TRAP #3",
3990 +       "TRAP #4", "TRAP #5", "TRAP #6", "TRAP #7",
3991 +       "TRAP #8", "TRAP #9", "TRAP #10", "TRAP #11",
3992 +       "TRAP #12", "TRAP #13", "TRAP #14", "TRAP #15",
3993 +       "FPCP BSUN", "FPCP INEXACT", "FPCP DIV BY 0", "FPCP UNDERFLOW",
3994 +       "FPCP OPERAND ERROR", "FPCP OVERFLOW", "FPCP SNAN",
3995 +       "FPCP UNSUPPORTED OPERATION",
3996 +       "MMU CONFIGURATION ERROR"
3997 +};
3998 +
3999 +asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
4000 +                            unsigned long error_code);
4001 +asmlinkage void trap_c(struct frame *fp);
4002 +extern void __init coldfire_trap_init(void);
4003 +
4004 +void __init trap_init(void)
4005 +{
4006 +       coldfire_trap_init();
4007 +}
4008 +
4009 +/* The following table converts the FS encoding of a ColdFire
4010 +   exception stack frame into the error_code value needed by
4011 +   do_page_fault. */
4012 +
4013 +static const unsigned char fs_err_code[] = {
4014 +       0,  /* 0000 */
4015 +       0,  /* 0001 */
4016 +       0,  /* 0010 */
4017 +       0,  /* 0011 */
4018 +       1,  /* 0100 */
4019 +       0,  /* 0101 */
4020 +       0,  /* 0110 */
4021 +       0,  /* 0111 */
4022 +       2,  /* 1000 */
4023 +       3,  /* 1001 */
4024 +       2,  /* 1010 */
4025 +       0,  /* 1011 */
4026 +       1,  /* 1100 */
4027 +       1,  /* 1101 */
4028 +       0,  /* 1110 */
4029 +       0   /* 1111 */
4030 +};
4031 +
4032 +#ifdef DEBUG
4033 +static const char *fs_err_msg[16] = {
4034 +       "Normal",
4035 +       "Reserved",
4036 +       "Interrupt during debug service routine",
4037 +       "Reserved",
4038 +       "X Protection",
4039 +       "TLB X miss (opword)",
4040 +       "TLB X miss (ext. word)",
4041 +       "IFP in emulator mode",
4042 +       "W Protection",
4043 +       "Write error",
4044 +       "TLB W miss",
4045 +       "Reserved",
4046 +       "R Protection",
4047 +       "R/RMW Protection",
4048 +       "TLB R miss",
4049 +       "OEP in emulator mode",
4050 +};
4051 +#endif
4052 +
4053 +static inline void access_errorCF(struct frame *fp)
4054 +{
4055 +       unsigned long int mmusr, complainingAddress;
4056 +       unsigned int err_code, fs;
4057 +       int need_page_fault;
4058 +
4059 +       mmusr = fp->ptregs.mmusr;
4060 +       complainingAddress = fp->ptregs.mmuar;
4061 +#ifdef DEBUG
4062 +       printk(KERN_DEBUG "pc %#lx, mmusr %#lx, complainingAddress %#lx\n",
4063 +               fp->ptregs.pc, mmusr, complainingAddress);
4064 +#endif
4065 +
4066 +       /*
4067 +        * error_code:
4068 +        *      bit 0 == 0 means no page found, 1 means protection fault
4069 +        *      bit 1 == 0 means read, 1 means write
4070 +        */
4071 +
4072 +       fs = (fp->ptregs.fs2 << 2) | fp->ptregs.fs1;
4073 +       switch (fs) {
4074 +       case  5:  /* 0101 TLB opword X miss */
4075 +               need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0);
4076 +               complainingAddress = fp->ptregs.pc;
4077 +               break;
4078 +       case  6:  /* 0110 TLB extension word X miss */
4079 +               need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1);
4080 +               complainingAddress = fp->ptregs.pc + sizeof(long);
4081 +               break;
4082 +       case 10:  /* 1010 TLB W miss */
4083 +               need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0);
4084 +               break;
4085 +       case 14: /* 1110 TLB R miss */
4086 +               need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0);
4087 +               break;
4088 +       default:
4089 +               /* 0000 Normal  */
4090 +               /* 0001 Reserved */
4091 +               /* 0010 Interrupt during debug service routine */
4092 +               /* 0011 Reserved */
4093 +               /* 0100 X Protection */
4094 +               /* 0111 IFP in emulator mode */
4095 +               /* 1000 W Protection */
4096 +               /* 1001 Write error */
4097 +               /* 1011 Reserved */
4098 +               /* 1100 R Protection */
4099 +               /* 1101 R/RMW Protection */
4100 +               /* 1111 OEP in emulator mode */
4101 +               need_page_fault = 1;
4102 +               break;
4103 +       }
4104 +
4105 +       if (need_page_fault) {
4106 +               err_code = fs_err_code[fs];
4107 +               if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */
4108 +                       err_code |= 2; /* bit1 - write, bit0 - protection */
4109 +               do_page_fault(&fp->ptregs, complainingAddress, err_code);
4110 +       }
4111 +}
4112 +
4113 +void die_if_kernel(char *str, struct pt_regs *fp, int nr)
4114 +{
4115 +       if (!(fp->sr & PS_S))
4116 +               return;
4117 +
4118 +       console_verbose();
4119 +       printk(KERN_EMERG "%s: %08x\n", str, nr);
4120 +       printk(KERN_EMERG "PC: [<%08lx>]", fp->pc);
4121 +       print_symbol(" %s", fp->pc);
4122 +       printk(KERN_EMERG "\nSR: %04x  SP: %p  a2: %08lx\n",
4123 +              fp->sr, fp, fp->a2);
4124 +       printk(KERN_EMERG "d0: %08lx    d1: %08lx    d2: %08lx    d3: %08lx\n",
4125 +              fp->d0, fp->d1, fp->d2, fp->d3);
4126 +       printk(KERN_EMERG "d4: %08lx    d5: %08lx    a0: %08lx    a1: %08lx\n",
4127 +              fp->d4, fp->d5, fp->a0, fp->a1);
4128 +
4129 +       printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n",
4130 +               current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
4131 +       show_stack(NULL, (unsigned long *)fp);
4132 +       do_exit(SIGSEGV);
4133 +}
4134 +
4135 +asmlinkage void buserr_c(struct frame *fp)
4136 +{
4137 +       unsigned int fs;
4138 +
4139 +       /* Only set esp0 if coming from user mode */
4140 +       if (user_mode(&fp->ptregs))
4141 +               current->thread.esp0 = (unsigned long) fp;
4142 +
4143 +       fs = (fp->ptregs.fs2 << 2) | fp->ptregs.fs1;
4144 +#if defined(DEBUG)
4145 +       printk(KERN_DEBUG "*** Bus Error *** (%x)%s\n", fs,
4146 +               fs_err_msg[fs & 0xf]);
4147 +#endif
4148 +       switch (fs) {
4149 +       case 0x5:
4150 +       case 0x6:
4151 +       case 0x7:
4152 +       case 0x9:
4153 +       case 0xa:
4154 +       case 0xd:
4155 +       case 0xe:
4156 +       case 0xf:
4157 +               access_errorCF(fp);
4158 +               break;
4159 +       default:
4160 +               die_if_kernel("bad frame format", &fp->ptregs, 0);
4161 +#if defined(DEBUG)
4162 +               printk(KERN_DEBUG "Unknown SIGSEGV - 4\n");
4163 +#endif
4164 +               force_sig(SIGSEGV, current);
4165 +       }
4166 +}
4167 +
4168 +
4169 +int kstack_depth_to_print = 48;
4170 +
4171 +void show_stack(struct task_struct *task, unsigned long *stack)
4172 +{
4173 +       unsigned long *endstack, addr, symaddr;
4174 +       extern char _start, _etext;
4175 +       int i;
4176 +
4177 +       if (!stack) {
4178 +               if (task)
4179 +                       stack = (unsigned long *)task->thread.ksp;
4180 +               else
4181 +                       stack = (unsigned long *)&stack;
4182 +       }
4183 +
4184 +       addr = (unsigned long) stack;
4185 +       endstack = (unsigned long *) PAGE_ALIGN(addr);
4186 +
4187 +       printk(KERN_EMERG "Stack from %08lx:", (unsigned long)stack);
4188 +       for (i = 0; i < kstack_depth_to_print; i++) {
4189 +               if (stack + 1 > endstack)
4190 +                       break;
4191 +               if (i % 8 == 0)
4192 +                       printk("\n" KERN_EMERG "       ");
4193 +               symaddr = *stack;
4194 +               printk(KERN_EMERG " %08lx", *stack++);
4195 +               if ((symaddr >= 0xc0000000) && (symaddr < 0xc1000000))
4196 +                       print_symbol("(%s)", symaddr);
4197 +       }
4198 +       printk("\n");
4199 +
4200 +       printk(KERN_EMERG "Call Trace:");
4201 +       i = 0;
4202 +       while (stack + 1 <= endstack) {
4203 +               addr = *stack++;
4204 +               /*
4205 +                * If the address is either in the text segment of the
4206 +                * kernel, or in the region which contains vmalloc'ed
4207 +                * memory, it *may* be the address of a calling
4208 +                * routine; if so, print it so that someone tracing
4209 +                * down the cause of the crash will be able to figure
4210 +                * out the call path that was taken.
4211 +                */
4212 +               if (((addr >= (unsigned long) &_start) &&
4213 +                    (addr <= (unsigned long) &_etext))) {
4214 +                       if (i % 4 == 0)
4215 +                               printk("\n" KERN_EMERG "       ");
4216 +                       printk(KERN_EMERG " [<%08lx>]", addr);
4217 +                       i++;
4218 +               }
4219 +       }
4220 +       printk("\n");
4221 +}
4222 +
4223 +void bad_super_trap(struct frame *fp)
4224 +{
4225 +       console_verbose();
4226 +       if (fp->ptregs.vector < 4*sizeof(vec_names)/sizeof(vec_names[0]))
4227 +               printk(KERN_WARNING "*** %s ***   FORMAT=%X\n",
4228 +                       vec_names[(fp->ptregs.vector) >> 2],
4229 +                       fp->ptregs.format);
4230 +       else
4231 +               printk(KERN_WARNING "*** Exception %d ***   FORMAT=%X\n",
4232 +                       (fp->ptregs.vector) >> 2,
4233 +                       fp->ptregs.format);
4234 +       printk(KERN_WARNING "Current process id is %d\n", current->pid);
4235 +       die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
4236 +}
4237 +
4238 +asmlinkage void trap_c(struct frame *fp)
4239 +{
4240 +       int sig;
4241 +       siginfo_t info;
4242 +
4243 +       if (fp->ptregs.sr & PS_S) {
4244 +               if ((fp->ptregs.vector >> 2) == VEC_TRACE) {
4245 +                       /* traced a trapping instruction */
4246 +                       current->ptrace |= PT_DTRACE;
4247 +               } else
4248 +                       bad_super_trap(fp);
4249 +               return;
4250 +       }
4251 +
4252 +       /* send the appropriate signal to the user program */
4253 +       switch ((fp->ptregs.vector) >> 2) {
4254 +       case VEC_ADDRERR:
4255 +               info.si_code = BUS_ADRALN;
4256 +               sig = SIGBUS;
4257 +               break;
4258 +       case VEC_ILLEGAL:
4259 +       case VEC_LINE10:
4260 +       case VEC_LINE11:
4261 +               info.si_code = ILL_ILLOPC;
4262 +               sig = SIGILL;
4263 +               break;
4264 +       case VEC_PRIV:
4265 +               info.si_code = ILL_PRVOPC;
4266 +               sig = SIGILL;
4267 +               break;
4268 +       case VEC_COPROC:
4269 +               info.si_code = ILL_COPROC;
4270 +               sig = SIGILL;
4271 +               break;
4272 +       case VEC_TRAP1: /* gdbserver breakpoint */
4273 +               fp->ptregs.pc -= 2;
4274 +               info.si_code = TRAP_TRACE;
4275 +               sig = SIGTRAP;
4276 +               break;
4277 +       case VEC_TRAP2:
4278 +       case VEC_TRAP3:
4279 +       case VEC_TRAP4:
4280 +       case VEC_TRAP5:
4281 +       case VEC_TRAP6:
4282 +       case VEC_TRAP7:
4283 +       case VEC_TRAP8:
4284 +       case VEC_TRAP9:
4285 +       case VEC_TRAP10:
4286 +       case VEC_TRAP11:
4287 +       case VEC_TRAP12:
4288 +       case VEC_TRAP13:
4289 +       case VEC_TRAP14:
4290 +               info.si_code = ILL_ILLTRP;
4291 +               sig = SIGILL;
4292 +               break;
4293 +       case VEC_FPBRUC:
4294 +       case VEC_FPOE:
4295 +       case VEC_FPNAN:
4296 +               info.si_code = FPE_FLTINV;
4297 +               sig = SIGFPE;
4298 +               break;
4299 +       case VEC_FPIR:
4300 +               info.si_code = FPE_FLTRES;
4301 +               sig = SIGFPE;
4302 +               break;
4303 +       case VEC_FPDIVZ:
4304 +               info.si_code = FPE_FLTDIV;
4305 +               sig = SIGFPE;
4306 +               break;
4307 +       case VEC_FPUNDER:
4308 +               info.si_code = FPE_FLTUND;
4309 +               sig = SIGFPE;
4310 +               break;
4311 +       case VEC_FPOVER:
4312 +               info.si_code = FPE_FLTOVF;
4313 +               sig = SIGFPE;
4314 +               break;
4315 +       case VEC_ZERODIV:
4316 +               info.si_code = FPE_INTDIV;
4317 +               sig = SIGFPE;
4318 +               break;
4319 +       case VEC_CHK:
4320 +       case VEC_TRAP:
4321 +               info.si_code = FPE_INTOVF;
4322 +               sig = SIGFPE;
4323 +               break;
4324 +       case VEC_TRACE:         /* ptrace single step */
4325 +               info.si_code = TRAP_TRACE;
4326 +               sig = SIGTRAP;
4327 +               break;
4328 +       case VEC_TRAP15:                /* breakpoint */
4329 +               info.si_code = TRAP_BRKPT;
4330 +               sig = SIGTRAP;
4331 +               break;
4332 +       default:
4333 +               info.si_code = ILL_ILLOPC;
4334 +               sig = SIGILL;
4335 +               break;
4336 +       }
4337 +       info.si_signo = sig;
4338 +       info.si_errno = 0;
4339 +       switch (fp->ptregs.format) {
4340 +       default:
4341 +               info.si_addr = (void *) fp->ptregs.pc;
4342 +               break;
4343 +       case 2:
4344 +               info.si_addr = (void *) fp->un.fmt2.iaddr;
4345 +               break;
4346 +       case 7:
4347 +               info.si_addr = (void *) fp->un.fmt7.effaddr;
4348 +               break;
4349 +       case 9:
4350 +               info.si_addr = (void *) fp->un.fmt9.iaddr;
4351 +               break;
4352 +       case 10:
4353 +               info.si_addr = (void *) fp->un.fmta.daddr;
4354 +               break;
4355 +       case 11:
4356 +               info.si_addr = (void *) fp->un.fmtb.daddr;
4357 +               break;
4358 +       }
4359 +       force_sig_info(sig, &info, current);
4360 +}
4361 +
4362 +asmlinkage void set_esp0(unsigned long ssp)
4363 +{
4364 +       current->thread.esp0 = ssp;
4365 +}
4366 +
4367 +/*
4368 + * The architecture-independent backtrace generator
4369 + */
4370 +void dump_stack(void)
4371 +{
4372 +       unsigned long stack;
4373 +
4374 +       show_stack(current, &stack);
4375 +}
4376 +EXPORT_SYMBOL(dump_stack);
4377 +
4378 +#ifdef CONFIG_M68KFPU_EMU
4379 +asmlinkage void fpemu_signal(int signal, int code, void *addr)
4380 +{
4381 +       siginfo_t info;
4382 +
4383 +       info.si_signo = signal;
4384 +       info.si_errno = 0;
4385 +       info.si_code = code;
4386 +       info.si_addr = addr;
4387 +       force_sig_info(signal, &info, current);
4388 +}
4389 +#endif
4390 --- /dev/null
4391 +++ b/arch/m68k/coldfire/vmlinux-cf.lds
4392 @@ -0,0 +1,92 @@
4393 +/* ld script to make m68k Coldfire Linux kernel */
4394 +
4395 +#include <asm-generic/vmlinux.lds.h>
4396 +
4397 +OUTPUT_FORMAT("elf32-m68k", "elf32-m68k", "elf32-m68k")
4398 +OUTPUT_ARCH(m68k)
4399 +ENTRY(_start)
4400 +jiffies = jiffies_64 + 4;
4401 +SECTIONS
4402 +{
4403 +  . = 0xC0020000;
4404 +  _text = .;                   /* Text and read-only data */
4405 +  .text : {
4406 +       *(.text.head)
4407 +       TEXT_TEXT
4408 +       SCHED_TEXT
4409 +       LOCK_TEXT
4410 +       *(.fixup)
4411 +       *(.gnu.warning)
4412 +       } :text = 0x4e75
4413 +
4414 +  _etext = .;                  /* End of text section */
4415 +
4416 +  . = ALIGN(16);
4417 +  __start___ex_table = .;
4418 +  __ex_table : { *(__ex_table) }
4419 +  __stop___ex_table = .;
4420 +
4421 +  RODATA
4422 +
4423 +  .data : {                    /* Data */
4424 +       DATA_DATA
4425 +       CONSTRUCTORS
4426 +       }
4427 +
4428 +  .bss : { *(.bss) }           /* BSS */
4429 +
4430 +  . = ALIGN(16);
4431 +  .data.cacheline_aligned : { *(.data.cacheline_aligned) } :data
4432 +
4433 +  _edata = .;                  /* End of data section */
4434 +
4435 +  . = ALIGN(8192);             /* Init code and data */
4436 +  __init_begin = .;
4437 +  .init.text : {
4438 +       _sinittext = .;
4439 +       *(.init.text)
4440 +       _einittext = .;
4441 +  }
4442 +  .init.data : { *(.init.data) }
4443 +  . = ALIGN(16);
4444 +  __setup_start = .;
4445 +  .init.setup : { *(.init.setup) }
4446 +  __setup_end = .;
4447 +  __initcall_start = .;
4448 +  .initcall.init : {
4449 +       INITCALLS
4450 +  }
4451 +  __initcall_end = .;
4452 +  __con_initcall_start = .;
4453 +  .con_initcall.init : { *(.con_initcall.init) }
4454 +  __con_initcall_end = .;
4455 +  SECURITY_INIT
4456 +#ifdef CONFIG_BLK_DEV_INITRD
4457 +  . = ALIGN(8192);
4458 +  __initramfs_start = .;
4459 +  .init.ramfs : { *(.init.ramfs) }
4460 +  __initramfs_end = .;
4461 +#endif
4462 +  . = ALIGN(8192);
4463 +  __init_end = .;
4464 +
4465 +  .data.init_task : { *(.data.init_task) }     /* The initial task and kernel stack */
4466 +
4467 +  _end = . ;
4468 +
4469 +  /* Sections to be discarded */
4470 +  /DISCARD/ : {
4471 +       *(.exit.text)
4472 +       *(.exit.data)
4473 +       *(.exitcall.exit)
4474 +       }
4475 +
4476 +  /* Stabs debugging sections.  */
4477 +  .stab 0 : { *(.stab) }
4478 +  .stabstr 0 : { *(.stabstr) }
4479 +  .stab.excl 0 : { *(.stab.excl) }
4480 +  .stab.exclstr 0 : { *(.stab.exclstr) }
4481 +  .stab.index 0 : { *(.stab.index) }
4482 +  .stab.indexstr 0 : { *(.stab.indexstr) }
4483 +  .comment 0 : { *(.comment) }
4484 +}