use broken-out patches for the coldfire to make it easier to follow differences again...
[15.05/openwrt.git] target/linux/coldfire/patches/003-mcfv4e_coldfire_headers2.patch
From 3da86cd2810e9ba4e4a9e7471a92025172c1c990 Mon Sep 17 00:00:00 2001
From: Kurt Mahan <kmahan@freescale.com>
Date: Wed, 31 Oct 2007 16:41:41 -0600
Subject: [PATCH] Add Coldfire specific header files.

LTIBName: mcfv4e-coldfire-headers2
Signed-off-by: Kurt Mahan <kmahan@freescale.com>
---
 include/asm-m68k/cf_cacheflush.h |  160 ++++++++++++++++
 include/asm-m68k/cf_entry.h      |  146 +++++++++++++++
 include/asm-m68k/cf_pgalloc.h    |   99 ++++++++++
 include/asm-m68k/cf_pgtable.h    |  357 ++++++++++++++++++++++++++++++++++++
 include/asm-m68k/cf_tlbflush.h   |   59 ++++++
 include/asm-m68k/cf_uaccess.h    |  376 ++++++++++++++++++++++++++++++++++++++
 include/asm-m68k/cfcache.h       |   86 +++++++++
 include/asm-m68k/cfmmu.h         |  104 +++++++++++
 include/asm-m68k/coldfire.h      |   38 ++++
 include/asm-m68k/coldfire_edma.h |   39 ++++
 include/asm-m68k/mcfqspi.h       |   50 +++++
 include/asm-m68k/mcfsim.h        |   96 ++++++++++
 include/asm-m68k/mcfuart.h       |  180 ++++++++++++++++++
 13 files changed, 1790 insertions(+), 0 deletions(-)
 create mode 100644 include/asm-m68k/cf_cacheflush.h
 create mode 100644 include/asm-m68k/cf_entry.h
 create mode 100644 include/asm-m68k/cf_pgalloc.h
 create mode 100644 include/asm-m68k/cf_pgtable.h
 create mode 100644 include/asm-m68k/cf_tlbflush.h
 create mode 100644 include/asm-m68k/cf_uaccess.h
 create mode 100644 include/asm-m68k/cfcache.h
 create mode 100644 include/asm-m68k/cfmmu.h
 create mode 100644 include/asm-m68k/coldfire.h
 create mode 100644 include/asm-m68k/coldfire_edma.h
 create mode 100644 include/asm-m68k/mcfqspi.h
 create mode 100644 include/asm-m68k/mcfsim.h
 create mode 100644 include/asm-m68k/mcfuart.h

--- /dev/null
+++ b/include/asm-m68k/cf_cacheflush.h
@@ -0,0 +1,160 @@
+#ifndef M68K_CF_CACHEFLUSH_H
+#define M68K_CF_CACHEFLUSH_H
+
+#include <asm/cfcache.h>
+
+/*
+ * Cache handling functions
+ */
+
+#define flush_icache()                                         \
+({                                                             \
+  unsigned long set;                                           \
+  unsigned long start_set;                                     \
+  unsigned long end_set;                                       \
+                                                               \
+  start_set = 0;                                               \
+  end_set = (unsigned long)LAST_DCACHE_ADDR;                   \
+                                                               \
+  for (set = start_set; set <= end_set; set += (0x10 - 3))     \
+    asm volatile("cpushl %%ic,(%0)\n"                          \
+                "\taddq%.l #1,%0\n"                            \
+                "\tcpushl %%ic,(%0)\n"                         \
+                "\taddq%.l #1,%0\n"                            \
+                "\tcpushl %%ic,(%0)\n"                         \
+                "\taddq%.l #1,%0\n"                            \
+                "\tcpushl %%ic,(%0)" : "+a" (set));            \
+})
+
+/*
+ * invalidate the cache for the specified memory range.
+ * It starts at the physical address specified for
+ * the given number of bytes.
+ */
+extern void cache_clear(unsigned long paddr, int len);
+/*
+ * push any dirty cache in the specified memory range.
+ * It starts at the physical address specified for
+ * the given number of bytes.
+ */
+extern void cache_push(unsigned long paddr, int len);
+
+/*
+ * push and invalidate pages in the specified user virtual
+ * memory range.
+ */
+extern void cache_push_v(unsigned long vaddr, int len);
+
+/* This is needed whenever the virtual mapping of the current
+   process changes.  */
+
+
+#define flush_cache_all()              do { } while (0)
+#define flush_cache_mm(mm)             do { } while (0)
+#define flush_cache_range(mm, a, b)    do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)     do { } while (0)
+
+#define flush_dcache_range(paddr, len) do { } while (0)
+
+/* Push the page at kernel virtual address and clear the icache */
+/* use cpush %bc instead of cpush %dc, cinv %ic */
+#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
+extern inline void __flush_page_to_ram(void *address)
+{
+  unsigned long set;
+  unsigned long start_set;
+  unsigned long end_set;
+  unsigned long addr = (unsigned long) address;
+
+  addr &= ~(PAGE_SIZE - 1); /* round down to page start address */
+
+  start_set = addr & _ICACHE_SET_MASK;
+  end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
+
+  if (start_set > end_set) {
+    /* from the beginning to the lowest address */
+    for (set = 0; set <= end_set; set += (0x10 - 3))
+      asm volatile("cpushl %%bc,(%0)\n"
+                  "\taddq%.l #1,%0\n"
+                  "\tcpushl %%bc,(%0)\n"
+                  "\taddq%.l #1,%0\n"
+                  "\tcpushl %%bc,(%0)\n"
+                  "\taddq%.l #1,%0\n"
+                  "\tcpushl %%bc,(%0)" : "+a" (set));
+
+    /* next loop will finish the cache, i.e. pass the hole */
+    end_set = LAST_ICACHE_ADDR;
+  }
+  for (set = start_set; set <= end_set; set += (0x10 - 3))
+    asm volatile("cpushl %%bc,(%0)\n"
+                "\taddq%.l #1,%0\n"
+                "\tcpushl %%bc,(%0)\n"
+                "\taddq%.l #1,%0\n"
+                "\tcpushl %%bc,(%0)\n"
+                "\taddq%.l #1,%0\n"
+                "\tcpushl %%bc,(%0)" : "+a" (set));
+}
+
+#define flush_dcache_page(page)                        do { } while (0)
+#define flush_icache_page(vma, pg)             do { } while (0)
+#define flush_icache_user_range(adr, len)      do { } while (0)
+/* NL */
+#define flush_icache_user_page(vma, page, addr, len)   do { } while (0)
+
+/* Push n pages at kernel virtual address and clear the icache */
+/* use cpush %bc instead of cpush %dc, cinv %ic */
+extern inline void flush_icache_range(unsigned long address,
+                                     unsigned long endaddr)
+{
+  unsigned long set;
+  unsigned long start_set;
+  unsigned long end_set;
+
+  start_set = address & _ICACHE_SET_MASK;
+  end_set = endaddr & _ICACHE_SET_MASK;
+
+  if (start_set > end_set) {
+    /* from the beginning to the lowest address */
+    for (set = 0; set <= end_set; set += (0x10 - 3))
+      asm volatile("cpushl %%ic,(%0)\n"
+                  "\taddq%.l #1,%0\n"
+                  "\tcpushl %%ic,(%0)\n"
+                  "\taddq%.l #1,%0\n"
+                  "\tcpushl %%ic,(%0)\n"
+                  "\taddq%.l #1,%0\n"
+                  "\tcpushl %%ic,(%0)" : "+a" (set));
+
+    /* next loop will finish the cache, i.e. pass the hole */
+    end_set = LAST_ICACHE_ADDR;
+  }
+  for (set = start_set; set <= end_set; set += (0x10 - 3))
+    asm volatile("cpushl %%ic,(%0)\n"
+                "\taddq%.l #1,%0\n"
+                "\tcpushl %%ic,(%0)\n"
+                "\taddq%.l #1,%0\n"
+                "\tcpushl %%ic,(%0)\n"
+                "\taddq%.l #1,%0\n"
+                "\tcpushl %%ic,(%0)" : "+a" (set));
+}
+
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+                                    struct page *page, unsigned long vaddr,
+                                    void *dst, void *src, int len)
+{
+       memcpy(dst, src, len);
+       flush_icache_user_page(vma, page, vaddr, len);
+}
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+                                      struct page *page, unsigned long vaddr,
+                                      void *dst, void *src, int len)
+{
+       memcpy(dst, src, len);
+}
+
+#define flush_cache_dup_mm(mm)                 flush_cache_mm(mm)
+#define flush_cache_vmap(start, end)           flush_cache_all()
+#define flush_cache_vunmap(start, end)         flush_cache_all()
+#define flush_dcache_mmap_lock(mapping)                do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)      do { } while (0)
+
+#endif /* M68K_CF_CACHEFLUSH_H */
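
The stride arithmetic in the loops above is easy to misread: each cpushl block pushes the four ways of one cache set, the three addq instructions step the way field in bits 1:0 of the read-write "+a" operand, and the loop's += (0x10 - 3) then advances the net total by one set (bits 4 and up, per _SET_SHIFT in cfcache.h later in this patch). A minimal host-side sketch of the walk, with constants copied from cfcache.h; the 4-way reading of the geometry is an assumption consistent with the four pushes per set:

#include <stdio.h>

#define SET_SHIFT        4                        /* _SET_SHIFT */
#define ICACHE_SIZE      (2 * 16384)              /* _ICACHE_SIZE */
#define LAST_ICACHE_ADDR ((ICACHE_SIZE / 64 - 1) << SET_SHIFT)  /* 0x1ff0 */

int main(void)
{
	unsigned long set, lines = 0;

	for (set = 0; set <= LAST_ICACHE_ADDR; set += (0x10 - 3)) {
		for (int way = 0; way < 4; way++) {
			lines++;            /* stands in for cpushl %ic,(%0) */
			if (way < 3)
				set++;      /* the addq%.l #1,%0 steps */
		}
		/* net advance per iteration: 3 (addq) + 0xd (loop) = 0x10, one set */
	}
	printf("pushed %lu lines over %d sets x 4 ways\n",
	       lines, ICACHE_SIZE / 64);       /* 2048 lines, 512 sets */
	return 0;
}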
--- /dev/null
+++ b/include/asm-m68k/cf_entry.h
@@ -0,0 +1,146 @@
+#ifndef __CF_M68K_ENTRY_H
+#define __CF_M68K_ENTRY_H
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/coldfire.h>
+#include <asm/cfmmu.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * Stack layout in 'ret_from_exception':
+ *
+ *     This allows access to the syscall arguments in registers d1-d5
+ *
+ *      0(sp) - d1
+ *      4(sp) - d2
+ *      8(sp) - d3
+ *      C(sp) - d4
+ *     10(sp) - d5
+ *     14(sp) - a0
+ *     18(sp) - a1
+ *     1C(sp) - a2
+ *     20(sp) - d0
+ *     24(sp) - orig_d0
+ *     28(sp) - stack adjustment
+ *     2C(sp) - sr
+ *     2E(sp) - pc
+ *     32(sp) - format & vector
+ *     36(sp) - MMUSR
+ *     3A(sp) - MMUAR
+ */
+
+/*
+ * 97/05/14 Andreas: Register %a2 is now set to the current task throughout
+ *                  the whole kernel.
+ */
+
+/* the following macro is used when enabling interrupts */
+/* portable version */
+#define ALLOWINT       (~0x700)
+#define MAX_NOINT_IPL  0
+
+#ifdef __ASSEMBLY__
+
+#define curptr a2
+
+LFLUSH_I_AND_D = 0x00000808
+LSIGTRAP = 5
+
+/* process bits for task_struct.ptrace */
+PT_TRACESYS_OFF = 3
+PT_TRACESYS_BIT = 1
+PT_PTRACED_OFF = 3
+PT_PTRACED_BIT = 0
+PT_DTRACE_OFF = 3
+PT_DTRACE_BIT = 2
+
+#define SAVE_ALL_INT save_all_int
+#define SAVE_ALL_SYS save_all_sys
+#define RESTORE_ALL restore_all
+/*
+ * This defines the normal kernel pt-regs layout.
+ *
+ * regs a3-a6 and d6-d7 are preserved by C code
+ * the kernel doesn't mess with usp unless it needs to
+ */
+
+/*
+ * a -1 in the orig_d0 field signifies
+ * that the stack frame is NOT for syscall
+ */
+.macro save_all_int
+       movel   MMUSR,%sp@-
+       movel   MMUAR,%sp@-
+       clrl    %sp@-           | stk_adj
+       pea     -1:w            | orig d0
+       movel   %d0,%sp@-       | d0
+       subal   #(8*4), %sp
+       moveml  %d1-%d5/%a0-%a1/%curptr,%sp@
+.endm
+
+.macro save_all_sys
+       movel   MMUSR,%sp@-
+       movel   MMUAR,%sp@-
+       clrl    %sp@-           | stk_adj
+       movel   %d0,%sp@-       | orig d0
+       movel   %d0,%sp@-       | d0
+       subal   #(8*4), %sp
+       moveml  %d1-%d5/%a0-%a1/%curptr,%sp@
+.endm
+
+.macro restore_all
+       moveml  %sp@,%a0-%a1/%curptr/%d1-%d5
+       addal   #(8*4), %sp
+       movel   %sp@+,%d0       | d0
+       addql   #4,%sp          | orig d0
+       addl    %sp@+,%sp       | stk_adj
+       addql   #8,%sp          | MMUAR & MMUSR
+       rte
+.endm
+
+#define SWITCH_STACK_SIZE (6*4+4)      /* includes return address */
+
+#define SAVE_SWITCH_STACK save_switch_stack
+#define RESTORE_SWITCH_STACK restore_switch_stack
+#define GET_CURRENT(tmp) get_current tmp
+
+.macro save_switch_stack
+       subal   #(6*4), %sp
+       moveml  %a3-%a6/%d6-%d7,%sp@
+.endm
+
+.macro restore_switch_stack
+       moveml  %sp@,%a3-%a6/%d6-%d7
+       addal   #(6*4), %sp
+.endm
+
+.macro get_current reg=%d0
+       movel   %sp,\reg
+       andl    #-THREAD_SIZE,\reg
+       movel   \reg,%curptr
+       movel   %curptr@,%curptr
+.endm
+
+#else /* C source */
+
+#define STR(X) STR1(X)
+#define STR1(X) #X
+
+#define PT_OFF_ORIG_D0  0x24
+#define PT_OFF_FORMATVEC 0x32
+#define PT_OFF_SR       0x2C
+#define SAVE_ALL_INT                           \
+       "clrl   %%sp@-;"    /* stk_adj */       \
+       "pea    -1:w;"      /* orig d0 = -1 */  \
+       "movel  %%d0,%%sp@-;" /* d0 */          \
+       "subal  #(8*4),%%sp;"                   \
+       "moveml %%d1-%%d5/%%a0-%%a2,%%sp@"
+#define GET_CURRENT(tmp) \
+       "movel  %%sp,"#tmp"\n\t" \
+       "andw   #-"STR(THREAD_SIZE)","#tmp"\n\t" \
+       "movel  "#tmp",%%a2\n\t"
+
+#endif
+
+#endif /* __CF_M68K_ENTRY_H */
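
The offsets in the stack-layout comment can be cross-checked against the PT_OFF_* constants from the C branch. A compile-time sketch of that check (illustrative only; the field names are invented, only the offsets come from the comment block):

#include <stddef.h>
#include <stdint.h>

/* Packed mirror of the 'ret_from_exception' frame documented above. */
struct cf_exc_frame {
	uint32_t d1, d2, d3, d4, d5;     /* 0x00 .. 0x10 */
	uint32_t a0, a1, a2;             /* 0x14 .. 0x1c */
	uint32_t d0;                     /* 0x20 */
	uint32_t orig_d0;                /* 0x24 */
	uint32_t stkadj;                 /* 0x28, "stack adjustment" */
	uint16_t sr;                     /* 0x2c, 16-bit status register */
	uint32_t pc;                     /* 0x2e, hence the packing */
	uint32_t format_vector;          /* 0x32 */
	uint32_t mmusr;                  /* 0x36 */
	uint32_t mmuar;                  /* 0x3a */
} __attribute__((packed));

_Static_assert(offsetof(struct cf_exc_frame, orig_d0) == 0x24, "PT_OFF_ORIG_D0");
_Static_assert(offsetof(struct cf_exc_frame, sr) == 0x2C, "PT_OFF_SR");
_Static_assert(offsetof(struct cf_exc_frame, format_vector) == 0x32, "PT_OFF_FORMATVEC");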
--- /dev/null
+++ b/include/asm-m68k/cf_pgalloc.h
@@ -0,0 +1,99 @@
+#ifndef M68K_CF_PGALLOC_H
+#define M68K_CF_PGALLOC_H
+
+#include <asm/coldfire.h>
+#include <asm/page.h>
+#include <asm/cf_tlbflush.h>
+
+extern inline void pte_free_kernel(pte_t *pte)
+{
+       free_page((unsigned long) pte);
+}
+
+extern const char bad_pmd_string[];
+
+extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+       unsigned long address)
+{
+       unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
+
+       if (!page)
+               return NULL;
+
+       memset((void *)page, 0, PAGE_SIZE);
+       return (pte_t *) (page);
+}
+
+extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
+{
+       return (pmd_t *) pgd;
+}
+
+#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
+#define pmd_alloc_one(mm, address)      ({ BUG(); ((pmd_t *)2); })
+
+#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)
+
+#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
+       (unsigned long)(page_address(page)))
+#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page)
+{
+       __free_page(page);
+}
+
+#define __pmd_free_tlb(tlb, pmd) do { } while (0)
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+       unsigned long address)
+{
+       struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
+       pte_t *pte;
+
+       if (!page)
+               return NULL;
+
+       pte = kmap(page);
+       if (pte) {
+               clear_page(pte);
+               __flush_page_to_ram(pte);
+               flush_tlb_kernel_page(pte);
+               nocache_page(pte);
+       }
+       kunmap(page);
+
+       return page;
+}
+
+extern inline void pte_free(struct page *page)
+{
+       __free_page(page);
+}
+
+/*
+ * In our implementation, each pgd entry contains 1 pmd that is never allocated
+ * or freed.  pgd_present is always 1, so this should never be called. -NL
+ */
+#define pmd_free(pmd) BUG()
+
+extern inline void pgd_free(pgd_t *pgd)
+{
+       free_page((unsigned long) pgd);
+}
+
+extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+       pgd_t *new_pgd;
+
+       new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
+       if (!new_pgd)
+               return NULL;
+       memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
+       memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
+       return new_pgd;
+}
+
+#define pgd_populate(mm, pmd, pte) BUG()
+
+#endif /* M68K_CF_PGALLOC_H */
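
The allocator above leans on a folded page-table scheme: pmd_alloc_kernel() and pmd_offset() hand back the pgd slot itself, so pmd_alloc_one() and pmd_free() can BUG() because a separate pmd level is never materialized. A simplified sketch of the resulting two-level walk (types and names reduced for illustration, not kernel code):

typedef unsigned long pgd_t, pmd_t, pte_t;

pte_t *folded_walk(pgd_t *pgd_base, unsigned long addr,
		   unsigned long pgdir_shift, unsigned long ptrs_per_pte,
		   unsigned long page_shift)
{
	pgd_t *pgd = pgd_base + (addr >> pgdir_shift); /* pgd_offset() */
	pmd_t *pmd = (pmd_t *)pgd;                     /* pmd_offset(): the fold */
	pte_t *table = (pte_t *)*pmd;                  /* __pmd_page(): pmd holds the
	                                                  pte table's address */
	return table + ((addr >> page_shift) & (ptrs_per_pte - 1));
}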
--- /dev/null
+++ b/include/asm-m68k/cf_pgtable.h
@@ -0,0 +1,357 @@
+#ifndef _CF_PGTABLE_H
+#define _CF_PGTABLE_H
+
+#include <asm/cfmmu.h>
+#include <asm/page.h>
+
+#ifndef __ASSEMBLY__
+#include <asm/virtconvert.h>
+#include <linux/linkage.h>
+
+/* For virtual address to physical address conversion */
+#define VTOP(addr)     __pa(addr)
+#define PTOV(addr)     __va(addr)
+
+
+#endif /* !__ASSEMBLY__ */
+
+/* Page protection values within PTE. */
+
+/* MMUDR bits, in proper place. */
+#define CF_PAGE_LOCKED      (0x00000002)
+#define CF_PAGE_EXEC        (0x00000004)
+#define CF_PAGE_WRITABLE    (0x00000008)
+#define CF_PAGE_READABLE    (0x00000010)
+#define CF_PAGE_SYSTEM      (0x00000020)
+#define CF_PAGE_COPYBACK    (0x00000040)
+#define CF_PAGE_NOCACHE     (0x00000080)
+
+#define CF_CACHEMASK       (~0x00000040)
+#define CF_PAGE_MMUDR_MASK  (0x000000fe)
+
+#define _PAGE_NOCACHE030  (CF_PAGE_NOCACHE)
+
+/* MMUTR bits, need shifting down.  */
+#define CF_PAGE_VALID       (0x00000400)
+#define CF_PAGE_SHARED      (0x00000800)
+
+#define CF_PAGE_MMUTR_MASK  (0x00000c00)
+#define CF_PAGE_MMUTR_SHIFT (10)
+#define CF_ASID_MMU_SHIFT   (2)
+
+/* Fake bits, not implemented in CF, will get masked out before
+   hitting hardware, and might go away altogether once this port is
+   complete.  */
+#if PAGE_SHIFT < 13
+#error COLDFIRE Error: Pages must be at least 8k in size
+#endif
+#define CF_PAGE_ACCESSED    (0x00001000)
+#define CF_PAGE_FILE        (0x00000200)
+#define CF_PAGE_DIRTY       (0x00000001)
+
+#define _PAGE_CACHE040 0x020   /* 68040 cache mode, cachable, copyback */
+#define _PAGE_NOCACHE_S 0x040   /* 68040 no-cache mode, serialized */
+#define _PAGE_NOCACHE   0x060   /* 68040 cache mode, non-serialized */
+#define _PAGE_CACHE040W 0x000   /* 68040 cache mode, cachable, write-through */
+#define _DESCTYPE_MASK  0x003
+#define _CACHEMASK040   (~0x060)
+#define _PAGE_GLOBAL040 0x400   /* 68040 global bit, used for kva descs */
+
+
+/* Externally used page protection values. */
+#define _PAGE_PRESENT  (CF_PAGE_VALID)
+#define _PAGE_ACCESSED (CF_PAGE_ACCESSED)
+#define _PAGE_DIRTY    (CF_PAGE_DIRTY)
+#define _PAGE_READWRITE (CF_PAGE_WRITABLE \
+                       | CF_PAGE_READABLE \
+                       | CF_PAGE_SYSTEM \
+                       | CF_PAGE_SHARED)
+
+/* Compound page protection values. */
+#define PAGE_NONE      __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED)
+
+#define PAGE_SHARED     __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_SHARED)
+
+#define PAGE_INIT      __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_WRITABLE \
+                                | CF_PAGE_READABLE \
+                                | CF_PAGE_EXEC \
+                                | CF_PAGE_SYSTEM \
+                                | CF_PAGE_SHARED)
+
+#define PAGE_KERNEL    __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_WRITABLE \
+                                | CF_PAGE_READABLE \
+                                | CF_PAGE_EXEC \
+                                | CF_PAGE_SYSTEM \
+                                | CF_PAGE_SHARED \
+                                | CF_PAGE_ACCESSED)
+
+#define PAGE_COPY      __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_READABLE \
+                                | CF_PAGE_DIRTY)
+/*
+ * Page protections for initialising protection_map.  See mm/mmap.c
+ * for use.  In general, the bit positions are xwr; the P-items are
+ * private, the S-items are shared.
+ */
+
+#define __P000 PAGE_NONE
+#define __P100 __pgprot(CF_PAGE_VALID \
+                        | CF_PAGE_ACCESSED \
+                        | CF_PAGE_EXEC)
+#define __P010 __pgprot(CF_PAGE_VALID \
+                        | CF_PAGE_WRITABLE \
+                        | CF_PAGE_ACCESSED)
+#define __P110 __pgprot(CF_PAGE_VALID \
+                        | CF_PAGE_ACCESSED \
+                        | CF_PAGE_WRITABLE \
+                        | CF_PAGE_EXEC)
+#define __P001 __pgprot(CF_PAGE_VALID \
+                        | CF_PAGE_ACCESSED \
+                        | CF_PAGE_READABLE)
+#define __P101 __pgprot(CF_PAGE_VALID \
+                        | CF_PAGE_ACCESSED \
+                        | CF_PAGE_READABLE \
+                        | CF_PAGE_EXEC)
+#define __P011 __pgprot(CF_PAGE_VALID \
+                        | CF_PAGE_READABLE \
+                        | CF_PAGE_WRITABLE \
+                        | CF_PAGE_ACCESSED)
+#define __P111 __pgprot(CF_PAGE_VALID \
+                        | CF_PAGE_ACCESSED \
+                        | CF_PAGE_WRITABLE \
+                        | CF_PAGE_READABLE \
+                        | CF_PAGE_EXEC)
+
+#define __S000 PAGE_NONE
+#define __S100 __pgprot(CF_PAGE_VALID \
+                        | CF_PAGE_ACCESSED \
+                        | CF_PAGE_SHARED \
+                        | CF_PAGE_EXEC)
+#define __S010 PAGE_SHARED
+#define __S110 __pgprot(CF_PAGE_VALID \
+                        | CF_PAGE_ACCESSED \
+                        | CF_PAGE_SHARED \
+                        | CF_PAGE_EXEC)
+#define __S001 __pgprot(CF_PAGE_VALID \
+                        | CF_PAGE_ACCESSED \
+                        | CF_PAGE_SHARED \
+                        | CF_PAGE_READABLE)
+#define __S101 __pgprot(CF_PAGE_VALID \
+                        | CF_PAGE_ACCESSED \
+                        | CF_PAGE_SHARED \
+                        | CF_PAGE_READABLE \
+                        | CF_PAGE_EXEC)
+#define __S011 __pgprot(CF_PAGE_VALID \
+                        | CF_PAGE_ACCESSED \
+                        | CF_PAGE_SHARED \
+                        | CF_PAGE_READABLE)
+#define __S111 __pgprot(CF_PAGE_VALID \
+                        | CF_PAGE_ACCESSED \
+                        | CF_PAGE_SHARED \
+                        | CF_PAGE_READABLE \
+                        | CF_PAGE_EXEC)
+
+#define PTE_MASK       PAGE_MASK
+#define CF_PAGE_CHG_MASK       (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+       pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot);
+       return pte;
+}
+
+#define pmd_set(pmdp, ptep) do {} while (0)
+
+extern inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
+{
+       pgd_val(*pgdp) = virt_to_phys(pmdp);
+}
+
+#define __pte_page(pte) \
+       ((unsigned long) ((pte_val(pte) & CF_PAGE_PGNUM_MASK) + PAGE_OFFSET))
+#define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd)))
+
+extern inline int pte_none(pte_t pte)
+{
+       return !pte_val(pte);
+}
+extern inline int pte_present(pte_t pte)
+{
+       return pte_val(pte) & CF_PAGE_VALID;
+}
+extern inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+       pte_t *ptep)
+{
+       pte_val(*ptep) = 0;
+}
+
+#define pte_pagenr(pte)                ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
+#define pte_page(pte)          virt_to_page(__pte_page(pte))
+
+extern inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
+#define pmd_none(pmd) pmd_none2(&(pmd))
+extern inline int pmd_bad2(pmd_t *pmd) { return 0; }
+#define pmd_bad(pmd) pmd_bad2(&(pmd))
+#define pmd_present(pmd) (!pmd_none2(&(pmd)))
+extern inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
+
+extern inline int pgd_none(pgd_t pgd) { return 0; }
+extern inline int pgd_bad(pgd_t pgd) { return 0; }
+extern inline int pgd_present(pgd_t pgd) { return 1; }
+extern inline void pgd_clear(pgd_t *pgdp) {}
+
+
+#define pte_ERROR(e) \
+       printk(KERN_ERR "%s:%d: bad pte %08lx.\n",      \
+       __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+       printk(KERN_ERR "%s:%d: bad pmd %08lx.\n",      \
+       __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+       printk(KERN_ERR "%s:%d: bad pgd %08lx.\n",      \
+       __FILE__, __LINE__, pgd_val(e))
+
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not...
+ * [we have the full set here even if they don't change from m68k]
+ */
+extern inline int pte_read(pte_t pte)  \
+       { return pte_val(pte) & CF_PAGE_READABLE; }
+extern inline int pte_write(pte_t pte) \
+       { return pte_val(pte) & CF_PAGE_WRITABLE; }
+extern inline int pte_exec(pte_t pte)  \
+       { return pte_val(pte) & CF_PAGE_EXEC; }
+extern inline int pte_dirty(pte_t pte) \
+       { return pte_val(pte) & CF_PAGE_DIRTY; }
+extern inline int pte_young(pte_t pte) \
+       { return pte_val(pte) & CF_PAGE_ACCESSED; }
+extern inline int pte_file(pte_t pte)  \
+       { return pte_val(pte) & CF_PAGE_FILE; }
+
+extern inline pte_t pte_wrprotect(pte_t pte)   \
+       { pte_val(pte) &= ~CF_PAGE_WRITABLE; return pte; }
+extern inline pte_t pte_rdprotect(pte_t pte)   \
+       { pte_val(pte) &= ~CF_PAGE_READABLE; return pte; }
+extern inline pte_t pte_exprotect(pte_t pte)   \
+       { pte_val(pte) &= ~CF_PAGE_EXEC; return pte; }
+extern inline pte_t pte_mkclean(pte_t pte)     \
+       { pte_val(pte) &= ~CF_PAGE_DIRTY; return pte; }
+extern inline pte_t pte_mkold(pte_t pte)       \
+       { pte_val(pte) &= ~CF_PAGE_ACCESSED; return pte; }
+extern inline pte_t pte_mkwrite(pte_t pte)     \
+       { pte_val(pte) |= CF_PAGE_WRITABLE; return pte; }
+extern inline pte_t pte_mkread(pte_t pte)      \
+       { pte_val(pte) |= CF_PAGE_READABLE; return pte; }
+extern inline pte_t pte_mkexec(pte_t pte)      \
+       { pte_val(pte) |= CF_PAGE_EXEC; return pte; }
+extern inline pte_t pte_mkdirty(pte_t pte)     \
+       { pte_val(pte) |= CF_PAGE_DIRTY; return pte; }
+extern inline pte_t pte_mkyoung(pte_t pte)     \
+       { pte_val(pte) |= CF_PAGE_ACCESSED; return pte; }
+extern inline pte_t pte_mknocache(pte_t pte)   \
+       { pte_val(pte) = (pte_val(pte) & CF_CACHEMASK) | CF_PAGE_NOCACHE; return pte; }
+extern inline pte_t pte_mkcache(pte_t pte)     \
+       { pte_val(pte) &= ~CF_PAGE_NOCACHE; return pte; }
+
+#define swapper_pg_dir kernel_pg_dir
+extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
+
+/* Find an entry in a pagetable directory. */
+#define pgd_index(address)     ((address) >> PGDIR_SHIFT)
+
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+
+/* Find an entry in a kernel pagetable directory. */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* Find an entry in the second-level pagetable. */
+extern inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
+{
+       return (pmd_t *) pgd;
+}
+
+/* Find an entry in the third-level pagetable. */
+#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) ((pte_t *) __pmd_page(*(dir)) + \
+                                         __pte_offset(address))
+
+/* Disable caching for page at given kernel virtual address. */
+static inline void nocache_page(void *vaddr)
+{
+       pgd_t *dir;
+       pmd_t *pmdp;
+       pte_t *ptep;
+       unsigned long addr = (unsigned long)vaddr;
+
+       dir = pgd_offset_k(addr);
+       pmdp = pmd_offset(dir, addr);
+       ptep = pte_offset_kernel(pmdp, addr);
+       *ptep = pte_mknocache(*ptep);
+}
+
+/* Enable caching for page at given kernel virtual address. */
+static inline void cache_page(void *vaddr)
+{
+       pgd_t *dir;
+       pmd_t *pmdp;
+       pte_t *ptep;
+       unsigned long addr = (unsigned long)vaddr;
+
+       dir = pgd_offset_k(addr);
+       pmdp = pmd_offset(dir, addr);
+       ptep = pte_offset_kernel(pmdp, addr);
+       *ptep = pte_mkcache(*ptep);
+}
+
+#define PTE_FILE_MAX_BITS      21
+#define PTE_FILE_SHIFT         11
+
+static inline unsigned long pte_to_pgoff(pte_t pte)
+{
+       return pte_val(pte) >> PTE_FILE_SHIFT;
+}
+
+static inline pte_t pgoff_to_pte(unsigned pgoff)
+{
+       pte_t pte = __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE);
+       return pte;
+}
+
+/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
+#define __swp_entry(type, offset) ((swp_entry_t) { (type) |    \
+                                  (offset << PTE_FILE_SHIFT) })
+#define __swp_type(x)          ((x).val & 0xFF)
+#define __swp_offset(x)                ((x).val >> PTE_FILE_SHIFT)
+#define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)  (__pte((x).val))
+
+#define pmd_page(pmd)          virt_to_page(__pmd_page(pmd))
+
+#define pte_offset_map(pmdp, address) ((pte_t *)__pmd_page(*pmdp) +    \
+                                      __pte_offset(address))
+#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
+#define pte_unmap(pte) kunmap(pte)
+#define pte_unmap_nested(pte) kunmap(pte)
+
+#define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pte_pfn(pte)           (pte_val(pte) >> PAGE_SHIFT)
+
+
+#endif /* !__ASSEMBLY__ */
+#endif /* !_CF_PGTABLE_H */
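
The nonlinear-file encoding above fits together because CF_PAGE_FILE (bit 9) lies below PTE_FILE_SHIFT (11), so the marker bit never collides with the offset, and 21 offset bits plus the 11-bit shift exactly fill a 32-bit PTE. A host-side round-trip check, with pte_t reduced to unsigned long:

#include <assert.h>

#define CF_PAGE_FILE      0x00000200UL
#define PTE_FILE_SHIFT    11
#define PTE_FILE_MAX_BITS 21

int main(void)
{
	unsigned long pgoff = (1UL << PTE_FILE_MAX_BITS) - 1; /* largest offset */
	unsigned long pte = (pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE;

	assert((pte >> PTE_FILE_SHIFT) == pgoff); /* pte_to_pgoff() recovers it */
	assert(pte & CF_PAGE_FILE);               /* pte_file() still sees the marker */
	return 0;
}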
--- /dev/null
+++ b/include/asm-m68k/cf_tlbflush.h
@@ -0,0 +1,59 @@
+#ifndef M68K_CF_TLBFLUSH_H
+#define M68K_CF_TLBFLUSH_H
+
+#include <asm/coldfire.h>
+
+/* Flush all userspace mappings.  */
+static inline void flush_tlb_all(void)
+{
+       preempt_disable();
+       *MMUOR = MMUOR_CNL;
+       preempt_enable();
+}
+
+/* Clear user TLB entries within the context named in mm */
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+       preempt_disable();
+       *MMUOR = MMUOR_CNL;
+       preempt_enable();
+}
+
+/* Flush a single TLB page.  */
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+                                  unsigned long addr)
+{
+       preempt_disable();
+       *MMUOR = MMUOR_CNL;
+       preempt_enable();
+}
+
+/* Flush a range of pages from TLB. */
+static inline void flush_tlb_range(struct mm_struct *mm,
+                     unsigned long start, unsigned long end)
+{
+       preempt_disable();
+       *MMUOR = MMUOR_CNL;
+       preempt_enable();
+}
+
+/* Flush kernel page from TLB. */
+static inline void flush_tlb_kernel_page(void *addr)
+{
+       preempt_disable();
+       *MMUOR = MMUOR_CNL;
+       preempt_enable();
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+       unsigned long end)
+{
+       flush_tlb_all();
+}
+
+extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+                                     unsigned long start, unsigned long end)
+{
+}
+
+#endif /* M68K_CF_TLBFLUSH_H */
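
Every flush variant above funnels into the same single store: MMUOR_CNL ("clear all non-locked entries") written to the MMU operation register, bracketed by preempt_disable()/preempt_enable(). A sketch of what that store expands to; REG16() comes from asm/movs.h (included by cfmmu.h below) and is not shown in this capture, so its definition here is an assumption consistent with the *MMUOR usage:

#define MMU_BASE   0xE1000000
#define REG16(x)   ((volatile unsigned short *)(x))   /* assumed shape of REG16 */
#define MMUOR      REG16(MMU_BASE + 0x04 + 0x02)      /* as defined in cfmmu.h */
#define MMUOR_CNLN 6
#define MMUOR_CNL  (1 << MMUOR_CNLN)

static inline void cf_tlb_flush_all(void)            /* hypothetical helper */
{
	/* one 16-bit store invalidates every non-locked TLB entry */
	*MMUOR = MMUOR_CNL;
}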
--- /dev/null
+++ b/include/asm-m68k/cf_uaccess.h
@@ -0,0 +1,376 @@
+#ifndef __M68K_CF_UACCESS_H
+#define __M68K_CF_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+
+/* The "moves" instruction is not available in the ColdFire instruction set. */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <asm/segment.h>
+
+#define VERIFY_READ    0
+#define VERIFY_WRITE   1
+
+/* We let the MMU do all checking */
+#define access_ok(type, addr, size) 1
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue.  No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path.  This means when everything is well,
+ * we don't even have to jump over them.  Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+       unsigned long insn, fixup;
+};
+
+extern int __put_user_bad(void);
+extern int __get_user_bad(void);
+
+#define __put_user_asm(res, x, ptr, bwl, reg, err)     \
+asm volatile ("\n"                                     \
+       "1:     move."#bwl"     %2,%1\n"                \
+       "2:\n"                                          \
+       "       .section .fixup,\"ax\"\n"               \
+       "       .even\n"                                \
+       "10:    moveq.l %3,%0\n"                        \
+       "       jra 2b\n"                               \
+       "       .previous\n"                            \
+       "\n"                                            \
+       "       .section __ex_table,\"a\"\n"            \
+       "       .align  4\n"                            \
+       "       .long   1b,10b\n"                       \
+       "       .long   2b,10b\n"                       \
+       "       .previous"                              \
+       : "+d" (res), "=m" (*(ptr))                     \
+       : #reg (x), "i" (err))
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ */
+
+#define __put_user(x, ptr)                                             \
+({                                                                     \
+       typeof(*(ptr)) __pu_val = (x);                                  \
+       int __pu_err = 0;                                               \
+       __chk_user_ptr(ptr);                                            \
+       switch (sizeof (*(ptr))) {                                      \
+       case 1:                                                         \
+               __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
+               break;                                                  \
+       case 2:                                                         \
+               __put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT); \
+               break;                                                  \
+       case 4:                                                         \
+               __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
+               break;                                                  \
+       case 8:                                                         \
+               {                                                       \
+               const void __user *__pu_ptr = (ptr);                    \
+               asm volatile ("\n"                                      \
+                       "1:     move.l  %2,(%1)+\n"                     \
+                       "2:     move.l  %R2,(%1)\n"                     \
+                       "3:\n"                                          \
+                       "       .section .fixup,\"ax\"\n"               \
+                       "       .even\n"                                \
+                       "10:    movel %3,%0\n"                          \
+                       "       jra 3b\n"                               \
+                       "       .previous\n"                            \
+                       "\n"                                            \
+                       "       .section __ex_table,\"a\"\n"            \
+                       "       .align 4\n"                             \
+                       "       .long 1b,10b\n"                         \
+                       "       .long 2b,10b\n"                         \
+                       "       .long 3b,10b\n"                         \
+                       "       .previous"                              \
+                       : "+d" (__pu_err), "+a" (__pu_ptr)              \
+                       : "r" (__pu_val), "i" (-EFAULT)                 \
+                       : "memory");                                    \
+               break;                                                  \
+           }                                                           \
+       default:                                                        \
+               __pu_err = __put_user_bad();                            \
+               break;                                                  \
+       }                                                               \
+       __pu_err;                                                       \
+})
+#define put_user(x, ptr)       __put_user(x, ptr)
+
+
+#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({    \
+       type __gu_val;                                          \
+       asm volatile ("\n"                                      \
+               "1:     move."#bwl"     %2,%1\n"                \
+               "2:\n"                                          \
+               "       .section .fixup,\"ax\"\n"               \
+               "       .even\n"                                \
+               "10:    move.l  %3,%0\n"                        \
+               "       subl    %1,%1\n"                        \
+               "       jra     2b\n"                           \
+               "       .previous\n"                            \
+               "\n"                                            \
+               "       .section __ex_table,\"a\"\n"            \
+               "       .align  4\n"                            \
+               "       .long   1b,10b\n"                       \
+               "       .previous"                              \
+               : "+d" (res), "=&" #reg (__gu_val)              \
+               : "m" (*(ptr)), "i" (err));                     \
+       (x) = (typeof(*(ptr)))(unsigned long)__gu_val;          \
+})
+
+#define __get_user(x, ptr)                                             \
+({                                                                     \
+       int __gu_err = 0;                                               \
+       __chk_user_ptr(ptr);                                            \
+       switch (sizeof(*(ptr))) {                                       \
+       case 1:                                                         \
+               __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT);    \
+               break;                                                  \
+       case 2:                                                         \
+               __get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT);   \
+               break;                                                  \
+       case 4:                                                         \
+               __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT);   \
+               break;                                                  \
+/*     case 8: disabled because gcc-4.1 has a broken typeof            \
+               {                                                       \
+               const void *__gu_ptr = (ptr);                           \
+               u64 __gu_val;                                           \
+               asm volatile ("\n"                                      \
+                       "1:     move.l  (%2)+,%1\n"                     \
+                       "2:     move.l  (%2),%R1\n"                     \
+                       "3:\n"                                          \
+                       "       .section .fixup,\"ax\"\n"               \
+                       "       .even\n"                                \
+                       "10:    move.l  %3,%0\n"                        \
+                       "       subl    %1,%1\n"                        \
+                       "       subl    %R1,%R1\n"                      \
+                       "       jra     3b\n"                           \
+                       "       .previous\n"                            \
+                       "\n"                                            \
+                       "       .section __ex_table,\"a\"\n"            \
+                       "       .align  4\n"                            \
+                       "       .long   1b,10b\n"                       \
+                       "       .long   2b,10b\n"                       \
+                       "       .previous"                              \
+                       : "+d" (__gu_err), "=&r" (__gu_val),            \
+                         "+a" (__gu_ptr)                               \
+                       : "i" (-EFAULT)                                 \
+                       : "memory");                                    \
+               (x) = (typeof(*(ptr)))__gu_val;                         \
+               break;                                                  \
+           }   */                                                      \
+       default:                                                        \
+               __gu_err = __get_user_bad();                            \
+               break;                                                  \
+       }                                                               \
+       __gu_err;                                                       \
+})
+#define get_user(x, ptr) __get_user(x, ptr)
+
+unsigned long __generic_copy_from_user(void *to, const void __user *from,
+       unsigned long n);
+unsigned long __generic_copy_to_user(void __user *to, const void *from,
+       unsigned long n);
+
+#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
+       asm volatile ("\n"                                              \
+               "1:     move."#s1"      (%2)+,%3\n"                     \
+               "       move."#s1"      %3,(%1)+\n"                     \
+               "2:     move."#s2"      (%2)+,%3\n"                     \
+               "       move."#s2"      %3,(%1)+\n"                     \
+               "       .ifnc   \""#s3"\",\"\"\n"                       \
+               "3:     move."#s3"      (%2)+,%3\n"                     \
+               "       move."#s3"      %3,(%1)+\n"                     \
+               "       .endif\n"                                       \
+               "4:\n"                                                  \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  4\n"                                    \
+               "       .long   1b,10f\n"                               \
+               "       .long   2b,20f\n"                               \
+               "       .ifnc   \""#s3"\",\"\"\n"                       \
+               "       .long   3b,30f\n"                               \
+               "       .endif\n"                                       \
+               "       .previous\n"                                    \
+               "\n"                                                    \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .even\n"                                        \
+               "10:    clr."#s1"       (%1)+\n"                        \
+               "20:    clr."#s2"       (%1)+\n"                        \
+               "       .ifnc   \""#s3"\",\"\"\n"                       \
+               "30:    clr."#s3"       (%1)+\n"                        \
+               "       .endif\n"                                       \
+               "       moveq.l #"#n",%0\n"                             \
+               "       jra     4b\n"                                   \
+               "       .previous\n"                                    \
+               : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp)      \
+               : : "memory")
+
+static __always_inline unsigned long
+__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       unsigned long res = 0, tmp;
+
+       switch (n) {
+       case 1:
+               __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
+               break;
+       case 2:
+               __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w,
+                       d, 2);
+               break;
+       case 3:
+               __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
+               break;
+       case 4:
+               __get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l,
+                       r, 4);
+               break;
+       case 5:
+               __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
+               break;
+       case 6:
+               __constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,);
+               break;
+       case 7:
+               __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
+               break;
+       case 8:
+               __constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,);
+               break;
+       case 9:
+               __constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b);
+               break;
+       case 10:
+               __constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w);
+               break;
+       case 12:
+               __constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);
+               break;
+       default:
+               /* we limit the inlined version to 3 moves */
+               return __generic_copy_from_user(to, from, n);
+       }
+
+       return res;
+}
+
+#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
+       asm volatile ("\n"                                              \
+               "       move."#s1"      (%2)+,%3\n"                     \
+               "11:    move."#s1"      %3,(%1)+\n"                     \
+               "12:    move."#s2"      (%2)+,%3\n"                     \
+               "21:    move."#s2"      %3,(%1)+\n"                     \
+               "22:\n"                                                 \
+               "       .ifnc   \""#s3"\",\"\"\n"                       \
+               "       move."#s3"      (%2)+,%3\n"                     \
+               "31:    move."#s3"      %3,(%1)+\n"                     \
+               "32:\n"                                                 \
+               "       .endif\n"                                       \
+               "4:\n"                                                  \
+               "\n"                                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  4\n"                                    \
+               "       .long   11b,5f\n"                               \
+               "       .long   12b,5f\n"                               \
+               "       .long   21b,5f\n"                               \
+               "       .long   22b,5f\n"                               \
+               "       .ifnc   \""#s3"\",\"\"\n"                       \
+               "       .long   31b,5f\n"                               \
+               "       .long   32b,5f\n"                               \
+               "       .endif\n"                                       \
+               "       .previous\n"                                    \
+               "\n"                                                    \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .even\n"                                        \
+               "5:     moveq.l #"#n",%0\n"                             \
+               "       jra     4b\n"                                   \
+               "       .previous\n"                                    \
+               : "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp)       \
+               : : "memory")
+
+static __always_inline unsigned long
+__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       unsigned long res = 0, tmp;
+
+       switch (n) {
+       case 1:
+               __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
+               break;
+       case 2:
+               __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2);
+               break;
+       case 3:
+               __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
+               break;
+       case 4:
+               __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
+               break;
+       case 5:
+               __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
+               break;
+       case 6:
+               __constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
+               break;
+       case 7:
+               __constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
+               break;
+       case 8:
+               __constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
+               break;
+       case 9:
+               __constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
+               break;
+       case 10:
+               __constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
+               break;
+       case 12:
+               __constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
+               break;
+       default:
+               /* limit the inlined version to 3 moves */
+               return __generic_copy_to_user(to, from, n);
+       }
+
+       return res;
+}
+
+#define __copy_from_user(to, from, n)          \
+(__builtin_constant_p(n) ?                     \
+ __constant_copy_from_user(to, from, n) :      \
+ __generic_copy_from_user(to, from, n))
+
+#define __copy_to_user(to, from, n)            \
+(__builtin_constant_p(n) ?                     \
+ __constant_copy_to_user(to, from, n) :                \
+ __generic_copy_to_user(to, from, n))
+
+#define __copy_to_user_inatomic                __copy_to_user
+#define __copy_from_user_inatomic      __copy_from_user
+
+#define copy_from_user(to, from, n)    __copy_from_user(to, from, n)
+#define copy_to_user(to, from, n)      __copy_to_user(to, from, n)
+
+long strncpy_from_user(char *dst, const char __user *src, long count);
+long strnlen_user(const char __user *src, long n);
+unsigned long __clear_user(void __user *to, unsigned long n);
+
+#define clear_user __clear_user
+
+#define strlen_user(str) strnlen_user(str, 32767)
+
+#endif /* __M68K_CF_UACCESS_H */
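
The fixup convention above has a simple caller-visible contract: __get_user() yields 0 on success, and on a fault the .fixup code returns -EFAULT and zeroes the destination register (the subl %1,%1). A miniature C model of that contract and the sizeof-based width dispatch (illustrative only; a real fault is caught via the __ex_table search, not a flag):

#include <errno.h>
#include <string.h>

static int model_get_user(void *dst, const void *src, unsigned int size,
			  int faulted)
{
	if (faulted) {                   /* the .fixup path */
		memset(dst, 0, size);    /* subl %1,%1: destination reads as 0 */
		return -EFAULT;          /* move.l %3,%0 with %3 == -EFAULT */
	}
	switch (size) {                  /* sizeof(*(ptr)) dispatch in __get_user */
	case 1:                          /* move.b */
	case 2:                          /* move.w */
	case 4:                          /* move.l */
		memcpy(dst, src, size);
		return 0;
	default:
		/* stands in for __get_user_bad(), really a link-time error */
		return -EFAULT;
	}
}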
1252 --- /dev/null
1253 +++ b/include/asm-m68k/cfcache.h
1254 @@ -0,0 +1,86 @@
1255 +/*
1256 + * include/asm-m68k/cfcache.h
1257 + */
1258 +#ifndef CF_CFCACHE_H
1259 +#define CF_CFCACHE_H
1260 +
1261 +#define CF_CACR_DEC         (0x80000000) /* Data Cache Enable                */
1262 +#define CF_CACR_DW          (0x40000000) /* Data default Write-protect       */
1263 +#define CF_CACR_DESB        (0x20000000) /* Data Enable Store Buffer         */
1264 +#define CF_CACR_DDPI        (0x10000000) /* Data Disable CPUSHL Invalidate   */
1265 +#define CF_CACR_DHLCK       (0x08000000) /* 1/2 Data Cache Lock Mode         */
1266 +#define CF_CACR_DDCM_00     (0x00000000) /* Cacheable writethrough imprecise */
1267 +#define CF_CACR_DDCM_01     (0x02000000) /* Cacheable copyback               */
1268 +#define CF_CACR_DDCM_10     (0x04000000) /* Noncacheable precise             */
1269 +#define CF_CACR_DDCM_11     (0x06000000) /* Noncacheable imprecise           */
1270 +#define CF_CACR_DCINVA      (0x01000000) /* Data Cache Invalidate All        */
1271 +#define CF_CACR_IVO         (0x00100000) /* Invalidate only                  */
1272 +#define CF_CACR_BEC         (0x00080000) /* Branch Cache Enable              */
1273 +#define CF_CACR_BCINVA      (0x00040000) /* Branch Cache Invalidate All      */
1274 +#define CF_CACR_IEC         (0x00008000) /* Instruction Cache Enable         */
1275 +#define CF_CACR_SPA         (0x00004000) /* Search by Physical Address       */
1276 +#define CF_CACR_DNFB        (0x00002000) /* Default cache-inhibited fill buf */
1277 +#define CF_CACR_IDPI        (0x00001000) /* Instr Disable CPUSHL Invalidate  */
1278 +#define CF_CACR_IHLCK       (0x00000800) /* 1/2 Instruction Cache Lock Mode  */
1279 +#define CF_CACR_IDCM        (0x00000400) /* Noncacheable Instr default mode  */
1280 +#define CF_CACR_ICINVA      (0x00000100) /* Instr Cache Invalidate All       */
1281 +#define CF_CACR_EUSP        (0x00000020) /* Switch stacks in user mode       */
1282 +
1283 +#define DCACHE_LINE_SIZE 0x0010     /* bytes per line        */
1284 +#define DCACHE_WAY_SIZE  0x2000     /* words per cache block */
1285 +#define CACHE_DISABLE_MODE (CF_CACR_DCINVA+CF_CACR_BCINVA+CF_CACR_ICINVA)
1286 +#ifdef CONFIG_M5445X_DISABLE_CACHE
1287 +/* disable cache for testing rev0 silicon */
1288 +#define CACHE_INITIAL_MODE (CF_CACR_EUSP)
1289 +#else
1290 +#define CACHE_INITIAL_MODE (CF_CACR_DEC+CF_CACR_BEC+CF_CACR_IEC+CF_CACR_EUSP)
1291 +#endif
1292 +
1293 +#define _DCACHE_SIZE (2*16384)
1294 +#define _ICACHE_SIZE (2*16384)
1295 +
1296 +#define _SET_SHIFT 4
1297 +
1298 +/*
1299 + * Masks for cache sizes.  Programming note: because the set size is a
1300 + * power of two, the mask is also the last address in the set.
1301 + * This may need to be #ifdef for other Coldfire processors.
1302 + */
1303 +
1304 +#define _DCACHE_SET_MASK ((_DCACHE_SIZE/64-1)<<_SET_SHIFT)
1305 +#define _ICACHE_SET_MASK ((_ICACHE_SIZE/64-1)<<_SET_SHIFT)
1306 +#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
1307 +#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
1308 +
1309 +
1310 +#ifndef __ASSEMBLY__
1311 +
1312 +extern void DcacheFlushInvalidate(void);
1313 +
1314 +extern void DcacheDisable(void);
1315 +extern void DcacheEnable(void);
1316 +
1317 +/******************************************************************************/
1318 +/*** Unimplemented Cache functionality                                      ***/
1319 +/******************************************************************************/
1320 +#define preDcacheInvalidateBlockMark()
1321 +#define postDcacheInvalidateBlockMark()
1322 +#define DcacheZeroBlock(p, l)           fast_bzero((char *)(p), (long)(l))
1323 +#define loadDcacheInvalidateBlock()     ASSERT(!"Not Implemented on V4e")
1324 +#define IcacheInvalidateBlock()         ASSERT(!"Not Implemented on V4e")
1325 +
1326 +/******************************************************************************/
1327 +/*** Redundant Cache functionality on ColdFire                              ***/
1328 +/******************************************************************************/
1329 +#define DcacheInvalidateBlock(p, l) DcacheFlushInvalidateCacheBlock(p, l)
1330 +#define DcacheFlushCacheBlock(p, l) DcacheFlushInvalidateCacheBlock(p, l)
1331 +#define DcacheFlushBlock(p, l)      DcacheFlushInvalidateCacheBlock(p, l)
1332 +
1333 +extern void DcacheFlushInvalidateCacheBlock(void *start, unsigned long size);
1334 +extern void FLASHDcacheFlushInvalidate(void);
1335 +
1336 +extern void cacr_set(unsigned long x);
1337 +
1338 +#endif /* !__ASSEMBLY__ */
1339 +
1340 +#endif /* CF_CFCACHE_H */
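
How these mode words are meant to be consumed, as a minimal sketch built on the cacr_set() declaration above (example_cache_init is illustrative):

#include <asm/cfcache.h>

/* Invalidate data, branch and instruction caches, then enable whatever
 * CACHE_INITIAL_MODE selects (only EUSP when CONFIG_M5445X_DISABLE_CACHE
 * is set for rev0 silicon testing). */
static void example_cache_init(void)
{
	cacr_set(CACHE_DISABLE_MODE);	/* DCINVA + BCINVA + ICINVA */
	cacr_set(CACHE_INITIAL_MODE);
}
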
1341 --- /dev/null
1342 +++ b/include/asm-m68k/cfmmu.h
1343 @@ -0,0 +1,104 @@
1344 +/*
1345 + * Definitions for Coldfire V4e MMU
1346 + */
1347 +#include <asm/movs.h>
1348 +
1349 +#ifndef __CF_MMU_H__
1350 +#define __CF_MMU_H__
1351 +
1352 +
1353 +#define MMU_BASE 0xE1000000
1354 +
1355 +
1356 +#define MMUCR (MMU_BASE+0x00)
1357 +#define MMUCR_ASMN  1
1358 +#define MMUCR_ASM   (1<<MMUCR_ASMN)
1359 +#define MMUCR_ENN   0
1360 +#define MMUCR_EN    (1<<MMUCR_ENN)
1361 +
1362 +#define MMUOR REG16(MMU_BASE+0x04+0x02)
1363 +#define MMUOR_AAN   16
1364 +#define MMUOR_AA    (0xffff<<MMUOR_AAN)
1365 +#define MMUOR_STLBN 8
1366 +#define MMUOR_STLB  (1<<MMUOR_STLBN)
1367 +#define MMUOR_CAN   7
1368 +#define MMUOR_CA    (1<<MMUOR_CAN)
1369 +#define MMUOR_CNLN  6
1370 +#define MMUOR_CNL   (1<<MMUOR_CNLN)
1371 +#define MMUOR_CASN  5
1372 +#define MMUOR_CAS   (1<<MMUOR_CASN)
1373 +#define MMUOR_ITLBN 4
1374 +#define MMUOR_ITLB  (1<<MMUOR_ITLBN)
1375 +#define MMUOR_ADRN  3
1376 +#define MMUOR_ADR   (1<<MMUOR_ADRN)
1377 +#define MMUOR_RWN   2
1378 +#define MMUOR_RW    (1<<MMUOR_RWN)
1379 +#define MMUOR_ACCN  1
1380 +#define MMUOR_ACC   (1<<MMUOR_ACCN)
1381 +#define MMUOR_UAAN  0
1382 +#define MMUOR_UAA   (1<<MMUOR_UAAN)
1383 +
1384 +#define MMUSR REG32(MMU_BASE+0x08)
1385 +#define MMUSR_SPFN  5
1386 +#define MMUSR_SPF   (1<<MMUSR_SPFN)
1387 +#define MMUSR_RFN   4
1388 +#define MMUSR_RF    (1<<MMUSR_RFN)
1389 +#define MMUSR_WFN   3
1390 +#define MMUSR_WF    (1<<MMUSR_WFN)
1391 +#define MMUSR_HITN  1
1392 +#define MMUSR_HIT   (1<<MMUSR_HITN)
1393 +
1394 +#define MMUAR REG32(MMU_BASE+0x10)
1395 +#define MMUAR_VPN   1
1396 +#define MMUAR_VP    (0xfffffffe)
1397 +#define MMUAR_SN    0
1398 +#define MMUAR_S     (1<<MMUAR_SN)
1399 +
1400 +#define MMUTR REG32(MMU_BASE+0x14)
1401 +#define MMUTR_VAN   10
1402 +#define MMUTR_VA    (0xfffffc00)
1403 +#define MMUTR_IDN   2
1404 +#define MMUTR_ID    (0xff<<MMUTR_IDN)
1405 +#define MMUTR_SGN   1
1406 +#define MMUTR_SG    (1<<MMUTR_SGN)
1407 +#define MMUTR_VN    0
1408 +#define MMUTR_V     (1<<MMUTR_VN)
1409 +
1410 +#define MMUDR REG32(MMU_BASE+0x18)
1411 +#define MMUDR_PAN   10
1412 +#define MMUDR_PA    (0xfffffc00)
1413 +#define MMUDR_SZN   8
1414 +#define MMUDR_SZ_MASK (0x3<<MMUDR_SZN)
1415 +#define MMUDR_SZ1M  (0<<MMUDR_SZN)
1416 +#define MMUDR_SZ4K  (1<<MMUDR_SZN)
1417 +#define MMUDR_SZ8K  (2<<MMUDR_SZN)
1418 +#define MMUDR_SZ16M (3<<MMUDR_SZN)
1419 +#define MMUDR_CMN   6
1420 +#define MMUDR_INC   (2<<MMUDR_CMN)
1421 +#define MMUDR_IC    (0<<MMUDR_CMN)
1422 +#define MMUDR_DWT   (0<<MMUDR_CMN)
1423 +#define MMUDR_DCB   (1<<MMUDR_CMN)
1424 +#define MMUDR_DNCP  (2<<MMUDR_CMN)
1425 +#define MMUDR_DNCIP (3<<MMUDR_CMN)
1426 +#define MMUDR_SPN   5
1427 +#define MMUDR_SP    (1<<MMUDR_SPN)
1428 +#define MMUDR_RN    4
1429 +#define MMUDR_R     (1<<MMUDR_RN)
1430 +#define MMUDR_WN    3
1431 +#define MMUDR_W     (1<<MMUDR_WN)
1432 +#define MMUDR_XN    2
1433 +#define MMUDR_X     (1<<MMUDR_XN)
1434 +#define MMUDR_LKN   1
1435 +#define MMUDR_LK    (1<<MMUDR_LKN)
1436 +
1437 +
1438 +#ifndef __ASSEMBLY__
1439 +#define CF_PMEGS_NUM           256
1440 +#define CF_INVALID_CONTEXT     255
1441 +#define CF_PAGE_PGNUM_MASK     (PAGE_MASK)
1442 +
1443 +extern int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb,
1444 +                      int extension_word);
1445 +#endif /* __ASSEMBLY__*/
1446 +
1447 +#endif /* !__CF_MMU_H__ */
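
To show how the REG16()/REG32() accessors combine with these bit names, a hedged sketch of a TLB probe; the operation bits chosen here are illustrative, and the real fault path is cf_tlb_miss():

#include <asm/coldfire.h>	/* REG16()/REG32() used above */
#include <asm/cfmmu.h>

/* Look a virtual address up in the TLB and report whether it hit. */
static int example_tlb_probe(unsigned long vaddr)
{
	*MMUAR = vaddr & MMUAR_VP;		/* address to search for */
	*MMUOR = MMUOR_STLB | MMUOR_ADR;	/* search TLB by address */
	return (*MMUSR & MMUSR_HIT) != 0;
}
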
1448 --- /dev/null
1449 +++ b/include/asm-m68k/coldfire.h
1450 @@ -0,0 +1,38 @@
1451 +#ifndef _COLDFIRE_H_
1452 +#define _COLDFIRE_H_
1453 +
1454 +#define MCF_MBAR       0x0
1455 +#define MCF_RAMBAR1    0x40000000
1456 +#define MCF_SRAM       0x80000000
1457 +#define MCF_CLK        CONFIG_MCFCLK
1458 +#define MCF_BUSCLK     (CONFIG_MCFCLK/2)
1459 +
1460 +#ifdef __ASSEMBLY__
1461 +#define REG32
1462 +#define REG16
1463 +#define REG08
1464 +#else  /* __ASSEMBLY__ */
1465 +#define REG32(x) ((volatile unsigned long  *)(x))
1466 +#define REG16(x) ((volatile unsigned short *)(x))
1467 +#define REG08(x) ((volatile unsigned char  *)(x))
1468 +
1469 +#define MCF_REG32(x) *(volatile unsigned long  *)(MCF_MBAR+(x))
1470 +#define MCF_REG16(x) *(volatile unsigned short *)(MCF_MBAR+(x))
1471 +#define MCF_REG08(x) *(volatile unsigned char  *)(MCF_MBAR+(x))
1472 +
1473 +void cacr_set(unsigned long);
1474 +unsigned long cacr_get(void);
1475 +
1476 +#define coldfire_enable_irq0(irq)      MCF_INTC0_CIMR = (irq);
1477 +
1478 +#define coldfire_enable_irq1(irq)      MCF_INTC1_CIMR = (irq);
1479 +
1480 +#define coldfire_disable_irq0(irq)     MCF_INTC0_SIMR = (irq);
1481 +
1482 +#define coldfire_disable_irq1(irq)     MCF_INTC1_SIMR = (irq);
1483 +
1484 +#define getiprh()                      MCF_INTC0_IPRH
1485 +
1486 +#endif /* __ASSEMBLY__ */
1487 +
1488 +#endif  /* _COLDFIRE_H_  */
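
A small sketch of these macros in use (example_* names are illustrative; source number 26 matches the MCFINT_UART0 value given later in mcfuart.h):

#include <asm/coldfire.h>
#include <asm/mcfsim.h>		/* pulls in the INTC register names */

/* Unmask interrupt source 26 on controller 0, then sample the high
 * interrupt-pending register. */
static void example_unmask_source(void)
{
	coldfire_enable_irq0(26);	/* write source number to CIMR */
	(void)getiprh();		/* read INTC0 IPRH */
}
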
1489 --- /dev/null
1490 +++ b/include/asm-m68k/coldfire_edma.h
1491 @@ -0,0 +1,39 @@
1492 +#ifndef _LINUX_COLDFIRE_DMA_H
1493 +#define _LINUX_COLDFIRE_DMA_H
1494 +
1495 +#include <linux/interrupt.h>
1496 +
1497 +#define EDMA_DRIVER_NAME               "ColdFire-eDMA"
1498 +#define DMA_DEV_MINOR                  1
1499 +
1500 +#define EDMA_INT_CHANNEL_BASE          8
1501 +#define EDMA_INT_CONTROLLER_BASE       64
1502 +#define EDMA_CHANNELS                  16
1503 +
1504 +#define EDMA_IRQ_LEVEL                 5
1505 +
1506 +typedef irqreturn_t (*edma_irq_handler)(int, void *);
1507 +typedef void (*edma_error_handler)(int, void *);
1508 +
1509 +void set_edma_params(int channel, u32 source, u32 dest,
1510 +       u32 attr, u32 soff, u32 nbytes, u32 slast,
1511 +       u32 citer, u32 biter, u32 doff, u32 dlast_sga);
1512 +
1513 +void start_edma_transfer(int channel, int major_int);
1514 +
1515 +void stop_edma_transfer(int channel);
1516 +
1517 +void confirm_edma_interrupt_handled(int channel);
1518 +
1519 +void init_edma(void);
1520 +
1521 +int  request_edma_channel(int channel,
1522 +                       edma_irq_handler handler,
1523 +                       edma_error_handler error_handler,
1524 +                       void *dev,
1525 +                       spinlock_t *lock,
1526 +                       const char *device_id);
1527 +
1528 +int free_edma_channel(int channel, void *dev);
1529 +
1530 +#endif
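
A hedged sketch of the call sequence this API implies; the channel number, strides and attribute values are placeholders, not validated settings:

#include <linux/spinlock.h>
#include <asm/coldfire_edma.h>

static DEFINE_SPINLOCK(example_edma_lock);

static irqreturn_t example_edma_done(int channel, void *dev)
{
	confirm_edma_interrupt_handled(channel);
	return IRQ_HANDLED;
}

/* One-shot 512-byte transfer on channel 0 with a completion interrupt. */
static int example_edma_copy(u32 src, u32 dst)
{
	int err;

	err = request_edma_channel(0, example_edma_done,
				   NULL,	/* no error hook */
				   NULL,	/* no device cookie */
				   &example_edma_lock, "example");
	if (err < 0)
		return err;

	set_edma_params(0, src, dst,
			0,	/* attr */
			4,	/* soff: source increment */
			512,	/* nbytes per minor loop */
			0,	/* slast */
			1, 1,	/* citer, biter: one major iteration */
			4,	/* doff: destination increment */
			0);	/* dlast_sga */
	start_edma_transfer(0, 1);	/* interrupt on major-loop done */
	return 0;
}
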
1531 --- /dev/null
1532 +++ b/include/asm-m68k/mcfqspi.h
1533 @@ -0,0 +1,50 @@
1534 +/****************************************************************************/
1535 +/*
1536 + *     mcfqspi.h - Master QSPI controller for the ColdFire processors
1537 + *
1538 + *     (C) Copyright 2005, Intec Automation,
1539 + *                         Mike Lavender (mike@steroidmicros)
1540 + *
1541 +
1542 +     This program is free software; you can redistribute it and/or modify
1543 +     it under the terms of the GNU General Public License as published by
1544 +     the Free Software Foundation; either version 2 of the License, or
1545 +     (at your option) any later version.
1546 +
1547 +     This program is distributed in the hope that it will be useful,
1548 +     but WITHOUT ANY WARRANTY; without even the implied warranty of
1549 +     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
1550 +     GNU General Public License for more details.
1551 +
1552 +     You should have received a copy of the GNU General Public License
1553 +     along with this program; if not, write to the Free Software
1554 +     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.              */
1555 +/* ------------------------------------------------------------------------- */
1556 +
1557 +#ifndef MCFQSPI_H_
1558 +#define MCFQSPI_H_
1559 +
1560 +#define QSPI_CS_INIT     0x01
1561 +#define QSPI_CS_ASSERT  0x02
1562 +#define QSPI_CS_DROP    0x04
1563 +
1564 +struct coldfire_spi_master {
1565 +       u16 bus_num;
1566 +       u16 num_chipselect;
1567 +       u8  irq_source;
1568 +       u32 irq_vector;
1569 +       u32 irq_mask;
1570 +       u8  irq_lp;
1571 +       u8  par_val;
1572 +       void (*cs_control)(u8 cs, u8 command);
1573 +};
1574 +
1575 +
1576 +struct coldfire_spi_chip {
1577 +       u8 mode;
1578 +       u8 bits_per_word;
1579 +       u8 del_cs_to_clk;
1580 +       u8 del_after_trans;
1581 +       u16 void_write_data;
1582 +};
1583 +#endif /*MCFQSPI_H_*/
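
A sketch of the platform data a board file might provide (the values and the GPIO handling are placeholders):

#include <linux/types.h>
#include <asm/mcfqspi.h>

/* Board-level chip-select hook; real code would toggle a GPIO here. */
static void example_cs_control(u8 cs, u8 command)
{
	if (command & QSPI_CS_INIT) {
		/* configure the pad for chip select 'cs' */
	} else if (command & QSPI_CS_ASSERT) {
		/* drive the select line active */
	} else if (command & QSPI_CS_DROP) {
		/* release the select line */
	}
}

static struct coldfire_spi_master example_qspi_pdata = {
	.bus_num	= 1,
	.num_chipselect	= 4,
	.cs_control	= example_cs_control,
};
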
1584 --- /dev/null
1585 +++ b/include/asm-m68k/mcfsim.h
1586 @@ -0,0 +1,96 @@
1587 +/*
1588 + *     mcfsim.h -- ColdFire System Integration Module support.
1589 + *
1590 + *     (C) Copyright 1999-2003, Greg Ungerer (gerg@snapgear.com)
1591 + *     (C) Copyright 2000, Lineo Inc. (www.lineo.com)
1592 + */
1593 +
1594 +#ifndef        mcfsim_h
1595 +#define        mcfsim_h
1596 +
1597 +#if defined(CONFIG_COLDFIRE)
1598 +#include <asm/coldfire.h>
1599 +#endif
1600 +
1601 +#if defined(CONFIG_M54455)
1602 +#include <asm/mcf5445x_intc.h>
1603 +#include <asm/mcf5445x_gpio.h>
1604 +#include <asm/mcf5445x_i2c.h>
1605 +#include <asm/mcf5445x_ccm.h>
1606 +#include <asm/mcf5445x_pci.h>
1607 +#include <asm/mcf5445x_pciarb.h>
1608 +#include <asm/mcf5445x_eport.h>
1609 +#endif
1610 +
1611 +/*
1612 + *     Define the base address of the SIM within the MBAR address space.
1613 + */
1614 +#define        MCFSIM_BASE             0x0             /* Base address of SIM */
1615 +
1616 +/*
1617 + *     Bit definitions for the ICR family of registers.
1618 + */
1619 +#define        MCFSIM_ICR_AUTOVEC      0x80            /* Auto-vectored intr */
1620 +#define        MCFSIM_ICR_LEVEL0       0x00            /* Level 0 intr */
1621 +#define        MCFSIM_ICR_LEVEL1       0x04            /* Level 1 intr */
1622 +#define        MCFSIM_ICR_LEVEL2       0x08            /* Level 2 intr */
1623 +#define        MCFSIM_ICR_LEVEL3       0x0c            /* Level 3 intr */
1624 +#define        MCFSIM_ICR_LEVEL4       0x10            /* Level 4 intr */
1625 +#define        MCFSIM_ICR_LEVEL5       0x14            /* Level 5 intr */
1626 +#define        MCFSIM_ICR_LEVEL6       0x18            /* Level 6 intr */
1627 +#define        MCFSIM_ICR_LEVEL7       0x1c            /* Level 7 intr */
1628 +
1629 +#define        MCFSIM_ICR_PRI0         0x00            /* Priority 0 intr */
1630 +#define        MCFSIM_ICR_PRI1         0x01            /* Priority 1 intr */
1631 +#define        MCFSIM_ICR_PRI2         0x02            /* Priority 2 intr */
1632 +#define        MCFSIM_ICR_PRI3         0x03            /* Priority 3 intr */
1633 +
1634 +/*
1635 + *     Bit definitions for the Interrupt Mask register (IMR).
1636 + */
1637 +#define        MCFSIM_IMR_EINT1        0x0002          /* External intr # 1 */
1638 +#define        MCFSIM_IMR_EINT2        0x0004          /* External intr # 2 */
1639 +#define        MCFSIM_IMR_EINT3        0x0008          /* External intr # 3 */
1640 +#define        MCFSIM_IMR_EINT4        0x0010          /* External intr # 4 */
1641 +#define        MCFSIM_IMR_EINT5        0x0020          /* External intr # 5 */
1642 +#define        MCFSIM_IMR_EINT6        0x0040          /* External intr # 6 */
1643 +#define        MCFSIM_IMR_EINT7        0x0080          /* External intr # 7 */
1644 +
1645 +#define        MCFSIM_IMR_SWD          0x0100          /* Software Watchdog intr */
1646 +#define        MCFSIM_IMR_TIMER1       0x0200          /* TIMER 1 intr */
1647 +#define        MCFSIM_IMR_TIMER2       0x0400          /* TIMER 2 intr */
1648 +#define MCFSIM_IMR_MBUS                0x0800          /* MBUS intr    */
1649 +#define        MCFSIM_IMR_UART1        0x1000          /* UART 1 intr */
1650 +#define        MCFSIM_IMR_UART2        0x2000          /* UART 2 intr */
1651 +
1652 +/*
1653 + *	Mask for all of the SIM devices. Some parts have more or fewer
1654 + *	SIM devices. This is a catchall for the standard set.
1655 + */
1656 +#ifndef MCFSIM_IMR_MASKALL
1657 +#define        MCFSIM_IMR_MASKALL      0x3ffe          /* All intr sources */
1658 +#endif
1659 +
1660 +
1661 +/*
1662 + *     PIT interrupt settings, if not found in mXXXXsim.h file.
1663 + */
1664 +#ifndef        ICR_INTRCONF
1665 +#define        ICR_INTRCONF            0x2b            /* PIT1 level 5, priority 3 */
1666 +#endif
1667 +#ifndef        MCFPIT_IMR
1668 +#define        MCFPIT_IMR              MCFINTC_IMRH
1669 +#endif
1670 +#ifndef        MCFPIT_IMR_IBIT
1671 +#define        MCFPIT_IMR_IBIT         (1 << (MCFINT_PIT1 - 32))
1672 +#endif
1673 +
1674 +
1675 +#ifndef __ASSEMBLY__
1676 +/*
1677 + *     Definition for the interrupt auto-vectoring support.
1678 + */
1679 +extern void    mcf_autovector(unsigned int vec);
1680 +#endif /* __ASSEMBLY__ */
1681 +
1682 +#endif /* mcfsim_h */
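
As a worked example of the ICR encoding (purely illustrative; real assignments belong in the per-part headers): an auto-vectored, level 5, priority 3 source combines to 0x80 | 0x14 | 0x03 = 0x97.

#include <asm/mcfsim.h>

/* Auto-vectored, interrupt level 5, priority 3 => 0x97. */
#define EXAMPLE_ICR_SETTING \
	(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL5 | MCFSIM_ICR_PRI3)
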
1683 --- /dev/null
1684 +++ b/include/asm-m68k/mcfuart.h
1685 @@ -0,0 +1,180 @@
1686 +/*
1687 + *     mcfuart.h -- ColdFire internal UART support defines.
1688 + *
1689 + *	Matt Waddel <Matt.Waddel@freescale.com>
1690 + *     Copyright Freescale Semiconductor, Inc. 2007
1691 + *
1692 + *     Derived from m68knommu version of this same file (Greg Ungerer & Lineo).
1693 + *
1694 + */
1695 +
1696 +#ifndef        mcfuart_h
1697 +#define        mcfuart_h
1698 +
1699 +/*
1700 + *	Define the base address of the UARTs within the MBAR address
1701 + *     space.
1702 + */
1703 +#if defined(CONFIG_M54455)
1704 +#include <asm/mcf5445x_intc.h>
1705 +#define MCFUART_BASE1          0xfc060000      /* Base address of UART1 */
1706 +#define MCFUART_BASE2          0xfc064000      /* Base address of UART2 */
1707 +#define MCFUART_BASE3          0xfc068000      /* Base address of UART3 */
1708 +#define MCFINT_VECBASE         64
1709 +#define MCFINT_UART0           26
1710 +#endif
1711 +
1712 +
1713 +/*
1714 + *     Define the ColdFire UART register set addresses.
1715 + */
1716 +#define        MCFUART_UMR             0x00            /* Mode register (r/w) */
1717 +#define        MCFUART_USR             0x04            /* Status register (r) */
1718 +#define        MCFUART_UCSR            0x04            /* Clock Select (w) */
1719 +#define        MCFUART_UCR             0x08            /* Command register (w) */
1720 +#define        MCFUART_URB             0x0c            /* Receiver Buffer (r) */
1721 +#define        MCFUART_UTB             0x0c            /* Transmit Buffer (w) */
1722 +#define        MCFUART_UIPCR           0x10            /* Input Port Change (r) */
1723 +#define        MCFUART_UACR            0x10            /* Auxiliary Control (w) */
1724 +#define	MCFUART_UISR		0x14		/* Interrupt Status (r) */
1725 +#define        MCFUART_UIMR            0x14            /* Interrupt Mask (w) */
1726 +#define        MCFUART_UBG1            0x18            /* Baud Rate MSB (r/w) */
1727 +#define        MCFUART_UBG2            0x1c            /* Baud Rate LSB (r/w) */
1728 +#ifdef CONFIG_M5272
1729 +#define        MCFUART_UTF             0x28            /* Transmitter FIFO (r/w) */
1730 +#define        MCFUART_URF             0x2c            /* Receiver FIFO (r/w) */
1731 +#define        MCFUART_UFPD            0x30            /* Frac Prec. Divider (r/w) */
1732 +#else
1733 +#define        MCFUART_UIVR            0x30            /* Interrupt Vector (r/w) */
1734 +#endif
1735 +#define        MCFUART_UIPR            0x34            /* Input Port (r) */
1736 +#define        MCFUART_UOP1            0x38            /* Output Port Bit Set (w) */
1737 +#define        MCFUART_UOP0            0x3c            /* Output Port Bit Reset (w) */
1738 +
1739 +
1740 +/*
1741 + *     Define bit flags in Mode Register 1 (MR1).
1742 + */
1743 +#define        MCFUART_MR1_RXRTS       0x80            /* Auto RTS flow control */
1744 +#define        MCFUART_MR1_RXIRQFULL   0x40            /* RX IRQ type FULL */
1745 +#define        MCFUART_MR1_RXIRQRDY    0x00            /* RX IRQ type RDY */
1746 +#define        MCFUART_MR1_RXERRBLOCK  0x20            /* RX block error mode */
1747 +#define        MCFUART_MR1_RXERRCHAR   0x00            /* RX char error mode */
1748 +
1749 +#define        MCFUART_MR1_PARITYNONE  0x10            /* No parity */
1750 +#define        MCFUART_MR1_PARITYEVEN  0x00            /* Even parity */
1751 +#define        MCFUART_MR1_PARITYODD   0x04            /* Odd parity */
1752 +#define        MCFUART_MR1_PARITYSPACE 0x08            /* Space parity */
1753 +#define        MCFUART_MR1_PARITYMARK  0x0c            /* Mark parity */
1754 +
1755 +#define        MCFUART_MR1_CS5         0x00            /* 5 bits per char */
1756 +#define        MCFUART_MR1_CS6         0x01            /* 6 bits per char */
1757 +#define        MCFUART_MR1_CS7         0x02            /* 7 bits per char */
1758 +#define        MCFUART_MR1_CS8         0x03            /* 8 bits per char */
1759 +
1760 +/*
1761 + *     Define bit flags in Mode Register 2 (MR2).
1762 + */
1763 +#define        MCFUART_MR2_LOOPBACK    0x80            /* Loopback mode */
1764 +#define        MCFUART_MR2_REMOTELOOP  0xc0            /* Remote loopback mode */
1765 +#define        MCFUART_MR2_AUTOECHO    0x40            /* Automatic echo */
1766 +#define        MCFUART_MR2_TXRTS       0x20            /* Assert RTS on TX */
1767 +#define        MCFUART_MR2_TXCTS       0x10            /* Auto CTS flow control */
1768 +
1769 +#define        MCFUART_MR2_STOP1       0x07            /* 1 stop bit */
1770 +#define        MCFUART_MR2_STOP15      0x08            /* 1.5 stop bits */
1771 +#define        MCFUART_MR2_STOP2       0x0f            /* 2 stop bits */
1772 +
1773 +/*
1774 + *     Define bit flags in Status Register (USR).
1775 + */
1776 +#define        MCFUART_USR_RXBREAK     0x80            /* Received BREAK */
1777 +#define        MCFUART_USR_RXFRAMING   0x40            /* Received framing error */
1778 +#define        MCFUART_USR_RXPARITY    0x20            /* Received parity error */
1779 +#define        MCFUART_USR_RXOVERRUN   0x10            /* Received overrun error */
1780 +#define        MCFUART_USR_TXEMPTY     0x08            /* Transmitter empty */
1781 +#define        MCFUART_USR_TXREADY     0x04            /* Transmitter ready */
1782 +#define        MCFUART_USR_RXFULL      0x02            /* Receiver full */
1783 +#define        MCFUART_USR_RXREADY     0x01            /* Receiver ready */
1784 +
1785 +#define        MCFUART_USR_RXERR       (MCFUART_USR_RXBREAK | MCFUART_USR_RXFRAMING | \
1786 +                               MCFUART_USR_RXPARITY | MCFUART_USR_RXOVERRUN)
1787 +
1788 +/*
1789 + *     Define bit flags in Clock Select Register (UCSR).
1790 + */
1791 +#define        MCFUART_UCSR_RXCLKTIMER 0xd0            /* RX clock is timer */
1792 +#define        MCFUART_UCSR_RXCLKEXT16 0xe0            /* RX clock is external x16 */
1793 +#define        MCFUART_UCSR_RXCLKEXT1  0xf0            /* RX clock is external x1 */
1794 +
1795 +#define        MCFUART_UCSR_TXCLKTIMER 0x0d            /* TX clock is timer */
1796 +#define        MCFUART_UCSR_TXCLKEXT16 0x0e            /* TX clock is external x16 */
1797 +#define        MCFUART_UCSR_TXCLKEXT1  0x0f            /* TX clock is external x1 */
1798 +
1799 +/*
1800 + *     Define bit flags in Command Register (UCR).
1801 + */
1802 +#define        MCFUART_UCR_CMDNULL             0x00    /* No command */
1803 +#define        MCFUART_UCR_CMDRESETMRPTR       0x10    /* Reset MR pointer */
1804 +#define        MCFUART_UCR_CMDRESETRX          0x20    /* Reset receiver */
1805 +#define        MCFUART_UCR_CMDRESETTX          0x30    /* Reset transmitter */
1806 +#define        MCFUART_UCR_CMDRESETERR         0x40    /* Reset error status */
1807 +#define        MCFUART_UCR_CMDRESETBREAK       0x50    /* Reset BREAK change */
1808 +#define        MCFUART_UCR_CMDBREAKSTART       0x60    /* Start BREAK */
1809 +#define        MCFUART_UCR_CMDBREAKSTOP        0x70    /* Stop BREAK */
1810 +
1811 +#define        MCFUART_UCR_TXNULL      0x00            /* No TX command */
1812 +#define        MCFUART_UCR_TXENABLE    0x04            /* Enable TX */
1813 +#define        MCFUART_UCR_TXDISABLE   0x08            /* Disable TX */
1814 +#define        MCFUART_UCR_RXNULL      0x00            /* No RX command */
1815 +#define        MCFUART_UCR_RXENABLE    0x01            /* Enable RX */
1816 +#define        MCFUART_UCR_RXDISABLE   0x02            /* Disable RX */
1817 +
1818 +/*
1819 + *     Define bit flags in Input Port Change Register (UIPCR).
1820 + */
1821 +#define        MCFUART_UIPCR_CTSCOS    0x10            /* CTS change of state */
1822 +#define        MCFUART_UIPCR_CTS       0x01            /* CTS value */
1823 +
1824 +/*
1825 + *     Define bit flags in Input Port Register (UIP).
1826 + */
1827 +#define        MCFUART_UIPR_CTS        0x01            /* CTS value */
1828 +
1829 +/*
1830 + *     Define bit flags in Output Port Registers (UOP).
1831 + *     Clear bit by writing to UOP0, set by writing to UOP1.
1832 + */
1833 +#define        MCFUART_UOP_RTS         0x01            /* RTS set or clear */
1834 +
1835 +/*
1836 + *     Define bit flags in the Auxiliary Control Register (UACR).
1837 + */
1838 +#define        MCFUART_UACR_IEC        0x01            /* Input enable control */
1839 +
1840 +/*
1841 + *     Define bit flags in Interrupt Status Register (UISR).
1842 + *     These same bits are used for the Interrupt Mask Register (UIMR).
1843 + */
1844 +#define        MCFUART_UIR_COS         0x80            /* Change of state (CTS) */
1845 +#define        MCFUART_UIR_DELTABREAK  0x04            /* Break start or stop */
1846 +#define        MCFUART_UIR_RXREADY     0x02            /* Receiver ready */
1847 +#define        MCFUART_UIR_TXREADY     0x01            /* Transmitter ready */
1848 +
1849 +#ifdef CONFIG_M5272
1850 +/*
1851 + *     Define bit flags in the Transmitter FIFO Register (UTF).
1852 + */
1853 +#define        MCFUART_UTF_TXB         0x1f            /* Transmitter data level */
1854 +#define        MCFUART_UTF_FULL        0x20            /* Transmitter fifo full */
1855 +#define        MCFUART_UTF_TXS         0xc0            /* Transmitter status */
1856 +
1857 +/*
1858 + *     Define bit flags in the Receiver FIFO Register (URF).
1859 + */
1860 +#define        MCFUART_URF_RXB         0x1f            /* Receiver data level */
1861 +#define        MCFUART_URF_FULL        0x20            /* Receiver fifo full */
1862 +#define        MCFUART_URF_RXS         0xc0            /* Receiver status */
1863 +#endif
1864 +
1865 +#endif /* mcfuart_h */
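
Finally, the register map in action: a polled-transmit sketch assuming the M54455 base addresses defined above (a debugging aid, not the driver path):

#include <asm/coldfire.h>	/* MCF_REG08() */
#include <asm/mcfuart.h>

#define EXAMPLE_UART1(off)	MCF_REG08(MCFUART_BASE1 + (off))

/* Busy-wait until the transmitter has room, then write one byte. */
static void example_uart_putc(char c)
{
	while ((EXAMPLE_UART1(MCFUART_USR) & MCFUART_USR_TXREADY) == 0)
		;	/* spin until TXREADY */
	EXAMPLE_UART1(MCFUART_UTB) = c;
}
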