strip the kernel version suffix from target directories, except for brcm-2.4 (the...
[openwrt.git] / target / linux / ixp4xx / patches / 100-npe_driver.patch
1 diff --git a/Documentation/networking/ixp4xx/IxNpeMicrocode.h b/Documentation/networking/ixp4xx/IxNpeMicrocode.h
2 new file mode 100644
3 index 0000000..e5a4bd3
4 Index: linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/IxNpeMicrocode.h
5 ===================================================================
6 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
7 +++ linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/IxNpeMicrocode.h       2007-02-21 02:24:35.000000000 -0800
8 @@ -0,0 +1,143 @@
9 +/*
10 + * IxNpeMicrocode.h - Headerfile for compiling the Intel microcode C file
11 + *
12 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
13 + *
14 + * This file is released under the GPLv2
15 + *
16 + *
17 + * compile with
18 + *
19 + * gcc -Wall IxNpeMicrocode.c -o IxNpeMicrocode
20 + *
21 + * Executing the resulting binary on your build-host creates the
22 + * "NPE-[ABC].xxxxxxxx" files containing the selected microcode
23 + *
24 + * fetch the IxNpeMicrocode.c from the Intel Access Library.
25 + * It will include this header.
26 + *
27 + * select Images for every NPE from the following
28 + * (used C++ comments for easy uncommenting ....)
29 + */
30 +
31 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
32 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
33 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
34 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS_TSLOT_SWITCH
35 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
36 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
37 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_FIREWALL
38 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS_2_PORT
39 +// #define IX_NPEDL_NPEIMAGE_NPEA_DMA
40 +// #define IX_NPEDL_NPEIMAGE_NPEA_ATM_MPHY_12_PORT
41 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0_ATM_MPHY_1_PORT
42 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0_ATM_SPHY_1_PORT
43 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0
44 +// #define IX_NPEDL_NPEIMAGE_NPEA_WEP
45 +
46 +
47 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
48 +//#define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
49 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
50 +// #define IX_NPEDL_NPEIMAGE_NPEB_DMA
51 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
52 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
53 + #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL
54 +
55 +
56 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
57 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
58 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
59 +// #define IX_NPEDL_NPEIMAGE_NPEC_DMA
60 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_ETH_LEARN_FILTER_SPAN
61 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_ETH_LEARN_FILTER_FIREWALL
62 + #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_CCM_ETH
63 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_ETH_LEARN_FILTER_SPAN_FIREWALL
64 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
65 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
66 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL
67 +
68 +
69 +#include <stdio.h>
70 +#include <unistd.h>
71 +#include <stdlib.h>
72 +#include <netinet/in.h>
73 +#include <sys/types.h>
74 +#include <sys/stat.h>
75 +#include <fcntl.h>
76 +#include <errno.h>
77 +#include <endian.h>
78 +#include <byteswap.h>
79 +#include <string.h>
80 +
81 +#if __BYTE_ORDER == __LITTLE_ENDIAN
82 +#define to_le32(x) (x)
83 +#define to_be32(x) bswap_32(x)
84 +#else
85 +#define to_be32(x) (x)
86 +#define to_le32(x) bswap_32(x)
87 +#endif
88 +
89 +struct dl_image {
90 +       unsigned magic;
91 +       unsigned id;
92 +       unsigned size;
93 +       unsigned data[0];
94 +};
95 +
96 +const unsigned IxNpeMicrocode_array[];
97 +
98 +int main(int argc, char *argv[])
99 +{
100 +       struct dl_image *image = (struct dl_image *)IxNpeMicrocode_array;
101 +       int imgsiz, i, fd, cnt;
102 +       const unsigned *arrayptr = IxNpeMicrocode_array;
103 +       const char *names[] = { "IXP425", "IXP465", "unknown" };
104 +       int bigendian = 1;
105 +
106 +       if (argc > 1) {
107 +               if (!strcmp(argv[1], "-le"))
108 +                       bigendian = 0;
109 +               else if (!strcmp(argv[1], "-be"))
110 +                       bigendian = 1;
111 +               else {
112 +                       printf("Usage: %s <-le|-be>\n", argv[0]);
113 +                       return EXIT_FAILURE;
114 +               }
115 +       }
116 +
117 +       for (image = (struct dl_image *)arrayptr, cnt=0;
118 +               (image->id != 0xfeedf00d) && (image->magic == 0xfeedf00d);
119 +               image = (struct dl_image *)(arrayptr), cnt++)
120 +       {
121 +               unsigned char field[4];
122 +               imgsiz = image->size + 3;
123 +               *(unsigned*)field = to_be32(image->id);
124 +               char filename[40], slnk[10];
125 +
126 +               sprintf(filename, "NPE-%c.%08x", (field[0] & 0xf) + 'A',
127 +                       image->id);
128 +               sprintf(slnk, "NPE-%c", (field[0] & 0xf) + 'A');
129 +               printf("Writing image: %s.NPE_%c Func: %2x Rev: %02x.%02x "
130 +                       "Size: %5d to: '%s'\n",
131 +                       names[field[0] >> 4], (field[0] & 0xf) + 'A',
132 +                       field[1], field[2], field[3], imgsiz*4, filename);
133 +               fd = open(filename, O_CREAT | O_RDWR | O_TRUNC, 0644);
134 +               if (fd >= 0) {
135 +                       for (i=0; i<imgsiz; i++) {
136 +                               *(unsigned*)field = bigendian ?
137 +                                       to_be32(arrayptr[i]) :
138 +                                       to_le32(arrayptr[i]);
139 +                               write(fd, field, sizeof(field));
140 +                       }
141 +                       close(fd);
142 +                       unlink(slnk);
143 +                       symlink(filename, slnk);
144 +               } else {
145 +                       perror(filename);
146 +               }
147 +               arrayptr += imgsiz;
148 +       }
149 +       close(fd);
150 +       return 0;
151 +}
152 Index: linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/README
153 ===================================================================
154 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
155 +++ linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/README 2007-02-21 02:24:35.000000000 -0800
156 @@ -0,0 +1,62 @@
157 +Information about the Networking Driver using the IXP4XX CPU internal NPEs
158 +and Queue manager.
159 +
160 +If this driver is used, the IAL (Intel Access Library) must not be loaded.
161 +However, the IAL may be loaded, if these modules are unloaded:
162 +       ixp4xx_npe.ko, ixp4xx_qmgr.ko ixp4xx_mac.ko
163 +
164 +This also means that HW crypto acceleration does NOT work when using this
165 +driver, unless I have finished my crypto driver for NPE-C
166 +
167 +
168 +Adaptation to your custom board:
169 +------------------------------
170 +use "arch/arm/mach-ixp4xx/ixdp425-setup.c" as template:
171 +
172 +in "static struct mac_plat_info" adapt the entry "phy_id" to your needs
173 +(Ask your hardware designer about the PHY id)
174 +
175 +The order of "&mac0" and "&mac1" in the "struct platform_device"
176 +determines which of them becomes eth0 and eth1
177 +
178 +
179 +The Microcode:
180 +---------------
181 +Solution 1)
182 + Configure "CONFIG_HOTPLUG" and "CONFIG_FW_LOADER" and configure
183 + IXP4XX_NPE as module.
184 + The default hotplug script will load the Firmware from
185 + /usr/lib/hotplug/firmware/NPE-[ABC]
186 + see Documentation/firmware_class/hotplug-script
187 +
188 + You should take care, that $ACTION is "add" and $SUBSYSTEM is "firmware"
189 + to avoid unnecessary calls:
190 + test $ACTION = "remove" -o $SUBSYSTEM != "firmware" && exit
191 +
192 +Solution 2)
193 + create a char-dev: "mknod /dev/misc/npe c 10 184"
194 + cat the Microcode into it:
195 + cat /usr/lib/hotplug/firmware/NPE-* > /dev/misc/npe
196 + This also works if the driver is linked to the kernel
197 +
198 + Having a mix of both (e.g. solution 1 for NPE-B and solution 2 for NPE-C)
199 + is perfectly ok and works.
200 +
201 + The state of the NPEs can be seen and changed at:
202 + /sys/bus/platform/devices/ixp4xx_npe.X/state
203 +
204 +
205 +Obtaining the Microcode:
206 +------------------------
207 +1) IxNpeMicrocode.h in this directory:
208 +   Download IPL_IXP400NPELIBRARYWITHCRYPTO-2_1.ZIP from Intel
209 +   It unpacks the Microcode IxNpeMicrocode.c
210 +   Read the Licence !
211 +   Compile it with "gcc -Wall IxNpeMicrocode.c -o IxNpeMicrocode" on your host.
212 +   The resulting images can be moved to "/usr/lib/hotplug/firmware"
213 +
214 +2) mc_grab.c in this directory:
215 +   Compile and execute it either on the host or on the target
216 +   to grab the microcode from a binary image like the RedBoot bootloader.
217 +
218 +
219 Index: linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/mc_grab.c
220 ===================================================================
221 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
222 +++ linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/mc_grab.c      2007-02-21 02:24:35.000000000 -0800
223 @@ -0,0 +1,97 @@
224 +/*
225 + * mc_grab.c  - grabs IXP4XX microcode from a binary datastream
226 + * e.g. The redboot bootloader....
227 + *
228 + * usage: mc_grab 1010200 2010200 < /dev/mtd/0 > /dev/misc/npe
229 + *
230 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
231 + *
232 + * This file is released under the GPLv2
233 + */
234 +
235 +
236 +#include <stdlib.h>
237 +#include <stdio.h>
238 +#include <unistd.h>
239 +#include <netinet/in.h>
240 +#include <sys/types.h>
241 +#include <sys/stat.h>
242 +#include <fcntl.h>
243 +#include <errno.h>
244 +#include <string.h>
245 +
246 +#define MAX_IMG 6
247 +
248 +static void print_mc_info(unsigned id, int siz)
249 +{
250 +       unsigned char buf[sizeof(unsigned)];
251 +       *(unsigned*)buf = id;
252 +       unsigned idx;
253 +       const char *names[] = { "IXP425", "IXP465", "unknown" };
254 +
255 +       idx = (buf[0] >> 4) < 2 ? (buf[0] >> 4) : 2;
256 +
257 +       fprintf(stderr, "Device: %s:NPE_%c Func: %2x Rev: %02x.%02x "
258 +               "Size: %5d bytes ID:%08x\n", names[idx], (buf[0] & 0xf)+'A',
259 +               buf[1], buf[2], buf[3], siz*4, ntohl(id));
260 +}
261 +
262 +int main(int argc, char *argv[])
263 +{
264 +       int i,j;
265 +       unsigned char buf[sizeof(unsigned)];
266 +       unsigned magic = htonl(0xfeedf00d);
267 +       unsigned id, my_ids[MAX_IMG+1], siz, sizbe;
268 +       int ret=1, verbose=0;
269 +
270 +       for (i=0, j=0; i<argc-1 && j<MAX_IMG; i++) {
271 +               if (!strcmp(argv[i+1], "-v"))
272 +                       verbose = 1;
273 +               else
274 +                       my_ids[j++] = htonl(strtoul(argv[i+1], NULL, 16));
275 +       }
276 +       my_ids[j] = 0;
277 +       if (my_ids[0] == 0 && !verbose) {
278 +               fprintf(stderr, "Usage: %s <-v> [ID1] [ID2] [IDn]\n", argv[0]);
279 +               return 1;
280 +       }
281 +
282 +       while ((ret=read(0, buf, sizeof(unsigned))) == sizeof(unsigned)) {
283 +               if (*(unsigned*)buf != magic)
284 +                       continue;
285 +               if ((ret=read(0, buf, sizeof(unsigned))) != sizeof(unsigned) )
286 +                       break;
287 +               id = *(unsigned*)buf;
288 +
289 +               if (read(0, buf, sizeof(siz)) != sizeof(siz) )
290 +                       break;
291 +               sizbe = *(unsigned*)buf;
292 +               siz = ntohl(sizbe);
293 +
294 +               if (verbose)
295 +                       print_mc_info(id, siz);
296 +
297 +               for(i=0; my_ids[i]; i++)
298 +                       if (id == my_ids[i])
299 +                               break;
300 +               if (!my_ids[i])
301 +                       continue;
302 +
303 +               if (!verbose)
304 +                       print_mc_info(id, siz);
305 +
306 +               write(1, &magic, sizeof(magic));
307 +               write(1, &id, sizeof(id));
308 +               write(1, &sizbe, sizeof(sizbe));
309 +               for (i=0; i<siz; i++) {
310 +                       if (read(0, buf, sizeof(unsigned)) != sizeof(unsigned))
311 +                               break;
312 +                       write(1, buf, sizeof(unsigned));
313 +               }
314 +               if (i != siz)
315 +                       break;
316 +       }
317 +       if (ret)
318 +               fprintf(stderr, "Error reading  Microcode\n");
319 +       return ret;
320 +}
321 Index: linux-2.6.21-rc1-arm/arch/arm/mach-ixp4xx/common.c
322 ===================================================================
323 --- linux-2.6.21-rc1-arm.orig/arch/arm/mach-ixp4xx/common.c     2007-02-21 02:24:18.000000000 -0800
324 +++ linux-2.6.21-rc1-arm/arch/arm/mach-ixp4xx/common.c  2007-02-21 02:24:35.000000000 -0800
325 @@ -357,6 +357,90 @@
326         &ixp46x_i2c_controller
327  };
328  
329 +static struct npe_plat_data npea = {
330 +       .name           = "NPE-A",
331 +       .data_size      = 0x800,
332 +       .inst_size      = 0x1000,
333 +       .id             = 0,
334 +};
335 +
336 +static struct npe_plat_data npeb = {
337 +       .name           = "NPE-B",
338 +       .data_size      = 0x800,
339 +       .inst_size      = 0x800,
340 +       .id             = 1,
341 +};
342 +
343 +static struct npe_plat_data npec = {
344 +       .name           = "NPE-C",
345 +       .data_size      = 0x800,
346 +       .inst_size      = 0x800,
347 +       .id             = 2,
348 +};
349 +
350 +static struct resource res_npea = {
351 +       .start          = IXP4XX_NPEA_BASE_PHYS,
352 +       .end            = IXP4XX_NPEA_BASE_PHYS + 0xfff,
353 +       .flags          = IORESOURCE_MEM,
354 +};
355 +
356 +static struct resource res_npeb = {
357 +       .start          = IXP4XX_NPEB_BASE_PHYS,
358 +       .end            = IXP4XX_NPEB_BASE_PHYS + 0xfff,
359 +       .flags          = IORESOURCE_MEM,
360 +};
361 +
362 +static struct resource res_npec = {
363 +       .start          = IXP4XX_NPEC_BASE_PHYS,
364 +       .end            = IXP4XX_NPEC_BASE_PHYS + 0xfff,
365 +       .flags          = IORESOURCE_MEM,
366 +};
367 +
368 +static struct platform_device dev_npea = {
369 +       .name           = "ixp4xx_npe",
370 +       .id             = 0,
371 +       .dev.platform_data = &npea,
372 +       .num_resources  = 1,
373 +       .resource       = &res_npea,
374 +};
375 +
376 +static struct platform_device dev_npeb = {
377 +       .name           = "ixp4xx_npe",
378 +       .id             = 1,
379 +       .dev.platform_data = &npeb,
380 +       .num_resources  = 1,
381 +       .resource       = &res_npeb,
382 +};
383 +
384 +static struct platform_device dev_npec = {
385 +       .name           = "ixp4xx_npe",
386 +       .id             = 2,
387 +       .dev.platform_data = &npec,
388 +       .num_resources  = 1,
389 +       .resource       = &res_npec,
390 +};
391 +
392 +/* QMGR */
393 +static struct resource res_qmgr[] = {
394 +{
395 +       .start          = IXP4XX_QMGR_BASE_PHYS,
396 +       .end            = IXP4XX_QMGR_BASE_PHYS + IXP4XX_QMGR_REGION_SIZE -1,
397 +       .flags          = IORESOURCE_MEM,
398 +}, {
399 +       .start          = IRQ_IXP4XX_QM1,
400 +       .flags          = IORESOURCE_IRQ,
401 +} };
402 +
403 +static struct platform_device qmgr = {
404 +       .name           = "ixp4xx_qmgr",
405 +       .id             = 0,
406 +       .dev            = {
407 +               .coherent_dma_mask      = DMA_32BIT_MASK,
408 +       },
409 +       .num_resources  = ARRAY_SIZE(res_qmgr),
410 +       .resource       = res_qmgr,
411 +};
412 +
413  unsigned long ixp4xx_exp_bus_size;
414  EXPORT_SYMBOL(ixp4xx_exp_bus_size);
415  
416 @@ -378,8 +462,19 @@
417                                 break;
418                         }
419                 }
420 +               npeb.inst_size = 0x1000;
421 +               npec.inst_size = 0x1000;
422         }
423  
424 +       platform_device_register(&qmgr);
425 +
426 +       if (ix_fuse() & IX_FUSE_NPEA)
427 +               platform_device_register(&dev_npea);
428 +       if (ix_fuse() & IX_FUSE_NPEB)
429 +               platform_device_register(&dev_npeb);
430 +       if (ix_fuse() & IX_FUSE_NPEC)
431 +               platform_device_register(&dev_npec);
432 +
433         printk("IXP4xx: Using %luMiB expansion bus window size\n",
434                         ixp4xx_exp_bus_size >> 20);
435  }
436 Index: linux-2.6.21-rc1-arm/arch/arm/mach-ixp4xx/ixdp425-setup.c
437 ===================================================================
438 --- linux-2.6.21-rc1-arm.orig/arch/arm/mach-ixp4xx/ixdp425-setup.c      2007-02-21 02:24:18.000000000 -0800
439 +++ linux-2.6.21-rc1-arm/arch/arm/mach-ixp4xx/ixdp425-setup.c   2007-02-21 02:24:35.000000000 -0800
440 @@ -101,10 +101,59 @@
441         .resource               = ixdp425_uart_resources
442  };
443  
444 +/* MACs */
445 +static struct resource res_mac0 = {
446 +       .start          = IXP4XX_EthB_BASE_PHYS,
447 +       .end            = IXP4XX_EthB_BASE_PHYS + 0x1ff,
448 +       .flags          = IORESOURCE_MEM,
449 +};
450 +
451 +static struct resource res_mac1 = {
452 +       .start          = IXP4XX_EthC_BASE_PHYS,
453 +       .end            = IXP4XX_EthC_BASE_PHYS + 0x1ff,
454 +       .flags          = IORESOURCE_MEM,
455 +};
456 +
457 +static struct mac_plat_info plat_mac0 = {
458 +       .npe_id         = 1,
459 +       .phy_id         = 0,
460 +       .eth_id         = 0,
461 +       .rxq_id         = 27,
462 +       .txq_id         = 24,
463 +       .rxdoneq_id     = 4,
464 +};
465 +
466 +static struct mac_plat_info plat_mac1 = {
467 +       .npe_id         = 2,
468 +       .phy_id         = 1,
469 +       .eth_id         = 1,
470 +       .rxq_id         = 28,
471 +       .txq_id         = 25,
472 +       .rxdoneq_id     = 5,
473 +};
474 +
475 +static struct platform_device mac0 = {
476 +       .name           = "ixp4xx_mac",
477 +       .id             = 0,
478 +       .dev.platform_data = &plat_mac0,
479 +       .num_resources  = 1,
480 +       .resource       = &res_mac0,
481 +};
482 +
483 +static struct platform_device mac1 = {
484 +       .name           = "ixp4xx_mac",
485 +       .id             = 1,
486 +       .dev.platform_data = &plat_mac1,
487 +       .num_resources  = 1,
488 +       .resource       = &res_mac1,
489 +};
490 +
491  static struct platform_device *ixdp425_devices[] __initdata = {
492         &ixdp425_i2c_controller,
493         &ixdp425_flash,
494 -       &ixdp425_uart
495 +       &ixdp425_uart,
496 +       &mac0,
497 +       &mac1,
498  };
499  
500  static void __init ixdp425_init(void)
501 Index: linux-2.6.21-rc1-arm/drivers/net/Kconfig
502 ===================================================================
503 --- linux-2.6.21-rc1-arm.orig/drivers/net/Kconfig       2007-02-21 02:24:18.000000000 -0800
504 +++ linux-2.6.21-rc1-arm/drivers/net/Kconfig    2007-02-21 02:24:35.000000000 -0800
505 @@ -201,6 +201,8 @@
506  
507  source "drivers/net/arm/Kconfig"
508  
509 +source "drivers/net/ixp4xx/Kconfig"
510 +
511  config MACE
512         tristate "MACE (Power Mac ethernet) support"
513         depends on NET_ETHERNET && PPC_PMAC && PPC32
514 Index: linux-2.6.21-rc1-arm/drivers/net/Makefile
515 ===================================================================
516 --- linux-2.6.21-rc1-arm.orig/drivers/net/Makefile      2007-02-21 02:24:18.000000000 -0800
517 +++ linux-2.6.21-rc1-arm/drivers/net/Makefile   2007-02-21 02:24:35.000000000 -0800
518 @@ -212,6 +212,7 @@
519  obj-$(CONFIG_IRDA) += irda/
520  obj-$(CONFIG_ETRAX_ETHERNET) += cris/
521  obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
522 +obj-$(CONFIG_IXP4XX_NPE) += ixp4xx/
523  
524  obj-$(CONFIG_NETCONSOLE) += netconsole.o
525  
526 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/Kconfig
527 ===================================================================
528 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
529 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/Kconfig     2007-02-21 02:24:35.000000000 -0800
530 @@ -0,0 +1,48 @@
531 +config IXP4XX_QMGR
532 +       tristate "IXP4xx Queue Manager support"
533 +       depends on ARCH_IXP4XX
534 +       depends on NET_ETHERNET
535 +       help
536 +         The IXP4XX Queue manager is a configurable hardware ringbuffer.
537 +         It is used by the NPEs to exchange data from and to the CPU.
538 +         You can either use this OR the Intel Access Library (IAL)
539 +
540 +config IXP4XX_NPE
541 +       tristate "IXP4xx NPE support"
542 +       depends on ARCH_IXP4XX
543 +       depends on NET_ETHERNET
544 +       help
545 +         The IXP4XX NPE driver supports the 3 CPU co-processors called
546 +         "Network Processing Engines" (NPE). It adds support fo downloading
547 +         the Microcode (firmware) via Hotplug or  character-special-device.
548 +         More about this at: Documentation/networking/ixp4xx/README.
549 +         You can either use this OR the Intel Access Library (IAL)
550 +
551 +config IXP4XX_FW_LOAD
552 +       bool "Use Firmware hotplug for Microcode download"
553 +       depends on IXP4XX_NPE
554 +       select HOTPLUG
555 +       select FW_LOADER
556 +       help
557 +         The default hotplug script will load the Firmware from
558 +         /usr/lib/hotplug/firmware/NPE-[ABC]
559 +         see Documentation/firmware_class/hotplug-script
560 +
561 +config IXP4XX_MAC
562 +       tristate "IXP4xx MAC support"
563 +       depends on IXP4XX_NPE
564 +       depends on IXP4XX_QMGR
565 +       depends on NET_ETHERNET
566 +       select MII
567 +       help
568 +         The IXP4XX MAC driver supports the MACs on the IXP4XX CPUs.
569 +         There are 2 on ixp425 and up to 5 on ixdp465.
570 +         You can either use this OR the Intel Access Library (IAL)
571 +
572 +config IXP4XX_CRYPTO
573 +       tristate "IXP4xx crypto support"
574 +       depends on IXP4XX_NPE
575 +       depends on IXP4XX_QMGR
576 +       help
577 +         This driver is a generic NPE-crypto access layer.
578 +         You need additional code in OCF for example.
579 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/Makefile
580 ===================================================================
581 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
582 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/Makefile    2007-02-21 02:24:35.000000000 -0800
583 @@ -0,0 +1,7 @@
584 +obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx_qmgr.o
585 +obj-$(CONFIG_IXP4XX_NPE) += ixp4xx_npe.o
586 +obj-$(CONFIG_IXP4XX_MAC) += ixp4xx_mac.o
587 +obj-$(CONFIG_IXP4XX_CRYPTO) += ixp4xx_crypto.o
588 +
589 +ixp4xx_npe-objs := ucode_dl.o npe_mh.o npe.o
590 +ixp4xx_mac-objs := mac_driver.o phy.o
591 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ixp4xx_crypto.c
592 ===================================================================
593 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
594 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ixp4xx_crypto.c     2007-02-21 02:24:35.000000000 -0800
595 @@ -0,0 +1,851 @@
596 +/*
597 + * ixp4xx_crypto.c - interface to the HW crypto
598 + *
599 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
600 + *
601 + * This file is released under the GPLv2
602 + */
603 +
604 +#include <linux/ixp_qmgr.h>
605 +#include <linux/ixp_npe.h>
606 +#include <linux/dma-mapping.h>
607 +#include <linux/dmapool.h>
608 +#include <linux/device.h>
609 +#include <linux/delay.h>
610 +#include <linux/slab.h>
611 +#include <linux/kernel.h>
612 +#include <linux/ixp_crypto.h>
613 +
614 +#define SEND_QID 29
615 +#define RECV_QID 30
616 +
617 +#define NPE_ID   2 /* NPE C */
618 +
619 +#define QUEUE_SIZE 64
620 +#define MY_VERSION "0.0.1"
621 +
622 +/* local head for all sa_ctx */
623 +static struct ix_sa_master sa_master;
624 +
625 +static const struct ix_hash_algo _hash_algos[] = {
626 +{
627 +       .name           = "MD5",
628 +       .cfgword        = 0xAA010004,
629 +       .digest_len     = 16,
630 +       .icv            = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
631 +                       "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
632 +       .type           = HASH_TYPE_MD5,
633 +},{
634 +       .name           = "SHA1",
635 +       .cfgword        = 0x00000005,
636 +       .digest_len     = 20,
637 +       .icv            = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
638 +                       "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
639 +       .type           = HASH_TYPE_SHA1,
640 +#if 0
641 +},{
642 +       .name           = "CBC MAC",
643 +       .digest_len     = 64,
644 +       .aad_len        = 48,
645 +       .type           = HASH_TYPE_CBCMAC,
646 +#endif
647 +} };
648 +
649 +static const struct ix_cipher_algo _cipher_algos[] = {
650 +{
651 +       .name           = "DES ECB",
652 +       .cfgword_enc    = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
653 +       .cfgword_dec    = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
654 +       .block_len      = 8,
655 +       .type           = CIPHER_TYPE_DES,
656 +       .mode           = CIPHER_MODE_ECB,
657 +},{
658 +       .name           = "DES CBC",
659 +       .cfgword_enc    = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
660 +       .cfgword_dec    = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
661 +       .iv_len         = 8,
662 +       .block_len      = 8,
663 +       .type           = CIPHER_TYPE_DES,
664 +       .mode           = CIPHER_MODE_CBC,
665 +},{
666 +       .name           = "3DES ECB",
667 +       .cfgword_enc    = CIPH_ENCR | MOD_TDEA3 | MOD_ECB | KEYLEN_192,
668 +       .cfgword_dec    = CIPH_DECR | MOD_TDEA3 | MOD_ECB | KEYLEN_192,
669 +       .block_len      = 8,
670 +       .type           = CIPHER_TYPE_3DES,
671 +       .mode           = CIPHER_MODE_ECB,
672 +},{
673 +       .name           = "3DES CBC",
674 +       .cfgword_enc    = CIPH_ENCR | MOD_TDEA3 | MOD_CBC_ENC | KEYLEN_192,
675 +       .cfgword_dec    = CIPH_DECR | MOD_TDEA3 | MOD_CBC_DEC | KEYLEN_192,
676 +       .iv_len         = 8,
677 +       .block_len      = 8,
678 +       .type           = CIPHER_TYPE_3DES,
679 +       .mode           = CIPHER_MODE_CBC,
680 +},{
681 +       .name           = "AES ECB",
682 +       .cfgword_enc    = CIPH_ENCR | ALGO_AES | MOD_ECB,
683 +       .cfgword_dec    = CIPH_DECR | ALGO_AES | MOD_ECB,
684 +       .block_len      = 16,
685 +       .type           = CIPHER_TYPE_AES,
686 +       .mode           = CIPHER_MODE_ECB,
687 +},{
688 +       .name           = "AES CBC",
689 +       .cfgword_enc    = CIPH_ENCR | ALGO_AES | MOD_CBC_ENC,
690 +       .cfgword_dec    = CIPH_DECR | ALGO_AES | MOD_CBC_DEC,
691 +       .block_len      = 16,
692 +       .iv_len         = 16,
693 +       .type           = CIPHER_TYPE_AES,
694 +       .mode           = CIPHER_MODE_CBC,
695 +},{
696 +       .name           = "AES CTR",
697 +       .cfgword_enc    = CIPH_ENCR | ALGO_AES | MOD_CTR,
698 +       .cfgword_dec    = CIPH_ENCR | ALGO_AES | MOD_CTR,
699 +       .block_len      = 16,
700 +       .iv_len         = 16,
701 +       .type           = CIPHER_TYPE_AES,
702 +       .mode           = CIPHER_MODE_CTR,
703 +#if 0
704 +},{
705 +       .name           = "AES CCM",
706 +       .cfgword_enc    = CIPH_ENCR | ALGO_AES | MOD_CCM_ENC,
707 +       .cfgword_dec    = CIPH_ENCR | ALGO_AES | MOD_CCM_DEC,
708 +       .block_len      = 16,
709 +       .iv_len         = 16,
710 +       .type           = CIPHER_TYPE_AES,
711 +       .mode           = CIPHER_MODE_CCM,
712 +#endif
713 +} };
714 +
715 +const struct ix_hash_algo *ix_hash_by_id(int type)
716 +{
717 +       int i;
718 +
719 +       for(i=0; i<ARRAY_SIZE(_hash_algos); i++) {
720 +               if (_hash_algos[i].type == type)
721 +                       return _hash_algos + i;
722 +       }
723 +       return NULL;
724 +}
725 +
726 +const struct ix_cipher_algo *ix_cipher_by_id(int type, int mode)
727 +{
728 +       int i;
729 +
730 +       for(i=0; i<ARRAY_SIZE(_cipher_algos); i++) {
731 +               if (_cipher_algos[i].type==type && _cipher_algos[i].mode==mode)
732 +                       return _cipher_algos + i;
733 +       }
734 +       return NULL;
735 +}
736 +
737 +static void irqcb_recv(struct qm_queue *queue);
738 +
739 +static int init_sa_master(struct ix_sa_master *master)
740 +{
741 +       struct npe_info *npe;
742 +       int ret = -ENODEV;
743 +
744 +       if (! (ix_fuse() & (IX_FUSE_HASH | IX_FUSE_AES | IX_FUSE_DES))) {
745 +               printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
746 +               return ret;
747 +       }
748 +       memset(master, 0, sizeof(struct ix_sa_master));
749 +       master->npe_dev = get_npe_by_id(NPE_ID);
750 +       if (! master->npe_dev)
751 +               goto err;
752 +
753 +       npe = dev_get_drvdata(master->npe_dev);
754 +
755 +       if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
756 +               switch (npe->img_info[1]) {
757 +               case 4:
758 +                       printk(KERN_INFO "Crypto AES avaialable\n");
759 +                       break;
760 +               case 5:
761 +                       printk(KERN_INFO "Crypto AES and CCM avaialable\n");
762 +                       break;
763 +               default:
764 +                       printk(KERN_WARNING "Current microcode for %s has no"
765 +                               " crypto capabilities\n", npe->plat->name);
766 +                       break;
767 +               }
768 +       }
769 +       rwlock_init(&master->lock);
770 +       master->dmapool = dma_pool_create("ixp4xx_crypto", master->npe_dev,
771 +                       sizeof(struct npe_crypt_cont), 32, 0);
772 +       if (!master->dmapool) {
773 +               ret = -ENOMEM;
774 +               goto err;
775 +       }
776 +       master->sendq = request_queue(SEND_QID, QUEUE_SIZE);
777 +       if (IS_ERR(master->sendq)) {
778 +               printk(KERN_ERR "ixp4xx_crypto: Error requesting Q: %d\n",
779 +                               SEND_QID);
780 +               ret = PTR_ERR(master->sendq);
781 +               goto err;
782 +       }
783 +       master->recvq = request_queue(RECV_QID, QUEUE_SIZE);
784 +       if (IS_ERR(master->recvq)) {
785 +               printk(KERN_ERR "ixp4xx_crypto: Error requesting Q: %d\n",
786 +                               RECV_QID);
787 +               ret = PTR_ERR(master->recvq);
788 +               release_queue(master->sendq);
789 +               goto err;
790 +       }
791 +
792 +       master->recvq->irq_cb = irqcb_recv;
793 +       queue_set_watermarks(master->recvq, 0, 0);
794 +       queue_set_irq_src(master->recvq, Q_IRQ_ID_NOT_E);
795 +       queue_enable_irq(master->recvq);
796 +       printk(KERN_INFO "ixp4xx_crypto " MY_VERSION " registered successfully\n");
797 +
798 +       return 0;
799 +err:
800 +       if (master->dmapool)
801 +               dma_pool_destroy(master->dmapool);
802 +       if (! master->npe_dev)
803 +               put_device(master->npe_dev);
804 +       return ret;
805 +
806 +}
807 +
808 +static void release_sa_master(struct ix_sa_master *master)
809 +{
810 +       struct npe_crypt_cont *cont;
811 +       unsigned long flags;
812 +
813 +       write_lock_irqsave(&master->lock, flags);
814 +       while (master->pool) {
815 +               cont = master->pool;
816 +               master->pool = cont->next;
817 +               dma_pool_free(master->dmapool, cont, cont->phys);
818 +               master->pool_size--;
819 +       }
820 +       write_unlock_irqrestore(&master->lock, flags);
821 +       if (master->pool_size) {
822 +               printk(KERN_ERR "ixp4xx_crypto: %d items lost from DMA pool\n",
823 +                               master->pool_size);
824 +       }
825 +
826 +       dma_pool_destroy(master->dmapool);
827 +       release_queue(master->sendq);
828 +       release_queue(master->recvq);
829 +       return_npe_dev(master->npe_dev);
830 +}
831 +
832 +static struct npe_crypt_cont *ix_sa_get_cont(struct ix_sa_master *master)
833 +{
834 +       unsigned long flags;
835 +       struct npe_crypt_cont *cont;
836 +       dma_addr_t handle;
837 +
838 +       write_lock_irqsave(&master->lock, flags);
839 +       if (!master->pool) {
840 +               cont = dma_pool_alloc(master->dmapool, GFP_ATOMIC, &handle);
841 +               if (cont) {
842 +                       master->pool_size++;
843 +                       cont->phys = handle;
844 +                       cont->virt = cont;
845 +               }
846 +       } else {
847 +               cont = master->pool;
848 +               master->pool = cont->next;
849 +       }
850 +       write_unlock_irqrestore(&master->lock, flags);
851 +       return cont;
852 +}
853 +
854 +static void
855 +ix_sa_return_cont(struct ix_sa_master *master,struct npe_crypt_cont *cont)
856 +{
857 +       unsigned long flags;
858 +
859 +       write_lock_irqsave(&master->lock, flags);
860 +       cont->next = master->pool;
861 +       master->pool = cont;
862 +       write_unlock_irqrestore(&master->lock, flags);
863 +}
864 +
865 +static void free_sa_dir(struct ix_sa_ctx *sa_ctx, struct ix_sa_dir *dir)
866 +{
867 +       memset(dir->npe_ctx, 0, NPE_CTX_LEN);
868 +       dma_pool_free(sa_ctx->master->dmapool, dir->npe_ctx,
869 +                       dir->npe_ctx_phys);
870 +}
871 +
872 +static void ix_sa_ctx_destroy(struct ix_sa_ctx *sa_ctx)
873 +{
874 +       BUG_ON(sa_ctx->state != STATE_UNLOADING);
875 +       free_sa_dir(sa_ctx, &sa_ctx->encrypt);
876 +       free_sa_dir(sa_ctx, &sa_ctx->decrypt);
877 +       kfree(sa_ctx);
878 +       module_put(THIS_MODULE);
879 +}
880 +
881 +static void recv_pack(struct qm_queue *queue, u32 phys)
882 +{
883 +       struct ix_sa_ctx *sa_ctx;
884 +       struct npe_crypt_cont *cr_cont;
885 +       struct npe_cont *cont;
886 +       int failed;
887 +
888 +       failed = phys & 0x1;
889 +       phys &= ~0x3;
890 +
891 +       cr_cont = dma_to_virt(queue->dev, phys);
892 +       cr_cont = cr_cont->virt;
893 +       sa_ctx = cr_cont->ctl.crypt.sa_ctx;
894 +
895 +       phys = npe_to_cpu32(cr_cont->ctl.crypt.src_buf);
896 +       if (phys) {
897 +               cont = dma_to_virt(queue->dev, phys);
898 +               cont = cont->virt;
899 +       } else {
900 +               cont = NULL;
901 +       }
902 +       if (cr_cont->ctl.crypt.oper_type == OP_PERFORM) {
903 +               dma_unmap_single(sa_ctx->master->npe_dev,
904 +                               cont->eth.phys_addr,
905 +                               cont->eth.buf_len,
906 +                               DMA_BIDIRECTIONAL);
907 +               if (sa_ctx->perf_cb)
908 +                       sa_ctx->perf_cb(sa_ctx, cont->data, failed);
909 +               qmgr_return_cont(dev_get_drvdata(queue->dev), cont);
910 +               ix_sa_return_cont(sa_ctx->master, cr_cont);
911 +               if (atomic_dec_and_test(&sa_ctx->use_cnt))
912 +                       ix_sa_ctx_destroy(sa_ctx);
913 +               return;
914 +       }
915 +
916 +       /* We are registering */
917 +       switch (cr_cont->ctl.crypt.mode) {
918 +       case NPE_OP_HASH_GEN_ICV:
919 +               /* 1 out of 2 HMAC preparation operations completed */
920 +               dma_unmap_single(sa_ctx->master->npe_dev,
921 +                               cont->eth.phys_addr,
922 +                               cont->eth.buf_len,
923 +                               DMA_TO_DEVICE);
924 +               kfree(cont->data);
925 +               qmgr_return_cont(dev_get_drvdata(queue->dev), cont);
926 +               break;
927 +       case NPE_OP_ENC_GEN_KEY:
928 +               memcpy(sa_ctx->decrypt.npe_ctx + sizeof(u32),
929 +                       sa_ctx->rev_aes->ctl.rev_aes_key + sizeof(u32),
930 +                       sa_ctx->c_key.len);
931 +               /* REV AES data not needed anymore, free it */
932 +               ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes);
933 +               sa_ctx->rev_aes = NULL;
934 +               break;
935 +       default:
936 +               printk(KERN_ERR "Unknown crypt-register mode: %x\n",
937 +                               cr_cont->ctl.crypt.mode);
938 +
939 +       }
940 +       if (cr_cont->ctl.crypt.oper_type == OP_REG_DONE) {
941 +               if (sa_ctx->state == STATE_UNREGISTERED)
942 +                       sa_ctx->state = STATE_REGISTERED;
943 +               if (sa_ctx->reg_cb)
944 +                       sa_ctx->reg_cb(sa_ctx, failed);
945 +       }
946 +       ix_sa_return_cont(sa_ctx->master, cr_cont);
947 +       if (atomic_dec_and_test(&sa_ctx->use_cnt))
948 +               ix_sa_ctx_destroy(sa_ctx);
949 +}
950 +
951 +static void irqcb_recv(struct qm_queue *queue)
952 +{
953 +       u32 phys;
954 +
955 +       queue_ack_irq(queue);
956 +       while ((phys = queue_get_entry(queue)))
957 +               recv_pack(queue, phys);
958 +}
959 +
960 +static int init_sa_dir(struct ix_sa_ctx *sa_ctx, struct ix_sa_dir *dir)
961 +{
962 +       dir->npe_ctx = dma_pool_alloc(sa_ctx->master->dmapool,
963 +                       sa_ctx->gfp_flags, &dir->npe_ctx_phys);
964 +       if (!dir->npe_ctx) {
965 +               return 1;
966 +       }
967 +       memset(dir->npe_ctx, 0, NPE_CTX_LEN);
968 +       return 0;
969 +}
970 +
971 +struct ix_sa_ctx *ix_sa_ctx_new(int priv_len, gfp_t flags)
972 +{
973 +       struct ix_sa_ctx *sa_ctx;
974 +       struct ix_sa_master *master = &sa_master;
975 +       struct npe_info *npe = dev_get_drvdata(master->npe_dev);
976 +
977 +       /* first check if Microcode was downloaded into this NPE */
978 +       if (!( npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN)) {
979 +               printk(KERN_ERR "%s not running\n", npe->plat->name);
980 +               return NULL;
981 +       }
982 +       switch (npe->img_info[1]) {
983 +               case 4:
984 +               case 5:
985 +                       break;
986 +               default:
987 +                       /* No crypto Microcode */
988 +                       return NULL;
989 +       }
990 +       if (!try_module_get(THIS_MODULE)) {
991 +               return NULL;
992 +       }
993 +
994 +       sa_ctx = kzalloc(sizeof(struct ix_sa_ctx) + priv_len, flags);
995 +       if (!sa_ctx) {
996 +               goto err_put;
997 +       }
998 +
999 +       sa_ctx->master = master;
1000 +       sa_ctx->gfp_flags = flags;
1001 +
1002 +       if (init_sa_dir(sa_ctx, &sa_ctx->encrypt))
1003 +               goto err_free;
1004 +       if (init_sa_dir(sa_ctx, &sa_ctx->decrypt)) {
1005 +               free_sa_dir(sa_ctx, &sa_ctx->encrypt);
1006 +               goto err_free;
1007 +       }
1008 +       if (priv_len)
1009 +               sa_ctx->priv = sa_ctx + 1;
1010 +
1011 +       atomic_set(&sa_ctx->use_cnt, 1);
1012 +       return sa_ctx;
1013 +
1014 +err_free:
1015 +        kfree(sa_ctx);
1016 +err_put:
1017 +        module_put(THIS_MODULE);
1018 +        return NULL;
1019 +}
1020 +
1021 +void ix_sa_ctx_free(struct ix_sa_ctx *sa_ctx)
1022 +{
1023 +       sa_ctx->state = STATE_UNLOADING;
1024 +       if (atomic_dec_and_test(&sa_ctx->use_cnt))
1025 +               ix_sa_ctx_destroy(sa_ctx);
1026 +       else
1027 +               printk(KERN_DEBUG "ix_sa_ctx_free -> delayed: %p %d\n",
1028 +                               sa_ctx, atomic_read(&sa_ctx->use_cnt));
1029 +}
1030 +
1031 +/* http://www.ietf.org/rfc/rfc2104.txt */
1032 +#define HMAC_IPAD_VALUE 0x36
1033 +#define HMAC_OPAD_VALUE 0x5C
1034 +#define PAD_BLOCKLEN 64
1035 +
1036 +static int register_chain_var(struct ix_sa_ctx *sa_ctx,
1037 +       unsigned char *pad, u32 target, int init_len, u32 ctx_addr, int oper)
1038 +{
1039 +       struct npe_crypt_cont *cr_cont;
1040 +       struct npe_cont *cont;
1041 +
1042 +       cr_cont = ix_sa_get_cont(sa_ctx->master);
1043 +       if (!cr_cont)
1044 +               return -ENOMEM;
1045 +
1046 +       cr_cont->ctl.crypt.sa_ctx = sa_ctx;
1047 +       cr_cont->ctl.crypt.auth_offs = 0;
1048 +       cr_cont->ctl.crypt.auth_len = cpu_to_npe16(PAD_BLOCKLEN);
1049 +       cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(ctx_addr);
1050 +
1051 +       cont = qmgr_get_cont(dev_get_drvdata(sa_ctx->master->sendq->dev));
1052 +       if (!cont) {
1053 +               ix_sa_return_cont(sa_ctx->master, cr_cont);
1054 +               return -ENOMEM;
1055 +       }
1056 +
1057 +       cont->data = pad;
1058 +       cont->eth.next = 0;
1059 +       cont->eth.buf_len = cpu_to_npe16(PAD_BLOCKLEN);
1060 +       cont->eth.pkt_len = 0;
1061 +
1062 +       cont->eth.phys_addr = cpu_to_npe32(dma_map_single(
1063 +               sa_ctx->master->npe_dev, pad, PAD_BLOCKLEN, DMA_TO_DEVICE));
1064 +
1065 +       cr_cont->ctl.crypt.src_buf = cpu_to_npe32(cont->phys);
1066 +       cr_cont->ctl.crypt.oper_type = oper;
1067 +
1068 +       cr_cont->ctl.crypt.addr.icv = cpu_to_npe32(target);
1069 +       cr_cont->ctl.crypt.mode = NPE_OP_HASH_GEN_ICV;
1070 +       cr_cont->ctl.crypt.init_len = init_len;
1071 +
1072 +       atomic_inc(&sa_ctx->use_cnt);
1073 +       queue_put_entry(sa_ctx->master->sendq, cr_cont->phys);
1074 +       if (queue_stat(sa_ctx->master->sendq) == 2) { /* overflow */
1075 +               atomic_dec(&sa_ctx->use_cnt);
1076 +               qmgr_return_cont(dev_get_drvdata(sa_ctx->master->sendq->dev),
1077 +                               cont);
1078 +               ix_sa_return_cont(sa_ctx->master, cr_cont);
1079 +               return -ENOMEM;
1080 +       }
1081 +       return 0;
1082 +}
1083 +
1084 +/* Return value
1085 + * 0 if nothing registered,
1086 + * 1 if something registered and
1087 + * < 0 on error
1088 + */
1089 +static int ix_sa_ctx_setup_auth(struct ix_sa_ctx *sa_ctx,
1090 +               const struct ix_hash_algo *algo, int len, int oper, int encrypt)
1091 +{
1092 +       unsigned char *ipad, *opad;
1093 +       u32 itarget, otarget, ctx_addr;
1094 +       unsigned char *cinfo;
1095 +       int init_len, i, ret = 0;
1096 +       struct qm_qmgr *qmgr;
1097 +       struct ix_sa_dir *dir;
1098 +       u32 cfgword;
1099 +
1100 +       dir = encrypt ? &sa_ctx->encrypt : &sa_ctx->decrypt;
1101 +       cinfo = dir->npe_ctx + dir->npe_ctx_idx;
1102 +
1103 +       qmgr = dev_get_drvdata(sa_ctx->master->sendq->dev);
1104 +
1105 +       cinfo = dir->npe_ctx + dir->npe_ctx_idx;
1106 +       sa_ctx->h_algo = algo;
1107 +
1108 +       if (!algo) {
1109 +               dir->npe_mode |= NPE_OP_HMAC_DISABLE;
1110 +               return 0;
1111 +       }
1112 +       if (algo->type == HASH_TYPE_CBCMAC) {
1113 +               dir->npe_mode |= NPE_OP_CCM_ENABLE | NPE_OP_HMAC_DISABLE;
1114 +               return 0;
1115 +       }
1116 +       if (sa_ctx->h_key.len > 64 || sa_ctx->h_key.len < algo->digest_len)
1117 +               return -EINVAL;
1118 +       if (len > algo->digest_len || (len % 4))
1119 +               return -EINVAL;
1120 +       if (!len)
1121 +               len = algo->digest_len;
1122 +
1123 +       sa_ctx->digest_len = len;
1124 +
1125 +       /* write cfg word to cryptinfo */
1126 +       cfgword = algo->cfgword | ((len/4) << 8);
1127 +       *(u32*)cinfo = cpu_to_be32(cfgword);
1128 +       cinfo += sizeof(cfgword);
1129 +
1130 +       /* write ICV to cryptinfo */
1131 +       memcpy(cinfo, algo->icv, algo->digest_len);
1132 +       cinfo += algo->digest_len;
1133 +
1134 +       itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
1135 +                               + sizeof(algo->cfgword);
1136 +       otarget = itarget + algo->digest_len;
1137 +
1138 +       opad = kzalloc(PAD_BLOCKLEN, sa_ctx->gfp_flags | GFP_DMA);
1139 +       if (!opad) {
1140 +               return -ENOMEM;
1141 +       }
1142 +       ipad = kzalloc(PAD_BLOCKLEN, sa_ctx->gfp_flags | GFP_DMA);
1143 +       if (!ipad) {
1144 +               kfree(opad);
1145 +               return -ENOMEM;
1146 +       }
1147 +       memcpy(ipad, sa_ctx->h_key.key, sa_ctx->h_key.len);
1148 +       memcpy(opad, sa_ctx->h_key.key, sa_ctx->h_key.len);
1149 +       for (i = 0; i < PAD_BLOCKLEN; i++) {
1150 +               ipad[i] ^= HMAC_IPAD_VALUE;
1151 +               opad[i] ^= HMAC_OPAD_VALUE;
1152 +       }
1153 +       init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
1154 +       ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
1155 +
1156 +       dir->npe_ctx_idx += init_len;
1157 +       dir->npe_mode |= NPE_OP_HASH_ENABLE;
1158 +
1159 +       if (!encrypt)
1160 +               dir->npe_mode |= NPE_OP_HASH_VERIFY;
1161 +
1162 +       /* register first chainvar */
1163 +       ret = register_chain_var(sa_ctx, opad, otarget,
1164 +                       init_len, ctx_addr, OP_REGISTER);
1165 +       if (ret) {
1166 +               kfree(ipad);
1167 +               kfree(opad);
1168 +               return ret;
1169 +       }
1170 +
1171 +       /* register second chainvar */
1172 +       ret = register_chain_var(sa_ctx, ipad, itarget,
1173 +                       init_len, ctx_addr, oper);
1174 +       if (ret) {
1175 +               kfree(ipad);
1176 +               return ret;
1177 +       }
1178 +
1179 +       return 1;
1180 +}
1181 +
1182 +static int gen_rev_aes_key(struct ix_sa_ctx *sa_ctx,
1183 +               u32 keylen_cfg, int cipher_op)
1184 +{
1185 +       unsigned char *cinfo;
1186 +       struct npe_crypt_cont *cr_cont;
1187 +
1188 +       keylen_cfg |= CIPH_ENCR | ALGO_AES | MOD_ECB;
1189 +       sa_ctx->rev_aes = ix_sa_get_cont(sa_ctx->master);
1190 +       if (!sa_ctx->rev_aes)
1191 +               return -ENOMEM;
1192 +
1193 +       cinfo = sa_ctx->rev_aes->ctl.rev_aes_key;
1194 +       *(u32*)cinfo = cpu_to_be32(keylen_cfg);
1195 +       cinfo += sizeof(keylen_cfg);
1196 +
1197 +       memcpy(cinfo, sa_ctx->c_key.key, sa_ctx->c_key.len);
1198 +
1199 +       cr_cont = ix_sa_get_cont(sa_ctx->master);
1200 +       if (!cr_cont) {
1201 +               ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes);
1202 +               sa_ctx->rev_aes = NULL;
1203 +               return -ENOMEM;
1204 +       }
1205 +       cr_cont->ctl.crypt.sa_ctx = sa_ctx;
1206 +       cr_cont->ctl.crypt.oper_type = cipher_op;
1207 +
1208 +       cr_cont->ctl.crypt.crypt_offs = 0;
1209 +       cr_cont->ctl.crypt.crypt_len = cpu_to_npe16(AES_BLOCK128);
1210 +       cr_cont->ctl.crypt.addr.rev_aes = cpu_to_npe32(
1211 +                       sa_ctx->rev_aes->phys + sizeof(keylen_cfg));
1212 +
1213 +       cr_cont->ctl.crypt.src_buf = 0;
1214 +       cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(sa_ctx->rev_aes->phys);
1215 +       cr_cont->ctl.crypt.mode = NPE_OP_ENC_GEN_KEY;
1216 +       cr_cont->ctl.crypt.init_len = sa_ctx->decrypt.npe_ctx_idx;
1217 +
1218 +       atomic_inc(&sa_ctx->use_cnt);
1219 +       queue_put_entry(sa_ctx->master->sendq, cr_cont->phys);
1220 +       if (queue_stat(sa_ctx->master->sendq) == 2) { /* overflow */
1221 +               atomic_dec(&sa_ctx->use_cnt);
1222 +               ix_sa_return_cont(sa_ctx->master, cr_cont);
1223 +               ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes);
1224 +               sa_ctx->rev_aes = NULL;
1225 +               return -ENOMEM;
1226 +       }
1227 +
1228 +       return 1;
1229 +}
1230 +
1231 +/* Return value
1232 + * 0 if nothing registered,
1233 + * 1 if something registered and
1234 + * < 0 on error
1235 + */
1236 +static int ix_sa_ctx_setup_cipher(struct ix_sa_ctx *sa_ctx,
1237 +               const struct ix_cipher_algo *algo, int cipher_op, int encrypt)
1238 +{
1239 +       unsigned char *cinfo;
1240 +       int keylen, init_len;
1241 +       u32 cipher_cfg;
1242 +       u32 keylen_cfg = 0;
1243 +       struct ix_sa_dir *dir;
1244 +
1245 +       dir = encrypt ? &sa_ctx->encrypt : &sa_ctx->decrypt;
1246 +       cinfo = dir->npe_ctx + dir->npe_ctx_idx;
1247 +
1248 +       sa_ctx->c_algo = algo;
1249 +
1250 +       if (!algo)
1251 +               return 0;
1252 +
1253 +       if (algo->type == CIPHER_TYPE_DES && sa_ctx->c_key.len != 8)
1254 +               return -EINVAL;
1255 +
1256 +       if (algo->type == CIPHER_TYPE_3DES && sa_ctx->c_key.len != 24)
1257 +               return -EINVAL;
1258 +
1259 +       keylen = 24;
1260 +
1261 +       if (encrypt) {
1262 +               cipher_cfg = algo->cfgword_enc;
1263 +               dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
1264 +       } else {
1265 +               cipher_cfg = algo->cfgword_dec;
1266 +       }
1267 +       if (algo->type == CIPHER_TYPE_AES) {
1268 +               switch (sa_ctx->c_key.len) {
1269 +                       case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
1270 +                       case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
1271 +                       case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
1272 +                       default: return -EINVAL;
1273 +               }
1274 +               keylen = sa_ctx->c_key.len;
1275 +               cipher_cfg |= keylen_cfg;
1276 +       }
1277 +
1278 +       /* write cfg word to cryptinfo */
1279 +       *(u32*)cinfo = cpu_to_be32(cipher_cfg);
1280 +       cinfo += sizeof(cipher_cfg);
1281 +
1282 +       /* write cipher key to cryptinfo */
1283 +       memcpy(cinfo, sa_ctx->c_key.key, sa_ctx->c_key.len);
1284 +       cinfo += keylen;
1285 +
1286 +       init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
1287 +       dir->npe_ctx_idx += init_len;
1288 +
1289 +       dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
1290 +
1291 +       if (algo->type == CIPHER_TYPE_AES && !encrypt) {
1292 +               return gen_rev_aes_key(sa_ctx, keylen_cfg, cipher_op);
1293 +       }
1294 +
1295 +       return 0;
1296 +}
1297 +
1298 +/* returns 0 on OK, <0 on error and 1 on overflow */
1299 +int ix_sa_crypto_perform(struct ix_sa_ctx *sa_ctx, u8 *data, void *ptr,
1300 +       int datalen, int c_offs, int c_len, int a_offs, int a_len,
1301 +       int hmac, char *iv, int encrypt)
1302 +{
1303 +       struct npe_crypt_cont *cr_cont;
1304 +       struct npe_cont *cont;
1305 +       u32 data_phys;
1306 +       int ret = -ENOMEM;
1307 +       struct ix_sa_dir *dir;
1308 +
1309 +       dir = encrypt ? &sa_ctx->encrypt : &sa_ctx->decrypt;
1310 +
1311 +       if (sa_ctx->state != STATE_REGISTERED)
1312 +               return -ENOENT;
1313 +
1314 +       cr_cont = ix_sa_get_cont(sa_ctx->master);
1315 +       if (!cr_cont)
1316 +               return ret;
1317 +
1318 +       cr_cont->ctl.crypt.sa_ctx = sa_ctx;
1319 +       cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(dir->npe_ctx_phys);
1320 +       cr_cont->ctl.crypt.oper_type = OP_PERFORM;
1321 +       cr_cont->ctl.crypt.mode = dir->npe_mode;
1322 +       cr_cont->ctl.crypt.init_len = dir->npe_ctx_idx;
1323 +
1324 +       if (sa_ctx->c_algo) {
1325 +               cr_cont->ctl.crypt.crypt_offs = cpu_to_npe16(c_offs);
1326 +               cr_cont->ctl.crypt.crypt_len = cpu_to_npe16(c_len);
1327 +               if (sa_ctx->c_algo->iv_len) {
1328 +                       if (!iv) {
1329 +                               ret = -EINVAL;
1330 +                               goto err_cr;
1331 +                       }
1332 +                       memcpy(cr_cont->ctl.crypt.iv, iv,
1333 +                                       sa_ctx->c_algo->iv_len);
1334 +               }
1335 +       }
1336 +
1337 +       if (sa_ctx->h_algo) {
1338 +               /* prepare hashing */
1339 +               cr_cont->ctl.crypt.auth_offs = cpu_to_npe16(a_offs);
1340 +               cr_cont->ctl.crypt.auth_len = cpu_to_npe16(a_len);
1341 +       }
1342 +
1343 +       data_phys = dma_map_single(sa_ctx->master->npe_dev,
1344 +                       data, datalen, DMA_BIDIRECTIONAL);
1345 +       if (hmac)
1346 +               cr_cont->ctl.crypt.addr.icv = cpu_to_npe32(data_phys + hmac);
1347 +
1348 +       /* Prepare the data ptr */
1349 +       cont = qmgr_get_cont(dev_get_drvdata(sa_ctx->master->sendq->dev));
1350 +       if (!cont) {
1351 +               goto err_unmap;
1352 +       }
1353 +
1354 +       cont->data = ptr;
1355 +       cont->eth.next = 0;
1356 +       cont->eth.buf_len = cpu_to_npe16(datalen);
1357 +       cont->eth.pkt_len = 0;
1358 +
1359 +       cont->eth.phys_addr = cpu_to_npe32(data_phys);
1360 +       cr_cont->ctl.crypt.src_buf = cpu_to_npe32(cont->phys);
1361 +
1362 +       atomic_inc(&sa_ctx->use_cnt);
1363 +       queue_put_entry(sa_ctx->master->sendq, cr_cont->phys);
1364 +       if (queue_stat(sa_ctx->master->sendq) != 2) {
1365 +               return 0;
1366 +       }
1367 +
1368 +       /* overflow */
1369 +       printk(KERN_ERR "%s: Overflow\n", __FUNCTION__);
1370 +       ret = -EAGAIN;
1371 +       atomic_dec(&sa_ctx->use_cnt);
1372 +       qmgr_return_cont(dev_get_drvdata(sa_ctx->master->sendq->dev), cont);
1373 +
1374 +err_unmap:
1375 +       dma_unmap_single(sa_ctx->master->npe_dev, data_phys, datalen,
1376 +                       DMA_BIDIRECTIONAL);
1377 +err_cr:
1378 +       ix_sa_return_cont(sa_ctx->master, cr_cont);
1379 +
1380 +       return ret;
1381 +}
1382 +
1383 +int ix_sa_ctx_setup_cipher_auth(struct ix_sa_ctx *sa_ctx,
1384 +               const struct ix_cipher_algo *cipher,
1385 +               const struct ix_hash_algo *auth, int len)
1386 +{
1387 +       int ret = 0, sum = 0;
1388 +       int cipher_op;
1389 +
1390 +       if (sa_ctx->state != STATE_UNREGISTERED)
1391 +               return -ENOENT;
1392 +
1393 +       atomic_inc(&sa_ctx->use_cnt);
1394 +
1395 +       cipher_op = auth ? OP_REGISTER : OP_REG_DONE;
1396 +       if ((ret = ix_sa_ctx_setup_cipher(sa_ctx, cipher, OP_REGISTER, 1)) < 0)
1397 +               goto out;
1398 +       sum += ret;
1399 +       if ((ret = ix_sa_ctx_setup_cipher(sa_ctx, cipher, cipher_op, 0)) < 0)
1400 +               goto out;
1401 +       sum += ret;
1402 +       if ((ret = ix_sa_ctx_setup_auth(sa_ctx, auth, len, OP_REGISTER, 1)) < 0)
1403 +               goto out;
1404 +       sum += ret;
1405 +       if ((ret = ix_sa_ctx_setup_auth(sa_ctx, auth, len, OP_REG_DONE, 0)) < 0)
1406 +               goto out;
1407 +       sum += ret;
1408 +
1409 +       /* Nothing registered ?
1410 +        * Ok, then we are done and call the callback here.
1411 +        */
1412 +       if (!sum) {
1413 +               if (sa_ctx->state == STATE_UNREGISTERED)
1414 +                       sa_ctx->state = STATE_REGISTERED;
1415 +               if (sa_ctx->reg_cb)
1416 +                       sa_ctx->reg_cb(sa_ctx, 0);
1417 +       }
1418 +out:
1419 +       atomic_dec(&sa_ctx->use_cnt);
1420 +       return ret;
1421 +}
1422 +
1423 +static int __init init_crypto(void)
1424 +{
1425 +       return init_sa_master(&sa_master);
1426 +}
1427 +
1428 +static void __exit finish_crypto(void)
1429 +{
1430 +       release_sa_master(&sa_master);
1431 +}
1432 +
1433 +MODULE_LICENSE("GPL");
1434 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1435 +
1436 +EXPORT_SYMBOL(ix_hash_by_id);
1437 +EXPORT_SYMBOL(ix_cipher_by_id);
1438 +
1439 +EXPORT_SYMBOL(ix_sa_ctx_new);
1440 +EXPORT_SYMBOL(ix_sa_ctx_free);
1441 +EXPORT_SYMBOL(ix_sa_ctx_setup_cipher_auth);
1442 +EXPORT_SYMBOL(ix_sa_crypto_perform);
1443 +
1444 +module_init(init_crypto);
1445 +module_exit(finish_crypto);
1446 +
1447 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ixp4xx_qmgr.c
1448 ===================================================================
1449 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
1450 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ixp4xx_qmgr.c       2007-02-21 02:24:35.000000000 -0800
1451 @@ -0,0 +1,474 @@
1452 +/*
1453 + * qmgr.c - reimplementation of the queue configuration interface.
1454 + *
1455 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
1456 + *
1457 + * This file is released under the GPLv2
1458 + */
1459 +
1460 +#include <linux/kernel.h>
1461 +#include <linux/module.h>
1462 +#include <linux/platform_device.h>
1463 +#include <linux/fs.h>
1464 +#include <linux/init.h>
1465 +#include <linux/slab.h>
1466 +#include <linux/dmapool.h>
1467 +#include <linux/interrupt.h>
1468 +#include <linux/err.h>
1469 +#include <linux/delay.h>
1470 +#include <asm/uaccess.h>
1471 +#include <asm/io.h>
1472 +
1473 +#include <linux/ixp_qmgr.h>
1474 +#include <linux/ixp_npe.h>
1475 +
1476 +#define IXQMGR_VERSION "IXP4XX Q Manager 0.2.1"
1477 +
1478 +static struct device *qmgr_dev = NULL;
1479 +
1480 +static int poll_freq = 4000;
1481 +static int poll_enable = 0;
1482 +static u32 timer_countup_ticks;
1483 +
1484 +module_param(poll_freq, int, 0644);
1485 +module_param(poll_enable, int, 0644);
1486 +
1487 +int queue_len(struct qm_queue *queue)
1488 +{
1489 +       struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
1490 +       int diff, offs;
1491 +       u32 val;
1492 +
1493 +       offs = queue->id/8 + QUE_LOW_STAT0;
1494 +       val = *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
1495 +
1496 +       diff = (val - (val >> 7)) & 0x7f;
1497 +       if (!diff) {
1498 +               /* diff == 0 means either empty or full, must look at STAT0 */
1499 +               if ((*(qmgr->addr + offs) >>  ((queue->id % 8)*4)) & 0x04)
1500 +                       diff = queue->len;
1501 +       }
1502 +       return diff;
1503 +}
1504 +
1505 +static int request_pool(struct device *dev, int count)
1506 +{
1507 +       int i;
1508 +       struct npe_cont *cont;
1509 +       struct qm_qmgr *qmgr = dev_get_drvdata(dev);
1510 +       dma_addr_t handle;
1511 +
1512 +       for (i=0; i<count; i++) {
1513 +               cont = dma_pool_alloc(qmgr->dmapool, GFP_KERNEL, &handle);
1514 +               if (!cont) {
1515 +                       return -ENOMEM;
1516 +               }
1517 +               cont->phys = handle;
1518 +               cont->virt = cont;
1519 +               write_lock(&qmgr->lock);
1520 +               cont->next = qmgr->pool;
1521 +               qmgr->pool = cont;
1522 +               write_unlock(&qmgr->lock);
1523 +       }
1524 +       return 0;
1525 +}
1526 +
1527 +static int free_pool(struct device *dev, int count)
1528 +{
1529 +       int i;
1530 +       struct npe_cont *cont;
1531 +       struct qm_qmgr *qmgr = dev_get_drvdata(dev);
1532 +
1533 +       for (i=0; i<count; i++) {
1534 +               write_lock(&qmgr->lock);
1535 +               cont = qmgr->pool;
1536 +               if (!cont) {
1537 +                       write_unlock(&qmgr->lock);
1538 +                       return -1;
1539 +               }
1540 +               qmgr->pool = cont->next;
1541 +               write_unlock(&qmgr->lock);
1542 +               dma_pool_free(qmgr->dmapool, cont, cont->phys);
1543 +       }
1544 +       return 0;
1545 +}
1546 +
1547 +static int get_free_qspace(struct qm_qmgr *qmgr, int len)
1548 +{
1549 +       int words = (qmgr->res->end - qmgr->res->start + 1) / 4 -
1550 +                       IX_QMGR_SRAM_SPACE;
1551 +       int i,q;
1552 +
1553 +       for (i=0; i<words; i+=len) {
1554 +               for (q=0; q<MAX_QUEUES; q++) {
1555 +                       struct qm_queue *qu = qmgr->queues[q];
1556 +                       if (!qu)
1557 +                               continue;
1558 +                       if ((qu->addr + qu->len > i) && (qu->addr < i + len))
1559 +                               break;
1560 +               }
1561 +               if (q == MAX_QUEUES) {
1562 +                       /* we have a free address */
1563 +                       return i;
1564 +               }
1565 +       }
1566 +       return -1;
1567 +}
1568 +
1569 +static inline int _log2(int x)
1570 +{
1571 +       int r=0;
1572 +       while(x>>=1)
1573 +               r++;
1574 +       return r;
1575 +}
1576 +
1577 +/*
1578 + * 32bit Config registers at IX_QMGR_QUECONFIG_BASE_OFFSET[Qid]
1579 + *    0 - 6 WRPTR Word offset to baseaddr (index 0 .. BSIZE-1)
1580 + *    7 -13 RDPTR        ''
1581 + *   14 -21 BADDR baseaddr = (offset to IX_QMGR_QUEBUFFER_SPACE_OFFSET) >> 6
1582 + *   22 -23 ESIZE entrySizeInWords (always 00 because entrySizeInWords==1)
1583 + *   24 -25 BSIZE qSizeInWords 00=16,01=32,10=64,11=128
1584 + *   26 -28 NE nearly empty
1585 + *   29 -31 NF nearly full
1586 + */
1587 +static int conf_q_regs(struct qm_queue *queue)
1588 +{
1589 +       int bsize = _log2(queue->len/16);
1590 +       int baddr = queue->addr + IX_QMGR_QCFG_SIZE;
1591 +
1592 +       /* +2, because baddr is in words and not in bytes */
1593 +       queue_write_cfg_reg(queue,  (bsize << 24) | (baddr<<(14-6+2)) );
1594 +
1595 +       return 0;
1596 +}
1597 +
1598 +static void pmu_timer_restart(void)
1599 +{
1600 +    unsigned long flags;
1601 +
1602 +    local_irq_save(flags);
1603 +
1604 +     __asm__(" mcr p14,0,%0,c1,c1,0\n"  /* write current counter */
1605 +            : : "r" (timer_countup_ticks));
1606 +
1607 +    __asm__(" mrc p14,0,r1,c4,c1,0; "  /* get int enable register */
1608 +            " orr r1,r1,#1; "
1609 +            " mcr p14,0,r1,c5,c1,0; "  /* clear overflow */
1610 +            " mcr p14,0,r1,c4,c1,0\n"  /* enable interrupts */
1611 +            : : : "r1");
1612 +
1613 +    local_irq_restore(flags);
1614 +}
1615 +
1616 +static void pmu_timer_init(void)
1617 +{
1618 +       u32 controlRegisterMask =
1619 +               BIT(0) | /* enable counters */
1620 +               BIT(2);  /* reset clock counter; */
1621 +
1622 +       /*
1623 +        *   Compute the number of xscale cycles needed between each
1624 +        *   PMU IRQ. This is done from the result of an OS calibration loop.
1625 +        *
1626 +        *   For 533MHz CPU, 533000000 tick/s / 4000 times/sec = 138250
1627 +        *   4000 times/sec = 37 mbufs/interrupt at line rate
1628 +        *   The pmu timer is reset to -138250 = 0xfffde3f6, to trigger an IRQ
1629 +        *   when this up counter overflows.
1630 +        *
1631 +        *   The multiplication gives a number of instructions per second.
1632 +        *   which is close to the processor frequency, and then close to the
1633 +        *   PMU clock rate.
1634 +        *
1635 +        *   2 is the number of instructions per loop
1636 +        *
1637 +        */
1638 +
1639 +       timer_countup_ticks = - ((loops_per_jiffy * HZ * 2) / poll_freq);
1640 +
1641 +       /* enable the CCNT (clock count) timer from the PMU */
1642 +       __asm__(" mcr p14,0,%0,c0,c1,0\n"
1643 +               : : "r" (controlRegisterMask));
1644 +}
1645 +
1646 +static void pmu_timer_disable(void)
1647 +{
1648 +       unsigned long flags;
1649 +
1650 +       local_irq_save(flags);
1651 +
1652 +       __asm__(" mrc p14,0,r1,c4,c1,0; "  /* get int enable register */
1653 +               " and r1,r1,#0x1e; "
1654 +               " mcr p14,0,r1,c4,c1,0\n"  /* disable interrupts */
1655 +               : : : "r1");
1656 +       local_irq_restore(flags);
1657 +}
1658 +
1659 +void queue_set_watermarks(struct qm_queue *queue, unsigned ne, unsigned nf)
1660 +{
1661 +       u32 val;
1662 +       /* calculate the register values
1663 +        * 0->0, 1->1, 2->2, 4->3, 8->4 16->5...*/
1664 +       ne = _log2(ne<<1) & 0x7;
1665 +       nf = _log2(nf<<1) & 0x7;
1666 +
1667 +       /* Mask out old watermarks */
1668 +       val = queue_read_cfg_reg(queue) & ~0xfc000000;
1669 +       queue_write_cfg_reg(queue, val | (ne << 26) | (nf << 29));
1670 +}
1671 +
1672 +int queue_set_irq_src(struct qm_queue *queue, int flag)
1673 +{
1674 +       struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
1675 +       u32 reg;
1676 +       int offs, bitoffs;
1677 +
1678 +       /* Q 0-7 are in REG0, 8-15 are in REG1, etc. They occupy 4 bits/Q */
1679 +       offs = queue->id/8 + INT0_SRC_SELREG0;
1680 +       bitoffs = (queue->id % 8)*4;
1681 +
1682 +       reg = *(qmgr->addr + offs) & ~(0xf << bitoffs);
1683 +       *(qmgr->addr + offs) = reg | (flag << bitoffs);
1684 +
1685 +       return 0;
1686 +}
1687 +
1688 +static irqreturn_t irq_qm1(int irq, void *dev_id)
1689 +{
1690 +       struct qm_qmgr *qmgr = dev_id;
1691 +       int offs, reg;
1692 +       struct qm_queue *queue;
1693 +
1694 +       if (poll_enable)
1695 +               pmu_timer_restart();
1696 +
1697 +       reg = *(qmgr->addr + QUE_INT_REG0);
1698 +       while(reg) {
1699 +               /*
1700 +                * count leading zeros. "offs" gets
1701 +                * the amount of leading 0 in "reg"
1702 +                */
1703 +               asm ("clz %0, %1;" : "=r"(offs) : "r"(reg));
1704 +               offs = 31 - offs;
1705 +               reg &= ~(1 << offs);
1706 +               queue = qmgr->queues[offs];
1707 +               if (likely(queue)) {
1708 +                       if (likely(queue->irq_cb)) {
1709 +                               queue->irq_cb(queue);
1710 +                       } else {
1711 +                               printk(KERN_ERR "Missing callback for Q %d\n",
1712 +                                               offs);
1713 +                       }
1714 +               } else {
1715 +                       printk(KERN_ERR "IRQ for unregistered Q %d\n", offs);
1716 +               }
1717 +       }
1718 +       return IRQ_HANDLED;
1719 +}
1720 +
1721 +struct qm_queue *request_queue(int qid, int len)
1722 +{
1723 +       int ram;
1724 +       struct qm_qmgr *qmgr;
1725 +       struct qm_queue *queue;
1726 +
1727 +       if (!qmgr_dev)
1728 +               return ERR_PTR(-ENODEV);
1729 +
1730 +       if ((qid < 0) || (qid > MAX_QUEUES))
1731 +               return ERR_PTR(-ERANGE);
1732 +
1733 +       switch (len) {
1734 +               case 16:
1735 +               case 32:
1736 +               case 64:
1737 +               case 128: break;
1738 +               default : return ERR_PTR(-EINVAL);
1739 +       }
1740 +
1741 +       qmgr = dev_get_drvdata(qmgr_dev);
1742 +
1743 +       if (qmgr->queues[qid]) {
1744 +               /* not an error, just in use already */
1745 +               return NULL;
1746 +       }
1747 +       if ((ram = get_free_qspace(qmgr, len)) < 0) {
1748 +               printk(KERN_ERR "No free SRAM space for this queue\n");
1749 +               return ERR_PTR(-ENOMEM);
1750 +       }
1751 +       if (!(queue = kzalloc(sizeof(struct qm_queue), GFP_KERNEL)))
1752 +               return ERR_PTR(-ENOMEM);
1753 +
1754 +       if (!try_module_get(THIS_MODULE)) {
1755 +               kfree(queue);
1756 +               return ERR_PTR(-ENODEV);
1757 +       }
1758 +
1759 +       queue->addr = ram;
1760 +       queue->len = len;
1761 +       queue->id = qid;
1762 +       queue->dev = get_device(qmgr_dev);
1763 +       queue->acc_reg = qmgr->addr + (4 * qid);
1764 +       qmgr->queues[qid] = queue;
1765 +       if (request_pool(qmgr_dev, len)) {
1766 +               printk(KERN_ERR "Failed to request DMA pool of Q %d\n", qid);
1767 +       }
1768 +
1769 +       conf_q_regs(queue);
1770 +       return queue;
1771 +}
1772 +
1773 +void release_queue(struct qm_queue *queue)
1774 +{
1775 +       struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
1776 +
1777 +       BUG_ON(qmgr->queues[queue->id] != queue);
1778 +       qmgr->queues[queue->id] = NULL;
1779 +
1780 +       if (free_pool(queue->dev, queue->len)) {
1781 +               printk(KERN_ERR "Failed to release DMA pool of Q %d\n",
1782 +                               queue->id);
1783 +       }
1784 +       queue_disable_irq(queue);
1785 +       queue_write_cfg_reg(queue, 0);
1786 +
1787 +       module_put(THIS_MODULE);
1788 +       put_device(queue->dev);
1789 +       kfree(queue);
1790 +}
1791 +
1792 +
1793 +
1794 +
1795 +static int qmgr_probe(struct platform_device *pdev)
1796 +{
1797 +       struct resource *res;
1798 +       struct qm_qmgr *qmgr;
1799 +       int size, ret=0, i;
1800 +
1801 +       if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0)))
1802 +               return -EIO;
1803 +
1804 +       if ((i = platform_get_irq(pdev, 0)) < 0)
1805 +               return -EIO;
1806 +
1807 +       if (!(qmgr = kzalloc(sizeof(struct qm_qmgr), GFP_KERNEL)))
1808 +               return -ENOMEM;
1809 +
1810 +       qmgr->irq = i;
1811 +       size = res->end - res->start +1;
1812 +       qmgr->res = request_mem_region(res->start, size, "ixp_qmgr");
1813 +       if (!qmgr->res) {
1814 +               ret = -EBUSY;
1815 +               goto out_free;
1816 +       }
1817 +
1818 +       qmgr->addr = ioremap(res->start, size);
1819 +       if (!qmgr->addr) {
1820 +               ret = -ENOMEM;
1821 +               goto out_rel;
1822 +       }
1823 +
1824 +       /* Reset Q registers */
1825 +       for (i=0; i<4; i++)
1826 +               *(qmgr->addr + QUE_LOW_STAT0 +i) = 0x33333333;
1827 +       for (i=0; i<10; i++)
1828 +               *(qmgr->addr + QUE_UO_STAT0 +i) = 0x0;
1829 +       for (i=0; i<4; i++)
1830 +               *(qmgr->addr + INT0_SRC_SELREG0 +i) = 0x0;
1831 +       for (i=0; i<2; i++) {
1832 +               *(qmgr->addr + QUE_IE_REG0 +i) = 0x00;
1833 +               *(qmgr->addr + QUE_INT_REG0 +i) = 0xffffffff;
1834 +       }
1835 +       for (i=0; i<64; i++) {
1836 +               *(qmgr->addr + IX_QMGR_QCFG_BASE + i) = 0x0;
1837 +       }
1838 +
1839 +       if (poll_enable) {
1840 +               pmu_timer_init();
1841 +               qmgr->irq = IRQ_IXP4XX_XSCALE_PMU;
1842 +       }
1843 +       ret = request_irq(qmgr->irq, irq_qm1, SA_SHIRQ | SA_INTERRUPT,
1844 +                       "qmgr", qmgr);
1845 +       if (ret) {
1846 +               printk(KERN_ERR "Failed to request IRQ(%d)\n", qmgr->irq);
1847 +               ret = -EIO;
1848 +               goto out_rel;
1849 +       }
1850 +       if (poll_enable)
1851 +               pmu_timer_restart();
1852 +
1853 +       rwlock_init(&qmgr->lock);
1854 +       qmgr->dmapool = dma_pool_create("qmgr", &pdev->dev,
1855 +                       sizeof(struct npe_cont), 32, 0);
1856 +       platform_set_drvdata(pdev, qmgr);
1857 +
1858 +       qmgr_dev = &pdev->dev;
1859 +
1860 +       printk(KERN_INFO IXQMGR_VERSION " initialized.\n");
1861 +
1862 +       return 0;
1863 +
1864 +out_rel:
1865 +       release_resource(qmgr->res);
1866 +out_free:
1867 +       kfree(qmgr);
1868 +       return ret;
1869 +}
1870 +
1871 +static int qmgr_remove(struct platform_device *pdev)
1872 +{
1873 +       struct qm_qmgr *qmgr = platform_get_drvdata(pdev);
1874 +       int i;
1875 +
1876 +       for (i=0; i<MAX_QUEUES; i++) {
1877 +               if (qmgr->queues[i]) {
1878 +                       printk(KERN_ERR "WARNING Unreleased Q: %d\n", i);
1879 +                       release_queue(qmgr->queues[i]);
1880 +               }
1881 +       }
1882 +
1883 +       if (poll_enable)
1884 +               pmu_timer_disable();
1885 +
1886 +       synchronize_irq (qmgr->irq);
1887 +       free_irq(qmgr->irq, qmgr);
1888 +
1889 +       dma_pool_destroy(qmgr->dmapool);
1890 +       iounmap(qmgr->addr);
1891 +       release_resource(qmgr->res);
1892 +       platform_set_drvdata(pdev, NULL);
1893 +       qmgr_dev = NULL;
1894 +       kfree(qmgr);
1895 +       return 0;
1896 +}
1897 +
1898 +static struct platform_driver ixp4xx_qmgr = {
1899 +       .driver.name    = "ixp4xx_qmgr",
1900 +       .probe          = qmgr_probe,
1901 +       .remove         = qmgr_remove,
1902 +};
1903 +
1904 +
1905 +static int __init init_qmgr(void)
1906 +{
1907 +       return platform_driver_register(&ixp4xx_qmgr);
1908 +}
1909 +
1910 +static void __exit finish_qmgr(void)
1911 +{
1912 +       platform_driver_unregister(&ixp4xx_qmgr);
1913 +}
1914 +
1915 +module_init(init_qmgr);
1916 +module_exit(finish_qmgr);
1917 +
1918 +MODULE_LICENSE("GPL");
1919 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1920 +
1921 +EXPORT_SYMBOL(request_queue);
1922 +EXPORT_SYMBOL(release_queue);
1923 +EXPORT_SYMBOL(queue_set_irq_src);
1924 +EXPORT_SYMBOL(queue_set_watermarks);
1925 +EXPORT_SYMBOL(queue_len);
1926 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/mac.h
1927 ===================================================================
1928 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
1929 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/mac.h       2007-02-21 02:24:35.000000000 -0800
1930 @@ -0,0 +1,275 @@
1931 +/*
1932 + * Copyright (C) 2002-2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
1933 + *
1934 + * This file is released under the GPLv2
1935 + */
1936 +
1937 +#include <linux/resource.h>
1938 +#include <linux/netdevice.h>
1939 +#include <linux/io.h>
1940 +#include <linux/mii.h>
1941 +#include <linux/workqueue.h>
1942 +#include <asm/hardware.h>
1943 +#include <linux/ixp_qmgr.h>
1944 +
1945 +/* 32 bit offsets to be added to u32 *pointers */
1946 +#define MAC_TX_CNTRL1       0x00  // 0x000
1947 +#define MAC_TX_CNTRL2       0x01  // 0x004
1948 +#define MAC_RX_CNTRL1       0x04  // 0x010
1949 +#define MAC_RX_CNTRL2       0x05  // 0x014
1950 +#define MAC_RANDOM_SEED     0x08  // 0x020
1951 +#define MAC_THRESH_P_EMPTY  0x0c  // 0x030
1952 +#define MAC_THRESH_P_FULL   0x0e  // 0x038
1953 +#define MAC_BUF_SIZE_TX     0x10  // 0x040
1954 +#define MAC_TX_DEFER        0x14  // 0x050
1955 +#define MAC_RX_DEFER        0x15  // 0x054
1956 +#define MAC_TX_TWO_DEFER_1  0x18  // 0x060
1957 +#define MAC_TX_TWO_DEFER_2  0x19  // 0x064
1958 +#define MAC_SLOT_TIME       0x1c  // 0x070
1959 +#define MAC_MDIO_CMD        0x20  // 0x080 4 registers 0x20 - 0x23
1960 +#define MAC_MDIO_STS        0x24  // 0x090 4 registers 0x24 - 0x27
1961 +#define MAC_ADDR_MASK       0x28  // 0x0A0 6 registers 0x28 - 0x2d
1962 +#define MAC_ADDR            0x30  // 0x0C0 6 registers 0x30 - 0x35
1963 +#define MAC_INT_CLK_THRESH  0x38  // 0x0E0 1 register
1964 +#define MAC_UNI_ADDR        0x3c  // 0x0F0 6 registers 0x3c - 0x41
1965 +#define MAC_CORE_CNTRL      0x7f  // 0x1fC
1966 +
1967 +/* TX Control Register 1*/
1968 +
1969 +#define TX_CNTRL1_TX_EN         BIT(0)
1970 +#define TX_CNTRL1_DUPLEX        BIT(1)
1971 +#define TX_CNTRL1_RETRY         BIT(2)
1972 +#define TX_CNTRL1_PAD_EN        BIT(3)
1973 +#define TX_CNTRL1_FCS_EN        BIT(4)
1974 +#define TX_CNTRL1_2DEFER        BIT(5)
1975 +#define TX_CNTRL1_RMII          BIT(6)
1976 +
1977 +/* TX Control Register 2 */
1978 +#define TX_CNTRL2_RETRIES_MASK  0xf
1979 +
1980 +/* RX Control Register 1 */
1981 +#define RX_CNTRL1_RX_EN         BIT(0)
1982 +#define RX_CNTRL1_PADSTRIP_EN   BIT(1)
1983 +#define RX_CNTRL1_CRC_EN        BIT(2)
1984 +#define RX_CNTRL1_PAUSE_EN      BIT(3)
1985 +#define RX_CNTRL1_LOOP_EN       BIT(4)
1986 +#define RX_CNTRL1_ADDR_FLTR_EN  BIT(5)
1987 +#define RX_CNTRL1_RX_RUNT_EN    BIT(6)
1988 +#define RX_CNTRL1_BCAST_DIS     BIT(7)
1989 +
1990 +/* RX Control Register 2 */
1991 +#define RX_CNTRL2_DEFER_EN      BIT(0)
1992 +
1993 +/* Core Control Register */
1994 +#define CORE_RESET              BIT(0)
1995 +#define CORE_RX_FIFO_FLUSH      BIT(1)
1996 +#define CORE_TX_FIFO_FLUSH      BIT(2)
1997 +#define CORE_SEND_JAM           BIT(3)
1998 +#define CORE_MDC_EN             BIT(4)
1999 +
2000 +/* Definitions for MII access routines*/
2001 +
2002 +#define MII_REG_SHL    16
2003 +#define MII_ADDR_SHL   21
2004 +
2005 +#define MII_GO                  BIT(31)
2006 +#define MII_WRITE               BIT(26)
2007 +#define MII_READ_FAIL           BIT(31)
2008 +
2009 +#define MII_TIMEOUT_10TH_SECS        5
2010 +#define MII_10TH_SEC_IN_MILLIS     100
2011 +
2012 +/*
2013 + *
2014 + * Default values
2015 + *
2016 + */
2017 +
2018 +#define MAC_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
2019 +
2020 +#define MAC_TX_CNTRL1_DEFAULT  (\
2021 +               TX_CNTRL1_TX_EN | \
2022 +               TX_CNTRL1_RETRY  | \
2023 +               TX_CNTRL1_FCS_EN | \
2024 +               TX_CNTRL1_2DEFER | \
2025 +               TX_CNTRL1_PAD_EN )
2026 +
2027 +#define MAC_TX_MAX_RETRIES_DEFAULT 0x0f
2028 +
2029 +#define MAC_RX_CNTRL1_DEFAULT ( \
2030 +               RX_CNTRL1_PADSTRIP_EN | \
2031 +               RX_CNTRL1_CRC_EN | \
2032 +               RX_CNTRL1_RX_EN )
2033 +
2034 +#define MAC_RX_CNTRL2_DEFAULT       0x0
2035 +#define MAC_TX_CNTRL2_DEFAULT       TX_CNTRL2_RETRIES_MASK
2036 +
2037 +/* Thresholds determined by NPE firmware FS */
2038 +#define MAC_THRESH_P_EMPTY_DEFAULT  0x12
2039 +#define MAC_THRESH_P_FULL_DEFAULT   0x30
2040 +
2041 +/* Number of bytes that must be in the tx fifo before
2042 + * transmission commences */
2043 +#define MAC_BUF_SIZE_TX_DEFAULT     0x8
2044 +
2045 +/* One-part deferral values */
2046 +#define MAC_TX_DEFER_DEFAULT        0x15
2047 +#define MAC_RX_DEFER_DEFAULT        0x16
2048 +
2049 +/* Two-part deferral values... */
2050 +#define MAC_TX_TWO_DEFER_1_DEFAULT  0x08
2051 +#define MAC_TX_TWO_DEFER_2_DEFAULT  0x07
2052 +
2053 +/* This value applies to MII */
2054 +#define MAC_SLOT_TIME_DEFAULT       0x80
2055 +
2056 +/* This value applies to RMII */
2057 +#define MAC_SLOT_TIME_RMII_DEFAULT  0xFF
2058 +
2059 +#define MAC_ADDR_MASK_DEFAULT       0xFF
2060 +
2061 +#define MAC_INT_CLK_THRESH_DEFAULT  0x1
2062 +/* The following is a value chosen at random */
2063 +#define MAC_RANDOM_SEED_DEFAULT     0x8
2064 +
2065 +/* By default we must configure the MAC to generate the MDC clock*/
2066 +#define CORE_DEFAULT                (CORE_MDC_EN)
2067 +
2068 +/* End of Intel provided register information */
2069 +
2070 +extern int
2071 +mdio_read_register(struct net_device *dev, int phy_addr, int phy_reg);
2072 +extern void
2073 +mdio_write_register(struct net_device *dev, int phy_addr, int phy_reg, int val);
2074 +extern void init_mdio(struct net_device *dev, int phy_id);
2075 +
2076 +struct mac_info {
2077 +       u32 __iomem *addr;
2078 +       struct resource *res;
2079 +       struct device *npe_dev;
2080 +       struct net_device *netdev;
2081 +       struct qm_qmgr *qmgr;
2082 +       struct qm_queue *rxq;
2083 +       struct qm_queue *txq;
2084 +       struct qm_queue *rxdoneq;
2085 +       u32 irqflags;
2086 +       struct net_device_stats stat;
2087 +       struct mii_if_info mii;
2088 +       struct delayed_work mdio_thread;
2089 +       int rxq_pkt;
2090 +       int txq_pkt;
2091 +       int unloading;
2092 +       struct mac_plat_info *plat;
2093 +       int npe_stat_num;
2094 +       spinlock_t rx_lock;
2095 +       u32 msg_enable;
2096 +};
2097 +
2098 +static inline void mac_write_reg(struct mac_info *mac, int offset, u32 val)
2099 +{
2100 +       *(mac->addr + offset) = val;
2101 +}
2102 +static inline u32 mac_read_reg(struct mac_info *mac, int offset)
2103 +{
2104 +       return *(mac->addr + offset);
2105 +}
2106 +static inline void mac_set_regbit(struct mac_info *mac, int offset, u32 bit)
2107 +{
2108 +       mac_write_reg(mac, offset, mac_read_reg(mac, offset) | bit);
2109 +}
2110 +static inline void mac_reset_regbit(struct mac_info *mac, int offset, u32 bit)
2111 +{
2112 +       mac_write_reg(mac, offset, mac_read_reg(mac, offset) & ~bit);
2113 +}
2114 +
2115 +static inline void mac_mdio_cmd_write(struct mac_info *mac, u32 cmd)
2116 +{
2117 +       int i;
2118 +       for(i=0; i<4; i++) {
2119 +               mac_write_reg(mac, MAC_MDIO_CMD + i, cmd & 0xff);
2120 +               cmd >>=8;
2121 +       }
2122 +}
2123 +
2124 +#define mac_mdio_cmd_read(mac) mac_mdio_read((mac), MAC_MDIO_CMD)
2125 +#define mac_mdio_status_read(mac) mac_mdio_read((mac), MAC_MDIO_STS)
2126 +static inline u32 mac_mdio_read(struct mac_info *mac, int offset)
2127 +{
2128 +       int i;
2129 +       u32 data = 0;
2130 +       for(i=0; i<4; i++) {
2131 +               data |= (mac_read_reg(mac, offset + i) & 0xff) << (i*8);
2132 +       }
2133 +       return data;
2134 +}
2135 +
2136 +static inline u32 mdio_cmd(int phy_addr, int phy_reg)
2137 +{
2138 +       return phy_addr << MII_ADDR_SHL |
2139 +               phy_reg << MII_REG_SHL |
2140 +               MII_GO;
2141 +}
2142 +
2143 +#define MAC_REG_LIST { \
2144 +       MAC_TX_CNTRL1, MAC_TX_CNTRL2, \
2145 +       MAC_RX_CNTRL1, MAC_RX_CNTRL2, \
2146 +       MAC_RANDOM_SEED, MAC_THRESH_P_EMPTY, MAC_THRESH_P_FULL, \
2147 +       MAC_BUF_SIZE_TX, MAC_TX_DEFER, MAC_RX_DEFER, \
2148 +       MAC_TX_TWO_DEFER_1, MAC_TX_TWO_DEFER_2, MAC_SLOT_TIME, \
2149 +       MAC_ADDR_MASK +0, MAC_ADDR_MASK +1, MAC_ADDR_MASK +2, \
2150 +       MAC_ADDR_MASK +3, MAC_ADDR_MASK +4, MAC_ADDR_MASK +5, \
2151 +       MAC_ADDR +0, MAC_ADDR +1, MAC_ADDR +2, \
2152 +       MAC_ADDR +3, MAC_ADDR +4, MAC_ADDR +5, \
2153 +       MAC_INT_CLK_THRESH, \
2154 +       MAC_UNI_ADDR +0, MAC_UNI_ADDR +1, MAC_UNI_ADDR +2, \
2155 +       MAC_UNI_ADDR +3, MAC_UNI_ADDR +4, MAC_UNI_ADDR +5, \
2156 +       MAC_CORE_CNTRL \
2157 +}
2158 +
2159 +#define NPE_STAT_NUM            34
2160 +#define NPE_STAT_NUM_BASE       22
2161 +#define NPE_Q_STAT_NUM           4
2162 +
2163 +#define NPE_Q_STAT_STRINGS \
2164 +       {"RX ready to use queue len     "}, \
2165 +       {"RX received queue len         "}, \
2166 +       {"TX to be send queue len       "}, \
2167 +       {"TX done queue len             "},
2168 +
2169 +#define NPE_STAT_STRINGS \
2170 +       {"StatsAlignmentErrors          "}, \
2171 +       {"StatsFCSErrors                "}, \
2172 +       {"StatsInternalMacReceiveErrors "}, \
2173 +       {"RxOverrunDiscards             "}, \
2174 +       {"RxLearnedEntryDiscards        "}, \
2175 +       {"RxLargeFramesDiscards         "}, \
2176 +       {"RxSTPBlockedDiscards          "}, \
2177 +       {"RxVLANTypeFilterDiscards      "}, \
2178 +       {"RxVLANIdFilterDiscards        "}, \
2179 +       {"RxInvalidSourceDiscards       "}, \
2180 +       {"RxBlackListDiscards           "}, \
2181 +       {"RxWhiteListDiscards           "}, \
2182 +       {"RxUnderflowEntryDiscards      "}, \
2183 +       {"StatsSingleCollisionFrames    "}, \
2184 +       {"StatsMultipleCollisionFrames  "}, \
2185 +       {"StatsDeferredTransmissions    "}, \
2186 +       {"StatsLateCollisions           "}, \
2187 +       {"StatsExcessiveCollsions       "}, \
2188 +       {"StatsInternalMacTransmitErrors"}, \
2189 +       {"StatsCarrierSenseErrors       "}, \
2190 +       {"TxLargeFrameDiscards          "}, \
2191 +       {"TxVLANIdFilterDiscards        "}, \
2192 +\
2193 +       {"RxValidFramesTotalOctets      "}, \
2194 +       {"RxUcastPkts                   "}, \
2195 +       {"RxBcastPkts                   "}, \
2196 +       {"RxMcastPkts                   "}, \
2197 +       {"RxPkts64Octets                "}, \
2198 +       {"RxPkts65to127Octets           "}, \
2199 +       {"RxPkts128to255Octets          "}, \
2200 +       {"RxPkts256to511Octets          "}, \
2201 +       {"RxPkts512to1023Octets         "}, \
2202 +       {"RxPkts1024to1518Octets        "}, \
2203 +       {"RxInternalNPEReceiveErrors    "}, \
2204 +       {"TxInternalNPETransmitErrors   "}
2205 +
2206 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/mac_driver.c
2207 ===================================================================
2208 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
2209 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/mac_driver.c        2007-02-21 02:24:46.000000000 -0800
2210 @@ -0,0 +1,850 @@
2211 +/*
2212 + * mac_driver.c - provide a network interface for each MAC
2213 + *
2214 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
2215 + *
2216 + * This file is released under the GPLv2
2217 + */
2218 +
2219 +#include <linux/kernel.h>
2220 +#include <linux/module.h>
2221 +#include <linux/platform_device.h>
2222 +#include <linux/netdevice.h>
2223 +#include <linux/etherdevice.h>
2224 +#include <linux/ethtool.h>
2225 +#include <linux/slab.h>
2226 +#include <linux/delay.h>
2227 +#include <linux/err.h>
2228 +#include <linux/dma-mapping.h>
2229 +#include <linux/workqueue.h>
2230 +#include <asm/io.h>
2231 +#include <asm/irq.h>
2232 +
2233 +
2234 +#include <linux/ixp_qmgr.h>
2235 +#include <linux/ixp_npe.h>
2236 +#include "mac.h"
2237 +
2238 +#define MDIO_INTERVAL (3*HZ)
2239 +#define RX_QUEUE_PREFILL 64
2240 +#define TX_QUEUE_PREFILL 16
2241 +
2242 +#define IXMAC_NAME "ixp4xx_mac"
2243 +#define IXMAC_VERSION "0.3.1"
2244 +
2245 +#define MAC_DEFAULT_REG(mac, name) \
2246 +       mac_write_reg(mac, MAC_ ## name, MAC_ ## name ## _DEFAULT)
2247 +
2248 +#define TX_DONE_QID 31
2249 +
2250 +#define DMA_ALLOC_SIZE 2048
2251 +#define DMA_HDR_SIZE   (sizeof(struct npe_cont))
2252 +#define DMA_BUF_SIZE (DMA_ALLOC_SIZE - DMA_HDR_SIZE)
2253 +
2254 +/* Since the NPEs use 1 Return Q for sent frames, we need a device
2255 + * independent return Q. We call it tx_doneq.
2256 + * It will be initialized during module load and uninitialized
2257 + * during module unload. Evil hack, but there is no choice :-(
2258 + */
2259 +
2260 +static struct qm_queue *tx_doneq = NULL;
2261 +static int debug = -1;
2262 +module_param(debug, int, 0);
2263 +
2264 +static int init_buffer(struct qm_queue *queue, int count)
2265 +{
2266 +       int i;
2267 +       struct npe_cont *cont;
2268 +
2269 +       for (i=0; i<count; i++) {
2270 +               cont = kmalloc(DMA_ALLOC_SIZE, GFP_KERNEL | GFP_DMA);
2271 +               if (!cont)
2272 +                       goto err;
2273 +
2274 +               cont->phys = dma_map_single(queue->dev, cont, DMA_ALLOC_SIZE,
2275 +                               DMA_BIDIRECTIONAL);
2276 +               if (dma_mapping_error(cont->phys))
2277 +                       goto err;
2278 +
2279 +               cont->data = cont+1;
2280 +               /* now the buffer is on a 32 bit boundary.
2281 +                * we add 2 bytes for good alignment to SKB */
2282 +               cont->data+=2;
2283 +               cont->eth.next = 0;
2284 +               cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2285 +               cont->eth.pkt_len = 0;
2286 +               /* also add 2 alignment bytes from cont->data*/
2287 +               cont->eth.phys_addr = cpu_to_npe32(cont->phys+ DMA_HDR_SIZE+ 2);
2288 +
2289 +               dma_sync_single(queue->dev, cont->phys, DMA_HDR_SIZE,
2290 +                               DMA_TO_DEVICE);
2291 +
2292 +               queue_put_entry(queue, cont->phys);
2293 +               if (queue_stat(queue) == 2) { /* overflow */
2294 +                       dma_unmap_single(queue->dev, cont->phys, DMA_ALLOC_SIZE,
2295 +                               DMA_BIDIRECTIONAL);
2296 +                       goto err;
2297 +               }
2298 +       }
2299 +       return i;
2300 +err:
2301 +       if (cont)
2302 +               kfree(cont);
2303 +       return i;
2304 +}
2305 +
2306 +static int destroy_buffer(struct qm_queue *queue, int count)
2307 +{
2308 +       u32 phys;
2309 +       int i;
2310 +       struct npe_cont *cont;
2311 +
2312 +       for (i=0; i<count; i++) {
2313 +               phys = queue_get_entry(queue) & ~0xf;
2314 +               if (!phys)
2315 +                       break;
2316 +               dma_unmap_single(queue->dev, phys, DMA_ALLOC_SIZE,
2317 +                               DMA_BIDIRECTIONAL);
2318 +               cont = dma_to_virt(queue->dev, phys);
2319 +               kfree(cont);
2320 +       }
2321 +       return i;
2322 +}
2323 +
2324 +static void mac_init(struct mac_info *mac)
2325 +{
2326 +       MAC_DEFAULT_REG(mac, TX_CNTRL2);
2327 +       MAC_DEFAULT_REG(mac, RANDOM_SEED);
2328 +       MAC_DEFAULT_REG(mac, THRESH_P_EMPTY);
2329 +       MAC_DEFAULT_REG(mac, THRESH_P_FULL);
2330 +       MAC_DEFAULT_REG(mac, TX_DEFER);
2331 +       MAC_DEFAULT_REG(mac, TX_TWO_DEFER_1);
2332 +       MAC_DEFAULT_REG(mac, TX_TWO_DEFER_2);
2333 +       MAC_DEFAULT_REG(mac, SLOT_TIME);
2334 +       MAC_DEFAULT_REG(mac, INT_CLK_THRESH);
2335 +       MAC_DEFAULT_REG(mac, BUF_SIZE_TX);
2336 +       MAC_DEFAULT_REG(mac, TX_CNTRL1);
2337 +       MAC_DEFAULT_REG(mac, RX_CNTRL1);
2338 +}
2339 +
2340 +static void mac_set_uniaddr(struct net_device *dev)
2341 +{
2342 +       int i;
2343 +       struct mac_info *mac = netdev_priv(dev);
2344 +       struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2345 +
2346 +       /* check for multicast */
2347 +       if (dev->dev_addr[0] & 1)
2348 +               return;
2349 +
2350 +       npe_mh_setportaddr(npe, mac->plat, dev->dev_addr);
2351 +       npe_mh_disable_firewall(npe, mac->plat);
2352 +       for (i=0; i<dev->addr_len; i++)
2353 +               mac_write_reg(mac, MAC_UNI_ADDR + i, dev->dev_addr[i]);
2354 +}
2355 +
2356 +static void update_duplex_mode(struct net_device *dev)
2357 +{
2358 +       struct mac_info *mac = netdev_priv(dev);
2359 +       if (netif_msg_link(mac)) {
2360 +               printk(KERN_DEBUG "Link of %s is %s-duplex\n", dev->name,
2361 +                               mac->mii.full_duplex ? "full" : "half");
2362 +       }
2363 +       if (mac->mii.full_duplex) {
2364 +               mac_reset_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_DUPLEX);
2365 +       } else {
2366 +               mac_set_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_DUPLEX);
2367 +       }
2368 +}
2369 +
2370 +static int media_check(struct net_device *dev, int init)
2371 +{
2372 +       struct mac_info *mac = netdev_priv(dev);
2373 +
2374 +       if (mii_check_media(&mac->mii, netif_msg_link(mac), init)) {
2375 +               update_duplex_mode(dev);
2376 +               return 1;
2377 +       }
2378 +       return 0;
2379 +}
2380 +
2381 +static void get_npe_stats(struct mac_info *mac, u32 *buf, int len, int reset)
2382 +{
2383 +       struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2384 +       u32 phys;
2385 +
2386 +       memset(buf, len, 0);
2387 +       phys = dma_map_single(mac->npe_dev, buf, len, DMA_BIDIRECTIONAL);
2388 +       npe_mh_get_stats(npe, mac->plat, phys, reset);
2389 +       dma_unmap_single(mac->npe_dev, phys, len, DMA_BIDIRECTIONAL);
2390 +}
2391 +
2392 +static void irqcb_recv(struct qm_queue *queue)
2393 +{
2394 +       struct net_device *dev = queue->cb_data;
2395 +
2396 +       queue_ack_irq(queue);
2397 +       queue_disable_irq(queue);
2398 +       if (netif_running(dev))
2399 +               netif_rx_schedule(dev);
2400 +}
2401 +
2402 +int ix_recv(struct net_device *dev, int *budget, struct qm_queue *queue)
2403 +{
2404 +       struct mac_info *mac = netdev_priv(dev);
2405 +       struct sk_buff *skb;
2406 +       u32 phys;
2407 +       struct npe_cont *cont;
2408 +
2409 +       while (*budget > 0 && netif_running(dev) ) {
2410 +               int len;
2411 +               phys = queue_get_entry(queue) & ~0xf;
2412 +               if (!phys)
2413 +                       break;
2414 +               dma_sync_single(queue->dev, phys, DMA_HDR_SIZE,
2415 +                               DMA_FROM_DEVICE);
2416 +               cont = dma_to_virt(queue->dev, phys);
2417 +               len = npe_to_cpu16(cont->eth.pkt_len) -4; /* strip FCS */
2418 +
2419 +               if (unlikely(netif_msg_rx_status(mac))) {
2420 +                       printk(KERN_DEBUG "%s: RX packet size: %u\n",
2421 +                               dev->name, len);
2422 +                       queue_state(mac->rxq);
2423 +                       queue_state(mac->rxdoneq);
2424 +               }
2425 +               skb = dev_alloc_skb(len + 2);
2426 +               if (likely(skb)) {
2427 +                       skb->dev = dev;
2428 +                       skb_reserve(skb, 2);
2429 +                       dma_sync_single(queue->dev, cont->eth.phys_addr, len,
2430 +                                       DMA_FROM_DEVICE);
2431 +#ifdef CONFIG_NPE_ADDRESS_COHERENT
2432 +                       /* swap the payload of the SKB */
2433 +                       {
2434 +                               u32 *t = (u32*)(skb->data-2);
2435 +                               u32 *s = (u32*)(cont->data-2);
2436 +                               int i, j = (len+5)/4;
2437 +                               for (i=0; i<j; i++)
2438 +                                       t[i] = cpu_to_be32(s[i]);
2439 +                       }
2440 +#else                  
2441 +                       eth_copy_and_sum(skb, cont->data, len, 0);
2442 +#endif
2443 +                       skb_put(skb, len);
2444 +                       skb->protocol = eth_type_trans(skb, dev);
2445 +                       dev->last_rx = jiffies;
2446 +                       netif_receive_skb(skb);
2447 +                       mac->stat.rx_packets++;
2448 +                       mac->stat.rx_bytes += skb->len;
2449 +               } else {
2450 +                       mac->stat.rx_dropped++;
2451 +               }
2452 +               cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2453 +               cont->eth.pkt_len = 0;
2454 +               dma_sync_single(queue->dev, phys, DMA_HDR_SIZE, DMA_TO_DEVICE);
2455 +               queue_put_entry(mac->rxq, phys);
2456 +               dev->quota--;
2457 +               (*budget)--;
2458 +       }
2459 +
2460 +       return !budget;
2461 +}
2462 +
2463 +static int ix_poll(struct net_device *dev, int *budget)
2464 +{
2465 +       struct mac_info *mac = netdev_priv(dev);
2466 +       struct qm_queue *queue = mac->rxdoneq;
2467 +
2468 +       for (;;) {
2469 +               if (ix_recv(dev, budget, queue))
2470 +                       return 1;
2471 +               netif_rx_complete(dev);
2472 +               queue_enable_irq(queue);
2473 +               if (!queue_len(queue))
2474 +                       break;
2475 +               queue_disable_irq(queue);
2476 +               if (netif_rx_reschedule(dev, 0))
2477 +                       break;
2478 +       }
2479 +       return 0;
2480 +}
2481 +
2482 +static void ixmac_set_rx_mode (struct net_device *dev)
2483 +{
2484 +       struct mac_info *mac = netdev_priv(dev);
2485 +       struct dev_mc_list *mclist;
2486 +       u8 aset[dev->addr_len], aclear[dev->addr_len];
2487 +       int i,j;
2488 +
2489 +       if (dev->flags & IFF_PROMISC) {
2490 +               mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN);
2491 +       } else {
2492 +               mac_set_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN);
2493 +
2494 +               mclist = dev->mc_list;
2495 +               memset(aset, 0xff, dev->addr_len);
2496 +               memset(aclear, 0x00, dev->addr_len);
2497 +               for (i = 0; mclist && i < dev->mc_count; i++) {
2498 +                       for (j=0; j< dev->addr_len; j++) {
2499 +                               aset[j] &= mclist->dmi_addr[j];
2500 +                               aclear[j] |= mclist->dmi_addr[j];
2501 +                       }
2502 +                       mclist = mclist->next;
2503 +               }
2504 +               for (j=0; j< dev->addr_len; j++) {
2505 +                       aclear[j] = aset[j] | ~aclear[j];
2506 +               }
2507 +               for (i=0; i<dev->addr_len; i++) {
2508 +                       mac_write_reg(mac, MAC_ADDR + i, aset[i]);
2509 +                       mac_write_reg(mac, MAC_ADDR_MASK + i, aclear[i]);
2510 +               }
2511 +       }
2512 +}
2513 +
2514 +static int ixmac_open (struct net_device *dev)
2515 +{
2516 +       struct mac_info *mac = netdev_priv(dev);
2517 +       struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2518 +       u32 buf[NPE_STAT_NUM];
2519 +       int i;
2520 +       u32 phys;
2521 +
2522 +       /* first check if the NPE is up and running */
2523 +       if (!( npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN)) {
2524 +               printk(KERN_ERR "%s: %s not running\n", dev->name,
2525 +                               npe->plat->name);
2526 +               return -EIO;
2527 +       }
2528 +       if (npe_mh_status(npe)) {
2529 +               printk(KERN_ERR "%s: %s not responding\n", dev->name,
2530 +                               npe->plat->name);
2531 +               return -EIO;
2532 +       }
2533 +       mac->txq_pkt += init_buffer(mac->txq, TX_QUEUE_PREFILL - mac->txq_pkt);
2534 +       mac->rxq_pkt += init_buffer(mac->rxq, RX_QUEUE_PREFILL - mac->rxq_pkt);
2535 +
2536 +       queue_enable_irq(mac->rxdoneq);
2537 +
2538 +       /* drain all buffers from then RX-done-q to make the IRQ happen */
2539 +       while ((phys = queue_get_entry(mac->rxdoneq) & ~0xf)) {
2540 +               struct npe_cont *cont;
2541 +               cont = dma_to_virt(mac->rxdoneq->dev, phys);
2542 +               cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2543 +               cont->eth.pkt_len = 0;
2544 +               dma_sync_single(mac->rxdoneq->dev, phys, DMA_HDR_SIZE,
2545 +                               DMA_TO_DEVICE);
2546 +               queue_put_entry(mac->rxq, phys);
2547 +       }
2548 +       mac_init(mac);
2549 +       npe_mh_set_rxqid(npe, mac->plat, mac->plat->rxdoneq_id);
2550 +       get_npe_stats(mac, buf, sizeof(buf), 1); /* reset stats */
2551 +       get_npe_stats(mac, buf, sizeof(buf), 0);
2552 +       /*
2553 +        * if the extended stats contain random values
2554 +        * the NPE image lacks extended statistic counters
2555 +        */
2556 +       for (i=NPE_STAT_NUM_BASE; i<NPE_STAT_NUM; i++) {
2557 +               if (buf[i] >10000)
2558 +                       break;
2559 +       }
2560 +       mac->npe_stat_num = i<NPE_STAT_NUM ? NPE_STAT_NUM_BASE : NPE_STAT_NUM;
2561 +       mac->npe_stat_num += NPE_Q_STAT_NUM;
2562 +
2563 +       mac_set_uniaddr(dev);
2564 +       media_check(dev, 1);
2565 +       ixmac_set_rx_mode(dev);
2566 +       netif_start_queue(dev);
2567 +       schedule_delayed_work(&mac->mdio_thread, MDIO_INTERVAL);
2568 +       if (netif_msg_ifup(mac)) {
2569 +                printk(KERN_DEBUG "%s: open " IXMAC_NAME
2570 +                       " RX queue %d bufs, TX queue %d bufs\n",
2571 +                        dev->name, mac->rxq_pkt, mac->txq_pkt);
2572 +       }
2573 +       return 0;
2574 +}
2575 +
2576 +static int ixmac_start_xmit (struct sk_buff *skb, struct net_device *dev)
2577 +{
2578 +       struct mac_info *mac = netdev_priv(dev);
2579 +       struct npe_cont *cont;
2580 +       u32 phys;
2581 +       struct qm_queue *queue = mac->txq;
2582 +
2583 +       if (unlikely(skb->len > DMA_BUF_SIZE)) {
2584 +               dev_kfree_skb(skb);
2585 +               mac->stat.tx_errors++;
2586 +               return NETDEV_TX_OK;
2587 +       }
2588 +       phys = queue_get_entry(tx_doneq) & ~0xf;
2589 +       if (!phys)
2590 +               goto busy;
2591 +       cont = dma_to_virt(queue->dev, phys);
2592 +#ifdef CONFIG_NPE_ADDRESS_COHERENT
2593 +       /* swap the payload of the SKB */
2594 +       {
2595 +               u32 *s = (u32*)(skb->data-2);
2596 +               u32 *t = (u32*)(cont->data-2);
2597 +               int i,j = (skb->len+5) / 4;
2598 +               for (i=0; i<j; i++)
2599 +                       t[i] = cpu_to_be32(s[i]);
2600 +       }
2601 +#else                  
2602 +       //skb_copy_and_csum_dev(skb, cont->data);
2603 +       memcpy(cont->data, skb->data, skb->len);
2604 +#endif
2605 +       cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2606 +       cont->eth.pkt_len = cpu_to_npe16(skb->len);
2607 +       /* disable VLAN functions in NPE image for now */
2608 +       cont->eth.flags = 0;
2609 +       dma_sync_single(queue->dev, phys, skb->len + DMA_HDR_SIZE,
2610 +                       DMA_TO_DEVICE);
2611 +       queue_put_entry(queue, phys);
2612 +       if (queue_stat(queue) == 2) { /* overflow */
2613 +               queue_put_entry(tx_doneq, phys);
2614 +               goto busy;
2615 +       }
2616 +        dev_kfree_skb(skb);
2617 +
2618 +       mac->stat.tx_packets++;
2619 +       mac->stat.tx_bytes += skb->len;
2620 +       dev->trans_start = jiffies;
2621 +       if (netif_msg_tx_queued(mac)) {
2622 +               printk(KERN_DEBUG "%s: TX packet size %u\n",
2623 +                               dev->name, skb->len);
2624 +               queue_state(mac->txq);
2625 +               queue_state(tx_doneq);
2626 +       }
2627 +       return NETDEV_TX_OK;
2628 +busy:
2629 +       return NETDEV_TX_BUSY;
2630 +}
2631 +
2632 +static int ixmac_close (struct net_device *dev)
2633 +{
2634 +       struct mac_info *mac = netdev_priv(dev);
2635 +
2636 +       netif_stop_queue (dev);
2637 +       queue_disable_irq(mac->rxdoneq);
2638 +
2639 +       mac->txq_pkt -= destroy_buffer(tx_doneq, mac->txq_pkt);
2640 +       mac->rxq_pkt -= destroy_buffer(mac->rxq, mac->rxq_pkt);
2641 +
2642 +       cancel_rearming_delayed_work(&(mac->mdio_thread));
2643 +
2644 +       if (netif_msg_ifdown(mac)) {
2645 +               printk(KERN_DEBUG "%s: close " IXMAC_NAME
2646 +                       " RX queue %d bufs, TX queue %d bufs\n",
2647 +                       dev->name, mac->rxq_pkt, mac->txq_pkt);
2648 +       }
2649 +       return 0;
2650 +}
2651 +
2652 +static int ixmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2653 +{
2654 +       struct mac_info *mac = netdev_priv(dev);
2655 +       int rc, duplex_changed;
2656 +
2657 +       if (!netif_running(dev))
2658 +               return -EINVAL;
2659 +       if (!try_module_get(THIS_MODULE))
2660 +               return -ENODEV;
2661 +       rc = generic_mii_ioctl(&mac->mii, if_mii(rq), cmd, &duplex_changed);
2662 +       module_put(THIS_MODULE);
2663 +       if (duplex_changed)
2664 +               update_duplex_mode(dev);
2665 +       return rc;
2666 +}
2667 +
2668 +static struct net_device_stats *ixmac_stats (struct net_device *dev)
2669 +{
2670 +       struct mac_info *mac = netdev_priv(dev);
2671 +       return &mac->stat;
2672 +}
2673 +
2674 +static void ixmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2675 +{
2676 +       struct mac_info *mac = netdev_priv(dev);
2677 +       struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2678 +
2679 +       strcpy(info->driver, IXMAC_NAME);
2680 +       strcpy(info->version, IXMAC_VERSION);
2681 +       if  (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
2682 +               snprintf(info->fw_version, 32, "%d.%d func [%d]",
2683 +                       npe->img_info[2], npe->img_info[3], npe->img_info[1]);
2684 +       }
2685 +       strncpy(info->bus_info, npe->plat->name, ETHTOOL_BUSINFO_LEN);
2686 +}
2687 +
2688 +static int ixmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2689 +{
2690 +       struct mac_info *mac = netdev_priv(dev);
2691 +       mii_ethtool_gset(&mac->mii, cmd);
2692 +       return 0;
2693 +}
2694 +
2695 +static int ixmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2696 +{
2697 +       struct mac_info *mac = netdev_priv(dev);
2698 +       int rc;
2699 +       rc = mii_ethtool_sset(&mac->mii, cmd);
2700 +       return rc;
2701 +}
2702 +
2703 +static int ixmac_nway_reset(struct net_device *dev)
2704 +{
2705 +       struct mac_info *mac = netdev_priv(dev);
2706 +       return mii_nway_restart(&mac->mii);
2707 +}
2708 +
2709 +static u32 ixmac_get_link(struct net_device *dev)
2710 +{
2711 +       struct mac_info *mac = netdev_priv(dev);
2712 +       return mii_link_ok(&mac->mii);
2713 +}
2714 +
2715 +static const int mac_reg_list[] = MAC_REG_LIST;
2716 +
2717 +static int ixmac_get_regs_len(struct net_device *dev)
2718 +{
2719 +       return ARRAY_SIZE(mac_reg_list);
2720 +}
2721 +
2722 +static void
2723 +ixmac_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
2724 +{
2725 +       int i;
2726 +       struct mac_info *mac = netdev_priv(dev);
2727 +       u8 *buf = regbuf;
2728 +
2729 +       for (i=0; i<regs->len; i++) {
2730 +               buf[i] = mac_read_reg(mac, mac_reg_list[i]);
2731 +       }
2732 +}
2733 +
2734 +static struct {
2735 +       const char str[ETH_GSTRING_LEN];
2736 +} ethtool_stats_keys[NPE_STAT_NUM + NPE_Q_STAT_NUM] = {
2737 +       NPE_Q_STAT_STRINGS
2738 +       NPE_STAT_STRINGS
2739 +};
2740 +
2741 +static void ixmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2742 +{
2743 +       struct mac_info *mac = netdev_priv(dev);
2744 +       memcpy(data, ethtool_stats_keys, mac->npe_stat_num * ETH_GSTRING_LEN);
2745 +}
2746 +
2747 +static int ixmac_get_stats_count(struct net_device *dev)
2748 +{
2749 +       struct mac_info *mac = netdev_priv(dev);
2750 +       return mac->npe_stat_num;
2751 +}
2752 +
2753 +static u32 ixmac_get_msglevel(struct net_device *dev)
2754 +{
2755 +       struct mac_info *mac = netdev_priv(dev);
2756 +       return mac->msg_enable;
2757 +}
2758 +
2759 +static void ixmac_set_msglevel(struct net_device *dev, u32 datum)
2760 +{
2761 +       struct mac_info *mac = netdev_priv(dev);
2762 +       mac->msg_enable = datum;
2763 +}
2764 +
2765 +static void ixmac_get_ethtool_stats(struct net_device *dev,
2766 +               struct ethtool_stats *stats, u64 *data)
2767 +{
2768 +       int i;
2769 +       struct mac_info *mac = netdev_priv(dev);
2770 +       u32 buf[NPE_STAT_NUM];
2771 +
2772 +       data[0] = queue_len(mac->rxq);
2773 +       data[1] = queue_len(mac->rxdoneq);
2774 +       data[2] = queue_len(mac->txq);
2775 +       data[3] = queue_len(tx_doneq);
2776 +
2777 +       get_npe_stats(mac, buf, sizeof(buf), 0);
2778 +
2779 +       for (i=0; i<stats->n_stats-4; i++) {
2780 +               data[i+4] = npe_to_cpu32(buf[i]);
2781 +       }
2782 +}
2783 +
2784 +static struct ethtool_ops ixmac_ethtool_ops = {
2785 +       .get_drvinfo            = ixmac_get_drvinfo,
2786 +       .get_settings           = ixmac_get_settings,
2787 +       .set_settings           = ixmac_set_settings,
2788 +       .nway_reset             = ixmac_nway_reset,
2789 +       .get_link               = ixmac_get_link,
2790 +       .get_msglevel           = ixmac_get_msglevel,
2791 +       .set_msglevel           = ixmac_set_msglevel,
2792 +       .get_regs_len           = ixmac_get_regs_len,
2793 +       .get_regs               = ixmac_get_regs,
2794 +       .get_perm_addr          = ethtool_op_get_perm_addr,
2795 +       .get_strings            = ixmac_get_strings,
2796 +       .get_stats_count        = ixmac_get_stats_count,
2797 +       .get_ethtool_stats      = ixmac_get_ethtool_stats,
2798 +};
2799 +
2800 +static void mac_mdio_thread(struct work_struct *work)
2801 +{
2802 +       struct mac_info *mac = container_of(work, struct mac_info,
2803 +                       mdio_thread.work);
2804 +       struct net_device *dev = mac->netdev;
2805 +
2806 +       media_check(dev, 0);
2807 +       schedule_delayed_work(&mac->mdio_thread, MDIO_INTERVAL);
2808 +}
2809 +
2810 +static int mac_probe(struct platform_device *pdev)
2811 +{
2812 +       struct resource *res;
2813 +       struct mac_info *mac;
2814 +       struct net_device *dev;
2815 +       struct npe_info *npe;
2816 +       struct mac_plat_info *plat = pdev->dev.platform_data;
2817 +       int size, ret;
2818 +
2819 +       if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0))) {
2820 +               return -EIO;
2821 +       }
2822 +       if (!(dev = alloc_etherdev (sizeof(struct mac_info)))) {
2823 +               return -ENOMEM;
2824 +       }
2825 +       SET_MODULE_OWNER(dev);
2826 +       SET_NETDEV_DEV(dev, &pdev->dev);
2827 +       mac = netdev_priv(dev);
2828 +       mac->netdev = dev;
2829 +
2830 +       size = res->end - res->start +1;
2831 +       mac->res = request_mem_region(res->start, size, IXMAC_NAME);
2832 +       if (!mac->res) {
2833 +               ret = -EBUSY;
2834 +               goto out_free;
2835 +       }
2836 +
2837 +       mac->addr = ioremap(res->start, size);
2838 +       if (!mac->addr) {
2839 +               ret = -ENOMEM;
2840 +               goto out_rel;
2841 +       }
2842 +
2843 +       dev->open = ixmac_open;
2844 +       dev->hard_start_xmit = ixmac_start_xmit;
2845 +       dev->poll = ix_poll;
2846 +       dev->stop = ixmac_close;
2847 +       dev->get_stats = ixmac_stats;
2848 +       dev->do_ioctl = ixmac_ioctl;
2849 +       dev->set_multicast_list = ixmac_set_rx_mode;
2850 +       dev->ethtool_ops = &ixmac_ethtool_ops;
2851 +
2852 +       dev->weight = 16;
2853 +       dev->tx_queue_len = 100;
2854 +
2855 +       mac->npe_dev = get_npe_by_id(plat->npe_id);
2856 +       if (!mac->npe_dev) {
2857 +               ret = -EIO;
2858 +               goto out_unmap;
2859 +       }
2860 +       npe = dev_get_drvdata(mac->npe_dev);
2861 +
2862 +       mac->rxq = request_queue(plat->rxq_id, 128);
2863 +       if (IS_ERR(mac->rxq)) {
2864 +               printk(KERN_ERR "Error requesting Q: %d\n", plat->rxq_id);
2865 +               ret = -EBUSY;
2866 +               goto out_putmod;
2867 +       }
2868 +       mac->txq = request_queue(plat->txq_id, 128);
2869 +       if (IS_ERR(mac->txq)) {
2870 +               printk(KERN_ERR "Error requesting Q: %d\n", plat->txq_id);
2871 +               ret = -EBUSY;
2872 +               goto out_putmod;
2873 +       }
2874 +       mac->rxdoneq = request_queue(plat->rxdoneq_id, 128);
2875 +       if (IS_ERR(mac->rxdoneq)) {
2876 +               printk(KERN_ERR "Error requesting Q: %d\n", plat->rxdoneq_id);
2877 +               ret = -EBUSY;
2878 +               goto out_putmod;
2879 +       }
2880 +       mac->rxdoneq->irq_cb = irqcb_recv;
2881 +       mac->rxdoneq->cb_data = dev;
2882 +       queue_set_watermarks(mac->rxdoneq, 0, 0);
2883 +       queue_set_irq_src(mac->rxdoneq, Q_IRQ_ID_NOT_E);
2884 +
2885 +       mac->qmgr = dev_get_drvdata(mac->rxq->dev);
2886 +       if (register_netdev (dev)) {
2887 +               ret = -EIO;
2888 +               goto out_putmod;
2889 +       }
2890 +
2891 +       mac->plat = plat;
2892 +       mac->npe_stat_num = NPE_STAT_NUM_BASE;
2893 +       mac->msg_enable = netif_msg_init(debug, MAC_DEF_MSG_ENABLE);
2894 +
2895 +       platform_set_drvdata(pdev, dev);
2896 +
2897 +       mac_write_reg(mac, MAC_CORE_CNTRL, CORE_RESET);
2898 +       udelay(500);
2899 +       mac_write_reg(mac, MAC_CORE_CNTRL, CORE_MDC_EN);
2900 +
2901 +       init_mdio(dev, plat->phy_id);
2902 +
2903 +       INIT_DELAYED_WORK(&mac->mdio_thread, mac_mdio_thread);
2904 +
2905 +       /* The place of the MAC address is very system dependent.
2906 +        * Here we use a random one to be replaced by one of the
2907 +        * following commands:
2908 +        * "ip link set address 02:03:04:04:04:01 dev eth0"
2909 +        * "ifconfig eth0 hw ether 02:03:04:04:04:07"
2910 +       */
2911 +
2912 +       if (is_zero_ether_addr(plat->hwaddr)) {
2913 +               random_ether_addr(dev->dev_addr);
2914 +               dev->dev_addr[5] = plat->phy_id;
2915 +       }
2916 +       else
2917 +               memcpy(dev->dev_addr, plat->hwaddr, 6);
2918 +
2919 +       printk(KERN_INFO IXMAC_NAME " driver " IXMAC_VERSION
2920 +                       ": %s on %s with PHY[%d] initialized\n",
2921 +                       dev->name, npe->plat->name, plat->phy_id);
2922 +
2923 +       return 0;
2924 +
2925 +out_putmod:
2926 +       if (mac->rxq)
2927 +               release_queue(mac->rxq);
2928 +       if (mac->txq)
2929 +               release_queue(mac->txq);
2930 +       if (mac->rxdoneq)
2931 +               release_queue(mac->rxdoneq);
2932 +       module_put(mac->npe_dev->driver->owner);
2933 +out_unmap:
2934 +       iounmap(mac->addr);
2935 +out_rel:
2936 +       release_resource(mac->res);
2937 +out_free:
2938 +       kfree(mac);
2939 +       return ret;
2940 +}
2941 +
2942 +static void drain_npe(struct mac_info *mac)
2943 +{
2944 +       struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2945 +       struct npe_cont *cont;
2946 +       u32 phys;
2947 +       int loop = 0;
2948 +
2949 +       /* Now there are some skbs held by the NPE.
2950 +        * We switch the MAC in loopback mode and send a pseudo packet
2951 +        * that will be returned by the NPE in its last SKB.
2952 +        * We will also try to isolate the PHY to keep the packets internal.
2953 +        */
2954 +
2955 +       if (mac->txq_pkt <2)
2956 +               mac->txq_pkt += init_buffer(tx_doneq, 5);
2957 +
2958 +       if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
2959 +               mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_MDC_EN);
2960 +               mac_set_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_LOOP_EN);
2961 +
2962 +               npe_mh_npe_loopback_mode(npe, mac->plat, 1);
2963 +               mdelay(200);
2964 +
2965 +               while (mac->rxq_pkt && loop++ < 2000 ) {
2966 +                       phys = queue_get_entry(tx_doneq) & ~0xf;
2967 +                       if (!phys)
2968 +                               break;
2969 +                       cont = dma_to_virt(queue->dev, phys);
2970 +                       /* actually the packets should never leave the system,
2971 +                        * but if they do, they shall contain 0s instead of
2972 +                        * interesting random data....
2973 +                        */
2974 +                       memset(cont->data, 0, 64);
2975 +                       cont->eth.pkt_len = 64;
2976 +                       dma_sync_single(mac->txq->dev, phys, 64 + DMA_HDR_SIZE,
2977 +                                       DMA_TO_DEVICE);
2978 +                       queue_put_entry(mac->txq, phys);
2979 +                       if (queue_stat(mac->txq) == 2) { /* overflow */
2980 +                               queue_put_entry(tx_doneq, phys);
2981 +                               break;
2982 +                       }
2983 +                       mdelay(1);
2984 +                       mac->rxq_pkt -= destroy_buffer(mac->rxdoneq,
2985 +                                       mac->rxq_pkt);
2986 +               }
2987 +               npe_mh_npe_loopback_mode(npe, mac->plat, 0);
2988 +       }
2989 +       /* Flush MAC TX fifo to drain the bogus packets */
2990 +       mac_set_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
2991 +       mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_RX_EN);
2992 +       mac_reset_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_TX_EN);
2993 +       mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_LOOP_EN);
2994 +       mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
2995 +       mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
2996 +}
2997 +
2998 +static int mac_remove(struct platform_device *pdev)
2999 +{
3000 +       struct net_device* dev = platform_get_drvdata(pdev);
3001 +       struct mac_info *mac = netdev_priv(dev);
3002 +
3003 +       unregister_netdev(dev);
3004 +
3005 +       mac->rxq_pkt -= destroy_buffer(mac->rxq, mac->rxq_pkt);
3006 +       if (mac->rxq_pkt)
3007 +               drain_npe(mac);
3008 +
3009 +       mac->txq_pkt -= destroy_buffer(mac->txq, mac->txq_pkt);
3010 +       mac->txq_pkt -= destroy_buffer(tx_doneq, mac->txq_pkt);
3011 +
3012 +       if (mac->rxq_pkt || mac->txq_pkt)
3013 +               printk("Buffers lost in NPE: RX:%d, TX:%d\n",
3014 +                               mac->rxq_pkt,  mac->txq_pkt);
3015 +
3016 +       release_queue(mac->txq);
3017 +       release_queue(mac->rxq);
3018 +       release_queue(mac->rxdoneq);
3019 +
3020 +       flush_scheduled_work();
3021 +       return_npe_dev(mac->npe_dev);
3022 +
3023 +       iounmap(mac->addr);
3024 +       release_resource(mac->res);
3025 +       platform_set_drvdata(pdev, NULL);
3026 +       free_netdev(dev);
3027 +       return 0;
3028 +}
3029 +
3030 +static struct platform_driver ixp4xx_mac = {
3031 +       .driver.name    = IXMAC_NAME,
3032 +       .probe          = mac_probe,
3033 +       .remove         = mac_remove,
3034 +};
3035 +
3036 +static int __init init_mac(void)
3037 +{
3038 +       /* The TX done Queue handles skbs sent out by the NPE */
3039 +       tx_doneq = request_queue(TX_DONE_QID, 128);
3040 +       if (IS_ERR(tx_doneq)) {
3041 +               printk(KERN_ERR "Error requesting Q: %d\n", TX_DONE_QID);
3042 +               return -EBUSY;
3043 +       }
3044 +       return platform_driver_register(&ixp4xx_mac);
3045 +}
3046 +
3047 +static void __exit finish_mac(void)
3048 +{
3049 +       platform_driver_unregister(&ixp4xx_mac);
3050 +       if (tx_doneq) {
3051 +               release_queue(tx_doneq);
3052 +       }
3053 +}
3054 +
3055 +module_init(init_mac);
3056 +module_exit(finish_mac);
3057 +
3058 +MODULE_LICENSE("GPL");
3059 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
3060 +
3061 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/npe.c
3062 ===================================================================
3063 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
3064 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/npe.c       2007-02-21 02:24:35.000000000 -0800
3065 @@ -0,0 +1,291 @@
3066 +
3067 +#include <linux/ixp_npe.h>
3068 +#include <asm/hardware.h>
3069 +
3070 +#define RESET_NPE_PARITY                0x0800
3071 +#define PARITY_BIT_MASK             0x3F00FFFF
3072 +#define CONFIG_CTRL_REG_MASK        0x3F3FFFFF
3073 +#define MAX_RETRIES                    1000000
3074 +#define NPE_PHYS_REG                        32
3075 +#define RESET_MBST_VAL              0x0000F0F0
3076 +#define NPE_REGMAP                 0x0000001E
3077 +#define INSTR_WR_REG_SHORT          0x0000C000
3078 +#define INSTR_WR_REG_BYTE           0x00004000
3079 +#define MASK_ECS_REG_0_NEXTPC       0x1FFF0000
3080 +
3081 +#define INSTR_RD_FIFO               0x0F888220
3082 +#define INSTR_RESET_MBOX            0x0FAC8210
3083 +
3084 +#define ECS_REG_0_LDUR                 8
3085 +#define ECS_REG_1_CCTXT               16
3086 +#define ECS_REG_1_SELCTXT              0
3087 +
3088 +#define ECS_BG_CTXT_REG_0           0x00
3089 +#define ECS_BG_CTXT_REG_1           0x01
3090 +#define ECS_BG_CTXT_REG_2           0x02
3091 +#define ECS_PRI_1_CTXT_REG_0        0x04
3092 +#define ECS_PRI_1_CTXT_REG_1        0x05
3093 +#define ECS_PRI_1_CTXT_REG_2        0x06
3094 +#define ECS_PRI_2_CTXT_REG_0        0x08
3095 +#define ECS_PRI_2_CTXT_REG_1        0x09
3096 +#define ECS_PRI_2_CTXT_REG_2        0x0A
3097 +#define ECS_DBG_CTXT_REG_0          0x0C
3098 +#define ECS_DBG_CTXT_REG_1          0x0D
3099 +#define ECS_DBG_CTXT_REG_2          0x0E
3100 +#define ECS_INSTRUCT_REG            0x11
3101 +
3102 +#define ECS_BG_CTXT_REG_0_RESET     0xA0000000
3103 +#define ECS_BG_CTXT_REG_1_RESET     0x01000000
3104 +#define ECS_BG_CTXT_REG_2_RESET     0x00008000
3105 +#define ECS_PRI_1_CTXT_REG_0_RESET  0x20000080
3106 +#define ECS_PRI_1_CTXT_REG_1_RESET  0x01000000
3107 +#define ECS_PRI_1_CTXT_REG_2_RESET  0x00008000
3108 +#define ECS_PRI_2_CTXT_REG_0_RESET  0x20000080
3109 +#define ECS_PRI_2_CTXT_REG_1_RESET  0x01000000
3110 +#define ECS_PRI_2_CTXT_REG_2_RESET  0x00008000
3111 +#define ECS_DBG_CTXT_REG_0_RESET    0x20000000
3112 +#define ECS_DBG_CTXT_REG_1_RESET    0x00000000
3113 +#define ECS_DBG_CTXT_REG_2_RESET    0x001E0000
3114 +#define ECS_INSTRUCT_REG_RESET      0x1003C00F
3115 +
3116 +static struct { u32 reg; u32 val; } ecs_reset[] =
3117 +{
3118 +    { ECS_BG_CTXT_REG_0,    ECS_BG_CTXT_REG_0_RESET },
3119 +    { ECS_BG_CTXT_REG_1,    ECS_BG_CTXT_REG_1_RESET },
3120 +    { ECS_BG_CTXT_REG_2,    ECS_BG_CTXT_REG_2_RESET },
3121 +    { ECS_PRI_1_CTXT_REG_0, ECS_PRI_1_CTXT_REG_0_RESET },
3122 +    { ECS_PRI_1_CTXT_REG_1, ECS_PRI_1_CTXT_REG_1_RESET },
3123 +    { ECS_PRI_1_CTXT_REG_2, ECS_PRI_1_CTXT_REG_2_RESET },
3124 +    { ECS_PRI_2_CTXT_REG_0, ECS_PRI_2_CTXT_REG_0_RESET },
3125 +    { ECS_PRI_2_CTXT_REG_1, ECS_PRI_2_CTXT_REG_1_RESET },
3126 +    { ECS_PRI_2_CTXT_REG_2, ECS_PRI_2_CTXT_REG_2_RESET },
3127 +    { ECS_DBG_CTXT_REG_0,   ECS_DBG_CTXT_REG_0_RESET },
3128 +    { ECS_DBG_CTXT_REG_1,   ECS_DBG_CTXT_REG_1_RESET },
3129 +    { ECS_DBG_CTXT_REG_2,   ECS_DBG_CTXT_REG_2_RESET },
3130 +    { ECS_INSTRUCT_REG,     ECS_INSTRUCT_REG_RESET }
3131 +};
3132 +
3133 +/* actually I have no idea what I'm doing here !!
3134 + * I only rewrite the "reset" sequence the way Intel does it.
3135 + */
3136 +
3137 +static void npe_debg_preexec(struct npe_info *npe)
3138 +{
3139 +       u32 r = IX_NPEDL_MASK_ECS_DBG_REG_2_IF | IX_NPEDL_MASK_ECS_DBG_REG_2_IE;
3140 +
3141 +       npe->exec_count = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXCT);
3142 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCT, 0);
3143 +       npe->ctx_reg2 = npe_read_ecs_reg(npe, ECS_DBG_CTXT_REG_2);
3144 +       npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_2, npe->ctx_reg2 | r);
3145 +}
3146 +
3147 +static void npe_debg_postexec(struct npe_info *npe)
3148 +{
3149 +       npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_0, 0);
3150 +       npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3151 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCT, npe->exec_count);
3152 +       npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_2, npe->ctx_reg2);
3153 +}
3154 +
3155 +static int
3156 +npe_debg_inst_exec(struct npe_info *npe, u32 instr, u32 ctx, u32 ldur)
3157 +{
3158 +       u32 regval, wc;
3159 +       int c = 0;
3160 +
3161 +       regval = IX_NPEDL_MASK_ECS_REG_0_ACTIVE |
3162 +               (ldur << ECS_REG_0_LDUR);
3163 +       npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_0 , regval);
3164 +       /* set CCTXT at ECS DEBUG L3 to specify in which context
3165 +        * to execute the instruction
3166 +        */
3167 +       regval = (ctx << ECS_REG_1_CCTXT) |
3168 +                (ctx << ECS_REG_1_SELCTXT);
3169 +       npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_1, regval);
3170 +
3171 +       /* clear the pipeline */
3172 +       npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3173 +
3174 +       /* load NPE instruction into the instruction register */
3175 +       npe_write_ecs_reg(npe, ECS_INSTRUCT_REG, instr);
3176 +       /* we need this value later to wait for
3177 +        * completion of NPE execution step
3178 +        */
3179 +       wc = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WC);
3180 +       npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_STEP);
3181 +
3182 +       /* Watch Count register increments when NPE completes an instruction */
3183 +       while (wc == npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WC) &&
3184 +                       ++c < MAX_RETRIES);
3185 +
3186 +       if (c >= MAX_RETRIES) {
3187 +               printk(KERN_ERR "%s reset:npe_debg_inst_exec(): Timeout\n",
3188 +                               npe->plat->name);
3189 +               return 1;
3190 +       }
3191 +       return 0;
3192 +}
3193 +
3194 +static int npe_logical_reg_write8(struct npe_info *npe, u32 addr, u32 val)
3195 +{
3196 +       u32 instr;
3197 +       val &= 0xff;
3198 +       /* here we build the NPE assembler instruction:
3199 +        * mov8 d0, #0 */
3200 +       instr = INSTR_WR_REG_BYTE |     /* OpCode */
3201 +               addr << 9 |             /* base Operand */
3202 +               (val & 0x1f) << 4 |     /* lower 5 bits to immediate data */
3203 +               (val & ~0x1f) << (18-5);/* higher 3 bits to CoProc instr. */
3204 +       /* and execute it */
3205 +       return npe_debg_inst_exec(npe, instr, 0, 1);
3206 +}
3207 +
3208 +static int npe_logical_reg_write16(struct npe_info *npe, u32 addr, u32 val)
3209 +{
3210 +       u32 instr;
3211 +       /* here we build the NPE assembler instruction:
3212 +        * mov16 d0, #0 */
3213 +       val &= 0xffff;
3214 +       instr = INSTR_WR_REG_SHORT |    /* OpCode */
3215 +               addr << 9 |             /* base Operand */
3216 +               (val & 0x1f) << 4 |     /* lower 5 bits to immediate data */
3217 +               (val & ~0x1f) << (18-5);/* higher 11 bits to CoProc instr. */
3218 +       /* and execute it */
3219 +       return npe_debg_inst_exec(npe, instr, 0, 1);
3220 +}
3221 +
3222 +static int npe_logical_reg_write32(struct npe_info *npe, u32 addr, u32 val)
3223 +{
3224 +       /* write in 16 bit steps first the high and then the low value */
3225 +       npe_logical_reg_write16(npe, addr, val >> 16);
3226 +       return npe_logical_reg_write16(npe, addr+2, val & 0xffff);
3227 +}
3228 +
3229 +void npe_reset(struct npe_info *npe)
3230 +{
3231 +       u32 reg, cfg_ctrl;
3232 +       int i;
3233 +       struct { u32 reset; int addr; int size; } ctx_reg[] = {
3234 +               { 0x80,  0x1b, 8  },
3235 +               { 0,     0x1c, 16 },
3236 +               { 0x820, 0x1e, 16 },
3237 +               { 0,     0x1f, 8  }
3238 +       }, *cr;
3239 +
3240 +       cfg_ctrl = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_CTL);
3241 +       cfg_ctrl |= 0x3F000000;
3242 +       /* disable the parity interrupt */
3243 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL, cfg_ctrl & PARITY_BIT_MASK);
3244 +
3245 +       npe_debg_preexec(npe);
3246 +
3247 +       /* clear the FIFOs */
3248 +       while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WFIFO) ==
3249 +                                       IX_NPEDL_MASK_WFIFO_VALID);
3250 +       while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) ==
3251 +                                       IX_NPEDL_MASK_STAT_OFNE)
3252 +       {
3253 +               u32 reg;
3254 +               reg = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_FIFO);
3255 +               printk("%s reset: Read FIFO:=%x\n", npe->plat->name, reg);
3256 +       }
3257 +       while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) ==
3258 +                                       IX_NPEDL_MASK_STAT_IFNE) {
3259 +               npe_debg_inst_exec(npe, INSTR_RD_FIFO, 0, 0);
3260 +       }
3261 +
3262 +       /*  Reset the mailbox reg */
3263 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_MBST, RESET_MBST_VAL);
3264 +       npe_debg_inst_exec(npe, INSTR_RESET_MBOX, 0, 0);
3265 +
3266 +       /* Reset the physical registers in the NPE register file */
3267 +       for (i=0; i<NPE_PHYS_REG; i++) {
3268 +               npe_logical_reg_write16(npe, NPE_REGMAP, i >> 1);
3269 +               npe_logical_reg_write32(npe, (i&1) *4, 0);
3270 +       }
3271 +
3272 +       /* Reset the context store. Iterate over the 16 ctx s */
3273 +       for(i=0; i<16; i++) {
3274 +               for (reg=0; reg<4; reg++) {
3275 +                       /* There is no (STEVT) register for Context 0.
3276 +                        * ignore if register=0 and ctx=0 */
3277 +                       if (!(reg || i))
3278 +                               continue;
3279 +                        /* Context 0 has no STARTPC. Instead, this value is
3280 +                         * used to set NextPC for Background ECS,
3281 +                         * to set where NPE starts executing code
3282 +                         */
3283 +                       if (!i && reg==1) {
3284 +                               u32 r;
3285 +                               r = npe_read_ecs_reg(npe, ECS_BG_CTXT_REG_0);
3286 +                               r &= ~MASK_ECS_REG_0_NEXTPC;
3287 +                               r |= (cr->reset << 16) & MASK_ECS_REG_0_NEXTPC;
3288 +                               continue;
3289 +                       }
3290 +                       cr = ctx_reg + reg;
3291 +                       switch (cr->size) {
3292 +                               case 8:
3293 +                                       npe_logical_reg_write8(npe, cr->addr,
3294 +                                               cr->reset);
3295 +                                       break;
3296 +                               case 16:
3297 +                                       npe_logical_reg_write16(npe, cr->addr,
3298 +                                               cr->reset);
3299 +                       }
3300 +               }
3301 +       }
3302 +       npe_debg_postexec(npe);
3303 +
3304 +       for (i=0; i< ARRAY_SIZE(ecs_reset); i++) {
3305 +               npe_write_ecs_reg(npe, ecs_reset[i].reg, ecs_reset[i].val);
3306 +       }
3307 +       npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT);
3308 +
3309 +       for (i=IX_NPEDL_REG_OFFSET_EXCT; i<=IX_NPEDL_REG_OFFSET_AP3; i+=4) {
3310 +               npe_reg_write(npe, i, 0);
3311 +       }
3312 +
3313 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_WC, 0);
3314 +
3315 +       reg = *IXP4XX_EXP_CFG2;
3316 +       reg |= 0x800 << npe->plat->id;  /* IX_FUSE_NPE[ABC] */
3317 +       *IXP4XX_EXP_CFG2 = reg;
3318 +       reg &= ~(0x800 << npe->plat->id);  /* IX_FUSE_NPE[ABC] */
3319 +       *IXP4XX_EXP_CFG2 = reg;
3320 +
3321 +       npe_stop(npe);
3322 +
3323 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL,
3324 +                       cfg_ctrl & CONFIG_CTRL_REG_MASK);
3325 +       npe->loaded = 0;
3326 +}
3327 +
3328 +
3329 +void npe_stop(struct npe_info *npe)
3330 +{
3331 +       npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_STOP);
3332 +       npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3333 +}
3334 +
3335 +static void npe_reset_active(struct npe_info *npe, u32 reg)
3336 +{
3337 +       u32 regval;
3338 +
3339 +       regval = npe_read_ecs_reg(npe, reg);
3340 +       regval &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
3341 +       npe_write_ecs_reg(npe, reg, regval);
3342 +}
3343 +
3344 +void npe_start(struct npe_info *npe)
3345 +{
3346 +       npe_reset_active(npe, IX_NPEDL_ECS_PRI_1_CTXT_REG_0);
3347 +       npe_reset_active(npe, IX_NPEDL_ECS_PRI_2_CTXT_REG_0);
3348 +       npe_reset_active(npe, IX_NPEDL_ECS_DBG_CTXT_REG_0);
3349 +
3350 +       npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3351 +       npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_START);
3352 +}
3353 +
3354 +EXPORT_SYMBOL(npe_stop);
3355 +EXPORT_SYMBOL(npe_start);
3356 +EXPORT_SYMBOL(npe_reset);
3357 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/npe_mh.c
3358 ===================================================================
3359 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
3360 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/npe_mh.c    2007-02-21 02:24:35.000000000 -0800
3361 @@ -0,0 +1,170 @@
3362 +/*
3363 + * npe_mh.c - NPE message handler.
3364 + *
3365 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
3366 + *
3367 + * This file is released under the GPLv2
3368 + */
3369 +
3370 +#include <linux/ixp_npe.h>
3371 +#include <linux/slab.h>
3372 +
3373 +#define MAX_RETRY 200
3374 +
3375 +struct npe_mh_msg {
3376 +       union {
3377 +       u8 byte[8]; /* Very descriptive name, I know ... */
3378 +               u32 data[2];
3379 +       } u;
3380 +};
3381 +
3382 +/*
3383 + * The whole code in this function must be reworked.
3384 + * It is in a state that works but is not rock solid
3385 + */
3386 +static int send_message(struct npe_info *npe, struct npe_mh_msg *msg)
3387 +{
3388 +       int i,j;
3389 +       u32 send[2], recv[2];
3390 +
3391 +       for (i=0; i<2; i++)
3392 +               send[i] = be32_to_cpu(msg->u.data[i]);
3393 +
3394 +       if ((npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3395 +                               IX_NPEMH_NPE_STAT_IFNE))
3396 +               return -1;
3397 +
3398 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_FIFO, send[0]);
3399 +       for(i=0; i<MAX_RETRY; i++) {
3400 +               /* if the IFNF status bit is unset then the inFIFO is full */
3401 +               if (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3402 +                               IX_NPEMH_NPE_STAT_IFNF)
3403 +                       break;
3404 +       }
3405 +       if (i>=MAX_RETRY)
3406 +               return -1;
3407 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_FIFO, send[1]);
3408 +       i=0;
3409 +       while (!(npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3410 +                                       IX_NPEMH_NPE_STAT_OFNE)) {
3411 +               if (i++>MAX_RETRY) {
3412 +                       printk("Waiting for Output FIFO NotEmpty failed\n");
3413 +                       return -1;
3414 +               }
3415 +       }
3416 +       //printk("Output FIFO Not Empty. Loops: %d\n", i);
3417 +       j=0;
3418 +       while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3419 +                                       IX_NPEMH_NPE_STAT_OFNE) {
3420 +               recv[j&1] = npe_reg_read(npe,IX_NPEDL_REG_OFFSET_FIFO);
3421 +               j++;
3422 +       }
3423 +       if ((recv[0] != send[0]) || (recv[1] != send[1])) {
3424 +               if (send[0] || send[1]) {
3425 +                       /* all CMDs return the complete message as answer,
3426 +                        * only GETSTATUS returns the ImageID of the NPE
3427 +                        */
3428 +                       printk("Unexpected answer: "
3429 +                               "Send %08x:%08x Ret %08x:%08x\n",
3430 +                               send[0], send[1], recv[0], recv[1]);
3431 +               }
3432 +       }
3433 +       return 0;
3434 +}
3435 +
3436 +#define CMD  0
3437 +#define PORT 1
3438 +#define MAC  2
3439 +
3440 +#define IX_ETHNPE_NPE_GETSTATUS                        0x00
3441 +#define IX_ETHNPE_EDB_SETPORTADDRESS            0x01
3442 +#define IX_ETHNPE_GETSTATS                     0x04
3443 +#define IX_ETHNPE_RESETSTATS                   0x05
3444 +#define IX_ETHNPE_FW_SETFIREWALLMODE            0x0E
3445 +#define IX_ETHNPE_VLAN_SETRXQOSENTRY            0x0B
3446 +#define IX_ETHNPE_SETLOOPBACK_MODE             0x12
3447 +
3448 +#define logical_id(mp) (((mp)->npe_id << 4) | ((mp)->port_id & 0xf))
3449 +
3450 +int npe_mh_status(struct npe_info *npe)
3451 +{
3452 +       struct npe_mh_msg msg;
3453 +
3454 +       memset(&msg, 0, sizeof(msg));
3455 +       msg.u.byte[CMD] = IX_ETHNPE_NPE_GETSTATUS;
3456 +       return send_message(npe, &msg);
3457 +}
3458 +
3459 +int npe_mh_setportaddr(struct npe_info *npe, struct mac_plat_info *mp,
3460 +               u8 *macaddr)
3461 +{
3462 +       struct npe_mh_msg msg;
3463 +
3464 +       msg.u.byte[CMD] = IX_ETHNPE_EDB_SETPORTADDRESS;
3465 +       msg.u.byte[PORT] = mp->eth_id;
3466 +       memcpy(msg.u.byte + MAC, macaddr, 6);
3467 +
3468 +       return send_message(npe, &msg);
3469 +}
3470 +
3471 +int npe_mh_disable_firewall(struct npe_info *npe, struct mac_plat_info *mp)
3472 +{
3473 +       struct npe_mh_msg msg;
3474 +
3475 +       memset(&msg, 0, sizeof(msg));
3476 +       msg.u.byte[CMD] = IX_ETHNPE_FW_SETFIREWALLMODE;
3477 +       msg.u.byte[PORT] = logical_id(mp);
3478 +
3479 +       return send_message(npe, &msg);
3480 +}
3481 +
3482 +int npe_mh_npe_loopback_mode(struct npe_info *npe, struct mac_plat_info *mp,
3483 +               int enable)
3484 +{
3485 +       struct npe_mh_msg msg;
3486 +
3487 +       memset(&msg, 0, sizeof(msg));
3488 +       msg.u.byte[CMD] = IX_ETHNPE_SETLOOPBACK_MODE;
3489 +       msg.u.byte[PORT] = logical_id(mp);
3490 +       msg.u.byte[3] = enable ? 1 : 0;
3491 +
3492 +       return send_message(npe, &msg);
3493 +}
3494 +
3495 +int npe_mh_set_rxqid(struct npe_info *npe, struct mac_plat_info *mp, int qid)
3496 +{
3497 +       struct npe_mh_msg msg;
3498 +       int i, ret;
3499 +
3500 +       memset(&msg, 0, sizeof(msg));
3501 +       msg.u.byte[CMD] = IX_ETHNPE_VLAN_SETRXQOSENTRY;
3502 +       msg.u.byte[PORT] = logical_id(mp);
3503 +       msg.u.byte[5] = qid | 0x80;
3504 +       msg.u.byte[7] = qid<<4;
3505 +       for(i=0; i<8; i++) {
3506 +               msg.u.byte[3] = i;
3507 +               if ((ret = send_message(npe, &msg)))
3508 +                       return ret;
3509 +       }
3510 +       return 0;
3511 +}
3512 +
3513 +int npe_mh_get_stats(struct npe_info *npe, struct mac_plat_info *mp, u32 phys,
3514 +               int reset)
3515 +{
3516 +       struct npe_mh_msg msg;
3517 +       memset(&msg, 0, sizeof(msg));
3518 +       msg.u.byte[CMD] = reset ? IX_ETHNPE_RESETSTATS : IX_ETHNPE_GETSTATS;
3519 +       msg.u.byte[PORT] = logical_id(mp);
3520 +       msg.u.data[1] = cpu_to_npe32(cpu_to_be32(phys));
3521 +
3522 +       return send_message(npe, &msg);
3523 +}
3524 +
3525 +
3526 +EXPORT_SYMBOL(npe_mh_status);
3527 +EXPORT_SYMBOL(npe_mh_setportaddr);
3528 +EXPORT_SYMBOL(npe_mh_disable_firewall);
3529 +EXPORT_SYMBOL(npe_mh_set_rxqid);
3530 +EXPORT_SYMBOL(npe_mh_npe_loopback_mode);
3531 +EXPORT_SYMBOL(npe_mh_get_stats);
3532 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/phy.c
3533 ===================================================================
3534 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
3535 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/phy.c       2007-02-21 02:24:35.000000000 -0800
3536 @@ -0,0 +1,113 @@
3537 +/*
3538 + * phy.c - MDIO functions and mii initialisation
3539 + *
3540 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
3541 + *
3542 + * This file is released under the GPLv2
3543 + */
3544 +
3545 +
3546 +#include <linux/mutex.h>
3547 +#include "mac.h"
3548 +
3549 +#define MAX_PHYS (1<<5)
3550 +
3551 +/*
3552 + * We must always use the same MAC for accessing the MDIO
3553 + * We may not use each MAC for its PHY :-(
3554 + */
3555 +
3556 +static struct net_device *phy_dev = NULL;
3557 +static struct mutex mtx;
3558 +
3559 +/* here we remember if the PHY is alive, to avoid log dumping */
3560 +static int phy_works[MAX_PHYS];
3561 +
3562 +int mdio_read_register(struct net_device *dev, int phy_addr, int phy_reg)
3563 +{
3564 +       struct mac_info *mac;
3565 +       u32 cmd, reg;
3566 +       int cnt = 0;
3567 +
3568 +       if (!phy_dev)
3569 +               return 0;
3570 +
3571 +       mac = netdev_priv(phy_dev);
3572 +       cmd = mdio_cmd(phy_addr, phy_reg);
3573 +       mutex_lock_interruptible(&mtx);
3574 +       mac_mdio_cmd_write(mac, cmd);
3575 +       while((cmd = mac_mdio_cmd_read(mac)) & MII_GO) {
3576 +               if (++cnt >= 100) {
3577 +                       printk("%s: PHY[%d] access failed\n",
3578 +                               dev->name, phy_addr);
3579 +                       break;
3580 +               }
3581 +               schedule();
3582 +       }
3583 +       reg = mac_mdio_status_read(mac);
3584 +       mutex_unlock(&mtx);
3585 +       if (reg & MII_READ_FAIL) {
3586 +               if (phy_works[phy_addr]) {
3587 +                       printk("%s: PHY[%d] unresponsive\n",
3588 +                                       dev->name, phy_addr);
3589 +               }
3590 +               reg = 0;
3591 +               phy_works[phy_addr] = 0;
3592 +       } else {
3593 +               if ( !phy_works[phy_addr]) {
3594 +                       printk("%s: PHY[%d] responsive again\n",
3595 +                               dev->name, phy_addr);
3596 +               }
3597 +               phy_works[phy_addr] = 1;
3598 +       }
3599 +       return reg & 0xffff;
3600 +}
3601 +
3602 +void
3603 +mdio_write_register(struct net_device *dev, int phy_addr, int phy_reg, int val)
3604 +{
3605 +       struct mac_info *mac;
3606 +       u32 cmd;
3607 +       int cnt=0;
3608 +
3609 +       if (!phy_dev)
3610 +               return;
3611 +
3612 +       mac = netdev_priv(phy_dev);
3613 +       cmd = mdio_cmd(phy_addr, phy_reg) | MII_WRITE | val;
3614 +
3615 +       mutex_lock_interruptible(&mtx);
3616 +       mac_mdio_cmd_write(mac, cmd);
3617 +       while((cmd = mac_mdio_cmd_read(mac)) & MII_GO) {
3618 +               if (++cnt >= 100) {
3619 +                       printk("%s: PHY[%d] access failed\n",
3620 +                                       dev->name, phy_addr);
3621 +                       break;
3622 +               }
3623 +               schedule();
3624 +       }
3625 +       mutex_unlock(&mtx);
3626 +}
3627 +
3628 +void init_mdio(struct net_device *dev, int phy_id)
3629 +{
3630 +       struct mac_info *mac = netdev_priv(dev);
3631 +       int i;
3632 +
3633 +       /* All phy operations should use the same MAC
3634 +        * (my experience)
3635 +        */
3636 +       if (mac->plat->eth_id == 0) {
3637 +               mutex_init(&mtx);
3638 +               phy_dev = dev;
3639 +               for (i=0; i<MAX_PHYS; i++)
3640 +                       phy_works[i] = 1;
3641 +       }
3642 +       mac->mii.dev = dev;
3643 +       mac->mii.phy_id = phy_id;
3644 +       mac->mii.phy_id_mask = MAX_PHYS - 1;
3645 +       mac->mii.reg_num_mask = 0x1f;
3646 +       mac->mii.mdio_read = mdio_read_register;
3647 +       mac->mii.mdio_write = mdio_write_register;
3648 +}
3649 +
3650 Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ucode_dl.c
3651 ===================================================================
3652 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
3653 +++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ucode_dl.c  2007-02-21 02:24:35.000000000 -0800
3654 @@ -0,0 +1,479 @@
3655 +/*
3656 + * ucode_dl.c - provide an NPE device and a char-dev for microcode download
3657 + *
3658 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
3659 + *
3660 + * This file is released under the GPLv2
3661 + */
3662 +
3663 +#include <linux/kernel.h>
3664 +#include <linux/module.h>
3665 +#include <linux/miscdevice.h>
3666 +#include <linux/platform_device.h>
3667 +#include <linux/fs.h>
3668 +#include <linux/init.h>
3669 +#include <linux/slab.h>
3670 +#include <linux/firmware.h>
3671 +#include <linux/dma-mapping.h>
3672 +#include <linux/byteorder/swab.h>
3673 +#include <asm/uaccess.h>
3674 +#include <asm/io.h>
3675 +
3676 +#include <linux/ixp_npe.h>
3677 +
3678 +#define IXNPE_VERSION "IXP4XX NPE driver Version 0.3.0"
3679 +
3680 +#define DL_MAGIC 0xfeedf00d
3681 +#define DL_MAGIC_SWAP 0x0df0edfe
3682 +
3683 +#define EOF_BLOCK 0xf
3684 +#define IMG_SIZE(image) (((image)->size * sizeof(u32)) + \
3685 +               sizeof(struct dl_image))
3686 +
3687 +#define BT_INSTR 0
3688 +#define BT_DATA 1
3689 +
3690 +enum blk_type {
3691 +       instruction,
3692 +       data,
3693 +};
3694 +
3695 +struct dl_block {
3696 +       u32 type;
3697 +       u32 offset;
3698 +};
3699 +
3700 +struct dl_image {
3701 +       u32 magic;
3702 +       u32 id;
3703 +       u32 size;
3704 +       union {
3705 +               u32 data[0];
3706 +               struct dl_block block[0];
3707 +       } u;
3708 +};
3709 +
3710 +struct dl_codeblock {
3711 +       u32 npe_addr;
3712 +       u32 size;
3713 +       u32 data[0];
3714 +};
3715 +
3716 +static struct platform_driver ixp4xx_npe_driver;
3717 +
3718 +static int match_by_npeid(struct device *dev, void *id)
3719 +{
3720 +       struct npe_info *npe = dev_get_drvdata(dev);
3721 +       if (!npe->plat)
3722 +               return 0;
3723 +       return (npe->plat->id == *(int*)id);
3724 +}
3725 +
3726 +struct device *get_npe_by_id(int id)
3727 +{
3728 +       struct device *dev = driver_find_device(&ixp4xx_npe_driver.driver,
3729 +                       NULL, &id, match_by_npeid);
3730 +       if (dev) {
3731 +               struct npe_info *npe = dev_get_drvdata(dev);
3732 +               if (!try_module_get(THIS_MODULE)) {
3733 +                        put_device(dev);
3734 +                        return NULL;
3735 +               }
3736 +               npe->usage++;
3737 +       }
3738 +       return dev;
3739 +}
3740 +
3741 +void return_npe_dev(struct device *dev)
3742 +{
3743 +       struct npe_info *npe = dev_get_drvdata(dev);
3744 +       put_device(dev);
3745 +       module_put(THIS_MODULE);
3746 +       npe->usage--;
3747 +}
3748 +
3749 +static int
3750 +download_block(struct npe_info *npe, struct dl_codeblock *cb, unsigned type)
3751 +{
3752 +       int i;
3753 +       int cmd;
3754 +
3755 +       switch (type) {
3756 +       case BT_DATA:
3757 +               cmd = IX_NPEDL_EXCTL_CMD_WR_DATA_MEM;
3758 +               if (cb->npe_addr + cb->size > npe->plat->data_size) {
3759 +                       printk(KERN_INFO "Data size too large: %d+%d > %d\n",
3760 +                               cb->npe_addr, cb->size, npe->plat->data_size);
3761 +                       return -EIO;
3762 +               }
3763 +               break;
3764 +       case BT_INSTR:
3765 +               cmd = IX_NPEDL_EXCTL_CMD_WR_INS_MEM;
3766 +               if (cb->npe_addr + cb->size > npe->plat->inst_size) {
3767 +                       printk(KERN_INFO "Instr size too large: %d+%d > %d\n",
3768 +                               cb->npe_addr, cb->size, npe->plat->inst_size);
3769 +                       return -EIO;
3770 +               }
3771 +               break;
3772 +       default:
3773 +               printk(KERN_INFO "Unknown CMD: %d\n", type);
3774 +               return -EIO;
3775 +       }
3776 +
3777 +       for (i=0; i < cb->size; i++) {
3778 +               npe_write_cmd(npe, cb->npe_addr + i, cb->data[i], cmd);
3779 +       }
3780 +
3781 +       return 0;
3782 +}
3783 +
3784 +static int store_npe_image(struct dl_image *image, struct device *dev)
3785 +{
3786 +       struct dl_block *blk;
3787 +       struct dl_codeblock *cb;
3788 +       struct npe_info *npe;
3789 +       int ret=0;
3790 +
3791 +       if (!dev) {
3792 +               dev = get_npe_by_id( (image->id >> 24) & 0xf);
3793 +               return_npe_dev(dev);
3794 +       }
3795 +       if (!dev)
3796 +               return -ENODEV;
3797 +
3798 +       npe = dev_get_drvdata(dev);
3799 +       if (npe->loaded && (npe->usage > 0)) {
3800 +               printk(KERN_INFO "Cowardly refusing to reload an Image "
3801 +                       "into the used and running %s\n", npe->plat->name);
3802 +               return 0; /* indicate success anyway... */
3803 +       }
3804 +       if (!cpu_is_ixp46x() && ((image->id >> 28) & 0xf)) {
3805 +               printk(KERN_INFO "IXP46x NPE image ignored on IXP42x\n");
3806 +               return -EIO;
3807 +       }
3808 +
3809 +       npe_stop(npe);
3810 +       npe_reset(npe);
3811 +
3812 +       for (blk = image->u.block; blk->type != EOF_BLOCK; blk++) {
3813 +               if (blk->offset > image->size) {
3814 +                       printk(KERN_INFO "Block offset out of range\n");
3815 +                       return -EIO;
3816 +               }
3817 +               cb = (struct dl_codeblock*)&image->u.data[blk->offset];
3818 +               if (blk->offset + cb->size + 2 > image->size) {
3819 +                       printk(KERN_INFO "Codeblock size out of range\n");
3820 +                       return -EIO;
3821 +               }
3822 +               if ((ret = download_block(npe, cb, blk->type)))
3823 +                       return ret;
3824 +       }
3825 +       *(u32*)npe->img_info = cpu_to_be32(image->id);
3826 +       npe_start(npe);
3827 +
3828 +       printk(KERN_INFO "Image loaded to %s Func:%x, Rel: %x:%x, Status: %x\n",
3829 +                       npe->plat->name, npe->img_info[1], npe->img_info[2],
3830 +                       npe->img_info[3], npe_status(npe));
3831 +       if (npe_mh_status(npe)) {
3832 +               printk(KERN_ERR "%s not responding\n", npe->plat->name);
3833 +       }
3834 +       npe->loaded = 1;
3835 +       return 0;
3836 +}
3837 +
3838 +static int ucode_open(struct inode *inode, struct file *file)
3839 +{
3840 +       file->private_data = kmalloc(sizeof(struct dl_image), GFP_KERNEL);
3841 +       if (!file->private_data)
3842 +               return -ENOMEM;
3843 +       return 0;
3844 +}
3845 +
3846 +static int ucode_close(struct inode *inode, struct file *file)
3847 +{
3848 +       kfree(file->private_data);
3849 +       return 0;
3850 +}
3851 +
3852 +static ssize_t ucode_write(struct file *file, const char __user *buf,
3853 +               size_t count, loff_t *ppos)
3854 +{
3855 +       union {
3856 +               char *data;
3857 +               struct dl_image *image;
3858 +       } u;
3859 +       const char __user *cbuf = buf;
3860 +
3861 +       u.data = file->private_data;
3862 +
3863 +       while (count) {
3864 +               int len;
3865 +               if (*ppos < sizeof(struct dl_image)) {
3866 +                       len = sizeof(struct dl_image) - *ppos;
3867 +                       len = len > count ? count : len;
3868 +                       if (copy_from_user(u.data + *ppos, cbuf, len))
3869 +                               return -EFAULT;
3870 +                       count -= len;
3871 +                       *ppos += len;
3872 +                       cbuf += len;
3873 +                       continue;
3874 +               } else if (*ppos == sizeof(struct dl_image)) {
3875 +                       void *data;
3876 +                       if (u.image->magic == DL_MAGIC_SWAP) {
3877 +                               printk(KERN_INFO "swapped image found\n");
3878 +                               u.image->id = swab32(u.image->id);
3879 +                               u.image->size = swab32(u.image->size);
3880 +                       } else if (u.image->magic != DL_MAGIC) {
3881 +                               printk(KERN_INFO "Bad magic:%x\n",
3882 +                                               u.image->magic);
3883 +                               return -EFAULT;
3884 +                       }
3885 +                       len = IMG_SIZE(u.image);
3886 +                       data = kmalloc(len, GFP_KERNEL);
3887 +                       if (!data)
3888 +                               return -ENOMEM;
3889 +                       memcpy(data, u.data, *ppos);
3890 +                       kfree(u.data);
3891 +                       u.data = (char*)data;
3892 +                       file->private_data = data;
3893 +               }
3894 +               len = IMG_SIZE(u.image) - *ppos;
3895 +               len = len > count ? count : len;
3896 +               if (copy_from_user(u.data + *ppos, cbuf, len))
3897 +                       return -EFAULT;
3898 +               count -= len;
3899 +               *ppos += len;
3900 +               cbuf += len;
3901 +               if (*ppos == IMG_SIZE(u.image)) {
3902 +                       int ret, i;
3903 +                       *ppos = 0;
3904 +                       if (u.image->magic == DL_MAGIC_SWAP) {
3905 +                               for (i=0; i<u.image->size; i++) {
3906 +                                       u.image->u.data[i] =
3907 +                                               swab32(u.image->u.data[i]);
3908 +                               }
3909 +                               u.image->magic = swab32(u.image->magic);
3910 +                       }
3911 +                       ret = store_npe_image(u.image, NULL);
3912 +                       if (ret) {
3913 +                               printk(KERN_INFO "Error in NPE image: %x\n",
3914 +                                       u.image->id);
3915 +                               return ret;
3916 +                       }
3917 +               }
3918 +       }
3919 +       return (cbuf-buf);
3920 +}
3921 +
3922 +static void npe_firmware_probe(struct device *dev)
3923 +{
3924 +#if (defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)) \
3925 +       && defined(MODULE)
3926 +       const struct firmware *fw_entry;
3927 +       struct npe_info *npe = dev_get_drvdata(dev);
3928 +       struct dl_image *image;
3929 +       int ret = -1, i;
3930 +
3931 +       if (request_firmware(&fw_entry, npe->plat->name, dev) != 0) {
3932 +               return;
3933 +       }
3934 +       image = (struct dl_image*)fw_entry->data;
3935 +       /* Sanity checks */
3936 +       if (fw_entry->size < sizeof(struct dl_image)) {
3937 +               printk(KERN_ERR "Firmware error: too small\n");
3938 +               goto out;
3939 +       }
3940 +       if (image->magic == DL_MAGIC_SWAP) {
3941 +               printk(KERN_INFO "swapped image found\n");
3942 +               image->id = swab32(image->id);
3943 +               image->size = swab32(image->size);
3944 +       } else if (image->magic != DL_MAGIC) {
3945 +               printk(KERN_ERR "Bad magic:%x\n", image->magic);
3946 +               goto out;
3947 +       }
3948 +       if (IMG_SIZE(image) != fw_entry->size) {
3949 +               printk(KERN_ERR "Firmware error: bad size\n");
3950 +               goto out;
3951 +       }
3952 +       if (((image->id >> 24) & 0xf) != npe->plat->id) {
3953 +               printk(KERN_ERR "NPE id missmatch\n");
3954 +               goto out;
3955 +       }
3956 +       if (image->magic == DL_MAGIC_SWAP) {
3957 +               for (i=0; i<image->size; i++) {
3958 +                       image->u.data[i] = swab32(image->u.data[i]);
3959 +               }
3960 +               image->magic = swab32(image->magic);
3961 +       }
3962 +
3963 +       ret = store_npe_image(image, dev);
3964 +out:
3965 +       if (ret) {
3966 +               printk(KERN_ERR "Error downloading Firmware for %s\n",
3967 +                               npe->plat->name);
3968 +       }
3969 +       release_firmware(fw_entry);
3970 +#endif
3971 +}
3972 +
3973 +static void disable_npe_irq(struct npe_info *npe)
3974 +{
3975 +       u32 reg;
3976 +       reg = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_CTL);
3977 +       reg &= ~(IX_NPEMH_NPE_CTL_OFE | IX_NPEMH_NPE_CTL_IFE);
3978 +       reg |= IX_NPEMH_NPE_CTL_OFEWE | IX_NPEMH_NPE_CTL_IFEWE;
3979 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL, reg);
3980 +}
3981 +
3982 +static ssize_t show_npe_state(struct device *dev, struct device_attribute *attr,
3983 +               char *buf)
3984 +{
3985 +       struct npe_info *npe = dev_get_drvdata(dev);
3986 +
3987 +       strcpy(buf, npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN ?
3988 +                       "start\n" : "stop\n");
3989 +       return strlen(buf);
3990 +}
3991 +
3992 +static ssize_t set_npe_state(struct device *dev, struct device_attribute *attr,
3993 +               const char *buf, size_t count)
3994 +{
3995 +       struct npe_info *npe = dev_get_drvdata(dev);
3996 +
3997 +       if (npe->usage) {
3998 +               printk("%s in use: read-only\n", npe->plat->name);
3999 +               return count;
4000 +       }
4001 +       if (!strncmp(buf, "start", 5)) {
4002 +               npe_start(npe);
4003 +       }
4004 +       if (!strncmp(buf, "stop", 4)) {
4005 +               npe_stop(npe);
4006 +       }
4007 +       if (!strncmp(buf, "reset", 5)) {
4008 +               npe_stop(npe);
4009 +               npe_reset(npe);
4010 +       }
4011 +       return count;
4012 +}
4013 +
4014 +static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_npe_state, set_npe_state);
4015 +
4016 +static int npe_probe(struct platform_device *pdev)
4017 +{
4018 +       struct resource *res;
4019 +       struct npe_info *npe;
4020 +       struct npe_plat_data *plat = pdev->dev.platform_data;
4021 +       int err, size, ret=0;
4022 +
4023 +       if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0)))
4024 +               return -EIO;
4025 +
4026 +       if (!(npe = kzalloc(sizeof(struct npe_info), GFP_KERNEL)))
4027 +               return -ENOMEM;
4028 +
4029 +       size = res->end - res->start +1;
4030 +       npe->res = request_mem_region(res->start, size, plat->name);
4031 +       if (!npe->res) {
4032 +               ret = -EBUSY;
4033 +               printk(KERN_ERR "Failed to get memregion(%x, %x)\n",
4034 +                               res->start, size);
4035 +               goto out_free;
4036 +       }
4037 +
4038 +       npe->addr = ioremap(res->start, size);
4039 +       if (!npe->addr) {
4040 +               ret = -ENOMEM;
4041 +               printk(KERN_ERR "Failed to ioremap(%x, %x)\n",
4042 +                               res->start, size);
4043 +               goto out_rel;
4044 +       }
4045 +
4046 +       pdev->dev.coherent_dma_mask = DMA_32BIT_MASK;
4047 +
4048 +       platform_set_drvdata(pdev, npe);
4049 +
4050 +       err = device_create_file(&pdev->dev, &dev_attr_state);
4051 +       if (err)
4052 +               goto out_rel;
4053 +
4054 +       npe->plat = plat;
4055 +       disable_npe_irq(npe);
4056 +       npe->usage = 0;
4057 +       npe_reset(npe);
4058 +       npe_firmware_probe(&pdev->dev);
4059 +
4060 +       return 0;
4061 +
4062 +out_rel:
4063 +       release_resource(npe->res);
4064 +out_free:
4065 +       kfree(npe);
4066 +       return ret;
4067 +}
4068 +
4069 +static struct file_operations ucode_dl_fops = {
4070 +       .owner          = THIS_MODULE,
4071 +       .write          = ucode_write,
4072 +       .open           = ucode_open,
4073 +       .release        = ucode_close,
4074 +};
4075 +
4076 +static struct miscdevice ucode_dl_dev = {
4077 +       .minor  = MICROCODE_MINOR,
4078 +       .name   = "ixp4xx_ucode",
4079 +       .fops   = &ucode_dl_fops,
4080 +};
4081 +
4082 +static int npe_remove(struct platform_device *pdev)
4083 +{
4084 +       struct npe_info *npe = platform_get_drvdata(pdev);
4085 +
4086 +       device_remove_file(&pdev->dev, &dev_attr_state);
4087 +
4088 +       iounmap(npe->addr);
4089 +       release_resource(npe->res);
4090 +       kfree(npe);
4091 +       return 0;
4092 +}
4093 +
4094 +static struct platform_driver ixp4xx_npe_driver = {
4095 +       .driver = {
4096 +               .name   = "ixp4xx_npe",
4097 +               .owner  = THIS_MODULE,
4098 +       },
4099 +       .probe  = npe_probe,
4100 +       .remove = npe_remove,
4101 +};
4102 +
4103 +static int __init init_npedriver(void)
4104 +{
4105 +       int ret;
4106 +       if ((ret = misc_register(&ucode_dl_dev))){
4107 +               printk(KERN_ERR "Failed to register misc device %d\n",
4108 +                               MICROCODE_MINOR);
4109 +               return ret;
4110 +       }
4111 +       if ((ret = platform_driver_register(&ixp4xx_npe_driver)))
4112 +               misc_deregister(&ucode_dl_dev);
4113 +       else
4114 +               printk(KERN_INFO IXNPE_VERSION " initialized\n");
4115 +
4116 +       return ret;
4117 +
4118 +}
4119 +
4120 +static void __exit finish_npedriver(void)
4121 +{
4122 +       misc_deregister(&ucode_dl_dev);
4123 +       platform_driver_unregister(&ixp4xx_npe_driver);
4124 +}
4125 +
4126 +module_init(init_npedriver);
4127 +module_exit(finish_npedriver);
4128 +
4129 +MODULE_LICENSE("GPL");
4130 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
4131 +
4132 +EXPORT_SYMBOL(get_npe_by_id);
4133 +EXPORT_SYMBOL(return_npe_dev);
4134 Index: linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h
4135 ===================================================================
4136 --- linux-2.6.21-rc1-arm.orig/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h 2007-02-21 02:24:18.000000000 -0800
4137 +++ linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h      2007-02-21 02:24:35.000000000 -0800
4138 @@ -22,6 +22,8 @@
4139  #ifndef _ASM_ARM_IXP4XX_H_
4140  #define _ASM_ARM_IXP4XX_H_
4141  
4142 +#include "npe_regs.h"
4143 +
4144  /*
4145   * IXP4xx Linux Memory Map:
4146   *
4147 @@ -44,6 +46,12 @@
4148   */
4149  
4150  /*
4151 + * PCI Memory Space
4152 + */
4153 +#define IXP4XX_PCIMEM_BASE_PHYS                (0x48000000)
4154 +#define IXP4XX_PCIMEM_REGION_SIZE      (0x04000000)
4155 +#define IXP4XX_PCIMEM_BAR_SIZE         (0x01000000)
4156 +/*
4157   * Queue Manager
4158   */
4159  #define IXP4XX_QMGR_BASE_PHYS          (0x60000000)
4160 @@ -322,7 +330,13 @@
4161  #define PCI_ATPDMA0_LENADDR_OFFSET  0x48
4162  #define PCI_ATPDMA1_AHBADDR_OFFSET  0x4C
4163  #define PCI_ATPDMA1_PCIADDR_OFFSET  0x50
4164 -#define PCI_ATPDMA1_LENADDR_OFFSET     0x54
4165 +#define PCI_ATPDMA1_LENADDR_OFFSET  0x54
4166 +#define PCI_PTADMA0_AHBADDR_OFFSET  0x58
4167 +#define PCI_PTADMA0_PCIADDR_OFFSET  0x5c
4168 +#define PCI_PTADMA0_LENADDR_OFFSET  0x60
4169 +#define PCI_PTADMA1_AHBADDR_OFFSET  0x64
4170 +#define PCI_PTADMA1_PCIADDR_OFFSET  0x68
4171 +#define PCI_PTADMA1_LENADDR_OFFSET  0x6c
4172  
4173  /*
4174   * PCI Control/Status Registers
4175 @@ -351,6 +365,12 @@
4176  #define PCI_ATPDMA1_AHBADDR     IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
4177  #define PCI_ATPDMA1_PCIADDR     IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
4178  #define PCI_ATPDMA1_LENADDR     IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
4179 +#define PCI_PTADMA0_AHBADDR     IXP4XX_PCI_CSR(PCI_PTADMA0_AHBADDR_OFFSET)
4180 +#define PCI_PTADMA0_PCIADDR     IXP4XX_PCI_CSR(PCI_PTADMA0_PCIADDR_OFFSET)
4181 +#define PCI_PTADMA0_LENADDR     IXP4XX_PCI_CSR(PCI_PTADMA0_LENADDR_OFFSET)
4182 +#define PCI_PTADMA1_AHBADDR     IXP4XX_PCI_CSR(PCI_PTADMA1_AHBADDR_OFFSET)
4183 +#define PCI_PTADMA1_PCIADDR     IXP4XX_PCI_CSR(PCI_PTADMA1_PCIADDR_OFFSET)
4184 +#define PCI_PTADMA1_LENADDR     IXP4XX_PCI_CSR(PCI_PTADMA1_LENADDR_OFFSET)
4185  
4186  /*
4187   * PCI register values and bit definitions 
4188 @@ -607,6 +627,34 @@
4189  
4190  #define DCMD_LENGTH    0x01fff         /* length mask (max = 8K - 1) */
4191  
4192 +
4193 +/* Fuse Bits of IXP_EXP_CFG2 */
4194 +#define IX_FUSE_RCOMP   (1 << 0)
4195 +#define IX_FUSE_USB     (1 << 1)
4196 +#define IX_FUSE_HASH    (1 << 2)
4197 +#define IX_FUSE_AES     (1 << 3)
4198 +#define IX_FUSE_DES     (1 << 4)
4199 +#define IX_FUSE_HDLC    (1 << 5)
4200 +#define IX_FUSE_AAL     (1 << 6)
4201 +#define IX_FUSE_HSS     (1 << 7)
4202 +#define IX_FUSE_UTOPIA  (1 << 8)
4203 +#define IX_FUSE_ETH0    (1 << 9)
4204 +#define IX_FUSE_ETH1    (1 << 10)
4205 +#define IX_FUSE_NPEA    (1 << 11)
4206 +#define IX_FUSE_NPEB    (1 << 12)
4207 +#define IX_FUSE_NPEC    (1 << 13)
4208 +#define IX_FUSE_PCI     (1 << 14)
4209 +#define IX_FUSE_ECC     (1 << 15)
4210 +#define IX_FUSE_UTOPIA_PHY_LIMIT  (3 << 16)
4211 +#define IX_FUSE_USB_HOST          (1 << 18)
4212 +#define IX_FUSE_NPEA_ETH          (1 << 19)
4213 +#define IX_FUSE_NPEB_ETH          (1 << 20)
4214 +#define IX_FUSE_RSA               (1 << 21)
4215 +#define IX_FUSE_XSCALE_MAX_FREQ   (3 << 22)
4216 +
4217 +#define IX_FUSE_IXP46X_ONLY IX_FUSE_XSCALE_MAX_FREQ | IX_FUSE_RSA | \
4218 +       IX_FUSE_NPEB_ETH | IX_FUSE_NPEA_ETH | IX_FUSE_USB_HOST | IX_FUSE_ECC
4219 +
4220  #ifndef __ASSEMBLY__
4221  static inline int cpu_is_ixp46x(void)
4222  {
4223 @@ -620,6 +668,15 @@
4224  #endif
4225         return 0;
4226  }
4227 +
4228 +static inline u32 ix_fuse(void)
4229 +{
4230 +       unsigned int fuses = ~(*IXP4XX_EXP_CFG2);
4231 +       if (!cpu_is_ixp46x())
4232 +               fuses &= ~IX_FUSE_IXP46X_ONLY;
4233 +
4234 +       return fuses;
4235 +}
4236  #endif
4237  
4238  #endif
4239 Index: linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/npe_regs.h
4240 ===================================================================
4241 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
4242 +++ linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/npe_regs.h 2007-02-21 02:24:35.000000000 -0800
4243 @@ -0,0 +1,82 @@
4244 +#ifndef NPE_REGS_H
4245 +#define NPE_REGS_H
4246 +
4247 +/* Execution Address  */
4248 +#define IX_NPEDL_REG_OFFSET_EXAD             0x00
4249 +/* Execution Data */
4250 +#define IX_NPEDL_REG_OFFSET_EXDATA           0x04
4251 +/* Execution Control */
4252 +#define IX_NPEDL_REG_OFFSET_EXCTL            0x08
4253 +/* Execution Count */
4254 +#define IX_NPEDL_REG_OFFSET_EXCT             0x0C
4255 +/* Action Point 0 */
4256 +#define IX_NPEDL_REG_OFFSET_AP0              0x10
4257 +/* Action Point 1 */
4258 +#define IX_NPEDL_REG_OFFSET_AP1              0x14
4259 +/* Action Point 2 */
4260 +#define IX_NPEDL_REG_OFFSET_AP2              0x18
4261 +/* Action Point 3 */
4262 +#define IX_NPEDL_REG_OFFSET_AP3              0x1C
4263 +/* Watchpoint FIFO */
4264 +#define IX_NPEDL_REG_OFFSET_WFIFO            0x20
4265 +/* Watch Count */
4266 +#define IX_NPEDL_REG_OFFSET_WC               0x24
4267 +/* Profile Count */
4268 +#define IX_NPEDL_REG_OFFSET_PROFCT           0x28
4269 +
4270 +/* Messaging Status */
4271 +#define IX_NPEDL_REG_OFFSET_STAT            0x2C
4272 +/* Messaging Control */
4273 +#define IX_NPEDL_REG_OFFSET_CTL                     0x30
4274 +/* Mailbox Status */
4275 +#define IX_NPEDL_REG_OFFSET_MBST            0x34
4276 +/* messaging in/out FIFO */
4277 +#define IX_NPEDL_REG_OFFSET_FIFO            0x38
4278 +
4279 +
4280 +#define IX_NPEDL_MASK_ECS_DBG_REG_2_IF       0x00100000
4281 +#define IX_NPEDL_MASK_ECS_DBG_REG_2_IE       0x00080000
4282 +#define IX_NPEDL_MASK_ECS_REG_0_ACTIVE       0x80000000
4283 +
4284 +#define IX_NPEDL_EXCTL_CMD_NPE_STEP          0x01
4285 +#define IX_NPEDL_EXCTL_CMD_NPE_START         0x02
4286 +#define IX_NPEDL_EXCTL_CMD_NPE_STOP          0x03
4287 +#define IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE      0x04
4288 +#define IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT   0x0C
4289 +#define IX_NPEDL_EXCTL_CMD_RD_INS_MEM        0x10
4290 +#define IX_NPEDL_EXCTL_CMD_WR_INS_MEM        0x11
4291 +#define IX_NPEDL_EXCTL_CMD_RD_DATA_MEM       0x12
4292 +#define IX_NPEDL_EXCTL_CMD_WR_DATA_MEM       0x13
4293 +#define IX_NPEDL_EXCTL_CMD_RD_ECS_REG        0x14
4294 +#define IX_NPEDL_EXCTL_CMD_WR_ECS_REG        0x15
4295 +
4296 +#define IX_NPEDL_EXCTL_STATUS_RUN            0x80000000
4297 +#define IX_NPEDL_EXCTL_STATUS_STOP           0x40000000
4298 +#define IX_NPEDL_EXCTL_STATUS_CLEAR          0x20000000
4299 +
4300 +#define IX_NPEDL_MASK_WFIFO_VALID            0x80000000
4301 +#define IX_NPEDL_MASK_STAT_OFNE              0x00010000
4302 +#define IX_NPEDL_MASK_STAT_IFNE              0x00080000
4303 +
4304 +#define IX_NPEDL_ECS_DBG_CTXT_REG_0          0x0C
4305 +#define IX_NPEDL_ECS_PRI_1_CTXT_REG_0        0x04
4306 +#define IX_NPEDL_ECS_PRI_2_CTXT_REG_0        0x08
4307 +
4308 +/* NPE control register bit definitions */
4309 +#define IX_NPEMH_NPE_CTL_OFE   (1 << 16) /**< OutFifoEnable */
4310 +#define IX_NPEMH_NPE_CTL_IFE   (1 << 17) /**< InFifoEnable */
4311 +#define IX_NPEMH_NPE_CTL_OFEWE (1 << 24) /**< OutFifoEnableWriteEnable */
4312 +#define IX_NPEMH_NPE_CTL_IFEWE (1 << 25) /**< InFifoEnableWriteEnable */
4313 +
4314 +/* NPE status register bit definitions */
4315 +#define IX_NPEMH_NPE_STAT_OFNE  (1 << 16) /**< OutFifoNotEmpty */
4316 +#define IX_NPEMH_NPE_STAT_IFNF  (1 << 17) /**< InFifoNotFull */
4317 +#define IX_NPEMH_NPE_STAT_OFNF  (1 << 18) /**< OutFifoNotFull */
4318 +#define IX_NPEMH_NPE_STAT_IFNE  (1 << 19) /**< InFifoNotEmpty */
4319 +#define IX_NPEMH_NPE_STAT_MBINT (1 << 20) /**< Mailbox interrupt */
4320 +#define IX_NPEMH_NPE_STAT_IFINT (1 << 21) /**< InFifo interrupt */
4321 +#define IX_NPEMH_NPE_STAT_OFINT (1 << 22) /**< OutFifo interrupt */
4322 +#define IX_NPEMH_NPE_STAT_WFINT (1 << 23) /**< WatchFifo interrupt */
4323 +
4324 +#endif
4325 +
4326 Index: linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/platform.h
4327 ===================================================================
4328 --- linux-2.6.21-rc1-arm.orig/include/asm-arm/arch-ixp4xx/platform.h    2007-02-21 02:24:18.000000000 -0800
4329 +++ linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/platform.h 2007-02-21 02:24:35.000000000 -0800
4330 @@ -86,6 +86,25 @@
4331         unsigned long scl_pin;
4332  };
4333  
4334 +struct npe_plat_data {
4335 +       const char *name;
4336 +       int data_size;
4337 +       int inst_size;
4338 +       int id;         /* Node ID */
4339 +};
4340 +
4341 +struct mac_plat_info {
4342 +       int npe_id;     /* Node ID of the NPE for this port */
4343 +       int port_id;    /* Port ID for NPE-B @ ixp465 */
4344 +       int eth_id;     /* Physical ID */
4345 +       int phy_id;     /* ID of the connected PHY (PCB/platform dependent) */
4346 +       int rxq_id;     /* Queue ID of the RX-free q */
4347 +       int rxdoneq_id; /* where incoming packets are returned */
4348 +       int txq_id;     /* Where to push the outgoing packets */
4349 +       unsigned char hwaddr[6]; /* Desired hardware address */
4350 +
4351 +};
4352 +
4353  /*
4354   * This structure provide a means for the board setup code
4355   * to give information to th pata_ixp4xx driver. It is
4356 Index: linux-2.6.21-rc1-arm/include/linux/ixp_crypto.h
4357 ===================================================================
4358 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
4359 +++ linux-2.6.21-rc1-arm/include/linux/ixp_crypto.h     2007-02-21 02:24:35.000000000 -0800
4360 @@ -0,0 +1,192 @@
4361 +
4362 +#ifndef IX_CRYPTO_H
4363 +#define IX_CRYPTO_H
4364 +
4365 +#define MAX_KEYLEN 64
4366 +#define NPE_CTX_LEN 80
4367 +#define AES_BLOCK128 16
4368 +
4369 +#define NPE_OP_HASH_GEN_ICV   0x50
4370 +#define NPE_OP_ENC_GEN_KEY    0xc9
4371 +
4372 +
4373 +#define NPE_OP_HASH_VERIFY   0x01
4374 +#define NPE_OP_CCM_ENABLE    0x04
4375 +#define NPE_OP_CRYPT_ENABLE  0x08
4376 +#define NPE_OP_HASH_ENABLE   0x10
4377 +#define NPE_OP_NOT_IN_PLACE  0x20
4378 +#define NPE_OP_HMAC_DISABLE  0x40
4379 +#define NPE_OP_CRYPT_ENCRYPT 0x80
4380 +
4381 +#define MOD_ECB     0x0000
4382 +#define MOD_CTR     0x1000
4383 +#define MOD_CBC_ENC 0x2000
4384 +#define MOD_CBC_DEC 0x3000
4385 +#define MOD_CCM_ENC 0x4000
4386 +#define MOD_CCM_DEC 0x5000
4387 +
4388 +#define ALGO_AES    0x0800
4389 +#define CIPH_DECR   0x0000
4390 +#define CIPH_ENCR   0x0400
4391 +
4392 +#define MOD_DES     0x0000
4393 +#define MOD_TDEA2   0x0100
4394 +#define MOD_TDEA3   0x0200
4395 +#define MOD_AES128  0x0000
4396 +#define MOD_AES192  0x0100
4397 +#define MOD_AES256  0x0200
4398 +
4399 +#define KEYLEN_128  4
4400 +#define KEYLEN_192  6
4401 +#define KEYLEN_256  8
4402 +
4403 +#define CIPHER_TYPE_NULL   0
4404 +#define CIPHER_TYPE_DES    1
4405 +#define CIPHER_TYPE_3DES   2
4406 +#define CIPHER_TYPE_AES    3
4407 +
4408 +#define CIPHER_MODE_ECB    1
4409 +#define CIPHER_MODE_CTR    2
4410 +#define CIPHER_MODE_CBC    3
4411 +#define CIPHER_MODE_CCM    4
4412 +
4413 +#define HASH_TYPE_NULL     0
4414 +#define HASH_TYPE_MD5      1
4415 +#define HASH_TYPE_SHA1     2
4416 +#define HASH_TYPE_CBCMAC   3
4417 +
4418 +#define OP_REG_DONE  1
4419 +#define OP_REGISTER  2
4420 +#define OP_PERFORM   3
4421 +
4422 +#define STATE_UNREGISTERED 0
4423 +#define STATE_REGISTERED   1
4424 +#define STATE_UNLOADING    2
4425 +
4426 +struct crypt_ctl {
4427 +#ifndef CONFIG_NPE_ADDRESS_COHERENT
4428 +       u8 mode;    /* NPE operation */
4429 +       u8 init_len;
4430 +       u16 reserved;
4431 +#else
4432 +       u16 reserved;
4433 +       u8 init_len;
4434 +       u8 mode;    /* NPE operation */
4435 +#endif
4436 +       u8 iv[16];  /* IV for CBC mode or CTR IV for CTR mode */
4437 +       union {
4438 +               u32 icv;
4439 +               u32 rev_aes;
4440 +       } addr;
4441 +       u32 src_buf;
4442 +       u32 dest_buf;
4443 +#ifndef CONFIG_NPE_ADDRESS_COHERENT
4444 +       u16 auth_offs;  /* Authentication start offset */
4445 +       u16 auth_len;   /* Authentication data length */
4446 +       u16 crypt_offs; /* Cryption start offset */
4447 +       u16 crypt_len;  /* Cryption data length */
4448 +#else
4449 +       u16 auth_len;   /* Authentication data length */
4450 +       u16 auth_offs;  /* Authentication start offset */
4451 +       u16 crypt_len;  /* Cryption data length */
4452 +       u16 crypt_offs; /* Cryption start offset */
4453 +#endif
4454 +       u32 aadAddr;    /* Additional Auth Data Addr for CCM mode */
4455 +       u32 crypto_ctx; /* NPE Crypto Param structure address */
4456 +
4457 +       /* Used by Host */
4458 +       struct ix_sa_ctx *sa_ctx;
4459 +       int oper_type;
4460 +};
4461 +
4462 +struct npe_crypt_cont {
4463 +       union {
4464 +               struct crypt_ctl crypt;
4465 +               u8 rev_aes_key[NPE_CTX_LEN];
4466 +       } ctl;
4467 +       struct npe_crypt_cont *next;
4468 +       struct npe_crypt_cont *virt;
4469 +       dma_addr_t phys;
4470 +};
4471 +
4472 +struct ix_hash_algo {
4473 +       char *name;
4474 +       u32 cfgword;
4475 +       int digest_len;
4476 +       int aad_len;
4477 +       unsigned char *icv;
4478 +       int type;
4479 +};
4480 +
4481 +struct ix_cipher_algo {
4482 +       char *name;
4483 +       u32 cfgword_enc;
4484 +       u32 cfgword_dec;
4485 +       int block_len;
4486 +       int iv_len;
4487 +       int type;
4488 +       int mode;
4489 +};
4490 +
4491 +struct ix_key {
4492 +       u8 key[MAX_KEYLEN];
4493 +       int len;
4494 +};
4495 +
4496 +struct ix_sa_master {
4497 +       struct device *npe_dev;
4498 +       struct qm_queue *sendq;
4499 +       struct qm_queue *recvq;
4500 +       struct dma_pool *dmapool;
4501 +       struct npe_crypt_cont *pool;
4502 +       int pool_size;
4503 +       rwlock_t lock;
4504 +};
4505 +
4506 +struct ix_sa_dir {
4507 +       unsigned char *npe_ctx;
4508 +       dma_addr_t npe_ctx_phys;
4509 +       int npe_ctx_idx;
4510 +       u8 npe_mode;
4511 +};
4512 +
4513 +struct ix_sa_ctx {
4514 +       struct list_head list;
4515 +       struct ix_sa_master *master;
4516 +
4517 +       const struct ix_hash_algo *h_algo;
4518 +       const struct ix_cipher_algo *c_algo;
4519 +       struct ix_key c_key;
4520 +       struct ix_key h_key;
4521 +
4522 +       int digest_len;
4523 +
4524 +       struct ix_sa_dir encrypt;
4525 +       struct ix_sa_dir decrypt;
4526 +
4527 +       struct npe_crypt_cont *rev_aes;
4528 +       gfp_t gfp_flags;
4529 +
4530 +       int state;
4531 +       void *priv;
4532 +
4533 +       void(*reg_cb)(struct ix_sa_ctx*, int);
4534 +       void(*perf_cb)(struct ix_sa_ctx*, void*, int);
4535 +       atomic_t use_cnt;
4536 +};
4537 +
4538 +const struct ix_hash_algo *ix_hash_by_id(int type);
4539 +const struct ix_cipher_algo *ix_cipher_by_id(int type, int mode);
4540 +
4541 +struct ix_sa_ctx *ix_sa_ctx_new(int priv_len, gfp_t flags);
4542 +void ix_sa_ctx_free(struct ix_sa_ctx *sa_ctx);
4543 +
4544 +int ix_sa_crypto_perform(struct ix_sa_ctx *sa_ctx, u8 *data, void *ptr,
4545 +               int datalen, int c_offs, int c_len, int a_offs, int a_len,
4546 +               int hmac, char *iv, int encrypt);
4547 +
4548 +int ix_sa_ctx_setup_cipher_auth(struct ix_sa_ctx *sa_ctx,
4549 +               const struct ix_cipher_algo *cipher,
4550 +               const struct ix_hash_algo *auth, int len);
4551 +
4552 +#endif
4553 Index: linux-2.6.21-rc1-arm/include/linux/ixp_npe.h
4554 ===================================================================
4555 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
4556 +++ linux-2.6.21-rc1-arm/include/linux/ixp_npe.h        2007-02-21 02:24:35.000000000 -0800
4557 @@ -0,0 +1,117 @@
4558 +/*
4559 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
4560 + *
4561 + * This file is released under the GPLv2
4562 + */
4563 +
4564 +#ifndef NPE_DEVICE_H
4565 +#define NPE_DEVICE_H
4566 +
4567 +#include <linux/miscdevice.h>
4568 +#include <asm/hardware.h>
4569 +
4570 +#ifdef __ARMEB__
4571 +#undef CONFIG_NPE_ADDRESS_COHERENT
4572 +#else
4573 +#define CONFIG_NPE_ADDRESS_COHERENT
4574 +#endif
4575 +
4576 +#if defined(__ARMEB__) || defined (CONFIG_NPE_ADDRESS_COHERENT)
4577 +#define npe_to_cpu32(x) (x)
4578 +#define npe_to_cpu16(x) (x)
4579 +#define cpu_to_npe32(x) (x)
4580 +#define cpu_to_npe16(x) (x)
4581 +#else
4582 +#error NPE_DATA_COHERENT
4583 +#define NPE_DATA_COHERENT
4584 +#define npe_to_cpu32(x) be32_to_cpu(x)
4585 +#define npe_to_cpu16(x) be16_to_cpu(x)
4586 +#define cpu_to_npe32(x) cpu_to_be32(x)
4587 +#define cpu_to_npe16(x) cpu_to_be16(x)
4588 +#endif
4589 +
4590 +
4591 +struct npe_info {
4592 +       struct resource *res;
4593 +       void __iomem *addr;
4594 +       struct npe_plat_data *plat;
4595 +       u8 img_info[4];
4596 +       int usage;
4597 +       int loaded;
4598 +       u32 exec_count;
4599 +       u32 ctx_reg2;
4600 +};
4601 +
4602 +
4603 +static inline void npe_reg_write(struct npe_info *npe, u32 reg, u32 val)
4604 +{
4605 +       *(volatile u32*)((u8*)(npe->addr) + reg) = val;
4606 +}
4607 +
4608 +static inline u32 npe_reg_read(struct npe_info *npe, u32 reg)
4609 +{
4610 +       return *(volatile u32*)((u8*)(npe->addr) + reg);
4611 +}
4612 +
4613 +static inline u32 npe_status(struct npe_info *npe)
4614 +{
4615 +       return npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXCTL);
4616 +}
4617 +
4618 +/* ixNpeDlNpeMgrCommandIssue */
4619 +static inline void npe_write_exctl(struct npe_info *npe, u32 cmd)
4620 +{
4621 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
4622 +}
4623 +/* ixNpeDlNpeMgrWriteCommandIssue */
4624 +static inline void
4625 +npe_write_cmd(struct npe_info *npe, u32 addr, u32 data, int cmd)
4626 +{
4627 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXDATA, data);
4628 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXAD, addr);
4629 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
4630 +}
4631 +/* ixNpeDlNpeMgrReadCommandIssue */
4632 +static inline u32
4633 +npe_read_cmd(struct npe_info *npe, u32 addr, int cmd)
4634 +{
4635 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXAD, addr);
4636 +       npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
4637 +       /* Intel reads the data twice - so do we... */
4638 +       npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXDATA);
4639 +       return npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXDATA);
4640 +}
4641 +
4642 +/* ixNpeDlNpeMgrExecAccRegWrite */
4643 +static inline void npe_write_ecs_reg(struct npe_info *npe, u32 addr, u32 data)
4644 +{
4645 +       npe_write_cmd(npe, addr, data, IX_NPEDL_EXCTL_CMD_WR_ECS_REG);
4646 +}
4647 +/* ixNpeDlNpeMgrExecAccRegRead */
4648 +static inline u32 npe_read_ecs_reg(struct npe_info *npe, u32 addr)
4649 +{
4650 +       return npe_read_cmd(npe, addr, IX_NPEDL_EXCTL_CMD_RD_ECS_REG);
4651 +}
4652 +
4653 +extern void npe_stop(struct npe_info *npe);
4654 +extern void npe_start(struct npe_info *npe);
4655 +extern void npe_reset(struct npe_info *npe);
4656 +
4657 +extern struct device *get_npe_by_id(int id);
4658 +extern void return_npe_dev(struct device *dev);
4659 +
4660 +/* NPE Messages */
4661 +extern int
4662 +npe_mh_status(struct npe_info *npe);
4663 +extern int
4664 +npe_mh_setportaddr(struct npe_info *npe, struct mac_plat_info *mp, u8 *macaddr);
4665 +extern int
4666 +npe_mh_disable_firewall(struct npe_info *npe, struct mac_plat_info *mp);
4667 +extern int
4668 +npe_mh_set_rxqid(struct npe_info *npe, struct mac_plat_info *mp, int qid);
4669 +extern int
4670 +npe_mh_npe_loopback_mode(struct npe_info *npe, struct mac_plat_info *mp, int enable);
4671 +extern int
4672 +npe_mh_get_stats(struct npe_info *npe, struct mac_plat_info *mp, u32 phys, int reset);
4673 +
4674 +#endif
4675 Index: linux-2.6.21-rc1-arm/include/linux/ixp_qmgr.h
4676 ===================================================================
4677 --- /dev/null   1970-01-01 00:00:00.000000000 +0000
4678 +++ linux-2.6.21-rc1-arm/include/linux/ixp_qmgr.h       2007-02-21 02:24:35.000000000 -0800
4679 @@ -0,0 +1,202 @@
4680 +/*
4681 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
4682 + *
4683 + * This file is released under the GPLv2
4684 + */
4685 +
4686 +#ifndef IX_QMGR_H
4687 +#define IX_QMGR_H
4688 +
4689 +#include <linux/skbuff.h>
4690 +#include <linux/list.h>
4691 +#include <linux/if_ether.h>
4692 +#include <linux/spinlock.h>
4693 +#include <linux/platform_device.h>
4694 +#include <linux/ixp_npe.h>
4695 +#include <asm/atomic.h>
4696 +
4697 +/* All offsets are in 32bit words */
4698 +#define QUE_LOW_STAT0    0x100 /* 4x Status of the 32 lower queues 0-31 */
4699 +#define QUE_UO_STAT0     0x104 /* 2x Underflow/Overflow status bits*/
4700 +#define QUE_UPP_STAT0    0x106 /* 2x Status of the 32 upper queues 32-63 */
4701 +#define INT0_SRC_SELREG0 0x108 /* 4x */
4702 +#define QUE_IE_REG0      0x10c /* 2x */
4703 +#define QUE_INT_REG0     0x10e /* 2x IRQ reg, write 1 to reset IRQ */
4704 +
4705 +#define IX_QMGR_QCFG_BASE      0x800
4706 +#define IX_QMGR_QCFG_SIZE      0x40
4707 +#define IX_QMGR_SRAM_SPACE     (IX_QMGR_QCFG_BASE + IX_QMGR_QCFG_SIZE)
4708 +
4709 +#define MAX_QUEUES 32 /* first, we only support the lower 32 queues */
4710 +#define MAX_NPES    3
4711 +
4712 +enum {
4713 +       Q_IRQ_ID_E = 0,  /* Queue Empty due to last read  */
4714 +       Q_IRQ_ID_NE,     /* Queue Nearly Empty due to last read */
4715 +       Q_IRQ_ID_NF,     /* Queue Nearly Full due to last write */
4716 +       Q_IRQ_ID_F,      /* Queue Full due to last write  */
4717 +       Q_IRQ_ID_NOT_E,  /* Queue Not Empty due to last write */
4718 +       Q_IRQ_ID_NOT_NE, /* Queue Not Nearly Empty due to last write */
4719 +       Q_IRQ_ID_NOT_NF, /* Queue Not Nearly Full due to last read */
4720 +       Q_IRQ_ID_NOT_F   /* Queue Not Full due to last read */
4721 +};
4722 +
4723 +extern struct qm_queue *request_queue(int qid, int len);
4724 +extern void release_queue(struct qm_queue *queue);
4725 +extern int queue_set_irq_src(struct qm_queue *queue, int flag);
4726 +extern void queue_set_watermarks(struct qm_queue *, unsigned ne, unsigned nf);
4727 +extern int queue_len(struct qm_queue *queue);
4728 +
4729 +struct qm_qmgr;
4730 +struct qm_queue;
4731 +
4732 +typedef void(*queue_cb)(struct qm_queue *);
4733 +
4734 +struct qm_queue {
4735 +       int addr;       /* word offset from IX_QMGR_SRAM_SPACE */
4736 +       int len;        /* size in words */
4737 +       int id;         /* Q Id */
4738 +       u32 __iomem *acc_reg;
4739 +       struct device *dev;
4740 +       atomic_t use;
4741 +       queue_cb irq_cb;
4742 +       void *cb_data;
4743 +};
4744 +
4745 +#ifndef CONFIG_NPE_ADDRESS_COHERENT
4746 +struct eth_ctl {
4747 +       u32 next;
4748 +       u16 buf_len;
4749 +       u16 pkt_len;
4750 +       u32 phys_addr;
4751 +       u8 dest_id;
4752 +       u8 src_id;
4753 +       u16 flags;
4754 +       u8 qos;
4755 +       u8 padlen;
4756 +       u16 vlan_tci;
4757 +       u8 dest_mac[ETH_ALEN];
4758 +       u8 src_mac[ETH_ALEN];
4759 +};
4760 +
4761 +#else
4762 +struct eth_ctl {
4763 +       u32 next;
4764 +       u16 pkt_len;
4765 +       u16 buf_len;
4766 +       u32 phys_addr;
4767 +       u16 flags;
4768 +       u8 src_id;
4769 +       u8 dest_id;
4770 +       u16 vlan_tci;
4771 +       u8 padlen;
4772 +       u8 qos;
4773 +       u8 dest_mac[ETH_ALEN];
4774 +       u8 src_mac[ETH_ALEN];
4775 +};
4776 +#endif
4777 +
4778 +struct npe_cont {
4779 +       struct eth_ctl eth;
4780 +       void *data;
4781 +       struct npe_cont *next;
4782 +       struct npe_cont *virt;
4783 +       dma_addr_t phys;
4784 +};
4785 +
4786 +struct qm_qmgr {
4787 +       u32 __iomem *addr;
4788 +       struct resource *res;
4789 +       struct qm_queue *queues[MAX_QUEUES];
4790 +       rwlock_t lock;
4791 +       struct npe_cont *pool;
4792 +       struct dma_pool *dmapool;
4793 +       int irq;
4794 +};
4795 +
4796 +static inline void queue_write_cfg_reg(struct qm_queue *queue, u32 val)
4797 +{
4798 +       struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4799 +       *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id) = val;
4800 +}
4801 +static inline u32 queue_read_cfg_reg(struct qm_queue *queue)
4802 +{
4803 +       struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4804 +       return *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
4805 +}
4806 +
4807 +static inline void queue_ack_irq(struct qm_queue *queue)
4808 +{
4809 +       struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4810 +       *(qmgr->addr + QUE_INT_REG0) = 1 << queue->id;
4811 +}
4812 +
4813 +static inline void queue_enable_irq(struct qm_queue *queue)
4814 +{
4815 +       struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4816 +       *(qmgr->addr + QUE_IE_REG0) |= 1 << queue->id;
4817 +}
4818 +
4819 +static inline void queue_disable_irq(struct qm_queue *queue)
4820 +{
4821 +       struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4822 +       *(qmgr->addr + QUE_IE_REG0) &= ~(1 << queue->id);
4823 +}
4824 +
4825 +static inline void queue_put_entry(struct qm_queue *queue, u32 entry)
4826 +{
4827 +       *(queue->acc_reg) = npe_to_cpu32(entry);
4828 +}
4829 +
4830 +static inline u32 queue_get_entry(struct qm_queue *queue)
4831 +{
4832 +       return cpu_to_npe32(*queue->acc_reg);
4833 +}
4834 +
4835 +static inline struct npe_cont *qmgr_get_cont(struct qm_qmgr *qmgr)
4836 +{
4837 +       unsigned long flags;
4838 +       struct npe_cont *cont;
4839 +
4840 +       if (!qmgr->pool)
4841 +               return NULL;
4842 +       write_lock_irqsave(&qmgr->lock, flags);
4843 +       cont = qmgr->pool;
4844 +       qmgr->pool = cont->next;
4845 +       write_unlock_irqrestore(&qmgr->lock, flags);
4846 +       return cont;
4847 +}
4848 +
4849 +static inline void qmgr_return_cont(struct qm_qmgr *qmgr,struct npe_cont *cont)
4850 +{
4851 +       unsigned long flags;
4852 +
4853 +       write_lock_irqsave(&qmgr->lock, flags);
4854 +       cont->next = qmgr->pool;
4855 +       qmgr->pool = cont;
4856 +       write_unlock_irqrestore(&qmgr->lock, flags);
4857 +}
4858 +
4859 +static inline int queue_stat(struct qm_queue *queue)
4860 +{
4861 +       struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4862 +       u32 reg = *(qmgr->addr + QUE_UO_STAT0 + (queue->id >> 4));
4863 +       return (reg >> (queue->id & 0xf) << 1) & 3;
4864 +}
4865 +
4866 +/* Prints the queue state, which is very, very helpful for debugging */
4867 +static inline void queue_state(struct qm_queue *queue)
4868 +{
4869 +       u32 val=0, lstat=0;
4870 +       int offs;
4871 +       struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4872 +
4873 +       offs = queue->id/8 + QUE_LOW_STAT0;
4874 +       val = *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
4875 +       lstat = (*(qmgr->addr + offs) >>  ((queue->id % 8)*4)) & 0x0f;
4876 +
4877 +       printk("Qid[%02d]: Wptr=%4x, Rptr=%4x, diff=%4x, Stat:%x\n", queue->id,
4878 +               val&0x7f, (val>>7) &0x7f, (val - (val >> 7)) & 0x7f, lstat);
4879 +}
4880 +
4881 +#endif