/*
 * Copyright (c) 2008 Daniel Mueller (daniel@danm.de)
 * Copyright (c) 2007 David McCullough (david_mccullough@securecomputing.com)
 * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org)
 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */
#undef UBSEC_VERBOSE_DEBUG

#ifdef UBSEC_VERBOSE_DEBUG
#define UBSEC_DEBUG
#endif

/*
 * uBsec BCM5365 hardware crypto accelerator
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/proc_fs.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/stat.h>
#include <linux/ssb/ssb.h>

#include "cryptodev.h"
#define DRV_MODULE_NAME     "ubsec_ssb"
#define PFX                 DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION  "0.02"
#define DRV_MODULE_RELDATE  "Feb 21, 2009"

#define DPRINTF(a...) \
    if (debug) { \
        printk(DRV_MODULE_NAME ": " a); \
    }
static irqreturn_t ubsec_ssb_isr(int, void *, struct pt_regs *);
static int  __devinit ubsec_ssb_probe(struct ssb_device *sdev,
    const struct ssb_device_id *ent);
static void __devexit ubsec_ssb_remove(struct ssb_device *sdev);
int ubsec_attach(struct ssb_device *sdev, const struct ssb_device_id *ent,
    struct device *self);
static void ubsec_setup_mackey(struct ubsec_session *ses, int algo,
    caddr_t key, int klen);
static int dma_map_skb(struct ubsec_softc *sc,
    struct ubsec_dma_alloc *q_map, struct sk_buff *skb, int *mlen);
static int dma_map_uio(struct ubsec_softc *sc,
    struct ubsec_dma_alloc *q_map, struct uio *uio, int *mlen);
static void dma_unmap(struct ubsec_softc *sc,
    struct ubsec_dma_alloc *q_map, int mlen);
static int ubsec_dmamap_aligned(struct ubsec_softc *sc,
    const struct ubsec_dma_alloc *q_map, int mlen);

static int proc_read(char *buf, char **start, off_t offset,
    int size, int *peof, void *data);
void ubsec_reset_board(struct ubsec_softc *);
void ubsec_init_board(struct ubsec_softc *);
void ubsec_cleanchip(struct ubsec_softc *);
void ubsec_totalreset(struct ubsec_softc *);
int  ubsec_free_q(struct ubsec_softc *, struct ubsec_q *);

static int ubsec_newsession(device_t, u_int32_t *, struct cryptoini *);
static int ubsec_freesession(device_t, u_int64_t);
static int ubsec_process(device_t, struct cryptop *, int);

void ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
void ubsec_feed(struct ubsec_softc *);
void ubsec_mcopy(struct sk_buff *, struct sk_buff *, int, int);
void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
int  ubsec_dma_malloc(struct ubsec_softc *, struct ubsec_dma_alloc *,
    size_t, int);
void ubsec_dump_pb(struct ubsec_pktbuf *);
void ubsec_dump_mcr(struct ubsec_mcr *);
#define READ_REG(sc,r) \
    ssb_read32((sc)->sdev, (r))
#define WRITE_REG(sc,r,val) \
    ssb_write32((sc)->sdev, (r), (val))
#define READ_REG_SDEV(sdev,r) \
    ssb_read32((sdev), (r))
#define WRITE_REG_SDEV(sdev,r,val) \
    ssb_write32((sdev), (r), (val))
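/* The ubsec core wants descriptors, keys and IVs in little-endian byte
 * order; these helpers convert a value in place (SWAP32 additionally
 * assumes its input is in network byte order). */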
#define SWAP32(x) (x) = htole32(ntohl((x)))
#define HTOLE32(x) (x) = htole32(x)

#ifdef __LITTLE_ENDIAN
#define letoh16(x) (x)
#define letoh32(x) (x)
#endif
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable debug output");

#define UBSEC_SSB_MAX_CHIPS 1
static struct ubsec_softc *ubsec_chip_idx[UBSEC_SSB_MAX_CHIPS];
static struct ubsec_stats ubsecstats;

static struct proc_dir_entry *procdebug;
static struct ssb_device_id ubsec_ssb_tbl[] = {
    /* Broadcom BCM5365P IPSec Core */
    SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_IPSEC, SSB_ANY_REV),
    SSB_DEVTABLE_END
};

static struct ssb_driver ubsec_ssb_driver = {
    .name       = DRV_MODULE_NAME,
    .id_table   = ubsec_ssb_tbl,
    .probe      = ubsec_ssb_probe,
    .remove     = __devexit_p(ubsec_ssb_remove),
    .suspend    = ubsec_ssb_suspend,
    .resume     = ubsec_ssb_resume
};
static device_method_t ubsec_ssb_methods = {
    /* crypto device methods */
    DEVMETHOD(cryptodev_newsession, ubsec_newsession),
    DEVMETHOD(cryptodev_freesession, ubsec_freesession),
    DEVMETHOD(cryptodev_process,    ubsec_process),
};
static int
proc_read(char *buf, char **start, off_t offset,
    int size, int *peof, void *data)
{
    int i = 0, byteswritten = 0, ret;
    unsigned int stat, ctrl;
#ifdef UBSEC_VERBOSE_DEBUG
    struct ubsec_q *q;
    struct ubsec_dma *dmap;
#endif

    while ((i < UBSEC_SSB_MAX_CHIPS) && (ubsec_chip_idx[i] != NULL))
    {
        struct ubsec_softc *sc = ubsec_chip_idx[i];

        stat = READ_REG(sc, BS_STAT);
        ctrl = READ_REG(sc, BS_CTRL);
        ret = snprintf((buf + byteswritten),
            (size - byteswritten),
            "DEV %d, DMASTAT %08x, DMACTRL %08x\n", i, stat, ctrl);
        byteswritten += ret;

#ifdef UBSEC_VERBOSE_DEBUG
        printf("DEV %d, DMASTAT %08x, DMACTRL %08x\n", i, stat, ctrl);

        /* Dump all queues' MCRs */
        if (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
            BSD_SIMPLEQ_FOREACH(q, &sc->sc_qchip, q_next)
            {
                dmap = q->q_dma;
                ubsec_dump_mcr(&dmap->d_dma->d_mcr);
            }
        }
#endif

        i++;
    }

    *peof = 1;
    return byteswritten;
}
/*
 * map in a given sk_buff
 */
static int
dma_map_skb(struct ubsec_softc *sc, struct ubsec_dma_alloc *q_map, struct sk_buff *skb, int *mlen)
{
    int i = 0;
    dma_addr_t tmp;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif

    /*
     * We support only a limited number of fragments.
     */
    if (unlikely((skb_shinfo(skb)->nr_frags + 1) >= UBS_MAX_SCATTER))
    {
        printk(KERN_ERR "Only %d scatter fragments are supported.\n", UBS_MAX_SCATTER);
        return (-ENOMEM);
    }

#ifdef UBSEC_VERBOSE_DEBUG
    DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, 0, (unsigned int)skb->data, skb_headlen(skb));
#endif

    /* first data package */
    tmp = dma_map_single(sc->sc_dv,
        skb->data,
        skb_headlen(skb),
        DMA_BIDIRECTIONAL);

    q_map[i].dma_paddr = tmp;
    q_map[i].dma_vaddr = skb->data;
    q_map[i].dma_size = skb_headlen(skb);

    if (unlikely(tmp == 0))
    {
        printk(KERN_ERR "Could not map memory region for dma.\n");
        return (-EINVAL);
    }

#ifdef UBSEC_VERBOSE_DEBUG
    DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, 0, (unsigned int)tmp);
#endif

    /* all other data packages */
    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
#ifdef UBSEC_VERBOSE_DEBUG
        DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, i + 1,
            (unsigned int)page_address(skb_shinfo(skb)->frags[i].page) +
            skb_shinfo(skb)->frags[i].page_offset, skb_shinfo(skb)->frags[i].size);
#endif

        tmp = dma_map_single(sc->sc_dv,
            page_address(skb_shinfo(skb)->frags[i].page) +
            skb_shinfo(skb)->frags[i].page_offset,
            skb_shinfo(skb)->frags[i].size,
            DMA_BIDIRECTIONAL);

        q_map[i + 1].dma_paddr = tmp;
        q_map[i + 1].dma_vaddr = (void *)(page_address(skb_shinfo(skb)->frags[i].page) +
            skb_shinfo(skb)->frags[i].page_offset);
        q_map[i + 1].dma_size = skb_shinfo(skb)->frags[i].size;

        if (unlikely(tmp == 0))
        {
            printk(KERN_ERR "Could not map memory region for dma.\n");
            return (-EINVAL);
        }

#ifdef UBSEC_VERBOSE_DEBUG
        DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, i + 1, (unsigned int)tmp);
#endif
    }
    *mlen = i + 1;

    return (0);
}
/*
 * map in a given uio buffer
 */
static int
dma_map_uio(struct ubsec_softc *sc, struct ubsec_dma_alloc *q_map, struct uio *uio, int *mlen)
{
    struct iovec *iov = uio->uio_iov;
    int n;
    dma_addr_t tmp;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif

    /*
     * We support only a limited number of fragments.
     */
    if (unlikely(uio->uio_iovcnt >= UBS_MAX_SCATTER))
    {
        printk(KERN_ERR "Only %d scatter fragments are supported.\n", UBS_MAX_SCATTER);
        return (-ENOMEM);
    }

    for (n = 0; n < uio->uio_iovcnt; n++) {
#ifdef UBSEC_VERBOSE_DEBUG
        DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, n, (unsigned int)iov->iov_base, iov->iov_len);
#endif
        tmp = dma_map_single(sc->sc_dv,
            iov->iov_base,
            iov->iov_len,
            DMA_BIDIRECTIONAL);

        q_map[n].dma_paddr = tmp;
        q_map[n].dma_vaddr = iov->iov_base;
        q_map[n].dma_size = iov->iov_len;

        if (unlikely(tmp == 0))
        {
            printk(KERN_ERR "Could not map memory region for dma.\n");
            return (-EINVAL);
        }

#ifdef UBSEC_VERBOSE_DEBUG
        DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, n, (unsigned int)tmp);
#endif
        iov++;
    }
    *mlen = n;

    return (0);
}
static void
dma_unmap(struct ubsec_softc *sc, struct ubsec_dma_alloc *q_map, int mlen)
{
    int i;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif

    for (i = 0; i < mlen; i++)
    {
#ifdef UBSEC_VERBOSE_DEBUG
        DPRINTF("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, (unsigned int)q_map[i].dma_paddr, q_map[i].dma_size);
#endif
        dma_unmap_single(sc->sc_dv,
            q_map[i].dma_paddr,
            q_map[i].dma_size,
            DMA_BIDIRECTIONAL);
    }
}
/*
 * Is the operand suitably aligned for direct DMA? Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
static int
ubsec_dmamap_aligned(struct ubsec_softc *sc, const struct ubsec_dma_alloc *q_map, int mlen)
{
    int i;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif

    for (i = 0; i < mlen; i++) {
        if (q_map[i].dma_paddr & 3)
            return (0);
        if (i != (mlen - 1) && (q_map[i].dma_size & 3))
            return (0);
    }
    return (1);
}
#define N(a) (sizeof(a) / sizeof(a[0]))
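/*
 * Precompute the HMAC inner and outer hash state: hash one block of
 * (key XOR ipad) into ses_hminner and one block of (key XOR opad) into
 * ses_hmouter, so the chip only has to continue the digest over the
 * actual payload.
 */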
static void
ubsec_setup_mackey(struct ubsec_session *ses, int algo, caddr_t key, int klen)
{
#ifdef HMAC_HACK
    MD5_CTX md5ctx;
    SHA1_CTX sha1ctx;
    int i;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif

    for (i = 0; i < klen; i++)
        key[i] ^= HMAC_IPAD_VAL;

    if (algo == CRYPTO_MD5_HMAC) {
        MD5Init(&md5ctx);
        MD5Update(&md5ctx, key, klen);
        MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
        bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
    } else {
        SHA1Init(&sha1ctx);
        SHA1Update(&sha1ctx, key, klen);
        SHA1Update(&sha1ctx, hmac_ipad_buffer,
            SHA1_HMAC_BLOCK_LEN - klen);
        bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
    }

    for (i = 0; i < klen; i++)
        key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

    if (algo == CRYPTO_MD5_HMAC) {
        MD5Init(&md5ctx);
        MD5Update(&md5ctx, key, klen);
        MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
        bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
    } else {
        SHA1Init(&sha1ctx);
        SHA1Update(&sha1ctx, key, klen);
        SHA1Update(&sha1ctx, hmac_opad_buffer,
            SHA1_HMAC_BLOCK_LEN - klen);
        bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
    }

    /* restore the caller's key */
    for (i = 0; i < klen; i++)
        key[i] ^= HMAC_OPAD_VAL;
#else /* HMAC_HACK */
    DPRINTF("md5/sha not implemented\n");
#endif /* HMAC_HACK */
}
static int
__devinit ubsec_ssb_probe(struct ssb_device *sdev,
    const struct ssb_device_id *ent)
{
    int err;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif

    err = ssb_bus_powerup(sdev->bus, 0);
    if (err) {
        dev_err(sdev->dev, "Failed to powerup the bus\n");
        goto err_out;
    }

    err = request_irq(sdev->irq, (irq_handler_t)ubsec_ssb_isr,
        IRQF_DISABLED | IRQF_SHARED, DRV_MODULE_NAME, sdev);
    if (err) {
        dev_err(sdev->dev, "Could not request irq\n");
        goto err_out_powerdown;
    }

    err = ssb_dma_set_mask(sdev, DMA_32BIT_MASK);
    if (err) {
        dev_err(sdev->dev,
            "Required 32BIT DMA mask unsupported by the system.\n");
        goto err_out_free_irq;
    }

    printk(KERN_INFO "Sentry5(tm) ROBOGateway(tm) IPSec Core at IRQ %u\n",
        sdev->irq);

    DPRINTF("Vendor: %x, core id: %x, revision: %x\n",
        sdev->id.vendor, sdev->id.coreid, sdev->id.revision);

    ssb_device_enable(sdev, 0);

    if (ubsec_attach(sdev, ent, sdev->dev) != 0)
        goto err_out_disable;

    procdebug = create_proc_entry(DRV_MODULE_NAME, S_IRUSR, NULL);
    if (procdebug) {
        procdebug->read_proc = proc_read;
        procdebug->data = NULL;
    } else
        DPRINTF("Unable to create proc file.\n");

    return 0;

err_out_disable:
    ssb_device_disable(sdev, 0);

err_out_free_irq:
    free_irq(sdev->irq, sdev);

err_out_powerdown:
    ssb_bus_may_powerdown(sdev->bus);

err_out:
    return err;
}
static void __devexit ubsec_ssb_remove(struct ssb_device *sdev)
{
    struct ubsec_softc *sc;
    unsigned int ctrlflgs;
    struct ubsec_dma *dmap;
    u_int32_t i;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif

    ctrlflgs = READ_REG_SDEV(sdev, BS_CTRL);
    /* disable all IPSec Core interrupts globally */
    ctrlflgs &= ~(BS_CTRL_MCR1INT | BS_CTRL_MCR2INT |
        BS_CTRL_DMAERR);
    WRITE_REG_SDEV(sdev, BS_CTRL, ctrlflgs);

    free_irq(sdev->irq, sdev);

    sc = (struct ubsec_softc *)ssb_get_drvdata(sdev);

    /* unregister all crypto algorithms */
    crypto_unregister_all(sc->sc_cid);

    /* Free queue / dma memory */
    for (i = 0; i < UBS_MAX_NQUEUE; i++) {
        struct ubsec_q *q;

        q = sc->sc_queuea[i];
        if (q != NULL) {
            dmap = q->q_dma;
            if (dmap != NULL) {
                ubsec_dma_free(sc, &dmap->d_alloc);
                q->q_dma = NULL;
            }
            kfree(q);
        }
        sc->sc_queuea[i] = NULL;
    }

    ssb_device_disable(sdev, 0);
    ssb_bus_may_powerdown(sdev->bus);
    ssb_set_drvdata(sdev, NULL);

    if (procdebug)
        remove_proc_entry(DRV_MODULE_NAME, NULL);
}
int
ubsec_attach(struct ssb_device *sdev, const struct ssb_device_id *ent,
    struct device *self)
{
    struct ubsec_softc *sc = NULL;
    struct ubsec_dma *dmap;
    u_int32_t i;
    static int num_chips = 0;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif

    sc = (struct ubsec_softc *)kmalloc(sizeof(*sc), GFP_KERNEL);
    if (!sc)
        return (-ENOMEM);
    memset(sc, 0, sizeof(*sc));

    sc->sc_dv = sdev->dev;
    sc->sdev = sdev;

    spin_lock_init(&sc->sc_ringmtx);
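    /* sc_ringmtx serializes access to the request queues (sc_queue,
     * sc_qchip, sc_freequeue) that are shared between the process and
     * callback paths. */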
    softc_device_init(sc, "ubsec_ssb", num_chips, ubsec_ssb_methods);

    /* Maybe someday there will be boards with more than one chip available */
    if (num_chips < UBSEC_SSB_MAX_CHIPS) {
        ubsec_chip_idx[device_get_unit(sc->sc_dev)] = sc;
        num_chips++;
    }

    ssb_set_drvdata(sdev, sc);

    BSD_SIMPLEQ_INIT(&sc->sc_queue);
    BSD_SIMPLEQ_INIT(&sc->sc_qchip);
    BSD_SIMPLEQ_INIT(&sc->sc_queue2);
    BSD_SIMPLEQ_INIT(&sc->sc_qchip2);
    BSD_SIMPLEQ_INIT(&sc->sc_q2free);

    sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR;

    sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
    if (sc->sc_cid < 0) {
        device_printf(sc->sc_dev, "could not get crypto driver id\n");
        return (-1);
    }

    BSD_SIMPLEQ_INIT(&sc->sc_freequeue);
    dmap = sc->sc_dmaa;
    for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
        struct ubsec_q *q;

        q = (struct ubsec_q *)kmalloc(sizeof(struct ubsec_q), GFP_KERNEL);
        if (q == NULL) {
            printf(": can't allocate queue buffers\n");
            break;
        }

        if (ubsec_dma_malloc(sc, &dmap->d_alloc, sizeof(struct ubsec_dmachunk), 0)) {
            printf(": can't allocate dma buffers\n");
            kfree(q);
            break;
        }
        dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;

        q->q_dma = dmap;
        sc->sc_queuea[i] = q;

        BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
    }

    /*
     * Reset Broadcom chip
     */
    ubsec_reset_board(sc);

    /*
     * Init Broadcom chip
     */
    ubsec_init_board(sc);

    /* supported crypto algorithms */
    crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
    crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);

    if (sc->sc_flags & UBS_FLAGS_AES) {
        crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
        printf(KERN_INFO DRV_MODULE_NAME ": DES 3DES AES128 AES192 AES256 MD5_HMAC SHA1_HMAC\n");
    } else
        printf(KERN_INFO DRV_MODULE_NAME ": DES 3DES MD5_HMAC SHA1_HMAC\n");

    crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
    crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);

    return 0;
}
/*
 * UBSEC Interrupt routine
 */
static irqreturn_t
ubsec_ssb_isr(int irq, void *arg, struct pt_regs *regs)
{
    struct ubsec_softc *sc = NULL;
    volatile u_int32_t stat;
    struct ubsec_q *q;
    struct ubsec_dma *dmap;
    int npkts = 0, i;

#ifdef UBSEC_VERBOSE_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif

    sc = (struct ubsec_softc *)ssb_get_drvdata(arg);

    stat = READ_REG(sc, BS_STAT);

    stat &= sc->sc_statmask;
    if (stat == 0)
        return IRQ_NONE;

    WRITE_REG(sc, BS_STAT, stat);       /* IACK */

    /*
     * Check to see if we have any packets waiting for us
     */
    if ((stat & BS_STAT_MCR1_DONE)) {
        while (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
            q = BSD_SIMPLEQ_FIRST(&sc->sc_qchip);
            dmap = q->q_dma;

            if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0)
            {
                DPRINTF("error while processing MCR. Flags = %x\n", dmap->d_dma->d_mcr.mcr_flags);
                break;
            }

            BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);

            npkts = q->q_nstacked_mcrs;
            /*
             * search for further sc_qchip ubsec_q's that share
             * the same MCR, and complete them too, they must be
             * at the top.
             */
            for (i = 0; i < npkts; i++) {
                if (q->q_stacked_mcr[i])
                    ubsec_callback(sc, q->q_stacked_mcr[i]);
                else
                    break;
            }
            ubsec_callback(sc, q);
        }

        /*
         * Don't send any more packets to the chip if there has been
         * a DMA error.
         */
        if (likely(!(stat & BS_STAT_DMAERR)))
            ubsec_feed(sc);
        else
            DPRINTF("DMA error occurred. Stop feeding crypto chip.\n");
    }

    /*
     * Check to see if we got any DMA Error
     */
    if (stat & BS_STAT_DMAERR) {
        volatile u_int32_t a = READ_REG(sc, BS_ERR);

        printf(KERN_ERR "%s: dmaerr %s@%08x\n", DRV_MODULE_NAME,
            (a & BS_ERR_READ) ? "read" : "write", a & BS_ERR_ADDR);

        ubsecstats.hst_dmaerr++;
        ubsec_totalreset(sc);
        ubsec_feed(sc);
    }

    return IRQ_HANDLED;
}
/*
 * ubsec_feed() - aggregate and post requests to chip
 * It is assumed that the caller set splnet()
 */
void
ubsec_feed(struct ubsec_softc *sc)
{
#ifdef UBSEC_VERBOSE_DEBUG
    static int max;
#endif
    struct ubsec_q *q, *q2;
    int npkts, i;
    void *v;
    u_int32_t stat;

    npkts = sc->sc_nqueue;
    if (npkts > UBS_MAX_AGGR)
        npkts = UBS_MAX_AGGR;
    if (npkts < 2)
        goto feed1;

    stat = READ_REG(sc, BS_STAT);

    if (stat & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
        if (stat & BS_STAT_DMAERR) {
            ubsec_totalreset(sc);
            ubsecstats.hst_dmaerr++;
        }
        return;
    }

#ifdef UBSEC_VERBOSE_DEBUG
    DPRINTF("merging %d records\n", npkts);

    /* XXX temporary aggregation statistics reporting code */
    if (max < npkts) {
        max = npkts;
        DPRINTF("%s: new max aggregate %d\n", DRV_MODULE_NAME, max);
    }
#endif /* UBSEC_VERBOSE_DEBUG */
    q = BSD_SIMPLEQ_FIRST(&sc->sc_queue);
    BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
    --sc->sc_nqueue;

#if 0
    /*
     * XXX
     * We use dma_map_single() - no sync required!
     */
    bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
        0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
    if (q->q_dst_map != NULL)
        bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
            0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif

    q->q_nstacked_mcrs = npkts - 1;     /* Number of packets stacked */

    for (i = 0; i < q->q_nstacked_mcrs; i++) {
        q2 = BSD_SIMPLEQ_FIRST(&sc->sc_queue);

#if 0
        bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
            0, q2->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
        if (q2->q_dst_map != NULL)
            bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
                0, q2->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif
        BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
        --sc->sc_nqueue;
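        /* Append only the trailing ubsec_mcr_add portion of q2's MCR
         * header into slot i of the master MCR; the chip then processes
         * all stacked packets from a single MCR1 doorbell write. */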
        v = ((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) -
            sizeof(struct ubsec_mcr_add);
        bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add));
        q->q_stacked_mcr[i] = q2;
    }
    q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
    BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
#if 0
    bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
        0, q->q_dma->d_alloc.dma_map->dm_mapsize,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#endif
    WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
        offsetof(struct ubsec_dmachunk, d_mcr));
#ifdef UBSEC_VERBOSE_DEBUG
    DPRINTF("feed (1): q->chip %p %08x %08x\n", q,
        (u_int32_t)q->q_dma->d_alloc.dma_paddr,
        (u_int32_t)(q->q_dma->d_alloc.dma_paddr +
        offsetof(struct ubsec_dmachunk, d_mcr)));
#endif /* UBSEC_VERBOSE_DEBUG */
    return;
feed1:
    while (!BSD_SIMPLEQ_EMPTY(&sc->sc_queue)) {
        stat = READ_REG(sc, BS_STAT);

        if (stat & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
            if (stat & BS_STAT_DMAERR) {
                ubsec_totalreset(sc);
                ubsecstats.hst_dmaerr++;
            }
            break;
        }

        q = BSD_SIMPLEQ_FIRST(&sc->sc_queue);

#if 0
        bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
            0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
        if (q->q_dst_map != NULL)
            bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
                0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
        bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
            0, q->q_dma->d_alloc.dma_map->dm_mapsize,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#endif

        WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
            offsetof(struct ubsec_dmachunk, d_mcr));
#ifdef UBSEC_VERBOSE_DEBUG
        DPRINTF("feed (2): q->chip %p %08x %08x\n", q,
            (u_int32_t)q->q_dma->d_alloc.dma_paddr,
            (u_int32_t)(q->q_dma->d_alloc.dma_paddr +
            offsetof(struct ubsec_dmachunk, d_mcr)));
#endif /* UBSEC_VERBOSE_DEBUG */
        BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
        --sc->sc_nqueue;
        BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
    }
}
/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
ubsec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
    struct cryptoini *c, *encini = NULL, *macini = NULL;
    struct ubsec_softc *sc = NULL;
    struct ubsec_session *ses = NULL;
    int sesn, i;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif

    if (sidp == NULL || cri == NULL)
        return (EINVAL);

    sc = device_get_softc(dev);
    if (sc == NULL)
        return (EINVAL);

    for (c = cri; c != NULL; c = c->cri_next) {
        if (c->cri_alg == CRYPTO_MD5_HMAC ||
            c->cri_alg == CRYPTO_SHA1_HMAC) {
            if (macini)
                return (EINVAL);
            macini = c;
        } else if (c->cri_alg == CRYPTO_DES_CBC ||
            c->cri_alg == CRYPTO_3DES_CBC ||
            c->cri_alg == CRYPTO_AES_CBC) {
            if (encini)
                return (EINVAL);
            encini = c;
        } else
            return (EINVAL);
    }
    if (encini == NULL && macini == NULL)
        return (EINVAL);

    if (sc->sc_sessions == NULL) {
        ses = sc->sc_sessions = (struct ubsec_session *)kmalloc(
            sizeof(struct ubsec_session), SLAB_ATOMIC);
        if (ses == NULL)
            return (ENOMEM);
        memset(ses, 0, sizeof(struct ubsec_session));
        sesn = 0;
        sc->sc_nsessions = 1;
    } else {
        for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
            if (sc->sc_sessions[sesn].ses_used == 0) {
                ses = &sc->sc_sessions[sesn];
                break;
            }
        }

        if (ses == NULL) {
            sesn = sc->sc_nsessions;
            ses = (struct ubsec_session *)kmalloc((sesn + 1) *
                sizeof(struct ubsec_session), SLAB_ATOMIC);
            if (ses == NULL)
                return (ENOMEM);
            memset(ses, 0, (sesn + 1) * sizeof(struct ubsec_session));
            bcopy(sc->sc_sessions, ses, sesn *
                sizeof(struct ubsec_session));
            bzero(sc->sc_sessions, sesn *
                sizeof(struct ubsec_session));
            kfree(sc->sc_sessions);
            sc->sc_sessions = ses;
            ses = &sc->sc_sessions[sesn];
        }
    }

    bzero(ses, sizeof(struct ubsec_session));
    ses->ses_used = 1;

    if (encini) {
        /* XXX may read fewer than requested */
        read_random(ses->ses_iv, sizeof(ses->ses_iv));

        /* Go ahead and compute key in ubsec's byte order */
        if (encini->cri_alg == CRYPTO_DES_CBC) {
            /* DES uses the same key three times:
             * 1st encrypt -> 2nd decrypt -> 3rd encrypt */
            bcopy(encini->cri_key, &ses->ses_key[0], 8);
            bcopy(encini->cri_key, &ses->ses_key[2], 8);
            bcopy(encini->cri_key, &ses->ses_key[4], 8);
            ses->ses_keysize = 192; /* Fake! Actually it is only 64 bits,
                                       of which just 56 are effective. */
        } else if (encini->cri_alg == CRYPTO_3DES_CBC) {
            bcopy(encini->cri_key, ses->ses_key, 24);
            ses->ses_keysize = 192;
        } else if (encini->cri_alg == CRYPTO_AES_CBC) {
            ses->ses_keysize = encini->cri_klen;

            if (ses->ses_keysize != 128 &&
                ses->ses_keysize != 192 &&
                ses->ses_keysize != 256)
            {
                DPRINTF("unsupported AES key size: %d\n", ses->ses_keysize);
                return (EINVAL);
            }
            bcopy(encini->cri_key, ses->ses_key, (ses->ses_keysize / 8));
        }

        /* Hardware requires the keys in little endian byte order */
        for (i = 0; i < (ses->ses_keysize / 32); i++)
            SWAP32(ses->ses_key[i]);
    }

    if (macini) {
        ses->ses_mlen = macini->cri_mlen;

        if (ses->ses_mlen == 0 ||
            ses->ses_mlen > SHA1_HASH_LEN) {

            if (macini->cri_alg == CRYPTO_MD5_HMAC ||
                macini->cri_alg == CRYPTO_SHA1_HMAC)
            {
                ses->ses_mlen = DEFAULT_HMAC_LEN;
            } else
            {
                /*
                 * Reserved for future usage. MD5/SHA1 calculations have
                 * different hash sizes.
                 */
                printk(KERN_ERR DRV_MODULE_NAME ": unsupported hash operation with mac/hash len: %d\n", ses->ses_mlen);
                return (EINVAL);
            }
        }

        if (macini->cri_key != NULL) {
            ubsec_setup_mackey(ses, macini->cri_alg, macini->cri_key,
                macini->cri_klen / 8);
        }
    }
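    /* The returned session id encodes both the card (device unit number)
     * and the index into sc_sessions, so ubsec_process() can recover both
     * from crp->crp_sid later on. */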
    *sidp = UBSEC_SID(device_get_unit(sc->sc_dev), sesn);
    return (0);
}
/*
 * Deallocate a session.
 */
static int
ubsec_freesession(device_t dev, u_int64_t tid)
{
    struct ubsec_softc *sc = device_get_softc(dev);
    int session;
    u_int32_t sid = ((u_int32_t)tid) & 0xffffffff;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif

    if (sc == NULL)
        return (EINVAL);

    session = UBSEC_SESSION(sid);
    if (session < sc->sc_nsessions) {
        bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
        return (0);
    } else
        return (EINVAL);
}
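/*
 * Main entry point for a crypto request: build the MCR and the packet
 * context in the request's preallocated DMA chunk, map the source and
 * destination fragments, then queue the request and kick ubsec_feed().
 */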
static int
ubsec_process(device_t dev, struct cryptop *crp, int hint)
{
    struct ubsec_q *q = NULL;
    int err = 0, i, j, nicealign;
    struct ubsec_softc *sc = device_get_softc(dev);
    struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
    int encoffset = 0, macoffset = 0, cpskip, cpoffset;
    int sskip, dskip, stheend, dtheend, ivsize = 8;
    int16_t coffset;
    struct ubsec_session *ses;
    struct ubsec_generic_ctx ctx;
    struct ubsec_dma *dmap = NULL;
    unsigned long flags;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif

    if (unlikely(crp == NULL || crp->crp_callback == NULL)) {
        ubsecstats.hst_invalid++;
        return (EINVAL);
    }

    if (unlikely(sc == NULL))
        return (EINVAL);

#ifdef UBSEC_VERBOSE_DEBUG
    DPRINTF("spin_lock_irqsave\n");
#endif
    spin_lock_irqsave(&sc->sc_ringmtx, flags);
    //spin_lock_irq(&sc->sc_ringmtx);

    if (BSD_SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
        ubsecstats.hst_queuefull++;
#ifdef UBSEC_VERBOSE_DEBUG
        DPRINTF("spin_unlock_irqrestore\n");
#endif
        spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
        //spin_unlock_irq(&sc->sc_ringmtx);
        return (ERESTART);
    }

    q = BSD_SIMPLEQ_FIRST(&sc->sc_freequeue);
    BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
#ifdef UBSEC_VERBOSE_DEBUG
    DPRINTF("spin_unlock_irqrestore\n");
#endif
    spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
    //spin_unlock_irq(&sc->sc_ringmtx);

    dmap = q->q_dma;        /* Save dma pointer */
    bzero(q, sizeof(struct ubsec_q));
    bzero(&ctx, sizeof(ctx));

    q->q_sesn = UBSEC_SESSION(crp->crp_sid);
    q->q_dma = dmap;
    ses = &sc->sc_sessions[q->q_sesn];
    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        q->q_src_m = (struct sk_buff *)crp->crp_buf;
        q->q_dst_m = (struct sk_buff *)crp->crp_buf;
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        q->q_src_io = (struct uio *)crp->crp_buf;
        q->q_dst_io = (struct uio *)crp->crp_buf;
    } else {
        err = EINVAL;
        goto errout;    /* XXX we don't handle contiguous blocks! */
    }

    bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));

    dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
    dmap->d_dma->d_mcr.mcr_flags = 0;
    q->q_crp = crp;
    crd1 = crp->crp_desc;
    if (crd1 == NULL) {
        err = EINVAL;
        goto errout;
    }
    crd2 = crd1->crd_next;

    if (crd2 == NULL) {
        if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
            crd1->crd_alg == CRYPTO_SHA1_HMAC) {
            maccrd = crd1;
            enccrd = NULL;
        } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
            crd1->crd_alg == CRYPTO_3DES_CBC ||
            crd1->crd_alg == CRYPTO_AES_CBC) {
            maccrd = NULL;
            enccrd = crd1;
        } else {
            err = EINVAL;
            goto errout;
        }
    } else {
        if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
            crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
            (crd2->crd_alg == CRYPTO_DES_CBC ||
            crd2->crd_alg == CRYPTO_3DES_CBC ||
            crd2->crd_alg == CRYPTO_AES_CBC) &&
            ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
            maccrd = crd1;
            enccrd = crd2;
        } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
            crd1->crd_alg == CRYPTO_3DES_CBC ||
            crd1->crd_alg == CRYPTO_AES_CBC) &&
            (crd2->crd_alg == CRYPTO_MD5_HMAC ||
            crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
            (crd1->crd_flags & CRD_F_ENCRYPT)) {
            enccrd = crd1;
            maccrd = crd2;
        } else {
            /*
             * We cannot order the ubsec as requested
             */
            printk(KERN_ERR DRV_MODULE_NAME ": got wrong algorithm/signature order.\n");
            err = EINVAL;
            goto errout;
        }
    }
    /* Encryption/Decryption requested */
    if (enccrd) {
        encoffset = enccrd->crd_skip;

        if (enccrd->crd_alg == CRYPTO_DES_CBC ||
            enccrd->crd_alg == CRYPTO_3DES_CBC)
        {
            ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES);
            ctx.pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC_DES);
            ivsize = 8;     /* [3]DES uses 64bit IVs */
        } else {
            ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_AES);
            ctx.pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC_AES);
            ivsize = 16;    /* AES uses 128bit IVs */

            switch (ses->ses_keysize)
            {
            case 128:
                ctx.pc_flags |= htole16(UBS_PKTCTX_AES128);
                break;
            case 192:
                ctx.pc_flags |= htole16(UBS_PKTCTX_AES192);
                break;
            case 256:
                ctx.pc_flags |= htole16(UBS_PKTCTX_AES256);
                break;
            default:
                DPRINTF("invalid AES key size: %d\n", ses->ses_keysize);
                err = EINVAL;
                goto errout;
            }
        }

        if (enccrd->crd_flags & CRD_F_ENCRYPT) {
            /* Direction: Outbound */

            q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;

            if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
                bcopy(enccrd->crd_iv, ctx.pc_iv, ivsize);
            } else {
                for (i = 0; i < (ivsize / 4); i++)
                    ctx.pc_iv[i] = ses->ses_iv[i];
            }

            /* If there is no IV in the buffer -> copy it here */
            if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                if (crp->crp_flags & CRYPTO_F_SKBUF)
                    /*
                    m_copyback(q->q_src_m,
                        enccrd->crd_inject,
                        8, ctx.pc_iv);
                    */
                    crypto_copyback(crp->crp_flags, (caddr_t)q->q_src_m,
                        enccrd->crd_inject, ivsize, (caddr_t)ctx.pc_iv);
                else if (crp->crp_flags & CRYPTO_F_IOV)
                    /*
                    cuio_copyback(q->q_src_io,
                        enccrd->crd_inject,
                        8, ctx.pc_iv);
                    */
                    crypto_copyback(crp->crp_flags, (caddr_t)q->q_src_io,
                        enccrd->crd_inject, ivsize, (caddr_t)ctx.pc_iv);
            }
        } else {
            /* Direction: Inbound */

            ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND);

            if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
                bcopy(enccrd->crd_iv, ctx.pc_iv, ivsize);
            else if (crp->crp_flags & CRYPTO_F_SKBUF)
                /*
                m_copydata(q->q_src_m, enccrd->crd_inject,
                    8, (caddr_t)ctx.pc_iv);
                */
                crypto_copydata(crp->crp_flags, (caddr_t)q->q_src_m,
                    enccrd->crd_inject, ivsize,
                    (caddr_t)ctx.pc_iv);
            else if (crp->crp_flags & CRYPTO_F_IOV)
                /*
                cuio_copydata(q->q_src_io,
                    enccrd->crd_inject, 8,
                    (caddr_t)ctx.pc_iv);
                */
                crypto_copydata(crp->crp_flags, (caddr_t)q->q_src_io,
                    enccrd->crd_inject, ivsize,
                    (caddr_t)ctx.pc_iv);
        }

        /* Even though key & IV sizes differ from cipher to cipher
         * copy / swap the full array lengths. Let the compiler unroll
         * the loop to increase the cpu pipeline performance... */
        for (i = 0; i < 8; i++)
            ctx.pc_key[i] = ses->ses_key[i];
        for (i = 0; i < 4; i++)
            SWAP32(ctx.pc_iv[i]);
    }
    /* Authentication requested */
    if (maccrd) {
        macoffset = maccrd->crd_skip;

        if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
            ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5);
        else
            ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1);

        for (i = 0; i < 5; i++) {
            ctx.pc_hminner[i] = ses->ses_hminner[i];
            ctx.pc_hmouter[i] = ses->ses_hmouter[i];

            HTOLE32(ctx.pc_hminner[i]);
            HTOLE32(ctx.pc_hmouter[i]);
        }
    }
    if (enccrd && maccrd) {
        /*
         * ubsec cannot handle packets where the end of encryption
         * and authentication are not the same, or where the
         * encrypted part begins before the authenticated part.
         */
        if (((encoffset + enccrd->crd_len) !=
            (macoffset + maccrd->crd_len)) ||
            (enccrd->crd_skip < maccrd->crd_skip)) {
            err = EINVAL;
            goto errout;
        }

        sskip = maccrd->crd_skip;
        cpskip = dskip = enccrd->crd_skip;
        stheend = maccrd->crd_len;
        dtheend = enccrd->crd_len;
        coffset = enccrd->crd_skip - maccrd->crd_skip;
        cpoffset = cpskip + dtheend;
#ifdef UBSEC_DEBUG
        DPRINTF("mac: skip %d, len %d, inject %d\n",
            maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
        DPRINTF("enc: skip %d, len %d, inject %d\n",
            enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
        DPRINTF("src: skip %d, len %d\n", sskip, stheend);
        DPRINTF("dst: skip %d, len %d\n", dskip, dtheend);
        DPRINTF("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
            coffset, stheend, cpskip, cpoffset);
#endif
    } else {
        cpskip = dskip = sskip = macoffset + encoffset;
        dtheend = stheend = (enccrd) ? enccrd->crd_len : maccrd->crd_len;
        cpoffset = cpskip + dtheend;
        coffset = 0;
    }
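    /* The chip expects the cipher-to-auth offset in 32-bit words. */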
    ctx.pc_offset = htole16(coffset >> 2);
#if 0
    if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER,
        0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) {
        err = ENOMEM;
        goto errout;
    }
#endif

    if (crp->crp_flags & CRYPTO_F_SKBUF) {
#if 0
        if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
            q->q_src_m, BUS_DMA_NOWAIT) != 0) {
            bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
            q->q_src_map = NULL;
            err = ENOMEM;
            goto errout;
        }
#endif
        err = dma_map_skb(sc, q->q_src_map, q->q_src_m, &q->q_src_len);
        if (unlikely(err != 0))
            goto errout;

    } else if (crp->crp_flags & CRYPTO_F_IOV) {
#if 0
        if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
            q->q_src_io, BUS_DMA_NOWAIT) != 0) {
            bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
            q->q_src_map = NULL;
            err = ENOMEM;
            goto errout;
        }
#endif
        err = dma_map_uio(sc, q->q_src_map, q->q_src_io, &q->q_src_len);
        if (unlikely(err != 0))
            goto errout;
    }
    nicealign = ubsec_dmamap_aligned(sc, q->q_src_map, q->q_src_len);

    dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);

#ifdef UBSEC_DEBUG
    DPRINTF("src skip: %d\n", sskip);
#endif
    for (i = j = 0; i < q->q_src_len; i++) {
        struct ubsec_pktbuf *pb;
        size_t packl = q->q_src_map[i].dma_size;
        dma_addr_t packp = q->q_src_map[i].dma_paddr;

        if (sskip >= packl) {
            sskip -= packl;
            continue;
        }

        packl -= sskip;
        packp += sskip;
        sskip = 0;

        /* maximum fragment size is 0xfffc */
        if (packl > 0xfffc) {
            DPRINTF("Error: fragment size is bigger than 0xfffc.\n");
            err = EIO;
            goto errout;
        }

        if (j == 0)
            pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
        else
            pb = &dmap->d_dma->d_sbuf[j - 1];

        pb->pb_addr = htole32(packp);

        if (stheend) {
            if (packl > stheend) {
                pb->pb_len = htole32(stheend);
                stheend = 0;
            } else {
                pb->pb_len = htole32(packl);
                stheend -= packl;
            }
        } else
            pb->pb_len = htole32(packl);

        if ((i + 1) == q->q_src_len)
            pb->pb_next = 0;
        else
            pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
                offsetof(struct ubsec_dmachunk, d_sbuf[j]));
        j++;
    }
    if (enccrd == NULL && maccrd != NULL) {
        /* Authentication only */
        dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
        dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
        dmap->d_dma->d_mcr.mcr_opktbuf.pb_next =
            htole32(dmap->d_alloc.dma_paddr +
            offsetof(struct ubsec_dmachunk, d_macbuf[0]));
#ifdef UBSEC_DEBUG
        DPRINTF("opkt: %x %x %x\n",
            dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
            dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
            dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
#endif
    } else {
        if (crp->crp_flags & CRYPTO_F_IOV) {
            if (!nicealign) {
                err = EINVAL;
                goto errout;
            }
#if 0
            if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
                UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
                &q->q_dst_map) != 0) {
                err = ENOMEM;
                goto errout;
            }
            if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
                q->q_dst_io, BUS_DMA_NOWAIT) != 0) {
                bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
                q->q_dst_map = NULL;
                err = ENOMEM;
                goto errout;
            }
#endif

            /* HW shall copy the result into the source memory */
            for (i = 0; i < q->q_src_len; i++)
                q->q_dst_map[i] = q->q_src_map[i];

            q->q_dst_len = q->q_src_len;
            q->q_has_dst = 0;
        } else if (crp->crp_flags & CRYPTO_F_SKBUF) {
            if (nicealign) {
                /* HW shall copy the result into the source memory */
                q->q_dst_m = q->q_src_m;
                for (i = 0; i < q->q_src_len; i++)
                    q->q_dst_map[i] = q->q_src_map[i];

                q->q_dst_len = q->q_src_len;
                q->q_has_dst = 0;
            } else {
#ifdef NOTYET
                int totlen, len;
                struct sk_buff *m, *top, **mp;

                totlen = q->q_src_map->dm_mapsize;
                if (q->q_src_m->m_flags & M_PKTHDR) {
                    len = MHLEN;
                    MGETHDR(m, M_DONTWAIT, MT_DATA);
                } else {
                    len = MLEN;
                    MGET(m, M_DONTWAIT, MT_DATA);
                }
                if (m == NULL) {
                    err = ENOMEM;
                    goto errout;
                }
                if (len == MHLEN)
                    M_DUP_PKTHDR(m, q->q_src_m);
                if (totlen >= MINCLSIZE) {
                    MCLGET(m, M_DONTWAIT);
                    if (m->m_flags & M_EXT)
                        len = MCLBYTES;
                }
                m->m_len = len;
                top = NULL;
                mp = &top;

                while (totlen > 0) {
                    if (top) {
                        MGET(m, M_DONTWAIT, MT_DATA);
                        if (m == NULL) {
                            m_freem(top);
                            err = ENOMEM;
                            goto errout;
                        }
                        len = MLEN;
                    }
                    if (top && totlen >= MINCLSIZE) {
                        MCLGET(m, M_DONTWAIT);
                        if (m->m_flags & M_EXT)
                            len = MCLBYTES;
                    }
                    m->m_len = len = min(totlen, len);
                    totlen -= len;
                    *mp = m;
                    mp = &m->m_next;
                }
                q->q_dst_m = top;
                ubsec_mcopy(q->q_src_m, q->q_dst_m,
                    cpskip, cpoffset);
                if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
                    UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
                    &q->q_dst_map) != 0) {
                    err = ENOMEM;
                    goto errout;
                }
                if (bus_dmamap_load_mbuf(sc->sc_dmat,
                    q->q_dst_map, q->q_dst_m,
                    BUS_DMA_NOWAIT) != 0) {
                    bus_dmamap_destroy(sc->sc_dmat,
                        q->q_dst_map);
                    q->q_dst_map = NULL;
                    err = ENOMEM;
                    goto errout;
                }
#endif /* NOTYET */
                device_printf(sc->sc_dev,
                    "%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
                    __FILE__, __LINE__);
                err = EINVAL;
                goto errout;
            }
        } else {
            err = EINVAL;
            goto errout;
        }
1595 DPRINTF("dst skip: %d\n", dskip);
1597 for (i = j = 0; i < q->q_dst_len; i++) {
1598 struct ubsec_pktbuf *pb;
1599 size_t packl = q->q_dst_map[i].dma_size;
1600 dma_addr_t packp = q->q_dst_map[i].dma_paddr;
1602 if (dskip >= packl) {
1611 if (packl > 0xfffc) {
1612 DPRINTF("Error: fragment size is bigger than 0xfffc.\n");
1618 pb = &dmap->d_dma->d_mcr.mcr_opktbuf;
1620 pb = &dmap->d_dma->d_dbuf[j - 1];
1622 pb->pb_addr = htole32(packp);
1625 if (packl > dtheend) {
1626 pb->pb_len = htole32(dtheend);
1629 pb->pb_len = htole32(packl);
1633 pb->pb_len = htole32(packl);
1635 if ((i + 1) == q->q_dst_len) {
1638 * The last fragment of the output buffer
1639 * contains the HMAC. */
1640 pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
1641 offsetof(struct ubsec_dmachunk, d_macbuf[0]));
1645 pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
1646 offsetof(struct ubsec_dmachunk, d_dbuf[j]));
    dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr +
        offsetof(struct ubsec_dmachunk, d_ctx));
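    /* The per-packet command context lives at d_ctx inside the DMA chunk;
     * which layout is written there depends on the chip generation
     * (long vs. short context) and on the selected cipher. */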
    if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
        /* new Broadcom cards with dynamic long command context structure */

        if (enccrd != NULL &&
            enccrd->crd_alg == CRYPTO_AES_CBC)
        {
            struct ubsec_pktctx_aes128 *ctxaes128;
            struct ubsec_pktctx_aes192 *ctxaes192;
            struct ubsec_pktctx_aes256 *ctxaes256;

            switch (ses->ses_keysize)
            {
            case 128:
                ctxaes128 = (struct ubsec_pktctx_aes128 *)
                    (dmap->d_alloc.dma_vaddr +
                    offsetof(struct ubsec_dmachunk, d_ctx));

                ctxaes128->pc_len = htole16(sizeof(struct ubsec_pktctx_aes128));
                ctxaes128->pc_type = ctx.pc_type;
                ctxaes128->pc_flags = ctx.pc_flags;
                ctxaes128->pc_offset = ctx.pc_offset;
                for (i = 0; i < 4; i++)
                    ctxaes128->pc_aeskey[i] = ctx.pc_key[i];
                for (i = 0; i < 5; i++)
                    ctxaes128->pc_hminner[i] = ctx.pc_hminner[i];
                for (i = 0; i < 5; i++)
                    ctxaes128->pc_hmouter[i] = ctx.pc_hmouter[i];
                for (i = 0; i < 4; i++)
                    ctxaes128->pc_iv[i] = ctx.pc_iv[i];
                break;

            case 192:
                ctxaes192 = (struct ubsec_pktctx_aes192 *)
                    (dmap->d_alloc.dma_vaddr +
                    offsetof(struct ubsec_dmachunk, d_ctx));

                ctxaes192->pc_len = htole16(sizeof(struct ubsec_pktctx_aes192));
                ctxaes192->pc_type = ctx.pc_type;
                ctxaes192->pc_flags = ctx.pc_flags;
                ctxaes192->pc_offset = ctx.pc_offset;
                for (i = 0; i < 6; i++)
                    ctxaes192->pc_aeskey[i] = ctx.pc_key[i];
                for (i = 0; i < 5; i++)
                    ctxaes192->pc_hminner[i] = ctx.pc_hminner[i];
                for (i = 0; i < 5; i++)
                    ctxaes192->pc_hmouter[i] = ctx.pc_hmouter[i];
                for (i = 0; i < 4; i++)
                    ctxaes192->pc_iv[i] = ctx.pc_iv[i];
                break;

            case 256:
                ctxaes256 = (struct ubsec_pktctx_aes256 *)
                    (dmap->d_alloc.dma_vaddr +
                    offsetof(struct ubsec_dmachunk, d_ctx));

                ctxaes256->pc_len = htole16(sizeof(struct ubsec_pktctx_aes256));
                ctxaes256->pc_type = ctx.pc_type;
                ctxaes256->pc_flags = ctx.pc_flags;
                ctxaes256->pc_offset = ctx.pc_offset;
                for (i = 0; i < 8; i++)
                    ctxaes256->pc_aeskey[i] = ctx.pc_key[i];
                for (i = 0; i < 5; i++)
                    ctxaes256->pc_hminner[i] = ctx.pc_hminner[i];
                for (i = 0; i < 5; i++)
                    ctxaes256->pc_hmouter[i] = ctx.pc_hmouter[i];
                for (i = 0; i < 4; i++)
                    ctxaes256->pc_iv[i] = ctx.pc_iv[i];
                break;
            }
        } else {
            /*
             * [3]DES / MD5_HMAC / SHA1_HMAC
             *
             * MD5_HMAC / SHA1_HMAC can use the IPSEC 3DES operation without
             * encryption.
             */
            struct ubsec_pktctx_des *ctxdes;

            ctxdes = (struct ubsec_pktctx_des *)(dmap->d_alloc.dma_vaddr +
                offsetof(struct ubsec_dmachunk, d_ctx));

            ctxdes->pc_len = htole16(sizeof(struct ubsec_pktctx_des));
            ctxdes->pc_type = ctx.pc_type;
            ctxdes->pc_flags = ctx.pc_flags;
            ctxdes->pc_offset = ctx.pc_offset;
            for (i = 0; i < 6; i++)
                ctxdes->pc_deskey[i] = ctx.pc_key[i];
            for (i = 0; i < 5; i++)
                ctxdes->pc_hminner[i] = ctx.pc_hminner[i];
            for (i = 0; i < 5; i++)
                ctxdes->pc_hmouter[i] = ctx.pc_hmouter[i];
            ctxdes->pc_iv[0] = ctx.pc_iv[0];
            ctxdes->pc_iv[1] = ctx.pc_iv[1];
        }
    } else {
        /* old Broadcom card with fixed small command context structure */

        /*
         * [3]DES / MD5_HMAC / SHA1_HMAC
         */
        struct ubsec_pktctx *ctxs;

        ctxs = (struct ubsec_pktctx *)(dmap->d_alloc.dma_vaddr +
            offsetof(struct ubsec_dmachunk, d_ctx));

        /* transform generic context into small context */
        for (i = 0; i < 6; i++)
            ctxs->pc_deskey[i] = ctx.pc_key[i];
        for (i = 0; i < 5; i++)
            ctxs->pc_hminner[i] = ctx.pc_hminner[i];
        for (i = 0; i < 5; i++)
            ctxs->pc_hmouter[i] = ctx.pc_hmouter[i];
        ctxs->pc_iv[0] = ctx.pc_iv[0];
        ctxs->pc_iv[1] = ctx.pc_iv[1];
        ctxs->pc_flags = ctx.pc_flags;
        ctxs->pc_offset = ctx.pc_offset;
    }
#ifdef UBSEC_VERBOSE_DEBUG
    DPRINTF("spin_lock_irqsave\n");
#endif
    spin_lock_irqsave(&sc->sc_ringmtx, flags);
    //spin_lock_irq(&sc->sc_ringmtx);

    BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
    sc->sc_nqueue++;
    ubsecstats.hst_ipackets++;
    ubsecstats.hst_ibytes += stheend;
    ubsec_feed(sc);

#ifdef UBSEC_VERBOSE_DEBUG
    DPRINTF("spin_unlock_irqrestore\n");
#endif
    spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
    //spin_unlock_irq(&sc->sc_ringmtx);

    return (0);

errout:
    if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
        m_freem(q->q_dst_m);

    if ((q->q_has_dst == 1) && q->q_dst_len > 0) {
#if 0
        bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
        bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
#endif
        dma_unmap(sc, q->q_dst_map, q->q_dst_len);
    }
    if (q->q_src_len > 0) {
#if 0
        bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
        bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
#endif
        dma_unmap(sc, q->q_src_map, q->q_src_len);
    }

#ifdef UBSEC_VERBOSE_DEBUG
    DPRINTF("spin_lock_irqsave\n");
#endif
    spin_lock_irqsave(&sc->sc_ringmtx, flags);
    //spin_lock_irq(&sc->sc_ringmtx);

    BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);

#ifdef UBSEC_VERBOSE_DEBUG
    DPRINTF("spin_unlock_irqrestore\n");
#endif
    spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
    //spin_unlock_irq(&sc->sc_ringmtx);

    if (err == EINVAL)
        ubsecstats.hst_invalid++;
    else
        ubsecstats.hst_nomem++;

    crp->crp_etype = err;
    crypto_done(crp);

#ifdef UBSEC_DEBUG
    DPRINTF("%s() err = %x\n", __FUNCTION__, err);
#endif

    return (0);
}
void
ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
{
    struct cryptop *crp = (struct cryptop *)q->q_crp;
    struct cryptodesc *crd;
    struct ubsec_dma *dmap = q->q_dma;
    int ivsize = 8;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif

    ubsecstats.hst_opackets++;
    ubsecstats.hst_obytes += dmap->d_alloc.dma_size;

#if 0
    bus_dmamap_sync(sc->sc_dmat, dmap->d_alloc.dma_map, 0,
        dmap->d_alloc.dma_map->dm_mapsize,
        BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
    if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
        bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
            0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
        bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
    }
    bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
        0, q->q_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
    bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
#endif

    if ((q->q_has_dst == 1) && q->q_dst_len > 0)
        dma_unmap(sc, q->q_dst_map, q->q_dst_len);

    dma_unmap(sc, q->q_src_map, q->q_src_len);

    if ((crp->crp_flags & CRYPTO_F_SKBUF) && (q->q_src_m != q->q_dst_m)) {
        m_freem(q->q_src_m);
        crp->crp_buf = (caddr_t)q->q_dst_m;
    }

    /* copy out IV for future use */
    if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) {
        for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
            if (crd->crd_alg != CRYPTO_DES_CBC &&
                crd->crd_alg != CRYPTO_3DES_CBC &&
                crd->crd_alg != CRYPTO_AES_CBC)
                continue;

            if (crd->crd_alg == CRYPTO_AES_CBC)
                ivsize = 16;
            else
                ivsize = 8;

            if (crp->crp_flags & CRYPTO_F_SKBUF)
                /*
                m_copydata((struct sk_buff *)crp->crp_buf,
                    crd->crd_skip + crd->crd_len - 8, 8,
                    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
                */
                crypto_copydata(crp->crp_flags, (caddr_t)crp->crp_buf,
                    crd->crd_skip + crd->crd_len - ivsize, ivsize,
                    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
            else if (crp->crp_flags & CRYPTO_F_IOV) {
                /*
                cuio_copydata((struct uio *)crp->crp_buf,
                    crd->crd_skip + crd->crd_len - 8, 8,
                    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
                */
                crypto_copydata(crp->crp_flags, (caddr_t)crp->crp_buf,
                    crd->crd_skip + crd->crd_len - ivsize, ivsize,
                    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
            }
            break;
        }
    }

    for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
        if (crd->crd_alg != CRYPTO_MD5_HMAC &&
            crd->crd_alg != CRYPTO_SHA1_HMAC)
            continue;

        if (crp->crp_flags & CRYPTO_F_SKBUF)
            /*
            m_copyback((struct sk_buff *)crp->crp_buf,
                crd->crd_inject, 12,
                dmap->d_dma->d_macbuf);
            */
#if 0
            /* BUG? it does not honor the mac len.. */
            crypto_copyback(crp->crp_flags, crp->crp_buf,
                crd->crd_inject, 12,
                (caddr_t)dmap->d_dma->d_macbuf);
#else
            crypto_copyback(crp->crp_flags, crp->crp_buf,
                crd->crd_inject,
                sc->sc_sessions[q->q_sesn].ses_mlen,
                (caddr_t)dmap->d_dma->d_macbuf);
#endif
        else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac)
            bcopy((caddr_t)dmap->d_dma->d_macbuf,
                crp->crp_mac,
                sc->sc_sessions[q->q_sesn].ses_mlen);
        break;
    }

    BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
    crypto_done(crp);
}
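/*
 * Byte-wise copy between the source and destination buffers while
 * skipping the region [hoffset, toffset) that the chip writes itself;
 * only used by the (not yet implemented) unaligned CRYPTO_F_SKBUF path.
 */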
void
ubsec_mcopy(struct sk_buff *srcm, struct sk_buff *dstm, int hoffset, int toffset)
{
    int i, j, dlen, slen;
    caddr_t dptr, sptr;

    j = 0;
    sptr = srcm->data;
    slen = srcm->len;
    dptr = dstm->data;
    dlen = dstm->len;

    for (i = 0; i < min(slen, dlen); i++, j++) {
        if (j < hoffset || j >= toffset)
            *dptr = *sptr;
        dptr++;
        sptr++;
    }
}
int
ubsec_dma_malloc(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma,
    size_t size, int mapflags)
{
    dma->dma_vaddr = dma_alloc_coherent(sc->sc_dv,
        size, &dma->dma_paddr, GFP_KERNEL);

    if (likely(dma->dma_vaddr))
    {
        dma->dma_size = size;
        return (0);
    }

    DPRINTF("could not allocate %zu bytes of coherent memory.\n", size);

    return (1);
}
void
ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
{
    dma_free_coherent(sc->sc_dv, dma->dma_size, dma->dma_vaddr,
        dma->dma_paddr);
}
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
void
ubsec_reset_board(struct ubsec_softc *sc)
{
    volatile u_int32_t ctrl;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif
    DPRINTF("Send reset signal to chip.\n");

    ctrl = READ_REG(sc, BS_CTRL);
    ctrl |= BS_CTRL_RESET;
    WRITE_REG(sc, BS_CTRL, ctrl);

    /*
     * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
     */
    udelay(1);
}
/*
 * Init Broadcom registers
 */
void
ubsec_init_board(struct ubsec_softc *sc)
{
    u_int32_t ctrl;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif
    DPRINTF("Initialize chip.\n");

    ctrl = READ_REG(sc, BS_CTRL);
    ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
    ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT | BS_CTRL_DMAERR;

    WRITE_REG(sc, BS_CTRL, ctrl);

    /* Set chip capabilities (BCM5365P) */
    sc->sc_flags |= UBS_FLAGS_LONGCTX | UBS_FLAGS_AES;
}
/*
 * Clean up after a chip crash.
 * It is assumed that the caller has spin_lock_irq(sc_ringmtx).
 */
void
ubsec_cleanchip(struct ubsec_softc *sc)
{
    struct ubsec_q *q;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif
    DPRINTF("Clean up queues after chip crash.\n");

    while (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
        q = BSD_SIMPLEQ_FIRST(&sc->sc_qchip);
        BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
        ubsec_free_q(sc, q);
    }
}
/*
 * Free a ubsec_q and fail its crypto request.
 * It is assumed that the caller has spin_lock_irq(sc_ringmtx).
 */
int
ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
{
    struct ubsec_q *q2;
    struct cryptop *crp;
    int npkts;
    int i;

#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif

    npkts = q->q_nstacked_mcrs;

    for (i = 0; i < npkts; i++) {
        if (q->q_stacked_mcr[i]) {
            q2 = q->q_stacked_mcr[i];

            if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
#ifdef NOTYET
                m_freem(q2->q_dst_m);
#else
                printk(KERN_ERR "%s,%d: SKB not supported\n", __FILE__, __LINE__);
#endif

            crp = (struct cryptop *)q2->q_crp;

            BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);

            crp->crp_etype = EFAULT;
            crypto_done(crp);
        } else {
            break;
        }
    }

    /*
     * Free header MCR
     */
    if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
#ifdef NOTYET
        m_freem(q->q_dst_m);
#else
        printk(KERN_ERR "%s,%d: SKB not supported\n", __FILE__, __LINE__);
#endif

    crp = (struct cryptop *)q->q_crp;

    BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);

    crp->crp_etype = EFAULT;
    crypto_done(crp);

    return (0);
}
/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller has spin_lock_irq(sc_ringmtx).
 */
void
ubsec_totalreset(struct ubsec_softc *sc)
{
#ifdef UBSEC_DEBUG
    DPRINTF("%s()\n", __FUNCTION__);
#endif
    DPRINTF("initiate total chip reset.. \n");
    ubsec_reset_board(sc);
    ubsec_init_board(sc);
    ubsec_cleanchip(sc);
}
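/* Debug helpers: pretty-print a single packet buffer descriptor and a
 * complete MCR including all stacked packets. */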
void
ubsec_dump_pb(struct ubsec_pktbuf *pb)
{
    printf("addr 0x%x (0x%x) next 0x%x\n",
        pb->pb_addr, pb->pb_len, pb->pb_next);
}

void
ubsec_dump_mcr(struct ubsec_mcr *mcr)
{
    struct ubsec_mcr_add *ma;
    int i;

    printf("MCR:\n");
    printf(" pkts: %u, flags 0x%x\n",
        letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags));
    ma = (struct ubsec_mcr_add *)&mcr->mcr_cmdctxp;
    for (i = 0; i < letoh16(mcr->mcr_pkts); i++) {
        printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i,
            letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen),
            letoh16(ma->mcr_reserved));
        printf(" %d: ipkt ", i);
        ubsec_dump_pb(&ma->mcr_ipktbuf);
        printf(" %d: opkt ", i);
        ubsec_dump_pb(&ma->mcr_opktbuf);
        ma++;
    }
    printf("END MCR\n");
}
static int __init mod_init(void)
{
    return ssb_driver_register(&ubsec_ssb_driver);
}

static void __exit mod_exit(void)
{
    ssb_driver_unregister(&ubsec_ssb_driver);
}

module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Daniel Mueller <daniel@danm.de>");
MODULE_LICENSE("BSD");
MODULE_DESCRIPTION("OCF driver for BCM5365P IPSec Core");
MODULE_VERSION(DRV_MODULE_VERSION);