/*
 * An OCF module that uses the linux kernel cryptoapi, based on the
 * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
 * but is mostly unrecognisable.
 *
 * Written by David McCullough <david_mccullough@mcafee.com>
 * Copyright (C) 2004-2010 David McCullough
 * Copyright (C) 2004-2005 Intel Corporation.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 * 1. distributions of this source code include the above copyright
 *    notice, this list of conditions and the following disclaimer;
 *
 * 2. distributions in binary form include the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in the documentation and/or other associated materials;
 *
 * 3. the copyright holder's name is not used to endorse products
 *    built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 * ---------------------------------------------------------------------------
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
#include <linux/scatterlist.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
#include <crypto/hash.h>
#endif

#include <cryptodev.h>
#include <uio.h>
struct {
    softc_device_decl sc_dev;
} swcr_softc;

#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
#define SW_TYPE_CIPHER      0x01
#define SW_TYPE_HMAC        0x02
#define SW_TYPE_HASH        0x04
#define SW_TYPE_COMP        0x08
#define SW_TYPE_BLKCIPHER   0x10
#define SW_TYPE_ALG_MASK    0x1f

#define SW_TYPE_ASYNC       0x8000

/* We change some of the above if we have an async interface */

#define SW_TYPE_ALG_AMASK   (SW_TYPE_ALG_MASK | SW_TYPE_ASYNC)

#define SW_TYPE_ABLKCIPHER  (SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC)
#define SW_TYPE_AHASH       (SW_TYPE_HASH | SW_TYPE_ASYNC)
#define SW_TYPE_AHMAC       (SW_TYPE_HMAC | SW_TYPE_ASYNC)
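/*
 * sw_type is a bitmask, not an enum: the low bits pick the algorithm class
 * and SW_TYPE_ASYNC is OR'd in at session setup when the kernel hands us an
 * asynchronous tfm.  So SW_TYPE_AHMAC == (0x02 | 0x8000) == 0x8002, and a
 * switch on (sw_type & SW_TYPE_ALG_AMASK) dispatches the sync and async
 * variants of an algorithm to different cases.
 */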
#define SCATTERLIST_MAX 16
/* per-algorithm context, one per cryptoini in a session */
struct swcr_data {
    int                 sw_type;
    int                 sw_alg;
    struct crypto_tfm   *sw_tfm;
    union {
        struct {
            char *sw_key;
            int   sw_klen;
            int   sw_mlen;
        } hmac;
        void *sw_comp_buf;
    } u;
    struct swcr_data    *sw_next;
};

/* per-request state, allocated from swcr_req_cache for each cryptop */
struct swcr_req {
    struct swcr_data    *sw_head;
    struct swcr_data    *sw;
    struct cryptop      *crp;
    struct cryptodesc   *crd;
    struct scatterlist   sg[SCATTERLIST_MAX];
    unsigned char        iv[EALG_MAX_BLOCK_LEN];
    char                 result[HASH_MAX_LEN];
    void                *crypto_req;
};
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *swcr_req_cache;
#else
static struct kmem_cache *swcr_req_cache;
#endif

#ifndef CRYPTO_TFM_MODE_CBC
/*
 * As of linux-2.6.21 this is no longer defined, and presumably no longer
 * needs to be passed into the crypto core code.
 */
#define CRYPTO_TFM_MODE_CBC 0
#define CRYPTO_TFM_MODE_ECB 0
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
/*
 * Linux 2.6.19 introduced a new Crypto API; set up macros to map the new
 * API onto the old one on earlier kernels.
 */

/* Symmetric/Block Cipher */
struct blkcipher_desc
{
    struct crypto_tfm *tfm;
    void *info;
};
#define ecb(X)                              #X , CRYPTO_TFM_MODE_ECB
#define cbc(X)                              #X , CRYPTO_TFM_MODE_CBC
#define crypto_has_blkcipher(X, Y, Z)       crypto_alg_available(X, 0)
#define crypto_blkcipher_cast(X)            X
#define crypto_blkcipher_tfm(X)             X
/* NB: picks up the caller's local "mode" variable */
#define crypto_alloc_blkcipher(X, Y, Z)     crypto_alloc_tfm(X, mode)
#define crypto_blkcipher_ivsize(X)          crypto_tfm_alg_ivsize(X)
#define crypto_blkcipher_blocksize(X)       crypto_tfm_alg_blocksize(X)
#define crypto_blkcipher_setkey(X, Y, Z)    crypto_cipher_setkey(X, Y, Z)
#define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
        crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
        crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_set_flags(x, y)    /* nop */

/* Hash/HMAC/Digest */
struct hash_desc
{
    struct crypto_tfm *tfm;
};
#define hmac(X)                         #X , 0
#define crypto_has_hash(X, Y, Z)        crypto_alg_available(X, 0)
#define crypto_hash_cast(X)             X
#define crypto_hash_tfm(X)              X
#define crypto_alloc_hash(X, Y, Z)      crypto_alloc_tfm(X, mode)
#define crypto_hash_digestsize(X)       crypto_tfm_alg_digestsize(X)
/* NB: picks up the caller's local "sg_num" variable */
#define crypto_hash_digest(W, X, Y, Z) \
        crypto_digest_digest((W)->tfm, X, sg_num, Z)

/* Asymmetric Cipher */
#define crypto_has_cipher(X, Y, Z)      crypto_alg_available(X, 0)

/* Compression */
#define crypto_has_comp(X, Y, Z)        crypto_alg_available(X, 0)
#define crypto_comp_tfm(X)              X
#define crypto_comp_cast(X)             X
#define crypto_alloc_comp(X, Y, Z)      crypto_alloc_tfm(X, mode)
#define plain(X)    #X , 0
#else
#define ecb(X)      "ecb(" #X ")" , 0
#define cbc(X)      "cbc(" #X ")" , 0
#define hmac(X)     "hmac(" #X ")" , 0
#define plain(X)    #X , 0
#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
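/*
 * On 2.6.19+ kernels the macros above expand each crypto_details entry
 * below to a { name, mode } pair using the kernel's "mode(cipher)" template
 * convention, e.g. cbc(aes) becomes "cbc(aes)", 0 -- the same name that
 * shows up in /proc/crypto.  On older kernels the name is the bare
 * algorithm and the mode constant carries the chaining mode instead.
 */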
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* no ablkcipher in older kernels */
#define crypto_alloc_ablkcipher(a,b,c)      (NULL)
#define crypto_ablkcipher_tfm(x)            ((struct crypto_tfm *)(x))
#define crypto_ablkcipher_set_flags(a, b)   /* nop */
#define crypto_ablkcipher_setkey(x, y, z)   (-EINVAL)
#define crypto_has_ablkcipher(a,b,c)        (0)
#else
#define HAVE_ABLKCIPHER
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
/* no ahash in older kernels */
#define crypto_ahash_tfm(x)                 ((struct crypto_tfm *)(x))
#define crypto_alloc_ahash(a,b,c)           (NULL)
#define crypto_ahash_digestsize(x)          0
#else
#define HAVE_AHASH
#endif
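/*
 * On old kernels the stubs above make every async allocation attempt fail
 * cleanly (crypto_alloc_ablkcipher()/crypto_alloc_ahash() return NULL), so
 * session setup below silently falls back to the synchronous paths;
 * HAVE_ABLKCIPHER/HAVE_AHASH gate the callback-driven code elsewhere.
 */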
struct crypto_details {
    char *alg_name;
    int mode;
    int sw_type;
};

static struct crypto_details crypto_details[] = {
    [CRYPTO_DES_CBC]        = { cbc(des),          SW_TYPE_BLKCIPHER, },
    [CRYPTO_3DES_CBC]       = { cbc(des3_ede),     SW_TYPE_BLKCIPHER, },
    [CRYPTO_BLF_CBC]        = { cbc(blowfish),     SW_TYPE_BLKCIPHER, },
    [CRYPTO_CAST_CBC]       = { cbc(cast5),        SW_TYPE_BLKCIPHER, },
    [CRYPTO_SKIPJACK_CBC]   = { cbc(skipjack),     SW_TYPE_BLKCIPHER, },
    [CRYPTO_MD5_HMAC]       = { hmac(md5),         SW_TYPE_HMAC, },
    [CRYPTO_SHA1_HMAC]      = { hmac(sha1),        SW_TYPE_HMAC, },
    [CRYPTO_RIPEMD160_HMAC] = { hmac(ripemd160),   SW_TYPE_HMAC, },
    [CRYPTO_MD5_KPDK]       = { plain(md5-kpdk),   SW_TYPE_HASH, },
    [CRYPTO_SHA1_KPDK]      = { plain(sha1-kpdk),  SW_TYPE_HASH, },
    [CRYPTO_AES_CBC]        = { cbc(aes),          SW_TYPE_BLKCIPHER, },
    [CRYPTO_ARC4]           = { ecb(arc4),         SW_TYPE_BLKCIPHER, },
    [CRYPTO_MD5]            = { plain(md5),        SW_TYPE_HASH, },
    [CRYPTO_SHA1]           = { plain(sha1),       SW_TYPE_HASH, },
    [CRYPTO_NULL_HMAC]      = { hmac(digest_null), SW_TYPE_HMAC, },
    [CRYPTO_NULL_CBC]       = { cbc(cipher_null),  SW_TYPE_BLKCIPHER, },
    [CRYPTO_DEFLATE_COMP]   = { plain(deflate),    SW_TYPE_COMP, },
    [CRYPTO_SHA2_256_HMAC]  = { hmac(sha256),      SW_TYPE_HMAC, },
    [CRYPTO_SHA2_384_HMAC]  = { hmac(sha384),      SW_TYPE_HMAC, },
    [CRYPTO_SHA2_512_HMAC]  = { hmac(sha512),      SW_TYPE_HMAC, },
    [CRYPTO_CAMELLIA_CBC]   = { cbc(camellia),     SW_TYPE_BLKCIPHER, },
    [CRYPTO_SHA2_256]       = { plain(sha256),     SW_TYPE_HASH, },
    [CRYPTO_SHA2_384]       = { plain(sha384),     SW_TYPE_HASH, },
    [CRYPTO_SHA2_512]       = { plain(sha512),     SW_TYPE_HASH, },
    [CRYPTO_RIPEMD160]      = { plain(ripemd160),  SW_TYPE_HASH, },
};
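/*
 * Example: crypto_details[CRYPTO_AES_CBC] is { "cbc(aes)", 0,
 * SW_TYPE_BLKCIPHER } on current kernels, so an OCF AES-CBC session is
 * backed by the kernel's cbc(aes) transform.  Slots the designated
 * initializers leave empty (NULL alg_name) mark OCF algorithm numbers
 * this driver cannot service.
 */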
int32_t swcr_id = -1;
module_param(swcr_id, int, 0444);
MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");

int swcr_fail_if_compression_grows = 1;
module_param(swcr_fail_if_compression_grows, int, 0644);
MODULE_PARM_DESC(swcr_fail_if_compression_grows,
        "Treat compression that results in more data as a failure");

int swcr_no_ahash = 0;
module_param(swcr_no_ahash, int, 0644);
MODULE_PARM_DESC(swcr_no_ahash,
        "Do not use async hash/hmac even if available");

int swcr_no_ablk = 0;
module_param(swcr_no_ablk, int, 0644);
MODULE_PARM_DESC(swcr_no_ablk,
        "Do not use async blk ciphers even if available");

static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum = 0;

static int swcr_process(device_t, struct cryptop *, int);
static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
static int swcr_freesession(device_t, u_int64_t);

static device_method_t swcr_methods = {
    /* crypto device methods */
    DEVMETHOD(cryptodev_newsession, swcr_newsession),
    DEVMETHOD(cryptodev_freesession,swcr_freesession),
    DEVMETHOD(cryptodev_process,    swcr_process),
};

#define debug swcr_debug
int swcr_debug = 0;
module_param(swcr_debug, int, 0644);
MODULE_PARM_DESC(swcr_debug, "Enable debug");
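/*
 * The "#define debug swcr_debug" above points the shared dprintk() debug
 * flag (see cryptodev.h) at this module's own parameter, so debug output
 * can be toggled at runtime, typically via
 * /sys/module/cryptosoft/parameters/swcr_debug.
 */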
static void swcr_process_req(struct swcr_req *req);

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
    struct swcr_data **swd;
    u_int32_t i;
    int error;
    char *algo;
    int mode;

    dprintk("%s()\n", __FUNCTION__);
    if (sid == NULL || cri == NULL) {
        dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }

    if (swcr_sessions) {
        for (i = 1; i < swcr_sesnum; i++)
            if (swcr_sessions[i] == NULL)
                break;
    } else
        i = 1; /* NB: to silence compiler warning */

    if (swcr_sessions == NULL || i == swcr_sesnum) {
        if (swcr_sessions == NULL) {
            i = 1; /* We leave swcr_sessions[0] empty */
            swcr_sesnum = CRYPTO_SW_SESSIONS;
        } else
            swcr_sesnum *= 2;

        swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
        if (swd == NULL) {
            /* Reset session number */
            if (swcr_sesnum == CRYPTO_SW_SESSIONS)
                swcr_sesnum = 0;
            else
                swcr_sesnum /= 2;
            dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
            return ENOBUFS;
        }
        memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

        /* Copy existing sessions */
        if (swcr_sessions) {
            memcpy(swd, swcr_sessions,
                    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
            kfree(swcr_sessions);
        }

        swcr_sessions = swd;
    }

    swd = &swcr_sessions[i];
    *sid = i;

    while (cri) {
        *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
                SLAB_ATOMIC);
        if (*swd == NULL) {
            swcr_freesession(NULL, i);
            dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
            return ENOBUFS;
        }
        memset(*swd, 0, sizeof(struct swcr_data));

        if (cri->cri_alg < 0 ||
                cri->cri_alg >= sizeof(crypto_details)/sizeof(crypto_details[0])) {
            printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
            swcr_freesession(NULL, i);
            return EINVAL;
        }

        algo = crypto_details[cri->cri_alg].alg_name;
        if (!algo || !*algo) {
            printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
            swcr_freesession(NULL, i);
            return EINVAL;
        }

        mode = crypto_details[cri->cri_alg].mode;
        (*swd)->sw_type = crypto_details[cri->cri_alg].sw_type;
        (*swd)->sw_alg = cri->cri_alg;

        /* Algorithm specific configuration */
        switch (cri->cri_alg) {
        case CRYPTO_NULL_CBC:
            cri->cri_klen = 0; /* make it work with crypto API */
            break;
        default:
            break;
        }

        if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) {
            dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__,
                    algo, mode);

            /* try async first */
            (*swd)->sw_tfm = swcr_no_ablk ? NULL :
                    crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
            if ((*swd)->sw_tfm) {
                dprintk("%s %s cipher is async\n", __FUNCTION__, algo);
                (*swd)->sw_type |= SW_TYPE_ASYNC;
            } else {
                dprintk("%s %s cipher is sync\n", __FUNCTION__, algo);
                (*swd)->sw_tfm = crypto_blkcipher_tfm(
                        crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
            }
            if (!(*swd)->sw_tfm) {
                dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n",
                        algo, mode);
                swcr_freesession(NULL, i);
                return EINVAL;
            }

            if (debug) {
                dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
                        __FUNCTION__, cri->cri_klen, (cri->cri_klen + 7) / 8);
                for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
                    dprintk("%s0x%x", (i % 8) ? " " : "\n ",
                            cri->cri_key[i] & 0xff);
                dprintk("\n");
            }
            if ((*swd)->sw_type & SW_TYPE_ASYNC) {
                /* OCF doesn't enforce keys */
                crypto_ablkcipher_set_flags(
                        __crypto_ablkcipher_cast((*swd)->sw_tfm),
                        CRYPTO_TFM_REQ_WEAK_KEY);
                error = crypto_ablkcipher_setkey(
                        __crypto_ablkcipher_cast((*swd)->sw_tfm),
                        cri->cri_key, (cri->cri_klen + 7) / 8);
            } else {
                /* OCF doesn't enforce keys */
                crypto_blkcipher_set_flags(
                        crypto_blkcipher_cast((*swd)->sw_tfm),
                        CRYPTO_TFM_REQ_WEAK_KEY);
                error = crypto_blkcipher_setkey(
                        crypto_blkcipher_cast((*swd)->sw_tfm),
                        cri->cri_key, (cri->cri_klen + 7) / 8);
            }
            if (error) {
                printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
                        (*swd)->sw_tfm->crt_flags);
                swcr_freesession(NULL, i);
                return error;
            }
        } else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) {
            dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__,
                    algo, mode);

            /* try async first */
            (*swd)->sw_tfm = swcr_no_ahash ? NULL :
                    crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0));
            if ((*swd)->sw_tfm) {
                dprintk("%s %s hash is async\n", __FUNCTION__, algo);
                (*swd)->sw_type |= SW_TYPE_ASYNC;
            } else {
                dprintk("%s %s hash is sync\n", __FUNCTION__, algo);
                (*swd)->sw_tfm = crypto_hash_tfm(
                        crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
            }

            if (!(*swd)->sw_tfm) {
                dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
                        algo, mode);
                swcr_freesession(NULL, i);
                return EINVAL;
            }

            (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
            (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
                    SLAB_ATOMIC);
            if ((*swd)->u.hmac.sw_key == NULL) {
                swcr_freesession(NULL, i);
                dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
                return ENOBUFS;
            }
            memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
            if (cri->cri_mlen) {
                (*swd)->u.hmac.sw_mlen = cri->cri_mlen;
            } else if ((*swd)->sw_type & SW_TYPE_ASYNC) {
                (*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize(
                        __crypto_ahash_cast((*swd)->sw_tfm));
            } else {
                (*swd)->u.hmac.sw_mlen = crypto_hash_digestsize(
                        crypto_hash_cast((*swd)->sw_tfm));
            }
        } else if ((*swd)->sw_type & SW_TYPE_COMP) {
            (*swd)->sw_tfm = crypto_comp_tfm(
                    crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
            if (!(*swd)->sw_tfm) {
                dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
                        algo, mode);
                swcr_freesession(NULL, i);
                return EINVAL;
            }
            (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
            if ((*swd)->u.sw_comp_buf == NULL) {
                swcr_freesession(NULL, i);
                dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
                return ENOBUFS;
            }
        } else {
            printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type);
            swcr_freesession(NULL, i);
            return EINVAL;
        }

        cri = cri->cri_next;
        swd = &((*swd)->sw_next);
    }
    return 0;
}
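/*
 * A session is thus a linked list of swcr_data, one node per cryptoini in
 * the chain passed to swcr_newsession(), anchored at swcr_sessions[sid].
 * swcr_process_req() later walks this list to find the context matching
 * each crypto descriptor.
 */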
/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
    struct swcr_data *swd;
    u_int32_t sid = CRYPTO_SESID2LID(tid);

    dprintk("%s()\n", __FUNCTION__);
    if (sid > swcr_sesnum || swcr_sessions == NULL ||
            swcr_sessions[sid] == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }

    /* Silently accept and return */
    if (sid == 0)
        return 0;

    while ((swd = swcr_sessions[sid]) != NULL) {
        swcr_sessions[sid] = swd->sw_next;

        switch (swd->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
        case SW_TYPE_AHMAC:
        case SW_TYPE_AHASH:
            crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm));
            break;
#endif
#ifdef HAVE_ABLKCIPHER
        case SW_TYPE_ABLKCIPHER:
            crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm));
            break;
#endif
        case SW_TYPE_BLKCIPHER:
            crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm));
            break;
        case SW_TYPE_HMAC:
        case SW_TYPE_HASH:
            crypto_free_hash(crypto_hash_cast(swd->sw_tfm));
            break;
        case SW_TYPE_COMP:
            crypto_free_comp(crypto_comp_cast(swd->sw_tfm));
            break;
        default:
            crypto_free_tfm(swd->sw_tfm);
            break;
        }

        if (swd->sw_type & SW_TYPE_COMP) {
            if (swd->u.sw_comp_buf)
                kfree(swd->u.sw_comp_buf);
        } else {
            if (swd->u.hmac.sw_key)
                kfree(swd->u.hmac.sw_key);
        }
        kfree(swd);
    }
    return 0;
}
#if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH)
/* older kernels had no async interface */

static void swcr_process_callback(struct crypto_async_request *creq, int err)
{
    struct swcr_req *req = creq->data;

    dprintk("%s()\n", __FUNCTION__);

    if (err) {
        if (err == -EINPROGRESS)
            return;
        dprintk("%s() fail %d\n", __FUNCTION__, -err);
        req->crp->crp_etype = -err;
        goto done;
    }

    switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
    case SW_TYPE_AHMAC:
    case SW_TYPE_AHASH:
        crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
                req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
        ahash_request_free(req->crypto_req);
        break;
    case SW_TYPE_ABLKCIPHER:
        ablkcipher_request_free(req->crypto_req);
        break;
    default:
        req->crp->crp_etype = EINVAL;
        goto done;
    }

    req->crd = req->crd->crd_next;
    if (req->crd) {
        swcr_process_req(req); /* start the next descriptor in the chain */
        return;
    }

done:
    dprintk("%s crypto_done %p\n", __FUNCTION__, req);
    crypto_done(req->crp);
    kmem_cache_free(swcr_req_cache, req);
}
#endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */
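/*
 * Note on the async path: swcr_process_req() submits one descriptor at a
 * time.  When the kernel completes it, the callback above either advances
 * to the next cryptodesc in the chain (re-entering swcr_process_req()) or,
 * once the chain is exhausted or an error has occurred, hands the finished
 * cryptop back to OCF via crypto_done() and frees the request.
 */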
static void swcr_process_req(struct swcr_req *req)
{
    struct swcr_data *sw;
    struct cryptop *crp = req->crp;
    struct cryptodesc *crd = req->crd;
    struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
    struct uio *uiop = (struct uio *) crp->crp_buf;
    int sg_num, sg_len, skip;

    dprintk("%s()\n", __FUNCTION__);

    /*
     * Find the crypto context.
     *
     * XXX Note that the logic here prevents us from having
     * XXX the same algorithm multiple times in a session
     * XXX (or rather, we can but it won't give us the right
     * XXX results). To do that, we'd need some way of differentiating
     * XXX between the various instances of an algorithm (so we can
     * XXX locate the correct crypto context).
     */
    for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
        ;

    /* No such context ? */
    if (sw == NULL) {
        crp->crp_etype = EINVAL;
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        goto done;
    }

    req->sw = sw;
    skip = crd->crd_skip;

    /*
     * setup the SG list skip from the start of the buffer
     */
    memset(req->sg, 0, sizeof(req->sg));
    sg_init_table(req->sg, SCATTERLIST_MAX);
    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        int i, len;

        sg_num = 0;
        sg_len = 0;

        if (skip < skb_headlen(skb)) {
            len = skb_headlen(skb) - skip;
            if (len + sg_len > crd->crd_len)
                len = crd->crd_len - sg_len;
            sg_set_page(&req->sg[sg_num],
                    virt_to_page(skb->data + skip), len,
                    offset_in_page(skb->data + skip));
            sg_len += len;
            sg_num++;
            skip = 0;
        } else
            skip -= skb_headlen(skb);

        for (i = 0; sg_len < crd->crd_len &&
                i < skb_shinfo(skb)->nr_frags &&
                sg_num < SCATTERLIST_MAX; i++) {
            if (skip < skb_shinfo(skb)->frags[i].size) {
                len = skb_shinfo(skb)->frags[i].size - skip;
                if (len + sg_len > crd->crd_len)
                    len = crd->crd_len - sg_len;
                sg_set_page(&req->sg[sg_num],
                        skb_shinfo(skb)->frags[i].page,
                        len,
                        skb_shinfo(skb)->frags[i].page_offset + skip);
                sg_len += len;
                sg_num++;
                skip = 0;
            } else
                skip -= skb_shinfo(skb)->frags[i].size;
        }
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        int len;

        sg_len = 0;
        for (sg_num = 0; sg_len < crd->crd_len &&
                sg_num < uiop->uio_iovcnt &&
                sg_num < SCATTERLIST_MAX; sg_num++) {
            if (skip <= uiop->uio_iov[sg_num].iov_len) {
                len = uiop->uio_iov[sg_num].iov_len - skip;
                if (len + sg_len > crd->crd_len)
                    len = crd->crd_len - sg_len;
                sg_set_page(&req->sg[sg_num],
                        virt_to_page(uiop->uio_iov[sg_num].iov_base + skip),
                        len,
                        offset_in_page(uiop->uio_iov[sg_num].iov_base + skip));
                sg_len += len;
                skip = 0;
            } else
                skip -= uiop->uio_iov[sg_num].iov_len;
        }
    } else {
        /* contiguous buffer */
        sg_len = (crp->crp_ilen - skip);
        if (sg_len > crd->crd_len)
            sg_len = crd->crd_len;
        sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
                sg_len, offset_in_page(crp->crp_buf + skip));
        sg_num = 1;
    }
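    /*
     * At this point req->sg maps up to SCATTERLIST_MAX segments covering
     * crd_len bytes of payload starting crd_skip bytes into the buffer;
     * sg_num is the number of segments used and sg_len the number of bytes
     * actually mapped (sg_len < crd_len means the buffer was too fragmented).
     */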
    switch (sw->sw_type & SW_TYPE_ALG_AMASK) {

#ifdef HAVE_AHASH
    case SW_TYPE_AHMAC:
    case SW_TYPE_AHASH:
    {
        int ret;

        /* check we have room for the result */
        if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
            dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
                    "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
                    crd->crd_inject, sw->u.hmac.sw_mlen);
            crp->crp_etype = EINVAL;
            goto done;
        }

        req->crypto_req =
                ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm), GFP_KERNEL);
        if (!req->crypto_req) {
            crp->crp_etype = ENOMEM;
            dprintk("%s,%d: ENOMEM ahash_request_alloc\n", __FILE__, __LINE__);
            goto done;
        }

        ahash_request_set_callback(req->crypto_req,
                CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

        memset(req->result, 0, sizeof(req->result));

        if (sw->sw_type & SW_TYPE_AHMAC)
            crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
                    sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
        ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
        ret = crypto_ahash_digest(req->crypto_req);
        switch (ret) {
        case -EINPROGRESS:
        case -EBUSY:
            return;
        default:
        case 0:
            dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
            crp->crp_etype = ret;
            ahash_request_free(req->crypto_req);
            goto done;
        }
    } break;
#endif /* HAVE_AHASH */
#ifdef HAVE_ABLKCIPHER
    case SW_TYPE_ABLKCIPHER: {
        int ret;
        /* the IV must live in the request so it survives an async operation */
        unsigned char *ivp = req->iv;
        int ivsize =
            crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));

        if (sg_len < crypto_ablkcipher_blocksize(
                __crypto_ablkcipher_cast(sw->sw_tfm))) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
                    sg_len, crypto_ablkcipher_blocksize(
                        __crypto_ablkcipher_cast(sw->sw_tfm)));
            goto done;
        }

        if (ivsize > sizeof(req->iv)) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
            goto done;
        }

        req->crypto_req = ablkcipher_request_alloc(
                __crypto_ablkcipher_cast(sw->sw_tfm), GFP_KERNEL);
        if (!req->crypto_req) {
            crp->crp_etype = ENOMEM;
            dprintk("%s,%d: ENOMEM ablkcipher_request_alloc\n",
                    __FILE__, __LINE__);
            goto done;
        }

        ablkcipher_request_set_callback(req->crypto_req,
                CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

        if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
            int i, error;

            if (debug) {
                dprintk("%s key:", __FUNCTION__);
                for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
                    dprintk("%s0x%x", (i % 8) ? " " : "\n ",
                            crd->crd_key[i] & 0xff);
                dprintk("\n");
            }
            /* OCF doesn't enforce keys */
            crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
                    CRYPTO_TFM_REQ_WEAK_KEY);
            error = crypto_ablkcipher_setkey(
                    __crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
                    (crd->crd_klen + 7) / 8);
            if (error) {
                dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
                        error, sw->sw_tfm->crt_flags);
                crp->crp_etype = -error;
                goto done;
            }
        }

        if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                ivp = crd->crd_iv;
            else
                get_random_bytes(ivp, ivsize);
            /*
             * do we have to copy the IV back to the buffer ?
             */
            if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                crypto_copyback(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            }
            ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
                    sg_len, ivp);
            ret = crypto_ablkcipher_encrypt(req->crypto_req);

        } else { /* decrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                ivp = crd->crd_iv;
            else
                crypto_copydata(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
                    sg_len, ivp);
            ret = crypto_ablkcipher_decrypt(req->crypto_req);
        }

        switch (ret) {
        case -EINPROGRESS:
        case -EBUSY:
            return;
        default:
        case 0:
            dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
            crp->crp_etype = ret;
            ablkcipher_request_free(req->crypto_req);
            goto done;
        }
    } break;
#endif /* HAVE_ABLKCIPHER */
    case SW_TYPE_BLKCIPHER: {
        unsigned char iv[EALG_MAX_BLOCK_LEN];
        unsigned char *ivp = iv;
        struct blkcipher_desc desc;
        int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));

        if (sg_len < crypto_blkcipher_blocksize(
                crypto_blkcipher_cast(sw->sw_tfm))) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
                    sg_len, crypto_blkcipher_blocksize(
                        crypto_blkcipher_cast(sw->sw_tfm)));
            goto done;
        }

        if (ivsize > sizeof(iv)) {
            crp->crp_etype = EINVAL;
            dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
            goto done;
        }

        if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
            int i, error;

            if (debug) {
                dprintk("%s key:", __FUNCTION__);
                for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
                    dprintk("%s0x%x", (i % 8) ? " " : "\n ",
                            crd->crd_key[i] & 0xff);
                dprintk("\n");
            }
            /* OCF doesn't enforce keys */
            crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
                    CRYPTO_TFM_REQ_WEAK_KEY);
            error = crypto_blkcipher_setkey(
                    crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
                    (crd->crd_klen + 7) / 8);
            if (error) {
                dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
                        error, sw->sw_tfm->crt_flags);
                crp->crp_etype = -error;
                goto done;
            }
        }

        memset(&desc, 0, sizeof(desc));
        desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);

        if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
                ivp = crd->crd_iv;
            } else
                get_random_bytes(ivp, ivsize);
            /*
             * do we have to copy the IV back to the buffer ?
             */
            if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                crypto_copyback(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            }
            desc.info = ivp;
            crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);

        } else { /* decrypt */

            if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
                ivp = crd->crd_iv;
            } else {
                crypto_copydata(crp->crp_flags, crp->crp_buf,
                        crd->crd_inject, ivsize, (caddr_t)ivp);
            }
            desc.info = ivp;
            crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
        }
    } break;
    case SW_TYPE_HMAC:
    case SW_TYPE_HASH:
    {
        char result[HASH_MAX_LEN];
        struct hash_desc desc;

        /* check we have room for the result */
        if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
            dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
                    "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
                    crd->crd_inject, sw->u.hmac.sw_mlen);
            crp->crp_etype = EINVAL;
            goto done;
        }

        memset(&desc, 0, sizeof(desc));
        desc.tfm = crypto_hash_cast(sw->sw_tfm);

        memset(result, 0, sizeof(result));

        if (sw->sw_type & SW_TYPE_HMAC) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
            crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
                    req->sg, sg_num, result);
#else
            crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
                    sw->u.hmac.sw_klen);
            crypto_hash_digest(&desc, req->sg, sg_len, result);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
        } else { /* SW_TYPE_HASH */
            crypto_hash_digest(&desc, req->sg, sg_len, result);
        }

        crypto_copyback(crp->crp_flags, crp->crp_buf,
                crd->crd_inject, sw->u.hmac.sw_mlen, result);
    } break;
    case SW_TYPE_COMP: {
        void *ibuf;
        void *obuf = sw->u.sw_comp_buf;
        int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
        int ret = 0;

        /*
         * we need to use an additional copy if there is more than one
         * input chunk since the kernel comp routines do not handle
         * SG yet.  Otherwise we just use the input buffer as is.
         * Rather than allocate another buffer we just split the tmp
         * buffer we already have.
         * Perhaps we should just use zlib directly ?
         */
        if (sg_num > 1) {
            int blk;

            ibuf = obuf;
            for (blk = 0; blk < sg_num; blk++) {
                memcpy(obuf, sg_virt(&req->sg[blk]),
                        req->sg[blk].length);
                obuf += req->sg[blk].length;
            }
            olen -= sg_len;
        } else
            ibuf = sg_virt(&req->sg[0]);

        if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
            ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
                    ibuf, ilen, obuf, &olen);
            if (!ret && olen > crd->crd_len) {
                dprintk("cryptosoft: ERANGE compress %d into %d\n",
                        crd->crd_len, olen);
                if (swcr_fail_if_compression_grows)
                    ret = ERANGE;
            }
        } else { /* decompress */
            ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
                    ibuf, ilen, obuf, &olen);
            if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
                dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
                        "space for %d, at offset %d\n",
                        crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
                ret = ETOOSMALL;
            }
        }
        if (ret)
            dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);

        /*
         * on success copy result back,
         * linux crypto API returns -errno, we need to fix that
         */
        crp->crp_etype = ret < 0 ? -ret : ret;
        if (ret == 0) {
            /* copy back the result and return its size */
            crypto_copyback(crp->crp_flags, crp->crp_buf,
                    crd->crd_inject, olen, obuf);
            crp->crp_olen = olen;
        }
    } break;

    default:
        /* Unknown/unsupported algorithm */
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        crp->crp_etype = EINVAL;
        goto done;
    }

done:
    crypto_done(crp);
    kmem_cache_free(swcr_req_cache, req);
}
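/*
 * Completion contract: for synchronous transforms swcr_process_req() falls
 * through to the done label and reports via crypto_done() itself; for async
 * transforms a return code of -EINPROGRESS/-EBUSY makes it return early and
 * completion happens later in swcr_process_callback().
 */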
/*
 * Process a crypto request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
    struct swcr_req *req = NULL;
    u_int32_t lid;

    dprintk("%s()\n", __FUNCTION__);

    if (crp == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        return EINVAL;
    }

    crp->crp_etype = 0;

    if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
        dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
        crp->crp_etype = EINVAL;
        goto done;
    }

    lid = crp->crp_sid & 0xffffffff;
    if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
            swcr_sessions[lid] == NULL) {
        crp->crp_etype = ENOENT;
        dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
        goto done;
    }

    /*
     * do some error checking outside of the loop for SKB and IOV processing
     * this leaves us with valid skb or uiop pointers for later
     */
    if (crp->crp_flags & CRYPTO_F_SKBUF) {
        struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
        if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
            printk("%s,%d: %d nr_frags > SCATTERLIST_MAX\n", __FILE__, __LINE__,
                    skb_shinfo(skb)->nr_frags);
            goto done;
        }
    } else if (crp->crp_flags & CRYPTO_F_IOV) {
        struct uio *uiop = (struct uio *) crp->crp_buf;
        if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
            printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX\n", __FILE__, __LINE__,
                    uiop->uio_iovcnt);
            goto done;
        }
    }

    /*
     * setup a new request ready for queuing
     */
    req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC);
    if (req == NULL) {
        dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
        crp->crp_etype = ENOMEM;
        goto done;
    }
    memset(req, 0, sizeof(*req));

    req->sw_head = swcr_sessions[lid];
    req->crp = crp;
    req->crd = crp->crp_desc;

    swcr_process_req(req);
    return 0;

done:
    crypto_done(crp);
    if (req)
        kmem_cache_free(swcr_req_cache, req);
    return 0;
}
static int
cryptosoft_init(void)
{
    int i, sw_type, mode;
    char *algo;

    dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);

    swcr_req_cache = kmem_cache_create("cryptosoft_req",
            sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
            , NULL
#endif
            );
    if (!swcr_req_cache) {
        printk("cryptosoft: failed to create request cache\n");
        return -ENOENT;
    }

    softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);

    swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
            CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
    if (swcr_id < 0) {
        printk("cryptosoft: Software crypto device cannot initialize!");
        return -ENODEV;
    }

#define REGISTER(alg) \
        crypto_register(swcr_id, alg, 0,0)

    for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) {
        int found;

        algo = crypto_details[i].alg_name;
        if (!algo || !*algo) {
            dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
            continue;
        }

        mode = crypto_details[i].mode;
        sw_type = crypto_details[i].sw_type;

        found = 0;
        switch (sw_type & SW_TYPE_ALG_MASK) {
        case SW_TYPE_CIPHER:
            found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC);
            break;
        case SW_TYPE_HMAC:
            found = crypto_has_hash(algo, 0, swcr_no_ahash ? CRYPTO_ALG_ASYNC : 0);
            break;
        case SW_TYPE_HASH:
            found = crypto_has_hash(algo, 0, swcr_no_ahash ? CRYPTO_ALG_ASYNC : 0);
            break;
        case SW_TYPE_COMP:
            found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC);
            break;
        case SW_TYPE_BLKCIPHER:
            found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
            if (!found && !swcr_no_ablk)
                found = crypto_has_ablkcipher(algo, 0, 0);
            break;
        }
        if (found) {
            REGISTER(i);
        } else {
            dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
                    __FUNCTION__, sw_type, i, algo);
        }
    }
    return 0;
}
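/*
 * Only algorithms whose backing transform actually exists in the running
 * kernel are registered with OCF, so the capability list this driver
 * advertises always mirrors the kernel's crypto configuration.
 */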
static void
cryptosoft_exit(void)
{
    dprintk("%s()\n", __FUNCTION__);
    crypto_unregister_all(swcr_id);
    swcr_id = -1;
    kmem_cache_destroy(swcr_req_cache);
}

late_initcall(cryptosoft_init);
module_exit(cryptosoft_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");