/*
 * An OCF module that uses the linux kernel cryptoapi, based on the
 * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
 * but is mostly unrecognisable,
 *
 * Written by David McCullough <david_mccullough@mcafee.com>
 * Copyright (C) 2004-2011 David McCullough
 * Copyright (C) 2004-2005 Intel Corporation.
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 * 1. distributions of this source code include the above copyright
 *    notice, this list of conditions and the following disclaimer;
 *
 * 2. distributions in binary form include the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in the documentation and/or other associated materials;
 *
 * 3. the copyright holder's name is not used to endorse products
 *    built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 * ---------------------------------------------------------------------------
 */
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
#include <linux/scatterlist.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
#include <crypto/hash.h>
#endif
#include <cryptodev.h>
struct {
	softc_device_decl sc_dev;
} swcr_softc;

#ifndef offset_in_page
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
#endif
#define SW_TYPE_CIPHER		0x01
#define SW_TYPE_HMAC		0x02
#define SW_TYPE_HASH		0x04
#define SW_TYPE_COMP		0x08
#define SW_TYPE_BLKCIPHER	0x10
#define SW_TYPE_ALG_MASK	0x1f

#define SW_TYPE_ASYNC		0x8000

#define SW_TYPE_INUSE		0x10000000

/* We change some of the above if we have an async interface */
#define SW_TYPE_ALG_AMASK	(SW_TYPE_ALG_MASK | SW_TYPE_ASYNC)

#define SW_TYPE_ABLKCIPHER	(SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC)
#define SW_TYPE_AHASH		(SW_TYPE_HASH | SW_TYPE_ASYNC)
#define SW_TYPE_AHMAC		(SW_TYPE_HMAC | SW_TYPE_ASYNC)

#define SCATTERLIST_MAX 16
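
/*
 * Example of how these combine: an async HMAC session ends up with
 * sw_type == SW_TYPE_AHMAC == (SW_TYPE_HMAC | SW_TYPE_ASYNC) == 0x8002,
 * so masking with SW_TYPE_ALG_AMASK still identifies the algorithm class
 * while the SW_TYPE_ASYNC bit selects the async code paths below.
 */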
struct swcr_data {
	struct work_struct	workq;
	int			sw_type;
	int			sw_alg;
	struct crypto_tfm	*sw_tfm;
	spinlock_t		sw_tfm_lock;
	union {
		struct { char *sw_key; int sw_klen; int sw_mlen; } hmac;
		void *sw_comp_buf;
	} u;
	struct swcr_data	*sw_next;
};

struct swcr_req {
	struct swcr_data	*sw_head;
	struct swcr_data	*sw;
	struct cryptop		*crp;
	struct cryptodesc	*crd;
	struct scatterlist	sg[SCATTERLIST_MAX];
	unsigned char		iv[EALG_MAX_BLOCK_LEN];
	char			result[HASH_MAX_LEN];
	void			*crypto_req;
};
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static kmem_cache_t *swcr_req_cache;
#else
static struct kmem_cache *swcr_req_cache;
#endif

#ifndef CRYPTO_TFM_MODE_CBC
/*
 * As of linux-2.6.21 this is no longer defined, and presumably no longer
 * needed to be passed into the crypto core code.
 */
#define CRYPTO_TFM_MODE_CBC	0
#define CRYPTO_TFM_MODE_ECB	0
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
/*
 * Linux 2.6.19 introduced a new Crypto API; set up macros to convert the
 * new API into the old one.
 */

/* Symmetric/Block Cipher */
struct blkcipher_desc
{
	struct crypto_tfm *tfm;
	void *info;
};
#define ecb(X)				#X , CRYPTO_TFM_MODE_ECB
#define cbc(X)				#X , CRYPTO_TFM_MODE_CBC
#define crypto_has_blkcipher(X, Y, Z)	crypto_alg_available(X, 0)
#define crypto_blkcipher_cast(X)	X
#define crypto_blkcipher_tfm(X)		X
#define crypto_alloc_blkcipher(X, Y, Z)	crypto_alloc_tfm(X, mode)
#define crypto_blkcipher_ivsize(X)	crypto_tfm_alg_ivsize(X)
#define crypto_blkcipher_blocksize(X)	crypto_tfm_alg_blocksize(X)
#define crypto_blkcipher_setkey(X, Y, Z)	crypto_cipher_setkey(X, Y, Z)
#define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
		crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
		crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_set_flags(x, y)	/* nop */
#define crypto_free_blkcipher(x)	crypto_free_tfm(x)
#define crypto_free_comp		crypto_free_tfm
#define crypto_free_hash		crypto_free_tfm

/* Hash/HMAC/Digest */
struct hash_desc
{
	struct crypto_tfm *tfm;
};
#define hmac(X)				#X , 0
#define crypto_has_hash(X, Y, Z)	crypto_alg_available(X, 0)
#define crypto_hash_cast(X)		X
#define crypto_hash_tfm(X)		X
#define crypto_alloc_hash(X, Y, Z)	crypto_alloc_tfm(X, mode)
#define crypto_hash_digestsize(X)	crypto_tfm_alg_digestsize(X)
#define crypto_hash_digest(W, X, Y, Z) \
		crypto_digest_digest((W)->tfm, X, sg_num, Z)

/* Asymmetric Cipher */
#define crypto_has_cipher(X, Y, Z)	crypto_alg_available(X, 0)

/* Compression */
#define crypto_has_comp(X, Y, Z)	crypto_alg_available(X, 0)
#define crypto_comp_tfm(X)		X
#define crypto_comp_cast(X)		X
#define crypto_alloc_comp(X, Y, Z)	crypto_alloc_tfm(X, mode)
#define plain(X)	#X , 0
#else
	#define ecb(X)		"ecb(" #X ")" , 0
	#define cbc(X)		"cbc(" #X ")" , 0
	#define hmac(X)		"hmac(" #X ")" , 0
	#define plain(X)	#X , 0
#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
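
/*
 * With the macros above the rest of this file can simply say cbc(aes),
 * hmac(sha1), etc.  On >= 2.6.19 kernels that expands to the algorithm
 * string plus a dummy mode ("cbc(aes)", 0); on older kernels it expands to
 * the bare name plus CRYPTO_TFM_MODE_CBC for the legacy crypto_alloc_tfm()
 * interface.
 */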
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* no ablkcipher in older kernels */
#define crypto_alloc_ablkcipher(a,b,c)		(NULL)
#define crypto_ablkcipher_tfm(x)		((struct crypto_tfm *)(x))
#define crypto_ablkcipher_set_flags(a, b)	/* nop */
#define crypto_ablkcipher_setkey(x, y, z)	(-EINVAL)
#define crypto_has_ablkcipher(a,b,c)		(0)
#else
#define HAVE_ABLKCIPHER
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
/* no ahash in older kernels */
#define crypto_ahash_tfm(x)			((struct crypto_tfm *)(x))
#define crypto_alloc_ahash(a,b,c)		(NULL)
#define crypto_ahash_digestsize(x)		0
#else
#define HAVE_AHASH
#endif
struct crypto_details {
	char *alg_name;
	int mode;
	int sw_type;
};

static struct crypto_details crypto_details[] = {
	[CRYPTO_DES_CBC]        = { cbc(des),          SW_TYPE_BLKCIPHER, },
	[CRYPTO_3DES_CBC]       = { cbc(des3_ede),     SW_TYPE_BLKCIPHER, },
	[CRYPTO_BLF_CBC]        = { cbc(blowfish),     SW_TYPE_BLKCIPHER, },
	[CRYPTO_CAST_CBC]       = { cbc(cast5),        SW_TYPE_BLKCIPHER, },
	[CRYPTO_SKIPJACK_CBC]   = { cbc(skipjack),     SW_TYPE_BLKCIPHER, },
	[CRYPTO_MD5_HMAC]       = { hmac(md5),         SW_TYPE_HMAC, },
	[CRYPTO_SHA1_HMAC]      = { hmac(sha1),        SW_TYPE_HMAC, },
	[CRYPTO_RIPEMD160_HMAC] = { hmac(ripemd160),   SW_TYPE_HMAC, },
	[CRYPTO_MD5_KPDK]       = { plain(md5-kpdk),   SW_TYPE_HASH, },
	[CRYPTO_SHA1_KPDK]      = { plain(sha1-kpdk),  SW_TYPE_HASH, },
	[CRYPTO_AES_CBC]        = { cbc(aes),          SW_TYPE_BLKCIPHER, },
	[CRYPTO_ARC4]           = { ecb(arc4),         SW_TYPE_BLKCIPHER, },
	[CRYPTO_MD5]            = { plain(md5),        SW_TYPE_HASH, },
	[CRYPTO_SHA1]           = { plain(sha1),       SW_TYPE_HASH, },
	[CRYPTO_NULL_HMAC]      = { hmac(digest_null), SW_TYPE_HMAC, },
	[CRYPTO_NULL_CBC]       = { cbc(cipher_null),  SW_TYPE_BLKCIPHER, },
	[CRYPTO_DEFLATE_COMP]   = { plain(deflate),    SW_TYPE_COMP, },
	[CRYPTO_SHA2_256_HMAC]  = { hmac(sha256),      SW_TYPE_HMAC, },
	[CRYPTO_SHA2_384_HMAC]  = { hmac(sha384),      SW_TYPE_HMAC, },
	[CRYPTO_SHA2_512_HMAC]  = { hmac(sha512),      SW_TYPE_HMAC, },
	[CRYPTO_CAMELLIA_CBC]   = { cbc(camellia),     SW_TYPE_BLKCIPHER, },
	[CRYPTO_SHA2_256]       = { plain(sha256),     SW_TYPE_HASH, },
	[CRYPTO_SHA2_384]       = { plain(sha384),     SW_TYPE_HASH, },
	[CRYPTO_SHA2_512]       = { plain(sha512),     SW_TYPE_HASH, },
	[CRYPTO_RIPEMD160]      = { plain(ripemd160),  SW_TYPE_HASH, },
};
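
/*
 * swcr_newsession() indexes this table directly by OCF algorithm number:
 * crypto_details[CRYPTO_AES_CBC].alg_name is "cbc(aes)" on a current
 * kernel, and an entry with a NULL or empty alg_name means the algorithm
 * is not supported by this driver.
 */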
int32_t swcr_id = -1;
module_param(swcr_id, int, 0444);
MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");

int swcr_fail_if_compression_grows = 1;
module_param(swcr_fail_if_compression_grows, int, 0644);
MODULE_PARM_DESC(swcr_fail_if_compression_grows,
		"Treat compression that results in more data as a failure");

int swcr_no_ahash = 0;
module_param(swcr_no_ahash, int, 0644);
MODULE_PARM_DESC(swcr_no_ahash,
		"Do not use async hash/hmac even if available");

int swcr_no_ablk = 0;
module_param(swcr_no_ablk, int, 0644);
MODULE_PARM_DESC(swcr_no_ablk,
		"Do not use async blk ciphers even if available");

static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum = 0;

static int swcr_process(device_t, struct cryptop *, int);
static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
static int swcr_freesession(device_t, u_int64_t);

static device_method_t swcr_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession, swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,    swcr_process),
};

#define debug swcr_debug
int swcr_debug = 0;
module_param(swcr_debug, int, 0644);
MODULE_PARM_DESC(swcr_debug, "Enable debug");
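
/*
 * All of the writable parameters can also be set at load time, e.g.
 * (assuming the module is built as cryptosoft.ko):
 *
 *	modprobe cryptosoft swcr_no_ahash=1 swcr_debug=1
 *
 * swcr_id is read-only (0444); the others can be changed later through
 * /sys/module/cryptosoft/parameters/.
 */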
static void swcr_process_req(struct swcr_req *req);

/*
 * Some things just need to be run with user context no matter what;
 * the kernel compression libs use vmalloc/vfree, for example.
 */
typedef struct {
	struct work_struct wq;
	void (*func)(void *arg);
	void *arg;
} execute_later_t;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static void
doing_it_now(struct work_struct *wq)
{
	execute_later_t *w = container_of(wq, execute_later_t, wq);
	(w->func)(w->arg);
	kfree(w);
}
#else
static void
doing_it_now(void *arg)
{
	execute_later_t *w = (execute_later_t *) arg;
	(w->func)(w->arg);
	kfree(w);
}
#endif

static void
execute_later(void (fn)(void *), void *arg)
{
	execute_later_t *w;

	w = (execute_later_t *) kmalloc(sizeof(execute_later_t), SLAB_ATOMIC);
	if (w) {
		memset(w, 0, sizeof(*w));
		w->func = fn;
		w->arg = arg;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
		INIT_WORK(&w->wq, doing_it_now);
#else
		INIT_WORK(&w->wq, doing_it_now, w);
#endif
		schedule_work(&w->wq);
	}
}
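
/*
 * Typical use, as in swcr_freesession() and swcr_process_req() below:
 * defer a call that may sleep out of atomic context, e.g.
 *
 *	execute_later((void (*)(void *))crypto_free_comp, (void *)tfm);
 *
 * The wrapped function then runs from the shared workqueue with process
 * context, and the execute_later_t bookkeeping is freed afterwards.
 */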
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	u_int32_t i;
	int error;
	char *algo;
	int mode;

	dprintk("%s()\n", __FUNCTION__);
	if (sid == NULL || cri == NULL) {
		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
			return ENOBUFS;
		}
		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

		/* Copy existing sessions */
		if (swcr_sessions) {
			memcpy(swd, swcr_sessions,
					(swcr_sesnum / 2) * sizeof(struct swcr_data *));
			kfree(swcr_sessions);
		}
		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
				SLAB_ATOMIC);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
			return ENOBUFS;
		}
		memset(*swd, 0, sizeof(struct swcr_data));
		if (cri->cri_alg < 0 ||
				cri->cri_alg >= sizeof(crypto_details)/sizeof(crypto_details[0])) {
			printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		algo = crypto_details[cri->cri_alg].alg_name;
		if (!algo || !*algo) {
			printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		mode = crypto_details[cri->cri_alg].mode;
		(*swd)->sw_type = crypto_details[cri->cri_alg].sw_type;
		(*swd)->sw_alg = cri->cri_alg;

		spin_lock_init(&(*swd)->sw_tfm_lock);

		/* Algorithm specific configuration */
		switch (cri->cri_alg) {
		case CRYPTO_NULL_CBC:
			cri->cri_klen = 0; /* make it work with crypto API */
			break;
		default:
			break;
		}
		if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) {
			dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__,
					algo, mode);

			/* try async first */
			(*swd)->sw_tfm = swcr_no_ablk ? NULL :
					crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
			if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm)) {
				dprintk("%s %s cipher is async\n", __FUNCTION__, algo);
				(*swd)->sw_type |= SW_TYPE_ASYNC;
			} else {
				(*swd)->sw_tfm = crypto_blkcipher_tfm(
						crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
				if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm))
					dprintk("%s %s cipher is sync\n", __FUNCTION__, algo);
			}
			if (!(*swd)->sw_tfm || IS_ERR((*swd)->sw_tfm)) {
				int err;
				dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n",
						algo, mode);
				err = IS_ERR((*swd)->sw_tfm) ? -(PTR_ERR((*swd)->sw_tfm)) : EINVAL;
				(*swd)->sw_tfm = NULL; /* ensure NULL */
				swcr_freesession(NULL, i);
				return err;
			}

			if (debug) {
				dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
						__FUNCTION__, cri->cri_klen, (cri->cri_klen + 7) / 8);
				for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
					dprintk("%s0x%x", (i % 8) ? " " : "\n ",
							cri->cri_key[i] & 0xff);
				dprintk("\n");
			}
			if ((*swd)->sw_type & SW_TYPE_ASYNC) {
				/* OCF doesn't enforce keys */
				crypto_ablkcipher_set_flags(
						__crypto_ablkcipher_cast((*swd)->sw_tfm),
						CRYPTO_TFM_REQ_WEAK_KEY);
				error = crypto_ablkcipher_setkey(
						__crypto_ablkcipher_cast((*swd)->sw_tfm),
						cri->cri_key, (cri->cri_klen + 7) / 8);
			} else {
				/* OCF doesn't enforce keys */
				crypto_blkcipher_set_flags(
						crypto_blkcipher_cast((*swd)->sw_tfm),
						CRYPTO_TFM_REQ_WEAK_KEY);
				error = crypto_blkcipher_setkey(
						crypto_blkcipher_cast((*swd)->sw_tfm),
						cri->cri_key, (cri->cri_klen + 7) / 8);
			}
			if (error) {
				printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
						(*swd)->sw_tfm->crt_flags);
				swcr_freesession(NULL, i);
				return error;
			}
		} else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) {
			dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__,
					algo, mode);

			/* try async first */
			(*swd)->sw_tfm = swcr_no_ahash ? NULL :
					crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0));
			if ((*swd)->sw_tfm) {
				dprintk("%s %s hash is async\n", __FUNCTION__, algo);
				(*swd)->sw_type |= SW_TYPE_ASYNC;
			} else {
				dprintk("%s %s hash is sync\n", __FUNCTION__, algo);
				(*swd)->sw_tfm = crypto_hash_tfm(
						crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
			}

			if (!(*swd)->sw_tfm) {
				dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
						algo, mode);
				swcr_freesession(NULL, i);
				return EINVAL;
			}

			(*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
			(*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
					SLAB_ATOMIC);
			if ((*swd)->u.hmac.sw_key == NULL) {
				swcr_freesession(NULL, i);
				dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
				return ENOBUFS;
			}
			memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
			if (cri->cri_mlen) {
				(*swd)->u.hmac.sw_mlen = cri->cri_mlen;
			} else if ((*swd)->sw_type & SW_TYPE_ASYNC) {
				(*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize(
						__crypto_ahash_cast((*swd)->sw_tfm));
			} else {
				(*swd)->u.hmac.sw_mlen = crypto_hash_digestsize(
						crypto_hash_cast((*swd)->sw_tfm));
			}
		} else if ((*swd)->sw_type & SW_TYPE_COMP) {
			(*swd)->sw_tfm = crypto_comp_tfm(
					crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
			if (!(*swd)->sw_tfm) {
				dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
						algo, mode);
				swcr_freesession(NULL, i);
				return EINVAL;
			}
			(*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
			if ((*swd)->u.sw_comp_buf == NULL) {
				swcr_freesession(NULL, i);
				dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
				return ENOBUFS;
			}
		} else {
			printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type);
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	struct swcr_data *swd;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	dprintk("%s()\n", __FUNCTION__);
	if (sid > swcr_sesnum || swcr_sessions == NULL ||
			swcr_sessions[sid] == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
		case SW_TYPE_AHMAC:
		case SW_TYPE_AHASH:
			crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm));
			break;
#endif
#ifdef HAVE_ABLKCIPHER
		case SW_TYPE_ABLKCIPHER:
			crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm));
			break;
#endif
		case SW_TYPE_BLKCIPHER:
			crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm));
			break;
		case SW_TYPE_HMAC:
		case SW_TYPE_HASH:
			crypto_free_hash(crypto_hash_cast(swd->sw_tfm));
			break;
		case SW_TYPE_COMP:
			if (in_interrupt())
				execute_later((void (*)(void *))crypto_free_comp, (void *)crypto_comp_cast(swd->sw_tfm));
			else
				crypto_free_comp(crypto_comp_cast(swd->sw_tfm));
			break;
		default:
			crypto_free_tfm(swd->sw_tfm);
			break;
		}

		if (swd->sw_type & SW_TYPE_COMP) {
			if (swd->u.sw_comp_buf)
				kfree(swd->u.sw_comp_buf);
		} else {
			if (swd->u.hmac.sw_key)
				kfree(swd->u.hmac.sw_key);
		}
		kfree(swd);
	}
	return 0;
}
static void swcr_process_req_complete(struct swcr_req *req)
{
	dprintk("%s()\n", __FUNCTION__);

	if (req->sw->sw_type & SW_TYPE_INUSE) {
		unsigned long flags;
		spin_lock_irqsave(&req->sw->sw_tfm_lock, flags);
		req->sw->sw_type &= ~SW_TYPE_INUSE;
		spin_unlock_irqrestore(&req->sw->sw_tfm_lock, flags);
	}

	if (req->crp->crp_etype)
		goto done;

	switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
#if defined(HAVE_AHASH)
	case SW_TYPE_AHMAC:
	case SW_TYPE_AHASH:
		crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
				req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
		ahash_request_free(req->crypto_req);
		break;
#endif
#if defined(HAVE_ABLKCIPHER)
	case SW_TYPE_ABLKCIPHER:
		ablkcipher_request_free(req->crypto_req);
		break;
#endif
	case SW_TYPE_BLKCIPHER:
		break;
	default:
		req->crp->crp_etype = EINVAL;
		goto done;
	}

	req->crd = req->crd->crd_next;
	if (req->crd) {
		swcr_process_req(req);
		return;
	}

done:
	dprintk("%s crypto_done %p\n", __FUNCTION__, req);
	crypto_done(req->crp);
	kmem_cache_free(swcr_req_cache, req);
}
#if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH)
static void swcr_process_callback(struct crypto_async_request *creq, int err)
{
	struct swcr_req *req = creq->data;

	dprintk("%s()\n", __FUNCTION__);

	if (err == -EINPROGRESS)
		return;
	dprintk("%s() fail %d\n", __FUNCTION__, -err);
	req->crp->crp_etype = -err;

	swcr_process_req_complete(req);
}
#endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */
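
/*
 * For the async (ablkcipher/ahash) paths the crypto layer may complete a
 * request long after swcr_process_req() has returned; swcr_process_callback()
 * then records any error in crp_etype and re-enters
 * swcr_process_req_complete(), which either starts the next cryptodesc in
 * the chain or hands the finished request back to OCF via crypto_done().
 */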
static void swcr_process_req(struct swcr_req *req)
{
	struct swcr_data *sw;
	struct cryptop *crp = req->crp;
	struct cryptodesc *crd = req->crd;
	struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
	struct uio *uiop = (struct uio *) crp->crp_buf;
	int sg_num, sg_len, skip;

	dprintk("%s()\n", __FUNCTION__);

	/*
	 * Find the crypto context.
	 *
	 * XXX Note that the logic here prevents us from having
	 * XXX the same algorithm multiple times in a session
	 * XXX (or rather, we can but it won't give us the right
	 * XXX results). To do that, we'd need some way of differentiating
	 * XXX between the various instances of an algorithm (so we can
	 * XXX locate the correct crypto context).
	 */
	for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
		;

	/* No such context ? */
	if (sw == NULL) {
		crp->crp_etype = EINVAL;
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		goto done;
	}

	/*
	 * For some types we need to ensure there is only one user at a time,
	 * as state stored in the tfm during an operation can otherwise be
	 * corrupted.
	 */
	switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
#ifdef HAVE_AHASH
	case SW_TYPE_AHMAC:
	case SW_TYPE_AHASH:
#endif
	case SW_TYPE_HMAC:
	case SW_TYPE_HASH: {
		unsigned long flags;
		spin_lock_irqsave(&sw->sw_tfm_lock, flags);
		if (sw->sw_type & SW_TYPE_INUSE) {
			spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
			execute_later((void (*)(void *))swcr_process_req, (void *)req);
			return;
		}
		sw->sw_type |= SW_TYPE_INUSE;
		spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
		break;
	}
	}
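
	/*
	 * SW_TYPE_INUSE is the other half of this handshake: it is set here
	 * under sw_tfm_lock and cleared in swcr_process_req_complete(), so a
	 * second request arriving for the same (shared) hash tfm is simply
	 * re-queued via execute_later() instead of trampling the in-progress
	 * digest state.
	 */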
	skip = crd->crd_skip;

	/*
	 * Set up the SG list; skip crd_skip bytes from the start of the buffer.
	 */
	memset(req->sg, 0, sizeof(req->sg));
	sg_init_table(req->sg, SCATTERLIST_MAX);
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		int i, len;

		sg_num = 0;
		sg_len = 0;

		if (skip < skb_headlen(skb)) {
			len = skb_headlen(skb) - skip;
			if (len + sg_len > crd->crd_len)
				len = crd->crd_len - sg_len;
			sg_set_page(&req->sg[sg_num],
					virt_to_page(skb->data + skip), len,
					offset_in_page(skb->data + skip));
			sg_len += len;
			sg_num++;
			skip = 0;
		} else
			skip -= skb_headlen(skb);

		for (i = 0; sg_len < crd->crd_len &&
				i < skb_shinfo(skb)->nr_frags &&
				sg_num < SCATTERLIST_MAX; i++) {
			if (skip < skb_shinfo(skb)->frags[i].size) {
				len = skb_shinfo(skb)->frags[i].size - skip;
				if (len + sg_len > crd->crd_len)
					len = crd->crd_len - sg_len;
				sg_set_page(&req->sg[sg_num],
						skb_frag_page(&skb_shinfo(skb)->frags[i]),
						len,
						skb_shinfo(skb)->frags[i].page_offset + skip);
				sg_len += len;
				sg_num++;
				skip = 0;
			} else
				skip -= skb_shinfo(skb)->frags[i].size;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		int len;

		sg_len = 0;
		for (sg_num = 0; sg_len < crd->crd_len &&
				sg_num < uiop->uio_iovcnt &&
				sg_num < SCATTERLIST_MAX; sg_num++) {
			if (skip <= uiop->uio_iov[sg_num].iov_len) {
				len = uiop->uio_iov[sg_num].iov_len - skip;
				if (len + sg_len > crd->crd_len)
					len = crd->crd_len - sg_len;
				sg_set_page(&req->sg[sg_num],
						virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
						len,
						offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
				sg_len += len;
				skip = 0;
			} else
				skip -= uiop->uio_iov[sg_num].iov_len;
		}
	} else {
		sg_len = (crp->crp_ilen - skip);
		if (sg_len > crd->crd_len)
			sg_len = crd->crd_len;
		sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
				sg_len, offset_in_page(crp->crp_buf + skip));
		sg_num = 1;
	}
	sg_mark_end(&req->sg[sg_num-1]);
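
	/*
	 * At this point req->sg describes the crd_skip..crd_skip+crd_len
	 * region of the request buffer for whichever of the three OCF buffer
	 * layouts was supplied: a fragmented sk_buff (CRYPTO_F_SKBUF), an
	 * iovec list (CRYPTO_F_IOV), or a plain contiguous buffer.
	 * Everything below operates only on this scatterlist.
	 */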
	switch (sw->sw_type & SW_TYPE_ALG_AMASK) {

#ifdef HAVE_AHASH
	case SW_TYPE_AHMAC:
	case SW_TYPE_AHASH:
		{
		int ret;

		/* check we have room for the result */
		if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
			dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
					"digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
					crd->crd_inject, sw->u.hmac.sw_mlen);
			crp->crp_etype = EINVAL;
			goto done;
		}

		req->crypto_req =
				ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm), GFP_ATOMIC);
		if (!req->crypto_req) {
			crp->crp_etype = ENOMEM;
			dprintk("%s,%d: ENOMEM ahash_request_alloc", __FILE__, __LINE__);
			goto done;
		}

		ahash_request_set_callback(req->crypto_req,
				CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

		memset(req->result, 0, sizeof(req->result));

		if (sw->sw_type & SW_TYPE_AHMAC)
			crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
					sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
		ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
		ret = crypto_ahash_digest(req->crypto_req);
		switch (ret) {
		case -EINPROGRESS:
		case -EBUSY:
			return;
		default:
		case 0:
			dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
			crp->crp_etype = ret;
			goto done;
		}
		} break;
#endif /* HAVE_AHASH */
#ifdef HAVE_ABLKCIPHER
	case SW_TYPE_ABLKCIPHER: {
		int ret;
		unsigned char *ivp = req->iv;
		int ivsize =
			crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));

		if (sg_len < crypto_ablkcipher_blocksize(
				__crypto_ablkcipher_cast(sw->sw_tfm))) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
					sg_len, crypto_ablkcipher_blocksize(
						__crypto_ablkcipher_cast(sw->sw_tfm)));
			goto done;
		}

		if (ivsize > sizeof(req->iv)) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
			goto done;
		}

		req->crypto_req = ablkcipher_request_alloc(
				__crypto_ablkcipher_cast(sw->sw_tfm), GFP_ATOMIC);
		if (!req->crypto_req) {
			crp->crp_etype = ENOMEM;
			dprintk("%s,%d: ENOMEM ablkcipher_request_alloc",
					__FILE__, __LINE__);
			goto done;
		}

		ablkcipher_request_set_callback(req->crypto_req,
				CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);

		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			int i, error;

			if (debug) {
				dprintk("%s key:", __FUNCTION__);
				for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
					dprintk("%s0x%x", (i % 8) ? " " : "\n ",
							crd->crd_key[i] & 0xff);
				dprintk("\n");
			}
			/* OCF doesn't enforce keys */
			crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
					CRYPTO_TFM_REQ_WEAK_KEY);
			error = crypto_ablkcipher_setkey(
					__crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
					(crd->crd_klen + 7) / 8);
			if (error) {
				dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
						error, sw->sw_tfm->crt_flags);
				crp->crp_etype = -error;
			}
		}

		if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */

			if (crd->crd_flags & CRD_F_IV_EXPLICIT)
				ivp = crd->crd_iv;
			else
				get_random_bytes(ivp, ivsize);
			/*
			 * do we have to copy the IV back to the buffer ?
			 */
			if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			}
			ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
					sg_len, ivp);
			ret = crypto_ablkcipher_encrypt(req->crypto_req);

		} else { /*decrypt */

			if (crd->crd_flags & CRD_F_IV_EXPLICIT)
				ivp = crd->crd_iv;
			else
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
					sg_len, ivp);
			ret = crypto_ablkcipher_decrypt(req->crypto_req);
		}

		switch (ret) {
		case -EINPROGRESS:
		case -EBUSY:
			return;
		default:
		case 0:
			dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
			crp->crp_etype = ret;
			goto done;
		}
		} break;
#endif /* HAVE_ABLKCIPHER */
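
	/*
	 * IV handling is the same for the async case above and the sync case
	 * below: CRD_F_IV_EXPLICIT means the caller supplied the IV in the
	 * descriptor; otherwise encryption generates a random IV (and, unless
	 * CRD_F_IV_PRESENT is set, writes it into the buffer at crd_inject),
	 * while decryption reads the IV from the buffer at crd_inject via
	 * crypto_copydata().
	 */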
	case SW_TYPE_BLKCIPHER: {
		unsigned char iv[EALG_MAX_BLOCK_LEN];
		unsigned char *ivp = iv;
		struct blkcipher_desc desc;
		int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));

		if (sg_len < crypto_blkcipher_blocksize(
				crypto_blkcipher_cast(sw->sw_tfm))) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
					sg_len, crypto_blkcipher_blocksize(
						crypto_blkcipher_cast(sw->sw_tfm)));
			goto done;
		}

		if (ivsize > sizeof(iv)) {
			crp->crp_etype = EINVAL;
			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
			goto done;
		}

		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
			int i, error;

			if (debug) {
				dprintk("%s key:", __FUNCTION__);
				for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
					dprintk("%s0x%x", (i % 8) ? " " : "\n ",
							crd->crd_key[i] & 0xff);
				dprintk("\n");
			}
			/* OCF doesn't enforce keys */
			crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
					CRYPTO_TFM_REQ_WEAK_KEY);
			error = crypto_blkcipher_setkey(
					crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
					(crd->crd_klen + 7) / 8);
			if (error) {
				dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
						error, sw->sw_tfm->crt_flags);
				crp->crp_etype = -error;
			}
		}

		memset(&desc, 0, sizeof(desc));
		desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);

		if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */

			if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
				ivp = crd->crd_iv;
			} else
				get_random_bytes(ivp, ivsize);
			/*
			 * do we have to copy the IV back to the buffer ?
			 */
			if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);
			}
			crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);

		} else { /*decrypt */

			if (crd->crd_flags & CRD_F_IV_EXPLICIT)
				ivp = crd->crd_iv;
			else
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						crd->crd_inject, ivsize, (caddr_t)ivp);

			crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
		}
	} break;

	case SW_TYPE_HMAC:
	case SW_TYPE_HASH: {
		char result[HASH_MAX_LEN];
		struct hash_desc desc;

		/* check we have room for the result */
		if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
			dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
					"digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
					crd->crd_inject, sw->u.hmac.sw_mlen);
			crp->crp_etype = EINVAL;
			goto done;
		}

		memset(&desc, 0, sizeof(desc));
		desc.tfm = crypto_hash_cast(sw->sw_tfm);

		memset(result, 0, sizeof(result));

		if (sw->sw_type & SW_TYPE_HMAC) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
			crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
					req->sg, sg_num, result);
#else
			crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
					sw->u.hmac.sw_klen);
			crypto_hash_digest(&desc, req->sg, sg_len, result);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
		} else { /* SW_TYPE_HASH */
			crypto_hash_digest(&desc, req->sg, sg_len, result);
		}

		crypto_copyback(crp->crp_flags, crp->crp_buf,
				crd->crd_inject, sw->u.hmac.sw_mlen, result);
	} break;
	case SW_TYPE_COMP: {
		void *ibuf = NULL;
		void *obuf = sw->u.sw_comp_buf;
		int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
		int ret = 0;

		/*
		 * We need to use an additional copy if there is more than one
		 * input chunk since the kernel comp routines do not handle
		 * SG yet.  Otherwise we just use the input buffer as is.
		 * Rather than allocate another buffer we just split the tmp
		 * buffer we already have.
		 * Perhaps we should just use zlib directly ?
		 */
		if (sg_num > 1) {
			int blk;

			ibuf = obuf;
			for (blk = 0; blk < sg_num; blk++) {
				memcpy(obuf, sg_virt(&req->sg[blk]),
						req->sg[blk].length);
				obuf += req->sg[blk].length;
			}
			olen -= sg_len;
		} else
			ibuf = sg_virt(&req->sg[0]);

		if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
			ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
					ibuf, ilen, obuf, &olen);
			if (!ret && olen > crd->crd_len) {
				dprintk("cryptosoft: ERANGE compress %d into %d\n",
						crd->crd_len, olen);
				if (swcr_fail_if_compression_grows)
					ret = ERANGE;
			}
		} else { /* decompress */
			ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
					ibuf, ilen, obuf, &olen);
			if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
				dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
						"space for %d, at offset %d\n",
						crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
				ret = ETOOSMALL;
			}
		}

		dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);

		/*
		 * On success copy the result back; the linux crypto API returns
		 * -errno, so we need to fix that up.
		 */
		crp->crp_etype = ret < 0 ? -ret : ret;
		if (ret == 0) {
			/* copy back the result and return its size */
			crypto_copyback(crp->crp_flags, crp->crp_buf,
					crd->crd_inject, olen, obuf);
			crp->crp_olen = olen;
		}
	} break;

	default:
		/* Unknown/unsupported algorithm */
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
		break;
	}

done:
	swcr_process_req_complete(req);
}
/*
 * Process a crypto request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_req *req = NULL;
	u_int32_t lid;

	dprintk("%s()\n", __FUNCTION__);
	/* Sanity check */
	if (crp == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		return EINVAL;
	}

	crp->crp_etype = 0;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
			swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
		goto done;
	}

	/*
	 * Do some error checking outside of the loop for SKB and IOV
	 * processing; this leaves us with valid skb or uiop pointers for later.
	 */
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
			printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
					skb_shinfo(skb)->nr_frags);
			goto done;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		struct uio *uiop = (struct uio *) crp->crp_buf;
		if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
			printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
					uiop->uio_iovcnt);
			goto done;
		}
	}

	/*
	 * setup a new request ready for queuing
	 */
	req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC);
	if (req == NULL) {
		dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
		crp->crp_etype = ENOMEM;
		goto done;
	}
	memset(req, 0, sizeof(*req));

	req->sw_head = swcr_sessions[lid];
	req->crp = crp;
	req->crd = crp->crp_desc;

	swcr_process_req(req);
	return 0;

done:
	crypto_done(crp);
	if (req)
		kmem_cache_free(swcr_req_cache, req);
	return 0;
}
static int
cryptosoft_init(void)
{
	int i, sw_type, mode;
	char *algo;

	dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);

	swcr_req_cache = kmem_cache_create("cryptosoft_req",
				sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
				, NULL
#endif
				);
	if (!swcr_req_cache) {
		printk("cryptosoft: failed to create request cache\n");
		return -ENOMEM;
	}

	softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);

	swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		printk("cryptosoft: Software crypto device cannot initialize!");
		return -ENODEV;
	}

#define REGISTER(alg) \
		crypto_register(swcr_id, alg, 0,0)

	for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) {
		int found;

		algo = crypto_details[i].alg_name;
		if (!algo || !*algo) {
			dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
			continue;
		}

		mode = crypto_details[i].mode;
		sw_type = crypto_details[i].sw_type;

		found = 0;
		switch (sw_type & SW_TYPE_ALG_MASK) {
		case SW_TYPE_CIPHER:
			found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC);
			break;
		case SW_TYPE_HMAC:
			found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0);
			break;
		case SW_TYPE_HASH:
			found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0);
			break;
		case SW_TYPE_COMP:
			found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC);
			break;
		case SW_TYPE_BLKCIPHER:
			found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
			if (!found && !swcr_no_ablk)
				found = crypto_has_ablkcipher(algo, 0, 0);
			break;
		}
		if (found) {
			REGISTER(i);
		} else {
			dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
					__FUNCTION__, sw_type, i, algo);
		}
	}
	return 0;
}
static void
cryptosoft_exit(void)
{
	dprintk("%s()\n", __FUNCTION__);
	crypto_unregister_all(swcr_id);
	swcr_id = -1;
	kmem_cache_destroy(swcr_req_cache);
}

late_initcall(cryptosoft_init);
module_exit(cryptosoft_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");