target/linux/generic/files/crypto/ocf/cryptosoft.c
1 /*
2  * An OCF module that uses the linux kernel cryptoapi, based on the
3  * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
4  * but is mostly unrecognisable.
5  *
6  * Written by David McCullough <david_mccullough@mcafee.com>
7  * Copyright (C) 2004-2011 David McCullough
8  * Copyright (C) 2004-2005 Intel Corporation.
9  *
10  * LICENSE TERMS
11  *
12  * The free distribution and use of this software in both source and binary
13  * form is allowed (with or without changes) provided that:
14  *
15  *   1. distributions of this source code include the above copyright
16  *      notice, this list of conditions and the following disclaimer;
17  *
18  *   2. distributions in binary form include the above copyright
19  *      notice, this list of conditions and the following disclaimer
20  *      in the documentation and/or other associated materials;
21  *
22  *   3. the copyright holder's name is not used to endorse products
23  *      built using this software without specific written permission.
24  *
25  * ALTERNATIVELY, provided that this notice is retained in full, this product
26  * may be distributed under the terms of the GNU General Public License (GPL),
27  * in which case the provisions of the GPL apply INSTEAD OF those given above.
28  *
29  * DISCLAIMER
30  *
31  * This software is provided 'as is' with no explicit or implied warranties
32  * in respect of its properties, including, but not limited to, correctness
33  * and/or fitness for purpose.
34  * ---------------------------------------------------------------------------
35  */
36
37 #include <linux/version.h>
38 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
39 #include <linux/config.h>
40 #endif
41 #include <linux/module.h>
42 #include <linux/init.h>
43 #include <linux/list.h>
44 #include <linux/slab.h>
45 #include <linux/sched.h>
46 #include <linux/wait.h>
47 #include <linux/crypto.h>
48 #include <linux/mm.h>
49 #include <linux/skbuff.h>
50 #include <linux/random.h>
51 #include <linux/interrupt.h>
52 #include <linux/spinlock.h>
53 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
54 #include <linux/scatterlist.h>
55 #endif
56 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
57 #include <crypto/hash.h>
58 #endif
59
60 #include <cryptodev.h>
61 #include <uio.h>
62
63 struct {
64         softc_device_decl       sc_dev;
65 } swcr_softc;
66
67 #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
68
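/* classes of transform handled by this driver, ORed with the flags below */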
69 #define SW_TYPE_CIPHER          0x01
70 #define SW_TYPE_HMAC            0x02
71 #define SW_TYPE_HASH            0x04
72 #define SW_TYPE_COMP            0x08
73 #define SW_TYPE_BLKCIPHER       0x10
74 #define SW_TYPE_ALG_MASK        0x1f
75
76 #define SW_TYPE_ASYNC           0x8000
77
78 #define SW_TYPE_INUSE           0x10000000
79
80 /* We change some of the above if we have an async interface */
81
82 #define SW_TYPE_ALG_AMASK       (SW_TYPE_ALG_MASK | SW_TYPE_ASYNC)
83
84 #define SW_TYPE_ABLKCIPHER      (SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC)
85 #define SW_TYPE_AHASH           (SW_TYPE_HASH | SW_TYPE_ASYNC)
86 #define SW_TYPE_AHMAC           (SW_TYPE_HMAC | SW_TYPE_ASYNC)
87
88 #define SCATTERLIST_MAX 16
89
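/*
 * Per-algorithm state for a session.  swcr_newsession() allocates one of
 * these for each cryptoini and chains them via sw_next; sw_tfm is the
 * underlying linux crypto transform and u holds either the HMAC key or
 * the compression bounce buffer.
 */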
90 struct swcr_data {
91         struct work_struct  workq;
92         int                                     sw_type;
93         int                                     sw_alg;
94         struct crypto_tfm       *sw_tfm;
95         spinlock_t                      sw_tfm_lock;
96         union {
97                 struct {
98                         char *sw_key;
99                         int  sw_klen;
100                         int  sw_mlen;
101                 } hmac;
102                 void *sw_comp_buf;
103         } u;
104         struct swcr_data        *sw_next;
105 };
106
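/*
 * Per-request state: the session chain, the descriptor currently being
 * processed, the scatterlist built over the request buffer and any async
 * crypto request that is in flight.
 */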
107 struct swcr_req {
108         struct swcr_data        *sw_head;
109         struct swcr_data        *sw;
110         struct cryptop          *crp;
111         struct cryptodesc       *crd;
112         struct scatterlist       sg[SCATTERLIST_MAX];
113         unsigned char            iv[EALG_MAX_BLOCK_LEN];
114         char                             result[HASH_MAX_LEN];
115         void                            *crypto_req;
116 };
117
118 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
119 static kmem_cache_t *swcr_req_cache;
120 #else
121 static struct kmem_cache *swcr_req_cache;
122 #endif
123
124 #ifndef CRYPTO_TFM_MODE_CBC
125 /*
126  * As of linux-2.6.21 this is no longer defined, and presumably no longer
127  * needed to be passed into the crypto core code.
128  */
129 #define CRYPTO_TFM_MODE_CBC     0
130 #define CRYPTO_TFM_MODE_ECB     0
131 #endif
132
133 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
134         /*
135          * Linux 2.6.19 introduced a new Crypto API; set up macros to convert
136          * the new API into the old API.
137          */
138
139         /* Symmetric/Block Cipher */
140         struct blkcipher_desc
141         {
142                 struct crypto_tfm *tfm;
143                 void *info;
144         };
145         #define ecb(X)                                                          #X , CRYPTO_TFM_MODE_ECB
146         #define cbc(X)                                                          #X , CRYPTO_TFM_MODE_CBC
147         #define crypto_has_blkcipher(X, Y, Z)           crypto_alg_available(X, 0)
148         #define crypto_blkcipher_cast(X)                        X
149         #define crypto_blkcipher_tfm(X)                         X
150         #define crypto_alloc_blkcipher(X, Y, Z)         crypto_alloc_tfm(X, mode)
151         #define crypto_blkcipher_ivsize(X)                      crypto_tfm_alg_ivsize(X)
152         #define crypto_blkcipher_blocksize(X)           crypto_tfm_alg_blocksize(X)
153         #define crypto_blkcipher_setkey(X, Y, Z)        crypto_cipher_setkey(X, Y, Z)
154         #define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
155                                 crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
156         #define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
157                                 crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
158         #define crypto_blkcipher_set_flags(x, y)        /* nop */
159         #define crypto_free_blkcipher(x)                        crypto_free_tfm(x)
160         #define crypto_free_comp                                        crypto_free_tfm
161         #define crypto_free_hash                                        crypto_free_tfm
162
163         /* Hash/HMAC/Digest */
164         struct hash_desc
165         {
166                 struct crypto_tfm *tfm;
167         };
168         #define hmac(X)                                                 #X , 0
169         #define crypto_has_hash(X, Y, Z)                crypto_alg_available(X, 0)
170         #define crypto_hash_cast(X)                             X
171         #define crypto_hash_tfm(X)                              X
172         #define crypto_alloc_hash(X, Y, Z)              crypto_alloc_tfm(X, mode)
173         #define crypto_hash_digestsize(X)               crypto_tfm_alg_digestsize(X)
174         #define crypto_hash_digest(W, X, Y, Z)  \
175                                 crypto_digest_digest((W)->tfm, X, sg_num, Z)
176
177         /* Asymmetric Cipher */
178         #define crypto_has_cipher(X, Y, Z)              crypto_alg_available(X, 0)
179
180         /* Compression */
181         #define crypto_has_comp(X, Y, Z)                crypto_alg_available(X, 0)
182         #define crypto_comp_tfm(X)                              X
183         #define crypto_comp_cast(X)                             X
184         #define crypto_alloc_comp(X, Y, Z)              crypto_alloc_tfm(X, mode)
185         #define plain(X)        #X , 0
186 #else
187         #define ecb(X)  "ecb(" #X ")" , 0
188         #define cbc(X)  "cbc(" #X ")" , 0
189         #define hmac(X) "hmac(" #X ")" , 0
190         #define plain(X)        #X , 0
191 #endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
192
193 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
194 /* no ablkcipher in older kernels */
195 #define crypto_alloc_ablkcipher(a,b,c)          (NULL)
196 #define crypto_ablkcipher_tfm(x)                        ((struct crypto_tfm *)(x))
197 #define crypto_ablkcipher_set_flags(a, b)       /* nop */
198 #define crypto_ablkcipher_setkey(x, y, z)       (-EINVAL)
199 #define crypto_has_ablkcipher(a,b,c)            (0)
200 #else
201 #define HAVE_ABLKCIPHER
202 #endif
203
204 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
205 /* no ahash in older kernels */
206 #define crypto_ahash_tfm(x)                                     ((struct crypto_tfm *)(x))
207 #define crypto_alloc_ahash(a,b,c)                       (NULL)
208 #define crypto_ahash_digestsize(x)                      0
209 #else
210 #define HAVE_AHASH
211 #endif
212
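/*
 * Table mapping OCF algorithm numbers to linux crypto API algorithm names
 * and the SW_TYPE_* class used to drive them; entries with an empty name
 * are treated as unsupported.
 */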
213 struct crypto_details {
214         char *alg_name;
215         int mode;
216         int sw_type;
217 };
218
219 static struct crypto_details crypto_details[] = {
220         [CRYPTO_DES_CBC]         = { cbc(des),          SW_TYPE_BLKCIPHER, },
221         [CRYPTO_3DES_CBC]        = { cbc(des3_ede),     SW_TYPE_BLKCIPHER, },
222         [CRYPTO_BLF_CBC]         = { cbc(blowfish),     SW_TYPE_BLKCIPHER, },
223         [CRYPTO_CAST_CBC]        = { cbc(cast5),        SW_TYPE_BLKCIPHER, },
224         [CRYPTO_SKIPJACK_CBC]    = { cbc(skipjack),     SW_TYPE_BLKCIPHER, },
225         [CRYPTO_MD5_HMAC]        = { hmac(md5),         SW_TYPE_HMAC, },
226         [CRYPTO_SHA1_HMAC]       = { hmac(sha1),        SW_TYPE_HMAC, },
227         [CRYPTO_RIPEMD160_HMAC]  = { hmac(ripemd160),   SW_TYPE_HMAC, },
228         [CRYPTO_MD5_KPDK]        = { plain(md5-kpdk),   SW_TYPE_HASH, },
229         [CRYPTO_SHA1_KPDK]       = { plain(sha1-kpdk),  SW_TYPE_HASH, },
230         [CRYPTO_AES_CBC]         = { cbc(aes),          SW_TYPE_BLKCIPHER, },
231         [CRYPTO_ARC4]            = { ecb(arc4),         SW_TYPE_BLKCIPHER, },
232         [CRYPTO_MD5]             = { plain(md5),        SW_TYPE_HASH, },
233         [CRYPTO_SHA1]            = { plain(sha1),       SW_TYPE_HASH, },
234         [CRYPTO_NULL_HMAC]       = { hmac(digest_null), SW_TYPE_HMAC, },
235         [CRYPTO_NULL_CBC]        = { cbc(cipher_null),  SW_TYPE_BLKCIPHER, },
236         [CRYPTO_DEFLATE_COMP]    = { plain(deflate),    SW_TYPE_COMP, },
237         [CRYPTO_SHA2_256_HMAC]   = { hmac(sha256),      SW_TYPE_HMAC, },
238         [CRYPTO_SHA2_384_HMAC]   = { hmac(sha384),      SW_TYPE_HMAC, },
239         [CRYPTO_SHA2_512_HMAC]   = { hmac(sha512),      SW_TYPE_HMAC, },
240         [CRYPTO_CAMELLIA_CBC]    = { cbc(camellia),     SW_TYPE_BLKCIPHER, },
241         [CRYPTO_SHA2_256]        = { plain(sha256),     SW_TYPE_HASH, },
242         [CRYPTO_SHA2_384]        = { plain(sha384),     SW_TYPE_HASH, },
243         [CRYPTO_SHA2_512]        = { plain(sha512),     SW_TYPE_HASH, },
244         [CRYPTO_RIPEMD160]       = { plain(ripemd160),  SW_TYPE_HASH, },
245 };
246
247 int32_t swcr_id = -1;
248 module_param(swcr_id, int, 0444);
249 MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");
250
251 int swcr_fail_if_compression_grows = 1;
252 module_param(swcr_fail_if_compression_grows, int, 0644);
253 MODULE_PARM_DESC(swcr_fail_if_compression_grows,
254                 "Treat compression that results in more data as a failure");
255
256 int swcr_no_ahash = 0;
257 module_param(swcr_no_ahash, int, 0644);
258 MODULE_PARM_DESC(swcr_no_ahash,
259                 "Do not use async hash/hmac even if available");
260
261 int swcr_no_ablk = 0;
262 module_param(swcr_no_ablk, int, 0644);
263 MODULE_PARM_DESC(swcr_no_ablk,
264                 "Do not use async blk ciphers even if available");
265
266 static struct swcr_data **swcr_sessions = NULL;
267 static u_int32_t swcr_sesnum = 0;
268
269 static  int swcr_process(device_t, struct cryptop *, int);
270 static  int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
271 static  int swcr_freesession(device_t, u_int64_t);
272
273 static device_method_t swcr_methods = {
274         /* crypto device methods */
275         DEVMETHOD(cryptodev_newsession, swcr_newsession),
276         DEVMETHOD(cryptodev_freesession,swcr_freesession),
277         DEVMETHOD(cryptodev_process,    swcr_process),
278 };
279
280 #define debug swcr_debug
281 int swcr_debug = 0;
282 module_param(swcr_debug, int, 0644);
283 MODULE_PARM_DESC(swcr_debug, "Enable debug");
284
285 static void swcr_process_req(struct swcr_req *req);
286
287 /*
288  * some things just need to be run with user context no matter what;
289  * the kernel compression libs, for example, use vmalloc/vfree.
290  */
291
292 typedef struct {
293         struct work_struct wq;
294         void    (*func)(void *arg);
295         void    *arg;
296 } execute_later_t;
297
298 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
299 static void
300 doing_it_now(struct work_struct *wq)
301 {
302         execute_later_t *w = container_of(wq, execute_later_t, wq);
303         (w->func)(w->arg);
304         kfree(w);
305 }
306 #else
307 static void
308 doing_it_now(void *arg)
309 {
310         execute_later_t *w = (execute_later_t *) arg;
311         (w->func)(w->arg);
312         kfree(w);
313 }
314 #endif
315
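/*
 * Schedule fn(arg) to run from the shared workqueue, i.e. with process
 * context, e.g. for freeing a comp tfm from interrupt context or for
 * retrying a request whose tfm is currently in use.
 */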
316 static void
317 execute_later(void (fn)(void *), void *arg)
318 {
319         execute_later_t *w;
320
321         w = (execute_later_t *) kmalloc(sizeof(execute_later_t), SLAB_ATOMIC);
322         if (w) {
323                 memset(w, '\0', sizeof(*w));
324                 w->func = fn;
325                 w->arg = arg;
326 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
327                 INIT_WORK(&w->wq, doing_it_now);
328 #else
329                 INIT_WORK(&w->wq, doing_it_now, w);
330 #endif
331                 schedule_work(&w->wq);
332         }
333 }
334
335 /*
336  * Generate a new software session.
337  */
338 static int
339 swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
340 {
341         struct swcr_data **swd;
342         u_int32_t i;
343         int error;
344         char *algo;
345         int mode;
346
347         dprintk("%s()\n", __FUNCTION__);
348         if (sid == NULL || cri == NULL) {
349                 dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
350                 return EINVAL;
351         }
352
353         if (swcr_sessions) {
354                 for (i = 1; i < swcr_sesnum; i++)
355                         if (swcr_sessions[i] == NULL)
356                                 break;
357         } else
358                 i = 1;          /* NB: to silence compiler warning */
359
360         if (swcr_sessions == NULL || i == swcr_sesnum) {
361                 if (swcr_sessions == NULL) {
362                         i = 1; /* We leave swcr_sessions[0] empty */
363                         swcr_sesnum = CRYPTO_SW_SESSIONS;
364                 } else
365                         swcr_sesnum *= 2;
366
367                 swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
368                 if (swd == NULL) {
369                         /* Reset session number */
370                         if (swcr_sesnum == CRYPTO_SW_SESSIONS)
371                                 swcr_sesnum = 0;
372                         else
373                                 swcr_sesnum /= 2;
374                         dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
375                         return ENOBUFS;
376                 }
377                 memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
378
379                 /* Copy existing sessions */
380                 if (swcr_sessions) {
381                         memcpy(swd, swcr_sessions,
382                             (swcr_sesnum / 2) * sizeof(struct swcr_data *));
383                         kfree(swcr_sessions);
384                 }
385
386                 swcr_sessions = swd;
387         }
388
389         swd = &swcr_sessions[i];
390         *sid = i;
391
392         while (cri) {
393                 *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
394                                 SLAB_ATOMIC);
395                 if (*swd == NULL) {
396                         swcr_freesession(NULL, i);
397                         dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
398                         return ENOBUFS;
399                 }
400                 memset(*swd, 0, sizeof(struct swcr_data));
401
402                 if (cri->cri_alg < 0 ||
403                                 cri->cri_alg >= sizeof(crypto_details)/sizeof(crypto_details[0])) {
404                         printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
405                         swcr_freesession(NULL, i);
406                         return EINVAL;
407                 }
408
409                 algo = crypto_details[cri->cri_alg].alg_name;
410                 if (!algo || !*algo) {
411                         printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
412                         swcr_freesession(NULL, i);
413                         return EINVAL;
414                 }
415
416                 mode = crypto_details[cri->cri_alg].mode;
417                 (*swd)->sw_type = crypto_details[cri->cri_alg].sw_type;
418                 (*swd)->sw_alg = cri->cri_alg;
419
420                 spin_lock_init(&(*swd)->sw_tfm_lock);
421
422                 /* Algorithm specific configuration */
423                 switch (cri->cri_alg) {
424                 case CRYPTO_NULL_CBC:
425                         cri->cri_klen = 0; /* make it work with crypto API */
426                         break;
427                 default:
428                         break;
429                 }
430
431                 if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) {
432                         dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__,
433                                         algo, mode);
434
435                         /* try async first */
436                         (*swd)->sw_tfm = swcr_no_ablk ? NULL :
437                                         crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
438                         if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm)) {
439                                 dprintk("%s %s cipher is async\n", __FUNCTION__, algo);
440                                 (*swd)->sw_type |= SW_TYPE_ASYNC;
441                         } else {
442                                 (*swd)->sw_tfm = crypto_blkcipher_tfm(
443                                                 crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
444                                 if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm))
445                                         dprintk("%s %s cipher is sync\n", __FUNCTION__, algo);
446                         }
447                         if (!(*swd)->sw_tfm || IS_ERR((*swd)->sw_tfm)) {
448                                 int err;
449                                 dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n",
450                                                 algo, mode);
451                                 err = IS_ERR((*swd)->sw_tfm) ? -(PTR_ERR((*swd)->sw_tfm)) : EINVAL;
452                                 (*swd)->sw_tfm = NULL; /* ensure NULL */
453                                 swcr_freesession(NULL, i);
454                                 return err;
455                         }
456
457                         if (debug) {
458                                 dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
459                                                 __FUNCTION__, cri->cri_klen, (cri->cri_klen + 7) / 8);
460                                 for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
461                                         dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
462                                                         cri->cri_key[i] & 0xff);
463                                 dprintk("\n");
464                         }
465                         if ((*swd)->sw_type & SW_TYPE_ASYNC) {
466                                 /* OCF doesn't enforce keys */
467                                 crypto_ablkcipher_set_flags(
468                                                 __crypto_ablkcipher_cast((*swd)->sw_tfm),
469                                                         CRYPTO_TFM_REQ_WEAK_KEY);
470                                 error = crypto_ablkcipher_setkey(
471                                                         __crypto_ablkcipher_cast((*swd)->sw_tfm),
472                                                                 cri->cri_key, (cri->cri_klen + 7) / 8);
473                         } else {
474                                 /* OCF doesn't enforce keys */
475                                 crypto_blkcipher_set_flags(
476                                                 crypto_blkcipher_cast((*swd)->sw_tfm),
477                                                         CRYPTO_TFM_REQ_WEAK_KEY);
478                                 error = crypto_blkcipher_setkey(
479                                                         crypto_blkcipher_cast((*swd)->sw_tfm),
480                                                                 cri->cri_key, (cri->cri_klen + 7) / 8);
481                         }
482                         if (error) {
483                                 printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
484                                                 (*swd)->sw_tfm->crt_flags);
485                                 swcr_freesession(NULL, i);
486                                 return error;
487                         }
488                 } else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) {
489                         dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__,
490                                         algo, mode);
491
492                         /* try async first */
493                         (*swd)->sw_tfm = swcr_no_ahash ? NULL :
494                                         crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0));
495                         if ((*swd)->sw_tfm) {
496                                 dprintk("%s %s hash is async\n", __FUNCTION__, algo);
497                                 (*swd)->sw_type |= SW_TYPE_ASYNC;
498                         } else {
499                                 dprintk("%s %s hash is sync\n", __FUNCTION__, algo);
500                                 (*swd)->sw_tfm = crypto_hash_tfm(
501                                                 crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
502                         }
503
504                         if (!(*swd)->sw_tfm) {
505                                 dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
506                                                 algo, mode);
507                                 swcr_freesession(NULL, i);
508                                 return EINVAL;
509                         }
510
511                         (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
512                         (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
513                                         SLAB_ATOMIC);
514                         if ((*swd)->u.hmac.sw_key == NULL) {
515                                 swcr_freesession(NULL, i);
516                                 dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
517                                 return ENOBUFS;
518                         }
519                         memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
520                         if (cri->cri_mlen) {
521                                 (*swd)->u.hmac.sw_mlen = cri->cri_mlen;
522                         } else if ((*swd)->sw_type & SW_TYPE_ASYNC) {
523                                 (*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize(
524                                                 __crypto_ahash_cast((*swd)->sw_tfm));
525                         } else  {
526                                 (*swd)->u.hmac.sw_mlen = crypto_hash_digestsize(
527                                                 crypto_hash_cast((*swd)->sw_tfm));
528                         }
529                 } else if ((*swd)->sw_type & SW_TYPE_COMP) {
530                         (*swd)->sw_tfm = crypto_comp_tfm(
531                                         crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
532                         if (!(*swd)->sw_tfm) {
533                                 dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
534                                                 algo, mode);
535                                 swcr_freesession(NULL, i);
536                                 return EINVAL;
537                         }
538                         (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
539                         if ((*swd)->u.sw_comp_buf == NULL) {
540                                 swcr_freesession(NULL, i);
541                                 dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
542                                 return ENOBUFS;
543                         }
544                 } else {
545                         printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type);
546                         swcr_freesession(NULL, i);
547                         return EINVAL;
548                 }
549
550                 cri = cri->cri_next;
551                 swd = &((*swd)->sw_next);
552         }
553         return 0;
554 }
555
556 /*
557  * Free a session.
558  */
559 static int
560 swcr_freesession(device_t dev, u_int64_t tid)
561 {
562         struct swcr_data *swd;
563         u_int32_t sid = CRYPTO_SESID2LID(tid);
564
565         dprintk("%s()\n", __FUNCTION__);
566         if (sid >= swcr_sesnum || swcr_sessions == NULL ||
567                         swcr_sessions[sid] == NULL) {
568                 dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
569                 return(EINVAL);
570         }
571
572         /* Silently accept and return */
573         if (sid == 0)
574                 return(0);
575
576         while ((swd = swcr_sessions[sid]) != NULL) {
577                 swcr_sessions[sid] = swd->sw_next;
578                 if (swd->sw_tfm) {
579                         switch (swd->sw_type & SW_TYPE_ALG_AMASK) {
580 #ifdef HAVE_AHASH
581                         case SW_TYPE_AHMAC:
582                         case SW_TYPE_AHASH:
583                                 crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm));
584                                 break;
585 #endif
586 #ifdef HAVE_ABLKCIPHER
587                         case SW_TYPE_ABLKCIPHER:
588                                 crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm));
589                                 break;
590 #endif
591                         case SW_TYPE_BLKCIPHER:
592                                 crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm));
593                                 break;
594                         case SW_TYPE_HMAC:
595                         case SW_TYPE_HASH:
596                                 crypto_free_hash(crypto_hash_cast(swd->sw_tfm));
597                                 break;
598                         case SW_TYPE_COMP:
599                                 if (in_interrupt())
600                                         execute_later((void (*)(void *))crypto_free_comp, (void *)crypto_comp_cast(swd->sw_tfm));
601                                 else
602                                         crypto_free_comp(crypto_comp_cast(swd->sw_tfm));
603                                 break;
604                         default:
605                                 crypto_free_tfm(swd->sw_tfm);
606                                 break;
607                         }
608                         swd->sw_tfm = NULL;
609                 }
610                 if (swd->sw_type & SW_TYPE_COMP) {
611                         if (swd->u.sw_comp_buf)
612                                 kfree(swd->u.sw_comp_buf);
613                 } else {
614                         if (swd->u.hmac.sw_key)
615                                 kfree(swd->u.hmac.sw_key);
616                 }
617                 kfree(swd);
618         }
619         return 0;
620 }
621
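/*
 * Finish the current descriptor: clear the INUSE marker, copy back async
 * hash results and free any async request, then either start the next
 * descriptor or complete the cryptop via crypto_done().
 */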
622 static void swcr_process_req_complete(struct swcr_req *req)
623 {
624         dprintk("%s()\n", __FUNCTION__);
625
626         if (req->sw->sw_type & SW_TYPE_INUSE) {
627                 unsigned long flags;
628                 spin_lock_irqsave(&req->sw->sw_tfm_lock, flags);
629                 req->sw->sw_type &= ~SW_TYPE_INUSE;
630                 spin_unlock_irqrestore(&req->sw->sw_tfm_lock, flags);
631         }
632
633         if (req->crp->crp_etype)
634                 goto done;
635
636         switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
637 #if defined(HAVE_AHASH)
638         case SW_TYPE_AHMAC:
639         case SW_TYPE_AHASH:
640                 crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
641                                 req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
642                 ahash_request_free(req->crypto_req);
643                 break;
644 #endif
645 #if defined(HAVE_ABLKCIPHER)
646         case SW_TYPE_ABLKCIPHER:
647                 ablkcipher_request_free(req->crypto_req);
648                 break;
649 #endif
650         case SW_TYPE_CIPHER:
651         case SW_TYPE_HMAC:
652         case SW_TYPE_HASH:
653         case SW_TYPE_COMP:
654         case SW_TYPE_BLKCIPHER:
655                 break;
656         default:
657                 req->crp->crp_etype = EINVAL;
658                 goto done;
659         }
660
661         req->crd = req->crd->crd_next;
662         if (req->crd) {
663                 swcr_process_req(req);
664                 return;
665         }
666
667 done:
668         dprintk("%s crypto_done %p\n", __FUNCTION__, req);
669         crypto_done(req->crp);
670         kmem_cache_free(swcr_req_cache, req);
671 }
672
673 #if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH)
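/* completion callback for async (ablkcipher/ahash) operations */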
674 static void swcr_process_callback(struct crypto_async_request *creq, int err)
675 {
676         struct swcr_req *req = creq->data;
677
678         dprintk("%s()\n", __FUNCTION__);
679         if (err) {
680                 if (err == -EINPROGRESS)
681                         return;
682                 dprintk("%s() fail %d\n", __FUNCTION__, -err);
683                 req->crp->crp_etype = -err;
684         }
685
686         swcr_process_req_complete(req);
687 }
688 #endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */
689
690
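/*
 * Process one descriptor of a request: find the matching session state,
 * build a scatterlist over the skbuff, iovec or contiguous buffer, then
 * drive the appropriate linux crypto interface (async ablkcipher/ahash
 * where available, otherwise the synchronous blkcipher/hash/comp calls).
 */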
691 static void swcr_process_req(struct swcr_req *req)
692 {
693         struct swcr_data *sw;
694         struct cryptop *crp = req->crp;
695         struct cryptodesc *crd = req->crd;
696         struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
697         struct uio *uiop = (struct uio *) crp->crp_buf;
698         int sg_num, sg_len, skip;
699
700         dprintk("%s()\n", __FUNCTION__);
701
702         /*
703          * Find the crypto context.
704          *
705          * XXX Note that the logic here prevents us from having
706          * XXX the same algorithm multiple times in a session
707          * XXX (or rather, we can but it won't give us the right
708          * XXX results). To do that, we'd need some way of differentiating
709          * XXX between the various instances of an algorithm (so we can
710          * XXX locate the correct crypto context).
711          */
712         for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
713                 ;
714
715         /* No such context ? */
716         if (sw == NULL) {
717                 crp->crp_etype = EINVAL;
718                 dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
719                 goto done;
720         }
721
722         /*
723          * for some types we need to ensure only one user at a time, as state
724          * is stored in the tfm during an operation and could get corrupted
725          */
726         switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
727 #ifdef HAVE_AHASH
728         case SW_TYPE_AHMAC:
729         case SW_TYPE_AHASH:
730 #endif
731         case SW_TYPE_HMAC:
732         case SW_TYPE_HASH: {
733                 unsigned long flags;
734                 spin_lock_irqsave(&sw->sw_tfm_lock, flags);
735                 if (sw->sw_type & SW_TYPE_INUSE) {
736                         spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
737                         execute_later((void (*)(void *))swcr_process_req, (void *)req);
738                         return;
739                 }
740                 sw->sw_type |= SW_TYPE_INUSE;
741                 spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
742                 } break;
743         }
744
745         req->sw = sw;
746         skip = crd->crd_skip;
747
748         /*
749          * set up the SG list, skipping the first crd_skip bytes of the buffer
750          */
751         memset(req->sg, 0, sizeof(req->sg));
752         sg_init_table(req->sg, SCATTERLIST_MAX);
753         if (crp->crp_flags & CRYPTO_F_SKBUF) {
754                 int i, len;
755
756                 sg_num = 0;
757                 sg_len = 0;
758
759                 if (skip < skb_headlen(skb)) {
760                         len = skb_headlen(skb) - skip;
761                         if (len + sg_len > crd->crd_len)
762                                 len = crd->crd_len - sg_len;
763                         sg_set_page(&req->sg[sg_num],
764                                 virt_to_page(skb->data + skip), len,
765                                 offset_in_page(skb->data + skip));
766                         sg_len += len;
767                         sg_num++;
768                         skip = 0;
769                 } else
770                         skip -= skb_headlen(skb);
771
772                 for (i = 0; sg_len < crd->crd_len &&
773                                         i < skb_shinfo(skb)->nr_frags &&
774                                         sg_num < SCATTERLIST_MAX; i++) {
775                         if (skip < skb_shinfo(skb)->frags[i].size) {
776                                 len = skb_shinfo(skb)->frags[i].size - skip;
777                                 if (len + sg_len > crd->crd_len)
778                                         len = crd->crd_len - sg_len;
779                                 sg_set_page(&req->sg[sg_num],
780                                         skb_frag_page(&skb_shinfo(skb)->frags[i]),
781                                         len,
782                                         skb_shinfo(skb)->frags[i].page_offset + skip);
783                                 sg_len += len;
784                                 sg_num++;
785                                 skip = 0;
786                         } else
787                                 skip -= skb_shinfo(skb)->frags[i].size;
788                 }
789         } else if (crp->crp_flags & CRYPTO_F_IOV) {
790                 int len;
791
792                 sg_len = 0;
793                 for (sg_num = 0; sg_len < crd->crd_len &&
794                                 sg_num < uiop->uio_iovcnt &&
795                                 sg_num < SCATTERLIST_MAX; sg_num++) {
796                         if (skip <= uiop->uio_iov[sg_num].iov_len) {
797                                 len = uiop->uio_iov[sg_num].iov_len - skip;
798                                 if (len + sg_len > crd->crd_len)
799                                         len = crd->crd_len - sg_len;
800                                 sg_set_page(&req->sg[sg_num],
801                                         virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
802                                         len,
803                                         offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
804                                 sg_len += len;
805                                 skip = 0;
806                         } else 
807                                 skip -= uiop->uio_iov[sg_num].iov_len;
808                 }
809         } else {
810                 sg_len = (crp->crp_ilen - skip);
811                 if (sg_len > crd->crd_len)
812                         sg_len = crd->crd_len;
813                 sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
814                         sg_len, offset_in_page(crp->crp_buf + skip));
815                 sg_num = 1;
816         }
817         if (sg_num > 0)
818                 sg_mark_end(&req->sg[sg_num-1]);
819
820         switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
821
822 #ifdef HAVE_AHASH
823         case SW_TYPE_AHMAC:
824         case SW_TYPE_AHASH:
825                 {
826                 int ret;
827
828                 /* check we have room for the result */
829                 if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
830                         dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
831                                         "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
832                                         crd->crd_inject, sw->u.hmac.sw_mlen);
833                         crp->crp_etype = EINVAL;
834                         goto done;
835                 }
836
837                 req->crypto_req =
838                                 ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm),GFP_ATOMIC);
839                 if (!req->crypto_req) {
840                         crp->crp_etype = ENOMEM;
841                         dprintk("%s,%d: ENOMEM ahash_request_alloc", __FILE__, __LINE__);
842                         goto done;
843                 }
844
845                 ahash_request_set_callback(req->crypto_req,
846                                 CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);
847
848                 memset(req->result, 0, sizeof(req->result));
849
850                 if (sw->sw_type & SW_TYPE_AHMAC)
851                         crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
852                                         sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
853                 ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
854                 ret = crypto_ahash_digest(req->crypto_req);
855                 switch (ret) {
856                 case -EINPROGRESS:
857                 case -EBUSY:
858                         return;
859                 default:
860                 case 0:
861                         dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
862                         crp->crp_etype = ret;
863                         goto done;
864                 }
865                 } break;
866 #endif /* HAVE_AHASH */
867
868 #ifdef HAVE_ABLKCIPHER
869         case SW_TYPE_ABLKCIPHER: {
870                 int ret;
871                 unsigned char *ivp = req->iv;
872                 int ivsize = 
873                         crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));
874
875                 if (sg_len < crypto_ablkcipher_blocksize(
876                                 __crypto_ablkcipher_cast(sw->sw_tfm))) {
877                         crp->crp_etype = EINVAL;
878                         dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
879                                         sg_len, crypto_ablkcipher_blocksize(
880                                                 __crypto_ablkcipher_cast(sw->sw_tfm)));
881                         goto done;
882                 }
883
884                 if (ivsize > sizeof(req->iv)) {
885                         crp->crp_etype = EINVAL;
886                         dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
887                         goto done;
888                 }
889
890                 req->crypto_req = ablkcipher_request_alloc(
891                                 __crypto_ablkcipher_cast(sw->sw_tfm), GFP_ATOMIC);
892                 if (!req->crypto_req) {
893                         crp->crp_etype = ENOMEM;
894                         dprintk("%s,%d: ENOMEM ablkcipher_request_alloc",
895                                         __FILE__, __LINE__);
896                         goto done;
897                 }
898
899                 ablkcipher_request_set_callback(req->crypto_req,
900                                 CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);
901
902                 if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
903                         int i, error;
904
905                         if (debug) {
906                                 dprintk("%s key:", __FUNCTION__);
907                                 for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
908                                         dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
909                                                         crd->crd_key[i] & 0xff);
910                                 dprintk("\n");
911                         }
912                         /* OCF doesn't enforce keys */
913                         crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
914                                         CRYPTO_TFM_REQ_WEAK_KEY);
915                         error = crypto_ablkcipher_setkey(
916                                                 __crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
917                                                 (crd->crd_klen + 7) / 8);
918                         if (error) {
919                                 dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
920                                                 error, sw->sw_tfm->crt_flags);
921                                 crp->crp_etype = -error;
922                         }
923                 }
924
925                 if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
926
927                         if (crd->crd_flags & CRD_F_IV_EXPLICIT)
928                                 ivp = crd->crd_iv;
929                         else
930                                 get_random_bytes(ivp, ivsize);
931                         /*
932                          * do we have to copy the IV back to the buffer ?
933                          */
934                         if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
935                                 crypto_copyback(crp->crp_flags, crp->crp_buf,
936                                                 crd->crd_inject, ivsize, (caddr_t)ivp);
937                         }
938                         ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
939                                         sg_len, ivp);
940                         ret = crypto_ablkcipher_encrypt(req->crypto_req);
941
942                 } else { /* decrypt */
943
944                         if (crd->crd_flags & CRD_F_IV_EXPLICIT)
945                                 ivp = crd->crd_iv;
946                         else
947                                 crypto_copydata(crp->crp_flags, crp->crp_buf,
948                                                 crd->crd_inject, ivsize, (caddr_t)ivp);
949                         ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
950                                         sg_len, ivp);
951                         ret = crypto_ablkcipher_decrypt(req->crypto_req);
952                 }
953
954                 switch (ret) {
955                 case -EINPROGRESS:
956                 case -EBUSY:
957                         return;
958                 default:
959                 case 0:
960                         dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
961                         crp->crp_etype = ret;
962                         goto done;
963                 }
964                 } break;
965 #endif /* HAVE_ABLKCIPHER */
966
967         case SW_TYPE_BLKCIPHER: {
968                 unsigned char iv[EALG_MAX_BLOCK_LEN];
969                 unsigned char *ivp = iv;
970                 struct blkcipher_desc desc;
971                 int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
972
973                 if (sg_len < crypto_blkcipher_blocksize(
974                                 crypto_blkcipher_cast(sw->sw_tfm))) {
975                         crp->crp_etype = EINVAL;
976                         dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
977                                         sg_len, crypto_blkcipher_blocksize(
978                                                 crypto_blkcipher_cast(sw->sw_tfm)));
979                         goto done;
980                 }
981
982                 if (ivsize > sizeof(iv)) {
983                         crp->crp_etype = EINVAL;
984                         dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
985                         goto done;
986                 }
987
988                 if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
989                         int i, error;
990
991                         if (debug) {
992                                 dprintk("%s key:", __FUNCTION__);
993                                 for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
994                                         dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
995                                                         crd->crd_key[i] & 0xff);
996                                 dprintk("\n");
997                         }
998                         /* OCF doesn't enforce keys */
999                         crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
1000                                         CRYPTO_TFM_REQ_WEAK_KEY);
1001                         error = crypto_blkcipher_setkey(
1002                                                 crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
1003                                                 (crd->crd_klen + 7) / 8);
1004                         if (error) {
1005                                 dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
1006                                                 error, sw->sw_tfm->crt_flags);
1007                                 crp->crp_etype = -error;
1008                         }
1009                 }
1010
1011                 memset(&desc, 0, sizeof(desc));
1012                 desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
1013
1014                 if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
1015
1016                         if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
1017                                 ivp = crd->crd_iv;
1018                         } else {
1019                                 get_random_bytes(ivp, ivsize);
1020                         }
1021                         /*
1022                          * do we have to copy the IV back to the buffer ?
1023                          */
1024                         if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
1025                                 crypto_copyback(crp->crp_flags, crp->crp_buf,
1026                                                 crd->crd_inject, ivsize, (caddr_t)ivp);
1027                         }
1028                         desc.info = ivp;
1029                         crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);
1030
1031                 } else { /* decrypt */
1032
1033                         if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
1034                                 ivp = crd->crd_iv;
1035                         } else {
1036                                 crypto_copydata(crp->crp_flags, crp->crp_buf,
1037                                                 crd->crd_inject, ivsize, (caddr_t)ivp);
1038                         }
1039                         desc.info = ivp;
1040                         crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
1041                 }
1042                 } break;
1043
1044         case SW_TYPE_HMAC:
1045         case SW_TYPE_HASH:
1046                 {
1047                 char result[HASH_MAX_LEN];
1048                 struct hash_desc desc;
1049
1050                 /* check we have room for the result */
1051                 if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
1052                         dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
1053                                         "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
1054                                         crd->crd_inject, sw->u.hmac.sw_mlen);
1055                         crp->crp_etype = EINVAL;
1056                         goto done;
1057                 }
1058
1059                 memset(&desc, 0, sizeof(desc));
1060                 desc.tfm = crypto_hash_cast(sw->sw_tfm);
1061
1062                 memset(result, 0, sizeof(result));
1063
1064                 if (sw->sw_type & SW_TYPE_HMAC) {
1065 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
1066                         crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
1067                                         req->sg, sg_num, result);
1068 #else
1069                         crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
1070                                         sw->u.hmac.sw_klen);
1071                         crypto_hash_digest(&desc, req->sg, sg_len, result);
1072 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
1073                         
1074                 } else { /* SW_TYPE_HASH */
1075                         crypto_hash_digest(&desc, req->sg, sg_len, result);
1076                 }
1077
1078                 crypto_copyback(crp->crp_flags, crp->crp_buf,
1079                                 crd->crd_inject, sw->u.hmac.sw_mlen, result);
1080                 }
1081                 break;
1082
1083         case SW_TYPE_COMP: {
1084                 void *ibuf = NULL;
1085                 void *obuf = sw->u.sw_comp_buf;
1086                 int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
1087                 int ret = 0;
1088
1089                 /*
1090                  * we need to use an additional copy if there is more than one
1091                  * input chunk since the kernel comp routines do not handle
1092                  * SG yet.  Otherwise we just use the input buffer as is.
1093                  * Rather than allocate another buffer we just split the tmp
1094                  * buffer we already have.
1095                  * Perhaps we should just use zlib directly ?
1096                  */
1097                 if (sg_num > 1) {
1098                         int blk;
1099
1100                         ibuf = obuf;
1101                         for (blk = 0; blk < sg_num; blk++) {
1102                                 memcpy(obuf, sg_virt(&req->sg[blk]),
1103                                                 req->sg[blk].length);
1104                                 obuf += req->sg[blk].length;
1105                         }
1106                         olen -= sg_len;
1107                 } else
1108                         ibuf = sg_virt(&req->sg[0]);
1109
1110                 if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
1111                         ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
1112                                         ibuf, ilen, obuf, &olen);
1113                         if (!ret && olen > crd->crd_len) {
1114                                 dprintk("cryptosoft: ERANGE compress %d into %d\n",
1115                                                 crd->crd_len, olen);
1116                                 if (swcr_fail_if_compression_grows)
1117                                         ret = ERANGE;
1118                         }
1119                 } else { /* decompress */
1120                         ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
1121                                         ibuf, ilen, obuf, &olen);
1122                         if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
1123                                 dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
1124                                                 "space for %d,at offset %d\n",
1125                                                 crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
1126                                 ret = ETOOSMALL;
1127                         }
1128                 }
1129                 if (ret)
1130                         dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);
1131
1132                 /*
1133                  * on success copy the result back; the linux crypto API
1134                  * returns -errno, which we need to fix up here
1135                  */
1136                 crp->crp_etype = ret < 0 ? -ret : ret;
1137                 if (ret == 0) {
1138                         /* copy back the result and return its size */
1139                         crypto_copyback(crp->crp_flags, crp->crp_buf,
1140                                         crd->crd_inject, olen, obuf);
1141                         crp->crp_olen = olen;
1142                 }
1143                 } break;
1144
1145         default:
1146                 /* Unknown/unsupported algorithm */
1147                 dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
1148                 crp->crp_etype = EINVAL;
1149                 goto done;
1150         }
1151
1152 done:
1153         swcr_process_req_complete(req);
1154 }
1155
1156
1157 /*
1158  * Process a crypto request.
1159  */
1160 static int
1161 swcr_process(device_t dev, struct cryptop *crp, int hint)
1162 {
1163         struct swcr_req *req = NULL;
1164         u_int32_t lid;
1165
1166         dprintk("%s()\n", __FUNCTION__);
1167         /* Sanity check */
1168         if (crp == NULL) {
1169                 dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
1170                 return EINVAL;
1171         }
1172
1173         crp->crp_etype = 0;
1174
1175         if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
1176                 dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
1177                 crp->crp_etype = EINVAL;
1178                 goto done;
1179         }
1180
1181         lid = crp->crp_sid & 0xffffffff;
1182         if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
1183                         swcr_sessions[lid] == NULL) {
1184                 crp->crp_etype = ENOENT;
1185                 dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
1186                 goto done;
1187         }
1188
1189         /*
1190          * do some error checking outside of the loop for SKB and IOV processing;
1191          * this leaves us with valid skb or uiop pointers for later
1192          */
1193         if (crp->crp_flags & CRYPTO_F_SKBUF) {
1194                 struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
1195                 if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
1196                         printk("%s,%d: %d nr_frags > SCATTERLIST_MAX\n", __FILE__, __LINE__,
1197                                         skb_shinfo(skb)->nr_frags);
1198                         goto done;
1199                 }
1200         } else if (crp->crp_flags & CRYPTO_F_IOV) {
1201                 struct uio *uiop = (struct uio *) crp->crp_buf;
1202                 if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
1203                         printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX\n", __FILE__, __LINE__,
1204                                         uiop->uio_iovcnt);
1205                         goto done;
1206                 }
1207         }
1208
1209         /*
1210          * setup a new request ready for queuing
1211          */
1212         req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC);
1213         if (req == NULL) {
1214                 dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
1215                 crp->crp_etype = ENOMEM;
1216                 goto done;
1217         }
1218         memset(req, 0, sizeof(*req));
1219
1220         req->sw_head = swcr_sessions[lid];
1221         req->crp = crp;
1222         req->crd = crp->crp_desc;
1223
1224         swcr_process_req(req);
1225         return 0;
1226
1227 done:
1228         crypto_done(crp);
1229         if (req)
1230                 kmem_cache_free(swcr_req_cache, req);
1231         return 0;
1232 }
1233
1234
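/*
 * Module init: create the request cache, register this driver with OCF as
 * a software (synchronous) device and advertise every algorithm from
 * crypto_details that the running kernel's crypto API provides.
 */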
1235 static int
1236 cryptosoft_init(void)
1237 {
1238         int i, sw_type, mode;
1239         char *algo;
1240
1241         dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);
1242
1243         swcr_req_cache = kmem_cache_create("cryptosoft_req",
1244                                 sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL
1245 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
1246                                 , NULL
1247 #endif
1248                                 );
1249         if (!swcr_req_cache) {
1250                 printk("cryptosoft: failed to create request cache\n");
1251                 return -ENOENT;
1252         }
1253
1254         softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);
1255
1256         swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
1257                         CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
1258         if (swcr_id < 0) {
1259                 printk("cryptosoft: Software crypto device cannot initialize!\n");
1260                 return -ENODEV;
1261         }
1262
1263 #define REGISTER(alg) \
1264                 crypto_register(swcr_id, alg, 0,0)
1265
1266         for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) {
1267                 int found;
1268                 
1269                 algo = crypto_details[i].alg_name;
1270                 if (!algo || !*algo) {
1271                         dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
1272                         continue;
1273                 }
1274
1275                 mode = crypto_details[i].mode;
1276                 sw_type = crypto_details[i].sw_type;
1277
1278                 found = 0;
1279                 switch (sw_type & SW_TYPE_ALG_MASK) {
1280                 case SW_TYPE_CIPHER:
1281                         found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC);
1282                         break;
1283                 case SW_TYPE_HMAC:
1284                         found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0);
1285                         break;
1286                 case SW_TYPE_HASH:
1287                         found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0);
1288                         break;
1289                 case SW_TYPE_COMP:
1290                         found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC);
1291                         break;
1292                 case SW_TYPE_BLKCIPHER:
1293                         found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
1294                         if (!found && !swcr_no_ablk)
1295                                 found = crypto_has_ablkcipher(algo, 0, 0);
1296                         break;
1297                 }
1298                 if (found) {
1299                         REGISTER(i);
1300                 } else {
1301                         dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
1302                                         __FUNCTION__, sw_type, i, algo);
1303                 }
1304         }
1305         return 0;
1306 }
1307
1308 static void
1309 cryptosoft_exit(void)
1310 {
1311         dprintk("%s()\n", __FUNCTION__);
1312         crypto_unregister_all(swcr_id);
1313         swcr_id = -1;
1314         kmem_cache_destroy(swcr_req_cache);
1315 }
1316
1317 late_initcall(cryptosoft_init);
1318 module_exit(cryptosoft_exit);
1319
1320 MODULE_LICENSE("Dual BSD/GPL");
1321 MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
1322 MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");