[kernel] update ocf-linux to 20080917
[openwrt.git] / target / linux / generic-2.6 / patches-2.6.26 / 971-ocf_20080917.patch
1 --- a/drivers/char/random.c
2 +++ b/drivers/char/random.c
3 @@ -129,6 +129,9 @@
4   *                                unsigned int value);
5   *     void add_interrupt_randomness(int irq);
6   *
7 + *      void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
8 + *      int random_input_wait(void);
9 + *
10   * add_input_randomness() uses the input layer interrupt timing, as well as
11   * the event type information from the hardware.
12   *
13 @@ -140,6 +143,13 @@
14   * a better measure, since the timing of the disk interrupts are more
15   * unpredictable.
16   *
17 + * random_input_words() just provides a raw block of entropy to the input
18 + * pool, such as from a hardware entropy generator.
19 + *
20 + * random_input_wait() suspends the caller until such time as the
21 + * entropy pool falls below the write threshold, and returns a count of how
22 + * much entropy (in bits) is needed to sustain the pool.
23 + *
24   * All of these routines try to estimate how many bits of randomness a
25   * particular randomness source.  They do this by keeping track of the
26   * first and second order deltas of the event timings.
27 @@ -667,6 +677,61 @@ void add_disk_randomness(struct gendisk 
28  }
29  #endif
30  
31 +/*
32 + * random_input_words - add bulk entropy to pool
33 + *
34 + * @buf: buffer to add
35 + * @wordcount: number of __u32 words to add
36 + * @ent_count: total amount of entropy (in bits) to credit
37 + *
38 + * this provides bulk input of entropy to the input pool
39 + *
40 + */
41 +void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
42 +{
43 +       mix_pool_bytes(&input_pool, buf, wordcount*4);
44 +
45 +       credit_entropy_bits(&input_pool, ent_count);
46 +
47 +       DEBUG_ENT("crediting %d bits => %d\n",
48 +                 ent_count, input_pool.entropy_count);
49 +       /*
50 +        * Wake up waiting processes if we have enough
51 +        * entropy.
52 +        */
53 +       if (input_pool.entropy_count >= random_read_wakeup_thresh)
54 +               wake_up_interruptible(&random_read_wait);
55 +}
56 +EXPORT_SYMBOL(random_input_words);
57 +
58 +/*
59 + * random_input_wait - wait until random needs entropy
60 + *
61 + * this function sleeps until the /dev/random subsystem actually
62 + * needs more entropy, and then returns the amount of entropy
63 + * that it would be nice to have added to the system.
64 + */
65 +int random_input_wait(void)
66 +{
67 +       int count;
68 +
69 +       wait_event_interruptible(random_write_wait, 
70 +                        input_pool.entropy_count < random_write_wakeup_thresh);
71 +
72 +       count = random_write_wakeup_thresh - input_pool.entropy_count;
73 +
74 +        /* likely we got woken up due to a signal */
75 +       if (count <= 0) count = random_read_wakeup_thresh; 
76 +
77 +       DEBUG_ENT("requesting %d bits from input_wait()er %d<%d\n",
78 +                 count,
79 +                 input_pool.entropy_count, random_write_wakeup_thresh);
80 +
81 +       return count;
82 +}
83 +EXPORT_SYMBOL(random_input_wait);
84 +
85 +
86  #define EXTRACT_SIZE 10
87  
88  /*********************************************************************
89 --- a/fs/fcntl.c
90 +++ b/fs/fcntl.c
91 @@ -191,6 +191,7 @@ asmlinkage long sys_dup(unsigned int fil
92                 ret = dupfd(file, 0, 0);
93         return ret;
94  }
95 +EXPORT_SYMBOL(sys_dup);
96  
97  #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)
98  
99 --- a/include/linux/miscdevice.h
100 +++ b/include/linux/miscdevice.h
101 @@ -12,6 +12,7 @@
102  #define APOLLO_MOUSE_MINOR 7
103  #define PC110PAD_MINOR 9
104  /*#define ADB_MOUSE_MINOR 10   FIXME OBSOLETE */
105 +#define CRYPTODEV_MINOR                70      /* /dev/crypto */
106  #define WATCHDOG_MINOR         130     /* Watchdog timer     */
107  #define TEMP_MINOR             131     /* Temperature Sensor */
108  #define RTC_MINOR 135
109 --- a/include/linux/random.h
110 +++ b/include/linux/random.h
111 @@ -8,6 +8,7 @@
112  #define _LINUX_RANDOM_H
113  
114  #include <linux/ioctl.h>
115 +#include <linux/types.h> /* for __u32 in user space */
116  
117  /* ioctl()'s for the random number generator */
118  
119 @@ -32,6 +33,30 @@
120  /* Clear the entropy pool and associated counters.  (Superuser only.) */
121  #define RNDCLEARPOOL   _IO( 'R', 0x06 )
122  
123 +#ifdef CONFIG_FIPS_RNG
124 +
125 +/* Size of seed value - equal to AES blocksize */
126 +#define AES_BLOCK_SIZE_BYTES   16
127 +#define SEED_SIZE_BYTES                        AES_BLOCK_SIZE_BYTES
128 +/* Size of AES key */
129 +#define KEY_SIZE_BYTES         16
130 +
131 +/* ioctl() structure used by FIPS 140-2 Tests */
132 +struct rand_fips_test {
133 +       unsigned char key[KEY_SIZE_BYTES];                      /* Input */
134 +       unsigned char datetime[SEED_SIZE_BYTES];        /* Input */
135 +       unsigned char seed[SEED_SIZE_BYTES];            /* Input */
136 +       unsigned char result[SEED_SIZE_BYTES];          /* Output */
137 +};
138 +
139 +/* FIPS 140-2 RNG Variable Seed Test. (Superuser only.) */
140 +#define RNDFIPSVST     _IOWR('R', 0x10, struct rand_fips_test)
141 +
142 +/* FIPS 140-2 RNG Monte Carlo Test. (Superuser only.) */
143 +#define RNDFIPSMCT     _IOWR('R', 0x11, struct rand_fips_test)
144 +
145 +#endif /* #ifdef CONFIG_FIPS_RNG */
146 +
147  struct rand_pool_info {
148         int     entropy_count;
149         int     buf_size;
150 @@ -48,6 +73,10 @@ extern void add_input_randomness(unsigne
151                                  unsigned int value);
152  extern void add_interrupt_randomness(int irq);
153  
154 +extern void random_input_words(__u32 *buf, size_t wordcount, int ent_count);
155 +extern int random_input_wait(void);
156 +#define HAS_RANDOM_INPUT_WAIT 1
157 +
158  extern void get_random_bytes(void *buf, int nbytes);
159  void generate_random_uuid(unsigned char uuid_out[16]);
160  
161 --- /dev/null
162 +++ b/crypto/ocf/hifn/Makefile
163 @@ -0,0 +1,13 @@
164 +# for SGlinux builds
165 +-include $(ROOTDIR)/modules/.config
166 +
167 +obj-$(CONFIG_OCF_HIFN)     += hifn7751.o
168 +obj-$(CONFIG_OCF_HIFNHIPP) += hifnHIPP.o
169 +
170 +obj ?= .
171 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
172 +
173 +ifdef TOPDIR
174 +-include $(TOPDIR)/Rules.make
175 +endif
176 +
177 --- /dev/null
178 +++ b/crypto/ocf/safe/Makefile
179 @@ -0,0 +1,12 @@
180 +# for SGlinux builds
181 +-include $(ROOTDIR)/modules/.config
182 +
183 +obj-$(CONFIG_OCF_SAFE) += safe.o
184 +
185 +obj ?= .
186 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
187 +
188 +ifdef TOPDIR
189 +-include $(TOPDIR)/Rules.make
190 +endif
191 +
192 --- /dev/null
193 +++ b/crypto/ocf/Makefile
194 @@ -0,0 +1,121 @@
195 +# for SGlinux builds
196 +-include $(ROOTDIR)/modules/.config
197 +
198 +OCF_OBJS = crypto.o criov.o
199 +
200 +ifdef CONFIG_OCF_RANDOMHARVEST
201 +       OCF_OBJS += random.o
202 +endif
203 +
204 +ifdef CONFIG_OCF_FIPS
205 +       OCF_OBJS += rndtest.o
206 +endif
207 +
208 +# Add in autoconf.h to get #defines for CONFIG_xxx
209 +AUTOCONF_H=$(ROOTDIR)/modules/autoconf.h
210 +ifeq ($(AUTOCONF_H), $(wildcard $(AUTOCONF_H)))
211 +       EXTRA_CFLAGS += -include $(AUTOCONF_H)
212 +       export EXTRA_CFLAGS
213 +endif
214 +
215 +ifndef obj
216 +       obj ?= .
217 +       _obj = subdir
218 +       mod-subdirs := safe hifn ixp4xx talitos ocfnull
219 +       export-objs += crypto.o criov.o random.o
220 +       list-multi += ocf.o
221 +       _slash :=
222 +else
223 +       _obj = obj
224 +       _slash := /
225 +endif
226 +
227 +EXTRA_CFLAGS += -I$(obj)/.
228 +
229 +obj-$(CONFIG_OCF_OCF)         += ocf.o
230 +obj-$(CONFIG_OCF_CRYPTODEV)   += cryptodev.o
231 +obj-$(CONFIG_OCF_CRYPTOSOFT)  += cryptosoft.o
232 +obj-$(CONFIG_OCF_BENCH)       += ocf-bench.o
233 +
234 +$(_obj)-$(CONFIG_OCF_SAFE)    += safe$(_slash)
235 +$(_obj)-$(CONFIG_OCF_HIFN)    += hifn$(_slash)
236 +$(_obj)-$(CONFIG_OCF_IXP4XX)  += ixp4xx$(_slash)
237 +$(_obj)-$(CONFIG_OCF_TALITOS) += talitos$(_slash)
238 +$(_obj)-$(CONFIG_OCF_PASEMI)  += pasemi$(_slash)
239 +$(_obj)-$(CONFIG_OCF_EP80579) += ep80579$(_slash)
240 +$(_obj)-$(CONFIG_OCF_OCFNULL) += ocfnull$(_slash)
241 +
242 +ocf-objs := $(OCF_OBJS)
243 +
244 +$(list-multi) dummy1: $(ocf-objs)
245 +       $(LD) -r -o $@ $(ocf-objs)
246 +
247 +.PHONY:
248 +clean:
249 +       rm -f *.o *.ko .*.o.flags .*.ko.cmd .*.o.cmd .*.mod.o.cmd *.mod.c
250 +       rm -f */*.o */*.ko */.*.o.cmd */.*.ko.cmd */.*.mod.o.cmd */*.mod.c */.*.o.flags
251 +
252 +ifdef TOPDIR
253 +-include $(TOPDIR)/Rules.make
254 +endif
255 +
256 +#
257 +# release gen targets
258 +#
259 +
260 +.PHONY: patch
261 +patch:
262 +       REL=`date +%Y%m%d`; \
263 +               patch=ocf-linux-$$REL.patch; \
264 +               patch24=ocf-linux-24-$$REL.patch; \
265 +               patch26=ocf-linux-26-$$REL.patch; \
266 +               ( \
267 +                       find . -name Makefile; \
268 +                       find . -name Config.in; \
269 +                       find . -name Kconfig; \
270 +                       find . -name README; \
271 +                       find . -name '*.[ch]' | grep -v '.mod.c'; \
272 +               ) | while read t; do \
273 +                       diff -Nau /dev/null $$t | sed 's?^+++ \./?+++ linux/crypto/ocf/?'; \
274 +               done > $$patch; \
275 +               cat patches/linux-2.4.35-ocf.patch $$patch > $$patch24; \
276 +               cat patches/linux-2.6.26-ocf.patch $$patch > $$patch26
277 +
278 +.PHONY: tarball
279 +tarball:
280 +       REL=`date +%Y%m%d`; RELDIR=/tmp/ocf-linux-$$REL; \
281 +               CURDIR=`pwd`; \
282 +               rm -rf /tmp/ocf-linux-$$REL*; \
283 +               mkdir -p $$RELDIR/tools; \
284 +               cp README* $$RELDIR; \
285 +               cp patches/openss*.patch $$RELDIR; \
286 +               cp patches/crypto-tools.patch $$RELDIR; \
287 +               cp tools/[!C]* $$RELDIR/tools; \
288 +               cd ..; \
289 +               tar cvf $$RELDIR/ocf-linux.tar \
290 +                                       --exclude=CVS \
291 +                                       --exclude=.* \
292 +                                       --exclude=*.o \
293 +                                       --exclude=*.ko \
294 +                                       --exclude=*.mod.* \
295 +                                       --exclude=README* \
296 +                                       --exclude=ocf-*.patch \
297 +                                       --exclude=ocf/patches/openss*.patch \
298 +                                       --exclude=ocf/patches/crypto-tools.patch \
299 +                                       --exclude=ocf/tools \
300 +                                       ocf; \
301 +               gzip -9 $$RELDIR/ocf-linux.tar; \
302 +               cd /tmp; \
303 +               tar cvf ocf-linux-$$REL.tar ocf-linux-$$REL; \
304 +               gzip -9 ocf-linux-$$REL.tar; \
305 +               cd $$CURDIR/../../user; \
306 +               rm -rf /tmp/crypto-tools-$$REL*; \
307 +               tar cvf /tmp/crypto-tools-$$REL.tar \
308 +                                       --exclude=CVS \
309 +                                       --exclude=.* \
310 +                                       --exclude=*.o \
311 +                                       --exclude=cryptotest \
312 +                                       --exclude=cryptokeytest \
313 +                                       crypto-tools; \
314 +               gzip -9 /tmp/crypto-tools-$$REL.tar
315 +
316 --- /dev/null
317 +++ b/crypto/ocf/talitos/Makefile
318 @@ -0,0 +1,12 @@
319 +# for SGlinux builds
320 +-include $(ROOTDIR)/modules/.config
321 +
322 +obj-$(CONFIG_OCF_TALITOS) += talitos.o
323 +
324 +obj ?= .
325 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
326 +
327 +ifdef TOPDIR
328 +-include $(TOPDIR)/Rules.make
329 +endif
330 +
331 --- /dev/null
332 +++ b/crypto/ocf/ixp4xx/Makefile
333 @@ -0,0 +1,104 @@
334 +# for SGlinux builds
335 +-include $(ROOTDIR)/modules/.config
336 +
337 +#
338 +# You will need to point this at your Intel ixp425 includes,  this portion
339 +# of the Makefile only really works under SGLinux with the appropriate libs
340 +# installed.  They can be downloaded from http://www.snapgear.org/
341 +#
342 +ifeq ($(CONFIG_CPU_IXP46X),y)
343 +IXPLATFORM = ixp46X
344 +else
345 +ifeq ($(CONFIG_CPU_IXP43X),y)
346 +IXPLATFORM = ixp43X
347 +else
348 +IXPLATFORM = ixp42X
349 +endif
350 +endif
351 +
352 +ifdef CONFIG_IXP400_LIB_2_4
353 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp400_xscale_sw
354 +OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp_osal
355 +endif
356 +ifdef CONFIG_IXP400_LIB_2_1
357 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp400_xscale_sw
358 +OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp_osal
359 +endif
360 +ifdef CONFIG_IXP400_LIB_2_0
361 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp400_xscale_sw
362 +OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp_osal
363 +endif
364 +ifdef IX_XSCALE_SW
365 +ifdef CONFIG_IXP400_LIB_2_4
366 +IXP_CFLAGS = \
367 +       -I$(ROOTDIR)/. \
368 +       -I$(IX_XSCALE_SW)/src/include \
369 +       -I$(OSAL_DIR)/common/include/ \
370 +       -I$(OSAL_DIR)/common/include/modules/ \
371 +       -I$(OSAL_DIR)/common/include/modules/ddk/ \
372 +       -I$(OSAL_DIR)/common/include/modules/bufferMgt/ \
373 +       -I$(OSAL_DIR)/common/include/modules/ioMem/ \
374 +       -I$(OSAL_DIR)/common/os/linux/include/ \
375 +       -I$(OSAL_DIR)/common/os/linux/include/core/  \
376 +       -I$(OSAL_DIR)/common/os/linux/include/modules/ \
377 +       -I$(OSAL_DIR)/common/os/linux/include/modules/ddk/ \
378 +       -I$(OSAL_DIR)/common/os/linux/include/modules/bufferMgt/ \
379 +       -I$(OSAL_DIR)/common/os/linux/include/modules/ioMem/ \
380 +       -I$(OSAL_DIR)/platforms/$(IXPLATFORM)/include/ \
381 +       -I$(OSAL_DIR)/platforms/$(IXPLATFORM)/os/linux/include/ \
382 +       -DENABLE_IOMEM -DENABLE_BUFFERMGT -DENABLE_DDK \
383 +       -DUSE_IXP4XX_CRYPTO
384 +else
385 +IXP_CFLAGS = \
386 +       -I$(ROOTDIR)/. \
387 +       -I$(IX_XSCALE_SW)/src/include \
388 +       -I$(OSAL_DIR)/ \
389 +       -I$(OSAL_DIR)/os/linux/include/ \
390 +       -I$(OSAL_DIR)/os/linux/include/modules/ \
391 +       -I$(OSAL_DIR)/os/linux/include/modules/ioMem/ \
392 +       -I$(OSAL_DIR)/os/linux/include/modules/bufferMgt/ \
393 +       -I$(OSAL_DIR)/os/linux/include/core/  \
394 +       -I$(OSAL_DIR)/os/linux/include/platforms/ \
395 +       -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ \
396 +       -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp425 \
397 +       -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp465 \
398 +       -I$(OSAL_DIR)/os/linux/include/core/ \
399 +       -I$(OSAL_DIR)/include/ \
400 +       -I$(OSAL_DIR)/include/modules/ \
401 +       -I$(OSAL_DIR)/include/modules/bufferMgt/ \
402 +       -I$(OSAL_DIR)/include/modules/ioMem/ \
403 +       -I$(OSAL_DIR)/include/platforms/ \
404 +       -I$(OSAL_DIR)/include/platforms/ixp400/ \
405 +       -DUSE_IXP4XX_CRYPTO
406 +endif
407 +endif
408 +ifdef CONFIG_IXP400_LIB_1_4
409 +IXP_CFLAGS   = \
410 +       -I$(ROOTDIR)/. \
411 +       -I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/include \
412 +       -I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/linux \
413 +       -DUSE_IXP4XX_CRYPTO
414 +endif
415 +ifndef IXPDIR
416 +IXPDIR = ixp-version-is-not-supported
417 +endif
418 +
419 +ifeq ($(CONFIG_CPU_IXP46X),y)
420 +IXP_CFLAGS += -D__ixp46X
421 +else
422 +ifeq ($(CONFIG_CPU_IXP43X),y)
423 +IXP_CFLAGS += -D__ixp43X
424 +else
425 +IXP_CFLAGS += -D__ixp42X
426 +endif
427 +endif
428 +
429 +obj-$(CONFIG_OCF_IXP4XX) += ixp4xx.o
430 +
431 +obj ?= .
432 +EXTRA_CFLAGS += $(IXP_CFLAGS) -I$(obj)/.. -I$(obj)/.
433 +
434 +ifdef TOPDIR
435 +-include $(TOPDIR)/Rules.make
436 +endif
437 +
438 --- /dev/null
439 +++ b/crypto/ocf/ocfnull/Makefile
440 @@ -0,0 +1,12 @@
441 +# for SGlinux builds
442 +-include $(ROOTDIR)/modules/.config
443 +
444 +obj-$(CONFIG_OCF_OCFNULL) += ocfnull.o
445 +
446 +obj ?= .
447 +EXTRA_CFLAGS += -I$(obj)/..
448 +
449 +ifdef TOPDIR
450 +-include $(TOPDIR)/Rules.make
451 +endif
452 +
453 --- /dev/null
454 +++ b/crypto/ocf/ep80579/Makefile
455 @@ -0,0 +1,107 @@
456 +#########################################################################
457 +#
458 +#  Targets supported
459 +#  all     - builds everything and installs
460 +#  install - identical to all
461 +#  depend  - build dependencies
462 +#  clean   - clears derived objects except the .depend files
463 +#  distclean- clears all derived objects and the .depend file
464 +#  
465 +# @par
466 +# This file is provided under a dual BSD/GPLv2 license.  When using or 
467 +#   redistributing this file, you may do so under either license.
468 +# 
469 +#   GPL LICENSE SUMMARY
470 +# 
471 +#   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
472 +# 
473 +#   This program is free software; you can redistribute it and/or modify 
474 +#   it under the terms of version 2 of the GNU General Public License as
475 +#   published by the Free Software Foundation.
476 +# 
477 +#   This program is distributed in the hope that it will be useful, but 
478 +#   WITHOUT ANY WARRANTY; without even the implied warranty of 
479 +#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
480 +#   General Public License for more details.
481 +# 
482 +#   You should have received a copy of the GNU General Public License 
483 +#   along with this program; if not, write to the Free Software 
484 +#   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
485 +#   The full GNU General Public License is included in this distribution 
486 +#   in the file called LICENSE.GPL.
487 +# 
488 +#   Contact Information:
489 +#   Intel Corporation
490 +# 
491 +#   BSD LICENSE 
492 +# 
493 +#   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
494 +#   All rights reserved.
495 +# 
496 +#   Redistribution and use in source and binary forms, with or without 
497 +#   modification, are permitted provided that the following conditions 
498 +#   are met:
499 +# 
500 +#     * Redistributions of source code must retain the above copyright 
501 +#       notice, this list of conditions and the following disclaimer.
502 +#     * Redistributions in binary form must reproduce the above copyright 
503 +#       notice, this list of conditions and the following disclaimer in 
504 +#       the documentation and/or other materials provided with the 
505 +#       distribution.
506 +#     * Neither the name of Intel Corporation nor the names of its 
507 +#       contributors may be used to endorse or promote products derived 
508 +#       from this software without specific prior written permission.
509 +# 
510 +#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
511 +#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
512 +#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
513 +#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
514 +#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
515 +#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
516 +#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
517 +#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
518 +#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
519 +#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
520 +#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
521 +# 
522 +# 
523 +#  version: Security.L.1.0.130
524 +############################################################################
525 +
526 +
527 +####################Common variables and definitions########################
528 +
529 +# Ensure The ENV_DIR environmental var is defined.
530 +ifndef ICP_ENV_DIR
531 +$(error ICP_ENV_DIR is undefined. Please set the path to your environment makefile \
532 +        "-> setenv ICP_ENV_DIR <path>")
533 +endif
534 +
535 +#Add your project environment Makefile
536 +include $(ICP_ENV_DIR)/environment.mk
537 +
538 +#include the makefile with all the default and common Make variable definitions
539 +include $(ICP_BUILDSYSTEM_PATH)/build_files/common.mk
540 +
541 +#Add the name for the executable, Library or Module output definitions
542 +OUTPUT_NAME= icp_ocf
543 +
544 +# List of Source Files to be compiled 
545 +SOURCES= icp_common.c icp_sym.c icp_asym.c
546 +
547 +#common includes between all supported OSes
548 +INCLUDES= -I $(ICP_API_DIR) -I$(ICP_LAC_API) \
549 +-I$(ICP_OCF_SRC_DIR)
550 +
551 +# The location of the os level makefile needs to be changed.
552 +include $(ICP_ENV_DIR)/$(ICP_OS)_$(ICP_OS_LEVEL).mk
553 +
554 +# On the line directly below list the outputs you wish to build for,
555 +# e.g "lib_static lib_shared exe module" as show below
556 +install: module
557 +
558 +###################Include rules makefiles########################
559 +include $(ICP_BUILDSYSTEM_PATH)/build_files/rules.mk
560 +###################End of Rules inclusion#########################
561 +
562 +
563 --- /dev/null
564 +++ b/crypto/ocf/pasemi/Makefile
565 @@ -0,0 +1,12 @@
566 +# for SGlinux builds
567 +-include $(ROOTDIR)/modules/.config
568 +
569 +obj-$(CONFIG_OCF_PASEMI) += pasemi.o
570 +
571 +obj ?= .
572 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
573 +
574 +ifdef TOPDIR
575 +-include $(TOPDIR)/Rules.make
576 +endif
577 +
578 --- /dev/null
579 +++ b/crypto/ocf/Config.in
580 @@ -0,0 +1,34 @@
581 +#############################################################################
582 +
583 +mainmenu_option next_comment
584 +comment 'OCF Configuration'
585 +tristate 'OCF (Open Cryptographic Framework)' CONFIG_OCF_OCF
586 +dep_mbool '  enable fips RNG checks (fips check on RNG data before use)' \
587 +                               CONFIG_OCF_FIPS $CONFIG_OCF_OCF
588 +dep_mbool '  enable harvesting entropy for /dev/random' \
589 +                               CONFIG_OCF_RANDOMHARVEST $CONFIG_OCF_OCF
590 +dep_tristate '  cryptodev (user space support)' \
591 +                               CONFIG_OCF_CRYPTODEV $CONFIG_OCF_OCF
592 +dep_tristate '  cryptosoft (software crypto engine)' \
593 +                               CONFIG_OCF_CRYPTOSOFT $CONFIG_OCF_OCF
594 +dep_tristate '  safenet (HW crypto engine)' \
595 +                               CONFIG_OCF_SAFE $CONFIG_OCF_OCF
596 +dep_tristate '  IXP4xx (HW crypto engine)' \
597 +                               CONFIG_OCF_IXP4XX $CONFIG_OCF_OCF
598 +dep_mbool    '  Enable IXP4xx HW to perform SHA1 and MD5 hashing (very slow)' \
599 +                               CONFIG_OCF_IXP4XX_SHA1_MD5 $CONFIG_OCF_IXP4XX
600 +dep_tristate '  hifn (HW crypto engine)' \
601 +                               CONFIG_OCF_HIFN $CONFIG_OCF_OCF
602 +dep_tristate '  talitos (HW crypto engine)' \
603 +                               CONFIG_OCF_TALITOS $CONFIG_OCF_OCF
604 +dep_tristate '  pasemi (HW crypto engine)' \
605 +                               CONFIG_OCF_PASEMI $CONFIG_OCF_OCF
606 +dep_tristate '  ep80579 (HW crypto engine)' \
607 +                               CONFIG_OCF_EP80579 $CONFIG_OCF_OCF
608 +dep_tristate '  ocfnull (does no crypto)' \
609 +                               CONFIG_OCF_OCFNULL $CONFIG_OCF_OCF
610 +dep_tristate '  ocf-bench (HW crypto in-kernel benchmark)' \
611 +                               CONFIG_OCF_BENCH $CONFIG_OCF_OCF
612 +endmenu
613 +
614 +#############################################################################
615 --- /dev/null
616 +++ b/crypto/ocf/Kconfig
617 @@ -0,0 +1,101 @@
618 +menu "OCF Configuration"
619 +
620 +config OCF_OCF
621 +       tristate "OCF (Open Cryptographic Framework)"
622 +       help
623 +         A linux port of the OpenBSD/FreeBSD crypto framework.
624 +
625 +config OCF_RANDOMHARVEST
626 +       bool "crypto random --- harvest entropy for /dev/random"
627 +       depends on OCF_OCF
628 +       help
629 +         Includes code to harvest random numbers from devices that support it.
630 +
631 +config OCF_FIPS
632 +       bool "enable fips RNG checks"
633 +       depends on OCF_OCF && OCF_RANDOMHARVEST
634 +       help
635 +         Run all RNG provided data through a fips check before
636 +         adding it to /dev/random's entropy pool.
637 +
638 +config OCF_CRYPTODEV
639 +       tristate "cryptodev (user space support)"
640 +       depends on OCF_OCF
641 +       help
642 +         The user space API to access crypto hardware.
643 +
644 +config OCF_CRYPTOSOFT
645 +       tristate "cryptosoft (software crypto engine)"
646 +       depends on OCF_OCF
647 +       help
648 +         A software driver for the OCF framework that uses
649 +         the kernel CryptoAPI.
650 +
651 +config OCF_SAFE
652 +       tristate "safenet (HW crypto engine)"
653 +       depends on OCF_OCF
654 +       help
655 +         A driver for a number of the safenet Excel crypto accelerators.
656 +         Currently tested and working on the 1141 and 1741.
657 +
658 +config OCF_IXP4XX
659 +       tristate "IXP4xx (HW crypto engine)"
660 +       depends on OCF_OCF
661 +       help
662 +         XScale IXP4xx crypto accelerator driver.  Requires the
663 +         Intel Access library.
664 +
665 +config OCF_IXP4XX_SHA1_MD5
666 +       bool "IXP4xx SHA1 and MD5 Hashing"
667 +       depends on OCF_IXP4XX
668 +       help
669 +         Allows the IXP4xx crypto accelerator to perform SHA1 and MD5 hashing.
670 +         Note: this is MUCH slower than using cryptosoft (software crypto engine).
671 +
672 +config OCF_HIFN
673 +       tristate "hifn (HW crypto engine)"
674 +       depends on OCF_OCF
675 +       help
676 +         OCF driver for various HIFN based crypto accelerators.
677 +         (7951, 7955, 7956, 7751, 7811)
678 +
679 +config OCF_HIFNHIPP
680 +       tristate "Hifn HIPP (HW packet crypto engine)"
681 +       depends on OCF_OCF
682 +       help
683 +         OCF driver for various HIFN (HIPP) based crypto accelerators
684 +         (7855)
685 +
686 +config OCF_TALITOS
687 +       tristate "talitos (HW crypto engine)"
688 +       depends on OCF_OCF
689 +       help
690 +         OCF driver for Freescale's security engine (SEC/talitos).
691 +
692 +config OCF_PASEMI
693 +       tristate "pasemi (HW crypto engine)"
694 +       depends on OCF_OCF && PPC_PASEMI
695 +       help
696 +         OCF driver for the PA Semi PWRficient DMA Engine
697 +
698 +config OCF_EP80579
699 +       tristate "ep80579 (HW crypto engine)"
700 +       depends on OCF_OCF
701 +       help
702 +         OCF driver for the Intel EP80579 Integrated Processor Product Line.
703 +
704 +config OCF_OCFNULL
705 +       tristate "ocfnull (fake crypto engine)"
706 +       depends on OCF_OCF
707 +       help
708 +         OCF driver for measuring ipsec overheads (does no crypto)
709 +
710 +config OCF_BENCH
711 +       tristate "ocf-bench (HW crypto in-kernel benchmark)"
712 +       depends on OCF_OCF
713 +       help
714 +         A very simple encryption test for the in-kernel interface
715 +         of OCF.  Also includes code to benchmark the IXP Access library
716 +         for comparison.
717 +
718 +endmenu
719 --- /dev/null
720 +++ b/crypto/ocf/README
721 @@ -0,0 +1,167 @@
722 +README - ocf-linux-20071215
723 +---------------------------
724 +
725 +This README provides instructions for getting ocf-linux compiled and
726 +operating in a generic linux environment.  For other information you
727 +might like to visit the home page for this project:
728 +
729 +    http://ocf-linux.sourceforge.net/
730 +
731 +Adding OCF to linux
732 +-------------------
733 +
734 +    Not much in this file for now,  just some notes.  I usually build
735 +    the ocf support as modules but it can be built into the kernel as
736 +    well.  To use it:
737 +
738 +    * mknod /dev/crypto c 10 70
739 +
740 +    * to add OCF to your kernel source,  you have two options.  Apply
741 +      the kernel specific patch:
742 +
743 +          cd linux-2.4*; gunzip < ocf-linux-24-XXXXXXXX.patch.gz | patch -p1
744 +          cd linux-2.6*; gunzip < ocf-linux-26-XXXXXXXX.patch.gz | patch -p1
745 +    
746 +      if you do one of the above,  then you can proceed to the next step,
747 +      or you can do the above process by hand with using the patches against
748 +      linux-2.4.35 and 2.6.23 to include the ocf code under crypto/ocf.
749 +      Here's how to add it:
750 +
751 +      for 2.4.35 (and later)
752 +
753 +          cd linux-2.4.35/crypto
754 +          tar xvzf ocf-linux.tar.gz
755 +          cd ..
756 +          patch -p1 < crypto/ocf/patches/linux-2.4.35-ocf.patch
757 +
758 +      for 2.6.23 (and later),  find the kernel patch specific (or nearest)
759 +      to your kernel versions and then:
760 +
761 +          cd linux-2.6.NN/crypto
762 +          tar xvzf ocf-linux.tar.gz
763 +          cd ..
764 +          patch -p1 < crypto/ocf/patches/linux-2.6.NN-ocf.patch
765 +
766 +      It should be easy to take this patch and apply it to other more
767 +      recent versions of the kernels.  The same patches should also work
768 +      relatively easily on kernels as old as 2.6.11 and 2.4.18.
769 +      
770 +    * under 2.4 if you are on a non-x86 platform,  you may need to:
771 +
772 +        cp linux-2.X.x/include/asm-i386/kmap_types.h linux-2.X.x/include/asm-YYY
773 +
774 +      so that you can build the kernel crypto support needed for the cryptosoft
775 +      driver.
776 +
777 +    * For simplicity you should enable all the crypto support in your kernel
778 +      except for the test driver.  Likewise for the OCF options.  Do not
779 +      enable OCF crypto drivers for HW that you do not have (for example
780 +      ixp4xx will not compile on non-Xscale systems).
781 +
782 +    * make sure that cryptodev.h (from ocf-linux.tar.gz) is installed as
783 +      crypto/cryptodev.h in an include directory that is used for building
784 +      applications for your platform.  For example on a host system that
785 +      might be:
786 +
787 +              /usr/include/crypto/cryptodev.h
788 +
789 +    * patch your openssl-0.9.8i code with the openssl-0.9.8i.patch.
790 +      (NOTE: there is no longer a need to patch ssh). The patch is against:
791 +      openssl-0_9_8e
792 +
793 +      If you need a patch for an older version of openssl,  you should look
794 +      to older OCF releases.  This patch is unlikely to work on older
795 +      openssl versions.
796 +
797 +      openssl-0.9.8i.patch
798 +                - enables --with-cryptodev for non BSD systems
799 +                - adds -cpu option to openssl speed for calculating CPU load
800 +                  under linux
801 +                - fixes null pointer in openssl speed multi thread output.
802 +                - fixes test keys to work with linux crypto's more stringent
803 +                  key checking.
804 +                - adds MD5/SHA acceleration (Ronen Shitrit), only enabled
805 +                  with the --with-cryptodev-digests option
806 +                - fixes bug in engine code caching.
807 +
808 +    * build crypto-tools-XXXXXXXX.tar.gz if you want to try some of the BSD
809 +      tools for testing OCF (ie., cryptotest).
810 +
811 +How to load the OCF drivers
812 +---------------------------
813 +
814 +    First insert the base modules:
815 +
816 +        insmod ocf
817 +        insmod cryptodev
818 +
819 +    You can then install the software OCF driver with:
820 +
821 +        insmod cryptosoft
822 +
823 +    and one or more of the OCF HW drivers with:
824 +
825 +        insmod safe
826 +        insmod hifn7751
827 +        insmod ixp4xx
828 +        ...
829 +
830 +    all the drivers take a debug option to enable verbose debug so that
831 +    you can see what is going on.  For debug you load them as:
832 +
833 +        insmod ocf crypto_debug=1
834 +        insmod cryptodev cryptodev_debug=1
835 +        insmod cryptosoft swcr_debug=1
836 +
837 +    You may load more than one OCF crypto driver but then there is no guarantee
838 +    as to which will be used.
839 +
840 +    You can also enable debug at run time on 2.6 systems with the following:
841 +
842 +        echo 1 > /sys/module/ocf/parameters/crypto_debug
843 +        echo 1 > /sys/module/cryptodev/parameters/cryptodev_debug
844 +        echo 1 > /sys/module/cryptosoft/parameters/swcr_debug
845 +        echo 1 > /sys/module/hifn7751/parameters/hifn_debug
846 +        echo 1 > /sys/module/safe/parameters/safe_debug
847 +        echo 1 > /sys/module/ixp4xx/parameters/ixp_debug
848 +        ...
849 +
850 +Testing the OCF support
851 +-----------------------
852 +
853 +    run "cryptotest",  it should do a short test for a couple of
854 +      des packets.  If it does, everything is working.
855 +
856 +    If this works,  then ssh will use the driver when invoked as:
857 +
858 +        ssh -c 3des username@host
859 +
860 +    to see for sure that it is operating, enable debug as defined above.
861 +
862 +    To get a better idea of performance run:
863 +
864 +        cryptotest 100 4096
865 +
866 +    There are more options to cryptotest,  see the help.
867 +
868 +    It is also possible to use openssl to test the speed of the crypto
869 +    drivers.
870 +
871 +        openssl speed -evp des -engine cryptodev -elapsed
872 +        openssl speed -evp des3 -engine cryptodev -elapsed
873 +        openssl speed -evp aes128 -engine cryptodev -elapsed
874 +
875 +    and multiple threads (10) with:
876 +
877 +        openssl speed -evp des -engine cryptodev -elapsed -multi 10
878 +        openssl speed -evp des3 -engine cryptodev -elapsed -multi 10
879 +        openssl speed -evp aes128 -engine cryptodev -elapsed -multi 10
880 +
881 +    for public key testing you can try:
882 +
883 +        cryptokeytest
884 +        openssl speed -engine cryptodev rsa -elapsed
885 +        openssl speed -engine cryptodev dsa -elapsed
886 +
887 +David McCullough
888 +david_mccullough@securecomputing.com
889 --- /dev/null
890 +++ b/crypto/ocf/hifn/hifn7751reg.h
891 @@ -0,0 +1,540 @@
892 +/* $FreeBSD: src/sys/dev/hifn/hifn7751reg.h,v 1.7 2007/03/21 03:42:49 sam Exp $ */
893 +/*     $OpenBSD: hifn7751reg.h,v 1.35 2002/04/08 17:49:42 jason Exp $  */
894 +
895 +/*-
896 + * Invertex AEON / Hifn 7751 driver
897 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
898 + * Copyright (c) 1999 Theo de Raadt
899 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
900 + *                     http://www.netsec.net
901 + *
902 + * Please send any comments, feedback, bug-fixes, or feature requests to
903 + * software@invertex.com.
904 + *
905 + * Redistribution and use in source and binary forms, with or without
906 + * modification, are permitted provided that the following conditions
907 + * are met:
908 + *
909 + * 1. Redistributions of source code must retain the above copyright
910 + *    notice, this list of conditions and the following disclaimer.
911 + * 2. Redistributions in binary form must reproduce the above copyright
912 + *    notice, this list of conditions and the following disclaimer in the
913 + *    documentation and/or other materials provided with the distribution.
914 + * 3. The name of the author may not be used to endorse or promote products
915 + *    derived from this software without specific prior written permission.
916 + *
917 + *
918 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
919 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
920 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
921 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
922 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
923 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
924 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
925 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
926 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
927 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
928 + *
929 + * Effort sponsored in part by the Defense Advanced Research Projects
930 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
931 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
932 + *
933 + */
934 +#ifndef __HIFN_H__
935 +#define        __HIFN_H__
936 +
937 +/*
938 + * Some PCI configuration space offset defines.  The names were made
939 + * identical to the names used by the Linux kernel.
940 + */
941 +#define        HIFN_BAR0               PCIR_BAR(0)     /* PUC register map */
942 +#define        HIFN_BAR1               PCIR_BAR(1)     /* DMA register map */
943 +#define        HIFN_TRDY_TIMEOUT       0x40
944 +#define        HIFN_RETRY_TIMEOUT      0x41
945 +
946 +/*
947 + * PCI vendor and device identifiers
948 + * (the names are preserved from their OpenBSD source).
949 + */
950 +#define        PCI_VENDOR_HIFN         0x13a3          /* Hifn */
951 +#define        PCI_PRODUCT_HIFN_7751   0x0005          /* 7751 */
952 +#define        PCI_PRODUCT_HIFN_6500   0x0006          /* 6500 */
953 +#define        PCI_PRODUCT_HIFN_7811   0x0007          /* 7811 */
954 +#define        PCI_PRODUCT_HIFN_7855   0x001f          /* 7855 */
955 +#define        PCI_PRODUCT_HIFN_7951   0x0012          /* 7951 */
956 +#define        PCI_PRODUCT_HIFN_7955   0x0020          /* 7954/7955 */
957 +#define        PCI_PRODUCT_HIFN_7956   0x001d          /* 7956 */
958 +
959 +#define        PCI_VENDOR_INVERTEX     0x14e1          /* Invertex */
960 +#define        PCI_PRODUCT_INVERTEX_AEON 0x0005        /* AEON */
961 +
962 +#define        PCI_VENDOR_NETSEC       0x1660          /* NetSec */
963 +#define        PCI_PRODUCT_NETSEC_7751 0x7751          /* 7751 */
964 +
965 +/*
966 + * The values below should be multiples of 4 -- and be large enough to handle
967 + * any command the driver implements.
968 + *
969 + * MAX_COMMAND = base command + mac command + encrypt command +
970 + *                     mac-key + rc4-key
971 + * MAX_RESULT  = base result + mac result + mac + encrypt result
972 + *                     
973 + *
974 + */
975 +#define        HIFN_MAX_COMMAND        (8 + 8 + 8 + 64 + 260)
976 +#define        HIFN_MAX_RESULT         (8 + 4 + 20 + 4)
977 +
978 +/*
979 + * hifn_desc_t
980 + *
981 + * Holds an individual descriptor for any of the rings.
982 + */
983 +typedef struct hifn_desc {
984 +       volatile u_int32_t l;           /* length and status bits */
985 +       volatile u_int32_t p;
986 +} hifn_desc_t;
987 +
988 +/*
989 + * Masks for the "length" field of struct hifn_desc.
990 + */
991 +#define        HIFN_D_LENGTH           0x0000ffff      /* length bit mask */
992 +#define        HIFN_D_MASKDONEIRQ      0x02000000      /* mask the done interrupt */
993 +#define        HIFN_D_DESTOVER         0x04000000      /* destination overflow */
994 +#define        HIFN_D_OVER             0x08000000      /* overflow */
995 +#define        HIFN_D_LAST             0x20000000      /* last descriptor in chain */
996 +#define        HIFN_D_JUMP             0x40000000      /* jump descriptor */
997 +#define        HIFN_D_VALID            0x80000000      /* valid bit */
998 +
999 +
1000 +/*
1001 + * Processing Unit Registers (offset from BASEREG0)
1002 + */
1003 +#define        HIFN_0_PUDATA           0x00    /* Processing Unit Data */
1004 +#define        HIFN_0_PUCTRL           0x04    /* Processing Unit Control */
1005 +#define        HIFN_0_PUISR            0x08    /* Processing Unit Interrupt Status */
1006 +#define        HIFN_0_PUCNFG           0x0c    /* Processing Unit Configuration */
1007 +#define        HIFN_0_PUIER            0x10    /* Processing Unit Interrupt Enable */
1008 +#define        HIFN_0_PUSTAT           0x14    /* Processing Unit Status/Chip ID */
1009 +#define        HIFN_0_FIFOSTAT         0x18    /* FIFO Status */
1010 +#define        HIFN_0_FIFOCNFG         0x1c    /* FIFO Configuration */
1011 +#define        HIFN_0_PUCTRL2          0x28    /* Processing Unit Control (2nd map) */
1012 +#define        HIFN_0_MUTE1            0x80
1013 +#define        HIFN_0_MUTE2            0x90
1014 +#define        HIFN_0_SPACESIZE        0x100   /* Register space size */
1015 +
1016 +/* Processing Unit Control Register (HIFN_0_PUCTRL) */
1017 +#define        HIFN_PUCTRL_CLRSRCFIFO  0x0010  /* clear source fifo */
1018 +#define        HIFN_PUCTRL_STOP        0x0008  /* stop pu */
1019 +#define        HIFN_PUCTRL_LOCKRAM     0x0004  /* lock ram */
1020 +#define        HIFN_PUCTRL_DMAENA      0x0002  /* enable dma */
1021 +#define        HIFN_PUCTRL_RESET       0x0001  /* Reset processing unit */
1022 +
1023 +/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
1024 +#define        HIFN_PUISR_CMDINVAL     0x8000  /* Invalid command interrupt */
1025 +#define        HIFN_PUISR_DATAERR      0x4000  /* Data error interrupt */
1026 +#define        HIFN_PUISR_SRCFIFO      0x2000  /* Source FIFO ready interrupt */
1027 +#define        HIFN_PUISR_DSTFIFO      0x1000  /* Destination FIFO ready interrupt */
1028 +#define        HIFN_PUISR_DSTOVER      0x0200  /* Destination overrun interrupt */
1029 +#define        HIFN_PUISR_SRCCMD       0x0080  /* Source command interrupt */
1030 +#define        HIFN_PUISR_SRCCTX       0x0040  /* Source context interrupt */
1031 +#define        HIFN_PUISR_SRCDATA      0x0020  /* Source data interrupt */
1032 +#define        HIFN_PUISR_DSTDATA      0x0010  /* Destination data interrupt */
1033 +#define        HIFN_PUISR_DSTRESULT    0x0004  /* Destination result interrupt */
1034 +
1035 +/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
1036 +#define        HIFN_PUCNFG_DRAMMASK    0xe000  /* DRAM size mask */
1037 +#define        HIFN_PUCNFG_DSZ_256K    0x0000  /* 256k dram */
1038 +#define        HIFN_PUCNFG_DSZ_512K    0x2000  /* 512k dram */
1039 +#define        HIFN_PUCNFG_DSZ_1M      0x4000  /* 1m dram */
1040 +#define        HIFN_PUCNFG_DSZ_2M      0x6000  /* 2m dram */
1041 +#define        HIFN_PUCNFG_DSZ_4M      0x8000  /* 4m dram */
1042 +#define        HIFN_PUCNFG_DSZ_8M      0xa000  /* 8m dram */
1043 +#define        HIFN_PUNCFG_DSZ_16M     0xc000  /* 16m dram */
1044 +#define        HIFN_PUCNFG_DSZ_32M     0xe000  /* 32m dram */
1045 +#define        HIFN_PUCNFG_DRAMREFRESH 0x1800  /* DRAM refresh rate mask */
1046 +#define        HIFN_PUCNFG_DRFR_512    0x0000  /* 512 divisor of ECLK */
1047 +#define        HIFN_PUCNFG_DRFR_256    0x0800  /* 256 divisor of ECLK */
1048 +#define        HIFN_PUCNFG_DRFR_128    0x1000  /* 128 divisor of ECLK */
1049 +#define        HIFN_PUCNFG_TCALLPHASES 0x0200  /* your guess is as good as mine... */
1050 +#define        HIFN_PUCNFG_TCDRVTOTEM  0x0100  /* your guess is as good as mine... */
1051 +#define        HIFN_PUCNFG_BIGENDIAN   0x0080  /* DMA big endian mode */
1052 +#define        HIFN_PUCNFG_BUS32       0x0040  /* Bus width 32bits */
1053 +#define        HIFN_PUCNFG_BUS16       0x0000  /* Bus width 16 bits */
1054 +#define        HIFN_PUCNFG_CHIPID      0x0020  /* Allow chipid from PUSTAT */
1055 +#define        HIFN_PUCNFG_DRAM        0x0010  /* Context RAM is DRAM */
1056 +#define        HIFN_PUCNFG_SRAM        0x0000  /* Context RAM is SRAM */
1057 +#define        HIFN_PUCNFG_COMPSING    0x0004  /* Enable single compression context */
1058 +#define        HIFN_PUCNFG_ENCCNFG     0x0002  /* Encryption configuration */
1059 +
1060 +/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
1061 +#define        HIFN_PUIER_CMDINVAL     0x8000  /* Invalid command interrupt */
1062 +#define        HIFN_PUIER_DATAERR      0x4000  /* Data error interrupt */
1063 +#define        HIFN_PUIER_SRCFIFO      0x2000  /* Source FIFO ready interrupt */
1064 +#define        HIFN_PUIER_DSTFIFO      0x1000  /* Destination FIFO ready interrupt */
1065 +#define        HIFN_PUIER_DSTOVER      0x0200  /* Destination overrun interrupt */
1066 +#define        HIFN_PUIER_SRCCMD       0x0080  /* Source command interrupt */
1067 +#define        HIFN_PUIER_SRCCTX       0x0040  /* Source context interrupt */
1068 +#define        HIFN_PUIER_SRCDATA      0x0020  /* Source data interrupt */
1069 +#define        HIFN_PUIER_DSTDATA      0x0010  /* Destination data interrupt */
1070 +#define        HIFN_PUIER_DSTRESULT    0x0004  /* Destination result interrupt */
1071 +
1072 +/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
1073 +#define        HIFN_PUSTAT_CMDINVAL    0x8000  /* Invalid command interrupt */
1074 +#define        HIFN_PUSTAT_DATAERR     0x4000  /* Data error interrupt */
1075 +#define        HIFN_PUSTAT_SRCFIFO     0x2000  /* Source FIFO ready interrupt */
1076 +#define        HIFN_PUSTAT_DSTFIFO     0x1000  /* Destination FIFO ready interrupt */
1077 +#define        HIFN_PUSTAT_DSTOVER     0x0200  /* Destination overrun interrupt */
1078 +#define        HIFN_PUSTAT_SRCCMD      0x0080  /* Source command interrupt */
1079 +#define        HIFN_PUSTAT_SRCCTX      0x0040  /* Source context interrupt */
1080 +#define        HIFN_PUSTAT_SRCDATA     0x0020  /* Source data interrupt */
1081 +#define        HIFN_PUSTAT_DSTDATA     0x0010  /* Destination data interrupt */
1082 +#define        HIFN_PUSTAT_DSTRESULT   0x0004  /* Destination result interrupt */
1083 +#define        HIFN_PUSTAT_CHIPREV     0x00ff  /* Chip revision mask */
1084 +#define        HIFN_PUSTAT_CHIPENA     0xff00  /* Chip enabled mask */
1085 +#define        HIFN_PUSTAT_ENA_2       0x1100  /* Level 2 enabled */
1086 +#define        HIFN_PUSTAT_ENA_1       0x1000  /* Level 1 enabled */
1087 +#define        HIFN_PUSTAT_ENA_0       0x3000  /* Level 0 enabled */
1088 +#define        HIFN_PUSTAT_REV_2       0x0020  /* 7751 PT6/2 */
1089 +#define        HIFN_PUSTAT_REV_3       0x0030  /* 7751 PT6/3 */
1090 +
1091 +/* FIFO Status Register (HIFN_0_FIFOSTAT) */
1092 +#define        HIFN_FIFOSTAT_SRC       0x7f00  /* Source FIFO available */
1093 +#define        HIFN_FIFOSTAT_DST       0x007f  /* Destination FIFO available */
1094 +
1095 +/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
1096 +#define        HIFN_FIFOCNFG_THRESHOLD 0x0400  /* must be written as this value */
1097 +
1098 +/*
1099 + * DMA Interface Registers (offset from BASEREG1)
1100 + */
1101 +#define        HIFN_1_DMA_CRAR         0x0c    /* DMA Command Ring Address */
1102 +#define        HIFN_1_DMA_SRAR         0x1c    /* DMA Source Ring Address */
1103 +#define        HIFN_1_DMA_RRAR         0x2c    /* DMA Result Ring Address */
1104 +#define        HIFN_1_DMA_DRAR         0x3c    /* DMA Destination Ring Address */
1105 +#define        HIFN_1_DMA_CSR          0x40    /* DMA Status and Control */
1106 +#define        HIFN_1_DMA_IER          0x44    /* DMA Interrupt Enable */
1107 +#define        HIFN_1_DMA_CNFG         0x48    /* DMA Configuration */
1108 +#define        HIFN_1_PLL              0x4c    /* 7955/7956: PLL config */
1109 +#define        HIFN_1_7811_RNGENA      0x60    /* 7811: rng enable */
1110 +#define        HIFN_1_7811_RNGCFG      0x64    /* 7811: rng config */
1111 +#define        HIFN_1_7811_RNGDAT      0x68    /* 7811: rng data */
1112 +#define        HIFN_1_7811_RNGSTS      0x6c    /* 7811: rng status */
1113 +#define        HIFN_1_DMA_CNFG2        0x6c    /* 7955/7956: dma config #2 */
1114 +#define        HIFN_1_7811_MIPSRST     0x94    /* 7811: MIPS reset */
1115 +#define        HIFN_1_REVID            0x98    /* Revision ID */
1116 +
1117 +#define        HIFN_1_PUB_RESET        0x204   /* Public/RNG Reset */
1118 +#define        HIFN_1_PUB_BASE         0x300   /* Public Base Address */
1119 +#define        HIFN_1_PUB_OPLEN        0x304   /* 7951-compat Public Operand Length */
1120 +#define        HIFN_1_PUB_OP           0x308   /* 7951-compat Public Operand */
1121 +#define        HIFN_1_PUB_STATUS       0x30c   /* 7951-compat Public Status */
1122 +#define        HIFN_1_PUB_IEN          0x310   /* Public Interrupt enable */
1123 +#define        HIFN_1_RNG_CONFIG       0x314   /* RNG config */
1124 +#define        HIFN_1_RNG_DATA         0x318   /* RNG data */
1125 +#define        HIFN_1_PUB_MODE         0x320   /* PK mode */
1126 +#define        HIFN_1_PUB_FIFO_OPLEN   0x380   /* first element of oplen fifo */
1127 +#define        HIFN_1_PUB_FIFO_OP      0x384   /* first element of op fifo */
1128 +#define        HIFN_1_PUB_MEM          0x400   /* start of Public key memory */
1129 +#define        HIFN_1_PUB_MEMEND       0xbff   /* end of Public key memory */
1130 +
1131 +/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
1132 +#define        HIFN_DMACSR_D_CTRLMASK  0xc0000000      /* Destination Ring Control */
1133 +#define        HIFN_DMACSR_D_CTRL_NOP  0x00000000      /* Dest. Control: no-op */
1134 +#define        HIFN_DMACSR_D_CTRL_DIS  0x40000000      /* Dest. Control: disable */
1135 +#define        HIFN_DMACSR_D_CTRL_ENA  0x80000000      /* Dest. Control: enable */
1136 +#define        HIFN_DMACSR_D_ABORT     0x20000000      /* Destination Ring PCI Abort */
1137 +#define        HIFN_DMACSR_D_DONE      0x10000000      /* Destination Ring Done */
1138 +#define        HIFN_DMACSR_D_LAST      0x08000000      /* Destination Ring Last */
1139 +#define        HIFN_DMACSR_D_WAIT      0x04000000      /* Destination Ring Waiting */
1140 +#define        HIFN_DMACSR_D_OVER      0x02000000      /* Destination Ring Overflow */
1141 +#define        HIFN_DMACSR_R_CTRL      0x00c00000      /* Result Ring Control */
1142 +#define        HIFN_DMACSR_R_CTRL_NOP  0x00000000      /* Result Control: no-op */
1143 +#define        HIFN_DMACSR_R_CTRL_DIS  0x00400000      /* Result Control: disable */
1144 +#define        HIFN_DMACSR_R_CTRL_ENA  0x00800000      /* Result Control: enable */
1145 +#define        HIFN_DMACSR_R_ABORT     0x00200000      /* Result Ring PCI Abort */
1146 +#define        HIFN_DMACSR_R_DONE      0x00100000      /* Result Ring Done */
1147 +#define        HIFN_DMACSR_R_LAST      0x00080000      /* Result Ring Last */
1148 +#define        HIFN_DMACSR_R_WAIT      0x00040000      /* Result Ring Waiting */
1149 +#define        HIFN_DMACSR_R_OVER      0x00020000      /* Result Ring Overflow */
1150 +#define        HIFN_DMACSR_S_CTRL      0x0000c000      /* Source Ring Control */
1151 +#define        HIFN_DMACSR_S_CTRL_NOP  0x00000000      /* Source Control: no-op */
1152 +#define        HIFN_DMACSR_S_CTRL_DIS  0x00004000      /* Source Control: disable */
1153 +#define        HIFN_DMACSR_S_CTRL_ENA  0x00008000      /* Source Control: enable */
1154 +#define        HIFN_DMACSR_S_ABORT     0x00002000      /* Source Ring PCI Abort */
1155 +#define        HIFN_DMACSR_S_DONE      0x00001000      /* Source Ring Done */
1156 +#define        HIFN_DMACSR_S_LAST      0x00000800      /* Source Ring Last */
1157 +#define        HIFN_DMACSR_S_WAIT      0x00000400      /* Source Ring Waiting */
1158 +#define        HIFN_DMACSR_ILLW        0x00000200      /* Illegal write (7811 only) */
1159 +#define        HIFN_DMACSR_ILLR        0x00000100      /* Illegal read (7811 only) */
1160 +#define        HIFN_DMACSR_C_CTRL      0x000000c0      /* Command Ring Control */
1161 +#define        HIFN_DMACSR_C_CTRL_NOP  0x00000000      /* Command Control: no-op */
1162 +#define        HIFN_DMACSR_C_CTRL_DIS  0x00000040      /* Command Control: disable */
1163 +#define        HIFN_DMACSR_C_CTRL_ENA  0x00000080      /* Command Control: enable */
1164 +#define        HIFN_DMACSR_C_ABORT     0x00000020      /* Command Ring PCI Abort */
1165 +#define        HIFN_DMACSR_C_DONE      0x00000010      /* Command Ring Done */
1166 +#define        HIFN_DMACSR_C_LAST      0x00000008      /* Command Ring Last */
1167 +#define        HIFN_DMACSR_C_WAIT      0x00000004      /* Command Ring Waiting */
1168 +#define        HIFN_DMACSR_PUBDONE     0x00000002      /* Public op done (7951 only) */
1169 +#define        HIFN_DMACSR_ENGINE      0x00000001      /* Command Ring Engine IRQ */
1170 +
1171 +/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
1172 +#define        HIFN_DMAIER_D_ABORT     0x20000000      /* Destination Ring PCIAbort */
1173 +#define        HIFN_DMAIER_D_DONE      0x10000000      /* Destination Ring Done */
1174 +#define        HIFN_DMAIER_D_LAST      0x08000000      /* Destination Ring Last */
1175 +#define        HIFN_DMAIER_D_WAIT      0x04000000      /* Destination Ring Waiting */
1176 +#define        HIFN_DMAIER_D_OVER      0x02000000      /* Destination Ring Overflow */
1177 +#define        HIFN_DMAIER_R_ABORT     0x00200000      /* Result Ring PCI Abort */
1178 +#define        HIFN_DMAIER_R_DONE      0x00100000      /* Result Ring Done */
1179 +#define        HIFN_DMAIER_R_LAST      0x00080000      /* Result Ring Last */
1180 +#define        HIFN_DMAIER_R_WAIT      0x00040000      /* Result Ring Waiting */
1181 +#define        HIFN_DMAIER_R_OVER      0x00020000      /* Result Ring Overflow */
1182 +#define        HIFN_DMAIER_S_ABORT     0x00002000      /* Source Ring PCI Abort */
1183 +#define        HIFN_DMAIER_S_DONE      0x00001000      /* Source Ring Done */
1184 +#define        HIFN_DMAIER_S_LAST      0x00000800      /* Source Ring Last */
1185 +#define        HIFN_DMAIER_S_WAIT      0x00000400      /* Source Ring Waiting */
1186 +#define        HIFN_DMAIER_ILLW        0x00000200      /* Illegal write (7811 only) */
1187 +#define        HIFN_DMAIER_ILLR        0x00000100      /* Illegal read (7811 only) */
1188 +#define        HIFN_DMAIER_C_ABORT     0x00000020      /* Command Ring PCI Abort */
1189 +#define        HIFN_DMAIER_C_DONE      0x00000010      /* Command Ring Done */
1190 +#define        HIFN_DMAIER_C_LAST      0x00000008      /* Command Ring Last */
1191 +#define        HIFN_DMAIER_C_WAIT      0x00000004      /* Command Ring Waiting */
1192 +#define        HIFN_DMAIER_PUBDONE     0x00000002      /* public op done (7951 only) */
1193 +#define        HIFN_DMAIER_ENGINE      0x00000001      /* Engine IRQ */
1194 +
1195 +/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
1196 +#define        HIFN_DMACNFG_BIGENDIAN  0x10000000      /* big endian mode */
1197 +#define        HIFN_DMACNFG_POLLFREQ   0x00ff0000      /* Poll frequency mask */
1198 +#define        HIFN_DMACNFG_UNLOCK     0x00000800
1199 +#define        HIFN_DMACNFG_POLLINVAL  0x00000700      /* Invalid Poll Scalar */
1200 +#define        HIFN_DMACNFG_LAST       0x00000010      /* Host control LAST bit */
1201 +#define        HIFN_DMACNFG_MODE       0x00000004      /* DMA mode */
1202 +#define        HIFN_DMACNFG_DMARESET   0x00000002      /* DMA Reset # */
1203 +#define        HIFN_DMACNFG_MSTRESET   0x00000001      /* Master Reset # */
1204 +
1205 +/* DMA Configuration Register (HIFN_1_DMA_CNFG2) */
1206 +#define        HIFN_DMACNFG2_PKSWAP32  (1 << 19)       /* swap the OPLEN/OP reg */
1207 +#define        HIFN_DMACNFG2_PKSWAP8   (1 << 18)       /* swap the bits of OPLEN/OP */
1208 +#define        HIFN_DMACNFG2_BAR0_SWAP32 (1<<17)       /* swap the bytes of BAR0 */
1209 +#define        HIFN_DMACNFG2_BAR1_SWAP8 (1<<16)        /* swap the bits  of BAR0 */
1210 +#define        HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT 12
1211 +#define        HIFN_DMACNFG2_INIT_READ_BURST_SHIFT 8
1212 +#define        HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT 4
1213 +#define        HIFN_DMACNFG2_TGT_READ_BURST_SHIFT  0
1214 +
1215 +/* 7811 RNG Enable Register (HIFN_1_7811_RNGENA) */
1216 +#define        HIFN_7811_RNGENA_ENA    0x00000001      /* enable RNG */
1217 +
1218 +/* 7811 RNG Config Register (HIFN_1_7811_RNGCFG) */
1219 +#define        HIFN_7811_RNGCFG_PRE1   0x00000f00      /* first prescalar */
1220 +#define        HIFN_7811_RNGCFG_OPRE   0x00000080      /* output prescalar */
1221 +#define        HIFN_7811_RNGCFG_DEFL   0x00000f80      /* 2 words/ 1/100 sec */
1222 +
1223 +/* 7811 RNG Status Register (HIFN_1_7811_RNGSTS) */
1224 +#define        HIFN_7811_RNGSTS_RDY    0x00004000      /* two numbers in FIFO */
1225 +#define        HIFN_7811_RNGSTS_UFL    0x00001000      /* rng underflow */
1226 +
1227 +/* 7811 MIPS Reset Register (HIFN_1_7811_MIPSRST) */
1228 +#define        HIFN_MIPSRST_BAR2SIZE   0xffff0000      /* sdram size */
1229 +#define        HIFN_MIPSRST_GPRAMINIT  0x00008000      /* gpram can be accessed */
1230 +#define        HIFN_MIPSRST_CRAMINIT   0x00004000      /* ctxram can be accessed */
1231 +#define        HIFN_MIPSRST_LED2       0x00000400      /* external LED2 */
1232 +#define        HIFN_MIPSRST_LED1       0x00000200      /* external LED1 */
1233 +#define        HIFN_MIPSRST_LED0       0x00000100      /* external LED0 */
1234 +#define        HIFN_MIPSRST_MIPSDIS    0x00000004      /* disable MIPS */
1235 +#define        HIFN_MIPSRST_MIPSRST    0x00000002      /* warm reset MIPS */
1236 +#define        HIFN_MIPSRST_MIPSCOLD   0x00000001      /* cold reset MIPS */
1237 +
1238 +/* Public key reset register (HIFN_1_PUB_RESET) */
1239 +#define        HIFN_PUBRST_RESET       0x00000001      /* reset public/rng unit */
1240 +
1241 +/* Public operation register (HIFN_1_PUB_OP) */
1242 +#define        HIFN_PUBOP_AOFFSET      0x0000003e      /* A offset */
1243 +#define        HIFN_PUBOP_BOFFSET      0x00000fc0      /* B offset */
1244 +#define        HIFN_PUBOP_MOFFSET      0x0003f000      /* M offset */
1245 +#define        HIFN_PUBOP_OP_MASK      0x003c0000      /* Opcode: */
1246 +#define        HIFN_PUBOP_OP_NOP       0x00000000      /*  NOP */
1247 +#define        HIFN_PUBOP_OP_ADD       0x00040000      /*  ADD */
1248 +#define        HIFN_PUBOP_OP_ADDC      0x00080000      /*  ADD w/carry */
1249 +#define        HIFN_PUBOP_OP_SUB       0x000c0000      /*  SUB */
1250 +#define        HIFN_PUBOP_OP_SUBC      0x00100000      /*  SUB w/carry */
1251 +#define        HIFN_PUBOP_OP_MODADD    0x00140000      /*  Modular ADD */
1252 +#define        HIFN_PUBOP_OP_MODSUB    0x00180000      /*  Modular SUB */
1253 +#define        HIFN_PUBOP_OP_INCA      0x001c0000      /*  INC A */
1254 +#define        HIFN_PUBOP_OP_DECA      0x00200000      /*  DEC A */
1255 +#define        HIFN_PUBOP_OP_MULT      0x00240000      /*  MULT */
1256 +#define        HIFN_PUBOP_OP_MODMULT   0x00280000      /*  Modular MULT */
1257 +#define        HIFN_PUBOP_OP_MODRED    0x002c0000      /*  Modular Red */
1258 +#define        HIFN_PUBOP_OP_MODEXP    0x00300000      /*  Modular Exp */
1259 +
1260 +/* Public operand length register (HIFN_1_PUB_OPLEN) */
1261 +#define        HIFN_PUBOPLEN_MODLEN    0x0000007f
1262 +#define        HIFN_PUBOPLEN_EXPLEN    0x0003ff80
1263 +#define        HIFN_PUBOPLEN_REDLEN    0x003c0000
1264 +
1265 +/* Public status register (HIFN_1_PUB_STATUS) */
1266 +#define        HIFN_PUBSTS_DONE        0x00000001      /* operation done */
1267 +#define        HIFN_PUBSTS_CARRY       0x00000002      /* carry */
1268 +#define        HIFN_PUBSTS_FIFO_EMPTY  0x00000100      /* fifo empty */
1269 +#define        HIFN_PUBSTS_FIFO_FULL   0x00000200      /* fifo full */
1270 +#define        HIFN_PUBSTS_FIFO_OVFL   0x00000400      /* fifo overflow */
1271 +#define        HIFN_PUBSTS_FIFO_WRITE  0x000f0000      /* fifo write */
1272 +#define        HIFN_PUBSTS_FIFO_READ   0x0f000000      /* fifo read */
1273 +
1274 +/* Public interrupt enable register (HIFN_1_PUB_IEN) */
1275 +#define        HIFN_PUBIEN_DONE        0x00000001      /* operation done interrupt */
1276 +
1277 +/* Random number generator config register (HIFN_1_RNG_CONFIG) */
1278 +#define        HIFN_RNGCFG_ENA         0x00000001      /* enable rng */
1279 +
1280 +/*
1281 + * Register offsets in register set 1
1282 + */
1283 +
1284 +#define        HIFN_UNLOCK_SECRET1     0xf4
1285 +#define        HIFN_UNLOCK_SECRET2     0xfc
1286 +
1287 +/*
1288 + * PLL config register
1289 + *
1290 + * This register is present only on 7954/7955/7956 parts. It must be
1291 + * programmed according to the bus interface method used by the h/w.
1292 + * Note that the parts require a stable clock.  Since the PCI clock
1293 + * may vary the reference clock must usually be used.  To avoid
1294 + * overclocking the core logic, setup must be done carefully, refer
1295 + * to the driver for details.  The exact multiplier required varies
1296 + * by part and system configuration; refer to the Hifn documentation.
1297 + */
1298 +#define        HIFN_PLL_REF_SEL        0x00000001      /* REF/HBI clk selection */
1299 +#define        HIFN_PLL_BP             0x00000002      /* bypass (used during setup) */
1300 +/* bit 2 reserved */
1301 +#define        HIFN_PLL_PK_CLK_SEL     0x00000008      /* public key clk select */
1302 +#define        HIFN_PLL_PE_CLK_SEL     0x00000010      /* packet engine clk select */
1303 +/* bits 5-9 reserved */
1304 +#define        HIFN_PLL_MBSET          0x00000400      /* must be set to 1 */
1305 +#define        HIFN_PLL_ND             0x00003800      /* Fpll_ref multiplier select */
1306 +#define        HIFN_PLL_ND_SHIFT       11
1307 +#define        HIFN_PLL_ND_2           0x00000000      /* 2x */
1308 +#define        HIFN_PLL_ND_4           0x00000800      /* 4x */
1309 +#define        HIFN_PLL_ND_6           0x00001000      /* 6x */
1310 +#define        HIFN_PLL_ND_8           0x00001800      /* 8x */
1311 +#define        HIFN_PLL_ND_10          0x00002000      /* 10x */
1312 +#define        HIFN_PLL_ND_12          0x00002800      /* 12x */
1313 +/* bits 14-15 reserved */
1314 +#define        HIFN_PLL_IS             0x00010000      /* charge pump current select */
1315 +/* bits 17-31 reserved */
1316 +
1317 +/*
1318 + * Board configuration specifies only these bits.
1319 + */
1320 +#define        HIFN_PLL_CONFIG         (HIFN_PLL_IS|HIFN_PLL_ND|HIFN_PLL_REF_SEL)
1321 +
1322 +/*
1323 + * Public Key Engine Mode Register
1324 + */
1325 +#define        HIFN_PKMODE_HOSTINVERT  (1 << 0)        /* HOST INVERT */
1326 +#define        HIFN_PKMODE_ENHANCED    (1 << 1)        /* Enable enhanced mode */
1327 +
1328 +
1329 +/*********************************************************************
1330 + * Structs for board commands 
1331 + *
1332 + *********************************************************************/
1333 +
1334 +/*
1335 + * Structure to help build up the command data structure.
1336 + */
1337 +typedef struct hifn_base_command {
1338 +       volatile u_int16_t masks;
1339 +       volatile u_int16_t session_num;
1340 +       volatile u_int16_t total_source_count;
1341 +       volatile u_int16_t total_dest_count;
1342 +} hifn_base_command_t;
1343 +
1344 +#define        HIFN_BASE_CMD_MAC               0x0400
1345 +#define        HIFN_BASE_CMD_CRYPT             0x0800
1346 +#define        HIFN_BASE_CMD_DECODE            0x2000
1347 +#define        HIFN_BASE_CMD_SRCLEN_M          0xc000
1348 +#define        HIFN_BASE_CMD_SRCLEN_S          14
1349 +#define        HIFN_BASE_CMD_DSTLEN_M          0x3000
1350 +#define        HIFN_BASE_CMD_DSTLEN_S          12
1351 +#define        HIFN_BASE_CMD_LENMASK_HI        0x30000
1352 +#define        HIFN_BASE_CMD_LENMASK_LO        0x0ffff
1353 +
1354 +/*
1355 + * Structure to help build up the command data structure.
1356 + */
1357 +typedef struct hifn_crypt_command {
1358 +       volatile u_int16_t masks;
1359 +       volatile u_int16_t header_skip;
1360 +       volatile u_int16_t source_count;
1361 +       volatile u_int16_t reserved;
1362 +} hifn_crypt_command_t;
1363 +
1364 +#define        HIFN_CRYPT_CMD_ALG_MASK         0x0003          /* algorithm: */
1365 +#define        HIFN_CRYPT_CMD_ALG_DES          0x0000          /*   DES */
1366 +#define        HIFN_CRYPT_CMD_ALG_3DES         0x0001          /*   3DES */
1367 +#define        HIFN_CRYPT_CMD_ALG_RC4          0x0002          /*   RC4 */
1368 +#define        HIFN_CRYPT_CMD_ALG_AES          0x0003          /*   AES */
1369 +#define        HIFN_CRYPT_CMD_MODE_MASK        0x0018          /* Encrypt mode: */
1370 +#define        HIFN_CRYPT_CMD_MODE_ECB         0x0000          /*   ECB */
1371 +#define        HIFN_CRYPT_CMD_MODE_CBC         0x0008          /*   CBC */
1372 +#define        HIFN_CRYPT_CMD_MODE_CFB         0x0010          /*   CFB */
1373 +#define        HIFN_CRYPT_CMD_MODE_OFB         0x0018          /*   OFB */
1374 +#define        HIFN_CRYPT_CMD_CLR_CTX          0x0040          /* clear context */
1375 +#define        HIFN_CRYPT_CMD_NEW_KEY          0x0800          /* expect new key */
1376 +#define        HIFN_CRYPT_CMD_NEW_IV           0x1000          /* expect new iv */
1377 +
1378 +#define        HIFN_CRYPT_CMD_SRCLEN_M         0xc000
1379 +#define        HIFN_CRYPT_CMD_SRCLEN_S         14
1380 +
1381 +#define        HIFN_CRYPT_CMD_KSZ_MASK         0x0600          /* AES key size: */
1382 +#define        HIFN_CRYPT_CMD_KSZ_128          0x0000          /*   128 bit */
1383 +#define        HIFN_CRYPT_CMD_KSZ_192          0x0200          /*   192 bit */
1384 +#define        HIFN_CRYPT_CMD_KSZ_256          0x0400          /*   256 bit */
1385 +
1386 +/*
1387 + * Structure to help build up the command data structure.
1388 + */
1389 +typedef struct hifn_mac_command {
1390 +       volatile u_int16_t masks;
1391 +       volatile u_int16_t header_skip;
1392 +       volatile u_int16_t source_count;
1393 +       volatile u_int16_t reserved;
1394 +} hifn_mac_command_t;
1395 +
1396 +#define        HIFN_MAC_CMD_ALG_MASK           0x0001
1397 +#define        HIFN_MAC_CMD_ALG_SHA1           0x0000
1398 +#define        HIFN_MAC_CMD_ALG_MD5            0x0001
1399 +#define        HIFN_MAC_CMD_MODE_MASK          0x000c
1400 +#define        HIFN_MAC_CMD_MODE_HMAC          0x0000
1401 +#define        HIFN_MAC_CMD_MODE_SSL_MAC       0x0004
1402 +#define        HIFN_MAC_CMD_MODE_HASH          0x0008
1403 +#define        HIFN_MAC_CMD_MODE_FULL          0x0004
1404 +#define        HIFN_MAC_CMD_TRUNC              0x0010
1405 +#define        HIFN_MAC_CMD_RESULT             0x0020
1406 +#define        HIFN_MAC_CMD_APPEND             0x0040
1407 +#define        HIFN_MAC_CMD_SRCLEN_M           0xc000
1408 +#define        HIFN_MAC_CMD_SRCLEN_S           14
1409 +
1410 +/*
1411 + * MAC POS IPsec initiates authentication after encryption on encodes
1412 + * and before decryption on decodes.
1413 + */
1414 +#define        HIFN_MAC_CMD_POS_IPSEC          0x0200
1415 +#define        HIFN_MAC_CMD_NEW_KEY            0x0800
1416 +
1417 +/*
1418 + * The poll frequency and poll scalar defines are unshifted values used
1419 + * to set fields in the DMA Configuration Register.
1420 + */
1421 +#ifndef HIFN_POLL_FREQUENCY
1422 +#define        HIFN_POLL_FREQUENCY     0x1
1423 +#endif
1424 +
1425 +#ifndef HIFN_POLL_SCALAR
1426 +#define        HIFN_POLL_SCALAR        0x0
1427 +#endif
1428 +
1429 +#define        HIFN_MAX_SEGLEN         0xffff          /* maximum dma segment len */
1430 +#define        HIFN_MAX_DMALEN         0x3ffff         /* maximum dma length */
1431 +#endif /* __HIFN_H__ */
1432 --- /dev/null
1433 +++ b/crypto/ocf/hifn/hifn7751var.h
1434 @@ -0,0 +1,369 @@
1435 +/* $FreeBSD: src/sys/dev/hifn/hifn7751var.h,v 1.9 2007/03/21 03:42:49 sam Exp $ */
1436 +/*     $OpenBSD: hifn7751var.h,v 1.42 2002/04/08 17:49:42 jason Exp $  */
1437 +
1438 +/*-
1439 + * Invertex AEON / Hifn 7751 driver
1440 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
1441 + * Copyright (c) 1999 Theo de Raadt
1442 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
1443 + *                     http://www.netsec.net
1444 + *
1445 + * Please send any comments, feedback, bug-fixes, or feature requests to
1446 + * software@invertex.com.
1447 + *
1448 + * Redistribution and use in source and binary forms, with or without
1449 + * modification, are permitted provided that the following conditions
1450 + * are met:
1451 + *
1452 + * 1. Redistributions of source code must retain the above copyright
1453 + *    notice, this list of conditions and the following disclaimer.
1454 + * 2. Redistributions in binary form must reproduce the above copyright
1455 + *    notice, this list of conditions and the following disclaimer in the
1456 + *    documentation and/or other materials provided with the distribution.
1457 + * 3. The name of the author may not be used to endorse or promote products
1458 + *    derived from this software without specific prior written permission.
1459 + *
1460 + *
1461 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1462 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1463 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1464 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1465 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
1466 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1467 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1468 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1469 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
1470 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1471 + *
1472 + * Effort sponsored in part by the Defense Advanced Research Projects
1473 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
1474 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
1475 + *
1476 + */
1477 +
1478 +#ifndef __HIFN7751VAR_H__
1479 +#define __HIFN7751VAR_H__
1480 +
1481 +#ifdef __KERNEL__
1482 +
1483 +/*
1484 + * Some configurable values for the driver.  By default command+result
1485 + * descriptor rings are the same size.  The src+dst descriptor rings
1486 + * are sized at 3.5x the number of potential commands.  Slower parts
1487 + * (e.g. 7951) tend to run out of src descriptors; faster parts (7811)
1488 + * src+cmd/result descriptors.  It's not clear that increasing the size
1489 + * of the descriptor rings helps performance significantly as other
1490 + * factors tend to come into play (e.g. copying misaligned packets).
1491 + */
1492 +#define        HIFN_D_CMD_RSIZE        24      /* command descriptors */
1493 +#define        HIFN_D_SRC_RSIZE        ((HIFN_D_CMD_RSIZE * 7) / 2)    /* source descriptors */
1494 +#define        HIFN_D_RES_RSIZE        HIFN_D_CMD_RSIZE        /* result descriptors */
1495 +#define        HIFN_D_DST_RSIZE        HIFN_D_SRC_RSIZE        /* destination descriptors */
1496 +
1497 +/*
1498 + *  Length values for cryptography
1499 + */
1500 +#define HIFN_DES_KEY_LENGTH            8
1501 +#define HIFN_3DES_KEY_LENGTH           24
1502 +#define HIFN_MAX_CRYPT_KEY_LENGTH      HIFN_3DES_KEY_LENGTH
1503 +#define HIFN_IV_LENGTH                 8
1504 +#define        HIFN_AES_IV_LENGTH              16
1505 +#define HIFN_MAX_IV_LENGTH             HIFN_AES_IV_LENGTH
1506 +
1507 +/*
1508 + *  Length values for authentication
1509 + */
1510 +#define HIFN_MAC_KEY_LENGTH            64
1511 +#define HIFN_MD5_LENGTH                        16
1512 +#define HIFN_SHA1_LENGTH               20
1513 +#define HIFN_MAC_TRUNC_LENGTH          12
1514 +
1515 +#define MAX_SCATTER 64
1516 +
1517 +/*
1518 + * Data structure to hold all 4 rings and any other ring related data.
1519 + */
1520 +struct hifn_dma {
1521 +       /*
1522 +        *  Descriptor rings.  We add +1 to the size to accommodate the
1523 +        *  jump descriptor.
1524 +        */
1525 +       struct hifn_desc        cmdr[HIFN_D_CMD_RSIZE+1];
1526 +       struct hifn_desc        srcr[HIFN_D_SRC_RSIZE+1];
1527 +       struct hifn_desc        dstr[HIFN_D_DST_RSIZE+1];
1528 +       struct hifn_desc        resr[HIFN_D_RES_RSIZE+1];
1529 +
1530 +       struct hifn_command     *hifn_commands[HIFN_D_RES_RSIZE];
1531 +
1532 +       u_char                  command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
1533 +       u_char                  result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
1534 +       u_int32_t               slop[HIFN_D_CMD_RSIZE];
1535 +
1536 +       u_int64_t               test_src, test_dst;
1537 +
1538 +       /*
1539 +        *  Our current positions for insertion and removal from the descriptor
1540 +        *  rings. 
1541 +        */
1542 +       int                     cmdi, srci, dsti, resi;
1543 +       volatile int            cmdu, srcu, dstu, resu;
1544 +       int                     cmdk, srck, dstk, resk;
1545 +};
1546 +
1547 +struct hifn_session {
1548 +       int hs_used;
1549 +       int hs_mlen;
1550 +       u_int8_t hs_iv[HIFN_MAX_IV_LENGTH];
1551 +};
1552 +
1553 +#define        HIFN_RING_SYNC(sc, r, i, f)                                     \
1554 +       /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
1555 +
1556 +#define        HIFN_CMDR_SYNC(sc, i, f)        HIFN_RING_SYNC((sc), cmdr, (i), (f))
1557 +#define        HIFN_RESR_SYNC(sc, i, f)        HIFN_RING_SYNC((sc), resr, (i), (f))
1558 +#define        HIFN_SRCR_SYNC(sc, i, f)        HIFN_RING_SYNC((sc), srcr, (i), (f))
1559 +#define        HIFN_DSTR_SYNC(sc, i, f)        HIFN_RING_SYNC((sc), dstr, (i), (f))
1560 +
1561 +#define        HIFN_CMD_SYNC(sc, i, f)                                         \
1562 +       /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
1563 +
1564 +#define        HIFN_RES_SYNC(sc, i, f)                                         \
1565 +       /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
1566 +
1567 +typedef int bus_size_t;
1568 +
1569 +/*
1570 + * Holds data specific to a single HIFN board.
1571 + */
1572 +struct hifn_softc {
1573 +       softc_device_decl                sc_dev;
1574 +
1575 +       struct pci_dev          *sc_pcidev;     /* PCI device pointer */
1576 +       spinlock_t              sc_mtx;         /* per-instance lock */
1577 +
1578 +       int                     sc_num;         /* for multiple devs */
1579 +
1580 +       ocf_iomem_t             sc_bar0;
1581 +       bus_size_t              sc_bar0_lastreg;/* bar0 last reg written */
1582 +       ocf_iomem_t             sc_bar1;
1583 +       bus_size_t              sc_bar1_lastreg;/* bar1 last reg written */
1584 +
1585 +       int                     sc_irq;
1586 +
1587 +       u_int32_t               sc_dmaier;
1588 +       u_int32_t               sc_drammodel;   /* 1=dram, 0=sram */
1589 +       u_int32_t               sc_pllconfig;   /* 7954/7955/7956 PLL config */
1590 +
1591 +       struct hifn_dma         *sc_dma;
1592 +       dma_addr_t              sc_dma_physaddr;/* physical address of sc_dma */
1593 +
1594 +       int                     sc_dmansegs;
1595 +       int32_t                 sc_cid;
1596 +       int                     sc_maxses;
1597 +       int                     sc_nsessions;
1598 +       struct hifn_session     *sc_sessions;
1599 +       int                     sc_ramsize;
1600 +       int                     sc_flags;
1601 +#define        HIFN_HAS_RNG            0x1     /* includes random number generator */
1602 +#define        HIFN_HAS_PUBLIC         0x2     /* includes public key support */
1603 +#define        HIFN_HAS_AES            0x4     /* includes AES support */
1604 +#define        HIFN_IS_7811            0x8     /* Hifn 7811 part */
1605 +#define        HIFN_IS_7956            0x10    /* Hifn 7956/7955 don't have SDRAM */
1606 +
1607 +       struct timer_list       sc_tickto;      /* for managing DMA */
1608 +
1609 +       int                     sc_rngfirst;
1610 +       int                     sc_rnghz;       /* RNG polling frequency */
1611 +
1612 +       int                     sc_c_busy;      /* command ring busy */
1613 +       int                     sc_s_busy;      /* source data ring busy */
1614 +       int                     sc_d_busy;      /* destination data ring busy */
1615 +       int                     sc_r_busy;      /* result ring busy */
1616 +       int                     sc_active;      /* for initial countdown */
1617 +       int                     sc_needwakeup;  /* ops q'd waiting on resources */
1618 +       int                     sc_curbatch;    /* # ops submitted w/o int */
1619 +       int                     sc_suspended;
1620 +#ifdef HIFN_VULCANDEV
1621 +       struct cdev            *sc_pkdev;
1622 +#endif
1623 +};
1624 +
1625 +#define        HIFN_LOCK(_sc)          spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
1626 +#define        HIFN_UNLOCK(_sc)        spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
1627 +
1628 +/*
1629 + *  hifn_command_t
1630 + *
1631 + *  This is the control structure used to pass commands to hifn_encrypt().
1632 + *
1633 + *  flags
1634 + *  -----
1635 + *  Flags is the bitwise "or" values for command configuration.  A single
1636 + *  encrypt direction needs to be set:
1637 + *
1638 + *     HIFN_ENCODE or HIFN_DECODE
1639 + *
1640 + *  To use cryptography, a single crypto algorithm must be included:
1641 + *
1642 + *     HIFN_CRYPT_3DES or HIFN_CRYPT_DES
1643 + *
1644 + *  To use authentication, a single MAC algorithm must be included:
1645 + *
1646 + *     HIFN_MAC_MD5 or HIFN_MAC_SHA1
1647 + *
1648 + *  By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash.
1649 + *  If the value below is set, hash values are truncated or assumed
1650 + *  truncated to 12 bytes:
1651 + *
1652 + *     HIFN_MAC_TRUNC
1653 + *
1654 + *  Keys for encryption and authentication can be sent as part of a command,
1655 + *  or the last key value used with a particular session can be retrieved
1656 + *  and used again if either of these flags are not specified.
1657 + *
1658 + *     HIFN_CRYPT_NEW_KEY, HIFN_MAC_NEW_KEY
1659 + *
1660 + *  session_num
1661 + *  -----------
1662 + *  A number between 0 and 2048 (for DRAM models) or a number between 
1663 + *  0 and 768 (for SRAM models).  Those who don't want to use session
1664 + *  numbers should leave value at zero and send a new crypt key and/or
1665 + *  new MAC key on every command.  If you use session numbers and
1666 + *  don't send a key with a command, the last key sent for that same
1667 + *  session number will be used.
1668 + *
1669 + *  Warning:  Using session numbers and multiboard at the same time
1670 + *            is currently broken.
1671 + *
1672 + *  mbuf
1673 + *  ----
1674 + *  Either fill in the mbuf pointer and npa=0 or
1675 + *      fill packp[] and packl[] and set npa to > 0
1676 + * 
1677 + *  mac_header_skip
1678 + *  ---------------
1679 + *  The number of bytes of the source_buf that are skipped over before
1680 + *  authentication begins.  This must be a number between 0 and 2^16-1
1681 + *  and can be used by IPsec implementers to skip over IP headers.
1682 + *  *** Value ignored if authentication not used ***
1683 + *
1684 + *  crypt_header_skip
1685 + *  -----------------
1686 + *  The number of bytes of the source_buf that are skipped over before
1687 + *  the cryptographic operation begins.  This must be a number between 0
1688 + *  and 2^16-1.  For IPsec, this number will always be 8 bytes larger
1689 + *  than the auth_header_skip (to skip over the ESP header).
1690 + *  *** Value ignored if cryptography not used ***
1691 + *
1692 + */
1693 +struct hifn_operand {
1694 +       union {
1695 +               struct sk_buff *skb;
1696 +               struct uio *io;
1697 +               unsigned char *buf;
1698 +       } u;
1699 +       void            *map;
1700 +       bus_size_t      mapsize;
1701 +       int             nsegs;
1702 +       struct {
1703 +           dma_addr_t  ds_addr;
1704 +           int         ds_len;
1705 +       } segs[MAX_SCATTER];
1706 +};
1707 +
1708 +struct hifn_command {
1709 +       u_int16_t session_num;
1710 +       u_int16_t base_masks, cry_masks, mac_masks;
1711 +       u_int8_t iv[HIFN_MAX_IV_LENGTH], *ck, mac[HIFN_MAC_KEY_LENGTH];
1712 +       int cklen;
1713 +       int sloplen, slopidx;
1714 +
1715 +       struct hifn_operand src;
1716 +       struct hifn_operand dst;
1717 +
1718 +       struct hifn_softc *softc;
1719 +       struct cryptop *crp;
1720 +       struct cryptodesc *enccrd, *maccrd;
1721 +};
1722 +
1723 +#define        src_skb         src.u.skb
1724 +#define        src_io          src.u.io
1725 +#define        src_map         src.map
1726 +#define        src_mapsize     src.mapsize
1727 +#define        src_segs        src.segs
1728 +#define        src_nsegs       src.nsegs
1729 +#define        src_buf         src.u.buf
1730 +
1731 +#define        dst_skb         dst.u.skb
1732 +#define        dst_io          dst.u.io
1733 +#define        dst_map         dst.map
1734 +#define        dst_mapsize     dst.mapsize
1735 +#define        dst_segs        dst.segs
1736 +#define        dst_nsegs       dst.nsegs
1737 +#define        dst_buf         dst.u.buf
1738 +
1739 +/*
1740 + *  Return values for hifn_crypto()
1741 + */
1742 +#define HIFN_CRYPTO_SUCCESS    0
1743 +#define HIFN_CRYPTO_BAD_INPUT  (-1)
1744 +#define HIFN_CRYPTO_RINGS_FULL (-2)
1745 +
1746 +/**************************************************************************
1747 + *
1748 + *  Function:  hifn_crypto
1749 + *
1750 + *  Purpose:   Called by external drivers to begin an encryption on the
1751 + *             HIFN board.
1752 + *
1753 + *  Blocking/Non-blocking Issues
1754 + *  ============================
1755 + *  The driver cannot block in hifn_crypto (no calls to tsleep) currently.
1756 + *  hifn_crypto() returns HIFN_CRYPTO_RINGS_FULL if there is not enough
1757 + *  room in any of the rings for the request to proceed.
1758 + *
1759 + *  Return Values
1760 + *  =============
1761 + *  0 for success, negative values on error
1762 + *
1763 + *  Defines for negative error codes are:
1764 + *  
1765 + *    HIFN_CRYPTO_BAD_INPUT  :  The passed in command had invalid settings.
1766 + *    HIFN_CRYPTO_RINGS_FULL :  All DMA rings were full and non-blocking
1767 + *                              behaviour was requested.
1768 + *
1769 + *************************************************************************/
1770 +
1771 +/*
1772 + * Convert back and forth from 'sid' to 'card' and 'session'
1773 + */
1774 +#define HIFN_CARD(sid)         (((sid) & 0xf0000000) >> 28)
1775 +#define HIFN_SESSION(sid)      ((sid) & 0x000007ff)
1776 +#define HIFN_SID(crd,ses)      (((crd) << 28) | ((ses) & 0x7ff))
1777 +
1778 +#endif /* __KERNEL__ */
1779 +
1780 +struct hifn_stats {
1781 +       u_int64_t hst_ibytes;
1782 +       u_int64_t hst_obytes;
1783 +       u_int32_t hst_ipackets;
1784 +       u_int32_t hst_opackets;
1785 +       u_int32_t hst_invalid;
1786 +       u_int32_t hst_nomem;            /* malloc or one of hst_nomem_* */
1787 +       u_int32_t hst_abort;
1788 +       u_int32_t hst_noirq;            /* IRQ for no reason */
1789 +       u_int32_t hst_totbatch;         /* ops submitted w/o interrupt */
1790 +       u_int32_t hst_maxbatch;         /* max ops submitted together */
1791 +       u_int32_t hst_unaligned;        /* unaligned src caused copy */
1792 +       /*
1793 +        * The following divides hst_nomem into more specific buckets.
1794 +        */
1795 +       u_int32_t hst_nomem_map;        /* bus_dmamap_create failed */
1796 +       u_int32_t hst_nomem_load;       /* bus_dmamap_load_* failed */
1797 +       u_int32_t hst_nomem_mbuf;       /* MGET* failed */
1798 +       u_int32_t hst_nomem_mcl;        /* MCLGET* failed */
1799 +       u_int32_t hst_nomem_cr;         /* out of command/result descriptor */
1800 +       u_int32_t hst_nomem_sd;         /* out of src/dst descriptors */
1801 +};
1802 +
1803 +#endif /* __HIFN7751VAR_H__ */
1804 --- /dev/null
1805 +++ b/crypto/ocf/hifn/hifn7751.c
1806 @@ -0,0 +1,2970 @@
1807 +/*     $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $  */
1808 +
1809 +/*-
1810 + * Invertex AEON / Hifn 7751 driver
1811 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
1812 + * Copyright (c) 1999 Theo de Raadt
1813 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
1814 + *                     http://www.netsec.net
1815 + * Copyright (c) 2003 Hifn Inc.
1816 + *
1817 + * This driver is based on a previous driver by Invertex, for which they
1818 + * requested:  Please send any comments, feedback, bug-fixes, or feature
1819 + * requests to software@invertex.com.
1820 + *
1821 + * Redistribution and use in source and binary forms, with or without
1822 + * modification, are permitted provided that the following conditions
1823 + * are met:
1824 + *
1825 + * 1. Redistributions of source code must retain the above copyright
1826 + *   notice, this list of conditions and the following disclaimer.
1827 + * 2. Redistributions in binary form must reproduce the above copyright
1828 + *   notice, this list of conditions and the following disclaimer in the
1829 + *   documentation and/or other materials provided with the distribution.
1830 + * 3. The name of the author may not be used to endorse or promote products
1831 + *   derived from this software without specific prior written permission.
1832 + *
1833 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1834 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1835 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1836 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1837 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
1838 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1839 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1840 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1841 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
1842 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1843 + *
1844 + * Effort sponsored in part by the Defense Advanced Research Projects
1845 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
1846 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
1847 + *
1848 + *
1849 +__FBSDID("$FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.40 2007/03/21 03:42:49 sam Exp $");
1850 + */
1851 +
1852 +/*
1853 + * Driver for various Hifn encryption processors.
1854 + */
1855 +#ifndef AUTOCONF_INCLUDED
1856 +#include <linux/config.h>
1857 +#endif
1858 +#include <linux/module.h>
1859 +#include <linux/init.h>
1860 +#include <linux/list.h>
1861 +#include <linux/slab.h>
1862 +#include <linux/wait.h>
1863 +#include <linux/sched.h>
1864 +#include <linux/pci.h>
1865 +#include <linux/delay.h>
1866 +#include <linux/interrupt.h>
1867 +#include <linux/spinlock.h>
1868 +#include <linux/random.h>
1869 +#include <linux/version.h>
1870 +#include <linux/skbuff.h>
1871 +#include <asm/io.h>
1872 +
1873 +#include <cryptodev.h>
1874 +#include <uio.h>
1875 +#include <hifn/hifn7751reg.h>
1876 +#include <hifn/hifn7751var.h>
1877 +
1878 +#if 1
1879 +#define        DPRINTF(a...)   if (hifn_debug) { \
1880 +                                                       printk("%s: ", sc ? \
1881 +                                                               device_get_nameunit(sc->sc_dev) : "hifn"); \
1882 +                                                       printk(a); \
1883 +                                               } else
1884 +#else
1885 +#define        DPRINTF(a...)
1886 +#endif
1887 +
1888 +static inline int
1889 +pci_get_revid(struct pci_dev *dev)
1890 +{
1891 +       u8 rid = 0;
1892 +       pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
1893 +       return rid;
1894 +}
1895 +
1896 +static struct hifn_stats hifnstats;
1897 +
1898 +#define        debug hifn_debug
1899 +int hifn_debug = 0;
1900 +module_param(hifn_debug, int, 0644);
1901 +MODULE_PARM_DESC(hifn_debug, "Enable debug");
1902 +
1903 +int hifn_maxbatch = 1;
1904 +module_param(hifn_maxbatch, int, 0644);
1905 +MODULE_PARM_DESC(hifn_maxbatch, "max ops to batch w/o interrupt");
1906 +
1907 +#ifdef MODULE_PARM
1908 +char *hifn_pllconfig = NULL;
1909 +MODULE_PARM(hifn_pllconfig, "s");
1910 +#else
1911 +char hifn_pllconfig[32]; /* This setting is RO after loading */
1912 +module_param_string(hifn_pllconfig, hifn_pllconfig, 32, 0444);
1913 +#endif
1914 +MODULE_PARM_DESC(hifn_pllconfig, "PLL config, ie., pci66, ext33, ...");
1915 +
1916 +#ifdef HIFN_VULCANDEV
1917 +#include <sys/conf.h>
1918 +#include <sys/uio.h>
1919 +
1920 +static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
1921 +#endif
1922 +
1923 +/*
1924 + * Prototypes and count for the pci_device structure
1925 + */
1926 +static int  hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent);
1927 +static void hifn_remove(struct pci_dev *dev);
1928 +
1929 +static int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
1930 +static int hifn_freesession(device_t, u_int64_t);
1931 +static int hifn_process(device_t, struct cryptop *, int);
1932 +
1933 +static device_method_t hifn_methods = {
1934 +       /* crypto device methods */
1935 +       DEVMETHOD(cryptodev_newsession, hifn_newsession),
1936 +       DEVMETHOD(cryptodev_freesession,hifn_freesession),
1937 +       DEVMETHOD(cryptodev_process,    hifn_process),
1938 +};
1939 +
1940 +static void hifn_reset_board(struct hifn_softc *, int);
1941 +static void hifn_reset_puc(struct hifn_softc *);
1942 +static void hifn_puc_wait(struct hifn_softc *);
1943 +static int hifn_enable_crypto(struct hifn_softc *);
1944 +static void hifn_set_retry(struct hifn_softc *sc);
1945 +static void hifn_init_dma(struct hifn_softc *);
1946 +static void hifn_init_pci_registers(struct hifn_softc *);
1947 +static int hifn_sramsize(struct hifn_softc *);
1948 +static int hifn_dramsize(struct hifn_softc *);
1949 +static int hifn_ramtype(struct hifn_softc *);
1950 +static void hifn_sessions(struct hifn_softc *);
1951 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
1952 +static irqreturn_t hifn_intr(int irq, void *arg);
1953 +#else
1954 +static irqreturn_t hifn_intr(int irq, void *arg, struct pt_regs *regs);
1955 +#endif
1956 +static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
1957 +static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
1958 +static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
1959 +static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
1960 +static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
1961 +static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
1962 +static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
1963 +static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
1964 +static int hifn_init_pubrng(struct hifn_softc *);
1965 +static void hifn_tick(unsigned long arg);
1966 +static void hifn_abort(struct hifn_softc *);
1967 +static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
1968 +
1969 +static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
1970 +static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
1971 +
1972 +#ifdef CONFIG_OCF_RANDOMHARVEST
1973 +static int hifn_read_random(void *arg, u_int32_t *buf, int len);
1974 +#endif
1975 +
1976 +#define HIFN_MAX_CHIPS 8
1977 +static struct hifn_softc *hifn_chip_idx[HIFN_MAX_CHIPS];
1978 +
1979 +static __inline u_int32_t
1980 +READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
1981 +{
1982 +       u_int32_t v = readl(sc->sc_bar0 + reg);
1983 +       sc->sc_bar0_lastreg = (bus_size_t) -1;
1984 +       return (v);
1985 +}
1986 +#define        WRITE_REG_0(sc, reg, val)       hifn_write_reg_0(sc, reg, val)
1987 +
1988 +static __inline u_int32_t
1989 +READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
1990 +{
1991 +       u_int32_t v = readl(sc->sc_bar1 + reg);
1992 +       sc->sc_bar1_lastreg = (bus_size_t) -1;
1993 +       return (v);
1994 +}
1995 +#define        WRITE_REG_1(sc, reg, val)       hifn_write_reg_1(sc, reg, val)
1996 +
1997 +/*
1998 + * map in a given buffer (great on some arches :-)
1999 + */
2000 +
2001 +static int
2002 +pci_map_uio(struct hifn_softc *sc, struct hifn_operand *buf, struct uio *uio)
2003 +{
2004 +       struct iovec *iov = uio->uio_iov;
2005 +
2006 +       DPRINTF("%s()\n", __FUNCTION__);
2007 +
2008 +       buf->mapsize = 0;
2009 +       for (buf->nsegs = 0; buf->nsegs < uio->uio_iovcnt; ) {
2010 +               buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
2011 +                               iov->iov_base, iov->iov_len,
2012 +                               PCI_DMA_BIDIRECTIONAL);
2013 +               buf->segs[buf->nsegs].ds_len = iov->iov_len;
2014 +               buf->mapsize += iov->iov_len;
2015 +               iov++;
2016 +               buf->nsegs++;
2017 +       }
2018 +       /* identify this buffer by the first segment */
2019 +       buf->map = (void *) buf->segs[0].ds_addr;
2020 +       return(0);
2021 +}
2022 +
2023 +/*
2024 + * map in a given sk_buff
2025 + */
2026 +
2027 +static int
2028 +pci_map_skb(struct hifn_softc *sc,struct hifn_operand *buf,struct sk_buff *skb)
2029 +{
2030 +       int i;
2031 +
2032 +       DPRINTF("%s()\n", __FUNCTION__);
2033 +
2034 +       buf->mapsize = 0;
2035 +
2036 +       buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
2037 +                       skb->data, skb_headlen(skb), PCI_DMA_BIDIRECTIONAL);
2038 +       buf->segs[0].ds_len = skb_headlen(skb);
2039 +       buf->mapsize += buf->segs[0].ds_len;
2040 +
2041 +       buf->nsegs = 1;
2042 +
2043 +       for (i = 0; i < skb_shinfo(skb)->nr_frags; ) {
2044 +               buf->segs[buf->nsegs].ds_len = skb_shinfo(skb)->frags[i].size;
2045 +               buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
2046 +                               page_address(skb_shinfo(skb)->frags[i].page) +
2047 +                                       skb_shinfo(skb)->frags[i].page_offset,
2048 +                               buf->segs[buf->nsegs].ds_len, PCI_DMA_BIDIRECTIONAL);
2049 +               buf->mapsize += buf->segs[buf->nsegs].ds_len;
2050 +               buf->nsegs++;
2051 +       }
2052 +
2053 +       /* identify this buffer by the first segment */
2054 +       buf->map = (void *) buf->segs[0].ds_addr;
2055 +       return(0);
2056 +}
2057 +
2058 +/*
2059 + * map in a given contiguous buffer
2060 + */
2061 +
2062 +static int
2063 +pci_map_buf(struct hifn_softc *sc,struct hifn_operand *buf, void *b, int len)
2064 +{
2065 +       DPRINTF("%s()\n", __FUNCTION__);
2066 +
2067 +       buf->mapsize = 0;
2068 +       buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
2069 +                       b, len, PCI_DMA_BIDIRECTIONAL);
2070 +       buf->segs[0].ds_len = len;
2071 +       buf->mapsize += buf->segs[0].ds_len;
2072 +       buf->nsegs = 1;
2073 +
2074 +       /* identify this buffer by the first segment */
2075 +       buf->map = (void *) buf->segs[0].ds_addr;
2076 +       return(0);
2077 +}
2078 +
2079 +#if 0 /* not needed at this time */
2080 +static void
2081 +pci_sync_iov(struct hifn_softc *sc, struct hifn_operand *buf)
2082 +{
2083 +       int i;
2084 +
2085 +       DPRINTF("%s()\n", __FUNCTION__);
2086 +       for (i = 0; i < buf->nsegs; i++)
2087 +               pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
2088 +                               buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
2089 +}
2090 +#endif
2091 +
2092 +static void
2093 +pci_unmap_buf(struct hifn_softc *sc, struct hifn_operand *buf)
2094 +{
2095 +       int i;
2096 +       DPRINTF("%s()\n", __FUNCTION__);
2097 +       for (i = 0; i < buf->nsegs; i++) {
2098 +               pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
2099 +                               buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
2100 +               buf->segs[i].ds_addr = 0;
2101 +               buf->segs[i].ds_len = 0;
2102 +       }
2103 +       buf->nsegs = 0;
2104 +       buf->mapsize = 0;
2105 +       buf->map = 0;
2106 +}
2107 +
2108 +static const char*
2109 +hifn_partname(struct hifn_softc *sc)
2110 +{
2111 +       /* XXX sprintf numbers when not decoded */
2112 +       switch (pci_get_vendor(sc->sc_pcidev)) {
2113 +       case PCI_VENDOR_HIFN:
2114 +               switch (pci_get_device(sc->sc_pcidev)) {
2115 +               case PCI_PRODUCT_HIFN_6500:     return "Hifn 6500";
2116 +               case PCI_PRODUCT_HIFN_7751:     return "Hifn 7751";
2117 +               case PCI_PRODUCT_HIFN_7811:     return "Hifn 7811";
2118 +               case PCI_PRODUCT_HIFN_7951:     return "Hifn 7951";
2119 +               case PCI_PRODUCT_HIFN_7955:     return "Hifn 7955";
2120 +               case PCI_PRODUCT_HIFN_7956:     return "Hifn 7956";
2121 +               }
2122 +               return "Hifn unknown-part";
2123 +       case PCI_VENDOR_INVERTEX:
2124 +               switch (pci_get_device(sc->sc_pcidev)) {
2125 +               case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
2126 +               }
2127 +               return "Invertex unknown-part";
2128 +       case PCI_VENDOR_NETSEC:
2129 +               switch (pci_get_device(sc->sc_pcidev)) {
2130 +               case PCI_PRODUCT_NETSEC_7751:   return "NetSec 7751";
2131 +               }
2132 +               return "NetSec unknown-part";
2133 +       }
2134 +       return "Unknown-vendor unknown-part";
2135 +}
2136 +
2137 +static u_int
2138 +checkmaxmin(struct pci_dev *dev, const char *what, u_int v, u_int min, u_int max)
2139 +{
2140 +       struct hifn_softc *sc = pci_get_drvdata(dev);
2141 +       if (v > max) {
2142 +               device_printf(sc->sc_dev, "Warning, %s %u out of range, "
2143 +                       "using max %u\n", what, v, max);
2144 +               v = max;
2145 +       } else if (v < min) {
2146 +               device_printf(sc->sc_dev, "Warning, %s %u out of range, "
2147 +                       "using min %u\n", what, v, min);
2148 +               v = min;
2149 +       }
2150 +       return v;
2151 +}
2152 +
2153 +/*
2154 + * Select PLL configuration for 795x parts.  This is complicated in
2155 + * that we cannot determine the optimal parameters without user input.
2156 + * The reference clock is derived from an external clock through a
2157 + * multiplier.  The external clock is either the host bus (i.e. PCI)
2158 + * or an external clock generator.  When using the PCI bus we assume
2159 + * the clock is either 33 or 66 MHz; for an external source we cannot
2160 + * tell the speed.
2161 + *
2162 + * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
2163 + * for an external source, followed by the frequency.  We calculate
2164 + * the appropriate multiplier and PLL register contents accordingly.
2165 + * When no configuration is given we default to "pci66" since that
2166 + * always will allow the card to work.  If a card is using the PCI
2167 + * bus clock and in a 33MHz slot then it will be operating at half
2168 + * speed until the correct information is provided.
2169 + *
2170 + * We use a default setting of "ext66" because according to Mike Ham
2171 + * of HiFn, almost every board in existence has an external crystal
2172 + * populated at 66Mhz. Using PCI can be a problem on modern motherboards,
2173 + * because PCI33 can have clocks from 0 to 33Mhz, and some have
2174 + * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
2175 + */
2176 +static void
2177 +hifn_getpllconfig(struct pci_dev *dev, u_int *pll)
2178 +{
2179 +       const char *pllspec = hifn_pllconfig;
2180 +       u_int freq, mul, fl, fh;
2181 +       u_int32_t pllconfig;
2182 +       char *nxt;
2183 +
2184 +       if (pllspec == NULL)
2185 +               pllspec = "ext66";
2186 +       fl = 33, fh = 66;
2187 +       pllconfig = 0;
2188 +       if (strncmp(pllspec, "ext", 3) == 0) {
2189 +               pllspec += 3;
2190 +               pllconfig |= HIFN_PLL_REF_SEL;
2191 +               switch (pci_get_device(dev)) {
2192 +               case PCI_PRODUCT_HIFN_7955:
2193 +               case PCI_PRODUCT_HIFN_7956:
2194 +                       fl = 20, fh = 100;
2195 +                       break;
2196 +#ifdef notyet
2197 +               case PCI_PRODUCT_HIFN_7954:
2198 +                       fl = 20, fh = 66;
2199 +                       break;
2200 +#endif
2201 +               }
2202 +       } else if (strncmp(pllspec, "pci", 3) == 0)
2203 +               pllspec += 3;
2204 +       freq = strtoul(pllspec, &nxt, 10);
2205 +       if (nxt == pllspec)
2206 +               freq = 66;
2207 +       else
2208 +               freq = checkmaxmin(dev, "frequency", freq, fl, fh);
2209 +       /*
2210 +        * Calculate multiplier.  We target a Fck of 266 MHz,
2211 +        * allowing only even values, possibly rounded down.
2212 +        * Multipliers > 8 must set the charge pump current.
2213 +        */
2214 +       mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
2215 +       pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
2216 +       if (mul > 8)
2217 +               pllconfig |= HIFN_PLL_IS;
2218 +       *pll = pllconfig;
2219 +}
2220 +
2221 +/*
2222 + * Attach an interface that successfully probed.
2223 + */
2224 +static int
2225 +hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2226 +{
2227 +       struct hifn_softc *sc = NULL;
2228 +       char rbase;
2229 +       u_int16_t ena, rev;
2230 +       int rseg, rc;
2231 +       unsigned long mem_start, mem_len;
2232 +       static int num_chips = 0;
2233 +
2234 +       DPRINTF("%s()\n", __FUNCTION__);
2235 +
2236 +       if (pci_enable_device(dev) < 0)
2237 +               return(-ENODEV);
2238 +
2239 +       if (pci_set_mwi(dev))
2240 +               return(-ENODEV);
2241 +
2242 +       if (!dev->irq) {
2243 +               printk("hifn: found device with no IRQ assigned. check BIOS settings!");
2244 +               pci_disable_device(dev);
2245 +               return(-ENODEV);
2246 +       }
2247 +
2248 +       sc = (struct hifn_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
2249 +       if (!sc)
2250 +               return(-ENOMEM);
2251 +       memset(sc, 0, sizeof(*sc));
2252 +
2253 +       softc_device_init(sc, "hifn", num_chips, hifn_methods);
2254 +
2255 +       sc->sc_pcidev = dev;
2256 +       sc->sc_irq = -1;
2257 +       sc->sc_cid = -1;
2258 +       sc->sc_num = num_chips++;
2259 +       if (sc->sc_num < HIFN_MAX_CHIPS)
2260 +               hifn_chip_idx[sc->sc_num] = sc;
2261 +
2262 +       pci_set_drvdata(sc->sc_pcidev, sc);
2263 +
2264 +       spin_lock_init(&sc->sc_mtx);
2265 +
2266 +       /* XXX handle power management */
2267 +
2268 +       /*
2269 +        * The 7951 and 795x have a random number generator and
2270 +        * public key support; note this.
2271 +        */
2272 +       if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
2273 +           (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
2274 +            pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
2275 +            pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
2276 +               sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
2277 +       /*
2278 +        * The 7811 has a random number generator and
2279 +        * we also note it's identity 'cuz of some quirks.
2280 +        * we also note its identity because of some quirks.
2281 +       if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
2282 +           pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
2283 +               sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
2284 +
2285 +       /*
2286 +        * The 795x parts support AES.
2287 +        */
2288 +       if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
2289 +           (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
2290 +            pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
2291 +               sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
2292 +               /*
2293 +                * Select PLL configuration.  This depends on the
2294 +                * bus and board design and must be manually configured
2295 +                * if the default setting is unacceptable.
2296 +                */
2297 +               hifn_getpllconfig(dev, &sc->sc_pllconfig);
2298 +       }
2299 +
2300 +       /*
2301 +        * Setup PCI resources. Note that we record the bus
2302 +        * tag and handle for each register mapping, this is
2303 +        * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
2304 +        * and WRITE_REG_1 macros throughout the driver.
2305 +        */
2306 +       mem_start = pci_resource_start(sc->sc_pcidev, 0);
2307 +       mem_len   = pci_resource_len(sc->sc_pcidev, 0);
2308 +       sc->sc_bar0 = (ocf_iomem_t) ioremap(mem_start, mem_len);
2309 +       if (!sc->sc_bar0) {
2310 +               device_printf(sc->sc_dev, "cannot map bar%d register space\n", 0);
2311 +               goto fail;
2312 +       }
2313 +       sc->sc_bar0_lastreg = (bus_size_t) -1;
2314 +
2315 +       mem_start = pci_resource_start(sc->sc_pcidev, 1);
2316 +       mem_len   = pci_resource_len(sc->sc_pcidev, 1);
2317 +       sc->sc_bar1 = (ocf_iomem_t) ioremap(mem_start, mem_len);
2318 +       if (!sc->sc_bar1) {
2319 +               device_printf(sc->sc_dev, "cannot map bar%d register space\n", 1);
2320 +               goto fail;
2321 +       }
2322 +       sc->sc_bar1_lastreg = (bus_size_t) -1;
2323 +
2324 +       /* fix up the bus size */
2325 +       if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
2326 +               device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
2327 +               goto fail;
2328 +       }
2329 +       if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
2330 +               device_printf(sc->sc_dev,
2331 +                               "No usable consistent DMA configuration, aborting.\n");
2332 +               goto fail;
2333 +       }
2334 +
2335 +       hifn_set_retry(sc);
2336 +
2337 +       /*
2338 +        * Setup the area where the Hifn DMA's descriptors
2339 +        * and associated data structures.
2340 +        */
2341 +       sc->sc_dma = (struct hifn_dma *) pci_alloc_consistent(dev,
2342 +                       sizeof(*sc->sc_dma),
2343 +                       &sc->sc_dma_physaddr);
2344 +       if (!sc->sc_dma) {
2345 +               device_printf(sc->sc_dev, "cannot alloc sc_dma\n");
2346 +               goto fail;
2347 +       }
2348 +       bzero(sc->sc_dma, sizeof(*sc->sc_dma));
2349 +
2350 +       /*
2351 +        * Reset the board and do the ``secret handshake''
2352 +        * to enable the crypto support.  Then complete the
2353 +        * initialization procedure by setting up the interrupt
2354 +        * and hooking in to the system crypto support so we'll
2355 +        * get used for system services like the crypto device,
2356 +        * IPsec, RNG device, etc.
2357 +        */
2358 +       hifn_reset_board(sc, 0);
2359 +
2360 +       if (hifn_enable_crypto(sc) != 0) {
2361 +               device_printf(sc->sc_dev, "crypto enabling failed\n");
2362 +               goto fail;
2363 +       }
2364 +       hifn_reset_puc(sc);
2365 +
2366 +       hifn_init_dma(sc);
2367 +       hifn_init_pci_registers(sc);
2368 +
2369 +       pci_set_master(sc->sc_pcidev);
2370 +
2371 +       /* XXX can't dynamically determine ram type for 795x; force dram */
2372 +       if (sc->sc_flags & HIFN_IS_7956)
2373 +               sc->sc_drammodel = 1;
2374 +       else if (hifn_ramtype(sc))
2375 +               goto fail;
2376 +
2377 +       if (sc->sc_drammodel == 0)
2378 +               hifn_sramsize(sc);
2379 +       else
2380 +               hifn_dramsize(sc);
2381 +
2382 +       /*
2383 +        * Workaround for NetSec 7751 rev A: half ram size because two
2384 +        * of the address lines were left floating
2385 +        */
2386 +       if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
2387 +           pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
2388 +           pci_get_revid(dev) == 0x61) /*XXX???*/
2389 +               sc->sc_ramsize >>= 1;
2390 +
2391 +       /*
2392 +        * Arrange the interrupt line.
2393 +        */
2394 +       rc = request_irq(dev->irq, hifn_intr, IRQF_SHARED, "hifn", sc);
2395 +       if (rc) {
2396 +               device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
2397 +               goto fail;
2398 +       }
2399 +       sc->sc_irq = dev->irq;
2400 +
2401 +       hifn_sessions(sc);
2402 +
2403 +       /*
2404 +        * NB: Keep only the low 16 bits; this masks the chip id
2405 +        *     from the 7951.
2406 +        */
2407 +       rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
2408 +
2409 +       rseg = sc->sc_ramsize / 1024;
2410 +       rbase = 'K';
2411 +       if (sc->sc_ramsize >= (1024 * 1024)) {
2412 +               rbase = 'M';
2413 +               rseg /= 1024;
2414 +       }
2415 +       device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
2416 +               hifn_partname(sc), rev,
2417 +               rseg, rbase, sc->sc_drammodel ? 'd' : 's');
2418 +       if (sc->sc_flags & HIFN_IS_7956)
2419 +               printf(", pll=0x%x<%s clk, %ux mult>",
2420 +                       sc->sc_pllconfig,
2421 +                       sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
2422 +                       2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
2423 +       printf("\n");
2424 +
2425 +       sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
2426 +       if (sc->sc_cid < 0) {
2427 +               device_printf(sc->sc_dev, "could not get crypto driver id\n");
2428 +               goto fail;
2429 +       }
2430 +
2431 +       WRITE_REG_0(sc, HIFN_0_PUCNFG,
2432 +           READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
2433 +       ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
2434 +
2435 +       switch (ena) {
2436 +       case HIFN_PUSTAT_ENA_2:
2437 +               crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
2438 +               crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
2439 +               if (sc->sc_flags & HIFN_HAS_AES)
2440 +                       crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
2441 +               /*FALLTHROUGH*/
2442 +       case HIFN_PUSTAT_ENA_1:
2443 +               crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
2444 +               crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
2445 +               crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
2446 +               crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
2447 +               crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
2448 +               break;
2449 +       }
2450 +
2451 +       if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
2452 +               hifn_init_pubrng(sc);
2453 +
2454 +       init_timer(&sc->sc_tickto);
2455 +       sc->sc_tickto.function = hifn_tick;
2456 +       sc->sc_tickto.data = (unsigned long) sc->sc_num;
2457 +       mod_timer(&sc->sc_tickto, jiffies + HZ);
2458 +
2459 +       return (0);
2460 +
2461 +fail:
2462 +    if (sc->sc_cid >= 0)
2463 +        crypto_unregister_all(sc->sc_cid);
2464 +    if (sc->sc_irq != -1)
2465 +        free_irq(sc->sc_irq, sc);
2466 +    if (sc->sc_dma) {
2467 +               /* Turn off DMA polling */
2468 +               WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2469 +                       HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2470 +
2471 +        pci_free_consistent(sc->sc_pcidev,
2472 +                               sizeof(*sc->sc_dma),
2473 +                sc->sc_dma, sc->sc_dma_physaddr);
2474 +       }
2475 +    kfree(sc);
2476 +       return (-ENXIO);
2477 +}
2478 +
2479 +/*
2480 + * Detach an interface that successfully probed.
2481 + */
2482 +static void
2483 +hifn_remove(struct pci_dev *dev)
2484 +{
2485 +       struct hifn_softc *sc = pci_get_drvdata(dev);
2486 +       unsigned long l_flags;
2487 +
2488 +       DPRINTF("%s()\n", __FUNCTION__);
2489 +
2490 +       KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
2491 +
2492 +       /* disable interrupts */
2493 +       HIFN_LOCK(sc);
2494 +       WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
2495 +       HIFN_UNLOCK(sc);
2496 +
2497 +       /*XXX other resources */
2498 +       del_timer_sync(&sc->sc_tickto);
2499 +
2500 +       /* Turn off DMA polling */
2501 +       WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2502 +           HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2503 +
2504 +       crypto_unregister_all(sc->sc_cid);
2505 +
2506 +       free_irq(sc->sc_irq, sc);
2507 +
2508 +       pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
2509 +                sc->sc_dma, sc->sc_dma_physaddr);
2510 +}
2511 +
2512 +
2513 +static int
2514 +hifn_init_pubrng(struct hifn_softc *sc)
2515 +{
2516 +       int i;
2517 +
2518 +       DPRINTF("%s()\n", __FUNCTION__);
2519 +
2520 +       if ((sc->sc_flags & HIFN_IS_7811) == 0) {
2521 +               /* Reset 7951 public key/rng engine */
2522 +               WRITE_REG_1(sc, HIFN_1_PUB_RESET,
2523 +                   READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
2524 +
2525 +               for (i = 0; i < 100; i++) {
2526 +                       DELAY(1000);
2527 +                       if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
2528 +                           HIFN_PUBRST_RESET) == 0)
2529 +                               break;
2530 +               }
2531 +
2532 +               if (i == 100) {
2533 +                       device_printf(sc->sc_dev, "public key init failed\n");
2534 +                       return (1);
2535 +               }
2536 +       }
2537 +
2538 +       /* Enable the rng, if available */
2539 +#ifdef CONFIG_OCF_RANDOMHARVEST
2540 +       if (sc->sc_flags & HIFN_HAS_RNG) {
2541 +               if (sc->sc_flags & HIFN_IS_7811) {
2542 +                       u_int32_t r;
2543 +                       r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
2544 +                       if (r & HIFN_7811_RNGENA_ENA) {
2545 +                               r &= ~HIFN_7811_RNGENA_ENA;
2546 +                               WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
2547 +                       }
2548 +                       WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
2549 +                           HIFN_7811_RNGCFG_DEFL);
2550 +                       r |= HIFN_7811_RNGENA_ENA;
2551 +                       WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
2552 +               } else
2553 +                       WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
2554 +                           READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
2555 +                           HIFN_RNGCFG_ENA);
2556 +
2557 +               sc->sc_rngfirst = 1;
2558 +               crypto_rregister(sc->sc_cid, hifn_read_random, sc);
2559 +       }
2560 +#endif
2561 +
2562 +       /* Enable public key engine, if available */
2563 +       if (sc->sc_flags & HIFN_HAS_PUBLIC) {
2564 +               WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
2565 +               sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
2566 +               WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2567 +#ifdef HIFN_VULCANDEV
2568 +               sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0, 
2569 +                                       UID_ROOT, GID_WHEEL, 0666,
2570 +                                       "vulcanpk");
2571 +               sc->sc_pkdev->si_drv1 = sc;
2572 +#endif
2573 +       }
2574 +
2575 +       return (0);
2576 +}
2577 +
2578 +#ifdef CONFIG_OCF_RANDOMHARVEST
2579 +static int
2580 +hifn_read_random(void *arg, u_int32_t *buf, int len)
2581 +{
2582 +       struct hifn_softc *sc = (struct hifn_softc *) arg;
2583 +       u_int32_t sts;
2584 +       int i, rc = 0;
2585 +
2586 +       if (len <= 0)
2587 +               return rc;
2588 +
2589 +       if (sc->sc_flags & HIFN_IS_7811) {
2590 +               /* ONLY VALID ON 7811!!!! */
2591 +               for (i = 0; i < 5; i++) {
2592 +                       sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
2593 +                       if (sts & HIFN_7811_RNGSTS_UFL) {
2594 +                               device_printf(sc->sc_dev,
2595 +                                             "RNG underflow: disabling\n");
2596 +                               /* DAVIDM perhaps return -1 */
2597 +                               break;
2598 +                       }
2599 +                       if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
2600 +                               break;
2601 +
2602 +                       /*
2603 +                        * There are at least two words in the RNG FIFO
2604 +                        * at this point.
2605 +                        */
2606 +                       if (rc < len)
2607 +                               buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
2608 +                       if (rc < len)
2609 +                               buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
2610 +               }
2611 +       } else
2612 +               buf[rc++] = READ_REG_1(sc, HIFN_1_RNG_DATA);
2613 +
2614 +       /* NB: discard first data read */
2615 +       if (sc->sc_rngfirst) {
2616 +               sc->sc_rngfirst = 0;
2617 +               rc = 0;
2618 +       }
2619 +
2620 +       return(rc);
2621 +}
2622 +#endif /* CONFIG_OCF_RANDOMHARVEST */
2623 +
2624 +static void
2625 +hifn_puc_wait(struct hifn_softc *sc)
2626 +{
2627 +       int i;
2628 +       int reg = HIFN_0_PUCTRL;
2629 +
2630 +       if (sc->sc_flags & HIFN_IS_7956) {
2631 +               reg = HIFN_0_PUCTRL2;
2632 +       }
2633 +
2634 +       for (i = 5000; i > 0; i--) {
2635 +               DELAY(1);
2636 +               if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
2637 +                       break;
2638 +       }
2639 +       if (!i)
2640 +               device_printf(sc->sc_dev, "proc unit did not reset(0x%x)\n",
2641 +                               READ_REG_0(sc, HIFN_0_PUCTRL));
2642 +}
2643 +
2644 +/*
2645 + * Reset the processing unit.
2646 + */
2647 +static void
2648 +hifn_reset_puc(struct hifn_softc *sc)
2649 +{
2650 +       /* Reset processing unit */
2651 +       int reg = HIFN_0_PUCTRL;
2652 +
2653 +       if (sc->sc_flags & HIFN_IS_7956) {
2654 +               reg = HIFN_0_PUCTRL2;
2655 +       }
2656 +       WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
2657 +
2658 +       hifn_puc_wait(sc);
2659 +}
2660 +
2661 +/*
2662 + * Set the Retry and TRDY registers; note that we set them to
2663 + * zero because the 7811 locks up when forced to retry (section
2664 + * 3.6 of "Specification Update SU-0014-04").  Not clear if we
2665 + * should do this for all Hifn parts, but it doesn't seem to hurt.
2666 + */
2667 +static void
2668 +hifn_set_retry(struct hifn_softc *sc)
2669 +{
2670 +       DPRINTF("%s()\n", __FUNCTION__);
2671 +       /* NB: RETRY only responds to 8-bit reads/writes */
2672 +       pci_write_config_byte(sc->sc_pcidev, HIFN_RETRY_TIMEOUT, 0);
2673 +       pci_write_config_dword(sc->sc_pcidev, HIFN_TRDY_TIMEOUT, 0);
2674 +}
2675 +
2676 +/*
2677 + * Resets the board.  Values in the registers are left as is
2678 + * from the reset (i.e. initial values are assigned elsewhere).
2679 + */
2680 +static void
2681 +hifn_reset_board(struct hifn_softc *sc, int full)
2682 +{
2683 +       u_int32_t reg;
2684 +
2685 +       DPRINTF("%s()\n", __FUNCTION__);
2686 +       /*
2687 +        * Set polling in the DMA configuration register to zero.  0x7 avoids
2688 +        * resetting the board and zeros out the other fields.
2689 +        */
2690 +       WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2691 +           HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2692 +
2693 +       /*
2694 +        * Now that polling has been disabled, we have to wait 1 ms
2695 +        * before resetting the board.
2696 +        */
2697 +       DELAY(1000);
2698 +
2699 +       /* Reset the DMA unit */
2700 +       if (full) {
2701 +               WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
2702 +               DELAY(1000);
2703 +       } else {
2704 +               WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
2705 +                   HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
2706 +               hifn_reset_puc(sc);
2707 +       }
2708 +
2709 +       KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
2710 +       bzero(sc->sc_dma, sizeof(*sc->sc_dma));
2711 +
2712 +       /* Bring dma unit out of reset */
2713 +       WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2714 +           HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2715 +
2716 +       hifn_puc_wait(sc);
2717 +       hifn_set_retry(sc);
2718 +
2719 +       if (sc->sc_flags & HIFN_IS_7811) {
2720 +               for (reg = 0; reg < 1000; reg++) {
2721 +                       if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
2722 +                           HIFN_MIPSRST_CRAMINIT)
2723 +                               break;
2724 +                       DELAY(1000);
2725 +               }
2726 +               if (reg == 1000)
2727 +                       device_printf(sc->sc_dev, ": cram init timeout\n");
2728 +       } else {
2729 +         /* set up DMA configuration register #2 */
2730 +         /* turn off all PK and BAR0 swaps */
2731 +         WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
2732 +                     (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
2733 +                     (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
2734 +                     (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
2735 +                     (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
2736 +       }
2737 +}
2738 +
2739 +static u_int32_t
2740 +hifn_next_signature(u_int32_t a, u_int cnt)
2741 +{
2742 +       int i;
2743 +       u_int32_t v;
2744 +
2745 +       for (i = 0; i < cnt; i++) {
2746 +
2747 +               /* get the parity */
2748 +               v = a & 0x80080125;
2749 +               v ^= v >> 16;
2750 +               v ^= v >> 8;
2751 +               v ^= v >> 4;
2752 +               v ^= v >> 2;
2753 +               v ^= v >> 1;
2754 +
2755 +               a = (v & 1) ^ (a << 1);
2756 +       }
2757 +
2758 +       return a;
2759 +}
2760 +
2761 +
2762 +/*
2763 + * Checks to see if crypto is already enabled.  If crypto isn't enable,
2764 + * "hifn_enable_crypto" is called to enable it.  The check is important,
2765 + * as enabling crypto twice will lock the board.
2766 + */
2767 +static int 
2768 +hifn_enable_crypto(struct hifn_softc *sc)
2769 +{
2770 +       u_int32_t dmacfg, ramcfg, encl, addr, i;
2771 +       char offtbl[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2772 +                                         0x00, 0x00, 0x00, 0x00 };
2773 +
2774 +       DPRINTF("%s()\n", __FUNCTION__);
2775 +
2776 +       ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
2777 +       dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
2778 +
2779 +       /*
2780 +        * The RAM config register's encrypt level bit needs to be set before
2781 +        * every read performed on the encryption level register.
2782 +        */
2783 +       WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
2784 +
2785 +       encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
2786 +
2787 +       /*
2788 +        * Make sure we don't re-unlock.  Two unlocks kills chip until the
2789 +        * next reboot.
2790 +        */
2791 +       if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
2792 +#ifdef HIFN_DEBUG
2793 +               if (hifn_debug)
2794 +                       device_printf(sc->sc_dev,
2795 +                           "Strong crypto already enabled!\n");
2796 +#endif
2797 +               goto report;
2798 +       }
2799 +
2800 +       if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
2801 +#ifdef HIFN_DEBUG
2802 +               if (hifn_debug)
2803 +                       device_printf(sc->sc_dev,
2804 +                             "Unknown encryption level 0x%x\n", encl);
2805 +#endif
2806 +               return 1;
2807 +       }
2808 +
2809 +       WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
2810 +           HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2811 +       DELAY(1000);
2812 +       addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
2813 +       DELAY(1000);
2814 +       WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
2815 +       DELAY(1000);
2816 +
2817 +       for (i = 0; i <= 12; i++) {
2818 +               addr = hifn_next_signature(addr, offtbl[i] + 0x101);
2819 +               WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
2820 +
2821 +               DELAY(1000);
2822 +       }
2823 +
2824 +       WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
2825 +       encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
2826 +
2827 +#ifdef HIFN_DEBUG
2828 +       if (hifn_debug) {
2829 +               if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
2830 +                       device_printf(sc->sc_dev, "Engine is permanently "
2831 +                               "locked until next system reset!\n");
2832 +               else
2833 +                       device_printf(sc->sc_dev, "Engine enabled "
2834 +                               "successfully!\n");
2835 +       }
2836 +#endif
2837 +
2838 +report:
2839 +       WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
2840 +       WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
2841 +
2842 +       switch (encl) {
2843 +       case HIFN_PUSTAT_ENA_1:
2844 +       case HIFN_PUSTAT_ENA_2:
2845 +               break;
2846 +       case HIFN_PUSTAT_ENA_0:
2847 +       default:
2848 +               device_printf(sc->sc_dev, "disabled\n");
2849 +               break;
2850 +       }
2851 +
2852 +       return 0;
2853 +}
2854 +
2855 +/*
2856 + * Give initial values to the registers listed in the "Register Space"
2857 + * section of the HIFN Software Development reference manual.
2858 + */
2859 +static void 
2860 +hifn_init_pci_registers(struct hifn_softc *sc)
2861 +{
2862 +       DPRINTF("%s()\n", __FUNCTION__);
2863 +
2864 +       /* write fixed values needed by the Initialization registers */
2865 +       WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
2866 +       WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
2867 +       WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
2868 +
2869 +       /* write all 4 ring address registers */
2870 +       WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
2871 +           offsetof(struct hifn_dma, cmdr[0]));
2872 +       WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
2873 +           offsetof(struct hifn_dma, srcr[0]));
2874 +       WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
2875 +           offsetof(struct hifn_dma, dstr[0]));
2876 +       WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
2877 +           offsetof(struct hifn_dma, resr[0]));
2878 +
2879 +       DELAY(2000);
2880 +
2881 +       /* write status register */
2882 +       WRITE_REG_1(sc, HIFN_1_DMA_CSR,
2883 +           HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
2884 +           HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
2885 +           HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
2886 +           HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
2887 +           HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
2888 +           HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
2889 +           HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
2890 +           HIFN_DMACSR_S_WAIT |
2891 +           HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
2892 +           HIFN_DMACSR_C_WAIT |
2893 +           HIFN_DMACSR_ENGINE |
2894 +           ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
2895 +               HIFN_DMACSR_PUBDONE : 0) |
2896 +           ((sc->sc_flags & HIFN_IS_7811) ?
2897 +               HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
2898 +
2899 +       sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
2900 +       sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
2901 +           HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
2902 +           HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
2903 +           ((sc->sc_flags & HIFN_IS_7811) ?
2904 +               HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
2905 +       sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
2906 +       WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2907 +
2908 +
2909 +       if (sc->sc_flags & HIFN_IS_7956) {
2910 +               u_int32_t pll;
2911 +
2912 +               WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
2913 +                   HIFN_PUCNFG_TCALLPHASES |
2914 +                   HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
2915 +
2916 +               /* turn off the clocks and insure bypass is set */
2917 +               pll = READ_REG_1(sc, HIFN_1_PLL);
2918 +               pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
2919 +                 | HIFN_PLL_BP | HIFN_PLL_MBSET;
2920 +               WRITE_REG_1(sc, HIFN_1_PLL, pll);
2921 +               DELAY(10*1000);         /* 10ms */
2922 +
2923 +               /* change configuration */
2924 +               pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
2925 +               WRITE_REG_1(sc, HIFN_1_PLL, pll);
2926 +               DELAY(10*1000);         /* 10ms */
2927 +
2928 +               /* disable bypass */
2929 +               pll &= ~HIFN_PLL_BP;
2930 +               WRITE_REG_1(sc, HIFN_1_PLL, pll);
2931 +               /* enable clocks with new configuration */
2932 +               pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
2933 +               WRITE_REG_1(sc, HIFN_1_PLL, pll);
2934 +       } else {
2935 +               WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
2936 +                   HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
2937 +                   HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
2938 +                   (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
2939 +       }
2940 +
2941 +       WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
2942 +       WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2943 +           HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
2944 +           ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
2945 +           ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
2946 +}
2947 +
2948 +/*
2949 + * The maximum number of sessions supported by the card
2950 + * is dependent on the amount of context ram, which
2951 + * encryption algorithms are enabled, and how compression
2952 + * is configured.  This should be configured before this
2953 + * routine is called.
2954 + */
2955 +static void
2956 +hifn_sessions(struct hifn_softc *sc)
2957 +{
2958 +       u_int32_t pucnfg;
2959 +       int ctxsize;
2960 +
2961 +       DPRINTF("%s()\n", __FUNCTION__);
2962 +
2963 +       pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
2964 +
2965 +       if (pucnfg & HIFN_PUCNFG_COMPSING) {
2966 +               if (pucnfg & HIFN_PUCNFG_ENCCNFG)
2967 +                       ctxsize = 128;
2968 +               else
2969 +                       ctxsize = 512;
2970 +               /*
2971 +                * 7955/7956 has internal context memory of 32K
2972 +                */
2973 +               if (sc->sc_flags & HIFN_IS_7956)
2974 +                       sc->sc_maxses = 32768 / ctxsize;
2975 +               else
2976 +                       sc->sc_maxses = 1 +
2977 +                           ((sc->sc_ramsize - 32768) / ctxsize);
2978 +       } else
2979 +               sc->sc_maxses = sc->sc_ramsize / 16384;
2980 +
2981 +       if (sc->sc_maxses > 2048)
2982 +               sc->sc_maxses = 2048;
2983 +}
2984 +
2985 +/*
2986 + * Determine ram type (sram or dram).  Board should be just out of a reset
2987 + * state when this is called.
2988 + */
2989 +static int
2990 +hifn_ramtype(struct hifn_softc *sc)
2991 +{
2992 +       u_int8_t data[8], dataexpect[8];
2993 +       int i;
2994 +
2995 +       for (i = 0; i < sizeof(data); i++)
2996 +               data[i] = dataexpect[i] = 0x55;
2997 +       if (hifn_writeramaddr(sc, 0, data))
2998 +               return (-1);
2999 +       if (hifn_readramaddr(sc, 0, data))
3000 +               return (-1);
3001 +       if (bcmp(data, dataexpect, sizeof(data)) != 0) {
3002 +               sc->sc_drammodel = 1;
3003 +               return (0);
3004 +       }
3005 +
3006 +       for (i = 0; i < sizeof(data); i++)
3007 +               data[i] = dataexpect[i] = 0xaa;
3008 +       if (hifn_writeramaddr(sc, 0, data))
3009 +               return (-1);
3010 +       if (hifn_readramaddr(sc, 0, data))
3011 +               return (-1);
3012 +       if (bcmp(data, dataexpect, sizeof(data)) != 0) {
3013 +               sc->sc_drammodel = 1;
3014 +               return (0);
3015 +       }
3016 +
3017 +       return (0);
3018 +}
3019 +
3020 +#define        HIFN_SRAM_MAX           (32 << 20)
3021 +#define        HIFN_SRAM_STEP_SIZE     16384
3022 +#define        HIFN_SRAM_GRANULARITY   (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
3023 +
3024 +static int
3025 +hifn_sramsize(struct hifn_softc *sc)
3026 +{
3027 +       u_int32_t a;
3028 +       u_int8_t data[8];
3029 +       u_int8_t dataexpect[sizeof(data)];
3030 +       int32_t i;
3031 +
3032 +       for (i = 0; i < sizeof(data); i++)
3033 +               data[i] = dataexpect[i] = i ^ 0x5a;
3034 +
3035 +       for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
3036 +               a = i * HIFN_SRAM_STEP_SIZE;
3037 +               bcopy(&i, data, sizeof(i));
3038 +               hifn_writeramaddr(sc, a, data);
3039 +       }
3040 +
3041 +       for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
3042 +               a = i * HIFN_SRAM_STEP_SIZE;
3043 +               bcopy(&i, dataexpect, sizeof(i));
3044 +               if (hifn_readramaddr(sc, a, data) < 0)
3045 +                       return (0);
3046 +               if (bcmp(data, dataexpect, sizeof(data)) != 0)
3047 +                       return (0);
3048 +               sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
3049 +       }
3050 +
3051 +       return (0);
3052 +}
3053 +
3054 +/*
3055 + * XXX For dram boards, one should really try all of the
3056 + * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
3057 + * is already set up correctly.
3058 + */
3059 +static int
3060 +hifn_dramsize(struct hifn_softc *sc)
3061 +{
3062 +       u_int32_t cnfg;
3063 +
3064 +       if (sc->sc_flags & HIFN_IS_7956) {
3065 +               /*
3066 +                * 7955/7956 have a fixed internal ram of only 32K.
3067 +                */
3068 +               sc->sc_ramsize = 32768;
3069 +       } else {
3070 +               cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
3071 +                   HIFN_PUCNFG_DRAMMASK;
3072 +               sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
3073 +       }
3074 +       return (0);
3075 +}
3076 +
3077 +static void
3078 +hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
3079 +{
3080 +       struct hifn_dma *dma = sc->sc_dma;
3081 +
3082 +       DPRINTF("%s()\n", __FUNCTION__);
3083 +
3084 +       if (dma->cmdi == HIFN_D_CMD_RSIZE) {
3085 +               dma->cmdi = 0;
3086 +               dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3087 +               wmb();
3088 +               dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
3089 +               HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
3090 +                   BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3091 +       }
3092 +       *cmdp = dma->cmdi++;
3093 +       dma->cmdk = dma->cmdi;
3094 +
3095 +       if (dma->srci == HIFN_D_SRC_RSIZE) {
3096 +               dma->srci = 0;
3097 +               dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3098 +               wmb();
3099 +               dma->srcr[HIFN_D_SRC_RSIZE].l |= htole32(HIFN_D_VALID);
3100 +               HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
3101 +                   BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3102 +       }
3103 +       *srcp = dma->srci++;
3104 +       dma->srck = dma->srci;
3105 +
3106 +       if (dma->dsti == HIFN_D_DST_RSIZE) {
3107 +               dma->dsti = 0;
3108 +               dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3109 +               wmb();
3110 +               dma->dstr[HIFN_D_DST_RSIZE].l |= htole32(HIFN_D_VALID);
3111 +               HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
3112 +                   BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3113 +       }
3114 +       *dstp = dma->dsti++;
3115 +       dma->dstk = dma->dsti;
3116 +
3117 +       if (dma->resi == HIFN_D_RES_RSIZE) {
3118 +               dma->resi = 0;
3119 +               dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3120 +               wmb();
3121 +               dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
3122 +               HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
3123 +                   BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3124 +       }
3125 +       *resp = dma->resi++;
3126 +       dma->resk = dma->resi;
3127 +}
3128 +
3129 +static int
3130 +hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
3131 +{
3132 +       struct hifn_dma *dma = sc->sc_dma;
3133 +       hifn_base_command_t wc;
3134 +       const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
3135 +       int r, cmdi, resi, srci, dsti;
3136 +
3137 +       DPRINTF("%s()\n", __FUNCTION__);
3138 +
3139 +       wc.masks = htole16(3 << 13);
3140 +       wc.session_num = htole16(addr >> 14);
3141 +       wc.total_source_count = htole16(8);
3142 +       wc.total_dest_count = htole16(addr & 0x3fff);
3143 +
3144 +       hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
3145 +
3146 +       WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3147 +           HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
3148 +           HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
3149 +
3150 +       /* build write command */
3151 +       bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
3152 +       *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
3153 +       bcopy(data, &dma->test_src, sizeof(dma->test_src));
3154 +
3155 +       dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
3156 +           + offsetof(struct hifn_dma, test_src));
3157 +       dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
3158 +           + offsetof(struct hifn_dma, test_dst));
3159 +
3160 +       dma->cmdr[cmdi].l = htole32(16 | masks);
3161 +       dma->srcr[srci].l = htole32(8 | masks);
3162 +       dma->dstr[dsti].l = htole32(4 | masks);
3163 +       dma->resr[resi].l = htole32(4 | masks);
3164 +
3165 +       for (r = 10000; r >= 0; r--) {
3166 +               DELAY(10);
3167 +               if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
3168 +                       break;
3169 +       }
3170 +       if (r < 0) {
3171 +               device_printf(sc->sc_dev, "writeramaddr -- "
3172 +                   "result[%d](addr %d) still valid\n", resi, addr);
3173 +               r = -1;
3174 +               return (-1);
3175 +       } else
3176 +               r = 0;
3177 +
3178 +       WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3179 +           HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
3180 +           HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
3181 +
3182 +       return (r);
3183 +}
3184 +
3185 +static int
3186 +hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
3187 +{
3188 +       struct hifn_dma *dma = sc->sc_dma;
3189 +       hifn_base_command_t rc;
3190 +       const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
3191 +       int r, cmdi, srci, dsti, resi;
3192 +
3193 +       DPRINTF("%s()\n", __FUNCTION__);
3194 +
3195 +       rc.masks = htole16(2 << 13);
3196 +       rc.session_num = htole16(addr >> 14);
3197 +       rc.total_source_count = htole16(addr & 0x3fff);
3198 +       rc.total_dest_count = htole16(8);
3199 +
3200 +       hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
3201 +
3202 +       WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3203 +           HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
3204 +           HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
3205 +
3206 +       bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
3207 +       *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
3208 +
3209 +       dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
3210 +           offsetof(struct hifn_dma, test_src));
3211 +       dma->test_src = 0;
3212 +       dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
3213 +           offsetof(struct hifn_dma, test_dst));
3214 +       dma->test_dst = 0;
3215 +       dma->cmdr[cmdi].l = htole32(8 | masks);
3216 +       dma->srcr[srci].l = htole32(8 | masks);
3217 +       dma->dstr[dsti].l = htole32(8 | masks);
3218 +       dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
3219 +
3220 +       for (r = 10000; r >= 0; r--) {
3221 +               DELAY(10);
3222 +               if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
3223 +                       break;
3224 +       }
3225 +       if (r < 0) {
3226 +               device_printf(sc->sc_dev, "readramaddr -- "
3227 +                   "result[%d](addr %d) still valid\n", resi, addr);
3228 +               r = -1;
3229 +       } else {
3230 +               r = 0;
3231 +               bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
3232 +       }
3233 +
3234 +       WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3235 +           HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
3236 +           HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
3237 +
3238 +       return (r);
3239 +}
3240 +
3241 +/*
3242 + * Initialize the descriptor rings.
3243 + */
3244 +static void 
3245 +hifn_init_dma(struct hifn_softc *sc)
3246 +{
3247 +       struct hifn_dma *dma = sc->sc_dma;
3248 +       int i;
3249 +
3250 +       DPRINTF("%s()\n", __FUNCTION__);
3251 +
3252 +       hifn_set_retry(sc);
3253 +
3254 +       /* initialize static pointer values */
3255 +       for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
3256 +               dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
3257 +                   offsetof(struct hifn_dma, command_bufs[i][0]));
3258 +       for (i = 0; i < HIFN_D_RES_RSIZE; i++)
3259 +               dma->resr[i].p = htole32(sc->sc_dma_physaddr +
3260 +                   offsetof(struct hifn_dma, result_bufs[i][0]));
3261 +
3262 +       dma->cmdr[HIFN_D_CMD_RSIZE].p =
3263 +           htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
3264 +       dma->srcr[HIFN_D_SRC_RSIZE].p =
3265 +           htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
3266 +       dma->dstr[HIFN_D_DST_RSIZE].p =
3267 +           htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
3268 +       dma->resr[HIFN_D_RES_RSIZE].p =
3269 +           htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
3270 +
3271 +       dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
3272 +       dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
3273 +       dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
3274 +}
3275 +
3276 +/*
3277 + * Writes out the raw command buffer space.  Returns the
3278 + * command buffer size.
3279 + */
3280 +static u_int
3281 +hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
3282 +{
3283 +       struct hifn_softc *sc = NULL;
3284 +       u_int8_t *buf_pos;
3285 +       hifn_base_command_t *base_cmd;
3286 +       hifn_mac_command_t *mac_cmd;
3287 +       hifn_crypt_command_t *cry_cmd;
3288 +       int using_mac, using_crypt, len, ivlen;
3289 +       u_int32_t dlen, slen;
3290 +
3291 +       DPRINTF("%s()\n", __FUNCTION__);
3292 +
3293 +       buf_pos = buf;
3294 +       using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
3295 +       using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
3296 +
3297 +       base_cmd = (hifn_base_command_t *)buf_pos;
3298 +       base_cmd->masks = htole16(cmd->base_masks);
3299 +       slen = cmd->src_mapsize;
3300 +       if (cmd->sloplen)
3301 +               dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
3302 +       else
3303 +               dlen = cmd->dst_mapsize;
3304 +       base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
3305 +       base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
3306 +       dlen >>= 16;
3307 +       slen >>= 16;
3308 +       base_cmd->session_num = htole16(
3309 +           ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
3310 +           ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
3311 +       buf_pos += sizeof(hifn_base_command_t);
3312 +
3313 +       if (using_mac) {
3314 +               mac_cmd = (hifn_mac_command_t *)buf_pos;
3315 +               dlen = cmd->maccrd->crd_len;
3316 +               mac_cmd->source_count = htole16(dlen & 0xffff);
3317 +               dlen >>= 16;
3318 +               mac_cmd->masks = htole16(cmd->mac_masks |
3319 +                   ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
3320 +               mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
3321 +               mac_cmd->reserved = 0;
3322 +               buf_pos += sizeof(hifn_mac_command_t);
3323 +       }
3324 +
3325 +       if (using_crypt) {
3326 +               cry_cmd = (hifn_crypt_command_t *)buf_pos;
3327 +               dlen = cmd->enccrd->crd_len;
3328 +               cry_cmd->source_count = htole16(dlen & 0xffff);
3329 +               dlen >>= 16;
3330 +               cry_cmd->masks = htole16(cmd->cry_masks |
3331 +                   ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
3332 +               cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
3333 +               cry_cmd->reserved = 0;
3334 +               buf_pos += sizeof(hifn_crypt_command_t);
3335 +       }
3336 +
3337 +       if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
3338 +               bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
3339 +               buf_pos += HIFN_MAC_KEY_LENGTH;
3340 +       }
3341 +
3342 +       if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
3343 +               switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
3344 +               case HIFN_CRYPT_CMD_ALG_3DES:
3345 +                       bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
3346 +                       buf_pos += HIFN_3DES_KEY_LENGTH;
3347 +                       break;
3348 +               case HIFN_CRYPT_CMD_ALG_DES:
3349 +                       bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
3350 +                       buf_pos += HIFN_DES_KEY_LENGTH;
3351 +                       break;
3352 +               case HIFN_CRYPT_CMD_ALG_RC4:
3353 +                       len = 256;
3354 +                       do {
3355 +                               int clen;
3356 +
3357 +                               clen = MIN(cmd->cklen, len);
3358 +                               bcopy(cmd->ck, buf_pos, clen);
3359 +                               len -= clen;
3360 +                               buf_pos += clen;
3361 +                       } while (len > 0);
3362 +                       bzero(buf_pos, 4);
3363 +                       buf_pos += 4;
3364 +                       break;
3365 +               case HIFN_CRYPT_CMD_ALG_AES:
3366 +                       /*
3367 +                        * AES keys are variable 128, 192 and
3368 +                        * 256 bits (16, 24 and 32 bytes).
3369 +                        */
3370 +                       bcopy(cmd->ck, buf_pos, cmd->cklen);
3371 +                       buf_pos += cmd->cklen;
3372 +                       break;
3373 +               }
3374 +       }
3375 +
3376 +       if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
3377 +               switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
3378 +               case HIFN_CRYPT_CMD_ALG_AES:
3379 +                       ivlen = HIFN_AES_IV_LENGTH;
3380 +                       break;
3381 +               default:
3382 +                       ivlen = HIFN_IV_LENGTH;
3383 +                       break;
3384 +               }
3385 +               bcopy(cmd->iv, buf_pos, ivlen);
3386 +               buf_pos += ivlen;
3387 +       }
3388 +
3389 +       if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
3390 +               bzero(buf_pos, 8);
3391 +               buf_pos += 8;
3392 +       }
3393 +
3394 +       return (buf_pos - buf);
3395 +}
3396 +
3397 +static int
3398 +hifn_dmamap_aligned(struct hifn_operand *op)
3399 +{
3400 +       struct hifn_softc *sc = NULL;
3401 +       int i;
3402 +
3403 +       DPRINTF("%s()\n", __FUNCTION__);
3404 +
3405 +       for (i = 0; i < op->nsegs; i++) {
3406 +               if (op->segs[i].ds_addr & 3)
3407 +                       return (0);
3408 +               if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
3409 +                       return (0);
3410 +       }
3411 +       return (1);
3412 +}
3413 +
3414 +static __inline int
3415 +hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
3416 +{
3417 +       struct hifn_dma *dma = sc->sc_dma;
3418 +
3419 +       if (++idx == HIFN_D_DST_RSIZE) {
3420 +               dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
3421 +                   HIFN_D_MASKDONEIRQ);
3422 +               HIFN_DSTR_SYNC(sc, idx,
3423 +                   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3424 +               idx = 0;
3425 +       }
3426 +       return (idx);
3427 +}
3428 +
3429 +static int
3430 +hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
3431 +{
3432 +       struct hifn_dma *dma = sc->sc_dma;
3433 +       struct hifn_operand *dst = &cmd->dst;
3434 +       u_int32_t p, l;
3435 +       int idx, used = 0, i;
3436 +
3437 +       DPRINTF("%s()\n", __FUNCTION__);
3438 +
3439 +       idx = dma->dsti;
3440 +       for (i = 0; i < dst->nsegs - 1; i++) {
3441 +               dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
3442 +               dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
3443 +               wmb();
3444 +               dma->dstr[idx].l |= htole32(HIFN_D_VALID);
3445 +               HIFN_DSTR_SYNC(sc, idx,
3446 +                   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3447 +               used++;
3448 +
3449 +               idx = hifn_dmamap_dstwrap(sc, idx);
3450 +       }
3451 +
3452 +       if (cmd->sloplen == 0) {
3453 +               p = dst->segs[i].ds_addr;
3454 +               l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
3455 +                   dst->segs[i].ds_len;
3456 +       } else {
3457 +               p = sc->sc_dma_physaddr +
3458 +                   offsetof(struct hifn_dma, slop[cmd->slopidx]);
3459 +               l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
3460 +                   sizeof(u_int32_t);
3461 +
3462 +               if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
3463 +                       dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
3464 +                       dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ |
3465 +                           (dst->segs[i].ds_len - cmd->sloplen));
3466 +                       wmb();
3467 +                       dma->dstr[idx].l |= htole32(HIFN_D_VALID);
3468 +                       HIFN_DSTR_SYNC(sc, idx,
3469 +                           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3470 +                       used++;
3471 +
3472 +                       idx = hifn_dmamap_dstwrap(sc, idx);
3473 +               }
3474 +       }
3475 +       dma->dstr[idx].p = htole32(p);
3476 +       dma->dstr[idx].l = htole32(l);
3477 +       wmb();
3478 +       dma->dstr[idx].l |= htole32(HIFN_D_VALID);
3479 +       HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3480 +       used++;
3481 +
3482 +       idx = hifn_dmamap_dstwrap(sc, idx);
3483 +
3484 +       dma->dsti = idx;
3485 +       dma->dstu += used;
3486 +       return (idx);
3487 +}
3488 +
3489 +static __inline int
3490 +hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
3491 +{
3492 +       struct hifn_dma *dma = sc->sc_dma;
3493 +
3494 +       if (++idx == HIFN_D_SRC_RSIZE) {
3495 +               dma->srcr[idx].l = htole32(HIFN_D_VALID |
3496 +                   HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
3497 +               HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
3498 +                   BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3499 +               idx = 0;
3500 +       }
3501 +       return (idx);
3502 +}
3503 +
3504 +static int
3505 +hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
3506 +{
3507 +       struct hifn_dma *dma = sc->sc_dma;
3508 +       struct hifn_operand *src = &cmd->src;
3509 +       int idx, i;
3510 +       u_int32_t last = 0;
3511 +
3512 +       DPRINTF("%s()\n", __FUNCTION__);
3513 +
3514 +       idx = dma->srci;
3515 +       for (i = 0; i < src->nsegs; i++) {
3516 +               if (i == src->nsegs - 1)
3517 +                       last = HIFN_D_LAST;
3518 +
3519 +               dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
3520 +               dma->srcr[idx].l = htole32(src->segs[i].ds_len |
3521 +                   HIFN_D_MASKDONEIRQ | last);
3522 +               wmb();
3523 +               dma->srcr[idx].l |= htole32(HIFN_D_VALID);
3524 +               HIFN_SRCR_SYNC(sc, idx,
3525 +                   BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3526 +
3527 +               idx = hifn_dmamap_srcwrap(sc, idx);
3528 +       }
3529 +       dma->srci = idx;
3530 +       dma->srcu += src->nsegs;
3531 +       return (idx);
3532 +} 
3533 +
3534 +
3535 +static int 
3536 +hifn_crypto(
3537 +       struct hifn_softc *sc,
3538 +       struct hifn_command *cmd,
3539 +       struct cryptop *crp,
3540 +       int hint)
3541 +{
3542 +       struct  hifn_dma *dma = sc->sc_dma;
3543 +       u_int32_t cmdlen, csr;
3544 +       int cmdi, resi, err = 0;
3545 +       unsigned long l_flags;
3546 +
3547 +       DPRINTF("%s()\n", __FUNCTION__);
3548 +
3549 +       /*
3550 +        * need 1 cmd, and 1 res
3551 +        *
3552 +        * NB: check this first since it's easy.
3553 +        */
3554 +       HIFN_LOCK(sc);
3555 +       if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
3556 +           (dma->resu + 1) > HIFN_D_RES_RSIZE) {
3557 +#ifdef HIFN_DEBUG
3558 +               if (hifn_debug) {
3559 +                       device_printf(sc->sc_dev,
3560 +                               "cmd/result exhaustion, cmdu %u resu %u\n",
3561 +                               dma->cmdu, dma->resu);
3562 +               }
3563 +#endif
3564 +               hifnstats.hst_nomem_cr++;
3565 +               sc->sc_needwakeup |= CRYPTO_SYMQ;
3566 +               HIFN_UNLOCK(sc);
3567 +               return (ERESTART);
3568 +       }
3569 +
3570 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
3571 +               if (pci_map_skb(sc, &cmd->src, cmd->src_skb)) {
3572 +                       hifnstats.hst_nomem_load++;
3573 +                       err = ENOMEM;
3574 +                       goto err_srcmap1;
3575 +               }
3576 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
3577 +               if (pci_map_uio(sc, &cmd->src, cmd->src_io)) {
3578 +                       hifnstats.hst_nomem_load++;
3579 +                       err = ENOMEM;
3580 +                       goto err_srcmap1;
3581 +               }
3582 +       } else {
3583 +               if (pci_map_buf(sc, &cmd->src, cmd->src_buf, crp->crp_ilen)) {
3584 +                       hifnstats.hst_nomem_load++;
3585 +                       err = ENOMEM;
3586 +                       goto err_srcmap1;
3587 +               }
3588 +       }
3589 +
3590 +       if (hifn_dmamap_aligned(&cmd->src)) {
3591 +               cmd->sloplen = cmd->src_mapsize & 3;
3592 +               cmd->dst = cmd->src;
3593 +       } else {
3594 +               if (crp->crp_flags & CRYPTO_F_IOV) {
3595 +                       DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
3596 +                       err = EINVAL;
3597 +                       goto err_srcmap;
3598 +               } else if (crp->crp_flags & CRYPTO_F_SKBUF) {
3599 +#ifdef NOTYET
3600 +                       int totlen, len;
3601 +                       struct mbuf *m, *m0, *mlast;
3602 +
3603 +                       KASSERT(cmd->dst_m == cmd->src_m,
3604 +                               ("hifn_crypto: dst_m initialized improperly"));
3605 +                       hifnstats.hst_unaligned++;
3606 +                       /*
3607 +                        * Source is not aligned on a longword boundary.
3608 +                        * Copy the data to insure alignment.  If we fail
3609 +                        * to allocate mbufs or clusters while doing this
3610 +                        * we return ERESTART so the operation is requeued
3611 +                        * at the crypto later, but only if there are
3612 +                        * ops already posted to the hardware; otherwise we
3613 +                        * have no guarantee that we'll be re-entered.
3614 +                        */
3615 +                       totlen = cmd->src_mapsize;
3616 +                       if (cmd->src_m->m_flags & M_PKTHDR) {
3617 +                               len = MHLEN;
3618 +                               MGETHDR(m0, M_DONTWAIT, MT_DATA);
3619 +                               if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
3620 +                                       m_free(m0);
3621 +                                       m0 = NULL;
3622 +                               }
3623 +                       } else {
3624 +                               len = MLEN;
3625 +                               MGET(m0, M_DONTWAIT, MT_DATA);
3626 +                       }
3627 +                       if (m0 == NULL) {
3628 +                               hifnstats.hst_nomem_mbuf++;
3629 +                               err = dma->cmdu ? ERESTART : ENOMEM;
3630 +                               goto err_srcmap;
3631 +                       }
3632 +                       if (totlen >= MINCLSIZE) {
3633 +                               MCLGET(m0, M_DONTWAIT);
3634 +                               if ((m0->m_flags & M_EXT) == 0) {
3635 +                                       hifnstats.hst_nomem_mcl++;
3636 +                                       err = dma->cmdu ? ERESTART : ENOMEM;
3637 +                                       m_freem(m0);
3638 +                                       goto err_srcmap;
3639 +                               }
3640 +                               len = MCLBYTES;
3641 +                       }
3642 +                       totlen -= len;
3643 +                       m0->m_pkthdr.len = m0->m_len = len;
3644 +                       mlast = m0;
3645 +
3646 +                       while (totlen > 0) {
3647 +                               MGET(m, M_DONTWAIT, MT_DATA);
3648 +                               if (m == NULL) {
3649 +                                       hifnstats.hst_nomem_mbuf++;
3650 +                                       err = dma->cmdu ? ERESTART : ENOMEM;
3651 +                                       m_freem(m0);
3652 +                                       goto err_srcmap;
3653 +                               }
3654 +                               len = MLEN;
3655 +                               if (totlen >= MINCLSIZE) {
3656 +                                       MCLGET(m, M_DONTWAIT);
3657 +                                       if ((m->m_flags & M_EXT) == 0) {
3658 +                                               hifnstats.hst_nomem_mcl++;
3659 +                                               err = dma->cmdu ? ERESTART : ENOMEM;
3660 +                                               mlast->m_next = m;
3661 +                                               m_freem(m0);
3662 +                                               goto err_srcmap;
3663 +                                       }
3664 +                                       len = MCLBYTES;
3665 +                               }
3666 +
3667 +                               m->m_len = len;
3668 +                               m0->m_pkthdr.len += len;
3669 +                               totlen -= len;
3670 +
3671 +                               mlast->m_next = m;
3672 +                               mlast = m;
3673 +                       }
3674 +                       cmd->dst_m = m0;
3675 +#else
3676 +                       device_printf(sc->sc_dev,
3677 +                                       "%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
3678 +                                       __FILE__, __LINE__);
3679 +                       err = EINVAL;
3680 +                       goto err_srcmap;
3681 +#endif
3682 +               } else {
3683 +                       device_printf(sc->sc_dev,
3684 +                                       "%s,%d: unaligned contig buffers not implemented\n",
3685 +                                       __FILE__, __LINE__);
3686 +                       err = EINVAL;
3687 +                       goto err_srcmap;
3688 +               }
3689 +       }
3690 +
3691 +       if (cmd->dst_map == NULL) {
3692 +               if (crp->crp_flags & CRYPTO_F_SKBUF) {
3693 +                       if (pci_map_skb(sc, &cmd->dst, cmd->dst_skb)) {
3694 +                               hifnstats.hst_nomem_map++;
3695 +                               err = ENOMEM;
3696 +                               goto err_dstmap1;
3697 +                       }
3698 +               } else if (crp->crp_flags & CRYPTO_F_IOV) {
3699 +                       if (pci_map_uio(sc, &cmd->dst, cmd->dst_io)) {
3700 +                               hifnstats.hst_nomem_load++;
3701 +                               err = ENOMEM;
3702 +                               goto err_dstmap1;
3703 +                       }
3704 +               } else {
3705 +                       if (pci_map_buf(sc, &cmd->dst, cmd->dst_buf, crp->crp_ilen)) {
3706 +                               hifnstats.hst_nomem_load++;
3707 +                               err = ENOMEM;
3708 +                               goto err_dstmap1;
3709 +                       }
3710 +               }
3711 +       }
3712 +
3713 +#ifdef HIFN_DEBUG
3714 +       if (hifn_debug) {
3715 +               device_printf(sc->sc_dev,
3716 +                   "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
3717 +                   READ_REG_1(sc, HIFN_1_DMA_CSR),
3718 +                   READ_REG_1(sc, HIFN_1_DMA_IER),
3719 +                   dma->cmdu, dma->srcu, dma->dstu, dma->resu,
3720 +                   cmd->src_nsegs, cmd->dst_nsegs);
3721 +       }
3722 +#endif
3723 +
3724 +#if 0
3725 +       if (cmd->src_map == cmd->dst_map) {
3726 +               bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
3727 +                   BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
3728 +       } else {
3729 +               bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
3730 +                   BUS_DMASYNC_PREWRITE);
3731 +               bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
3732 +                   BUS_DMASYNC_PREREAD);
3733 +       }
3734 +#endif
3735 +
3736 +       /*
3737 +        * need N src, and N dst
3738 +        */
3739 +       if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
3740 +           (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
3741 +#ifdef HIFN_DEBUG
3742 +               if (hifn_debug) {
3743 +                       device_printf(sc->sc_dev,
3744 +                               "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
3745 +                               dma->srcu, cmd->src_nsegs,
3746 +                               dma->dstu, cmd->dst_nsegs);
3747 +               }
3748 +#endif
3749 +               hifnstats.hst_nomem_sd++;
3750 +               err = ERESTART;
3751 +               goto err_dstmap;
3752 +       }
3753 +
3754 +       if (dma->cmdi == HIFN_D_CMD_RSIZE) {
3755 +               dma->cmdi = 0;
3756 +               dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3757 +               wmb();
3758 +               dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
3759 +               HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
3760 +                   BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3761 +       }
3762 +       cmdi = dma->cmdi++;
3763 +       cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
3764 +       HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
3765 +
3766 +       /* .p for command/result already set */
3767 +       dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_LAST |
3768 +           HIFN_D_MASKDONEIRQ);
3769 +       wmb();
3770 +       dma->cmdr[cmdi].l |= htole32(HIFN_D_VALID);
3771 +       HIFN_CMDR_SYNC(sc, cmdi,
3772 +           BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3773 +       dma->cmdu++;
3774 +
3775 +       /*
3776 +        * We don't worry about missing an interrupt (which a "command wait"
3777 +        * interrupt salvages us from), unless there is more than one command
3778 +        * in the queue.
3779 +        */
3780 +       if (dma->cmdu > 1) {
3781 +               sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
3782 +               WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
3783 +       }
3784 +
3785 +       hifnstats.hst_ipackets++;
3786 +       hifnstats.hst_ibytes += cmd->src_mapsize;
3787 +
3788 +       hifn_dmamap_load_src(sc, cmd);
3789 +
3790 +       /*
3791 +        * Unlike other descriptors, we don't mask done interrupt from
3792 +        * result descriptor.
3793 +        */
3794 +#ifdef HIFN_DEBUG
3795 +       if (hifn_debug)
3796 +               device_printf(sc->sc_dev, "load res\n");
3797 +#endif
3798 +       if (dma->resi == HIFN_D_RES_RSIZE) {
3799 +               dma->resi = 0;
3800 +               dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3801 +               wmb();
3802 +               dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
3803 +               HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
3804 +                   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3805 +       }
3806 +       resi = dma->resi++;
3807 +       KASSERT(dma->hifn_commands[resi] == NULL,
3808 +               ("hifn_crypto: command slot %u busy", resi));
3809 +       dma->hifn_commands[resi] = cmd;
3810 +       HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
3811 +       if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
3812 +               dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
3813 +                   HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
3814 +               wmb();
3815 +               dma->resr[resi].l |= htole32(HIFN_D_VALID);
3816 +               sc->sc_curbatch++;
3817 +               if (sc->sc_curbatch > hifnstats.hst_maxbatch)
3818 +                       hifnstats.hst_maxbatch = sc->sc_curbatch;
3819 +               hifnstats.hst_totbatch++;
3820 +       } else {
3821 +               dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_LAST);
3822 +               wmb();
3823 +               dma->resr[resi].l |= htole32(HIFN_D_VALID);
3824 +               sc->sc_curbatch = 0;
3825 +       }
3826 +       HIFN_RESR_SYNC(sc, resi,
3827 +           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3828 +       dma->resu++;
3829 +
3830 +       if (cmd->sloplen)
3831 +               cmd->slopidx = resi;
3832 +
3833 +       hifn_dmamap_load_dst(sc, cmd);
3834 +
3835 +       csr = 0;
3836 +       if (sc->sc_c_busy == 0) {
3837 +               csr |= HIFN_DMACSR_C_CTRL_ENA;
3838 +               sc->sc_c_busy = 1;
3839 +       }
3840 +       if (sc->sc_s_busy == 0) {
3841 +               csr |= HIFN_DMACSR_S_CTRL_ENA;
3842 +               sc->sc_s_busy = 1;
3843 +       }
3844 +       if (sc->sc_r_busy == 0) {
3845 +               csr |= HIFN_DMACSR_R_CTRL_ENA;
3846 +               sc->sc_r_busy = 1;
3847 +       }
3848 +       if (sc->sc_d_busy == 0) {
3849 +               csr |= HIFN_DMACSR_D_CTRL_ENA;
3850 +               sc->sc_d_busy = 1;
3851 +       }
3852 +       if (csr)
3853 +               WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
3854 +
3855 +#ifdef HIFN_DEBUG
3856 +       if (hifn_debug) {
3857 +               device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
3858 +                   READ_REG_1(sc, HIFN_1_DMA_CSR),
3859 +                   READ_REG_1(sc, HIFN_1_DMA_IER));
3860 +       }
3861 +#endif
3862 +
3863 +       sc->sc_active = 5;
3864 +       HIFN_UNLOCK(sc);
3865 +       KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
3866 +       return (err);           /* success */
3867 +
3868 +err_dstmap:
3869 +       if (cmd->src_map != cmd->dst_map)
3870 +               pci_unmap_buf(sc, &cmd->dst);
3871 +err_dstmap1:
3872 +err_srcmap:
3873 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
3874 +               if (cmd->src_skb != cmd->dst_skb)
3875 +#ifdef NOTYET
3876 +                       m_freem(cmd->dst_m);
3877 +#else
3878 +                       device_printf(sc->sc_dev,
3879 +                                       "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
3880 +                                       __FILE__, __LINE__);
3881 +#endif
3882 +       }
3883 +       pci_unmap_buf(sc, &cmd->src);
3884 +err_srcmap1:
3885 +       HIFN_UNLOCK(sc);
3886 +       return (err);
3887 +}
3888 +
3889 +static void
3890 +hifn_tick(unsigned long arg)
3891 +{
3892 +       struct hifn_softc *sc;
3893 +       unsigned long l_flags;
3894 +
3895 +       if (arg >= HIFN_MAX_CHIPS)
3896 +               return;
3897 +       sc = hifn_chip_idx[arg];
3898 +       if (!sc)
3899 +               return;
3900 +
3901 +       HIFN_LOCK(sc);
3902 +       if (sc->sc_active == 0) {
3903 +               struct hifn_dma *dma = sc->sc_dma;
3904 +               u_int32_t r = 0;
3905 +
3906 +               if (dma->cmdu == 0 && sc->sc_c_busy) {
3907 +                       sc->sc_c_busy = 0;
3908 +                       r |= HIFN_DMACSR_C_CTRL_DIS;
3909 +               }
3910 +               if (dma->srcu == 0 && sc->sc_s_busy) {
3911 +                       sc->sc_s_busy = 0;
3912 +                       r |= HIFN_DMACSR_S_CTRL_DIS;
3913 +               }
3914 +               if (dma->dstu == 0 && sc->sc_d_busy) {
3915 +                       sc->sc_d_busy = 0;
3916 +                       r |= HIFN_DMACSR_D_CTRL_DIS;
3917 +               }
3918 +               if (dma->resu == 0 && sc->sc_r_busy) {
3919 +                       sc->sc_r_busy = 0;
3920 +                       r |= HIFN_DMACSR_R_CTRL_DIS;
3921 +               }
3922 +               if (r)
3923 +                       WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
3924 +       } else
3925 +               sc->sc_active--;
3926 +       HIFN_UNLOCK(sc);
3927 +       mod_timer(&sc->sc_tickto, jiffies + HZ);
3928 +}
3929 +
3930 +static irqreturn_t
3931 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
3932 +hifn_intr(int irq, void *arg)
3933 +#else
3934 +hifn_intr(int irq, void *arg, struct pt_regs *regs)
3935 +#endif
3936 +{
3937 +       struct hifn_softc *sc = arg;
3938 +       struct hifn_dma *dma;
3939 +       u_int32_t dmacsr, restart;
3940 +       int i, u;
3941 +       unsigned long l_flags;
3942 +
3943 +       dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
3944 +
3945 +       /* Nothing in the DMA unit interrupted */
3946 +       if ((dmacsr & sc->sc_dmaier) == 0)
3947 +               return IRQ_NONE;
3948 +
3949 +       HIFN_LOCK(sc);
3950 +
3951 +       dma = sc->sc_dma;
3952 +
3953 +#ifdef HIFN_DEBUG
3954 +       if (hifn_debug) {
3955 +               device_printf(sc->sc_dev,
3956 +                   "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
3957 +                   dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
3958 +                   dma->cmdi, dma->srci, dma->dsti, dma->resi,
3959 +                   dma->cmdk, dma->srck, dma->dstk, dma->resk,
3960 +                   dma->cmdu, dma->srcu, dma->dstu, dma->resu);
3961 +       }
3962 +#endif
3963 +
3964 +       WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
3965 +
3966 +       if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
3967 +           (dmacsr & HIFN_DMACSR_PUBDONE))
3968 +               WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
3969 +                   READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
3970 +
3971 +       restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
3972 +       if (restart)
3973 +               device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
3974 +
3975 +       if (sc->sc_flags & HIFN_IS_7811) {
3976 +               if (dmacsr & HIFN_DMACSR_ILLR)
3977 +                       device_printf(sc->sc_dev, "illegal read\n");
3978 +               if (dmacsr & HIFN_DMACSR_ILLW)
3979 +                       device_printf(sc->sc_dev, "illegal write\n");
3980 +       }
3981 +
3982 +       restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
3983 +           HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
3984 +       if (restart) {
3985 +               device_printf(sc->sc_dev, "abort, resetting.\n");
3986 +               hifnstats.hst_abort++;
3987 +               hifn_abort(sc);
3988 +               HIFN_UNLOCK(sc);
3989 +               return IRQ_HANDLED;
3990 +       }
3991 +
3992 +       if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
3993 +               /*
3994 +                * If no slots to process and we receive a "waiting on
3995 +                * command" interrupt, we disable the "waiting on command"
3996 +                * (by clearing it).
3997 +                */
3998 +               sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
3999 +               WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
4000 +       }
4001 +
4002 +       /* clear the rings */
4003 +       i = dma->resk; u = dma->resu;
4004 +       while (u != 0) {
4005 +               HIFN_RESR_SYNC(sc, i,
4006 +                   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4007 +               if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
4008 +                       HIFN_RESR_SYNC(sc, i,
4009 +                           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4010 +                       break;
4011 +               }
4012 +
4013 +               if (i != HIFN_D_RES_RSIZE) {
4014 +                       struct hifn_command *cmd;
4015 +                       u_int8_t *macbuf = NULL;
4016 +
4017 +                       HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
4018 +                       cmd = dma->hifn_commands[i];
4019 +                       KASSERT(cmd != NULL,
4020 +                               ("hifn_intr: null command slot %u", i));
4021 +                       dma->hifn_commands[i] = NULL;
4022 +
4023 +                       if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
4024 +                               macbuf = dma->result_bufs[i];
4025 +                               macbuf += 12;
4026 +                       }
4027 +
4028 +                       hifn_callback(sc, cmd, macbuf);
4029 +                       hifnstats.hst_opackets++;
4030 +                       u--;
4031 +               }
4032 +
4033 +               if (++i == (HIFN_D_RES_RSIZE + 1))
4034 +                       i = 0;
4035 +       }
4036 +       dma->resk = i; dma->resu = u;
4037 +
4038 +       i = dma->srck; u = dma->srcu;
4039 +       while (u != 0) {
4040 +               if (i == HIFN_D_SRC_RSIZE)
4041 +                       i = 0;
4042 +               HIFN_SRCR_SYNC(sc, i,
4043 +                   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4044 +               if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
4045 +                       HIFN_SRCR_SYNC(sc, i,
4046 +                           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4047 +                       break;
4048 +               }
4049 +               i++, u--;
4050 +       }
4051 +       dma->srck = i; dma->srcu = u;
4052 +
4053 +       i = dma->cmdk; u = dma->cmdu;
4054 +       while (u != 0) {
4055 +               HIFN_CMDR_SYNC(sc, i,
4056 +                   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4057 +               if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
4058 +                       HIFN_CMDR_SYNC(sc, i,
4059 +                           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4060 +                       break;
4061 +               }
4062 +               if (i != HIFN_D_CMD_RSIZE) {
4063 +                       u--;
4064 +                       HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
4065 +               }
4066 +               if (++i == (HIFN_D_CMD_RSIZE + 1))
4067 +                       i = 0;
4068 +       }
4069 +       dma->cmdk = i; dma->cmdu = u;
4070 +
4071 +       HIFN_UNLOCK(sc);
4072 +
4073 +       if (sc->sc_needwakeup) {                /* XXX check high watermark */
4074 +               int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
4075 +#ifdef HIFN_DEBUG
4076 +               if (hifn_debug)
4077 +                       device_printf(sc->sc_dev,
4078 +                               "wakeup crypto (%x) u %d/%d/%d/%d\n",
4079 +                               sc->sc_needwakeup,
4080 +                               dma->cmdu, dma->srcu, dma->dstu, dma->resu);
4081 +#endif
4082 +               sc->sc_needwakeup &= ~wakeup;
4083 +               crypto_unblock(sc->sc_cid, wakeup);
4084 +       }
4085 +
4086 +       return IRQ_HANDLED;
4087 +}
4088 +
4089 +/*
4090 + * Allocate a new 'session' and return an encoded session id.  'sidp'
4091 + * contains our registration id, and should contain an encoded session
4092 + * id on successful allocation.
4093 + */
4094 +static int
4095 +hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
4096 +{
4097 +       struct hifn_softc *sc = device_get_softc(dev);
4098 +       struct cryptoini *c;
4099 +       int mac = 0, cry = 0, sesn;
4100 +       struct hifn_session *ses = NULL;
4101 +       unsigned long l_flags;
4102 +
4103 +       DPRINTF("%s()\n", __FUNCTION__);
4104 +
4105 +       KASSERT(sc != NULL, ("hifn_newsession: null softc"));
4106 +       if (sidp == NULL || cri == NULL || sc == NULL) {
4107 +               DPRINTF("%s,%d: %s - EINVAL\n", __FILE__, __LINE__, __FUNCTION__);
4108 +               return (EINVAL);
4109 +       }
4110 +
4111 +       HIFN_LOCK(sc);
4112 +       if (sc->sc_sessions == NULL) {
4113 +               ses = sc->sc_sessions = (struct hifn_session *)kmalloc(sizeof(*ses),
4114 +                               SLAB_ATOMIC);
4115 +               if (ses == NULL) {
4116 +                       HIFN_UNLOCK(sc);
4117 +                       return (ENOMEM);
4118 +               }
4119 +               sesn = 0;
4120 +               sc->sc_nsessions = 1;
4121 +       } else {
4122 +               for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
4123 +                       if (!sc->sc_sessions[sesn].hs_used) {
4124 +                               ses = &sc->sc_sessions[sesn];
4125 +                               break;
4126 +                       }
4127 +               }
4128 +
4129 +               if (ses == NULL) {
4130 +                       sesn = sc->sc_nsessions;
4131 +                       ses = (struct hifn_session *)kmalloc((sesn + 1) * sizeof(*ses),
4132 +                                       SLAB_ATOMIC);
4133 +                       if (ses == NULL) {
4134 +                               HIFN_UNLOCK(sc);
4135 +                               return (ENOMEM);
4136 +                       }
4137 +                       bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
4138 +                       bzero(sc->sc_sessions, sesn * sizeof(*ses));
4139 +                       kfree(sc->sc_sessions);
4140 +                       sc->sc_sessions = ses;
4141 +                       ses = &sc->sc_sessions[sesn];
4142 +                       sc->sc_nsessions++;
4143 +               }
4144 +       }
4145 +       HIFN_UNLOCK(sc);
4146 +
4147 +       bzero(ses, sizeof(*ses));
4148 +       ses->hs_used = 1;
4149 +
4150 +       for (c = cri; c != NULL; c = c->cri_next) {
4151 +               switch (c->cri_alg) {
4152 +               case CRYPTO_MD5:
4153 +               case CRYPTO_SHA1:
4154 +               case CRYPTO_MD5_HMAC:
4155 +               case CRYPTO_SHA1_HMAC:
4156 +                       if (mac) {
4157 +                               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4158 +                               return (EINVAL);
4159 +                       }
4160 +                       mac = 1;
4161 +                       ses->hs_mlen = c->cri_mlen;
4162 +                       if (ses->hs_mlen == 0) {
4163 +                               switch (c->cri_alg) {
4164 +                               case CRYPTO_MD5:
4165 +                               case CRYPTO_MD5_HMAC:
4166 +                                       ses->hs_mlen = 16;
4167 +                                       break;
4168 +                               case CRYPTO_SHA1:
4169 +                               case CRYPTO_SHA1_HMAC:
4170 +                                       ses->hs_mlen = 20;
4171 +                                       break;
4172 +                               }
4173 +                       }
4174 +                       break;
4175 +               case CRYPTO_DES_CBC:
4176 +               case CRYPTO_3DES_CBC:
4177 +               case CRYPTO_AES_CBC:
4178 +                       /* XXX this may read fewer, does it matter? */
4179 +                       read_random(ses->hs_iv,
4180 +                               c->cri_alg == CRYPTO_AES_CBC ?
4181 +                                       HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
4182 +                       /*FALLTHROUGH*/
4183 +               case CRYPTO_ARC4:
4184 +                       if (cry) {
4185 +                               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4186 +                               return (EINVAL);
4187 +                       }
4188 +                       cry = 1;
4189 +                       break;
4190 +               default:
4191 +                       DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4192 +                       return (EINVAL);
4193 +               }
4194 +       }
4195 +       if (mac == 0 && cry == 0) {
4196 +               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4197 +               return (EINVAL);
4198 +       }
4199 +
4200 +       *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
4201 +
4202 +       return (0);
4203 +}
4204 +
4205 +/*
4206 + * Deallocate a session.
4207 + * XXX this routine should run a zero'd mac/encrypt key into context ram.
4208 + * XXX to blow away any keys already stored there.
4209 + */
4210 +static int
4211 +hifn_freesession(device_t dev, u_int64_t tid)
4212 +{
4213 +       struct hifn_softc *sc = device_get_softc(dev);
4214 +       int session, error;
4215 +       u_int32_t sid = CRYPTO_SESID2LID(tid);
4216 +       unsigned long l_flags;
4217 +
4218 +       DPRINTF("%s()\n", __FUNCTION__);
4219 +
4220 +       KASSERT(sc != NULL, ("hifn_freesession: null softc"));
4221 +       if (sc == NULL) {
4222 +               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4223 +               return (EINVAL);
4224 +       }
4225 +
4226 +       HIFN_LOCK(sc);
4227 +       session = HIFN_SESSION(sid);
4228 +       if (session < sc->sc_nsessions) {
4229 +               bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
4230 +               error = 0;
4231 +       } else {
4232 +               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4233 +               error = EINVAL;
4234 +       }
4235 +       HIFN_UNLOCK(sc);
4236 +
4237 +       return (error);
4238 +}
4239 +
4240 +static int
4241 +hifn_process(device_t dev, struct cryptop *crp, int hint)
4242 +{
4243 +       struct hifn_softc *sc = device_get_softc(dev);
4244 +       struct hifn_command *cmd = NULL;
4245 +       int session, err, ivlen;
4246 +       struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
4247 +
4248 +       DPRINTF("%s()\n", __FUNCTION__);
4249 +
4250 +       if (crp == NULL || crp->crp_callback == NULL) {
4251 +               hifnstats.hst_invalid++;
4252 +               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4253 +               return (EINVAL);
4254 +       }
4255 +       session = HIFN_SESSION(crp->crp_sid);
4256 +
4257 +       if (sc == NULL || session >= sc->sc_nsessions) {
4258 +               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4259 +               err = EINVAL;
4260 +               goto errout;
4261 +       }
4262 +
4263 +       cmd = kmalloc(sizeof(struct hifn_command), SLAB_ATOMIC);
4264 +       if (cmd == NULL) {
4265 +               hifnstats.hst_nomem++;
4266 +               err = ENOMEM;
4267 +               goto errout;
4268 +       }
4269 +       memset(cmd, 0, sizeof(*cmd));
4270 +
4271 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
4272 +               cmd->src_skb = (struct sk_buff *)crp->crp_buf;
4273 +               cmd->dst_skb = (struct sk_buff *)crp->crp_buf;
4274 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
4275 +               cmd->src_io = (struct uio *)crp->crp_buf;
4276 +               cmd->dst_io = (struct uio *)crp->crp_buf;
4277 +       } else {
4278 +               cmd->src_buf = crp->crp_buf;
4279 +               cmd->dst_buf = crp->crp_buf;
4280 +       }
4281 +
4282 +       crd1 = crp->crp_desc;
4283 +       if (crd1 == NULL) {
4284 +               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4285 +               err = EINVAL;
4286 +               goto errout;
4287 +       }
4288 +       crd2 = crd1->crd_next;
4289 +
4290 +       if (crd2 == NULL) {
4291 +               if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
4292 +                   crd1->crd_alg == CRYPTO_SHA1_HMAC ||
4293 +                   crd1->crd_alg == CRYPTO_SHA1 ||
4294 +                   crd1->crd_alg == CRYPTO_MD5) {
4295 +                       maccrd = crd1;
4296 +                       enccrd = NULL;
4297 +               } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
4298 +                   crd1->crd_alg == CRYPTO_3DES_CBC ||
4299 +                   crd1->crd_alg == CRYPTO_AES_CBC ||
4300 +                   crd1->crd_alg == CRYPTO_ARC4) {
4301 +                       if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
4302 +                               cmd->base_masks |= HIFN_BASE_CMD_DECODE;
4303 +                       maccrd = NULL;
4304 +                       enccrd = crd1;
4305 +               } else {
4306 +                       DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4307 +                       err = EINVAL;
4308 +                       goto errout;
4309 +               }
4310 +       } else {
4311 +               if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
4312 +                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
4313 +                     crd1->crd_alg == CRYPTO_MD5 ||
4314 +                     crd1->crd_alg == CRYPTO_SHA1) &&
4315 +                   (crd2->crd_alg == CRYPTO_DES_CBC ||
4316 +                    crd2->crd_alg == CRYPTO_3DES_CBC ||
4317 +                    crd2->crd_alg == CRYPTO_AES_CBC ||
4318 +                    crd2->crd_alg == CRYPTO_ARC4) &&
4319 +                   ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
4320 +                       cmd->base_masks = HIFN_BASE_CMD_DECODE;
4321 +                       maccrd = crd1;
4322 +                       enccrd = crd2;
4323 +               } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
4324 +                    crd1->crd_alg == CRYPTO_ARC4 ||
4325 +                    crd1->crd_alg == CRYPTO_3DES_CBC ||
4326 +                    crd1->crd_alg == CRYPTO_AES_CBC) &&
4327 +                   (crd2->crd_alg == CRYPTO_MD5_HMAC ||
4328 +                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
4329 +                     crd2->crd_alg == CRYPTO_MD5 ||
4330 +                     crd2->crd_alg == CRYPTO_SHA1) &&
4331 +                   (crd1->crd_flags & CRD_F_ENCRYPT)) {
4332 +                       enccrd = crd1;
4333 +                       maccrd = crd2;
4334 +               } else {
4335 +                       /*
4336 +                        * We cannot order the 7751 as requested
4337 +                        */
4338 +                       DPRINTF("%s,%d: %s %d,%d,%d - EINVAL\n",__FILE__,__LINE__,__FUNCTION__, crd1->crd_alg, crd2->crd_alg, crd1->crd_flags & CRD_F_ENCRYPT);
4339 +                       err = EINVAL;
4340 +                       goto errout;
4341 +               }
4342 +       }
4343 +
4344 +       if (enccrd) {
4345 +               cmd->enccrd = enccrd;
4346 +               cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
4347 +               switch (enccrd->crd_alg) {
4348 +               case CRYPTO_ARC4:
4349 +                       cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
4350 +                       break;
4351 +               case CRYPTO_DES_CBC:
4352 +                       cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
4353 +                           HIFN_CRYPT_CMD_MODE_CBC |
4354 +                           HIFN_CRYPT_CMD_NEW_IV;
4355 +                       break;
4356 +               case CRYPTO_3DES_CBC:
4357 +                       cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
4358 +                           HIFN_CRYPT_CMD_MODE_CBC |
4359 +                           HIFN_CRYPT_CMD_NEW_IV;
4360 +                       break;
4361 +               case CRYPTO_AES_CBC:
4362 +                       cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
4363 +                           HIFN_CRYPT_CMD_MODE_CBC |
4364 +                           HIFN_CRYPT_CMD_NEW_IV;
4365 +                       break;
4366 +               default:
4367 +                       DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4368 +                       err = EINVAL;
4369 +                       goto errout;
4370 +               }
4371 +               if (enccrd->crd_alg != CRYPTO_ARC4) {
4372 +                       ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
4373 +                               HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
4374 +                       if (enccrd->crd_flags & CRD_F_ENCRYPT) {
4375 +                               if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
4376 +                                       bcopy(enccrd->crd_iv, cmd->iv, ivlen);
4377 +                               else
4378 +                                       bcopy(sc->sc_sessions[session].hs_iv,
4379 +                                           cmd->iv, ivlen);
4380 +
4381 +                               if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
4382 +                                   == 0) {
4383 +                                       crypto_copyback(crp->crp_flags,
4384 +                                           crp->crp_buf, enccrd->crd_inject,
4385 +                                           ivlen, cmd->iv);
4386 +                               }
4387 +                       } else {
4388 +                               if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
4389 +                                       bcopy(enccrd->crd_iv, cmd->iv, ivlen);
4390 +                               else {
4391 +                                       crypto_copydata(crp->crp_flags,
4392 +                                           crp->crp_buf, enccrd->crd_inject,
4393 +                                           ivlen, cmd->iv);
4394 +                               }
4395 +                       }
4396 +               }
4397 +
4398 +               if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
4399 +                       cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
4400 +               cmd->ck = enccrd->crd_key;
4401 +               cmd->cklen = enccrd->crd_klen >> 3;
4402 +               cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
4403 +
4404 +               /* 
4405 +                * Need to specify the size for the AES key in the masks.
4406 +                */
4407 +               if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
4408 +                   HIFN_CRYPT_CMD_ALG_AES) {
4409 +                       switch (cmd->cklen) {
4410 +                       case 16:
4411 +                               cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
4412 +                               break;
4413 +                       case 24:
4414 +                               cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
4415 +                               break;
4416 +                       case 32:
4417 +                               cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
4418 +                               break;
4419 +                       default:
4420 +                               DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4421 +                               err = EINVAL;
4422 +                               goto errout;
4423 +                       }
4424 +               }
4425 +       }
4426 +
4427 +       if (maccrd) {
4428 +               cmd->maccrd = maccrd;
4429 +               cmd->base_masks |= HIFN_BASE_CMD_MAC;
4430 +
4431 +               switch (maccrd->crd_alg) {
4432 +               case CRYPTO_MD5:
4433 +                       cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
4434 +                           HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
4435 +                           HIFN_MAC_CMD_POS_IPSEC;
4436 +                       break;
4437 +               case CRYPTO_MD5_HMAC:
4438 +                       cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
4439 +                           HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
4440 +                           HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
4441 +                       break;
4442 +               case CRYPTO_SHA1:
4443 +                       cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
4444 +                           HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
4445 +                           HIFN_MAC_CMD_POS_IPSEC;
4446 +                       break;
4447 +               case CRYPTO_SHA1_HMAC:
4448 +                       cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
4449 +                           HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
4450 +                           HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
4451 +                       break;
4452 +               }
4453 +
4454 +               if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
4455 +                    maccrd->crd_alg == CRYPTO_MD5_HMAC) {
4456 +                       cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
4457 +                       bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
4458 +                       bzero(cmd->mac + (maccrd->crd_klen >> 3),
4459 +                           HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
4460 +               }
4461 +       }
4462 +
4463 +       cmd->crp = crp;
4464 +       cmd->session_num = session;
4465 +       cmd->softc = sc;
4466 +
4467 +       err = hifn_crypto(sc, cmd, crp, hint);
4468 +       if (!err) {
4469 +               return 0;
4470 +       } else if (err == ERESTART) {
4471 +               /*
4472 +                * There weren't enough resources to dispatch the request
4473 +                * to the part.  Notify the caller so they'll requeue this
4474 +                * request and resubmit it again soon.
4475 +                */
4476 +#ifdef HIFN_DEBUG
4477 +               if (hifn_debug)
4478 +                       device_printf(sc->sc_dev, "requeue request\n");
4479 +#endif
4480 +               kfree(cmd);
4481 +               sc->sc_needwakeup |= CRYPTO_SYMQ;
4482 +               return (err);
4483 +       }
4484 +
4485 +errout:
4486 +       if (cmd != NULL)
4487 +               kfree(cmd);
4488 +       if (err == EINVAL)
4489 +               hifnstats.hst_invalid++;
4490 +       else
4491 +               hifnstats.hst_nomem++;
4492 +       crp->crp_etype = err;
4493 +       crypto_done(crp);
4494 +       return (err);
4495 +}
4496 +
4497 +static void
4498 +hifn_abort(struct hifn_softc *sc)
4499 +{
4500 +       struct hifn_dma *dma = sc->sc_dma;
4501 +       struct hifn_command *cmd;
4502 +       struct cryptop *crp;
4503 +       int i, u;
4504 +
4505 +       DPRINTF("%s()\n", __FUNCTION__);
4506 +
4507 +       i = dma->resk; u = dma->resu;
4508 +       while (u != 0) {
4509 +               cmd = dma->hifn_commands[i];
4510 +               KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
4511 +               dma->hifn_commands[i] = NULL;
4512 +               crp = cmd->crp;
4513 +
4514 +               if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
4515 +                       /* Salvage what we can. */
4516 +                       u_int8_t *macbuf;
4517 +
4518 +                       if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
4519 +                               macbuf = dma->result_bufs[i];
4520 +                               macbuf += 12;
4521 +                       } else
4522 +                               macbuf = NULL;
4523 +                       hifnstats.hst_opackets++;
4524 +                       hifn_callback(sc, cmd, macbuf);
4525 +               } else {
4526 +#if 0
4527 +                       if (cmd->src_map == cmd->dst_map) {
4528 +                               bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4529 +                                   BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4530 +                       } else {
4531 +                               bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4532 +                                   BUS_DMASYNC_POSTWRITE);
4533 +                               bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
4534 +                                   BUS_DMASYNC_POSTREAD);
4535 +                       }
4536 +#endif
4537 +
4538 +                       if (cmd->src_skb != cmd->dst_skb) {
4539 +#ifdef NOTYET
4540 +                               m_freem(cmd->src_m);
4541 +                               crp->crp_buf = (caddr_t)cmd->dst_m;
4542 +#else
4543 +                               device_printf(sc->sc_dev,
4544 +                                               "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
4545 +                                               __FILE__, __LINE__);
4546 +#endif
4547 +                       }
4548 +
4549 +                       /* non-shared buffers cannot be restarted */
4550 +                       if (cmd->src_map != cmd->dst_map) {
4551 +                               /*
4552 +                                * XXX should be EAGAIN, delayed until
4553 +                                * after the reset.
4554 +                                */
4555 +                               crp->crp_etype = ENOMEM;
4556 +                               pci_unmap_buf(sc, &cmd->dst);
4557 +                       } else
4558 +                               crp->crp_etype = ENOMEM;
4559 +
4560 +                       pci_unmap_buf(sc, &cmd->src);
4561 +
4562 +                       kfree(cmd);
4563 +                       if (crp->crp_etype != EAGAIN)
4564 +                               crypto_done(crp);
4565 +               }
4566 +
4567 +               if (++i == HIFN_D_RES_RSIZE)
4568 +                       i = 0;
4569 +               u--;
4570 +       }
4571 +       dma->resk = i; dma->resu = u;
4572 +
4573 +       hifn_reset_board(sc, 1);
4574 +       hifn_init_dma(sc);
4575 +       hifn_init_pci_registers(sc);
4576 +}
4577 +
4578 +static void
4579 +hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
4580 +{
4581 +       struct hifn_dma *dma = sc->sc_dma;
4582 +       struct cryptop *crp = cmd->crp;
4583 +       struct cryptodesc *crd;
4584 +       int i, u, ivlen;
4585 +
4586 +       DPRINTF("%s()\n", __FUNCTION__);
4587 +
4588 +#if 0
4589 +       if (cmd->src_map == cmd->dst_map) {
4590 +               bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4591 +                   BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
4592 +       } else {
4593 +               bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4594 +                   BUS_DMASYNC_POSTWRITE);
4595 +               bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
4596 +                   BUS_DMASYNC_POSTREAD);
4597 +       }
4598 +#endif
4599 +
4600 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
4601 +               if (cmd->src_skb != cmd->dst_skb) {
4602 +#ifdef NOTYET
4603 +                       crp->crp_buf = (caddr_t)cmd->dst_m;
4604 +                       totlen = cmd->src_mapsize;
4605 +                       for (m = cmd->dst_m; m != NULL; m = m->m_next) {
4606 +                               if (totlen < m->m_len) {
4607 +                                       m->m_len = totlen;
4608 +                                       totlen = 0;
4609 +                               } else
4610 +                                       totlen -= m->m_len;
4611 +                       }
4612 +                       cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
4613 +                       m_freem(cmd->src_m);
4614 +#else
4615 +                       device_printf(sc->sc_dev,
4616 +                                       "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
4617 +                                       __FILE__, __LINE__);
4618 +#endif
4619 +               }
4620 +       }
4621 +
4622 +       if (cmd->sloplen != 0) {
4623 +               crypto_copyback(crp->crp_flags, crp->crp_buf,
4624 +                   cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
4625 +                   (caddr_t)&dma->slop[cmd->slopidx]);
4626 +       }
4627 +
4628 +       i = dma->dstk; u = dma->dstu;
4629 +       while (u != 0) {
4630 +               if (i == HIFN_D_DST_RSIZE)
4631 +                       i = 0;
4632 +#if 0
4633 +               bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
4634 +                   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4635 +#endif
4636 +               if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
4637 +#if 0
4638 +                       bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
4639 +                           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4640 +#endif
4641 +                       break;
4642 +               }
4643 +               i++, u--;
4644 +       }
4645 +       dma->dstk = i; dma->dstu = u;
4646 +
4647 +       hifnstats.hst_obytes += cmd->dst_mapsize;
4648 +
4649 +       if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
4650 +           HIFN_BASE_CMD_CRYPT) {
4651 +               for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
4652 +                       if (crd->crd_alg != CRYPTO_DES_CBC &&
4653 +                           crd->crd_alg != CRYPTO_3DES_CBC &&
4654 +                           crd->crd_alg != CRYPTO_AES_CBC)
4655 +                               continue;
4656 +                       ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
4657 +                               HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
4658 +                       crypto_copydata(crp->crp_flags, crp->crp_buf,
4659 +                           crd->crd_skip + crd->crd_len - ivlen, ivlen,
4660 +                           cmd->softc->sc_sessions[cmd->session_num].hs_iv);
4661 +                       break;
4662 +               }
4663 +       }
4664 +
4665 +       if (macbuf != NULL) {
4666 +               for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
4667 +                        int len;
4668 +
4669 +                       if (crd->crd_alg != CRYPTO_MD5 &&
4670 +                           crd->crd_alg != CRYPTO_SHA1 &&
4671 +                           crd->crd_alg != CRYPTO_MD5_HMAC &&
4672 +                           crd->crd_alg != CRYPTO_SHA1_HMAC) {
4673 +                               continue;
4674 +                       }
4675 +                       len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
4676 +                       crypto_copyback(crp->crp_flags, crp->crp_buf,
4677 +                           crd->crd_inject, len, macbuf);
4678 +                       break;
4679 +               }
4680 +       }
4681 +
4682 +       if (cmd->src_map != cmd->dst_map)
4683 +               pci_unmap_buf(sc, &cmd->dst);
4684 +       pci_unmap_buf(sc, &cmd->src);
4685 +       kfree(cmd);
4686 +       crypto_done(crp);
4687 +}
4688 +
4689 +/*
4690 + * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
4691 + * and Group 1 registers; avoid conditions that could create
4692 + * burst writes by doing a read in between the writes.
4693 + *
4694 + * NB: The read we interpose is always to the same register;
4695 + *     we do this because reading from an arbitrary (e.g. last)
4696 + *     register may not always work.
4697 + */
4698 +static void
4699 +hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
4700 +{
4701 +       if (sc->sc_flags & HIFN_IS_7811) {
4702 +               if (sc->sc_bar0_lastreg == reg - 4)
4703 +                       readl(sc->sc_bar0 + HIFN_0_PUCNFG);
4704 +               sc->sc_bar0_lastreg = reg;
4705 +       }
4706 +       writel(val, sc->sc_bar0 + reg);
4707 +}
4708 +
4709 +static void
4710 +hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
4711 +{
4712 +       if (sc->sc_flags & HIFN_IS_7811) {
4713 +               if (sc->sc_bar1_lastreg == reg - 4)
4714 +                       readl(sc->sc_bar1 + HIFN_1_REVID);
4715 +               sc->sc_bar1_lastreg = reg;
4716 +       }
4717 +       writel(val, sc->sc_bar1 + reg);
4718 +}
4719 +
4720 +
4721 +static struct pci_device_id hifn_pci_tbl[] = {
4722 +       { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951,
4723 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4724 +       { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955,
4725 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4726 +       { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956,
4727 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4728 +       { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751,
4729 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4730 +       { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON,
4731 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4732 +       { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811,
4733 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4734 +       /*
4735 +        * Other vendors share this PCI ID as well, such as
4736 +        * http://www.powercrypt.com, and obviously they also
4737 +        * use the same key.
4738 +        */
4739 +       { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751,
4740 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4741 +       { 0, 0, 0, 0, 0, 0, }
4742 +};
4743 +MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
4744 +
4745 +static struct pci_driver hifn_driver = {
4746 +       .name         = "hifn",
4747 +       .id_table     = hifn_pci_tbl,
4748 +       .probe        = hifn_probe,
4749 +       .remove       = hifn_remove,
4750 +       /* add PM stuff here one day */
4751 +};
4752 +
4753 +static int __init hifn_init (void)
4754 +{
4755 +       struct hifn_softc *sc = NULL;
4756 +       int rc;
4757 +
4758 +       DPRINTF("%s(%p)\n", __FUNCTION__, hifn_init);
4759 +
4760 +       rc = pci_register_driver(&hifn_driver);
4761 +       pci_register_driver_compat(&hifn_driver, rc);
4762 +
4763 +       return rc;
4764 +}
4765 +
4766 +static void __exit hifn_exit (void)
4767 +{
4768 +       pci_unregister_driver(&hifn_driver);
4769 +}
4770 +
4771 +module_init(hifn_init);
4772 +module_exit(hifn_exit);
4773 +
4774 +MODULE_LICENSE("BSD");
4775 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
4776 +MODULE_DESCRIPTION("OCF driver for hifn PCI crypto devices");
4777 --- /dev/null
4778 +++ b/crypto/ocf/hifn/hifnHIPP.c
4779 @@ -0,0 +1,420 @@
4780 +/*-
4781 + * Driver for Hifn HIPP-I/II chipset
4782 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
4783 + *
4784 + * Redistribution and use in source and binary forms, with or without
4785 + * modification, are permitted provided that the following conditions
4786 + * are met:
4787 + *
4788 + * 1. Redistributions of source code must retain the above copyright
4789 + *   notice, this list of conditions and the following disclaimer.
4790 + * 2. Redistributions in binary form must reproduce the above copyright
4791 + *   notice, this list of conditions and the following disclaimer in the
4792 + *   documentation and/or other materials provided with the distribution.
4793 + * 3. The name of the author may not be used to endorse or promote products
4794 + *   derived from this software without specific prior written permission.
4795 + *
4796 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
4797 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
4798 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
4799 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
4800 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
4801 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
4802 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
4803 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4804 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
4805 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4806 + *
4807 + * Effort sponsored by Hifn Inc.
4808 + *
4809 + */
4810 +
4811 +/*
4812 + * Driver for various Hifn encryption processors.
4813 + */
4814 +#ifndef AUTOCONF_INCLUDED
4815 +#include <linux/config.h>
4816 +#endif
4817 +#include <linux/module.h>
4818 +#include <linux/init.h>
4819 +#include <linux/list.h>
4820 +#include <linux/slab.h>
4821 +#include <linux/wait.h>
4822 +#include <linux/sched.h>
4823 +#include <linux/pci.h>
4824 +#include <linux/delay.h>
4825 +#include <linux/interrupt.h>
4826 +#include <linux/spinlock.h>
4827 +#include <linux/random.h>
4828 +#include <linux/version.h>
4829 +#include <linux/skbuff.h>
4830 +#include <linux/uio.h>
4831 +#include <linux/sysfs.h>
4832 +#include <linux/miscdevice.h>
4833 +#include <asm/io.h>
4834 +
4835 +#include <cryptodev.h>
4836 +
4837 +#include "hifnHIPPreg.h"
4838 +#include "hifnHIPPvar.h"
4839 +
4840 +#if 1
4841 +#define        DPRINTF(a...)   if (hipp_debug) { \
4842 +                                                       printk("%s: ", sc ? \
4843 +                                                               device_get_nameunit(sc->sc_dev) : "hifn"); \
4844 +                                                       printk(a); \
4845 +                                               } else
4846 +#else
4847 +#define        DPRINTF(a...)
4848 +#endif
4849 +
4850 +typedef int bus_size_t;
4851 +
4852 +static inline int
4853 +pci_get_revid(struct pci_dev *dev)
4854 +{
4855 +       u8 rid = 0;
4856 +       pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
4857 +       return rid;
4858 +}
4859 +
4860 +#define debug hipp_debug
4861 +int hipp_debug = 0;
4862 +module_param(hipp_debug, int, 0644);
4863 +MODULE_PARM_DESC(hipp_debug, "Enable debug");
4864 +
4865 +int hipp_maxbatch = 1;
4866 +module_param(hipp_maxbatch, int, 0644);
4867 +MODULE_PARM_DESC(hipp_maxbatch, "max ops to batch w/o interrupt");
4868 +
4869 +static int  hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent);
4870 +static void hipp_remove(struct pci_dev *dev);
4871 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
4872 +static irqreturn_t hipp_intr(int irq, void *arg);
4873 +#else
4874 +static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs);
4875 +#endif
4876 +
4877 +static int hipp_num_chips = 0;
4878 +static struct hipp_softc *hipp_chip_idx[HIPP_MAX_CHIPS];
4879 +
4880 +static int hipp_newsession(device_t, u_int32_t *, struct cryptoini *);
4881 +static int hipp_freesession(device_t, u_int64_t);
4882 +static int hipp_process(device_t, struct cryptop *, int);
4883 +
4884 +static device_method_t hipp_methods = {
4885 +       /* crypto device methods */
4886 +       DEVMETHOD(cryptodev_newsession, hipp_newsession),
4887 +       DEVMETHOD(cryptodev_freesession,hipp_freesession),
4888 +       DEVMETHOD(cryptodev_process,    hipp_process),
4889 +};
4890 +
4891 +static __inline u_int32_t
4892 +READ_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg)
4893 +{
4894 +       u_int32_t v = readl(sc->sc_bar[barno] + reg);
4895 +       //sc->sc_bar0_lastreg = (bus_size_t) -1;
4896 +       return (v);
4897 +}
4898 +static __inline void
4899 +WRITE_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg, u_int32_t val)
4900 +{
4901 +       writel(val, sc->sc_bar[barno] + reg);
4902 +}
4903 +
4904 +#define READ_REG_0(sc, reg)         READ_REG(sc, 0, reg)
4905 +#define WRITE_REG_0(sc, reg, val)   WRITE_REG(sc,0, reg, val)
4906 +#define READ_REG_1(sc, reg)         READ_REG(sc, 1, reg)
4907 +#define WRITE_REG_1(sc, reg, val)   WRITE_REG(sc,1, reg, val)
4908 +
4909 +static int
4910 +hipp_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
4911 +{
4912 +       return EINVAL;
4913 +}
4914 +
4915 +static int
4916 +hipp_freesession(device_t dev, u_int64_t tid)
4917 +{
4918 +       return EINVAL;
4919 +}
4920 +
4921 +static int
4922 +hipp_process(device_t dev, struct cryptop *crp, int hint)
4923 +{
4924 +       return EINVAL;
4925 +}
4926 +
4927 +static const char*
4928 +hipp_partname(struct hipp_softc *sc, char buf[128], size_t blen)
4929 +{
4930 +       char *n = NULL;
4931 +
4932 +       switch (pci_get_vendor(sc->sc_pcidev)) {
4933 +       case PCI_VENDOR_HIFN:
4934 +               switch (pci_get_device(sc->sc_pcidev)) {
4935 +               case PCI_PRODUCT_HIFN_7855:     n = "Hifn 7855";
4936 +               case PCI_PRODUCT_HIFN_8155:     n = "Hifn 8155";
4937 +               case PCI_PRODUCT_HIFN_6500:     n = "Hifn 6500";
4938 +               }
4939 +       }
4940 +
4941 +       if(n==NULL) {
4942 +               snprintf(buf, blen, "VID=%02x,PID=%02x",
4943 +                        pci_get_vendor(sc->sc_pcidev),
4944 +                        pci_get_device(sc->sc_pcidev));
4945 +       } else {
4946 +               buf[0]='\0';
4947 +               strncat(buf, n, blen);
4948 +       }
4949 +       return buf;
4950 +}
4951 +
4952 +struct hipp_fs_entry {
4953 +       struct attribute attr;
4954 +       /* other stuff */
4955 +};
4956 +
4957 +
4958 +static ssize_t
4959 +cryptoid_show(struct device *dev,
4960 +             struct device_attribute *attr,
4961 +             char *buf)                                                
4962 +{                                                              
4963 +       struct hipp_softc *sc;                                  
4964 +
4965 +       sc = pci_get_drvdata(to_pci_dev (dev));
4966 +       return sprintf (buf, "%d\n", sc->sc_cid);
4967 +}
4968 +
4969 +struct device_attribute hipp_dev_cryptoid = __ATTR_RO(cryptoid);
4970 +
4971 +/*
4972 + * Attach an interface that successfully probed.
4973 + */
4974 +static int
4975 +hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent)
4976 +{
4977 +       struct hipp_softc *sc = NULL;
4978 +       int i;
4979 +       //char rbase;
4980 +       //u_int16_t ena;
4981 +       int rev;
4982 +       //int rseg;
4983 +       int rc;
4984 +
4985 +       DPRINTF("%s()\n", __FUNCTION__);
4986 +
4987 +       if (pci_enable_device(dev) < 0)
4988 +               return(-ENODEV);
4989 +
4990 +       if (pci_set_mwi(dev))
4991 +               return(-ENODEV);
4992 +
4993 +       if (!dev->irq) {
4994 +               printk("hifn: found device with no IRQ assigned. check BIOS settings!");
4995 +               pci_disable_device(dev);
4996 +               return(-ENODEV);
4997 +       }
4998 +
4999 +       sc = (struct hipp_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
5000 +       if (!sc)
5001 +               return(-ENOMEM);
5002 +       memset(sc, 0, sizeof(*sc));
5003 +
5004 +       softc_device_init(sc, "hifn-hipp", hipp_num_chips, hipp_methods);
5005 +
5006 +       sc->sc_pcidev = dev;
5007 +       sc->sc_irq = -1;
5008 +       sc->sc_cid = -1;
5009 +       sc->sc_num = hipp_num_chips++;
5010 +
5011 +       if (sc->sc_num < HIPP_MAX_CHIPS)
5012 +               hipp_chip_idx[sc->sc_num] = sc;
5013 +
5014 +       pci_set_drvdata(sc->sc_pcidev, sc);
5015 +
5016 +       spin_lock_init(&sc->sc_mtx);
5017 +
5018 +       /*
5019 +        * Setup PCI resources.
5020 +        * The READ_REG_0, WRITE_REG_0, READ_REG_1,
5021 +        * and WRITE_REG_1 macros throughout the driver are used
5022 +        * to permit better debugging.
5023 +        */
5024 +       for(i=0; i<4; i++) {
5025 +               unsigned long mem_start, mem_len;
5026 +               mem_start = pci_resource_start(sc->sc_pcidev, i);
5027 +               mem_len   = pci_resource_len(sc->sc_pcidev, i);
5028 +               sc->sc_barphy[i] = (caddr_t)mem_start;
5029 +               sc->sc_bar[i] = (ocf_iomem_t) ioremap(mem_start, mem_len);
5030 +               if (!sc->sc_bar[i]) {
5031 +                       device_printf(sc->sc_dev, "cannot map bar%d register space\n", i);
5032 +                       goto fail;
5033 +               }
5034 +       }
5035 +
5036 +       //hipp_reset_board(sc, 0);
5037 +       pci_set_master(sc->sc_pcidev);
5038 +
5039 +       /*
5040 +        * Arrange the interrupt line.
5041 +        */
5042 +       rc = request_irq(dev->irq, hipp_intr, IRQF_SHARED, "hifn", sc);
5043 +       if (rc) {
5044 +               device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
5045 +               goto fail;
5046 +       }
5047 +       sc->sc_irq = dev->irq;
5048 +
5049 +       rev = READ_REG_1(sc, HIPP_1_REVID) & 0xffff;
5050 +
5051 +       {
5052 +               char b[32];
5053 +               device_printf(sc->sc_dev, "%s, rev %u",
5054 +                             hipp_partname(sc, b, sizeof(b)), rev);
5055 +       }
5056 +
5057 +#if 0
5058 +       if (sc->sc_flags & HIFN_IS_7956)
5059 +               printf(", pll=0x%x<%s clk, %ux mult>",
5060 +                       sc->sc_pllconfig,
5061 +                       sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
5062 +                       2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
5063 +#endif
5064 +       printf("\n");
5065 +
5066 +       sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
5067 +       if (sc->sc_cid < 0) {
5068 +               device_printf(sc->sc_dev, "could not get crypto driver id\n");
5069 +               goto fail;
5070 +       }
5071 +
5072 +#if 0 /* cannot work with a non-GPL module */
5073 +       /* make a sysfs entry to let the world know what entry we got */
5074 +       sysfs_create_file(&sc->sc_pcidev->dev.kobj, &hipp_dev_cryptoid.attr);
5075 +#endif
5076 +
5077 +#if 0
5078 +       init_timer(&sc->sc_tickto);
5079 +       sc->sc_tickto.function = hifn_tick;
5080 +       sc->sc_tickto.data = (unsigned long) sc->sc_num;
5081 +       mod_timer(&sc->sc_tickto, jiffies + HZ);
5082 +#endif
5083 +
5084 +#if 0 /* no code here yet ?? */
5085 +       crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
5086 +#endif
5087 +
5088 +       return (0);
5089 +
5090 +fail:
5091 +       if (sc->sc_cid >= 0)
5092 +               crypto_unregister_all(sc->sc_cid);
5093 +       if (sc->sc_irq != -1)
5094 +               free_irq(sc->sc_irq, sc);
5095 +       
5096 +#if 0
5097 +       if (sc->sc_dma) {
5098 +               /* Turn off DMA polling */
5099 +               WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
5100 +                           HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
5101 +               
5102 +               pci_free_consistent(sc->sc_pcidev,
5103 +                                   sizeof(*sc->sc_dma),
5104 +                                   sc->sc_dma, sc->sc_dma_physaddr);
5105 +       }
5106 +#endif
5107 +       kfree(sc);
5108 +       return (-ENXIO);
5109 +}
5110 +
5111 +/*
5112 + * Detach an interface that successfully probed.
5113 + */
5114 +static void
5115 +hipp_remove(struct pci_dev *dev)
5116 +{
5117 +       struct hipp_softc *sc = pci_get_drvdata(dev);
5118 +       unsigned long l_flags;
5119 +
5120 +       DPRINTF("%s()\n", __FUNCTION__);
5121 +
5122 +       /* disable interrupts */
5123 +       HIPP_LOCK(sc);
5124 +
5125 +#if 0
5126 +       WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
5127 +       HIFN_UNLOCK(sc);
5128 +
5129 +       /*XXX other resources */
5130 +       del_timer_sync(&sc->sc_tickto);
5131 +
5132 +       /* Turn off DMA polling */
5133 +       WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
5134 +           HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
5135 +#endif
5136 +
5137 +       crypto_unregister_all(sc->sc_cid);
5138 +
5139 +       free_irq(sc->sc_irq, sc);
5140 +
5141 +#if 0
5142 +       pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
5143 +                sc->sc_dma, sc->sc_dma_physaddr);
5144 +#endif
5145 +}
5146 +
5147 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
5148 +static irqreturn_t hipp_intr(int irq, void *arg)
5149 +#else
5150 +static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs)
5151 +#endif
5152 +{
5153 +       struct hipp_softc *sc = arg;
5154 +
5155 +       sc = sc; /* shut up compiler */
5156 +
5157 +       return IRQ_HANDLED;
5158 +}
5159 +
5160 +static struct pci_device_id hipp_pci_tbl[] = {
5161 +       { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7855,
5162 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
5163 +       { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_8155,
5164 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
5165 +};
5166 +MODULE_DEVICE_TABLE(pci, hipp_pci_tbl);
5167 +
5168 +static struct pci_driver hipp_driver = {
5169 +       .name         = "hipp",
5170 +       .id_table     = hipp_pci_tbl,
5171 +       .probe        = hipp_probe,
5172 +       .remove       = hipp_remove,
5173 +       /* add PM stuff here one day */
5174 +};
5175 +
5176 +static int __init hipp_init (void)
5177 +{
5178 +       struct hipp_softc *sc = NULL;
5179 +       int rc;
5180 +
5181 +       DPRINTF("%s(%p)\n", __FUNCTION__, hipp_init);
5182 +
5183 +       rc = pci_register_driver(&hipp_driver);
5184 +       pci_register_driver_compat(&hipp_driver, rc);
5185 +
5186 +       return rc;
5187 +}
5188 +
5189 +static void __exit hipp_exit (void)
5190 +{
5191 +       pci_unregister_driver(&hipp_driver);
5192 +}
5193 +
5194 +module_init(hipp_init);
5195 +module_exit(hipp_exit);
5196 +
5197 +MODULE_LICENSE("BSD");
5198 +MODULE_AUTHOR("Michael Richardson <mcr@xelerance.com>");
5199 +MODULE_DESCRIPTION("OCF driver for hifn HIPP-I/II PCI crypto devices");
5200 --- /dev/null
5201 +++ b/crypto/ocf/hifn/hifnHIPPreg.h
5202 @@ -0,0 +1,46 @@
5203 +/*-
5204 + * Hifn HIPP-I/HIPP-II (7855/8155) driver.
5205 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
5206 + *
5207 + * Redistribution and use in source and binary forms, with or without
5208 + * modification, are permitted provided that the following conditions
5209 + * are met:
5210 + *
5211 + * 1. Redistributions of source code must retain the above copyright
5212 + *    notice, this list of conditions and the following disclaimer.
5213 + * 2. Redistributions in binary form must reproduce the above copyright
5214 + *    notice, this list of conditions and the following disclaimer in the
5215 + *    documentation and/or other materials provided with the distribution.
5216 + * 3. The name of the author may not be used to endorse or promote products
5217 + *    derived from this software without specific prior written permission.
5218 + *
5219 + *
5220 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
5221 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
5222 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
5223 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
5224 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
5225 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
5226 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
5227 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5228 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
5229 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5230 + *
5231 + * Effort sponsored by Hifn inc.
5232 + *
5233 + */
5234 +
5235 +#ifndef __HIFNHIPP_H__
5236 +#define        __HIFNHIPP_H__
5237 +
5238 +/*
5239 + * PCI vendor and device identifiers
5240 + */
5241 +#define        PCI_VENDOR_HIFN         0x13a3          /* Hifn */
5242 +#define        PCI_PRODUCT_HIFN_6500   0x0006          /* 6500 */
5243 +#define        PCI_PRODUCT_HIFN_7855   0x001f          /* 7855 */
5244 +#define        PCI_PRODUCT_HIFN_8155   0x999           /* XXX 8155 */
5245 +
5246 +#define HIPP_1_REVID            0x01 /* BOGUS */
5247 +
5248 +#endif /* __HIFNHIPP_H__ */
5249 --- /dev/null
5250 +++ b/crypto/ocf/hifn/hifnHIPPvar.h
5251 @@ -0,0 +1,93 @@
5252 +/*
5253 + * Hifn HIPP-I/HIPP-II (7855/8155) driver.
5254 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
5255 + *
5256 + * Redistribution and use in source and binary forms, with or without
5257 + * modification, are permitted provided that the following conditions
5258 + * are met:
5259 + *
5260 + * 1. Redistributions of source code must retain the above copyright
5261 + *    notice, this list of conditions and the following disclaimer.
5262 + * 2. Redistributions in binary form must reproduce the above copyright
5263 + *    notice, this list of conditions and the following disclaimer in the
5264 + *    documentation and/or other materials provided with the distribution.
5265 + * 3. The name of the author may not be used to endorse or promote products
5266 + *    derived from this software without specific prior written permission.
5267 + *
5268 + *
5269 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
5270 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
5271 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
5272 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
5273 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
5274 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
5275 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
5276 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5277 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
5278 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5279 + *
5280 + * Effort sponsored by Hifn inc.
5281 + *
5282 + */
5283 +
5284 +#ifndef __HIFNHIPPVAR_H__
5285 +#define __HIFNHIPPVAR_H__
5286 +
5287 +#define HIPP_MAX_CHIPS 8
5288 +
5289 +/*
5290 + * Holds data specific to a single Hifn HIPP-I board.
5291 + */
5292 +struct hipp_softc {
5293 +       softc_device_decl                sc_dev;
5294 +
5295 +       struct pci_dev          *sc_pcidev;     /* device backpointer */
5296 +       ocf_iomem_t             sc_bar[5];
5297 +       caddr_t                 sc_barphy[5];   /* physical address */
5298 +       int                     sc_num;         /* for multiple devs */
5299 +       spinlock_t              sc_mtx;         /* per-instance lock */
5300 +       int32_t                 sc_cid;
5301 +       int                     sc_irq;
5302 +
5303 +#if 0
5304 +
5305 +       u_int32_t               sc_dmaier;
5306 +       u_int32_t               sc_drammodel;   /* 1=dram, 0=sram */
5307 +       u_int32_t               sc_pllconfig;   /* 7954/7955/7956 PLL config */
5308 +
5309 +       struct hifn_dma         *sc_dma;
5310 +       dma_addr_t              sc_dma_physaddr;/* physical address of sc_dma */
5311 +
5312 +       int                     sc_dmansegs;
5313 +       int                     sc_maxses;
5314 +       int                     sc_nsessions;
5315 +       struct hifn_session     *sc_sessions;
5316 +       int                     sc_ramsize;
5317 +       int                     sc_flags;
5318 +#define        HIFN_HAS_RNG            0x1     /* includes random number generator */
5319 +#define        HIFN_HAS_PUBLIC         0x2     /* includes public key support */
5320 +#define        HIFN_HAS_AES            0x4     /* includes AES support */
5321 +#define        HIFN_IS_7811            0x8     /* Hifn 7811 part */
5322 +#define        HIFN_IS_7956            0x10    /* Hifn 7956/7955 don't have SDRAM */
5323 +
5324 +       struct timer_list       sc_tickto;      /* for managing DMA */
5325 +
5326 +       int                     sc_rngfirst;
5327 +       int                     sc_rnghz;       /* RNG polling frequency */
5328 +
5329 +       int                     sc_c_busy;      /* command ring busy */
5330 +       int                     sc_s_busy;      /* source data ring busy */
5331 +       int                     sc_d_busy;      /* destination data ring busy */
5332 +       int                     sc_r_busy;      /* result ring busy */
5333 +       int                     sc_active;      /* for initial countdown */
5334 +       int                     sc_needwakeup;  /* ops q'd waiting on resources */
5335 +       int                     sc_curbatch;    /* # ops submitted w/o int */
5336 +       int                     sc_suspended;
5337 +       struct miscdevice       sc_miscdev;
5338 +#endif
5339 +};
5340 +
5341 +#define        HIPP_LOCK(_sc)          spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
5342 +#define        HIPP_UNLOCK(_sc)        spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
5343 +
5344 +#endif /* __HIFNHIPPVAR_H__ */
5345 --- /dev/null
5346 +++ b/crypto/ocf/safe/md5.c
5347 @@ -0,0 +1,308 @@
5348 +/*     $KAME: md5.c,v 1.5 2000/11/08 06:13:08 itojun Exp $     */
5349 +/*
5350 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5351 + * All rights reserved.
5352 + *
5353 + * Redistribution and use in source and binary forms, with or without
5354 + * modification, are permitted provided that the following conditions
5355 + * are met:
5356 + * 1. Redistributions of source code must retain the above copyright
5357 + *    notice, this list of conditions and the following disclaimer.
5358 + * 2. Redistributions in binary form must reproduce the above copyright
5359 + *    notice, this list of conditions and the following disclaimer in the
5360 + *    documentation and/or other materials provided with the distribution.
5361 + * 3. Neither the name of the project nor the names of its contributors
5362 + *    may be used to endorse or promote products derived from this software
5363 + *    without specific prior written permission.
5364 + *
5365 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
5366 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5367 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5368 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
5369 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
5370 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
5371 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5372 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5373 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5374 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5375 + * SUCH DAMAGE.
5376 + */
5377 +
5378 +#if 0
5379 +#include <sys/cdefs.h>
5380 +__FBSDID("$FreeBSD: src/sys/crypto/md5.c,v 1.9 2004/01/27 19:49:19 des Exp $");
5381 +
5382 +#include <sys/types.h>
5383 +#include <sys/cdefs.h>
5384 +#include <sys/time.h>
5385 +#include <sys/systm.h>
5386 +#include <crypto/md5.h>
5387 +#endif
5388 +
5389 +#define SHIFT(X, s) (((X) << (s)) | ((X) >> (32 - (s))))
5390 +
5391 +#define F(X, Y, Z) (((X) & (Y)) | ((~X) & (Z)))
5392 +#define G(X, Y, Z) (((X) & (Z)) | ((Y) & (~Z)))
5393 +#define H(X, Y, Z) ((X) ^ (Y) ^ (Z))
5394 +#define I(X, Y, Z) ((Y) ^ ((X) | (~Z)))
5395 +
5396 +#define ROUND1(a, b, c, d, k, s, i) { \
5397 +       (a) = (a) + F((b), (c), (d)) + X[(k)] + T[(i)]; \
5398 +       (a) = SHIFT((a), (s)); \
5399 +       (a) = (b) + (a); \
5400 +}
5401 +
5402 +#define ROUND2(a, b, c, d, k, s, i) { \
5403 +       (a) = (a) + G((b), (c), (d)) + X[(k)] + T[(i)]; \
5404 +       (a) = SHIFT((a), (s)); \
5405 +       (a) = (b) + (a); \
5406 +}
5407 +
5408 +#define ROUND3(a, b, c, d, k, s, i) { \
5409 +       (a) = (a) + H((b), (c), (d)) + X[(k)] + T[(i)]; \
5410 +       (a) = SHIFT((a), (s)); \
5411 +       (a) = (b) + (a); \
5412 +}
5413 +
5414 +#define ROUND4(a, b, c, d, k, s, i) { \
5415 +       (a) = (a) + I((b), (c), (d)) + X[(k)] + T[(i)]; \
5416 +       (a) = SHIFT((a), (s)); \
5417 +       (a) = (b) + (a); \
5418 +}
5419 +
5420 +#define Sa      7
5421 +#define Sb     12
5422 +#define Sc     17
5423 +#define Sd     22
5424 +
5425 +#define Se      5
5426 +#define Sf      9
5427 +#define Sg     14
5428 +#define Sh     20
5429 +
5430 +#define Si      4
5431 +#define Sj     11
5432 +#define Sk     16
5433 +#define Sl     23
5434 +
5435 +#define Sm      6
5436 +#define Sn     10
5437 +#define So     15
5438 +#define Sp     21
5439 +
5440 +#define MD5_A0 0x67452301
5441 +#define MD5_B0 0xefcdab89
5442 +#define MD5_C0 0x98badcfe
5443 +#define MD5_D0 0x10325476
5444 +
5445 +/* Integer part of 4294967296 times abs(sin(i)), where i is in radians. */
5446 +static const u_int32_t T[65] = {
5447 +       0,
5448 +       0xd76aa478,     0xe8c7b756,     0x242070db,     0xc1bdceee,
5449 +       0xf57c0faf,     0x4787c62a,     0xa8304613,     0xfd469501,
5450 +       0x698098d8,     0x8b44f7af,     0xffff5bb1,     0x895cd7be,
5451 +       0x6b901122,     0xfd987193,     0xa679438e,     0x49b40821,
5452 +
5453 +       0xf61e2562,     0xc040b340,     0x265e5a51,     0xe9b6c7aa,
5454 +       0xd62f105d,     0x2441453,      0xd8a1e681,     0xe7d3fbc8,
5455 +       0x21e1cde6,     0xc33707d6,     0xf4d50d87,     0x455a14ed,
5456 +       0xa9e3e905,     0xfcefa3f8,     0x676f02d9,     0x8d2a4c8a,
5457 +
5458 +       0xfffa3942,     0x8771f681,     0x6d9d6122,     0xfde5380c,
5459 +       0xa4beea44,     0x4bdecfa9,     0xf6bb4b60,     0xbebfbc70,
5460 +       0x289b7ec6,     0xeaa127fa,     0xd4ef3085,     0x4881d05,
5461 +       0xd9d4d039,     0xe6db99e5,     0x1fa27cf8,     0xc4ac5665,
5462 +
5463 +       0xf4292244,     0x432aff97,     0xab9423a7,     0xfc93a039,
5464 +       0x655b59c3,     0x8f0ccc92,     0xffeff47d,     0x85845dd1,
5465 +       0x6fa87e4f,     0xfe2ce6e0,     0xa3014314,     0x4e0811a1,
5466 +       0xf7537e82,     0xbd3af235,     0x2ad7d2bb,     0xeb86d391,
5467 +};
5468 +
5469 +static const u_int8_t md5_paddat[MD5_BUFLEN] = {
5470 +       0x80,   0,      0,      0,      0,      0,      0,      0,
5471 +       0,      0,      0,      0,      0,      0,      0,      0,
5472 +       0,      0,      0,      0,      0,      0,      0,      0,
5473 +       0,      0,      0,      0,      0,      0,      0,      0,
5474 +       0,      0,      0,      0,      0,      0,      0,      0,
5475 +       0,      0,      0,      0,      0,      0,      0,      0,
5476 +       0,      0,      0,      0,      0,      0,      0,      0,
5477 +       0,      0,      0,      0,      0,      0,      0,      0,      
5478 +};
5479 +
5480 +static void md5_calc(u_int8_t *, md5_ctxt *);
5481 +
5482 +void md5_init(ctxt)
5483 +       md5_ctxt *ctxt;
5484 +{
5485 +       ctxt->md5_n = 0;
5486 +       ctxt->md5_i = 0;
5487 +       ctxt->md5_sta = MD5_A0;
5488 +       ctxt->md5_stb = MD5_B0;
5489 +       ctxt->md5_stc = MD5_C0;
5490 +       ctxt->md5_std = MD5_D0;
5491 +       bzero(ctxt->md5_buf, sizeof(ctxt->md5_buf));
5492 +}
5493 +
5494 +void md5_loop(ctxt, input, len)
5495 +       md5_ctxt *ctxt;
5496 +       u_int8_t *input;
5497 +       u_int len; /* number of bytes */
5498 +{
5499 +       u_int gap, i;
5500 +
5501 +       ctxt->md5_n += len * 8; /* byte to bit */
5502 +       gap = MD5_BUFLEN - ctxt->md5_i;
5503 +
5504 +       if (len >= gap) {
5505 +               bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
5506 +                       gap);
5507 +               md5_calc(ctxt->md5_buf, ctxt);
5508 +
5509 +               for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN) {
5510 +                       md5_calc((u_int8_t *)(input + i), ctxt);
5511 +               }
5512 +               
5513 +               ctxt->md5_i = len - i;
5514 +               bcopy((void *)(input + i), (void *)ctxt->md5_buf, ctxt->md5_i);
5515 +       } else {
5516 +               bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
5517 +                       len);
5518 +               ctxt->md5_i += len;
5519 +       }
5520 +}
5521 +
5522 +void md5_pad(ctxt)
5523 +       md5_ctxt *ctxt;
5524 +{
5525 +       u_int gap;
5526 +
5527 +       /* Don't count up padding. Keep md5_n. */       
5528 +       gap = MD5_BUFLEN - ctxt->md5_i;
5529 +       if (gap > 8) {
5530 +               bcopy(md5_paddat,
5531 +                     (void *)(ctxt->md5_buf + ctxt->md5_i),
5532 +                     gap - sizeof(ctxt->md5_n));
5533 +       } else {
5534 +               /* including gap == 8 */
5535 +               bcopy(md5_paddat, (void *)(ctxt->md5_buf + ctxt->md5_i),
5536 +                       gap);
5537 +               md5_calc(ctxt->md5_buf, ctxt);
5538 +               bcopy((md5_paddat + gap),
5539 +                     (void *)ctxt->md5_buf,
5540 +                     MD5_BUFLEN - sizeof(ctxt->md5_n));
5541 +       }
5542 +
5543 +       /* 8 byte word */       
5544 +#if BYTE_ORDER == LITTLE_ENDIAN
5545 +       bcopy(&ctxt->md5_n8[0], &ctxt->md5_buf[56], 8);
5546 +#endif
5547 +#if BYTE_ORDER == BIG_ENDIAN
5548 +       ctxt->md5_buf[56] = ctxt->md5_n8[7];
5549 +       ctxt->md5_buf[57] = ctxt->md5_n8[6];
5550 +       ctxt->md5_buf[58] = ctxt->md5_n8[5];
5551 +       ctxt->md5_buf[59] = ctxt->md5_n8[4];
5552 +       ctxt->md5_buf[60] = ctxt->md5_n8[3];
5553 +       ctxt->md5_buf[61] = ctxt->md5_n8[2];
5554 +       ctxt->md5_buf[62] = ctxt->md5_n8[1];
5555 +       ctxt->md5_buf[63] = ctxt->md5_n8[0];
5556 +#endif
5557 +
5558 +       md5_calc(ctxt->md5_buf, ctxt);
5559 +}
5560 +
5561 +void md5_result(digest, ctxt)
5562 +       u_int8_t *digest;
5563 +       md5_ctxt *ctxt;
5564 +{
5565 +       /* 4 byte words */
5566 +#if BYTE_ORDER == LITTLE_ENDIAN
5567 +       bcopy(&ctxt->md5_st8[0], digest, 16);
5568 +#endif
5569 +#if BYTE_ORDER == BIG_ENDIAN
5570 +       digest[ 0] = ctxt->md5_st8[ 3]; digest[ 1] = ctxt->md5_st8[ 2];
5571 +       digest[ 2] = ctxt->md5_st8[ 1]; digest[ 3] = ctxt->md5_st8[ 0];
5572 +       digest[ 4] = ctxt->md5_st8[ 7]; digest[ 5] = ctxt->md5_st8[ 6];
5573 +       digest[ 6] = ctxt->md5_st8[ 5]; digest[ 7] = ctxt->md5_st8[ 4];
5574 +       digest[ 8] = ctxt->md5_st8[11]; digest[ 9] = ctxt->md5_st8[10];
5575 +       digest[10] = ctxt->md5_st8[ 9]; digest[11] = ctxt->md5_st8[ 8];
5576 +       digest[12] = ctxt->md5_st8[15]; digest[13] = ctxt->md5_st8[14];
5577 +       digest[14] = ctxt->md5_st8[13]; digest[15] = ctxt->md5_st8[12];
5578 +#endif
5579 +}
5580 +
5581 +static void md5_calc(b64, ctxt)
5582 +       u_int8_t *b64;
5583 +       md5_ctxt *ctxt;
5584 +{
5585 +       u_int32_t A = ctxt->md5_sta;
5586 +       u_int32_t B = ctxt->md5_stb;
5587 +       u_int32_t C = ctxt->md5_stc;
5588 +       u_int32_t D = ctxt->md5_std;
5589 +#if BYTE_ORDER == LITTLE_ENDIAN
5590 +       u_int32_t *X = (u_int32_t *)b64;
5591 +#endif 
5592 +#if BYTE_ORDER == BIG_ENDIAN
5593 +       /* 4 byte words */
5594 +       /* what a brute force but fast! */
5595 +       u_int32_t X[16];
5596 +       u_int8_t *y = (u_int8_t *)X;
5597 +       y[ 0] = b64[ 3]; y[ 1] = b64[ 2]; y[ 2] = b64[ 1]; y[ 3] = b64[ 0];
5598 +       y[ 4] = b64[ 7]; y[ 5] = b64[ 6]; y[ 6] = b64[ 5]; y[ 7] = b64[ 4];
5599 +       y[ 8] = b64[11]; y[ 9] = b64[10]; y[10] = b64[ 9]; y[11] = b64[ 8];
5600 +       y[12] = b64[15]; y[13] = b64[14]; y[14] = b64[13]; y[15] = b64[12];
5601 +       y[16] = b64[19]; y[17] = b64[18]; y[18] = b64[17]; y[19] = b64[16];
5602 +       y[20] = b64[23]; y[21] = b64[22]; y[22] = b64[21]; y[23] = b64[20];
5603 +       y[24] = b64[27]; y[25] = b64[26]; y[26] = b64[25]; y[27] = b64[24];
5604 +       y[28] = b64[31]; y[29] = b64[30]; y[30] = b64[29]; y[31] = b64[28];
5605 +       y[32] = b64[35]; y[33] = b64[34]; y[34] = b64[33]; y[35] = b64[32];
5606 +       y[36] = b64[39]; y[37] = b64[38]; y[38] = b64[37]; y[39] = b64[36];
5607 +       y[40] = b64[43]; y[41] = b64[42]; y[42] = b64[41]; y[43] = b64[40];
5608 +       y[44] = b64[47]; y[45] = b64[46]; y[46] = b64[45]; y[47] = b64[44];
5609 +       y[48] = b64[51]; y[49] = b64[50]; y[50] = b64[49]; y[51] = b64[48];
5610 +       y[52] = b64[55]; y[53] = b64[54]; y[54] = b64[53]; y[55] = b64[52];
5611 +       y[56] = b64[59]; y[57] = b64[58]; y[58] = b64[57]; y[59] = b64[56];
5612 +       y[60] = b64[63]; y[61] = b64[62]; y[62] = b64[61]; y[63] = b64[60];
5613 +#endif
5614 +
5615 +       ROUND1(A, B, C, D,  0, Sa,  1); ROUND1(D, A, B, C,  1, Sb,  2);
5616 +       ROUND1(C, D, A, B,  2, Sc,  3); ROUND1(B, C, D, A,  3, Sd,  4);
5617 +       ROUND1(A, B, C, D,  4, Sa,  5); ROUND1(D, A, B, C,  5, Sb,  6);
5618 +       ROUND1(C, D, A, B,  6, Sc,  7); ROUND1(B, C, D, A,  7, Sd,  8);
5619 +       ROUND1(A, B, C, D,  8, Sa,  9); ROUND1(D, A, B, C,  9, Sb, 10);
5620 +       ROUND1(C, D, A, B, 10, Sc, 11); ROUND1(B, C, D, A, 11, Sd, 12);
5621 +       ROUND1(A, B, C, D, 12, Sa, 13); ROUND1(D, A, B, C, 13, Sb, 14);
5622 +       ROUND1(C, D, A, B, 14, Sc, 15); ROUND1(B, C, D, A, 15, Sd, 16);
5623 +       
5624 +       ROUND2(A, B, C, D,  1, Se, 17); ROUND2(D, A, B, C,  6, Sf, 18);
5625 +       ROUND2(C, D, A, B, 11, Sg, 19); ROUND2(B, C, D, A,  0, Sh, 20);
5626 +       ROUND2(A, B, C, D,  5, Se, 21); ROUND2(D, A, B, C, 10, Sf, 22);
5627 +       ROUND2(C, D, A, B, 15, Sg, 23); ROUND2(B, C, D, A,  4, Sh, 24);
5628 +       ROUND2(A, B, C, D,  9, Se, 25); ROUND2(D, A, B, C, 14, Sf, 26);
5629 +       ROUND2(C, D, A, B,  3, Sg, 27); ROUND2(B, C, D, A,  8, Sh, 28);
5630 +       ROUND2(A, B, C, D, 13, Se, 29); ROUND2(D, A, B, C,  2, Sf, 30);
5631 +       ROUND2(C, D, A, B,  7, Sg, 31); ROUND2(B, C, D, A, 12, Sh, 32);
5632 +
5633 +       ROUND3(A, B, C, D,  5, Si, 33); ROUND3(D, A, B, C,  8, Sj, 34);
5634 +       ROUND3(C, D, A, B, 11, Sk, 35); ROUND3(B, C, D, A, 14, Sl, 36);
5635 +       ROUND3(A, B, C, D,  1, Si, 37); ROUND3(D, A, B, C,  4, Sj, 38);
5636 +       ROUND3(C, D, A, B,  7, Sk, 39); ROUND3(B, C, D, A, 10, Sl, 40);
5637 +       ROUND3(A, B, C, D, 13, Si, 41); ROUND3(D, A, B, C,  0, Sj, 42);
5638 +       ROUND3(C, D, A, B,  3, Sk, 43); ROUND3(B, C, D, A,  6, Sl, 44);
5639 +       ROUND3(A, B, C, D,  9, Si, 45); ROUND3(D, A, B, C, 12, Sj, 46);
5640 +       ROUND3(C, D, A, B, 15, Sk, 47); ROUND3(B, C, D, A,  2, Sl, 48);
5641 +       
5642 +       ROUND4(A, B, C, D,  0, Sm, 49); ROUND4(D, A, B, C,  7, Sn, 50); 
5643 +       ROUND4(C, D, A, B, 14, So, 51); ROUND4(B, C, D, A,  5, Sp, 52); 
5644 +       ROUND4(A, B, C, D, 12, Sm, 53); ROUND4(D, A, B, C,  3, Sn, 54); 
5645 +       ROUND4(C, D, A, B, 10, So, 55); ROUND4(B, C, D, A,  1, Sp, 56); 
5646 +       ROUND4(A, B, C, D,  8, Sm, 57); ROUND4(D, A, B, C, 15, Sn, 58); 
5647 +       ROUND4(C, D, A, B,  6, So, 59); ROUND4(B, C, D, A, 13, Sp, 60); 
5648 +       ROUND4(A, B, C, D,  4, Sm, 61); ROUND4(D, A, B, C, 11, Sn, 62); 
5649 +       ROUND4(C, D, A, B,  2, So, 63); ROUND4(B, C, D, A,  9, Sp, 64);
5650 +
5651 +       ctxt->md5_sta += A;
5652 +       ctxt->md5_stb += B;
5653 +       ctxt->md5_stc += C;
5654 +       ctxt->md5_std += D;
5655 +}
5656 --- /dev/null
5657 +++ b/crypto/ocf/safe/md5.h
5658 @@ -0,0 +1,76 @@
5659 +/*     $FreeBSD: src/sys/crypto/md5.h,v 1.4 2002/03/20 05:13:50 alfred Exp $   */
5660 +/*     $KAME: md5.h,v 1.4 2000/03/27 04:36:22 sumikawa Exp $   */
5661 +
5662 +/*
5663 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5664 + * All rights reserved.
5665 + *
5666 + * Redistribution and use in source and binary forms, with or without
5667 + * modification, are permitted provided that the following conditions
5668 + * are met:
5669 + * 1. Redistributions of source code must retain the above copyright
5670 + *    notice, this list of conditions and the following disclaimer.
5671 + * 2. Redistributions in binary form must reproduce the above copyright
5672 + *    notice, this list of conditions and the following disclaimer in the
5673 + *    documentation and/or other materials provided with the distribution.
5674 + * 3. Neither the name of the project nor the names of its contributors
5675 + *    may be used to endorse or promote products derived from this software
5676 + *    without specific prior written permission.
5677 + *
5678 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
5679 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5680 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5681 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
5682 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
5683 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
5684 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5685 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5686 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5687 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5688 + * SUCH DAMAGE.
5689 + */
5690 +
5691 +#ifndef _NETINET6_MD5_H_
5692 +#define _NETINET6_MD5_H_
5693 +
5694 +#define MD5_BUFLEN     64
5695 +
5696 +typedef struct {
5697 +       union {
5698 +               u_int32_t       md5_state32[4];
5699 +               u_int8_t        md5_state8[16];
5700 +       } md5_st;
5701 +
5702 +#define md5_sta                md5_st.md5_state32[0]
5703 +#define md5_stb                md5_st.md5_state32[1]
5704 +#define md5_stc                md5_st.md5_state32[2]
5705 +#define md5_std                md5_st.md5_state32[3]
5706 +#define md5_st8                md5_st.md5_state8
5707 +
5708 +       union {
5709 +               u_int64_t       md5_count64;
5710 +               u_int8_t        md5_count8[8];
5711 +       } md5_count;
5712 +#define md5_n  md5_count.md5_count64
5713 +#define md5_n8 md5_count.md5_count8
5714 +
5715 +       u_int   md5_i;
5716 +       u_int8_t        md5_buf[MD5_BUFLEN];
5717 +} md5_ctxt;
5718 +
5719 +extern void md5_init(md5_ctxt *);
5720 +extern void md5_loop(md5_ctxt *, u_int8_t *, u_int);
5721 +extern void md5_pad(md5_ctxt *);
5722 +extern void md5_result(u_int8_t *, md5_ctxt *);
5723 +
5724 +/* compatibility */
5725 +#define MD5_CTX                md5_ctxt
5726 +#define MD5Init(x)     md5_init((x))
5727 +#define MD5Update(x, y, z)     md5_loop((x), (y), (z))
5728 +#define MD5Final(x, y) \
5729 +do {                           \
5730 +       md5_pad((y));           \
5731 +       md5_result((x), (y));   \
5732 +} while (0)
5733 +
5734 +#endif /* ! _NETINET6_MD5_H_*/
5735 --- /dev/null
5736 +++ b/crypto/ocf/safe/safe.c
5737 @@ -0,0 +1,2288 @@
5738 +/*-
5739 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
5740 + * Copyright (C) 2004-2007 David McCullough
5741 + * The license and original author are listed below.
5742 + *
5743 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
5744 + * Copyright (c) 2003 Global Technology Associates, Inc.
5745 + * All rights reserved.
5746 + *
5747 + * Redistribution and use in source and binary forms, with or without
5748 + * modification, are permitted provided that the following conditions
5749 + * are met:
5750 + * 1. Redistributions of source code must retain the above copyright
5751 + *    notice, this list of conditions and the following disclaimer.
5752 + * 2. Redistributions in binary form must reproduce the above copyright
5753 + *    notice, this list of conditions and the following disclaimer in the
5754 + *    documentation and/or other materials provided with the distribution.
5755 + *
5756 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
5757 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5758 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5759 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
5760 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
5761 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
5762 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5763 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5764 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5765 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5766 + * SUCH DAMAGE.
5767 + *
5768 +__FBSDID("$FreeBSD: src/sys/dev/safe/safe.c,v 1.18 2007/03/21 03:42:50 sam Exp $");
5769 + */
5770 +
5771 +#ifndef AUTOCONF_INCLUDED
5772 +#include <linux/config.h>
5773 +#endif
5774 +#include <linux/module.h>
5775 +#include <linux/kernel.h>
5776 +#include <linux/init.h>
5777 +#include <linux/list.h>
5778 +#include <linux/slab.h>
5779 +#include <linux/wait.h>
5780 +#include <linux/sched.h>
5781 +#include <linux/pci.h>
5782 +#include <linux/delay.h>
5783 +#include <linux/interrupt.h>
5784 +#include <linux/spinlock.h>
5785 +#include <linux/random.h>
5786 +#include <linux/version.h>
5787 +#include <linux/skbuff.h>
5788 +#include <asm/io.h>
5789 +
5790 +/*
5791 + * SafeNet SafeXcel-1141 hardware crypto accelerator
5792 + */
5793 +
5794 +#include <cryptodev.h>
5795 +#include <uio.h>
5796 +#include <safe/safereg.h>
5797 +#include <safe/safevar.h>
5798 +
5799 +#if 1
5800 +#define        DPRINTF(a)      do { \
5801 +                                               if (debug) { \
5802 +                                                       printk("%s: ", sc ? \
5803 +                                                               device_get_nameunit(sc->sc_dev) : "safe"); \
5804 +                                                       printk a; \
5805 +                                               } \
5806 +                                       } while (0)
5807 +#else
5808 +#define        DPRINTF(a)
5809 +#endif
5810 +
5811 +/*
5812 + * until we find a cleaner way, include the BSD md5/sha1 code
5813 + * here
5814 + */
5815 +#define HMAC_HACK 1
5816 +#ifdef HMAC_HACK
5817 +#define LITTLE_ENDIAN 1234
5818 +#define BIG_ENDIAN 4321
5819 +#ifdef __LITTLE_ENDIAN
5820 +#define BYTE_ORDER LITTLE_ENDIAN
5821 +#endif
5822 +#ifdef __BIG_ENDIAN
5823 +#define BYTE_ORDER BIG_ENDIAN
5824 +#endif
5825 +#include <safe/md5.h>
5826 +#include <safe/md5.c>
5827 +#include <safe/sha1.h>
5828 +#include <safe/sha1.c>
5829 +
5830 +u_int8_t hmac_ipad_buffer[64] = {
5831 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5832 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5833 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5834 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5835 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5836 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5837 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5838 +    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
5839 +};
5840 +
5841 +u_int8_t hmac_opad_buffer[64] = {
5842 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5843 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5844 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5845 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5846 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5847 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5848 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5849 +    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
5850 +};
5851 +#endif /* HMAC_HACK */
5852 +
5853 +/* add proc entry for this */
5854 +struct safe_stats safestats;
5855 +
5856 +#define debug safe_debug
5857 +int safe_debug = 0;
5858 +module_param(safe_debug, int, 0644);
5859 +MODULE_PARM_DESC(safe_debug, "Enable debug");
5860 +
5861 +static void safe_callback(struct safe_softc *, struct safe_ringentry *);
5862 +static void safe_feed(struct safe_softc *, struct safe_ringentry *);
5863 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
5864 +static void safe_rng_init(struct safe_softc *);
5865 +int safe_rngbufsize = 8;               /* 32 bytes each read  */
5866 +module_param(safe_rngbufsize, int, 0644);
5867 +MODULE_PARM_DESC(safe_rngbufsize, "RNG polling buffer size (32-bit words)");
5868 +int safe_rngmaxalarm = 8;              /* max alarms before reset */
5869 +module_param(safe_rngmaxalarm, int, 0644);
5870 +MODULE_PARM_DESC(safe_rngmaxalarm, "RNG max alarms before reset");
5871 +#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
5872 +
5873 +static void safe_totalreset(struct safe_softc *sc);
5874 +static int safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op);
5875 +static int safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op);
5876 +static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re);
5877 +static int safe_kprocess(device_t dev, struct cryptkop *krp, int hint);
5878 +static int safe_kstart(struct safe_softc *sc);
5879 +static int safe_ksigbits(struct safe_softc *sc, struct crparam *cr);
5880 +static void safe_kfeed(struct safe_softc *sc);
5881 +static void safe_kpoll(unsigned long arg);
5882 +static void safe_kload_reg(struct safe_softc *sc, u_int32_t off,
5883 +                                                               u_int32_t len, struct crparam *n);
5884 +
5885 +static int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
5886 +static int safe_freesession(device_t, u_int64_t);
5887 +static int safe_process(device_t, struct cryptop *, int);
5888 +
5889 +static device_method_t safe_methods = {
5890 +       /* crypto device methods */
5891 +       DEVMETHOD(cryptodev_newsession, safe_newsession),
5892 +       DEVMETHOD(cryptodev_freesession,safe_freesession),
5893 +       DEVMETHOD(cryptodev_process,    safe_process),
5894 +       DEVMETHOD(cryptodev_kprocess,   safe_kprocess),
5895 +};
5896 +
5897 +#define        READ_REG(sc,r)                  readl((sc)->sc_base_addr + (r))
5898 +#define WRITE_REG(sc,r,val)            writel((val), (sc)->sc_base_addr + (r))
5899 +
5900 +#define SAFE_MAX_CHIPS 8
5901 +static struct safe_softc *safe_chip_idx[SAFE_MAX_CHIPS];
5902 +
5903 +/*
5904 + * split our buffers up into safe DMAable byte fragments to avoid lockup
5905 + * bug in 1141 HW on rev 1.0.
5906 + */
5907 +
5908 +static int
5909 +pci_map_linear(
5910 +       struct safe_softc *sc,
5911 +       struct safe_operand *buf,
5912 +       void *addr,
5913 +       int len)
5914 +{
5915 +       dma_addr_t tmp;
5916 +       int chunk, tlen = len;
5917 +
5918 +       tmp = pci_map_single(sc->sc_pcidev, addr, len, PCI_DMA_BIDIRECTIONAL);
5919 +
5920 +       buf->mapsize += len;
5921 +       while (len > 0) {
5922 +               chunk = (len > sc->sc_max_dsize) ? sc->sc_max_dsize : len;
5923 +               buf->segs[buf->nsegs].ds_addr = tmp;
5924 +               buf->segs[buf->nsegs].ds_len  = chunk;
5925 +               buf->segs[buf->nsegs].ds_tlen = tlen;
5926 +               buf->nsegs++;
5927 +               tmp  += chunk;
5928 +               len  -= chunk;
5929 +               tlen = 0;
5930 +       }
5931 +       return 0;
5932 +}
5933 +
5934 +/*
5935 + * map in a given uio buffer (great on some arches :-)
5936 + */
5937 +
5938 +static int
5939 +pci_map_uio(struct safe_softc *sc, struct safe_operand *buf, struct uio *uio)
5940 +{
5941 +       struct iovec *iov = uio->uio_iov;
5942 +       int n;
5943 +
5944 +       DPRINTF(("%s()\n", __FUNCTION__));
5945 +
5946 +       buf->mapsize = 0;
5947 +       buf->nsegs = 0;
5948 +
5949 +       for (n = 0; n < uio->uio_iovcnt; n++) {
5950 +               pci_map_linear(sc, buf, iov->iov_base, iov->iov_len);
5951 +               iov++;
5952 +       }
5953 +
5954 +       /* identify this buffer by the first segment */
5955 +       buf->map = (void *) buf->segs[0].ds_addr;
5956 +       return(0);
5957 +}
5958 +
5959 +/*
5960 + * map in a given sk_buff
5961 + */
5962 +
5963 +static int
5964 +pci_map_skb(struct safe_softc *sc,struct safe_operand *buf,struct sk_buff *skb)
5965 +{
5966 +       int i;
5967 +
5968 +       DPRINTF(("%s()\n", __FUNCTION__));
5969 +
5970 +       buf->mapsize = 0;
5971 +       buf->nsegs = 0;
5972 +
5973 +       pci_map_linear(sc, buf, skb->data, skb_headlen(skb));
5974 +
5975 +       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5976 +               pci_map_linear(sc, buf,
5977 +                               page_address(skb_shinfo(skb)->frags[i].page) +
5978 +                                                       skb_shinfo(skb)->frags[i].page_offset,
5979 +                               skb_shinfo(skb)->frags[i].size);
5980 +       }
5981 +
5982 +       /* identify this buffer by the first segment */
5983 +       buf->map = (void *) buf->segs[0].ds_addr;
5984 +       return(0);
5985 +}
5986 +
5987 +
5988 +#if 0 /* not needed at this time */
5989 +static void
5990 +pci_sync_operand(struct safe_softc *sc, struct safe_operand *buf)
5991 +{
5992 +       int i;
5993 +
5994 +       DPRINTF(("%s()\n", __FUNCTION__));
5995 +       for (i = 0; i < buf->nsegs; i++)
5996 +               pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
5997 +                               buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
5998 +}
5999 +#endif
6000 +
6001 +static void
6002 +pci_unmap_operand(struct safe_softc *sc, struct safe_operand *buf)
6003 +{
6004 +       int i;
6005 +       DPRINTF(("%s()\n", __FUNCTION__));
6006 +       for (i = 0; i < buf->nsegs; i++) {
6007 +               if (buf->segs[i].ds_tlen) {
6008 +                       DPRINTF(("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
6009 +                       pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
6010 +                                       buf->segs[i].ds_tlen, PCI_DMA_BIDIRECTIONAL);
6011 +                       DPRINTF(("%s - unmap %d 0x%x %d done\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
6012 +               }
6013 +               buf->segs[i].ds_addr = 0;
6014 +               buf->segs[i].ds_len = 0;
6015 +               buf->segs[i].ds_tlen = 0;
6016 +       }
6017 +       buf->nsegs = 0;
6018 +       buf->mapsize = 0;
6019 +       buf->map = 0;
6020 +}
6021 +
6022 +
6023 +/*
6024 + * SafeXcel Interrupt routine
6025 + */
6026 +static irqreturn_t
6027 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
6028 +safe_intr(int irq, void *arg)
6029 +#else
6030 +safe_intr(int irq, void *arg, struct pt_regs *regs)
6031 +#endif
6032 +{
6033 +       struct safe_softc *sc = arg;
6034 +       int stat;
6035 +       unsigned long flags;
6036 +
6037 +       stat = READ_REG(sc, SAFE_HM_STAT);
6038 +
6039 +       DPRINTF(("%s(stat=0x%x)\n", __FUNCTION__, stat));
6040 +
6041 +       if (stat == 0)          /* shared irq, not for us */
6042 +               return IRQ_NONE;
6043 +
6044 +       WRITE_REG(sc, SAFE_HI_CLR, stat);       /* IACK */
6045 +
6046 +       if ((stat & SAFE_INT_PE_DDONE)) {
6047 +               /*
6048 +                * Descriptor(s) done; scan the ring and
6049 +                * process completed operations.
6050 +                */
6051 +               spin_lock_irqsave(&sc->sc_ringmtx, flags);
6052 +               while (sc->sc_back != sc->sc_front) {
6053 +                       struct safe_ringentry *re = sc->sc_back;
6054 +
6055 +#ifdef SAFE_DEBUG
6056 +                       if (debug) {
6057 +                               safe_dump_ringstate(sc, __func__);
6058 +                               safe_dump_request(sc, __func__, re);
6059 +                       }
6060 +#endif
6061 +                       /*
6062 +                        * safe_process marks ring entries that were allocated
6063 +                        * but not used with a csr of zero.  This ensures the
6064 +                        * ring front pointer never needs to be set backwards
6065 +                        * in the event that an entry is allocated but not used
6066 +                        * because of a setup error.
6067 +                        */
6068 +                       DPRINTF(("%s re->re_desc.d_csr=0x%x\n", __FUNCTION__, re->re_desc.d_csr));
6069 +                       if (re->re_desc.d_csr != 0) {
6070 +                               if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) {
6071 +                                       DPRINTF(("%s !CSR_IS_DONE\n", __FUNCTION__));
6072 +                                       break;
6073 +                               }
6074 +                               if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) {
6075 +                                       DPRINTF(("%s !LEN_IS_DONE\n", __FUNCTION__));
6076 +                                       break;
6077 +                               }
6078 +                               sc->sc_nqchip--;
6079 +                               safe_callback(sc, re);
6080 +                       }
6081 +                       if (++(sc->sc_back) == sc->sc_ringtop)
6082 +                               sc->sc_back = sc->sc_ring;
6083 +               }
6084 +               spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6085 +       }
6086 +
6087 +       /*
6088 +        * Check to see if we got any DMA Error
6089 +        */
6090 +       if (stat & SAFE_INT_PE_ERROR) {
6091 +               printk("%s: dmaerr dmastat %08x\n", device_get_nameunit(sc->sc_dev),
6092 +                               (int)READ_REG(sc, SAFE_PE_DMASTAT));
6093 +               safestats.st_dmaerr++;
6094 +               safe_totalreset(sc);
6095 +#if 0
6096 +               safe_feed(sc);
6097 +#endif
6098 +       }
6099 +
6100 +       if (sc->sc_needwakeup) {                /* XXX check high watermark */
6101 +               int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
6102 +               DPRINTF(("%s: wakeup crypto %x\n", __func__,
6103 +                       sc->sc_needwakeup));
6104 +               sc->sc_needwakeup &= ~wakeup;
6105 +               crypto_unblock(sc->sc_cid, wakeup);
6106 +       }
6107 +       
6108 +       return IRQ_HANDLED;
6109 +}
6110 +
6111 +/*
6112 + * safe_feed() - post a request to chip
6113 + */
6114 +static void
6115 +safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
6116 +{
6117 +       DPRINTF(("%s()\n", __FUNCTION__));
6118 +#ifdef SAFE_DEBUG
6119 +       if (debug) {
6120 +               safe_dump_ringstate(sc, __func__);
6121 +               safe_dump_request(sc, __func__, re);
6122 +       }
6123 +#endif
6124 +       sc->sc_nqchip++;
6125 +       if (sc->sc_nqchip > safestats.st_maxqchip)
6126 +               safestats.st_maxqchip = sc->sc_nqchip;
6127 +       /* poke h/w to check descriptor ring, any value can be written */
6128 +       WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
6129 +}
6130 +
6131 +#define        N(a)    (sizeof(a) / sizeof (a[0]))
6132 +static void
6133 +safe_setup_enckey(struct safe_session *ses, caddr_t key)
6134 +{
6135 +       int i;
6136 +
6137 +       bcopy(key, ses->ses_key, ses->ses_klen / 8);
6138 +
6139 +       /* PE is little-endian, ensure proper byte order */
6140 +       for (i = 0; i < N(ses->ses_key); i++)
6141 +               ses->ses_key[i] = htole32(ses->ses_key[i]);
6142 +}
6143 +
6144 +static void
6145 +safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
6146 +{
6147 +#ifdef HMAC_HACK
6148 +       MD5_CTX md5ctx;
6149 +       SHA1_CTX sha1ctx;
6150 +       int i;
6151 +
6152 +
6153 +       for (i = 0; i < klen; i++)
6154 +               key[i] ^= HMAC_IPAD_VAL;
6155 +
6156 +       if (algo == CRYPTO_MD5_HMAC) {
6157 +               MD5Init(&md5ctx);
6158 +               MD5Update(&md5ctx, key, klen);
6159 +               MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
6160 +               bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
6161 +       } else {
6162 +               SHA1Init(&sha1ctx);
6163 +               SHA1Update(&sha1ctx, key, klen);
6164 +               SHA1Update(&sha1ctx, hmac_ipad_buffer,
6165 +                   SHA1_HMAC_BLOCK_LEN - klen);
6166 +               bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
6167 +       }
6168 +
6169 +       for (i = 0; i < klen; i++)
6170 +               key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
6171 +
6172 +       if (algo == CRYPTO_MD5_HMAC) {
6173 +               MD5Init(&md5ctx);
6174 +               MD5Update(&md5ctx, key, klen);
6175 +               MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
6176 +               bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
6177 +       } else {
6178 +               SHA1Init(&sha1ctx);
6179 +               SHA1Update(&sha1ctx, key, klen);
6180 +               SHA1Update(&sha1ctx, hmac_opad_buffer,
6181 +                   SHA1_HMAC_BLOCK_LEN - klen);
6182 +               bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
6183 +       }
6184 +
6185 +       for (i = 0; i < klen; i++)
6186 +               key[i] ^= HMAC_OPAD_VAL;
6187 +
6188 +#if 0
6189 +       /*
6190 +        * this code prevents SHA working on a BE host,
6191 +        * so it is obviously wrong.  I think the byte
6192 +        * swap setup we do with the chip fixes this for us
6193 +        */
6194 +
6195 +       /* PE is little-endian, ensure proper byte order */
6196 +       for (i = 0; i < N(ses->ses_hminner); i++) {
6197 +               ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
6198 +               ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
6199 +       }
6200 +#endif
6201 +#else /* HMAC_HACK */
6202 +       printk("safe: md5/sha not implemented\n");
6203 +#endif /* HMAC_HACK */
6204 +}
6205 +#undef N
6206 +
6207 +/*
6208 + * Allocate a new 'session' and return an encoded session id.  'sidp'
6209 + * contains our registration id, and should contain an encoded session
6210 + * id on successful allocation.
6211 + */
6212 +static int
6213 +safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
6214 +{
6215 +       struct safe_softc *sc = device_get_softc(dev);
6216 +       struct cryptoini *c, *encini = NULL, *macini = NULL;
6217 +       struct safe_session *ses = NULL;
6218 +       int sesn;
6219 +
6220 +       DPRINTF(("%s()\n", __FUNCTION__));
6221 +
6222 +       if (sidp == NULL || cri == NULL || sc == NULL)
6223 +               return (EINVAL);
6224 +
6225 +       for (c = cri; c != NULL; c = c->cri_next) {
6226 +               if (c->cri_alg == CRYPTO_MD5_HMAC ||
6227 +                   c->cri_alg == CRYPTO_SHA1_HMAC ||
6228 +                   c->cri_alg == CRYPTO_NULL_HMAC) {
6229 +                       if (macini)
6230 +                               return (EINVAL);
6231 +                       macini = c;
6232 +               } else if (c->cri_alg == CRYPTO_DES_CBC ||
6233 +                   c->cri_alg == CRYPTO_3DES_CBC ||
6234 +                   c->cri_alg == CRYPTO_AES_CBC ||
6235 +                   c->cri_alg == CRYPTO_NULL_CBC) {
6236 +                       if (encini)
6237 +                               return (EINVAL);
6238 +                       encini = c;
6239 +               } else
6240 +                       return (EINVAL);
6241 +       }
6242 +       if (encini == NULL && macini == NULL)
6243 +               return (EINVAL);
6244 +       if (encini) {                   /* validate key length */
6245 +               switch (encini->cri_alg) {
6246 +               case CRYPTO_DES_CBC:
6247 +                       if (encini->cri_klen != 64)
6248 +                               return (EINVAL);
6249 +                       break;
6250 +               case CRYPTO_3DES_CBC:
6251 +                       if (encini->cri_klen != 192)
6252 +                               return (EINVAL);
6253 +                       break;
6254 +               case CRYPTO_AES_CBC:
6255 +                       if (encini->cri_klen != 128 &&
6256 +                           encini->cri_klen != 192 &&
6257 +                           encini->cri_klen != 256)
6258 +                               return (EINVAL);
6259 +                       break;
6260 +               }
6261 +       }
6262 +
6263 +       if (sc->sc_sessions == NULL) {
6264 +               ses = sc->sc_sessions = (struct safe_session *)
6265 +                       kmalloc(sizeof(struct safe_session), SLAB_ATOMIC);
6266 +               if (ses == NULL)
6267 +                       return (ENOMEM);
6268 +               memset(ses, 0, sizeof(struct safe_session));
6269 +               sesn = 0;
6270 +               sc->sc_nsessions = 1;
6271 +       } else {
6272 +               for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
6273 +                       if (sc->sc_sessions[sesn].ses_used == 0) {
6274 +                               ses = &sc->sc_sessions[sesn];
6275 +                               break;
6276 +                       }
6277 +               }
6278 +
6279 +               if (ses == NULL) {
6280 +                       sesn = sc->sc_nsessions;
6281 +                       ses = (struct safe_session *)
6282 +                               kmalloc((sesn + 1) * sizeof(struct safe_session), SLAB_ATOMIC);
6283 +                       if (ses == NULL)
6284 +                               return (ENOMEM);
6285 +                       memset(ses, 0, (sesn + 1) * sizeof(struct safe_session));
6286 +                       bcopy(sc->sc_sessions, ses, sesn *
6287 +                           sizeof(struct safe_session));
6288 +                       bzero(sc->sc_sessions, sesn *
6289 +                           sizeof(struct safe_session));
6290 +                       kfree(sc->sc_sessions);
6291 +                       sc->sc_sessions = ses;
6292 +                       ses = &sc->sc_sessions[sesn];
6293 +                       sc->sc_nsessions++;
6294 +               }
6295 +       }
6296 +
6297 +       bzero(ses, sizeof(struct safe_session));
6298 +       ses->ses_used = 1;
6299 +
6300 +       if (encini) {
6301 +               /* get an IV */
6302 +               /* XXX may read fewer than requested */
6303 +               read_random(ses->ses_iv, sizeof(ses->ses_iv));
6304 +
6305 +               ses->ses_klen = encini->cri_klen;
6306 +               if (encini->cri_key != NULL)
6307 +                       safe_setup_enckey(ses, encini->cri_key);
6308 +       }
6309 +
6310 +       if (macini) {
6311 +               ses->ses_mlen = macini->cri_mlen;
6312 +               if (ses->ses_mlen == 0) {
6313 +                       if (macini->cri_alg == CRYPTO_MD5_HMAC)
6314 +                               ses->ses_mlen = MD5_HASH_LEN;
6315 +                       else
6316 +                               ses->ses_mlen = SHA1_HASH_LEN;
6317 +               }
6318 +
6319 +               if (macini->cri_key != NULL) {
6320 +                       safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
6321 +                           macini->cri_klen / 8);
6322 +               }
6323 +       }
6324 +
6325 +       *sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
6326 +       return (0);
6327 +}
6328 +
6329 +/*
6330 + * Deallocate a session.
6331 + */
6332 +static int
6333 +safe_freesession(device_t dev, u_int64_t tid)
6334 +{
6335 +       struct safe_softc *sc = device_get_softc(dev);
6336 +       int session, ret;
6337 +       u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
6338 +
6339 +       DPRINTF(("%s()\n", __FUNCTION__));
6340 +
6341 +       if (sc == NULL)
6342 +               return (EINVAL);
6343 +
6344 +       session = SAFE_SESSION(sid);
6345 +       if (session < sc->sc_nsessions) {
6346 +               bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
6347 +               ret = 0;
6348 +       } else
6349 +               ret = EINVAL;
6350 +       return (ret);
6351 +}
6352 +
6353 +
6354 +static int
6355 +safe_process(device_t dev, struct cryptop *crp, int hint)
6356 +{
6357 +       struct safe_softc *sc = device_get_softc(dev);
6358 +       int err = 0, i, nicealign, uniform;
6359 +       struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
6360 +       int bypass, oplen, ivsize;
6361 +       caddr_t iv;
6362 +       int16_t coffset;
6363 +       struct safe_session *ses;
6364 +       struct safe_ringentry *re;
6365 +       struct safe_sarec *sa;
6366 +       struct safe_pdesc *pd;
6367 +       u_int32_t cmd0, cmd1, staterec;
6368 +       unsigned long flags;
6369 +
6370 +       DPRINTF(("%s()\n", __FUNCTION__));
6371 +
6372 +       if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
6373 +               safestats.st_invalid++;
6374 +               return (EINVAL);
6375 +       }
6376 +       if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
6377 +               safestats.st_badsession++;
6378 +               return (EINVAL);
6379 +       }
6380 +
6381 +       spin_lock_irqsave(&sc->sc_ringmtx, flags);
6382 +       if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
6383 +               safestats.st_ringfull++;
6384 +               sc->sc_needwakeup |= CRYPTO_SYMQ;
6385 +               spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6386 +               return (ERESTART);
6387 +       }
6388 +       re = sc->sc_front;
6389 +
6390 +       staterec = re->re_sa.sa_staterec;       /* save */
6391 +       /* NB: zero everything but the PE descriptor */
6392 +       bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
6393 +       re->re_sa.sa_staterec = staterec;       /* restore */
6394 +
6395 +       re->re_crp = crp;
6396 +       re->re_sesn = SAFE_SESSION(crp->crp_sid);
6397 +
6398 +       re->re_src.nsegs = 0;
6399 +       re->re_dst.nsegs = 0;
6400 +
6401 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
6402 +               re->re_src_skb = (struct sk_buff *)crp->crp_buf;
6403 +               re->re_dst_skb = (struct sk_buff *)crp->crp_buf;
6404 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
6405 +               re->re_src_io = (struct uio *)crp->crp_buf;
6406 +               re->re_dst_io = (struct uio *)crp->crp_buf;
6407 +       } else {
6408 +               safestats.st_badflags++;
6409 +               err = EINVAL;
6410 +               goto errout;    /* XXX we don't handle contiguous blocks! */
6411 +       }
6412 +
6413 +       sa = &re->re_sa;
6414 +       ses = &sc->sc_sessions[re->re_sesn];
6415 +
6416 +       crd1 = crp->crp_desc;
6417 +       if (crd1 == NULL) {
6418 +               safestats.st_nodesc++;
6419 +               err = EINVAL;
6420 +               goto errout;
6421 +       }
6422 +       crd2 = crd1->crd_next;
6423 +
6424 +       cmd0 = SAFE_SA_CMD0_BASIC;              /* basic group operation */
6425 +       cmd1 = 0;
6426 +       if (crd2 == NULL) {
6427 +               if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
6428 +                   crd1->crd_alg == CRYPTO_SHA1_HMAC ||
6429 +                   crd1->crd_alg == CRYPTO_NULL_HMAC) {
6430 +                       maccrd = crd1;
6431 +                       enccrd = NULL;
6432 +                       cmd0 |= SAFE_SA_CMD0_OP_HASH;
6433 +               } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
6434 +                   crd1->crd_alg == CRYPTO_3DES_CBC ||
6435 +                   crd1->crd_alg == CRYPTO_AES_CBC ||
6436 +                   crd1->crd_alg == CRYPTO_NULL_CBC) {
6437 +                       maccrd = NULL;
6438 +                       enccrd = crd1;
6439 +                       cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
6440 +               } else {
6441 +                       safestats.st_badalg++;
6442 +                       err = EINVAL;
6443 +                       goto errout;
6444 +               }
6445 +       } else {
6446 +               if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
6447 +                   crd1->crd_alg == CRYPTO_SHA1_HMAC ||
6448 +                   crd1->crd_alg == CRYPTO_NULL_HMAC) &&
6449 +                   (crd2->crd_alg == CRYPTO_DES_CBC ||
6450 +                       crd2->crd_alg == CRYPTO_3DES_CBC ||
6451 +                       crd2->crd_alg == CRYPTO_AES_CBC ||
6452 +                       crd2->crd_alg == CRYPTO_NULL_CBC) &&
6453 +                   ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
6454 +                       maccrd = crd1;
6455 +                       enccrd = crd2;
6456 +               } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
6457 +                   crd1->crd_alg == CRYPTO_3DES_CBC ||
6458 +                   crd1->crd_alg == CRYPTO_AES_CBC ||
6459 +                   crd1->crd_alg == CRYPTO_NULL_CBC) &&
6460 +                   (crd2->crd_alg == CRYPTO_MD5_HMAC ||
6461 +                       crd2->crd_alg == CRYPTO_SHA1_HMAC ||
6462 +                       crd2->crd_alg == CRYPTO_NULL_HMAC) &&
6463 +                   (crd1->crd_flags & CRD_F_ENCRYPT)) {
6464 +                       enccrd = crd1;
6465 +                       maccrd = crd2;
6466 +               } else {
6467 +                       safestats.st_badalg++;
6468 +                       err = EINVAL;
6469 +                       goto errout;
6470 +               }
6471 +               cmd0 |= SAFE_SA_CMD0_OP_BOTH;
6472 +       }
6473 +
6474 +       if (enccrd) {
6475 +               if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
6476 +                       safe_setup_enckey(ses, enccrd->crd_key);
6477 +
6478 +               if (enccrd->crd_alg == CRYPTO_DES_CBC) {
6479 +                       cmd0 |= SAFE_SA_CMD0_DES;
6480 +                       cmd1 |= SAFE_SA_CMD1_CBC;
6481 +                       ivsize = 2*sizeof(u_int32_t);
6482 +               } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
6483 +                       cmd0 |= SAFE_SA_CMD0_3DES;
6484 +                       cmd1 |= SAFE_SA_CMD1_CBC;
6485 +                       ivsize = 2*sizeof(u_int32_t);
6486 +               } else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
6487 +                       cmd0 |= SAFE_SA_CMD0_AES;
6488 +                       cmd1 |= SAFE_SA_CMD1_CBC;
6489 +                       if (ses->ses_klen == 128)
6490 +                            cmd1 |=  SAFE_SA_CMD1_AES128;
6491 +                       else if (ses->ses_klen == 192)
6492 +                            cmd1 |=  SAFE_SA_CMD1_AES192;
6493 +                       else
6494 +                            cmd1 |=  SAFE_SA_CMD1_AES256;
6495 +                       ivsize = 4*sizeof(u_int32_t);
6496 +               } else {
6497 +                       cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
6498 +                       ivsize = 0;
6499 +               }
6500 +
6501 +               /*
6502 +                * Setup encrypt/decrypt state.  When using basic ops
6503 +                * we can't use an inline IV because hash/crypt offset
6504 +                * must be from the end of the IV to the start of the
6505 +                * crypt data and this leaves out the preceding header
6506 +                * from the hash calculation.  Instead we place the IV
6507 +                * in the state record and set the hash/crypt offset to
6508 +                * copy both the header+IV.
6509 +                */
6510 +               if (enccrd->crd_flags & CRD_F_ENCRYPT) {
6511 +                       cmd0 |= SAFE_SA_CMD0_OUTBOUND;
6512 +
6513 +                       if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
6514 +                               iv = enccrd->crd_iv;
6515 +                       else
6516 +                               iv = (caddr_t) ses->ses_iv;
6517 +                       if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
6518 +                               crypto_copyback(crp->crp_flags, crp->crp_buf,
6519 +                                   enccrd->crd_inject, ivsize, iv);
6520 +                       }
6521 +                       bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
6522 +                       /* make iv LE */
6523 +                       for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
6524 +                               re->re_sastate.sa_saved_iv[i] =
6525 +                                       cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
6526 +                       cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
6527 +                       re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
6528 +               } else {
6529 +                       cmd0 |= SAFE_SA_CMD0_INBOUND;
6530 +
6531 +                       if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
6532 +                               bcopy(enccrd->crd_iv,
6533 +                                       re->re_sastate.sa_saved_iv, ivsize);
6534 +                       } else {
6535 +                               crypto_copydata(crp->crp_flags, crp->crp_buf,
6536 +                                   enccrd->crd_inject, ivsize,
6537 +                                   (caddr_t)re->re_sastate.sa_saved_iv);
6538 +                       }
6539 +                       /* make iv LE */
6540 +                       for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
6541 +                               re->re_sastate.sa_saved_iv[i] =
6542 +                                       cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
6543 +                       cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
6544 +               }
6545 +               /*
6546 +                * For basic encryption use the zero pad algorithm.
6547 +                * This pads results to an 8-byte boundary and
6548 +                * suppresses padding verification for inbound (i.e.
6549 +                * decrypt) operations.
6550 +                *
6551 +                * NB: Not sure if the 8-byte pad boundary is a problem.
6552 +                */
6553 +               cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
6554 +
6555 +               /* XXX assert key bufs have the same size */
6556 +               bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
6557 +       }
6558 +
6559 +       if (maccrd) {
6560 +               if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
6561 +                       safe_setup_mackey(ses, maccrd->crd_alg,
6562 +                           maccrd->crd_key, maccrd->crd_klen / 8);
6563 +               }
6564 +
6565 +               if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
6566 +                       cmd0 |= SAFE_SA_CMD0_MD5;
6567 +                       cmd1 |= SAFE_SA_CMD1_HMAC;      /* NB: enable HMAC */
6568 +               } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
6569 +                       cmd0 |= SAFE_SA_CMD0_SHA1;
6570 +                       cmd1 |= SAFE_SA_CMD1_HMAC;      /* NB: enable HMAC */
6571 +               } else {
6572 +                       cmd0 |= SAFE_SA_CMD0_HASH_NULL;
6573 +               }
6574 +               /*
6575 +                * Digest data is loaded from the SA and the hash
6576 +                * result is saved to the state block where we
6577 +                * retrieve it for return to the caller.
6578 +                */
6579 +               /* XXX assert digest bufs have the same size */
6580 +               bcopy(ses->ses_hminner, sa->sa_indigest,
6581 +                       sizeof(sa->sa_indigest));
6582 +               bcopy(ses->ses_hmouter, sa->sa_outdigest,
6583 +                       sizeof(sa->sa_outdigest));
6584 +
6585 +               cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
6586 +               re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
6587 +       }
6588 +
6589 +       if (enccrd && maccrd) {
6590 +               /*
6591 +                * The offset from hash data to the start of
6592 +                * crypt data is the difference in the skips.
6593 +                */
6594 +               bypass = maccrd->crd_skip;
6595 +               coffset = enccrd->crd_skip - maccrd->crd_skip;
6596 +               if (coffset < 0) {
6597 +                       DPRINTF(("%s: hash does not precede crypt; "
6598 +                               "mac skip %u enc skip %u\n",
6599 +                               __func__, maccrd->crd_skip, enccrd->crd_skip));
6600 +                       safestats.st_skipmismatch++;
6601 +                       err = EINVAL;
6602 +                       goto errout;
6603 +               }
6604 +               oplen = enccrd->crd_skip + enccrd->crd_len;
6605 +               if (maccrd->crd_skip + maccrd->crd_len != oplen) {
6606 +                       DPRINTF(("%s: hash amount %u != crypt amount %u\n",
6607 +                               __func__, maccrd->crd_skip + maccrd->crd_len,
6608 +                               oplen));
6609 +                       safestats.st_lenmismatch++;
6610 +                       err = EINVAL;
6611 +                       goto errout;
6612 +               }
6613 +#ifdef SAFE_DEBUG
6614 +               if (debug) {
6615 +                       printf("mac: skip %d, len %d, inject %d\n",
6616 +                           maccrd->crd_skip, maccrd->crd_len,
6617 +                           maccrd->crd_inject);
6618 +                       printf("enc: skip %d, len %d, inject %d\n",
6619 +                           enccrd->crd_skip, enccrd->crd_len,
6620 +                           enccrd->crd_inject);
6621 +                       printf("bypass %d coffset %d oplen %d\n",
6622 +                               bypass, coffset, oplen);
6623 +               }
6624 +#endif
6625 +               if (coffset & 3) {      /* offset must be 32-bit aligned */
6626 +                       DPRINTF(("%s: coffset %u misaligned\n",
6627 +                               __func__, coffset));
6628 +                       safestats.st_coffmisaligned++;
6629 +                       err = EINVAL;
6630 +                       goto errout;
6631 +               }
6632 +               coffset >>= 2;
6633 +               if (coffset > 255) {    /* offset must be <256 dwords */
6634 +                       DPRINTF(("%s: coffset %u too big\n",
6635 +                               __func__, coffset));
6636 +                       safestats.st_cofftoobig++;
6637 +                       err = EINVAL;
6638 +                       goto errout;
6639 +               }
6640 +               /*
6641 +                * Tell the hardware to copy the header to the output.
6642 +                * The header is defined as the data from the end of
6643 +                * the bypass to the start of data to be encrypted. 
6644 +                * Typically this is the inline IV.  Note that you need
6645 +                * to do this even if src+dst are the same; it appears
6646 +                * that w/o this bit the crypted data is written
6647 +                * immediately after the bypass data.
6648 +                */
6649 +               cmd1 |= SAFE_SA_CMD1_HDRCOPY;
6650 +               /*
6651 +                * Disable IP header mutable bit handling.  This is
6652 +                * needed to get correct HMAC calculations.
6653 +                */
6654 +               cmd1 |= SAFE_SA_CMD1_MUTABLE;
6655 +       } else {
6656 +               if (enccrd) {
6657 +                       bypass = enccrd->crd_skip;
6658 +                       oplen = bypass + enccrd->crd_len;
6659 +               } else {
6660 +                       bypass = maccrd->crd_skip;
6661 +                       oplen = bypass + maccrd->crd_len;
6662 +               }
6663 +               coffset = 0;
6664 +       }
6665 +       /* XXX verify multiple of 4 when using s/g */
6666 +       if (bypass > 96) {              /* bypass offset must be <= 96 bytes */
6667 +               DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
6668 +               safestats.st_bypasstoobig++;
6669 +               err = EINVAL;
6670 +               goto errout;
6671 +       }
6672 +
6673 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
6674 +               if (pci_map_skb(sc, &re->re_src, re->re_src_skb)) {
6675 +                       safestats.st_noload++;
6676 +                       err = ENOMEM;
6677 +                       goto errout;
6678 +               }
6679 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
6680 +               if (pci_map_uio(sc, &re->re_src, re->re_src_io)) {
6681 +                       safestats.st_noload++;
6682 +                       err = ENOMEM;
6683 +                       goto errout;
6684 +               }
6685 +       }
6686 +       nicealign = safe_dmamap_aligned(sc, &re->re_src);
6687 +       uniform = safe_dmamap_uniform(sc, &re->re_src);
6688 +
6689 +       DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
6690 +               nicealign, uniform, re->re_src.nsegs));
6691 +       if (re->re_src.nsegs > 1) {
6692 +               re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
6693 +                       ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
6694 +               for (i = 0; i < re->re_src_nsegs; i++) {
6695 +                       /* NB: no need to check if there's space */
6696 +                       pd = sc->sc_spfree;
6697 +                       if (++(sc->sc_spfree) == sc->sc_springtop)
6698 +                               sc->sc_spfree = sc->sc_spring;
6699 +
6700 +                       KASSERT((pd->pd_flags&3) == 0 ||
6701 +                               (pd->pd_flags&3) == SAFE_PD_DONE,
6702 +                               ("bogus source particle descriptor; flags %x",
6703 +                               pd->pd_flags));
6704 +                       pd->pd_addr = re->re_src_segs[i].ds_addr;
6705 +                       pd->pd_size = re->re_src_segs[i].ds_len;
6706 +                       pd->pd_flags = SAFE_PD_READY;
6707 +               }
6708 +               cmd0 |= SAFE_SA_CMD0_IGATHER;
6709 +       } else {
6710 +               /*
6711 +                * No need for gather, reference the operand directly.
6712 +                */
6713 +               re->re_desc.d_src = re->re_src_segs[0].ds_addr;
6714 +       }
6715 +
6716 +       if (enccrd == NULL && maccrd != NULL) {
6717 +               /*
6718 +                * Hash op; no destination needed.
6719 +                */
6720 +       } else {
6721 +               if (crp->crp_flags & (CRYPTO_F_IOV|CRYPTO_F_SKBUF)) {
6722 +                       if (!nicealign) {
6723 +                               safestats.st_iovmisaligned++;
6724 +                               err = EINVAL;
6725 +                               goto errout;
6726 +                       }
6727 +                       if (uniform != 1) {
6728 +                               device_printf(sc->sc_dev, "!uniform source\n");
6729 +                               if (!uniform) {
6730 +                                       /*
6731 +                                        * There's no way to handle the DMA
6732 +                                        * requirements with this uio.  We
6733 +                                        * could create a separate DMA area for
6734 +                                        * the result and then copy it back,
6735 +                                        * but for now we just bail and return
6736 +                                        * an error.  Note that uio requests
6737 +                                        * > SAFE_MAX_DSIZE are handled because
6738 +                                        * the DMA map and segment list for the
6739 +                                        * destination will result in a
6740 +                                        * destination particle list that does
6741 +                                        * the necessary scatter DMA.
6742 +                                        */ 
6743 +                                       safestats.st_iovnotuniform++;
6744 +                                       err = EINVAL;
6745 +                                       goto errout;
6746 +                               }
6747 +                       } else
6748 +                               re->re_dst = re->re_src;
6749 +               } else {
6750 +                       safestats.st_badflags++;
6751 +                       err = EINVAL;
6752 +                       goto errout;
6753 +               }
6754 +
6755 +               if (re->re_dst.nsegs > 1) {
6756 +                       re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
6757 +                           ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
6758 +                       for (i = 0; i < re->re_dst_nsegs; i++) {
6759 +                               pd = sc->sc_dpfree;
6760 +                               KASSERT((pd->pd_flags&3) == 0 ||
6761 +                                       (pd->pd_flags&3) == SAFE_PD_DONE,
6762 +                                       ("bogus dest particle descriptor; flags %x",
6763 +                                               pd->pd_flags));
6764 +                               if (++(sc->sc_dpfree) == sc->sc_dpringtop)
6765 +                                       sc->sc_dpfree = sc->sc_dpring;
6766 +                               pd->pd_addr = re->re_dst_segs[i].ds_addr;
6767 +                               pd->pd_flags = SAFE_PD_READY;
6768 +                       }
6769 +                       cmd0 |= SAFE_SA_CMD0_OSCATTER;
6770 +               } else {
6771 +                       /*
6772 +                        * No need for scatter, reference the operand directly.
6773 +                        */
6774 +                       re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
6775 +               }
6776 +       }
6777 +
6778 +       /*
6779 +        * All done with setup; fillin the SA command words
6780 +        * and the packet engine descriptor.  The operation
6781 +        * is now ready for submission to the hardware.
6782 +        */
6783 +       sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
6784 +       sa->sa_cmd1 = cmd1
6785 +                   | (coffset << SAFE_SA_CMD1_OFFSET_S)
6786 +                   | SAFE_SA_CMD1_SAREV1       /* Rev 1 SA data structure */
6787 +                   | SAFE_SA_CMD1_SRPCI
6788 +                   ;
6789 +       /*
6790 +        * NB: the order of writes is important here.  In case the
6791 +        * chip is scanning the ring because of an outstanding request
6792 +        * it might nab this one too.  In that case we need to make
6793 +        * sure the setup is complete before we write the length
6794 +        * field of the descriptor as it signals the descriptor is
6795 +        * ready for processing.
6796 +        */
6797 +       re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
6798 +       if (maccrd)
6799 +               re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
6800 +       wmb();
6801 +       re->re_desc.d_len = oplen
6802 +                         | SAFE_PE_LEN_READY
6803 +                         | (bypass << SAFE_PE_LEN_BYPASS_S)
6804 +                         ;
6805 +
6806 +       safestats.st_ipackets++;
6807 +       safestats.st_ibytes += oplen;
6808 +
6809 +       if (++(sc->sc_front) == sc->sc_ringtop)
6810 +               sc->sc_front = sc->sc_ring;
6811 +
6812 +       /* XXX honor batching */
6813 +       safe_feed(sc, re);
6814 +       spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6815 +       return (0);
6816 +
6817 +errout:
6818 +       if (re->re_src.map != re->re_dst.map)
6819 +               pci_unmap_operand(sc, &re->re_dst);
6820 +       if (re->re_src.map)
6821 +               pci_unmap_operand(sc, &re->re_src);
6822 +       spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6823 +       if (err != ERESTART) {
6824 +               crp->crp_etype = err;
6825 +               crypto_done(crp);
6826 +       } else {
6827 +               sc->sc_needwakeup |= CRYPTO_SYMQ;
6828 +       }
6829 +       return (err);
6830 +}
6831 +
6832 +static void
6833 +safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
6834 +{
6835 +       struct cryptop *crp = (struct cryptop *)re->re_crp;
6836 +       struct cryptodesc *crd;
6837 +
6838 +       DPRINTF(("%s()\n", __FUNCTION__));
6839 +
6840 +       safestats.st_opackets++;
6841 +       safestats.st_obytes += re->re_dst.mapsize;
6842 +
6843 +       if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
6844 +               device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
6845 +                       re->re_desc.d_csr,
6846 +                       re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
6847 +               safestats.st_peoperr++;
6848 +               crp->crp_etype = EIO;           /* something more meaningful? */
6849 +       }
6850 +
6851 +       if (re->re_dst.map != NULL && re->re_dst.map != re->re_src.map)
6852 +               pci_unmap_operand(sc, &re->re_dst);
6853 +       pci_unmap_operand(sc, &re->re_src);
6854 +
6855 +       /* 
6856 +        * If result was written to a different mbuf chain, swap
6857 +        * it in as the return value and reclaim the original.
6858 +        */
6859 +       if ((crp->crp_flags & CRYPTO_F_SKBUF) && re->re_src_skb != re->re_dst_skb) {
6860 +               device_printf(sc->sc_dev, "no CRYPTO_F_SKBUF swapping support\n");
6861 +               /* kfree_skb(skb) */
6862 +               /* crp->crp_buf = (caddr_t)re->re_dst_skb */
6863 +               return;
6864 +       }
6865 +
6866 +       if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
6867 +               /* copy out IV for future use */
6868 +               for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
6869 +                       int i;
6870 +                       int ivsize;
6871 +
6872 +                       if (crd->crd_alg == CRYPTO_DES_CBC ||
6873 +                           crd->crd_alg == CRYPTO_3DES_CBC) {
6874 +                               ivsize = 2*sizeof(u_int32_t);
6875 +                       } else if (crd->crd_alg == CRYPTO_AES_CBC) {
6876 +                               ivsize = 4*sizeof(u_int32_t);
6877 +                       } else
6878 +                               continue;
6879 +                       crypto_copydata(crp->crp_flags, crp->crp_buf,
6880 +                           crd->crd_skip + crd->crd_len - ivsize, ivsize,
6881 +                           (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
6882 +                       for (i = 0;
6883 +                                       i < ivsize/sizeof(sc->sc_sessions[re->re_sesn].ses_iv[0]);
6884 +                                       i++)
6885 +                               sc->sc_sessions[re->re_sesn].ses_iv[i] =
6886 +                                       cpu_to_le32(sc->sc_sessions[re->re_sesn].ses_iv[i]);
6887 +                       break;
6888 +               }
6889 +       }
6890 +
6891 +       if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
6892 +               /* copy out ICV result */
6893 +               for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
6894 +                       if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
6895 +                           crd->crd_alg == CRYPTO_SHA1_HMAC ||
6896 +                           crd->crd_alg == CRYPTO_NULL_HMAC))
6897 +                               continue;
6898 +                       if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
6899 +                               /*
6900 +                                * SHA-1 ICV's are byte-swapped; fix 'em up
6901 +                                * before copying them to their destination.
6902 +                                */
6903 +                               re->re_sastate.sa_saved_indigest[0] =
6904 +                                       cpu_to_be32(re->re_sastate.sa_saved_indigest[0]);
6905 +                               re->re_sastate.sa_saved_indigest[1] = 
6906 +                                       cpu_to_be32(re->re_sastate.sa_saved_indigest[1]);
6907 +                               re->re_sastate.sa_saved_indigest[2] =
6908 +                                       cpu_to_be32(re->re_sastate.sa_saved_indigest[2]);
6909 +                       } else {
6910 +                               re->re_sastate.sa_saved_indigest[0] =
6911 +                                       cpu_to_le32(re->re_sastate.sa_saved_indigest[0]);
6912 +                               re->re_sastate.sa_saved_indigest[1] = 
6913 +                                       cpu_to_le32(re->re_sastate.sa_saved_indigest[1]);
6914 +                               re->re_sastate.sa_saved_indigest[2] =
6915 +                                       cpu_to_le32(re->re_sastate.sa_saved_indigest[2]);
6916 +                       }
6917 +                       crypto_copyback(crp->crp_flags, crp->crp_buf,
6918 +                           crd->crd_inject,
6919 +                           sc->sc_sessions[re->re_sesn].ses_mlen,
6920 +                           (caddr_t)re->re_sastate.sa_saved_indigest);
6921 +                       break;
6922 +               }
6923 +       }
6924 +       crypto_done(crp);
6925 +}
6926 +
6927 +
6928 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
6929 +#define        SAFE_RNG_MAXWAIT        1000
6930 +
6931 +static void
6932 +safe_rng_init(struct safe_softc *sc)
6933 +{
6934 +       u_int32_t w, v;
6935 +       int i;
6936 +
6937 +       DPRINTF(("%s()\n", __FUNCTION__));
6938 +
6939 +       WRITE_REG(sc, SAFE_RNG_CTRL, 0);
6940 +       /* use default value according to the manual */
6941 +       WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);    /* magic from SafeNet */
6942 +       WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
6943 +
6944 +       /*
6945 +        * There is a bug in rev 1.0 of the 1140 that when the RNG
6946 +        * is brought out of reset the ready status flag does not
6947 +        * work until the RNG has finished its internal initialization.
6948 +        *
6949 +        * So in order to determine the device is through its
6950 +        * initialization we must read the data register, using the
6951 +        * status reg in the read in case it is initialized.  Then read
6952 +        * the data register until it changes from the first read.
6953 +        * Once it changes read the data register until it changes
6954 +        * again.  At this time the RNG is considered initialized. 
6955 +        * This could take between 750ms - 1000ms in time.
6956 +        */
6957 +       i = 0;
6958 +       w = READ_REG(sc, SAFE_RNG_OUT);
6959 +       do {
6960 +               v = READ_REG(sc, SAFE_RNG_OUT);
6961 +               if (v != w) {
6962 +                       w = v;
6963 +                       break;
6964 +               }
6965 +               DELAY(10);
6966 +       } while (++i < SAFE_RNG_MAXWAIT);
6967 +
6968 +       /* Wait until data changes again */
6969 +       i = 0;
6970 +       do {
6971 +               v = READ_REG(sc, SAFE_RNG_OUT);
6972 +               if (v != w)
6973 +                       break;
6974 +               DELAY(10);
6975 +       } while (++i < SAFE_RNG_MAXWAIT);
6976 +}
6977 +
6978 +static __inline void
6979 +safe_rng_disable_short_cycle(struct safe_softc *sc)
6980 +{
6981 +       DPRINTF(("%s()\n", __FUNCTION__));
6982 +
6983 +       WRITE_REG(sc, SAFE_RNG_CTRL,
6984 +               READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
6985 +}
6986 +
6987 +static __inline void
6988 +safe_rng_enable_short_cycle(struct safe_softc *sc)
6989 +{
6990 +       DPRINTF(("%s()\n", __FUNCTION__));
6991 +
6992 +       WRITE_REG(sc, SAFE_RNG_CTRL, 
6993 +               READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
6994 +}
6995 +
6996 +static __inline u_int32_t
6997 +safe_rng_read(struct safe_softc *sc)
6998 +{
6999 +       int i;
7000 +
7001 +       i = 0;
7002 +       while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
7003 +               ;
7004 +       return READ_REG(sc, SAFE_RNG_OUT);
7005 +}
7006 +
7007 +static int
7008 +safe_read_random(void *arg, u_int32_t *buf, int maxwords)
7009 +{
7010 +       struct safe_softc *sc = (struct safe_softc *) arg;
7011 +       int i, rc;
7012 +
7013 +       DPRINTF(("%s()\n", __FUNCTION__));
7014 +       
7015 +       safestats.st_rng++;
7016 +       /*
7017 +        * Fetch the next block of data.
7018 +        */
7019 +       if (maxwords > safe_rngbufsize)
7020 +               maxwords = safe_rngbufsize;
7021 +       if (maxwords > SAFE_RNG_MAXBUFSIZ)
7022 +               maxwords = SAFE_RNG_MAXBUFSIZ;
7023 +retry:
7024 +       /* read as much as we can */
7025 +       for (rc = 0; rc < maxwords; rc++) {
7026 +               if (READ_REG(sc, SAFE_RNG_STAT) != 0)
7027 +                       break;
7028 +               buf[rc] = READ_REG(sc, SAFE_RNG_OUT);
7029 +       }
7030 +       if (rc == 0)
7031 +               return 0;
7032 +       /*
7033 +        * Check the comparator alarm count and reset the h/w if
7034 +        * it exceeds our threshold.  This guards against the
7035 +        * hardware oscillators resonating with external signals.
7036 +        */
7037 +       if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
7038 +               u_int32_t freq_inc, w;
7039 +
7040 +               DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
7041 +                       (unsigned)READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
7042 +               safestats.st_rngalarm++;
7043 +               safe_rng_enable_short_cycle(sc);
7044 +               freq_inc = 18;
7045 +               for (i = 0; i < 64; i++) {
7046 +                       w = READ_REG(sc, SAFE_RNG_CNFG);
7047 +                       freq_inc = ((w + freq_inc) & 0x3fL);
7048 +                       w = ((w & ~0x3fL) | freq_inc);
7049 +                       WRITE_REG(sc, SAFE_RNG_CNFG, w);
7050 +
7051 +                       WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
7052 +
7053 +                       (void) safe_rng_read(sc);
7054 +                       DELAY(25);
7055 +
7056 +                       if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
7057 +                               safe_rng_disable_short_cycle(sc);
7058 +                               goto retry;
7059 +                       }
7060 +                       freq_inc = 1;
7061 +               }
7062 +               safe_rng_disable_short_cycle(sc);
7063 +       } else
7064 +               WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
7065 +
7066 +       return(rc);
7067 +}
7068 +#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
7069 +
7070 +
7071 +/*
7072 + * Resets the board.  Values in the registers are left as is
7073 + * from the reset (i.e. initial values are assigned elsewhere).
7074 + */
7075 +static void
7076 +safe_reset_board(struct safe_softc *sc)
7077 +{
7078 +       u_int32_t v;
7079 +       /*
7080 +        * Reset the device.  The manual says no delay
7081 +        * is needed between marking and clearing reset.
7082 +        */
7083 +       DPRINTF(("%s()\n", __FUNCTION__));
7084 +
7085 +       v = READ_REG(sc, SAFE_PE_DMACFG) &~
7086 +               (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
7087 +                SAFE_PE_DMACFG_SGRESET);
7088 +       WRITE_REG(sc, SAFE_PE_DMACFG, v
7089 +                                   | SAFE_PE_DMACFG_PERESET
7090 +                                   | SAFE_PE_DMACFG_PDRRESET
7091 +                                   | SAFE_PE_DMACFG_SGRESET);
7092 +       WRITE_REG(sc, SAFE_PE_DMACFG, v);
7093 +}
7094 +
7095 +/*
7096 + * Initialize registers we need to touch only once.
7097 + */
7098 +static void
7099 +safe_init_board(struct safe_softc *sc)
7100 +{
7101 +       u_int32_t v, dwords;
7102 +
7103 +       DPRINTF(("%s()\n", __FUNCTION__));
7104 +
7105 +       v = READ_REG(sc, SAFE_PE_DMACFG);
7106 +       v &=~ (   SAFE_PE_DMACFG_PEMODE
7107 +                       | SAFE_PE_DMACFG_FSENA          /* failsafe enable */
7108 +                       | SAFE_PE_DMACFG_GPRPCI         /* gather ring on PCI */
7109 +                       | SAFE_PE_DMACFG_SPRPCI         /* scatter ring on PCI */
7110 +                       | SAFE_PE_DMACFG_ESDESC         /* endian-swap descriptors */
7111 +                       | SAFE_PE_DMACFG_ESPDESC        /* endian-swap part. desc's */
7112 +                       | SAFE_PE_DMACFG_ESSA           /* endian-swap SA's */
7113 +                       | SAFE_PE_DMACFG_ESPACKET       /* swap the packet data */
7114 +                 );
7115 +       v |= SAFE_PE_DMACFG_FSENA               /* failsafe enable */
7116 +         |  SAFE_PE_DMACFG_GPRPCI              /* gather ring on PCI */
7117 +         |  SAFE_PE_DMACFG_SPRPCI              /* scatter ring on PCI */
7118 +         |  SAFE_PE_DMACFG_ESDESC              /* endian-swap descriptors */
7119 +         |  SAFE_PE_DMACFG_ESPDESC             /* endian-swap part. desc's */
7120 +         |  SAFE_PE_DMACFG_ESSA                /* endian-swap SA's */
7121 +#if 0
7122 +         |  SAFE_PE_DMACFG_ESPACKET    /* swap the packet data */
7123 +#endif
7124 +         ;
7125 +       WRITE_REG(sc, SAFE_PE_DMACFG, v);
7126 +
7127 +#ifdef __BIG_ENDIAN
7128 +       /* tell the safenet that we are 4321 and not 1234 */
7129 +       WRITE_REG(sc, SAFE_ENDIAN, 0xe4e41b1b);
7130 +#endif
7131 +
7132 +       if (sc->sc_chiprev == SAFE_REV(1,0)) {
7133 +               /*
7134 +                * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
7135 +                * "target mode transfers" done while the chip is DMA'ing
7136 +                * >1020 bytes cause the hardware to lockup.  To avoid this
7137 +                * we reduce the max PCI transfer size and use small source
7138 +                * particle descriptors (<= 256 bytes).
7139 +                */
7140 +               WRITE_REG(sc, SAFE_DMA_CFG, 256);
7141 +               device_printf(sc->sc_dev,
7142 +                       "Reduce max DMA size to %u words for rev %u.%u WAR\n",
7143 +                       (unsigned) ((READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff),
7144 +                       (unsigned) SAFE_REV_MAJ(sc->sc_chiprev),
7145 +                       (unsigned) SAFE_REV_MIN(sc->sc_chiprev));
7146 +               sc->sc_max_dsize = 256;
7147 +       } else {
7148 +               sc->sc_max_dsize = SAFE_MAX_DSIZE;
7149 +       }
7150 +
7151 +       /* NB: operands+results are overlaid */
7152 +       WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
7153 +       WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
7154 +       /*
7155 +        * Configure ring entry size and number of items in the ring.
7156 +        */
7157 +       KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
7158 +               ("PE ring entry not 32-bit aligned!"));
7159 +       dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
7160 +       WRITE_REG(sc, SAFE_PE_RINGCFG,
7161 +               (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
7162 +       WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);     /* disable polling */
7163 +
7164 +       WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
7165 +       WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
7166 +       WRITE_REG(sc, SAFE_PE_PARTSIZE,
7167 +               (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
7168 +       /*
7169 +        * NB: destination particles are fixed size.  We use
7170 +        *     an mbuf cluster and require all results go to
7171 +        *     clusters or smaller.
7172 +        */
7173 +       WRITE_REG(sc, SAFE_PE_PARTCFG, sc->sc_max_dsize);
7174 +
7175 +       /* it's now safe to enable PE mode, do it */
7176 +       WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
7177 +
7178 +       /*
7179 +        * Configure hardware to use level-triggered interrupts and
7180 +        * to interrupt after each descriptor is processed.
7181 +        */
7182 +       WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
7183 +       WRITE_REG(sc, SAFE_HI_CLR, 0xffffffff);
7184 +       WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
7185 +       WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
7186 +}
7187 +
7188 +
7189 +/*
7190 + * Clean up after a chip crash.
7191 + * It is assumed that the caller in splimp()
7192 + */
7193 +static void
7194 +safe_cleanchip(struct safe_softc *sc)
7195 +{
7196 +       DPRINTF(("%s()\n", __FUNCTION__));
7197 +
7198 +       if (sc->sc_nqchip != 0) {
7199 +               struct safe_ringentry *re = sc->sc_back;
7200 +
7201 +               while (re != sc->sc_front) {
7202 +                       if (re->re_desc.d_csr != 0)
7203 +                               safe_free_entry(sc, re);
7204 +                       if (++re == sc->sc_ringtop)
7205 +                               re = sc->sc_ring;
7206 +               }
7207 +               sc->sc_back = re;
7208 +               sc->sc_nqchip = 0;
7209 +       }
7210 +}
7211 +
7212 +/*
7213 + * free a safe_q
7214 + * It is assumed that the caller is within splimp().
7215 + */
7216 +static int
7217 +safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
7218 +{
7219 +       struct cryptop *crp;
7220 +
7221 +       DPRINTF(("%s()\n", __FUNCTION__));
7222 +
7223 +       /*
7224 +        * Free header MCR
7225 +        */
7226 +       if ((re->re_dst_skb != NULL) && (re->re_src_skb != re->re_dst_skb))
7227 +#ifdef NOTYET
7228 +               m_freem(re->re_dst_m);
7229 +#else
7230 +               printk("%s,%d: SKB not supported\n", __FILE__, __LINE__);
7231 +#endif
7232 +
7233 +       crp = (struct cryptop *)re->re_crp;
7234 +       
7235 +       re->re_desc.d_csr = 0;
7236 +       
7237 +       crp->crp_etype = EFAULT;
7238 +       crypto_done(crp);
7239 +       return(0);
7240 +}
7241 +
7242 +/*
7243 + * Routine to reset the chip and clean up.
7244 + * It is assumed that the caller is in splimp()
7245 + */
7246 +static void
7247 +safe_totalreset(struct safe_softc *sc)
7248 +{
7249 +       DPRINTF(("%s()\n", __FUNCTION__));
7250 +
7251 +       safe_reset_board(sc);
7252 +       safe_init_board(sc);
7253 +       safe_cleanchip(sc);
7254 +}
7255 +
7256 +/*
7257 + * Is the operand suitably aligned for direct DMA.  Each
7258 + * segment must be aligned on a 32-bit boundary and all
7259 + * but the last segment must be a multiple of 4 bytes.
7260 + */
7261 +static int
7262 +safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op)
7263 +{
7264 +       int i;
7265 +
7266 +       DPRINTF(("%s()\n", __FUNCTION__));
7267 +
7268 +       for (i = 0; i < op->nsegs; i++) {
7269 +               if (op->segs[i].ds_addr & 3)
7270 +                       return (0);
7271 +               if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
7272 +                       return (0);
7273 +       }
7274 +       return (1);
7275 +}
7276 +
7277 +/*
7278 + * Is the operand suitable for direct DMA as the destination
7279 + * of an operation.  The hardware requires that each ``particle''
7280 + * but the last in an operation result have the same size.  We
7281 + * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
7282 + * 0 if some segment is not a multiple of this size, 1 if all
7283 + * segments are exactly this size, or 2 if segments are at worst
7284 + * a multiple of this size.
7285 + */
7286 +static int
7287 +safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op)
7288 +{
7289 +       int result = 1;
7290 +
7291 +       DPRINTF(("%s()\n", __FUNCTION__));
7292 +
7293 +       if (op->nsegs > 0) {
7294 +               int i;
7295 +
7296 +               for (i = 0; i < op->nsegs-1; i++) {
7297 +                       if (op->segs[i].ds_len % sc->sc_max_dsize)
7298 +                               return (0);
7299 +                       if (op->segs[i].ds_len != sc->sc_max_dsize)
7300 +                               result = 2;
7301 +               }
7302 +       }
7303 +       return (result);
7304 +}
7305 +
7306 +static int
7307 +safe_kprocess(device_t dev, struct cryptkop *krp, int hint)
7308 +{
7309 +       struct safe_softc *sc = device_get_softc(dev);
7310 +       struct safe_pkq *q;
7311 +       unsigned long flags;
7312 +
7313 +       DPRINTF(("%s()\n", __FUNCTION__));
7314 +
7315 +       if (sc == NULL) {
7316 +               krp->krp_status = EINVAL;
7317 +               goto err;
7318 +       }
7319 +
7320 +       if (krp->krp_op != CRK_MOD_EXP) {
7321 +               krp->krp_status = EOPNOTSUPP;
7322 +               goto err;
7323 +       }
7324 +
7325 +       q = (struct safe_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
7326 +       if (q == NULL) {
7327 +               krp->krp_status = ENOMEM;
7328 +               goto err;
7329 +       }
7330 +       memset(q, 0, sizeof(*q));
7331 +       q->pkq_krp = krp;
7332 +       INIT_LIST_HEAD(&q->pkq_list);
7333 +
7334 +       spin_lock_irqsave(&sc->sc_pkmtx, flags);
7335 +       list_add_tail(&q->pkq_list, &sc->sc_pkq);
7336 +       safe_kfeed(sc);
7337 +       spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
7338 +       return (0);
7339 +
7340 +err:
7341 +       crypto_kdone(krp);
7342 +       return (0);
7343 +}
7344 +
7345 +#define        SAFE_CRK_PARAM_BASE     0
7346 +#define        SAFE_CRK_PARAM_EXP      1
7347 +#define        SAFE_CRK_PARAM_MOD      2
7348 +
7349 +static int
7350 +safe_kstart(struct safe_softc *sc)
7351 +{
7352 +       struct cryptkop *krp = sc->sc_pkq_cur->pkq_krp;
7353 +       int exp_bits, mod_bits, base_bits;
7354 +       u_int32_t op, a_off, b_off, c_off, d_off;
7355 +
7356 +       DPRINTF(("%s()\n", __FUNCTION__));
7357 +
7358 +       if (krp->krp_iparams < 3 || krp->krp_oparams != 1) {
7359 +               krp->krp_status = EINVAL;
7360 +               return (1);
7361 +       }
7362 +
7363 +       base_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_BASE]);
7364 +       if (base_bits > 2048)
7365 +               goto too_big;
7366 +       if (base_bits <= 0)             /* 5. base not zero */
7367 +               goto too_small;
7368 +
7369 +       exp_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_EXP]);
7370 +       if (exp_bits > 2048)
7371 +               goto too_big;
7372 +       if (exp_bits <= 0)              /* 1. exponent word length > 0 */
7373 +               goto too_small;         /* 4. exponent not zero */
7374 +
7375 +       mod_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_MOD]);
7376 +       if (mod_bits > 2048)
7377 +               goto too_big;
7378 +       if (mod_bits <= 32)             /* 2. modulus word length > 1 */
7379 +               goto too_small;         /* 8. MSW of modulus != zero */
7380 +       if (mod_bits < exp_bits)        /* 3 modulus len >= exponent len */
7381 +               goto too_small;
7382 +       if ((krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p[0] & 1) == 0)
7383 +               goto bad_domain;        /* 6. modulus is odd */
7384 +       if (mod_bits > krp->krp_param[krp->krp_iparams].crp_nbits)
7385 +               goto too_small;         /* make sure result will fit */
7386 +
7387 +       /* 7. modulus > base */
7388 +       if (mod_bits < base_bits)
7389 +               goto too_small;
7390 +       if (mod_bits == base_bits) {
7391 +               u_int8_t *basep, *modp;
7392 +               int i;
7393 +
7394 +               basep = krp->krp_param[SAFE_CRK_PARAM_BASE].crp_p +
7395 +                   ((base_bits + 7) / 8) - 1;
7396 +               modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
7397 +                   ((mod_bits + 7) / 8) - 1;
7398 +               
7399 +               for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
7400 +                       if (*modp < *basep)
7401 +                               goto too_small;
7402 +                       if (*modp > *basep)
7403 +                               break;
7404 +               }
7405 +       }
7406 +
7407 +       /* And on the 9th step, he rested. */
7408 +
7409 +       WRITE_REG(sc, SAFE_PK_A_LEN, (exp_bits + 31) / 32);
7410 +       WRITE_REG(sc, SAFE_PK_B_LEN, (mod_bits + 31) / 32);
7411 +       if (mod_bits > 1024) {
7412 +               op = SAFE_PK_FUNC_EXP4;
7413 +               a_off = 0x000;
7414 +               b_off = 0x100;
7415 +               c_off = 0x200;
7416 +               d_off = 0x300;
7417 +       } else {
7418 +               op = SAFE_PK_FUNC_EXP16;
7419 +               a_off = 0x000;
7420 +               b_off = 0x080;
7421 +               c_off = 0x100;
7422 +               d_off = 0x180;
7423 +       }
7424 +       sc->sc_pk_reslen = b_off - a_off;
7425 +       sc->sc_pk_resoff = d_off;
7426 +
7427 +       /* A is exponent, B is modulus, C is base, D is result */
7428 +       safe_kload_reg(sc, a_off, b_off - a_off,
7429 +           &krp->krp_param[SAFE_CRK_PARAM_EXP]);
7430 +       WRITE_REG(sc, SAFE_PK_A_ADDR, a_off >> 2);
7431 +       safe_kload_reg(sc, b_off, b_off - a_off,
7432 +           &krp->krp_param[SAFE_CRK_PARAM_MOD]);
7433 +       WRITE_REG(sc, SAFE_PK_B_ADDR, b_off >> 2);
7434 +       safe_kload_reg(sc, c_off, b_off - a_off,
7435 +           &krp->krp_param[SAFE_CRK_PARAM_BASE]);
7436 +       WRITE_REG(sc, SAFE_PK_C_ADDR, c_off >> 2);
7437 +       WRITE_REG(sc, SAFE_PK_D_ADDR, d_off >> 2);
7438 +
7439 +       WRITE_REG(sc, SAFE_PK_FUNC, op | SAFE_PK_FUNC_RUN);
7440 +
7441 +       return (0);
7442 +
7443 +too_big:
7444 +       krp->krp_status = E2BIG;
7445 +       return (1);
7446 +too_small:
7447 +       krp->krp_status = ERANGE;
7448 +       return (1);
7449 +bad_domain:
7450 +       krp->krp_status = EDOM;
7451 +       return (1);
7452 +}
7453 +
7454 +static int
7455 +safe_ksigbits(struct safe_softc *sc, struct crparam *cr)  /* count significant bits in a bignum crypto parameter */
7456 +{
7457 +       u_int plen = (cr->crp_nbits + 7) / 8;   /* parameter length in bytes */
7458 +       int i, sig = plen * 8;
7459 +       u_int8_t c, *p = cr->crp_p;
7460 +
7461 +       DPRINTF(("%s()\n", __FUNCTION__));
7462 +
7463 +       for (i = plen - 1; i >= 0; i--) {       /* scan from most-significant byte down */
7464 +               c = p[i];
7465 +               if (c != 0) {
7466 +                       while ((c & 0x80) == 0) {       /* trim leading zero bits of top byte */
7467 +                               sig--;
7468 +                               c <<= 1;
7469 +                       }
7470 +                       break;
7471 +               }
7472 +               sig -= 8;       /* whole byte of leading zeros */
7473 +       }
7474 +       return (sig);
7475 +}
7476 +
7477 +static void
7478 +safe_kfeed(struct safe_softc *sc)       /* start the next queued pk op if the engine is idle; caller holds sc_pkmtx */
7479 +{
7480 +       struct safe_pkq *q, *tmp;
7481 +
7482 +       DPRINTF(("%s()\n", __FUNCTION__));
7483 +
7484 +       if (list_empty(&sc->sc_pkq) && sc->sc_pkq_cur == NULL)  /* nothing queued, nothing running */
7485 +               return;
7486 +       if (sc->sc_pkq_cur != NULL)     /* op already in flight */
7487 +               return;
7488 +       list_for_each_entry_safe(q, tmp, &sc->sc_pkq, pkq_list) {
7489 +               sc->sc_pkq_cur = q;
7490 +               list_del(&q->pkq_list);
7491 +               if (safe_kstart(sc) != 0) {     /* rejected (krp_status set); complete and try next */
7492 +                       crypto_kdone(q->pkq_krp);
7493 +                       kfree(q);
7494 +                       sc->sc_pkq_cur = NULL;
7495 +               } else {
7496 +                       /* op started, start polling */
7497 +                       mod_timer(&sc->sc_pkto, jiffies + 1);
7498 +                       break;
7499 +               }
7500 +       }
7501 +}
7502 +
7503 +static void
7504 +safe_kpoll(unsigned long arg)   /* timer callback: poll the pk engine for completion; arg is the chip index */
7505 +{
7506 +       struct safe_softc *sc = NULL;
7507 +       struct safe_pkq *q;
7508 +       struct crparam *res;
7509 +       int i;
7510 +       u_int32_t buf[64];      /* NOTE(review): assumes sc_pk_reslen <= 256 bytes — confirm against EXP4 layout */
7511 +       unsigned long flags;
7512 +
7513 +       DPRINTF(("%s()\n", __FUNCTION__));
7514 +
7515 +       if (arg >= SAFE_MAX_CHIPS)      /* validate index before array lookup */
7516 +               return;
7517 +       sc = safe_chip_idx[arg];
7518 +       if (!sc) {
7519 +               DPRINTF(("%s() - bad callback\n", __FUNCTION__));
7520 +               return;
7521 +       }
7522 +
7523 +       spin_lock_irqsave(&sc->sc_pkmtx, flags);
7524 +       if (sc->sc_pkq_cur == NULL)     /* nothing in flight */
7525 +               goto out;
7526 +       if (READ_REG(sc, SAFE_PK_FUNC) & SAFE_PK_FUNC_RUN) {
7527 +               /* still running, check back later */
7528 +               mod_timer(&sc->sc_pkto, jiffies + 1);
7529 +               goto out;
7530 +       }
7531 +
7532 +       q = sc->sc_pkq_cur;
7533 +       res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];  /* first output parameter */
7534 +       bzero(buf, sizeof(buf));
7535 +       bzero(res->crp_p, (res->crp_nbits + 7) / 8);
7536 +       for (i = 0; i < sc->sc_pk_reslen >> 2; i++)     /* read result out of pk RAM */
7537 +               buf[i] = le32_to_cpu(READ_REG(sc, SAFE_PK_RAM_START +
7538 +                   sc->sc_pk_resoff + (i << 2)));
7539 +       bcopy(buf, res->crp_p, (res->crp_nbits + 7) / 8);
7540 +       /*
7541 +        * reduce the bits that need copying if possible
7542 +        */
7543 +       res->crp_nbits = min(res->crp_nbits,sc->sc_pk_reslen * 8);
7544 +       res->crp_nbits = safe_ksigbits(sc, res);
7545 +
7546 +       for (i = SAFE_PK_RAM_START; i < SAFE_PK_RAM_END; i += 4)        /* scrub pk RAM so operands don't linger */
7547 +               WRITE_REG(sc, i, 0);
7548 +
7549 +       crypto_kdone(q->pkq_krp);
7550 +       kfree(q);
7551 +       sc->sc_pkq_cur = NULL;
7552 +
7553 +       safe_kfeed(sc);         /* kick off the next queued op, if any */
7554 +out:
7555 +       spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
7556 +}
7557 +
7558 +static void
7559 +safe_kload_reg(struct safe_softc *sc, u_int32_t off, u_int32_t len,
7560 +    struct crparam *n)  /* copy bignum parameter 'n' into pk engine RAM at offset 'off', zero-padded to 'len' */
7561 +{
7562 +       u_int32_t buf[64], i;   /* NOTE(review): assumes crp_nbits <= 2048 — enforced by the 2048-bit check in safe_kstart */
7563 +
7564 +       DPRINTF(("%s()\n", __FUNCTION__));
7565 +
7566 +       bzero(buf, sizeof(buf));
7567 +       bcopy(n->crp_p, buf, (n->crp_nbits + 7) / 8);
7568 +
7569 +       for (i = 0; i < len >> 2; i++)  /* write one 32-bit little-endian word at a time */
7570 +               WRITE_REG(sc, SAFE_PK_RAM_START + off + (i << 2),
7571 +                   cpu_to_le32(buf[i]));
7572 +}
7573 +
7574 +#ifdef SAFE_DEBUG
7575 +static void
7576 +safe_dump_dmastatus(struct safe_softc *sc, const char *tag)     /* debug: print the DMA engine registers */
7577 +{
7578 +       printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
7579 +               , tag
7580 +               , READ_REG(sc, SAFE_DMA_ENDIAN)
7581 +               , READ_REG(sc, SAFE_DMA_SRCADDR)
7582 +               , READ_REG(sc, SAFE_DMA_DSTADDR)
7583 +               , READ_REG(sc, SAFE_DMA_STAT)
7584 +       );
7585 +}
7586 +
7587 +static void
7588 +safe_dump_intrstate(struct safe_softc *sc, const char *tag)     /* debug: print the interrupt controller state */
7589 +{
7590 +       printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
7591 +               , tag
7592 +               , READ_REG(sc, SAFE_HI_CFG)
7593 +               , READ_REG(sc, SAFE_HI_MASK)
7594 +               , READ_REG(sc, SAFE_HI_DESC_CNT)
7595 +               , READ_REG(sc, SAFE_HU_STAT)
7596 +               , READ_REG(sc, SAFE_HM_STAT)
7597 +       );
7598 +}
7599 +
7600 +static void
7601 +safe_dump_ringstate(struct safe_softc *sc, const char *tag)     /* debug: print ring head/tail positions */
7602 +{
7603 +       u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
7604 +
7605 +       /* NB: assume caller has lock on ring */
7606 +       printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
7607 +               tag,
7608 +               estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
7609 +               (unsigned long)(sc->sc_back - sc->sc_ring),
7610 +               (unsigned long)(sc->sc_front - sc->sc_ring));
7611 +}
7612 +
7613 +static void
7614 +safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)    /* debug: dump one ring entry, its particle chains and SA */
7615 +{
7616 +       int ix, nsegs;
7617 +
7618 +       ix = re - sc->sc_ring;
7619 +       printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
7620 +               , tag
7621 +               , re, ix
7622 +               , re->re_desc.d_csr
7623 +               , re->re_desc.d_src
7624 +               , re->re_desc.d_dst
7625 +               , re->re_desc.d_sa
7626 +               , re->re_desc.d_len
7627 +       );
7628 +       if (re->re_src.nsegs > 1) {     /* multi-segment source: walk the particle descriptors */
7629 +               ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
7630 +                       sizeof(struct safe_pdesc);
7631 +               for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
7632 +                       printf(" spd[%u] %p: %p size %u flags %x"
7633 +                               , ix, &sc->sc_spring[ix]
7634 +                               , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
7635 +                               , sc->sc_spring[ix].pd_size
7636 +                               , sc->sc_spring[ix].pd_flags
7637 +                       );
7638 +                       if (sc->sc_spring[ix].pd_size == 0)
7639 +                               printf(" (zero!)");
7640 +                       printf("\n");
7641 +                       if (++ix == SAFE_TOTAL_SPART)   /* ring wraps */
7642 +                               ix = 0;
7643 +               }
7644 +       }
7645 +       if (re->re_dst.nsegs > 1) {     /* multi-segment destination */
7646 +               ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
7647 +                       sizeof(struct safe_pdesc);
7648 +               for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
7649 +                       printf(" dpd[%u] %p: %p flags %x\n"
7650 +                               , ix, &sc->sc_dpring[ix]
7651 +                               , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
7652 +                               , sc->sc_dpring[ix].pd_flags
7653 +                       );
7654 +                       if (++ix == SAFE_TOTAL_DPART)   /* ring wraps */
7655 +                               ix = 0;
7656 +               }
7657 +       }
7658 +       printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
7659 +               re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
7660 +       printf("sa: key %x %x %x %x %x %x %x %x\n"
7661 +               , re->re_sa.sa_key[0]
7662 +               , re->re_sa.sa_key[1]
7663 +               , re->re_sa.sa_key[2]
7664 +               , re->re_sa.sa_key[3]
7665 +               , re->re_sa.sa_key[4]
7666 +               , re->re_sa.sa_key[5]
7667 +               , re->re_sa.sa_key[6]
7668 +               , re->re_sa.sa_key[7]
7669 +       );
7670 +       printf("sa: indigest %x %x %x %x %x\n"
7671 +               , re->re_sa.sa_indigest[0]
7672 +               , re->re_sa.sa_indigest[1]
7673 +               , re->re_sa.sa_indigest[2]
7674 +               , re->re_sa.sa_indigest[3]
7675 +               , re->re_sa.sa_indigest[4]
7676 +       );
7677 +       printf("sa: outdigest %x %x %x %x %x\n"
7678 +               , re->re_sa.sa_outdigest[0]
7679 +               , re->re_sa.sa_outdigest[1]
7680 +               , re->re_sa.sa_outdigest[2]
7681 +               , re->re_sa.sa_outdigest[3]
7682 +               , re->re_sa.sa_outdigest[4]
7683 +       );
7684 +       printf("sr: iv %x %x %x %x\n"
7685 +               , re->re_sastate.sa_saved_iv[0]
7686 +               , re->re_sastate.sa_saved_iv[1]
7687 +               , re->re_sastate.sa_saved_iv[2]
7688 +               , re->re_sastate.sa_saved_iv[3]
7689 +       );
7690 +       printf("sr: hashbc %u indigest %x %x %x %x %x\n"
7691 +               , re->re_sastate.sa_saved_hashbc
7692 +               , re->re_sastate.sa_saved_indigest[0]
7693 +               , re->re_sastate.sa_saved_indigest[1]
7694 +               , re->re_sastate.sa_saved_indigest[2]
7695 +               , re->re_sastate.sa_saved_indigest[3]
7696 +               , re->re_sastate.sa_saved_indigest[4]
7697 +       );
7698 +}
7699 +
7700 +static void
7701 +safe_dump_ring(struct safe_softc *sc, const char *tag)  /* debug: dump every queued ring entry under the ring lock */
7702 +{
7703 +       unsigned long flags;
7704 +
7705 +       spin_lock_irqsave(&sc->sc_ringmtx, flags);
7706 +       printf("\nSafeNet Ring State:\n");
7707 +       safe_dump_intrstate(sc, tag);
7708 +       safe_dump_dmastatus(sc, tag);
7709 +       safe_dump_ringstate(sc, tag);
7710 +       if (sc->sc_nqchip) {
7711 +               struct safe_ringentry *re = sc->sc_back;
7712 +               do {
7713 +                       safe_dump_request(sc, tag, re);
7714 +                       if (++re == sc->sc_ringtop)     /* ring wraps */
7715 +                               re = sc->sc_ring;
7716 +               } while (re != sc->sc_front);
7717 +       }
7718 +       spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
7719 +}
7720 +#endif /* SAFE_DEBUG */
7721 +
7722 +
7723 +static int safe_probe(struct pci_dev *dev, const struct pci_device_id *ent)     /* PCI probe: map the device, allocate rings, register crypto algs */
7724 +{
7725 +       struct safe_softc *sc = NULL;
7726 +       u32 mem_start, mem_len, cmd;
7727 +       int i, rc, devinfo;
7728 +       dma_addr_t raddr;
7729 +       static int num_chips = 0;
7730 +
7731 +       DPRINTF(("%s()\n", __FUNCTION__));
7732 +
7733 +       if (pci_enable_device(dev) < 0)
7734 +               return(-ENODEV);
7735 +
7736 +       if (!dev->irq) {
7737 +               printk("safe: found device with no IRQ assigned. check BIOS settings!\n");
7738 +               pci_disable_device(dev);
7739 +               return(-ENODEV);
7740 +       }
7741 +
7742 +       if (pci_set_mwi(dev)) {         /* NOTE(review): pci_disable_device() is not called on this or later error paths */
7743 +               printk("safe: pci_set_mwi failed!\n");
7744 +               return(-ENODEV);
7745 +       }
7746 +
7747 +       sc = (struct safe_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
7748 +       if (!sc)
7749 +               return(-ENOMEM);
7750 +       memset(sc, 0, sizeof(*sc));
7751 +
7752 +       softc_device_init(sc, "safe", num_chips, safe_methods);
7753 +
7754 +       sc->sc_irq = -1;
7755 +       sc->sc_cid = -1;
7756 +       sc->sc_pcidev = dev;
7757 +       if (num_chips < SAFE_MAX_CHIPS) {       /* record chip for the safe_kpoll timer lookup */
7758 +               safe_chip_idx[device_get_unit(sc->sc_dev)] = sc;
7759 +               num_chips++;
7760 +       }
7761 +
7762 +       INIT_LIST_HEAD(&sc->sc_pkq);
7763 +       spin_lock_init(&sc->sc_pkmtx);
7764 +
7765 +       pci_set_drvdata(sc->sc_pcidev, sc);
7766 +
7767 +       /* we read its hardware registers as memory */
7768 +       mem_start = pci_resource_start(sc->sc_pcidev, 0);
7769 +       mem_len   = pci_resource_len(sc->sc_pcidev, 0);
7770 +
7771 +       sc->sc_base_addr = (ocf_iomem_t) ioremap(mem_start, mem_len);
7772 +       if (!sc->sc_base_addr) {        /* NOTE(review): error path never iounmaps sc_base_addr on later failures */
7773 +               device_printf(sc->sc_dev, "failed to ioremap 0x%x-0x%x\n",
7774 +                               mem_start, mem_start + mem_len - 1);
7775 +               goto out;
7776 +       }
7777 +
7778 +       /* fix up the bus size */
7779 +       if (pci_set_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
7780 +               device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
7781 +               goto out;
7782 +       }
7783 +       if (pci_set_consistent_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
7784 +               device_printf(sc->sc_dev, "No usable consistent DMA configuration, aborting.\n");
7785 +               goto out;
7786 +       }
7787 +
7788 +       pci_set_master(sc->sc_pcidev);
7789 +
7790 +       pci_read_config_dword(sc->sc_pcidev, PCI_COMMAND, &cmd);
7791 +
7792 +       if (!(cmd & PCI_COMMAND_MEMORY)) {
7793 +               device_printf(sc->sc_dev, "failed to enable memory mapping\n");
7794 +               goto out;
7795 +       }
7796 +
7797 +       if (!(cmd & PCI_COMMAND_MASTER)) {
7798 +               device_printf(sc->sc_dev, "failed to enable bus mastering\n");
7799 +               goto out;
7800 +       }
7801 +
7802 +       rc = request_irq(dev->irq, safe_intr, IRQF_SHARED, "safe", sc);
7803 +       if (rc) {
7804 +               device_printf(sc->sc_dev, "failed to hook irq %d\n", dev->irq); /* was sc->sc_irq, which is still -1 here */
7805 +               goto out;
7806 +       }
7807 +       sc->sc_irq = dev->irq;
7808 +
7809 +       sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
7810 +                       (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
7811 +
7812 +       /*
7813 +        * Allocate packet engine descriptors.
7814 +        */
7815 +       sc->sc_ringalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
7816 +                       SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
7817 +                       &sc->sc_ringalloc.dma_paddr);
7818 +       if (!sc->sc_ringalloc.dma_vaddr) {
7819 +               device_printf(sc->sc_dev, "cannot allocate PE descriptor ring\n");
7820 +               goto out;
7821 +       }
7822 +
7823 +       /*
7824 +        * Hookup the static portion of all our data structures.
7825 +        */
7826 +       sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
7827 +       sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
7828 +       sc->sc_front = sc->sc_ring;
7829 +       sc->sc_back = sc->sc_ring;
7830 +       raddr = sc->sc_ringalloc.dma_paddr;
7831 +       bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
7832 +       for (i = 0; i < SAFE_MAX_NQUEUE; i++) { /* pre-wire each entry's SA and state-record bus addresses */
7833 +               struct safe_ringentry *re = &sc->sc_ring[i];
7834 +
7835 +               re->re_desc.d_sa = raddr +
7836 +                       offsetof(struct safe_ringentry, re_sa);
7837 +               re->re_sa.sa_staterec = raddr +
7838 +                       offsetof(struct safe_ringentry, re_sastate);
7839 +
7840 +               raddr += sizeof (struct safe_ringentry);
7841 +       }
7842 +       spin_lock_init(&sc->sc_ringmtx);
7843 +
7844 +       /*
7845 +        * Allocate scatter and gather particle descriptors.
7846 +        */
7847 +       sc->sc_spalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
7848 +                       SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
7849 +                       &sc->sc_spalloc.dma_paddr);
7850 +       if (!sc->sc_spalloc.dma_vaddr) {
7851 +               device_printf(sc->sc_dev, "cannot allocate source particle descriptor ring\n");
7852 +               goto out;
7853 +       }
7854 +       sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
7855 +       sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
7856 +       sc->sc_spfree = sc->sc_spring;
7857 +       bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
7858 +
7859 +       sc->sc_dpalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
7860 +                       SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
7861 +                       &sc->sc_dpalloc.dma_paddr);
7862 +       if (!sc->sc_dpalloc.dma_vaddr) {
7863 +               device_printf(sc->sc_dev, "cannot allocate destination particle descriptor ring\n");
7864 +               goto out;
7865 +       }
7866 +       sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
7867 +       sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
7868 +       sc->sc_dpfree = sc->sc_dpring;
7869 +       bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
7870 +
7871 +       sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
7872 +       if (sc->sc_cid < 0) {
7873 +               device_printf(sc->sc_dev, "could not get crypto driver id\n");
7874 +               goto out;
7875 +       }
7876 +
7877 +       printf("%s:", device_get_nameunit(sc->sc_dev));
7878 +
7879 +       devinfo = READ_REG(sc, SAFE_DEVINFO);   /* advertise only the units this chip actually has */
7880 +       if (devinfo & SAFE_DEVINFO_RNG) {
7881 +               sc->sc_flags |= SAFE_FLAGS_RNG;
7882 +               printf(" rng");
7883 +       }
7884 +       if (devinfo & SAFE_DEVINFO_PKEY) {
7885 +               printf(" key");
7886 +               sc->sc_flags |= SAFE_FLAGS_KEY;
7887 +               crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
7888 +#if 0
7889 +               crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
7890 +#endif
7891 +               init_timer(&sc->sc_pkto);       /* pk completion poll timer; only initialized when PK unit present */
7892 +               sc->sc_pkto.function = safe_kpoll;
7893 +               sc->sc_pkto.data = (unsigned long) device_get_unit(sc->sc_dev);
7894 +       }
7895 +       if (devinfo & SAFE_DEVINFO_DES) {
7896 +               printf(" des/3des");
7897 +               crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
7898 +               crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
7899 +       }
7900 +       if (devinfo & SAFE_DEVINFO_AES) {
7901 +               printf(" aes");
7902 +               crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
7903 +       }
7904 +       if (devinfo & SAFE_DEVINFO_MD5) {
7905 +               printf(" md5");
7906 +               crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
7907 +       }
7908 +       if (devinfo & SAFE_DEVINFO_SHA1) {
7909 +               printf(" sha1");
7910 +               crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
7911 +       }
7912 +       printf(" null");
7913 +       crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
7914 +       crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
7915 +       /* XXX other supported algorithms */
7916 +       printf("\n");
7917 +
7918 +       safe_reset_board(sc);           /* reset h/w */
7919 +       safe_init_board(sc);            /* init h/w */
7920 +
7921 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
7922 +       if (sc->sc_flags & SAFE_FLAGS_RNG) {
7923 +               safe_rng_init(sc);
7924 +               crypto_rregister(sc->sc_cid, safe_read_random, sc);
7925 +       }
7926 +#endif /* SAFE_NO_RNG */
7927 +
7928 +       return (0);
7929 +
7930 +out:
7931 +       if (sc->sc_cid >= 0)
7932 +               crypto_unregister_all(sc->sc_cid);
7933 +       if (sc->sc_irq != -1)
7934 +               free_irq(sc->sc_irq, sc);
7935 +       if (sc->sc_ringalloc.dma_vaddr)
7936 +               pci_free_consistent(sc->sc_pcidev,
7937 +                               SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
7938 +                               sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
7939 +       if (sc->sc_spalloc.dma_vaddr)
7940 +               pci_free_consistent(sc->sc_pcidev,
7941 +                               SAFE_TOTAL_SPART * sizeof (struct safe_pdesc), /* was SAFE_TOTAL_DPART: wrong size for source ring */
7942 +                               sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
7943 +       if (sc->sc_dpalloc.dma_vaddr)
7944 +               pci_free_consistent(sc->sc_pcidev,
7945 +                               SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
7946 +                               sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
7947 +       kfree(sc);
7948 +       return(-ENODEV);
7949 +}
7950 +
7951 +static void safe_remove(struct pci_dev *dev)    /* PCI remove: quiesce hardware and release every probe-time resource */
7952 +{
7953 +       struct safe_softc *sc = pci_get_drvdata(dev);
7954 +
7955 +       DPRINTF(("%s()\n", __FUNCTION__));
7956 +
7957 +       /* XXX wait/abort active ops */
7958 +
7959 +       WRITE_REG(sc, SAFE_HI_MASK, 0);         /* disable interrupts */
7960 +
7961 +       if (sc->sc_flags & SAFE_FLAGS_KEY) del_timer_sync(&sc->sc_pkto); /* timer is only init_timer'd when the PK unit exists */
7962 +
7963 +       crypto_unregister_all(sc->sc_cid);
7964 +
7965 +       safe_cleanchip(sc);
7966 +
7967 +       if (sc->sc_irq != -1)
7968 +               free_irq(sc->sc_irq, sc);
7969 +       if (sc->sc_ringalloc.dma_vaddr)
7970 +               pci_free_consistent(sc->sc_pcidev,
7971 +                               SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
7972 +                               sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
7973 +       if (sc->sc_spalloc.dma_vaddr)
7974 +               pci_free_consistent(sc->sc_pcidev,
7975 +                               SAFE_TOTAL_SPART * sizeof (struct safe_pdesc), /* was SAFE_TOTAL_DPART: wrong size for source ring */
7976 +                               sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
7977 +       if (sc->sc_dpalloc.dma_vaddr)
7978 +               pci_free_consistent(sc->sc_pcidev,
7979 +                               SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
7980 +                               sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
7981 +       sc->sc_irq = -1;
7982 +       sc->sc_ringalloc.dma_vaddr = NULL;
7983 +       sc->sc_spalloc.dma_vaddr = NULL;
7984 +       sc->sc_dpalloc.dma_vaddr = NULL;
7985 +}
7986 +
7987 +static struct pci_device_id safe_pci_tbl[] = {
7988 +       { PCI_VENDOR_SAFENET, PCI_PRODUCT_SAFEXCEL,
7989 +         PCI_ANY_ID, PCI_ANY_ID, 0, 0, },      /* any subvendor/subdevice */
7990 +       { },    /* terminator */
7991 +};
7992 +MODULE_DEVICE_TABLE(pci, safe_pci_tbl);
7993 +
7994 +static struct pci_driver safe_driver = {
7995 +       .name         = "safe",
7996 +       .id_table     = safe_pci_tbl,
7997 +       .probe        = safe_probe,
7998 +       .remove       = safe_remove,
7999 +       /* add PM stuff here one day */
8000 +};
8001 +
8002 +static int __init safe_init (void)      /* module init: register the PCI driver */
8003 +{
8004 +
8005 +       int rc;
8006 +
8007 +       DPRINTF(("%s(%p)\n", __FUNCTION__, safe_init));
8008 +
8009 +       rc = pci_register_driver(&safe_driver);
8010 +       pci_register_driver_compat(&safe_driver, rc);   /* compat shim for older pci_register_driver return semantics */
8011 +
8012 +       return rc;
8013 +}
8014 +
8015 +static void __exit safe_exit (void)     /* module exit: unregister the PCI driver (remove runs per device) */
8016 +{
8017 +       pci_unregister_driver(&safe_driver);
8018 +}
8019 +
8020 +module_init(safe_init);
8021 +module_exit(safe_exit);
8022 +
8023 +MODULE_LICENSE("BSD");  /* NOTE(review): "BSD" is not in the kernel's license whitelist ("Dual BSD/GPL" is) — module will taint */
8024 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
8025 +MODULE_DESCRIPTION("OCF driver for safenet PCI crypto devices");
8026 --- /dev/null
8027 +++ b/crypto/ocf/safe/sha1.c
8028 @@ -0,0 +1,279 @@
8029 +/*     $KAME: sha1.c,v 1.5 2000/11/08 06:13:08 itojun Exp $    */
8030 +/*
8031 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
8032 + * All rights reserved.
8033 + *
8034 + * Redistribution and use in source and binary forms, with or without
8035 + * modification, are permitted provided that the following conditions
8036 + * are met:
8037 + * 1. Redistributions of source code must retain the above copyright
8038 + *    notice, this list of conditions and the following disclaimer.
8039 + * 2. Redistributions in binary form must reproduce the above copyright
8040 + *    notice, this list of conditions and the following disclaimer in the
8041 + *    documentation and/or other materials provided with the distribution.
8042 + * 3. Neither the name of the project nor the names of its contributors
8043 + *    may be used to endorse or promote products derived from this software
8044 + *    without specific prior written permission.
8045 + *
8046 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
8047 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8048 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8049 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
8050 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
8051 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
8052 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
8053 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
8054 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
8055 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
8056 + * SUCH DAMAGE.
8057 + */
8058 +
8059 +/*
8060 + * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
8061 + * based on: http://csrc.nist.gov/fips/fip180-1.txt
8062 + * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
8063 + */
8064 +
8065 +#if 0
8066 +#include <sys/cdefs.h>
8067 +__FBSDID("$FreeBSD: src/sys/crypto/sha1.c,v 1.9 2003/06/10 21:36:57 obrien Exp $");
8068 +
8069 +#include <sys/types.h>
8070 +#include <sys/cdefs.h>
8071 +#include <sys/time.h>
8072 +#include <sys/systm.h>
8073 +
8074 +#include <crypto/sha1.h>
8075 +#endif
8076 +
8077 +/* sanity check */
8078 +#if BYTE_ORDER != BIG_ENDIAN
8079 +# if BYTE_ORDER != LITTLE_ENDIAN
8080 +#  define unsupported 1
8081 +# endif
8082 +#endif
8083 +
8084 +#ifndef unsupported
8085 +
8086 +/* constant table */
8087 +static u_int32_t _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 };
8088 +#define        K(t)    _K[(t) / 20]    /* one round constant per 20 rounds */
8089 +
8090 +#define        F0(b, c, d)     (((b) & (c)) | ((~(b)) & (d)))
8091 +#define        F1(b, c, d)     (((b) ^ (c)) ^ (d))
8092 +#define        F2(b, c, d)     (((b) & (c)) | ((b) & (d)) | ((c) & (d)))
8093 +#define        F3(b, c, d)     (((b) ^ (c)) ^ (d))
8094 +
8095 +#define        S(n, x)         (((x) << (n)) | ((x) >> (32 - (n))))    /* rotate left; 'n' now parenthesized */
8096 +
8097 +#undef H
8098 +#define        H(n)    (ctxt->h.b32[(n)])
8099 +#define        COUNT   (ctxt->count)
8100 +#define        BCOUNT  (ctxt->c.b64[0] / 8)
8101 +#define        W(n)    (ctxt->m.b32[(n)])
8102 +
8103 +#define        PUTBYTE(x)      do { \
8104 +       ctxt->m.b8[(COUNT % 64)] = (x);         \
8105 +       COUNT++;                                \
8106 +       COUNT %= 64;                            \
8107 +       ctxt->c.b64[0] += 8;                    \
8108 +       if (COUNT % 64 == 0)                    \
8109 +               sha1_step(ctxt);                \
8110 +     } while (0)
8111 +
8112 +#define        PUTPAD(x)       do { \
8113 +       ctxt->m.b8[(COUNT % 64)] = (x);         \
8114 +       COUNT++;                                \
8115 +       COUNT %= 64;                            \
8116 +       if (COUNT % 64 == 0)                    \
8117 +               sha1_step(ctxt);                \
8118 +     } while (0)
8119 +
8120 +static void sha1_step(struct sha1_ctxt *);
8121 +
8122 +static void
8123 +sha1_step(ctxt) /* compress one 64-byte message block into the hash state (FIPS 180-1) */
8124 +       struct sha1_ctxt *ctxt;
8125 +{
8126 +       u_int32_t       a, b, c, d, e;
8127 +       size_t t, s;
8128 +       u_int32_t       tmp;
8129 +
8130 +#if BYTE_ORDER == LITTLE_ENDIAN /* byte-swap the 16 message words to big-endian, in place */
8131 +       struct sha1_ctxt tctxt;
8132 +       bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64);
8133 +       ctxt->m.b8[0] = tctxt.m.b8[3]; ctxt->m.b8[1] = tctxt.m.b8[2];
8134 +       ctxt->m.b8[2] = tctxt.m.b8[1]; ctxt->m.b8[3] = tctxt.m.b8[0];
8135 +       ctxt->m.b8[4] = tctxt.m.b8[7]; ctxt->m.b8[5] = tctxt.m.b8[6];
8136 +       ctxt->m.b8[6] = tctxt.m.b8[5]; ctxt->m.b8[7] = tctxt.m.b8[4];
8137 +       ctxt->m.b8[8] = tctxt.m.b8[11]; ctxt->m.b8[9] = tctxt.m.b8[10];
8138 +       ctxt->m.b8[10] = tctxt.m.b8[9]; ctxt->m.b8[11] = tctxt.m.b8[8];
8139 +       ctxt->m.b8[12] = tctxt.m.b8[15]; ctxt->m.b8[13] = tctxt.m.b8[14];
8140 +       ctxt->m.b8[14] = tctxt.m.b8[13]; ctxt->m.b8[15] = tctxt.m.b8[12];
8141 +       ctxt->m.b8[16] = tctxt.m.b8[19]; ctxt->m.b8[17] = tctxt.m.b8[18];
8142 +       ctxt->m.b8[18] = tctxt.m.b8[17]; ctxt->m.b8[19] = tctxt.m.b8[16];
8143 +       ctxt->m.b8[20] = tctxt.m.b8[23]; ctxt->m.b8[21] = tctxt.m.b8[22];
8144 +       ctxt->m.b8[22] = tctxt.m.b8[21]; ctxt->m.b8[23] = tctxt.m.b8[20];
8145 +       ctxt->m.b8[24] = tctxt.m.b8[27]; ctxt->m.b8[25] = tctxt.m.b8[26];
8146 +       ctxt->m.b8[26] = tctxt.m.b8[25]; ctxt->m.b8[27] = tctxt.m.b8[24];
8147 +       ctxt->m.b8[28] = tctxt.m.b8[31]; ctxt->m.b8[29] = tctxt.m.b8[30];
8148 +       ctxt->m.b8[30] = tctxt.m.b8[29]; ctxt->m.b8[31] = tctxt.m.b8[28];
8149 +       ctxt->m.b8[32] = tctxt.m.b8[35]; ctxt->m.b8[33] = tctxt.m.b8[34];
8150 +       ctxt->m.b8[34] = tctxt.m.b8[33]; ctxt->m.b8[35] = tctxt.m.b8[32];
8151 +       ctxt->m.b8[36] = tctxt.m.b8[39]; ctxt->m.b8[37] = tctxt.m.b8[38];
8152 +       ctxt->m.b8[38] = tctxt.m.b8[37]; ctxt->m.b8[39] = tctxt.m.b8[36];
8153 +       ctxt->m.b8[40] = tctxt.m.b8[43]; ctxt->m.b8[41] = tctxt.m.b8[42];
8154 +       ctxt->m.b8[42] = tctxt.m.b8[41]; ctxt->m.b8[43] = tctxt.m.b8[40];
8155 +       ctxt->m.b8[44] = tctxt.m.b8[47]; ctxt->m.b8[45] = tctxt.m.b8[46];
8156 +       ctxt->m.b8[46] = tctxt.m.b8[45]; ctxt->m.b8[47] = tctxt.m.b8[44];
8157 +       ctxt->m.b8[48] = tctxt.m.b8[51]; ctxt->m.b8[49] = tctxt.m.b8[50];
8158 +       ctxt->m.b8[50] = tctxt.m.b8[49]; ctxt->m.b8[51] = tctxt.m.b8[48];
8159 +       ctxt->m.b8[52] = tctxt.m.b8[55]; ctxt->m.b8[53] = tctxt.m.b8[54];
8160 +       ctxt->m.b8[54] = tctxt.m.b8[53]; ctxt->m.b8[55] = tctxt.m.b8[52];
8161 +       ctxt->m.b8[56] = tctxt.m.b8[59]; ctxt->m.b8[57] = tctxt.m.b8[58];
8162 +       ctxt->m.b8[58] = tctxt.m.b8[57]; ctxt->m.b8[59] = tctxt.m.b8[56];
8163 +       ctxt->m.b8[60] = tctxt.m.b8[63]; ctxt->m.b8[61] = tctxt.m.b8[62];
8164 +       ctxt->m.b8[62] = tctxt.m.b8[61]; ctxt->m.b8[63] = tctxt.m.b8[60];
8165 +#endif
8166 +
8167 +       a = H(0); b = H(1); c = H(2); d = H(3); e = H(4);
8168 +
8169 +       for (t = 0; t < 20; t++) {      /* rounds 0-19: F0 with K[0]; schedule expansion starts at t=16 */
8170 +               s = t & 0x0f;
8171 +               if (t >= 16) {
8172 +                       W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8173 +               }
8174 +               tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t);
8175 +               e = d; d = c; c = S(30, b); b = a; a = tmp;
8176 +       }
8177 +       for (t = 20; t < 40; t++) {     /* rounds 20-39: F1 with K[1] */
8178 +               s = t & 0x0f;
8179 +               W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8180 +               tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t);
8181 +               e = d; d = c; c = S(30, b); b = a; a = tmp;
8182 +       }
8183 +       for (t = 40; t < 60; t++) {     /* rounds 40-59: F2 with K[2] */
8184 +               s = t & 0x0f;
8185 +               W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8186 +               tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t);
8187 +               e = d; d = c; c = S(30, b); b = a; a = tmp;
8188 +       }
8189 +       for (t = 60; t < 80; t++) {     /* rounds 60-79: F3 with K[3] */
8190 +               s = t & 0x0f;
8191 +               W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8192 +               tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t);
8193 +               e = d; d = c; c = S(30, b); b = a; a = tmp;
8194 +       }
8195 +
8196 +       H(0) = H(0) + a;
8197 +       H(1) = H(1) + b;
8198 +       H(2) = H(2) + c;
8199 +       H(3) = H(3) + d;
8200 +       H(4) = H(4) + e;
8201 +
8202 +       bzero(&ctxt->m.b8[0], 64);      /* don't leave message bytes behind */
8203 +}
8204 +
8205 +/*------------------------------------------------------------*/
8206 +
8207 +void
8208 +sha1_init(ctxt)
8209 +       struct sha1_ctxt *ctxt;
8210 +{
8211 +       bzero(ctxt, sizeof(struct sha1_ctxt));
8212 +       H(0) = 0x67452301;
8213 +       H(1) = 0xefcdab89;
8214 +       H(2) = 0x98badcfe;
8215 +       H(3) = 0x10325476;
8216 +       H(4) = 0xc3d2e1f0;
8217 +}
8218 +
8219 +void
8220 +sha1_pad(ctxt)
8221 +       struct sha1_ctxt *ctxt;
8222 +{
8223 +       size_t padlen;          /*pad length in bytes*/
8224 +       size_t padstart;
8225 +
8226 +       PUTPAD(0x80);
8227 +
8228 +       padstart = COUNT % 64;
8229 +       padlen = 64 - padstart;
8230 +       if (padlen < 8) {
8231 +               bzero(&ctxt->m.b8[padstart], padlen);
8232 +               COUNT += padlen;
8233 +               COUNT %= 64;
8234 +               sha1_step(ctxt);
8235 +               padstart = COUNT % 64;  /* should be 0 */
8236 +               padlen = 64 - padstart; /* should be 64 */
8237 +       }
8238 +       bzero(&ctxt->m.b8[padstart], padlen - 8);
8239 +       COUNT += (padlen - 8);
8240 +       COUNT %= 64;
8241 +#if BYTE_ORDER == BIG_ENDIAN
8242 +       PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[1]);
8243 +       PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[3]);
8244 +       PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[5]);
8245 +       PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]);
8246 +#else
8247 +       PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[6]);
8248 +       PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[4]);
8249 +       PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[2]);
8250 +       PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]);
8251 +#endif
8252 +}
8253 +
8254 +void
8255 +sha1_loop(ctxt, input, len)
8256 +       struct sha1_ctxt *ctxt;
8257 +       const u_int8_t *input;
8258 +       size_t len;
8259 +{
8260 +       size_t gaplen;
8261 +       size_t gapstart;
8262 +       size_t off;
8263 +       size_t copysiz;
8264 +
8265 +       off = 0;
8266 +
8267 +       while (off < len) {
8268 +               gapstart = COUNT % 64;
8269 +               gaplen = 64 - gapstart;
8270 +
8271 +               copysiz = (gaplen < len - off) ? gaplen : len - off;
8272 +               bcopy(&input[off], &ctxt->m.b8[gapstart], copysiz);
8273 +               COUNT += copysiz;
8274 +               COUNT %= 64;
8275 +               ctxt->c.b64[0] += copysiz * 8;
8276 +               if (COUNT % 64 == 0)
8277 +                       sha1_step(ctxt);
8278 +               off += copysiz;
8279 +       }
8280 +}
8281 +
8282 +void
8283 +sha1_result(ctxt, digest0)
8284 +       struct sha1_ctxt *ctxt;
8285 +       caddr_t digest0;
8286 +{
8287 +       u_int8_t *digest;
8288 +
8289 +       digest = (u_int8_t *)digest0;
8290 +       sha1_pad(ctxt);
8291 +#if BYTE_ORDER == BIG_ENDIAN
8292 +       bcopy(&ctxt->h.b8[0], digest, 20);
8293 +#else
8294 +       digest[0] = ctxt->h.b8[3]; digest[1] = ctxt->h.b8[2];
8295 +       digest[2] = ctxt->h.b8[1]; digest[3] = ctxt->h.b8[0];
8296 +       digest[4] = ctxt->h.b8[7]; digest[5] = ctxt->h.b8[6];
8297 +       digest[6] = ctxt->h.b8[5]; digest[7] = ctxt->h.b8[4];
8298 +       digest[8] = ctxt->h.b8[11]; digest[9] = ctxt->h.b8[10];
8299 +       digest[10] = ctxt->h.b8[9]; digest[11] = ctxt->h.b8[8];
8300 +       digest[12] = ctxt->h.b8[15]; digest[13] = ctxt->h.b8[14];
8301 +       digest[14] = ctxt->h.b8[13]; digest[15] = ctxt->h.b8[12];
8302 +       digest[16] = ctxt->h.b8[19]; digest[17] = ctxt->h.b8[18];
8303 +       digest[18] = ctxt->h.b8[17]; digest[19] = ctxt->h.b8[16];
8304 +#endif
8305 +}
8306 +
8307 +#endif /*unsupported*/
8308 --- /dev/null
8309 +++ b/crypto/ocf/safe/sha1.h
8310 @@ -0,0 +1,72 @@
8311 +/*     $FreeBSD: src/sys/crypto/sha1.h,v 1.8 2002/03/20 05:13:50 alfred Exp $  */
8312 +/*     $KAME: sha1.h,v 1.5 2000/03/27 04:36:23 sumikawa Exp $  */
8313 +
8314 +/*
8315 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
8316 + * All rights reserved.
8317 + *
8318 + * Redistribution and use in source and binary forms, with or without
8319 + * modification, are permitted provided that the following conditions
8320 + * are met:
8321 + * 1. Redistributions of source code must retain the above copyright
8322 + *    notice, this list of conditions and the following disclaimer.
8323 + * 2. Redistributions in binary form must reproduce the above copyright
8324 + *    notice, this list of conditions and the following disclaimer in the
8325 + *    documentation and/or other materials provided with the distribution.
8326 + * 3. Neither the name of the project nor the names of its contributors
8327 + *    may be used to endorse or promote products derived from this software
8328 + *    without specific prior written permission.
8329 + *
8330 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
8331 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8332 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8333 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
8334 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
8335 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
8336 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
8337 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
8338 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
8339 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
8340 + * SUCH DAMAGE.
8341 + */
8342 +/*
8343 + * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
8344 + * based on: http://csrc.nist.gov/fips/fip180-1.txt
8345 + * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
8346 + */
8347 +
8348 +#ifndef _NETINET6_SHA1_H_
8349 +#define _NETINET6_SHA1_H_
8350 +
8351 +struct sha1_ctxt {
8352 +       union {
8353 +               u_int8_t        b8[20];
8354 +               u_int32_t       b32[5];
8355 +       } h;
8356 +       union {
8357 +               u_int8_t        b8[8];
8358 +               u_int64_t       b64[1];
8359 +       } c;
8360 +       union {
8361 +               u_int8_t        b8[64];
8362 +               u_int32_t       b32[16];
8363 +       } m;
8364 +       u_int8_t        count;
8365 +};
8366 +
8367 +#ifdef __KERNEL__
8368 +extern void sha1_init(struct sha1_ctxt *);
8369 +extern void sha1_pad(struct sha1_ctxt *);
8370 +extern void sha1_loop(struct sha1_ctxt *, const u_int8_t *, size_t);
8371 +extern void sha1_result(struct sha1_ctxt *, caddr_t);
8372 +
8373 +/* compatibility with other SHA1 source codes */
8374 +typedef struct sha1_ctxt SHA1_CTX;
8375 +#define SHA1Init(x)            sha1_init((x))
8376 +#define SHA1Update(x, y, z)    sha1_loop((x), (y), (z))
8377 +#define SHA1Final(x, y)                sha1_result((y), (x))
8378 +#endif /* __KERNEL__ */
8379 +
8380 +#define        SHA1_RESULTLEN  (160/8)
8381 +
8382 +#endif /*_NETINET6_SHA1_H_*/
8383 --- /dev/null
8384 +++ b/crypto/ocf/safe/safereg.h
8385 @@ -0,0 +1,421 @@
8386 +/*-
8387 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
8388 + * Copyright (c) 2003 Global Technology Associates, Inc.
8389 + * All rights reserved.
8390 + *
8391 + * Redistribution and use in source and binary forms, with or without
8392 + * modification, are permitted provided that the following conditions
8393 + * are met:
8394 + * 1. Redistributions of source code must retain the above copyright
8395 + *    notice, this list of conditions and the following disclaimer.
8396 + * 2. Redistributions in binary form must reproduce the above copyright
8397 + *    notice, this list of conditions and the following disclaimer in the
8398 + *    documentation and/or other materials provided with the distribution.
8399 + *
8400 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
8401 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8402 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8403 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
8404 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
8405 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
8406 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
8407 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
8408 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
8409 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
8410 + * SUCH DAMAGE.
8411 + *
8412 + * $FreeBSD: src/sys/dev/safe/safereg.h,v 1.1 2003/07/21 21:46:07 sam Exp $
8413 + */
8414 +#ifndef _SAFE_SAFEREG_H_
8415 +#define        _SAFE_SAFEREG_H_
8416 +
8417 +/*
8418 + * Register definitions for SafeNet SafeXcel-1141 crypto device.
8419 + * Definitions from revision 1.3 (Nov 6 2002) of the User's Manual.
8420 + */
8421 +
8422 +#define BS_BAR                 0x10    /* DMA base address register */
8423 +#define        BS_TRDY_TIMEOUT         0x40    /* TRDY timeout */
8424 +#define        BS_RETRY_TIMEOUT        0x41    /* DMA retry timeout */
8425 +
8426 +#define        PCI_VENDOR_SAFENET      0x16ae          /* SafeNet, Inc. */
8427 +
8428 +/* SafeNet */
8429 +#define        PCI_PRODUCT_SAFEXCEL    0x1141          /* 1141 */
8430 +
8431 +#define        SAFE_PE_CSR             0x0000  /* Packet Engine Ctrl/Status */
8432 +#define        SAFE_PE_SRC             0x0004  /* Packet Engine Source */
8433 +#define        SAFE_PE_DST             0x0008  /* Packet Engine Destination */
8434 +#define        SAFE_PE_SA              0x000c  /* Packet Engine SA */
8435 +#define        SAFE_PE_LEN             0x0010  /* Packet Engine Length */
8436 +#define        SAFE_PE_DMACFG          0x0040  /* Packet Engine DMA Configuration */
8437 +#define        SAFE_PE_DMASTAT         0x0044  /* Packet Engine DMA Status */
8438 +#define        SAFE_PE_PDRBASE         0x0048  /* Packet Engine Descriptor Ring Base */
8439 +#define        SAFE_PE_RDRBASE         0x004c  /* Packet Engine Result Ring Base */
8440 +#define        SAFE_PE_RINGCFG         0x0050  /* Packet Engine Ring Configuration */
8441 +#define        SAFE_PE_RINGPOLL        0x0054  /* Packet Engine Ring Poll */
8442 +#define        SAFE_PE_IRNGSTAT        0x0058  /* Packet Engine Internal Ring Status */
8443 +#define        SAFE_PE_ERNGSTAT        0x005c  /* Packet Engine External Ring Status */
8444 +#define        SAFE_PE_IOTHRESH        0x0060  /* Packet Engine I/O Threshold */
8445 +#define        SAFE_PE_GRNGBASE        0x0064  /* Packet Engine Gather Ring Base */
8446 +#define        SAFE_PE_SRNGBASE        0x0068  /* Packet Engine Scatter Ring Base */
8447 +#define        SAFE_PE_PARTSIZE        0x006c  /* Packet Engine Particle Ring Size */
8448 +#define        SAFE_PE_PARTCFG         0x0070  /* Packet Engine Particle Ring Config */
8449 +#define        SAFE_CRYPTO_CTRL        0x0080  /* Crypto Control */
8450 +#define        SAFE_DEVID              0x0084  /* Device ID */
8451 +#define        SAFE_DEVINFO            0x0088  /* Device Info */
8452 +#define        SAFE_HU_STAT            0x00a0  /* Host Unmasked Status */
8453 +#define        SAFE_HM_STAT            0x00a4  /* Host Masked Status (read-only) */
8454 +#define        SAFE_HI_CLR             0x00a4  /* Host Clear Interrupt (write-only) */
8455 +#define        SAFE_HI_MASK            0x00a8  /* Host Mask Control */
8456 +#define        SAFE_HI_CFG             0x00ac  /* Interrupt Configuration */
8457 +#define        SAFE_HI_RD_DESCR        0x00b4  /* Force Descriptor Read */
8458 +#define        SAFE_HI_DESC_CNT        0x00b8  /* Host Descriptor Done Count */
8459 +#define        SAFE_DMA_ENDIAN         0x00c0  /* Master Endian Status */
8460 +#define        SAFE_DMA_SRCADDR        0x00c4  /* DMA Source Address Status */
8461 +#define        SAFE_DMA_DSTADDR        0x00c8  /* DMA Destination Address Status */
8462 +#define        SAFE_DMA_STAT           0x00cc  /* DMA Current Status */
8463 +#define        SAFE_DMA_CFG            0x00d4  /* DMA Configuration/Status */
8464 +#define        SAFE_ENDIAN             0x00e0  /* Endian Configuration */
8465 +#define        SAFE_PK_A_ADDR          0x0800  /* Public Key A Address */
8466 +#define        SAFE_PK_B_ADDR          0x0804  /* Public Key B Address */
8467 +#define        SAFE_PK_C_ADDR          0x0808  /* Public Key C Address */
8468 +#define        SAFE_PK_D_ADDR          0x080c  /* Public Key D Address */
8469 +#define        SAFE_PK_A_LEN           0x0810  /* Public Key A Length */
8470 +#define        SAFE_PK_B_LEN           0x0814  /* Public Key B Length */
8471 +#define        SAFE_PK_SHIFT           0x0818  /* Public Key Shift */
8472 +#define        SAFE_PK_FUNC            0x081c  /* Public Key Function */
8473 +#define SAFE_PK_RAM_START      0x1000  /* Public Key RAM start address */
8474 +#define SAFE_PK_RAM_END                0x1fff  /* Public Key RAM end address */
8475 +
8476 +#define        SAFE_RNG_OUT            0x0100  /* RNG Output */
8477 +#define        SAFE_RNG_STAT           0x0104  /* RNG Status */
8478 +#define        SAFE_RNG_CTRL           0x0108  /* RNG Control */
8479 +#define        SAFE_RNG_A              0x010c  /* RNG A */
8480 +#define        SAFE_RNG_B              0x0110  /* RNG B */
8481 +#define        SAFE_RNG_X_LO           0x0114  /* RNG X [31:0] */
8482 +#define        SAFE_RNG_X_MID          0x0118  /* RNG X [63:32] */
8483 +#define        SAFE_RNG_X_HI           0x011c  /* RNG X [80:64] */
8484 +#define        SAFE_RNG_X_CNTR         0x0120  /* RNG Counter */
8485 +#define        SAFE_RNG_ALM_CNT        0x0124  /* RNG Alarm Count */
8486 +#define        SAFE_RNG_CNFG           0x0128  /* RNG Configuration */
8487 +#define        SAFE_RNG_LFSR1_LO       0x012c  /* RNG LFSR1 [31:0] */
8488 +#define        SAFE_RNG_LFSR1_HI       0x0130  /* RNG LFSR1 [47:32] */
8489 +#define        SAFE_RNG_LFSR2_LO       0x0134  /* RNG LFSR2 [31:0] */
8490 +#define        SAFE_RNG_LFSR2_HI       0x0138  /* RNG LFSR2 [47:32] */
8491 +
8492 +#define        SAFE_PE_CSR_READY       0x00000001      /* ready for processing */
8493 +#define        SAFE_PE_CSR_DONE        0x00000002      /* h/w completed processing */
8494 +#define        SAFE_PE_CSR_LOADSA      0x00000004      /* load SA digests */
8495 +#define        SAFE_PE_CSR_HASHFINAL   0x00000010      /* do hash pad & write result */
8496 +#define        SAFE_PE_CSR_SABUSID     0x000000c0      /* bus id for SA */
8497 +#define        SAFE_PE_CSR_SAPCI       0x00000040      /* PCI bus id for SA */
8498 +#define        SAFE_PE_CSR_NXTHDR      0x0000ff00      /* next hdr value for IPsec */
8499 +#define        SAFE_PE_CSR_FPAD        0x0000ff00      /* fixed pad for basic ops */
8500 +#define        SAFE_PE_CSR_STATUS      0x00ff0000      /* operation result status */
8501 +#define        SAFE_PE_CSR_AUTH_FAIL   0x00010000      /* ICV mismatch (inbound) */
8502 +#define        SAFE_PE_CSR_PAD_FAIL    0x00020000      /* pad verify fail (inbound) */
8503 +#define        SAFE_PE_CSR_SEQ_FAIL    0x00040000      /* sequence number (inbound) */
8504 +#define        SAFE_PE_CSR_XERROR      0x00080000      /* extended error follows */
8505 +#define        SAFE_PE_CSR_XECODE      0x00f00000      /* extended error code */
8506 +#define        SAFE_PE_CSR_XECODE_S    20
8507 +#define        SAFE_PE_CSR_XECODE_BADCMD       0       /* invalid command */
8508 +#define        SAFE_PE_CSR_XECODE_BADALG       1       /* invalid algorithm */
8509 +#define        SAFE_PE_CSR_XECODE_ALGDIS       2       /* algorithm disabled */
8510 +#define        SAFE_PE_CSR_XECODE_ZEROLEN      3       /* zero packet length */
8511 +#define        SAFE_PE_CSR_XECODE_DMAERR       4       /* bus DMA error */
8512 +#define        SAFE_PE_CSR_XECODE_PIPEABORT    5       /* secondary bus DMA error */
8513 +#define        SAFE_PE_CSR_XECODE_BADSPI       6       /* IPsec SPI mismatch */
8514 +#define        SAFE_PE_CSR_XECODE_TIMEOUT      10      /* failsafe timeout */
8515 +#define        SAFE_PE_CSR_PAD         0xff000000      /* ESP padding control/status */
8516 +#define        SAFE_PE_CSR_PAD_MIN     0x00000000      /* minimum IPsec padding */
8517 +#define        SAFE_PE_CSR_PAD_16      0x08000000      /* pad to 16-byte boundary */
8518 +#define        SAFE_PE_CSR_PAD_32      0x10000000      /* pad to 32-byte boundary */
8519 +#define        SAFE_PE_CSR_PAD_64      0x20000000      /* pad to 64-byte boundary */
8520 +#define        SAFE_PE_CSR_PAD_128     0x40000000      /* pad to 128-byte boundary */
8521 +#define        SAFE_PE_CSR_PAD_256     0x80000000      /* pad to 256-byte boundary */
8522 +
8523 +/*
8524 + * Check the CSR to see if the PE has returned ownership to
8525 + * the host.  Note that before processing a descriptor this
8526 + * must be done followed by a check of the SAFE_PE_LEN register
8527 + * status bits to avoid premature processing of a descriptor
8528 + * on its way back to the host.
8529 + */
8530 +#define        SAFE_PE_CSR_IS_DONE(_csr) \
8531 +    (((_csr) & (SAFE_PE_CSR_READY | SAFE_PE_CSR_DONE)) == SAFE_PE_CSR_DONE)
8532 +
8533 +#define        SAFE_PE_LEN_LENGTH      0x000fffff      /* total length (bytes) */
8534 +#define        SAFE_PE_LEN_READY       0x00400000      /* ready for processing */
8535 +#define        SAFE_PE_LEN_DONE        0x00800000      /* h/w completed processing */
8536 +#define        SAFE_PE_LEN_BYPASS      0xff000000      /* bypass offset (bytes) */
8537 +#define        SAFE_PE_LEN_BYPASS_S    24
8538 +
8539 +#define        SAFE_PE_LEN_IS_DONE(_len) \
8540 +    (((_len) & (SAFE_PE_LEN_READY | SAFE_PE_LEN_DONE)) == SAFE_PE_LEN_DONE)
8541 +
8542 +/* NB: these apply to HU_STAT, HM_STAT, HI_CLR, and HI_MASK */
8543 +#define        SAFE_INT_PE_CDONE       0x00000002      /* PE context done */
8544 +#define        SAFE_INT_PE_DDONE       0x00000008      /* PE descriptor done */
8545 +#define        SAFE_INT_PE_ERROR       0x00000010      /* PE error */
8546 +#define        SAFE_INT_PE_ODONE       0x00000020      /* PE operation done */
8547 +
8548 +#define        SAFE_HI_CFG_PULSE       0x00000001      /* use pulse interrupt */
8549 +#define        SAFE_HI_CFG_LEVEL       0x00000000      /* use level interrupt */
8550 +#define        SAFE_HI_CFG_AUTOCLR     0x00000002      /* auto-clear pulse interrupt */
8551 +
8552 +#define        SAFE_ENDIAN_PASS        0x000000e4      /* straight pass-thru */
8553 +#define        SAFE_ENDIAN_SWAB        0x0000001b      /* swap bytes in 32-bit word */
8554 +
8555 +#define        SAFE_PE_DMACFG_PERESET  0x00000001      /* reset packet engine */
8556 +#define        SAFE_PE_DMACFG_PDRRESET 0x00000002      /* reset PDR counters/ptrs */
8557 +#define        SAFE_PE_DMACFG_SGRESET  0x00000004      /* reset scatter/gather cache */
8558 +#define        SAFE_PE_DMACFG_FSENA    0x00000008      /* enable failsafe reset */
8559 +#define        SAFE_PE_DMACFG_PEMODE   0x00000100      /* packet engine mode */
8560 +#define        SAFE_PE_DMACFG_SAPREC   0x00000200      /* SA precedes packet */
8561 +#define        SAFE_PE_DMACFG_PKFOLL   0x00000400      /* packet follows descriptor */
8562 +#define        SAFE_PE_DMACFG_GPRBID   0x00003000      /* gather particle ring busid */
8563 +#define        SAFE_PE_DMACFG_GPRPCI   0x00001000      /* PCI gather particle ring */
8564 +#define        SAFE_PE_DMACFG_SPRBID   0x0000c000      /* scatter part. ring busid */
8565 +#define        SAFE_PE_DMACFG_SPRPCI   0x00004000      /* PCI scatter part. ring */
8566 +#define        SAFE_PE_DMACFG_ESDESC   0x00010000      /* endian swap descriptors */
8567 +#define        SAFE_PE_DMACFG_ESSA     0x00020000      /* endian swap SA data */
8568 +#define        SAFE_PE_DMACFG_ESPACKET 0x00040000      /* endian swap packet data */
8569 +#define        SAFE_PE_DMACFG_ESPDESC  0x00080000      /* endian swap particle desc. */
8570 +#define        SAFE_PE_DMACFG_NOPDRUP  0x00100000      /* supp. PDR ownership update */
8571 +#define        SAFE_PD_EDMACFG_PCIMODE 0x01000000      /* PCI target mode */
8572 +
8573 +#define        SAFE_PE_DMASTAT_PEIDONE 0x00000001      /* PE core input done */
8574 +#define        SAFE_PE_DMASTAT_PEODONE 0x00000002      /* PE core output done */
8575 +#define        SAFE_PE_DMASTAT_ENCDONE 0x00000004      /* encryption done */
8576 +#define        SAFE_PE_DMASTAT_IHDONE  0x00000008      /* inner hash done */
8577 +#define        SAFE_PE_DMASTAT_OHDONE  0x00000010      /* outer hash (HMAC) done */
8578 +#define        SAFE_PE_DMASTAT_PADFLT  0x00000020      /* crypto pad fault */
8579 +#define        SAFE_PE_DMASTAT_ICVFLT  0x00000040      /* ICV fault */
8580 +#define        SAFE_PE_DMASTAT_SPIMIS  0x00000080      /* SPI mismatch */
8581 +#define        SAFE_PE_DMASTAT_CRYPTO  0x00000100      /* crypto engine timeout */
8582 +#define        SAFE_PE_DMASTAT_CQACT   0x00000200      /* command queue active */
8583 +#define        SAFE_PE_DMASTAT_IRACT   0x00000400      /* input request active */
8584 +#define        SAFE_PE_DMASTAT_ORACT   0x00000800      /* output request active */
8585 +#define        SAFE_PE_DMASTAT_PEISIZE 0x003ff000      /* PE input size:32-bit words */
8586 +#define        SAFE_PE_DMASTAT_PEOSIZE 0xffc00000      /* PE out. size:32-bit words */
8587 +
8588 +#define        SAFE_PE_RINGCFG_SIZE    0x000003ff      /* ring size (descriptors) */
8589 +#define        SAFE_PE_RINGCFG_OFFSET  0xffff0000      /* offset btw desc's (dwords) */
8590 +#define        SAFE_PE_RINGCFG_OFFSET_S        16
8591 +
8592 +#define        SAFE_PE_RINGPOLL_POLL   0x00000fff      /* polling frequency/divisor */
8593 +#define        SAFE_PE_RINGPOLL_RETRY  0x03ff0000      /* polling frequency/divisor */
8594 +#define        SAFE_PE_RINGPOLL_CONT   0x80000000      /* continuously poll */
8595 +
8596 +#define        SAFE_PE_IRNGSTAT_CQAVAIL 0x00000001     /* command queue available */
8597 +
8598 +#define        SAFE_PE_ERNGSTAT_NEXT   0x03ff0000      /* index of next packet desc. */
8599 +#define        SAFE_PE_ERNGSTAT_NEXT_S 16
8600 +
8601 +#define        SAFE_PE_IOTHRESH_INPUT  0x000003ff      /* input threshold (dwords) */
8602 +#define        SAFE_PE_IOTHRESH_OUTPUT 0x03ff0000      /* output threshold (dwords) */
8603 +
8604 +#define        SAFE_PE_PARTCFG_SIZE    0x0000ffff      /* scatter particle size */
8605 +#define        SAFE_PE_PARTCFG_GBURST  0x00030000      /* gather particle burst */
8606 +#define        SAFE_PE_PARTCFG_GBURST_2        0x00000000
8607 +#define        SAFE_PE_PARTCFG_GBURST_4        0x00010000
8608 +#define        SAFE_PE_PARTCFG_GBURST_8        0x00020000
8609 +#define        SAFE_PE_PARTCFG_GBURST_16       0x00030000
8610 +#define        SAFE_PE_PARTCFG_SBURST  0x000c0000      /* scatter particle burst */
8611 +#define        SAFE_PE_PARTCFG_SBURST_2        0x00000000
8612 +#define        SAFE_PE_PARTCFG_SBURST_4        0x00040000
8613 +#define        SAFE_PE_PARTCFG_SBURST_8        0x00080000
8614 +#define        SAFE_PE_PARTCFG_SBURST_16       0x000c0000
8615 +
8616 +#define        SAFE_PE_PARTSIZE_SCAT   0xffff0000      /* scatter particle ring size */
8617 +#define        SAFE_PE_PARTSIZE_GATH   0x0000ffff      /* gather particle ring size */
8618 +
8619 +#define        SAFE_CRYPTO_CTRL_3DES   0x00000001      /* enable 3DES support */
8620 +#define        SAFE_CRYPTO_CTRL_PKEY   0x00010000      /* enable public key support */
8621 +#define        SAFE_CRYPTO_CTRL_RNG    0x00020000      /* enable RNG support */
8622 +
8623 +#define        SAFE_DEVINFO_REV_MIN    0x0000000f      /* minor rev for chip */
8624 +#define        SAFE_DEVINFO_REV_MAJ    0x000000f0      /* major rev for chip */
8625 +#define        SAFE_DEVINFO_REV_MAJ_S  4
8626 +#define        SAFE_DEVINFO_DES        0x00000100      /* DES/3DES support present */
8627 +#define        SAFE_DEVINFO_ARC4       0x00000200      /* ARC4 support present */
8628 +#define        SAFE_DEVINFO_AES        0x00000400      /* AES support present */
8629 +#define        SAFE_DEVINFO_MD5        0x00001000      /* MD5 support present */
8630 +#define        SAFE_DEVINFO_SHA1       0x00002000      /* SHA-1 support present */
8631 +#define        SAFE_DEVINFO_RIPEMD     0x00004000      /* RIPEMD support present */
8632 +#define        SAFE_DEVINFO_DEFLATE    0x00010000      /* Deflate support present */
8633 +#define        SAFE_DEVINFO_SARAM      0x00100000      /* on-chip SA RAM present */
8634 +#define        SAFE_DEVINFO_EMIBUS     0x00200000      /* EMI bus present */
8635 +#define        SAFE_DEVINFO_PKEY       0x00400000      /* public key support present */
8636 +#define        SAFE_DEVINFO_RNG        0x00800000      /* RNG present */
8637 +
8638 +#define        SAFE_REV(_maj, _min)    (((_maj) << SAFE_DEVINFO_REV_MAJ_S) | (_min))
8639 +#define        SAFE_REV_MAJ(_chiprev) \
8640 +       (((_chiprev) & SAFE_DEVINFO_REV_MAJ) >> SAFE_DEVINFO_REV_MAJ_S)
8641 +#define        SAFE_REV_MIN(_chiprev)  ((_chiprev) & SAFE_DEVINFO_REV_MIN)
8642 +
8643 +#define        SAFE_PK_FUNC_MULT       0x00000001      /* Multiply function */
8644 +#define        SAFE_PK_FUNC_SQUARE     0x00000004      /* Square function */
8645 +#define        SAFE_PK_FUNC_ADD        0x00000010      /* Add function */
8646 +#define        SAFE_PK_FUNC_SUB        0x00000020      /* Subtract function */
8647 +#define        SAFE_PK_FUNC_LSHIFT     0x00000040      /* Left-shift function */
8648 +#define        SAFE_PK_FUNC_RSHIFT     0x00000080      /* Right-shift function */
8649 +#define        SAFE_PK_FUNC_DIV        0x00000100      /* Divide function */
8650 +#define        SAFE_PK_FUNC_CMP        0x00000400      /* Compare function */
8651 +#define        SAFE_PK_FUNC_COPY       0x00000800      /* Copy function */
8652 +#define        SAFE_PK_FUNC_EXP16      0x00002000      /* Exponentiate (4-bit ACT) */
8653 +#define        SAFE_PK_FUNC_EXP4       0x00004000      /* Exponentiate (2-bit ACT) */
8654 +#define        SAFE_PK_FUNC_RUN        0x00008000      /* start/status */
8655 +
8656 +#define        SAFE_RNG_STAT_BUSY      0x00000001      /* busy, data not valid */
8657 +
8658 +#define        SAFE_RNG_CTRL_PRE_LFSR  0x00000001      /* enable output pre-LFSR */
8659 +#define        SAFE_RNG_CTRL_TST_MODE  0x00000002      /* enable test mode */
8660 +#define        SAFE_RNG_CTRL_TST_RUN   0x00000004      /* start test state machine */
8661 +#define        SAFE_RNG_CTRL_ENA_RING1 0x00000008      /* test entropy oscillator #1 */
8662 +#define        SAFE_RNG_CTRL_ENA_RING2 0x00000010      /* test entropy oscillator #2 */
8663 +#define        SAFE_RNG_CTRL_DIS_ALARM 0x00000020      /* disable RNG alarm reports */
8664 +#define        SAFE_RNG_CTRL_TST_CLOCK 0x00000040      /* enable test clock */
8665 +#define        SAFE_RNG_CTRL_SHORTEN   0x00000080      /* shorten state timers */
8666 +#define        SAFE_RNG_CTRL_TST_ALARM 0x00000100      /* simulate alarm state */
8667 +#define        SAFE_RNG_CTRL_RST_LFSR  0x00000200      /* reset LFSR */
8668 +
8669 +/*
8670 + * Packet engine descriptor.  Note that d_csr is a copy of the
8671 + * SAFE_PE_CSR register and all definitions apply, and d_len
8672 + * is a copy of the SAFE_PE_LEN register and all definitions apply.
8673 + * d_src and d_len may point directly to contiguous data or to a
8674 + * list of ``particle descriptors'' when using scatter/gather i/o.
8675 + */
8676 +struct safe_desc {
8677 +       u_int32_t       d_csr;                  /* per-packet control/status */
8678 +       u_int32_t       d_src;                  /* source address */
8679 +       u_int32_t       d_dst;                  /* destination address */
8680 +       u_int32_t       d_sa;                   /* SA address */
8681 +       u_int32_t       d_len;                  /* length, bypass, status */
8682 +};
8683 +
8684 +/*
8685 + * Scatter/Gather particle descriptor.
8686 + *
8687 + * NB: scatter descriptors do not specify a size; this is fixed
8688 + *     by the setting of the SAFE_PE_PARTCFG register.
8689 + */
8690 +struct safe_pdesc {
8691 +       u_int32_t       pd_addr;                /* particle address */
8692 +#ifdef __BIG_ENDIAN
8693 +       u_int16_t       pd_flags;               /* control word */
8694 +       u_int16_t       pd_size;                /* particle size (bytes) */
8695 +#else
8696 +       u_int16_t       pd_flags;               /* control word */
8697 +       u_int16_t       pd_size;                /* particle size (bytes) */
8698 +#endif
8699 +};
8700 +
8701 +#define        SAFE_PD_READY   0x0001                  /* ready for processing */
8702 +#define        SAFE_PD_DONE    0x0002                  /* h/w completed processing */
8703 +
8704 +/*
8705 + * Security Association (SA) Record (Rev 1).  One of these is
8706 + * required for each operation processed by the packet engine.
8707 + */
8708 +struct safe_sarec {
8709 +       u_int32_t       sa_cmd0;
8710 +       u_int32_t       sa_cmd1;
8711 +       u_int32_t       sa_resv0;
8712 +       u_int32_t       sa_resv1;
8713 +       u_int32_t       sa_key[8];              /* DES/3DES/AES key */
8714 +       u_int32_t       sa_indigest[5];         /* inner digest */
8715 +       u_int32_t       sa_outdigest[5];        /* outer digest */
8716 +       u_int32_t       sa_spi;                 /* SPI */
8717 +       u_int32_t       sa_seqnum;              /* sequence number */
8718 +       u_int32_t       sa_seqmask[2];          /* sequence number mask */
8719 +       u_int32_t       sa_resv2;
8720 +       u_int32_t       sa_staterec;            /* address of state record */
8721 +       u_int32_t       sa_resv3[2];
8722 +       u_int32_t       sa_samgmt0;             /* SA management field 0 */
8723 +       u_int32_t       sa_samgmt1;             /* SA management field 1 */
8724 +};
8725 +
8726 +#define        SAFE_SA_CMD0_OP         0x00000007      /* operation code */
8727 +#define        SAFE_SA_CMD0_OP_CRYPT   0x00000000      /* encrypt/decrypt (basic) */
8728 +#define        SAFE_SA_CMD0_OP_BOTH    0x00000001      /* encrypt-hash/hash-decrypto */
8729 +#define        SAFE_SA_CMD0_OP_HASH    0x00000003      /* hash (outbound-only) */
8730 +#define        SAFE_SA_CMD0_OP_ESP     0x00000000      /* ESP in/out (proto) */
8731 +#define        SAFE_SA_CMD0_OP_AH      0x00000001      /* AH in/out (proto) */
8732 +#define        SAFE_SA_CMD0_INBOUND    0x00000008      /* inbound operation */
8733 +#define        SAFE_SA_CMD0_OUTBOUND   0x00000000      /* outbound operation */
8734 +#define        SAFE_SA_CMD0_GROUP      0x00000030      /* operation group */
8735 +#define        SAFE_SA_CMD0_BASIC      0x00000000      /* basic operation */
8736 +#define        SAFE_SA_CMD0_PROTO      0x00000010      /* protocol/packet operation */
8737 +#define        SAFE_SA_CMD0_BUNDLE     0x00000020      /* bundled operation (resvd) */
8738 +#define        SAFE_SA_CMD0_PAD        0x000000c0      /* crypto pad method */
8739 +#define        SAFE_SA_CMD0_PAD_IPSEC  0x00000000      /* IPsec padding */
8740 +#define        SAFE_SA_CMD0_PAD_PKCS7  0x00000040      /* PKCS#7 padding */
8741 +#define        SAFE_SA_CMD0_PAD_CONS   0x00000080      /* constant padding */
8742 +#define        SAFE_SA_CMD0_PAD_ZERO   0x000000c0      /* zero padding */
8743 +#define        SAFE_SA_CMD0_CRYPT_ALG  0x00000f00      /* symmetric crypto algorithm */
8744 +#define        SAFE_SA_CMD0_DES        0x00000000      /* DES crypto algorithm */
8745 +#define        SAFE_SA_CMD0_3DES       0x00000100      /* 3DES crypto algorithm */
8746 +#define        SAFE_SA_CMD0_AES        0x00000300      /* AES crypto algorithm */
8747 +#define        SAFE_SA_CMD0_CRYPT_NULL 0x00000f00      /* null crypto algorithm */
8748 +#define        SAFE_SA_CMD0_HASH_ALG   0x0000f000      /* hash algorithm */
8749 +#define        SAFE_SA_CMD0_MD5        0x00000000      /* MD5 hash algorithm */
8750 +#define        SAFE_SA_CMD0_SHA1       0x00001000      /* SHA-1 hash algorithm */
8751 +#define        SAFE_SA_CMD0_HASH_NULL  0x0000f000      /* null hash algorithm */
8752 +#define        SAFE_SA_CMD0_HDR_PROC   0x00080000      /* header processing */
8753 +#define        SAFE_SA_CMD0_IBUSID     0x00300000      /* input bus id */
8754 +#define        SAFE_SA_CMD0_IPCI       0x00100000      /* PCI input bus id */
8755 +#define        SAFE_SA_CMD0_OBUSID     0x00c00000      /* output bus id */
8756 +#define        SAFE_SA_CMD0_OPCI       0x00400000      /* PCI output bus id */
8757 +#define        SAFE_SA_CMD0_IVLD       0x03000000      /* IV loading */
8758 +#define        SAFE_SA_CMD0_IVLD_NONE  0x00000000      /* IV no load (reuse) */
8759 +#define        SAFE_SA_CMD0_IVLD_IBUF  0x01000000      /* IV load from input buffer */
8760 +#define        SAFE_SA_CMD0_IVLD_STATE 0x02000000      /* IV load from state */
8761 +#define        SAFE_SA_CMD0_HSLD       0x0c000000      /* hash state loading */
8762 +#define        SAFE_SA_CMD0_HSLD_SA    0x00000000      /* hash state load from SA */
8763 +#define        SAFE_SA_CMD0_HSLD_STATE 0x08000000      /* hash state load from state */
8764 +#define        SAFE_SA_CMD0_HSLD_NONE  0x0c000000      /* hash state no load */
8765 +#define        SAFE_SA_CMD0_SAVEIV     0x10000000      /* save IV */
8766 +#define        SAFE_SA_CMD0_SAVEHASH   0x20000000      /* save hash state */
8767 +#define        SAFE_SA_CMD0_IGATHER    0x40000000      /* input gather */
8768 +#define        SAFE_SA_CMD0_OSCATTER   0x80000000      /* output scatter */
8769 +
8770 +#define        SAFE_SA_CMD1_HDRCOPY    0x00000002      /* copy header to output */
8771 +#define        SAFE_SA_CMD1_PAYCOPY    0x00000004      /* copy payload to output */
8772 +#define        SAFE_SA_CMD1_PADCOPY    0x00000008      /* copy pad to output */
8773 +#define        SAFE_SA_CMD1_IPV4       0x00000000      /* IPv4 protocol */
8774 +#define        SAFE_SA_CMD1_IPV6       0x00000010      /* IPv6 protocol */
8775 +#define        SAFE_SA_CMD1_MUTABLE    0x00000020      /* mutable bit processing */
8776 +#define        SAFE_SA_CMD1_SRBUSID    0x000000c0      /* state record bus id */
8777 +#define        SAFE_SA_CMD1_SRPCI      0x00000040      /* state record from PCI */
8778 +#define        SAFE_SA_CMD1_CRMODE     0x00000300      /* crypto mode */
8779 +#define        SAFE_SA_CMD1_ECB        0x00000000      /* ECB crypto mode */
8780 +#define        SAFE_SA_CMD1_CBC        0x00000100      /* CBC crypto mode */
8781 +#define        SAFE_SA_CMD1_OFB        0x00000200      /* OFB crypto mode */
8782 +#define        SAFE_SA_CMD1_CFB        0x00000300      /* CFB crypto mode */
8783 +#define        SAFE_SA_CMD1_CRFEEDBACK 0x00000c00      /* crypto feedback mode */
8784 +#define        SAFE_SA_CMD1_64BIT      0x00000000      /* 64-bit crypto feedback */
8785 +#define        SAFE_SA_CMD1_8BIT       0x00000400      /* 8-bit crypto feedback */
8786 +#define        SAFE_SA_CMD1_1BIT       0x00000800      /* 1-bit crypto feedback */
8787 +#define        SAFE_SA_CMD1_128BIT     0x00000c00      /* 128-bit crypto feedback */
8788 +#define        SAFE_SA_CMD1_OPTIONS    0x00001000      /* HMAC/options mutable bit */
8789 +#define        SAFE_SA_CMD1_HMAC       SAFE_SA_CMD1_OPTIONS
8790 +#define        SAFE_SA_CMD1_SAREV1     0x00008000      /* SA Revision 1 */
8791 +#define        SAFE_SA_CMD1_OFFSET     0x00ff0000      /* hash/crypto offset(dwords) */
8792 +#define        SAFE_SA_CMD1_OFFSET_S   16
8793 +#define        SAFE_SA_CMD1_AESKEYLEN  0x0f000000      /* AES key length */
8794 +#define        SAFE_SA_CMD1_AES128     0x02000000      /* 128-bit AES key */
8795 +#define        SAFE_SA_CMD1_AES192     0x03000000      /* 192-bit AES key */
8796 +#define        SAFE_SA_CMD1_AES256     0x04000000      /* 256-bit AES key */
8797 +
8798 +/* 
8799 + * Security Associate State Record (Rev 1).
8800 + */
8801 +struct safe_sastate {
8802 +       u_int32_t       sa_saved_iv[4];         /* saved IV (DES/3DES/AES) */
8803 +       u_int32_t       sa_saved_hashbc;        /* saved hash byte count */
8804 +       u_int32_t       sa_saved_indigest[5];   /* saved inner digest */
8805 +};
8806 +#endif /* _SAFE_SAFEREG_H_ */
8807 --- /dev/null
8808 +++ b/crypto/ocf/safe/safevar.h
8809 @@ -0,0 +1,230 @@
8810 +/*-
8811 + * The linux port of this code done by David McCullough
8812 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
8813 + * The license and original author are listed below.
8814 + *
8815 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
8816 + * Copyright (c) 2003 Global Technology Associates, Inc.
8817 + * All rights reserved.
8818 + *
8819 + * Redistribution and use in source and binary forms, with or without
8820 + * modification, are permitted provided that the following conditions
8821 + * are met:
8822 + * 1. Redistributions of source code must retain the above copyright
8823 + *    notice, this list of conditions and the following disclaimer.
8824 + * 2. Redistributions in binary form must reproduce the above copyright
8825 + *    notice, this list of conditions and the following disclaimer in the
8826 + *    documentation and/or other materials provided with the distribution.
8827 + *
8828 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
8829 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8830 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8831 + * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
8832 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
8833 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
8834 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
8835 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
8836 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
8837 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
8838 + * SUCH DAMAGE.
8839 + *
8840 + * $FreeBSD: src/sys/dev/safe/safevar.h,v 1.2 2006/05/17 18:34:26 pjd Exp $
8841 + */
8842 +#ifndef _SAFE_SAFEVAR_H_
8843 +#define        _SAFE_SAFEVAR_H_
8844 +
8845 +/* Maximum queue length */
8846 +#ifndef SAFE_MAX_NQUEUE
8847 +#define SAFE_MAX_NQUEUE        60
8848 +#endif
8849 +
8850 +#define        SAFE_MAX_PART           64      /* Maximum scatter/gather depth */
8851 +#define        SAFE_DMA_BOUNDARY       0       /* No boundary for source DMA ops */
8852 +#define        SAFE_MAX_DSIZE          2048 /* MCLBYTES Fixed scatter particle size */
8853 +#define        SAFE_MAX_SSIZE          0x0ffff /* Maximum gather particle size */
8854 +#define        SAFE_MAX_DMA            0xfffff /* Maximum PE operand size (20 bits) */
8855 +/* total src+dst particle descriptors */
8856 +#define        SAFE_TOTAL_DPART        (SAFE_MAX_NQUEUE * SAFE_MAX_PART)
8857 +#define        SAFE_TOTAL_SPART        (SAFE_MAX_NQUEUE * SAFE_MAX_PART)
8858 +
8859 +#define        SAFE_RNG_MAXBUFSIZ      128     /* 32-bit words */
8860 +
8861 +#define        SAFE_CARD(sid)          (((sid) & 0xf0000000) >> 28)
8862 +#define        SAFE_SESSION(sid)       ( (sid) & 0x0fffffff)
8863 +#define        SAFE_SID(crd, sesn)     (((crd) << 28) | ((sesn) & 0x0fffffff))
8864 +
8865 +#define SAFE_DEF_RTY           0xff    /* PCI Retry Timeout */
8866 +#define SAFE_DEF_TOUT          0xff    /* PCI TRDY Timeout */
8867 +#define SAFE_DEF_CACHELINE     0x01    /* Cache Line setting */
8868 +
8869 +#ifdef __KERNEL__
8870 +/*
8871 + * State associated with the allocation of each chunk
8872 + * of memory setup for DMA.
8873 + */
8874 +struct safe_dma_alloc {
8875 +       dma_addr_t              dma_paddr;
8876 +       void                    *dma_vaddr;
8877 +};
8878 +
8879 +/*
8880 + * Cryptographic operand state.  One of these exists for each
8881 + * source and destination operand passed in from the crypto
8882 + * subsystem.  When possible source and destination operands
8883 + * refer to the same memory.  More often they are distinct.
8884 + * We track the virtual address of each operand as well as
8885 + * where each is mapped for DMA.
8886 + */
8887 +struct safe_operand {
8888 +       union {
8889 +               struct sk_buff *skb;
8890 +               struct uio *io;
8891 +       } u;
8892 +       void                    *map;
8893 +       int                             mapsize;        /* total number of bytes in segs */
8894 +       struct {
8895 +               dma_addr_t      ds_addr;
8896 +               int                     ds_len;
8897 +               int                     ds_tlen;
8898 +       } segs[SAFE_MAX_PART];
8899 +       int                             nsegs;
8900 +};
8901 +
8902 +/*
8903 + * Packet engine ring entry and cryptographic operation state.
8904 + * The packet engine requires a ring of descriptors that contain
8905 + * pointers to various cryptographic state.  However the ring
8906 + * configuration register allows you to specify an arbitrary size
8907 + * for ring entries.  We use this feature to collect most of the
8908 + * state for each cryptographic request into one spot.  Other than
8909 + * ring entries only the ``particle descriptors'' (scatter/gather
8910 + * lists) and the actual operand data are kept separate.  The
8911 + * particle descriptors must also be organized in rings.  The
8912 + * operand data can be located arbitrarily (modulo alignment constraints).
8913 + *
8914 + * Note that the descriptor ring is mapped onto the PCI bus so
8915 + * the hardware can DMA data.  This means the entire ring must be
8916 + * contiguous.
8917 + */
8918 +struct safe_ringentry {
8919 +       struct safe_desc        re_desc;        /* command descriptor */
8920 +       struct safe_sarec       re_sa;          /* SA record */
8921 +       struct safe_sastate     re_sastate;     /* SA state record */
8922 +
8923 +       struct cryptop          *re_crp;        /* crypto operation */
8924 +
8925 +       struct safe_operand     re_src;         /* source operand */
8926 +       struct safe_operand     re_dst;         /* destination operand */
8927 +
8928 +       int                     re_sesn;        /* crypto session ID */
8929 +       int                     re_flags;
8930 +#define        SAFE_QFLAGS_COPYOUTIV   0x1             /* copy back on completion */
8931 +#define        SAFE_QFLAGS_COPYOUTICV  0x2             /* copy back on completion */
8932 +};
8933 +
8934 +#define        re_src_skb      re_src.u.skb
8935 +#define        re_src_io       re_src.u.io
8936 +#define        re_src_map      re_src.map
8937 +#define        re_src_nsegs    re_src.nsegs
8938 +#define        re_src_segs     re_src.segs
8939 +#define        re_src_mapsize  re_src.mapsize
8940 +
8941 +#define        re_dst_skb      re_dst.u.skb
8942 +#define        re_dst_io       re_dst.u.io
8943 +#define        re_dst_map      re_dst.map
8944 +#define        re_dst_nsegs    re_dst.nsegs
8945 +#define        re_dst_segs     re_dst.segs
8946 +#define        re_dst_mapsize  re_dst.mapsize
8947 +
8948 +struct rndstate_test;
8949 +
8950 +struct safe_session {
8951 +       u_int32_t       ses_used;
8952 +       u_int32_t       ses_klen;               /* key length in bits */
8953 +       u_int32_t       ses_key[8];             /* DES/3DES/AES key */
8954 +       u_int32_t       ses_mlen;               /* hmac length in bytes */
8955 +       u_int32_t       ses_hminner[5];         /* hmac inner state */
8956 +       u_int32_t       ses_hmouter[5];         /* hmac outer state */
8957 +       u_int32_t       ses_iv[4];              /* DES/3DES/AES iv */
8958 +};
8959 +
8960 +struct safe_pkq {
8961 +       struct list_head        pkq_list;
8962 +       struct cryptkop         *pkq_krp;
8963 +};
8964 +
8965 +struct safe_softc {
8966 +       softc_device_decl       sc_dev;
8967 +       u32                     sc_irq;
8968 +
8969 +       struct pci_dev          *sc_pcidev;
8970 +       ocf_iomem_t             sc_base_addr;
8971 +
8972 +       u_int                   sc_chiprev;     /* major/minor chip revision */
8973 +       int                     sc_flags;       /* device specific flags */
8974 +#define        SAFE_FLAGS_KEY          0x01            /* has key accelerator */
8975 +#define        SAFE_FLAGS_RNG          0x02            /* hardware rng */
8976 +       int                     sc_suspended;
8977 +       int                     sc_needwakeup;  /* notify crypto layer */
8978 +       int32_t                 sc_cid;         /* crypto tag */
8979 +
8980 +       struct safe_dma_alloc   sc_ringalloc;   /* PE ring allocation state */
8981 +       struct safe_ringentry   *sc_ring;       /* PE ring */
8982 +       struct safe_ringentry   *sc_ringtop;    /* PE ring top */
8983 +       struct safe_ringentry   *sc_front;      /* next free entry */
8984 +       struct safe_ringentry   *sc_back;       /* next pending entry */
8985 +       int                     sc_nqchip;      /* # passed to chip */
8986 +       spinlock_t              sc_ringmtx;     /* PE ring lock */
8987 +       struct safe_pdesc       *sc_spring;     /* src particle ring */
8988 +       struct safe_pdesc       *sc_springtop;  /* src particle ring top */
8989 +       struct safe_pdesc       *sc_spfree;     /* next free src particle */
8990 +       struct safe_dma_alloc   sc_spalloc;     /* src particle ring state */
8991 +       struct safe_pdesc       *sc_dpring;     /* dest particle ring */
8992 +       struct safe_pdesc       *sc_dpringtop;  /* dest particle ring top */
8993 +       struct safe_pdesc       *sc_dpfree;     /* next free dest particle */
8994 +       struct safe_dma_alloc   sc_dpalloc;     /* dst particle ring state */
8995 +       int                     sc_nsessions;   /* # of sessions */
8996 +       struct safe_session     *sc_sessions;   /* sessions */
8997 +
8998 +       struct timer_list       sc_pkto;        /* PK polling */
8999 +       spinlock_t              sc_pkmtx;       /* PK lock */
9000 +       struct list_head        sc_pkq;         /* queue of PK requests */
9001 +       struct safe_pkq         *sc_pkq_cur;    /* current processing request */
9002 +       u_int32_t               sc_pk_reslen, sc_pk_resoff;
9003 +
9004 +       int                     sc_max_dsize;   /* maximum safe DMA size */
9005 +};
9006 +#endif /* __KERNEL__ */
9007 +
9008 +struct safe_stats {
9009 +       u_int64_t st_ibytes;
9010 +       u_int64_t st_obytes;
9011 +       u_int32_t st_ipackets;
9012 +       u_int32_t st_opackets;
9013 +       u_int32_t st_invalid;           /* invalid argument */
9014 +       u_int32_t st_badsession;        /* invalid session id */
9015 +       u_int32_t st_badflags;          /* flags indicate !(mbuf | uio) */
9016 +       u_int32_t st_nodesc;            /* op submitted w/o descriptors */
9017 +       u_int32_t st_badalg;            /* unsupported algorithm */
9018 +       u_int32_t st_ringfull;          /* PE descriptor ring full */
9019 +       u_int32_t st_peoperr;           /* PE marked error */
9020 +       u_int32_t st_dmaerr;            /* PE DMA error */
9021 +       u_int32_t st_bypasstoobig;      /* bypass > 96 bytes */
9022 +       u_int32_t st_skipmismatch;      /* enc part begins before auth part */
9023 +       u_int32_t st_lenmismatch;       /* enc length different auth length */
9024 +       u_int32_t st_coffmisaligned;    /* crypto offset not 32-bit aligned */
9025 +       u_int32_t st_cofftoobig;        /* crypto offset > 255 words */
9026 +       u_int32_t st_iovmisaligned;     /* iov op not aligned */
9027 +       u_int32_t st_iovnotuniform;     /* iov op not suitable */
9028 +       u_int32_t st_unaligned;         /* unaligned src caused copy */
9029 +       u_int32_t st_notuniform;        /* non-uniform src caused copy */
9030 +       u_int32_t st_nomap;             /* bus_dmamap_create failed */
9031 +       u_int32_t st_noload;            /* bus_dmamap_load_* failed */
9032 +       u_int32_t st_nombuf;            /* MGET* failed */
9033 +       u_int32_t st_nomcl;             /* MCLGET* failed */
9034 +       u_int32_t st_maxqchip;          /* max mcr1 ops out for processing */
9035 +       u_int32_t st_rng;               /* RNG requests */
9036 +       u_int32_t st_rngalarm;          /* RNG alarm requests */
9037 +       u_int32_t st_noicvcopy;         /* ICV data copies suppressed */
9038 +};
9039 +#endif /* _SAFE_SAFEVAR_H_ */
9040 --- /dev/null
9041 +++ b/crypto/ocf/crypto.c
9042 @@ -0,0 +1,1741 @@
9043 +/*-
9044 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
9045 + * Copyright (C) 2006-2007 David McCullough
9046 + * Copyright (C) 2004-2005 Intel Corporation.
9047 + * The license and original author are listed below.
9048 + *
9049 + * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
9050 + *
9051 + * Redistribution and use in source and binary forms, with or without
9052 + * modification, are permitted provided that the following conditions
9053 + * are met:
9054 + * 1. Redistributions of source code must retain the above copyright
9055 + *    notice, this list of conditions and the following disclaimer.
9056 + * 2. Redistributions in binary form must reproduce the above copyright
9057 + *    notice, this list of conditions and the following disclaimer in the
9058 + *    documentation and/or other materials provided with the distribution.
9059 + *
9060 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
9061 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
9062 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
9063 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
9064 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
9065 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
9066 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
9067 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9068 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
9069 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9070 + */
9071 +
9072 +#if 0
9073 +#include <sys/cdefs.h>
9074 +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.27 2007/03/21 03:42:51 sam Exp $");
9075 +#endif
9076 +
9077 +/*
9078 + * Cryptographic Subsystem.
9079 + *
9080 + * This code is derived from the Openbsd Cryptographic Framework (OCF)
9081 + * that has the copyright shown below.  Very little of the original
9082 + * code remains.
9083 + */
9084 +/*-
9085 + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
9086 + *
9087 + * This code was written by Angelos D. Keromytis in Athens, Greece, in
9088 + * February 2000. Network Security Technologies Inc. (NSTI) kindly
9089 + * supported the development of this code.
9090 + *
9091 + * Copyright (c) 2000, 2001 Angelos D. Keromytis
9092 + *
9093 + * Permission to use, copy, and modify this software with or without fee
9094 + * is hereby granted, provided that this entire notice is included in
9095 + * all source code copies of any software which is or includes a copy or
9096 + * modification of this software.
9097 + *
9098 + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
9099 + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
9100 + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
9101 + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
9102 + * PURPOSE.
9103 + *
9104 +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.16 2005/01/07 02:29:16 imp Exp $");
9105 + */
9106 +
9107 +
9108 +#ifndef AUTOCONF_INCLUDED
9109 +#include <linux/config.h>
9110 +#endif
9111 +#include <linux/module.h>
9112 +#include <linux/init.h>
9113 +#include <linux/list.h>
9114 +#include <linux/slab.h>
9115 +#include <linux/wait.h>
9116 +#include <linux/sched.h>
9117 +#include <linux/spinlock.h>
9118 +#include <linux/version.h>
9119 +#include <cryptodev.h>
9120 +
9121 +/*
9122 + * keep track of whether or not we have been initialised, a big
9123 + * issue if we are linked into the kernel and a driver gets started before
9124 + * us
9125 + */
9126 +static int crypto_initted = 0;
9127 +
9128 +/*
9129 + * Crypto drivers register themselves by allocating a slot in the
9130 + * crypto_drivers table with crypto_get_driverid() and then registering
9131 + * each algorithm they support with crypto_register() and crypto_kregister().
9132 + */
9133 +
9134 +/*
9135 + * lock on driver table
9136 + * we track its state as spin_is_locked does not do anything on non-SMP boxes
9137 + */
9138 +static spinlock_t      crypto_drivers_lock;
9139 +static int                     crypto_drivers_locked;          /* for non-SMP boxes */
9140 +
9141 +#define        CRYPTO_DRIVER_LOCK() \
9142 +                       ({ \
9143 +                               spin_lock_irqsave(&crypto_drivers_lock, d_flags); \
9144 +                               crypto_drivers_locked = 1; \
9145 +                               dprintk("%s,%d: DRIVER_LOCK()\n", __FILE__, __LINE__); \
9146 +                        })
9147 +#define        CRYPTO_DRIVER_UNLOCK() \
9148 +                       ({ \
9149 +                               dprintk("%s,%d: DRIVER_UNLOCK()\n", __FILE__, __LINE__); \
9150 +                               crypto_drivers_locked = 0; \
9151 +                               spin_unlock_irqrestore(&crypto_drivers_lock, d_flags); \
9152 +                        })
9153 +#define        CRYPTO_DRIVER_ASSERT() \
9154 +                       ({ \
9155 +                               if (!crypto_drivers_locked) { \
9156 +                                       dprintk("%s,%d: DRIVER_ASSERT!\n", __FILE__, __LINE__); \
9157 +                               } \
9158 +                        })
9159 +
9160 +/*
9161 + * Crypto device/driver capabilities structure.
9162 + *
9163 + * Synchronization:
9164 + * (d) - protected by CRYPTO_DRIVER_LOCK()
9165 + * (q) - protected by CRYPTO_Q_LOCK()
9166 + * Not tagged fields are read-only.
9167 + */
9168 +struct cryptocap {
9169 +       device_t        cc_dev;                 /* (d) device/driver */
9170 +       u_int32_t       cc_sessions;            /* (d) # of sessions */
9171 +       u_int32_t       cc_koperations;         /* (d) # of asym operations */
9172 +       /*
9173 +        * Largest possible operator length (in bits) for each type of
9174 +        * encryption algorithm. XXX not used
9175 +        */
9176 +       u_int16_t       cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
9177 +       u_int8_t        cc_alg[CRYPTO_ALGORITHM_MAX + 1];
9178 +       u_int8_t        cc_kalg[CRK_ALGORITHM_MAX + 1];
9179 +
9180 +       int             cc_flags;               /* (d) flags */
9181 +#define CRYPTOCAP_F_CLEANUP    0x80000000      /* needs resource cleanup */
9182 +       int             cc_qblocked;            /* (q) symmetric q blocked */
9183 +       int             cc_kqblocked;           /* (q) asymmetric q blocked */
9184 +};
9185 +static struct cryptocap *crypto_drivers = NULL;
9186 +static int crypto_drivers_num = 0;
9187 +
9188 +/*
9189 + * There are two queues for crypto requests; one for symmetric (e.g.
9190 + * cipher) operations and one for asymmetric (e.g. MOD) operations.
9191 + * A single mutex is used to lock access to both queues.  We could
9192 + * have one per-queue but having one simplifies handling of block/unblock
9193 + * operations.
9194 + */
9195 +static int crp_sleep = 0;
9196 +static LIST_HEAD(crp_q);               /* request queues */
9197 +static LIST_HEAD(crp_kq);
9198 +
9199 +static spinlock_t crypto_q_lock;
9200 +
9201 +int crypto_all_qblocked = 0;  /* protect with Q_LOCK */
9202 +module_param(crypto_all_qblocked, int, 0444);
9203 +MODULE_PARM_DESC(crypto_all_qblocked, "Are all crypto queues blocked");
9204 +
9205 +int crypto_all_kqblocked = 0; /* protect with Q_LOCK */
9206 +module_param(crypto_all_kqblocked, int, 0444);
9207 +MODULE_PARM_DESC(crypto_all_kqblocked, "Are all asym crypto queues blocked");
9208 +
9209 +#define        CRYPTO_Q_LOCK() \
9210 +                       ({ \
9211 +                               spin_lock_irqsave(&crypto_q_lock, q_flags); \
9212 +                               dprintk("%s,%d: Q_LOCK()\n", __FILE__, __LINE__); \
9213 +                        })
9214 +#define        CRYPTO_Q_UNLOCK() \
9215 +                       ({ \
9216 +                               dprintk("%s,%d: Q_UNLOCK()\n", __FILE__, __LINE__); \
9217 +                               spin_unlock_irqrestore(&crypto_q_lock, q_flags); \
9218 +                        })
9219 +
9220 +/*
9221 + * There are two queues for processing completed crypto requests; one
9222 + * for the symmetric and one for the asymmetric ops.  We only need one
9223 + * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
9224 + * mutex is used to lock access to both queues.  Note that this lock
9225 + * must be separate from the lock on request queues to insure driver
9226 + * callbacks don't generate lock order reversals.
9227 + */
9228 +static LIST_HEAD(crp_ret_q);           /* callback queues */
9229 +static LIST_HEAD(crp_ret_kq);
9230 +
9231 +static spinlock_t crypto_ret_q_lock;
9232 +#define        CRYPTO_RETQ_LOCK() \
9233 +                       ({ \
9234 +                               spin_lock_irqsave(&crypto_ret_q_lock, r_flags); \
9235 +                               dprintk("%s,%d: RETQ_LOCK\n", __FILE__, __LINE__); \
9236 +                        })
9237 +#define        CRYPTO_RETQ_UNLOCK() \
9238 +                       ({ \
9239 +                               dprintk("%s,%d: RETQ_UNLOCK\n", __FILE__, __LINE__); \
9240 +                               spin_unlock_irqrestore(&crypto_ret_q_lock, r_flags); \
9241 +                        })
9242 +#define        CRYPTO_RETQ_EMPTY()     (list_empty(&crp_ret_q) && list_empty(&crp_ret_kq))
9243 +
9244 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
9245 +static kmem_cache_t *cryptop_zone;
9246 +static kmem_cache_t *cryptodesc_zone;
9247 +#else
9248 +static struct kmem_cache *cryptop_zone;
9249 +static struct kmem_cache *cryptodesc_zone;
9250 +#endif
9251 +
9252 +#define debug crypto_debug
9253 +int crypto_debug = 0;
9254 +module_param(crypto_debug, int, 0644);
9255 +MODULE_PARM_DESC(crypto_debug, "Enable debug");
9256 +EXPORT_SYMBOL(crypto_debug);
9257 +
9258 +/*
9259 + * Maximum number of outstanding crypto requests before we start
9260 + * failing requests.  We need this to prevent DOS when too many
9261 + * requests are arriving for us to keep up.  Otherwise we will
9262 + * run the system out of memory.  Since crypto is slow,  we are
9263 + * usually the bottleneck that needs to say, enough is enough.
9264 + *
9265 + * We cannot print errors when this condition occurs,  we are already too
9266 + * slow,  printing anything will just kill us
9267 + */
9268 +
9269 +static int crypto_q_cnt = 0;
9270 +module_param(crypto_q_cnt, int, 0444);
9271 +MODULE_PARM_DESC(crypto_q_cnt,
9272 +               "Current number of outstanding crypto requests");
9273 +
9274 +static int crypto_q_max = 1000;
9275 +module_param(crypto_q_max, int, 0644);
9276 +MODULE_PARM_DESC(crypto_q_max,
9277 +               "Maximum number of outstanding crypto requests");
9278 +
9279 +#define bootverbose crypto_verbose
9280 +static int crypto_verbose = 0;
9281 +module_param(crypto_verbose, int, 0644);
9282 +MODULE_PARM_DESC(crypto_verbose,
9283 +               "Enable verbose crypto startup");
9284 +
9285 +int    crypto_usercrypto = 1;  /* userland may do crypto reqs */
9286 +module_param(crypto_usercrypto, int, 0644);
9287 +MODULE_PARM_DESC(crypto_usercrypto,
9288 +          "Enable/disable user-mode access to crypto support");
9289 +
9290 +int    crypto_userasymcrypto = 1;      /* userland may do asym crypto reqs */
9291 +module_param(crypto_userasymcrypto, int, 0644);
9292 +MODULE_PARM_DESC(crypto_userasymcrypto,
9293 +          "Enable/disable user-mode access to asymmetric crypto support");
9294 +
9295 +int    crypto_devallowsoft = 0;        /* only use hardware crypto */
9296 +module_param(crypto_devallowsoft, int, 0644);
9297 +MODULE_PARM_DESC(crypto_devallowsoft,
9298 +          "Enable/disable use of software crypto support");
9299 +
9300 +static pid_t   cryptoproc = (pid_t) -1;
9301 +static struct  completion cryptoproc_exited;
9302 +static DECLARE_WAIT_QUEUE_HEAD(cryptoproc_wait);
9303 +static pid_t   cryptoretproc = (pid_t) -1;
9304 +static struct  completion cryptoretproc_exited;
9305 +static DECLARE_WAIT_QUEUE_HEAD(cryptoretproc_wait);
9306 +
9307 +static int crypto_proc(void *arg);
9308 +static int crypto_ret_proc(void *arg);
9309 +static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
9310 +static int crypto_kinvoke(struct cryptkop *krp, int flags);
9311 +static void crypto_exit(void);
9312 +static  int crypto_init(void);
9313 +
9314 +static struct cryptostats cryptostats;
9315 +
9316 +static struct cryptocap *
9317 +crypto_checkdriver(u_int32_t hid)
9318 +{
9319 +       if (crypto_drivers == NULL)
9320 +               return NULL;
9321 +       return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
9322 +}
9323 +
9324 +/*
9325 + * Compare a driver's list of supported algorithms against another
9326 + * list; return non-zero if all algorithms are supported.
9327 + */
9328 +static int
9329 +driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
9330 +{
9331 +       const struct cryptoini *cr;
9332 +
9333 +       /* See if all the algorithms are supported. */
9334 +       for (cr = cri; cr; cr = cr->cri_next)
9335 +               if (cap->cc_alg[cr->cri_alg] == 0)
9336 +                       return 0;
9337 +       return 1;
9338 +}
9339 +
9340 +/*
9341 + * Select a driver for a new session that supports the specified
9342 + * algorithms and, optionally, is constrained according to the flags.
9343 + * The algorithm we use here is pretty stupid; just use the
9344 + * first driver that supports all the algorithms we need. If there
9345 + * are multiple drivers we choose the driver with the fewest active
9346 + * sessions.  We prefer hardware-backed drivers to software ones.
9347 + *
9348 + * XXX We need more smarts here (in real life too, but that's
9349 + * XXX another story altogether).
9350 + */
9351 +static struct cryptocap *
9352 +crypto_select_driver(const struct cryptoini *cri, int flags)
9353 +{
9354 +       struct cryptocap *cap, *best;
9355 +       int match, hid;
9356 +
9357 +       CRYPTO_DRIVER_ASSERT();
9358 +
9359 +       /*
9360 +        * Look first for hardware crypto devices if permitted.
9361 +        */
9362 +       if (flags & CRYPTOCAP_F_HARDWARE)
9363 +               match = CRYPTOCAP_F_HARDWARE;
9364 +       else
9365 +               match = CRYPTOCAP_F_SOFTWARE;
9366 +       best = NULL;
9367 +again:
9368 +       for (hid = 0; hid < crypto_drivers_num; hid++) {
9369 +               cap = &crypto_drivers[hid];
9370 +               /*
9371 +                * If it's not initialized, is in the process of
9372 +                * going away, or is not appropriate (hardware
9373 +                * or software based on match), then skip.
9374 +                */
9375 +               if (cap->cc_dev == NULL ||
9376 +                   (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
9377 +                   (cap->cc_flags & match) == 0)
9378 +                       continue;
9379 +
9380 +               /* verify all the algorithms are supported. */
9381 +               if (driver_suitable(cap, cri)) {
9382 +                       if (best == NULL ||
9383 +                           cap->cc_sessions < best->cc_sessions)
9384 +                               best = cap;
9385 +               }
9386 +       }
9387 +       if (best != NULL)
9388 +               return best;
9389 +       if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
9390 +               /* sort of an Algol 68-style for loop */
9391 +               match = CRYPTOCAP_F_SOFTWARE;
9392 +               goto again;
9393 +       }
9394 +       return best;
9395 +}
9396 +
9397 +/*
9398 + * Create a new session.  The crid argument specifies a crypto
9399 + * driver to use or constraints on a driver to select (hardware
9400 + * only, software only, either).  Whatever driver is selected
9401 + * must be capable of the requested crypto algorithms.
9402 + */
9403 +int
9404 +crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
9405 +{
9406 +       struct cryptocap *cap;
9407 +       u_int32_t hid, lid;
9408 +       int err;
9409 +       unsigned long d_flags;
9410 +
9411 +       CRYPTO_DRIVER_LOCK();
9412 +       if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
9413 +               /*
9414 +                * Use specified driver; verify it is capable.
9415 +                */
9416 +               cap = crypto_checkdriver(crid);
9417 +               if (cap != NULL && !driver_suitable(cap, cri))
9418 +                       cap = NULL;
9419 +       } else {
9420 +               /*
9421 +                * No requested driver; select based on crid flags.
9422 +                */
9423 +               cap = crypto_select_driver(cri, crid);
9424 +               /*
9425 +                * if NULL then can't do everything in one session.
9426 +                * XXX Fix this. We need to inject a "virtual" session
9427 +                * XXX layer right about here.
9428 +                */
9429 +       }
9430 +       if (cap != NULL) {
9431 +               /* Call the driver initialization routine. */
9432 +               hid = cap - crypto_drivers;
9433 +               lid = hid;              /* Pass the driver ID. */
9434 +               cap->cc_sessions++;
9435 +               CRYPTO_DRIVER_UNLOCK();
9436 +               err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
9437 +               CRYPTO_DRIVER_LOCK();
9438 +               if (err == 0) {
9439 +                       (*sid) = (cap->cc_flags & 0xff000000)
9440 +                              | (hid & 0x00ffffff);
9441 +                       (*sid) <<= 32;
9442 +                       (*sid) |= (lid & 0xffffffff);
9443 +               } else
9444 +                       cap->cc_sessions--;
9445 +       } else
9446 +               err = EINVAL;
9447 +       CRYPTO_DRIVER_UNLOCK();
9448 +       return err;
9449 +}
9450 +
9451 +static void
9452 +crypto_remove(struct cryptocap *cap)
9453 +{
9454 +       CRYPTO_DRIVER_ASSERT();
9455 +       if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
9456 +               bzero(cap, sizeof(*cap));
9457 +}
9458 +
9459 +/*
9460 + * Delete an existing session (or a reserved session on an unregistered
9461 + * driver).
9462 + */
9463 +int
9464 +crypto_freesession(u_int64_t sid)
9465 +{
9466 +       struct cryptocap *cap;
9467 +       u_int32_t hid;
9468 +       int err = 0;
9469 +       unsigned long d_flags;
9470 +
9471 +       dprintk("%s()\n", __FUNCTION__);
9472 +       CRYPTO_DRIVER_LOCK();
9473 +
9474 +       if (crypto_drivers == NULL) {
9475 +               err = EINVAL;
9476 +               goto done;
9477 +       }
9478 +
9479 +       /* Determine two IDs. */
9480 +       hid = CRYPTO_SESID2HID(sid);
9481 +
9482 +       if (hid >= crypto_drivers_num) {
9483 +               dprintk("%s - INVALID DRIVER NUM %d\n", __FUNCTION__, hid);
9484 +               err = ENOENT;
9485 +               goto done;
9486 +       }
9487 +       cap = &crypto_drivers[hid];
9488 +
9489 +       if (cap->cc_dev) {
9490 +               CRYPTO_DRIVER_UNLOCK();
9491 +               /* Call the driver cleanup routine, if available, unlocked. */
9492 +               err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
9493 +               CRYPTO_DRIVER_LOCK();
9494 +       }
9495 +
9496 +       if (cap->cc_sessions)
9497 +               cap->cc_sessions--;
9498 +
9499 +       if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
9500 +               crypto_remove(cap);
9501 +
9502 +done:
9503 +       CRYPTO_DRIVER_UNLOCK();
9504 +       return err;
9505 +}
9506 +
9507 +/*
9508 + * Return an unused driver id.  Used by drivers prior to registering
9509 + * support for the algorithms they handle.
9510 + */
9511 +int32_t
9512 +crypto_get_driverid(device_t dev, int flags)
9513 +{
9514 +       struct cryptocap *newdrv;
9515 +       int i;
9516 +       unsigned long d_flags;
9517 +
9518 +       if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
9519 +               printf("%s: no flags specified when registering driver\n",
9520 +                   device_get_nameunit(dev));
9521 +               return -1;
9522 +       }
9523 +
9524 +       CRYPTO_DRIVER_LOCK();
9525 +
9526 +       for (i = 0; i < crypto_drivers_num; i++) {
9527 +               if (crypto_drivers[i].cc_dev == NULL &&
9528 +                   (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
9529 +                       break;
9530 +               }
9531 +       }
9532 +
9533 +       /* Out of entries, allocate some more. */
9534 +       if (i == crypto_drivers_num) {
9535 +               /* Be careful about wrap-around. */
9536 +               if (2 * crypto_drivers_num <= crypto_drivers_num) {
9537 +                       CRYPTO_DRIVER_UNLOCK();
9538 +                       printk("crypto: driver count wraparound!\n");
9539 +                       return -1;
9540 +               }
9541 +
9542 +               newdrv = kmalloc(2 * crypto_drivers_num * sizeof(struct cryptocap),
9543 +                               GFP_KERNEL);
9544 +               if (newdrv == NULL) {
9545 +                       CRYPTO_DRIVER_UNLOCK();
9546 +                       printk("crypto: no space to expand driver table!\n");
9547 +                       return -1;
9548 +               }
9549 +
9550 +               memcpy(newdrv, crypto_drivers,
9551 +                               crypto_drivers_num * sizeof(struct cryptocap));
9552 +               memset(&newdrv[crypto_drivers_num], 0,
9553 +                               crypto_drivers_num * sizeof(struct cryptocap));
9554 +
9555 +               crypto_drivers_num *= 2;
9556 +
9557 +               kfree(crypto_drivers);
9558 +               crypto_drivers = newdrv;
9559 +       }
9560 +
9561 +       /* NB: state is zero'd on free */
9562 +       crypto_drivers[i].cc_sessions = 1;      /* Mark */
9563 +       crypto_drivers[i].cc_dev = dev;
9564 +       crypto_drivers[i].cc_flags = flags;
9565 +       if (bootverbose)
9566 +               printf("crypto: assign %s driver id %u, flags %u\n",
9567 +                   device_get_nameunit(dev), i, flags);
9568 +
9569 +       CRYPTO_DRIVER_UNLOCK();
9570 +
9571 +       return i;
9572 +}
9573 +
9574 +/*
9575 + * Lookup a driver by name.  We match against the full device
9576 + * name and unit, and against just the name.  The latter gives
9577 + * us a simple wildcarding by device name.  On success return the
9578 + * driver/hardware identifier; otherwise return -1.
9579 + */
9580 +int
9581 +crypto_find_driver(const char *match)
9582 +{
9583 +       int i, len = strlen(match);
9584 +       unsigned long d_flags;
9585 +
9586 +       CRYPTO_DRIVER_LOCK();
9587 +       for (i = 0; i < crypto_drivers_num; i++) {
9588 +               device_t dev = crypto_drivers[i].cc_dev;
9589 +               if (dev == NULL ||
9590 +                   (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
9591 +                       continue;
9592 +               if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
9593 +                   strncmp(match, device_get_name(dev), len) == 0)
9594 +                       break;
9595 +       }
9596 +       CRYPTO_DRIVER_UNLOCK();
9597 +       return i < crypto_drivers_num ? i : -1;
9598 +}
9599 +
9600 +/*
9601 + * Return the device_t for the specified driver or NULL
9602 + * if the driver identifier is invalid.
9603 + */
9604 +device_t
9605 +crypto_find_device_byhid(int hid)
9606 +{
9607 +       struct cryptocap *cap = crypto_checkdriver(hid);
9608 +       return cap != NULL ? cap->cc_dev : NULL;
9609 +}
9610 +
9611 +/*
9612 + * Return the device/driver capabilities.
9613 + */
9614 +int
9615 +crypto_getcaps(int hid)
9616 +{
9617 +       struct cryptocap *cap = crypto_checkdriver(hid);
9618 +       return cap != NULL ? cap->cc_flags : 0;
9619 +}
9620 +
9621 +/*
9622 + * Register support for a key-related algorithm.  This routine
9623 + * is called once for each algorithm supported by a driver.
9624 + */
9625 +int
9626 +crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
9627 +{
9628 +       struct cryptocap *cap;
9629 +       int err;
9630 +       unsigned long d_flags;
9631 +
9632 +       dprintk("%s()\n", __FUNCTION__);
9633 +       CRYPTO_DRIVER_LOCK();
9634 +
9635 +       cap = crypto_checkdriver(driverid);
9636 +       if (cap != NULL &&
9637 +           (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
9638 +               /*
9639 +                * XXX Do some performance testing to determine placing.
9640 +                * XXX We probably need an auxiliary data structure that
9641 +                * XXX describes relative performances.
9642 +                */
9643 +
9644 +               cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
9645 +               if (bootverbose)
9646 +                       printf("crypto: %s registers key alg %u flags %u\n"
9647 +                               , device_get_nameunit(cap->cc_dev)
9648 +                               , kalg
9649 +                               , flags
9650 +                       );
9651 +               err = 0;
9652 +       } else
9653 +               err = EINVAL;
9654 +
9655 +       CRYPTO_DRIVER_UNLOCK();
9656 +       return err;
9657 +}
9658 +
9659 +/*
9660 + * Register support for a non-key-related algorithm.  This routine
9661 + * is called once for each such algorithm supported by a driver.
9662 + */
9663 +int
9664 +crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
9665 +    u_int32_t flags)
9666 +{
9667 +       struct cryptocap *cap;
9668 +       int err;
9669 +       unsigned long d_flags;
9670 +
9671 +       dprintk("%s(id=0x%x, alg=%d, maxoplen=%d, flags=0x%x)\n", __FUNCTION__,
9672 +                       driverid, alg, maxoplen, flags);
9673 +
9674 +       CRYPTO_DRIVER_LOCK();
9675 +
9676 +       cap = crypto_checkdriver(driverid);
9677 +       /* NB: algorithms are in the range [1..max] */
9678 +       if (cap != NULL &&
9679 +           (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
9680 +               /*
9681 +                * XXX Do some performance testing to determine placing.
9682 +                * XXX We probably need an auxiliary data structure that
9683 +                * XXX describes relative performances.
9684 +                */
9685 +
9686 +               cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
9687 +               cap->cc_max_op_len[alg] = maxoplen;
9688 +               if (bootverbose)
9689 +                       printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
9690 +                               , device_get_nameunit(cap->cc_dev)
9691 +                               , alg
9692 +                               , flags
9693 +                               , maxoplen
9694 +                       );
9695 +               cap->cc_sessions = 0;           /* Unmark */
9696 +               err = 0;
9697 +       } else
9698 +               err = EINVAL;
9699 +
9700 +       CRYPTO_DRIVER_UNLOCK();
9701 +       return err;
9702 +}
9703 +
9704 +static void
9705 +driver_finis(struct cryptocap *cap)
9706 +{
9707 +       u_int32_t ses, kops;
9708 +
9709 +       CRYPTO_DRIVER_ASSERT();
9710 +
9711 +       ses = cap->cc_sessions;
9712 +       kops = cap->cc_koperations;
9713 +       bzero(cap, sizeof(*cap));
9714 +       if (ses != 0 || kops != 0) {
9715 +               /*
9716 +                * If there are pending sessions,
9717 +                * just mark as invalid.
9718 +                */
9719 +               cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
9720 +               cap->cc_sessions = ses;
9721 +               cap->cc_koperations = kops;
9722 +       }
9723 +}
9724 +
9725 +/*
9726 + * Unregister a crypto driver. If there are pending sessions using it,
9727 + * leave enough information around so that subsequent calls using those
9728 + * sessions will correctly detect the driver has been unregistered and
9729 + * reroute requests.
9730 + */
9731 +int
9732 +crypto_unregister(u_int32_t driverid, int alg)
9733 +{
9734 +       struct cryptocap *cap;
9735 +       int i, err;
9736 +       unsigned long d_flags;
9737 +
9738 +       dprintk("%s()\n", __FUNCTION__);
9739 +       CRYPTO_DRIVER_LOCK();
9740 +
9741 +       cap = crypto_checkdriver(driverid);
9742 +       if (cap != NULL &&
9743 +           (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
9744 +           cap->cc_alg[alg] != 0) {
9745 +               cap->cc_alg[alg] = 0;
9746 +               cap->cc_max_op_len[alg] = 0;
9747 +
9748 +               /* Was this the last algorithm ? */
9749 +               for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
9750 +                       if (cap->cc_alg[i] != 0)
9751 +                               break;
9752 +
9753 +               if (i == CRYPTO_ALGORITHM_MAX + 1)
9754 +                       driver_finis(cap);
9755 +               err = 0;
9756 +       } else
9757 +               err = EINVAL;
9758 +       CRYPTO_DRIVER_UNLOCK();
9759 +       return err;
9760 +}
9761 +
9762 +/*
9763 + * Unregister all algorithms associated with a crypto driver.
9764 + * If there are pending sessions using it, leave enough information
9765 + * around so that subsequent calls using those sessions will
9766 + * correctly detect the driver has been unregistered and reroute
9767 + * requests.
9768 + */
9769 +int
9770 +crypto_unregister_all(u_int32_t driverid)
9771 +{
9772 +       struct cryptocap *cap;
9773 +       int err;
9774 +       unsigned long d_flags;
9775 +
9776 +       dprintk("%s()\n", __FUNCTION__);
9777 +       CRYPTO_DRIVER_LOCK();
9778 +       cap = crypto_checkdriver(driverid);
9779 +       if (cap != NULL) {
9780 +               driver_finis(cap);
9781 +               err = 0;
9782 +       } else
9783 +               err = EINVAL;
9784 +       CRYPTO_DRIVER_UNLOCK();
9785 +
9786 +       return err;
9787 +}
9788 +
9789 +/*
9790 + * Clear blockage on a driver.  The what parameter indicates whether
9791 + * the driver is now ready for cryptop's and/or cryptokop's.
9792 + */
9793 +int
9794 +crypto_unblock(u_int32_t driverid, int what)
9795 +{
9796 +       struct cryptocap *cap;
9797 +       int err;
9798 +       unsigned long q_flags;
9799 +
9800 +       CRYPTO_Q_LOCK();
9801 +       cap = crypto_checkdriver(driverid);
9802 +       if (cap != NULL) {
9803 +               if (what & CRYPTO_SYMQ) {
9804 +                       cap->cc_qblocked = 0;
9805 +                       crypto_all_qblocked = 0;
9806 +               }
9807 +               if (what & CRYPTO_ASYMQ) {
9808 +                       cap->cc_kqblocked = 0;
9809 +                       crypto_all_kqblocked = 0;
9810 +               }
9811 +               if (crp_sleep)
9812 +                       wake_up_interruptible(&cryptoproc_wait);
9813 +               err = 0;
9814 +       } else
9815 +               err = EINVAL;
9816 +       CRYPTO_Q_UNLOCK(); //DAVIDM should this be a driver lock
9817 +
9818 +       return err;
9819 +}
9820 +
9821 +/*
9822 + * Add a crypto request to a queue, to be processed by the kernel thread.
9823 + */
9824 +int
9825 +crypto_dispatch(struct cryptop *crp)
9826 +{
9827 +       struct cryptocap *cap;
9828 +       int result = -1;
9829 +       unsigned long q_flags;
9830 +
9831 +       dprintk("%s()\n", __FUNCTION__);
9832 +
9833 +       cryptostats.cs_ops++;
9834 +
9835 +       CRYPTO_Q_LOCK();
9836 +       if (crypto_q_cnt >= crypto_q_max) {
9837 +               CRYPTO_Q_UNLOCK();
9838 +               cryptostats.cs_drops++;
9839 +               return ENOMEM;
9840 +       }
9841 +       crypto_q_cnt++;
9842 +
9843 +       /*
9844 +        * Caller marked the request to be processed immediately; dispatch
9845 +        * it directly to the driver unless the driver is currently blocked.
9846 +        */
9847 +       if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
9848 +               int hid = CRYPTO_SESID2HID(crp->crp_sid);
9849 +               cap = crypto_checkdriver(hid);
9850 +               /* Driver cannot disappear when there is an active session. */
9851 +               KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
9852 +               if (!cap->cc_qblocked) {
9853 +                       crypto_all_qblocked = 0;
9854 +                       crypto_drivers[hid].cc_qblocked = 1;
9855 +                       CRYPTO_Q_UNLOCK();
9856 +                       result = crypto_invoke(cap, crp, 0);
9857 +                       CRYPTO_Q_LOCK();
9858 +                       if (result != ERESTART)
9859 +                               crypto_drivers[hid].cc_qblocked = 0;
9860 +               }
9861 +       }
9862 +       if (result == ERESTART) {
9863 +               /*
9864 +                * The driver ran out of resources, mark the
9865 +                * driver ``blocked'' for cryptop's and put
9866 +                * the request back in the queue.  It would
9867 +                * best to put the request back where we got
9868 +                * it but that's hard so for now we put it
9869 +                * at the front.  This should be ok; putting
9870 +                * it at the end does not work.
9871 +                */
9872 +               list_add(&crp->crp_next, &crp_q);
9873 +               cryptostats.cs_blocks++;
9874 +       } else if (result == -1) {
9875 +               TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
9876 +       }
9877 +       if (crp_sleep)
9878 +               wake_up_interruptible(&cryptoproc_wait);
9879 +       CRYPTO_Q_UNLOCK();
9880 +       return 0;
9881 +}
9882 +
9883 +/*
9884 + * Add an asymmetric crypto request to a queue,
9885 + * to be processed by the kernel thread.
9886 + */
9887 +int
9888 +crypto_kdispatch(struct cryptkop *krp)
9889 +{
9890 +       int error;
9891 +       unsigned long q_flags;
9892 +
9893 +       cryptostats.cs_kops++;
9894 +
9895 +       error = crypto_kinvoke(krp, krp->krp_crid);
9896 +       if (error == ERESTART) {
9897 +               CRYPTO_Q_LOCK();
9898 +               TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
9899 +               if (crp_sleep)
9900 +                       wake_up_interruptible(&cryptoproc_wait);
9901 +               CRYPTO_Q_UNLOCK();
9902 +               error = 0;
9903 +       }
9904 +       return error;
9905 +}
9906 +
9907 +/*
9908 + * Verify a driver is suitable for the specified operation.
9909 + */
9910 +static __inline int
9911 +kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
9912 +{
9913 +       return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
9914 +}
9915 +
9916 +/*
9917 + * Select a driver for an asym operation.  The driver must
9918 + * support the necessary algorithm.  The caller can constrain
9919 + * which device is selected with the flags parameter.  The
9920 + * algorithm we use here is pretty stupid; just use the first
9921 + * driver that supports the algorithms we need. If there are
9922 + * multiple suitable drivers we choose the driver with the
9923 + * fewest active operations.  We prefer hardware-backed
9924 + * drivers to software ones when either may be used.
9925 + */
9926 +static struct cryptocap *
9927 +crypto_select_kdriver(const struct cryptkop *krp, int flags)
9928 +{
9929 +       struct cryptocap *cap, *best, *blocked;
9930 +       int match, hid;
9931 +
9932 +       CRYPTO_DRIVER_ASSERT();
9933 +
9934 +       /*
9935 +        * Look first for hardware crypto devices if permitted.
9936 +        */
9937 +       if (flags & CRYPTOCAP_F_HARDWARE)
9938 +               match = CRYPTOCAP_F_HARDWARE;
9939 +       else
9940 +               match = CRYPTOCAP_F_SOFTWARE;
9941 +       best = NULL;
9942 +       blocked = NULL;
9943 +again:
9944 +       for (hid = 0; hid < crypto_drivers_num; hid++) {
9945 +               cap = &crypto_drivers[hid];
9946 +               /*
9947 +                * If it's not initialized, is in the process of
9948 +                * going away, or is not appropriate (hardware
9949 +                * or software based on match), then skip.
9950 +                */
9951 +               if (cap->cc_dev == NULL ||
9952 +                   (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
9953 +                   (cap->cc_flags & match) == 0)
9954 +                       continue;
9955 +
9956 +               /* verify all the algorithms are supported. */
9957 +               if (kdriver_suitable(cap, krp)) {
9958 +                       if (best == NULL ||
9959 +                           cap->cc_koperations < best->cc_koperations)
9960 +                               best = cap;
9961 +               }
9962 +       }
9963 +       if (best != NULL)
9964 +               return best;
9965 +       if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
9966 +               /* sort of an Algol 68-style for loop */
9967 +               match = CRYPTOCAP_F_SOFTWARE;
9968 +               goto again;
9969 +       }
9970 +       return best;
9971 +}
9972 +
9973 +/*
9974 + * Dispatch an asymmetric crypto request.
9975 + */
9976 +static int
9977 +crypto_kinvoke(struct cryptkop *krp, int crid)
9978 +{
9979 +       struct cryptocap *cap = NULL;
9980 +       int error;
9981 +       unsigned long d_flags;
9982 +
9983 +       KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
9984 +       KASSERT(krp->krp_callback != NULL,
9985 +           ("%s: krp->crp_callback == NULL", __func__));
9986 +
9987 +       CRYPTO_DRIVER_LOCK();
9988 +       if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
9989 +               cap = crypto_checkdriver(crid);
9990 +               if (cap != NULL) {
9991 +                       /*
9992 +                        * Driver present, it must support the necessary
9993 +                        * algorithm and, if s/w drivers are excluded,
9994 +                        * it must be registered as hardware-backed.
9995 +                        */
9996 +                       if (!kdriver_suitable(cap, krp) ||
9997 +                           (!crypto_devallowsoft &&
9998 +                            (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
9999 +                               cap = NULL;
10000 +               }
10001 +       } else {
10002 +               /*
10003 +                * No requested driver; select based on crid flags.
10004 +                */
10005 +               if (!crypto_devallowsoft)       /* NB: disallow s/w drivers */
10006 +                       crid &= ~CRYPTOCAP_F_SOFTWARE;
10007 +               cap = crypto_select_kdriver(krp, crid);
10008 +       }
10009 +       if (cap != NULL && !cap->cc_kqblocked) {
10010 +               krp->krp_hid = cap - crypto_drivers;
10011 +               cap->cc_koperations++;
10012 +               CRYPTO_DRIVER_UNLOCK();
10013 +               error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
10014 +               CRYPTO_DRIVER_LOCK();
10015 +               if (error == ERESTART) {
10016 +                       cap->cc_koperations--;
10017 +                       CRYPTO_DRIVER_UNLOCK();
10018 +                       return (error);
10019 +               }
10020 +               /* return the actual device used */
10021 +               krp->krp_crid = krp->krp_hid;
10022 +       } else {
10023 +               /*
10024 +                * NB: cap is !NULL if device is blocked; in
10025 +                *     that case return ERESTART so the operation
10026 +                *     is resubmitted if possible.
10027 +                */
10028 +               error = (cap == NULL) ? ENODEV : ERESTART;
10029 +       }
10030 +       CRYPTO_DRIVER_UNLOCK();
10031 +
10032 +       if (error) {
10033 +               krp->krp_status = error;
10034 +               crypto_kdone(krp);
10035 +       }
10036 +       return 0;
10037 +}
10038 +
10039 +
10040 +/*
10041 + * Dispatch a crypto request to the appropriate crypto devices.
10042 + */
10043 +static int
10044 +crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
10045 +{
10046 +       KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
10047 +       KASSERT(crp->crp_callback != NULL,
10048 +           ("%s: crp->crp_callback == NULL", __func__));
10049 +       KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
10050 +
10051 +       dprintk("%s()\n", __FUNCTION__);
10052 +
10053 +#ifdef CRYPTO_TIMING
10054 +       if (crypto_timing)
10055 +               crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
10056 +#endif
10057 +       if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
10058 +               struct cryptodesc *crd;
10059 +               u_int64_t nid;
10060 +
10061 +               /*
10062 +                * Driver has unregistered; migrate the session and return
10063 +                * an error to the caller so they'll resubmit the op.
10064 +                *
10065 +                * XXX: What if there are more already queued requests for this
10066 +                *      session?
10067 +                */
10068 +               crypto_freesession(crp->crp_sid);
10069 +
10070 +               for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
10071 +                       crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
10072 +
10073 +               /* XXX propagate flags from initial session? */
10074 +               if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
10075 +                   CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
10076 +                       crp->crp_sid = nid;
10077 +
10078 +               crp->crp_etype = EAGAIN;
10079 +               crypto_done(crp);
10080 +               return 0;
10081 +       } else {
10082 +               /*
10083 +                * Invoke the driver to process the request.
10084 +                */
10085 +               return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
10086 +       }
10087 +}
10088 +
10089 +/*
10090 + * Release a set of crypto descriptors.
10091 + */
10092 +void
10093 +crypto_freereq(struct cryptop *crp)
10094 +{
10095 +       struct cryptodesc *crd;
10096 +
10097 +       if (crp == NULL)
10098 +               return;
10099 +
10100 +#ifdef DIAGNOSTIC
10101 +       {
10102 +               struct cryptop *crp2;
10103 +               unsigned long q_flags;
10104 +
10105 +               CRYPTO_Q_LOCK();
10106 +               TAILQ_FOREACH(crp2, &crp_q, crp_next) {
10107 +                       KASSERT(crp2 != crp,
10108 +                           ("Freeing cryptop from the crypto queue (%p).",
10109 +                           crp));
10110 +               }
10111 +               CRYPTO_Q_UNLOCK();
10112 +               CRYPTO_RETQ_LOCK();
10113 +               TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
10114 +                       KASSERT(crp2 != crp,
10115 +                           ("Freeing cryptop from the return queue (%p).",
10116 +                           crp));
10117 +               }
10118 +               CRYPTO_RETQ_UNLOCK();
10119 +       }
10120 +#endif
10121 +
10122 +       while ((crd = crp->crp_desc) != NULL) {
10123 +               crp->crp_desc = crd->crd_next;
10124 +               kmem_cache_free(cryptodesc_zone, crd);
10125 +       }
10126 +       kmem_cache_free(cryptop_zone, crp);
10127 +}
10128 +
10129 +/*
10130 + * Acquire a set of crypto descriptors.
10131 + */
10132 +struct cryptop *
10133 +crypto_getreq(int num)
10134 +{
10135 +       struct cryptodesc *crd;
10136 +       struct cryptop *crp;
10137 +
10138 +       crp = kmem_cache_alloc(cryptop_zone, SLAB_ATOMIC);
10139 +       if (crp != NULL) {
10140 +               memset(crp, 0, sizeof(*crp));
10141 +               INIT_LIST_HEAD(&crp->crp_next);
10142 +               init_waitqueue_head(&crp->crp_waitq);
10143 +               while (num--) {
10144 +                       crd = kmem_cache_alloc(cryptodesc_zone, SLAB_ATOMIC);
10145 +                       if (crd == NULL) {
10146 +                               crypto_freereq(crp);
10147 +                               return NULL;
10148 +                       }
10149 +                       memset(crd, 0, sizeof(*crd));
10150 +                       crd->crd_next = crp->crp_desc;
10151 +                       crp->crp_desc = crd;
10152 +               }
10153 +       }
10154 +       return crp;
10155 +}
10156 +
10157 +/*
10158 + * Invoke the callback on behalf of the driver.
10159 + */
10160 +void
10161 +crypto_done(struct cryptop *crp)
10162 +{
10163 +       unsigned long q_flags;
10164 +
10165 +       dprintk("%s()\n", __FUNCTION__);
10166 +       if ((crp->crp_flags & CRYPTO_F_DONE) == 0) {
10167 +               crp->crp_flags |= CRYPTO_F_DONE;
10168 +               CRYPTO_Q_LOCK();
10169 +               crypto_q_cnt--;
10170 +               CRYPTO_Q_UNLOCK();
10171 +       } else
10172 +               printk("crypto: crypto_done op already done, flags 0x%x",
10173 +                               crp->crp_flags);
10174 +       if (crp->crp_etype != 0)
10175 +               cryptostats.cs_errs++;
10176 +       /*
10177 +        * CBIMM means unconditionally do the callback immediately;
10178 +        * CBIFSYNC means do the callback immediately only if the
10179 +        * operation was done synchronously.  Both are used to avoid
10180 +        * doing extraneous context switches; the latter is mostly
10181 +        * used with the software crypto driver.
10182 +        */
10183 +       if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
10184 +           ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
10185 +            (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
10186 +               /*
10187 +                * Do the callback directly.  This is ok when the
10188 +                * callback routine does very little (e.g. the
10189 +                * /dev/crypto callback method just does a wakeup).
10190 +                */
10191 +               crp->crp_callback(crp);
10192 +       } else {
10193 +               unsigned long r_flags;
10194 +               /*
10195 +                * Normal case; queue the callback for the thread.
10196 +                */
10197 +               CRYPTO_RETQ_LOCK();
10198 +               if (CRYPTO_RETQ_EMPTY())
10199 +                       wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
10200 +               TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
10201 +               CRYPTO_RETQ_UNLOCK();
10202 +       }
10203 +}
10204 +
10205 +/*
10206 + * Invoke the callback on behalf of the driver.
10207 + */
10208 +void
10209 +crypto_kdone(struct cryptkop *krp)
10210 +{
10211 +       struct cryptocap *cap;
10212 +       unsigned long d_flags;
10213 +
10214 +       if ((krp->krp_flags & CRYPTO_KF_DONE) != 0)
10215 +               printk("crypto: crypto_kdone op already done, flags 0x%x\n",
10216 +                               krp->krp_flags);
10217 +       krp->krp_flags |= CRYPTO_KF_DONE;
10218 +       if (krp->krp_status != 0)
10219 +               cryptostats.cs_kerrs++;
10220 +
10221 +       CRYPTO_DRIVER_LOCK();
10222 +       /* XXX: What if driver is loaded in the meantime? */
10223 +       if (krp->krp_hid < crypto_drivers_num) {
10224 +               cap = &crypto_drivers[krp->krp_hid];
10225 +               cap->cc_koperations--;
10226 +               KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
10227 +               if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
10228 +                       crypto_remove(cap);
10229 +       }
10230 +       CRYPTO_DRIVER_UNLOCK();
10231 +
10232 +       /*
10233 +        * CBIMM means unconditionally do the callback immediately;
10234 +        * This is used to avoid doing extraneous context switches
10235 +        */
10236 +       if ((krp->krp_flags & CRYPTO_KF_CBIMM)) {
10237 +               /*
10238 +                * Do the callback directly.  This is ok when the
10239 +                * callback routine does very little (e.g. the
10240 +                * /dev/crypto callback method just does a wakeup).
10241 +                */
10242 +               krp->krp_callback(krp);
10243 +       } else {
10244 +               unsigned long r_flags;
10245 +               /*
10246 +                * Normal case; queue the callback for the thread.
10247 +                */
10248 +               CRYPTO_RETQ_LOCK();
10249 +               if (CRYPTO_RETQ_EMPTY())
10250 +                       wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
10251 +               TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
10252 +               CRYPTO_RETQ_UNLOCK();
10253 +       }
10254 +}
10255 +
10256 +int
10257 +crypto_getfeat(int *featp)
10258 +{
10259 +       int hid, kalg, feat = 0;
10260 +       unsigned long d_flags;
10261 +
10262 +       CRYPTO_DRIVER_LOCK();
10263 +       for (hid = 0; hid < crypto_drivers_num; hid++) {
10264 +               const struct cryptocap *cap = &crypto_drivers[hid];
10265 +
10266 +               if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
10267 +                   !crypto_devallowsoft) {
10268 +                       continue;
10269 +               }
10270 +               for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
10271 +                       if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
10272 +                               feat |=  1 << kalg;
10273 +       }
10274 +       CRYPTO_DRIVER_UNLOCK();
10275 +       *featp = feat;
10276 +       return (0);
10277 +}
10278 +
10279 +/*
10280 + * Crypto thread, dispatches crypto requests.
10281 + */
10282 +static int
10283 +crypto_proc(void *arg)
10284 +{
10285 +       struct cryptop *crp, *submit;
10286 +       struct cryptkop *krp, *krpp;
10287 +       struct cryptocap *cap;
10288 +       u_int32_t hid;
10289 +       int result, hint;
10290 +       unsigned long q_flags;
10291 +
10292 +       ocf_daemonize("crypto");
10293 +
10294 +       CRYPTO_Q_LOCK();
10295 +       for (;;) {
10296 +               /*
10297 +                * we need to make sure we don't get into a busy loop with nothing
10298 +                * to do,  the two crypto_all_*blocked vars help us find out when
10299 +                * we are all full and can do nothing on any driver or Q.  If so we
10300 +                * wait for an unblock.
10301 +                */
10302 +               crypto_all_qblocked  = !list_empty(&crp_q);
10303 +
10304 +               /*
10305 +                * Find the first element in the queue that can be
10306 +                * processed and look-ahead to see if multiple ops
10307 +                * are ready for the same driver.
10308 +                */
10309 +               submit = NULL;
10310 +               hint = 0;
10311 +               list_for_each_entry(crp, &crp_q, crp_next) {
10312 +                       hid = CRYPTO_SESID2HID(crp->crp_sid);
10313 +                       cap = crypto_checkdriver(hid);
10314 +                       /*
10315 +                        * Driver cannot disappear when there is an active
10316 +                        * session.
10317 +                        */
10318 +                       KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
10319 +                           __func__, __LINE__));
10320 +                       if (cap == NULL || cap->cc_dev == NULL) {
10321 +                               /* Op needs to be migrated, process it. */
10322 +                               if (submit == NULL)
10323 +                                       submit = crp;
10324 +                               break;
10325 +                       }
10326 +                       if (!cap->cc_qblocked) {
10327 +                               if (submit != NULL) {
10328 +                                       /*
10329 +                                        * We stop on finding another op,
10330 +                                        * regardless whether its for the same
10331 +                                        * driver or not.  We could keep
10332 +                                        * searching the queue but it might be
10333 +                                        * better to just use a per-driver
10334 +                                        * queue instead.
10335 +                                        */
10336 +                                       if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
10337 +                                               hint = CRYPTO_HINT_MORE;
10338 +                                       break;
10339 +                               } else {
10340 +                                       submit = crp;
10341 +                                       if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
10342 +                                               break;
10343 +                                       /* keep scanning for more are q'd */
10344 +                               }
10345 +                       }
10346 +               }
10347 +               if (submit != NULL) {
10348 +                       hid = CRYPTO_SESID2HID(submit->crp_sid);
10349 +                       crypto_all_qblocked = 0;
10350 +                       list_del(&submit->crp_next);
10351 +                       crypto_drivers[hid].cc_qblocked = 1;
10352 +                       cap = crypto_checkdriver(hid);
10353 +                       CRYPTO_Q_UNLOCK();
10354 +                       KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
10355 +                           __func__, __LINE__));
10356 +                       result = crypto_invoke(cap, submit, hint);
10357 +                       CRYPTO_Q_LOCK();
10358 +                       if (result == ERESTART) {
10359 +                               /*
10360 +                                * The driver ran out of resources, mark the
10361 +                                * driver ``blocked'' for cryptop's and put
10362 +                                * the request back in the queue.  It would
10363 +                                * best to put the request back where we got
10364 +                                * it but that's hard so for now we put it
10365 +                                * at the front.  This should be ok; putting
10366 +                                * it at the end does not work.
10367 +                                */
10368 +                               /* XXX validate sid again? */
10369 +                               list_add(&submit->crp_next, &crp_q);
10370 +                               cryptostats.cs_blocks++;
10371 +                       } else
10372 +                               crypto_drivers[hid].cc_qblocked=0;
10373 +               }
10374 +
10375 +               crypto_all_kqblocked = !list_empty(&crp_kq);
10376 +
10377 +               /* As above, but for key ops */
10378 +               krp = NULL;
10379 +               list_for_each_entry(krpp, &crp_kq, krp_next) {
10380 +                       cap = crypto_checkdriver(krpp->krp_hid);
10381 +                       if (cap == NULL || cap->cc_dev == NULL) {
10382 +                               /*
10383 +                                * Operation needs to be migrated, invalidate
10384 +                                * the assigned device so it will reselect a
10385 +                                * new one below.  Propagate the original
10386 +                                * crid selection flags if supplied.
10387 +                                */
10388 +                               krpp->krp_hid = krpp->krp_crid &
10389 +                                   (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
10390 +                               if (krpp->krp_hid == 0)
10391 +                                       krpp->krp_hid =
10392 +                                   CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
10393 +                               break;
10394 +                       }
10395 +                       if (!cap->cc_kqblocked) {
10396 +                               krp = krpp;
10397 +                               break;
10398 +                       }
10399 +               }
10400 +               if (krp != NULL) {
10401 +                       crypto_all_kqblocked = 0;
10402 +                       list_del(&krp->krp_next);
10403 +                       crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
10404 +                       CRYPTO_Q_UNLOCK();
10405 +                       result = crypto_kinvoke(krp, krp->krp_hid);
10406 +                       CRYPTO_Q_LOCK();
10407 +                       if (result == ERESTART) {
10408 +                               /*
10409 +                                * The driver ran out of resources, mark the
10410 +                                * driver ``blocked'' for cryptkop's and put
10411 +                                * the request back in the queue.  It would
10412 +                                * best to put the request back where we got
10413 +                                * it but that's hard so for now we put it
10414 +                                * at the front.  This should be ok; putting
10415 +                                * it at the end does not work.
10416 +                                */
10417 +                               /* XXX validate sid again? */
10418 +                               list_add(&krp->krp_next, &crp_kq);
10419 +                               cryptostats.cs_kblocks++;
10420 +                       } else
10421 +                               crypto_drivers[krp->krp_hid].cc_kqblocked = 0;
10422 +               }
10423 +
10424 +               if (submit == NULL && krp == NULL) {
10425 +                       /*
10426 +                        * Nothing more to be processed.  Sleep until we're
10427 +                        * woken because there are more ops to process.
10428 +                        * This happens either by submission or by a driver
10429 +                        * becoming unblocked and notifying us through
10430 +                        * crypto_unblock.  Note that when we wakeup we
10431 +                        * start processing each queue again from the
10432 +                        * front. It's not clear that it's important to
10433 +                        * preserve this ordering since ops may finish
10434 +                        * out of order if dispatched to different devices
10435 +                        * and some become blocked while others do not.
10436 +                        */
10437 +                       dprintk("%s - sleeping (qe=%d qb=%d kqe=%d kqb=%d)\n",
10438 +                                       __FUNCTION__,
10439 +                                       list_empty(&crp_q), crypto_all_qblocked,
10440 +                                       list_empty(&crp_kq), crypto_all_kqblocked);
10441 +                       CRYPTO_Q_UNLOCK();
10442 +                       crp_sleep = 1;
10443 +                       wait_event_interruptible(cryptoproc_wait,
10444 +                                       !(list_empty(&crp_q) || crypto_all_qblocked) ||
10445 +                                       !(list_empty(&crp_kq) || crypto_all_kqblocked) ||
10446 +                                       cryptoproc == (pid_t) -1);
10447 +                       crp_sleep = 0;
10448 +                       if (signal_pending (current)) {
10449 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10450 +                               spin_lock_irq(&current->sigmask_lock);
10451 +#endif
10452 +                               flush_signals(current);
10453 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10454 +                               spin_unlock_irq(&current->sigmask_lock);
10455 +#endif
10456 +                       }
10457 +                       CRYPTO_Q_LOCK();
10458 +                       dprintk("%s - awake\n", __FUNCTION__);
10459 +                       if (cryptoproc == (pid_t) -1)
10460 +                               break;
10461 +                       cryptostats.cs_intrs++;
10462 +               }
10463 +       }
10464 +       CRYPTO_Q_UNLOCK();
10465 +       complete_and_exit(&cryptoproc_exited, 0);
10466 +}
10467 +
10468 +/*
10469 + * Crypto returns thread, does callbacks for processed crypto requests.
10470 + * Callbacks are done here, rather than in the crypto drivers, because
10471 + * callbacks typically are expensive and would slow interrupt handling.
10472 + */
10473 +static int
10474 +crypto_ret_proc(void *arg)
10475 +{
10476 +       struct cryptop *crpt;
10477 +       struct cryptkop *krpt;
10478 +       unsigned long  r_flags;
10479 +
10480 +       ocf_daemonize("crypto_ret");
10481 +
10482 +       CRYPTO_RETQ_LOCK();
10483 +       for (;;) {
10484 +               /* Harvest return q's for completed ops */
10485 +               crpt = NULL;
10486 +               if (!list_empty(&crp_ret_q))
10487 +                       crpt = list_entry(crp_ret_q.next, typeof(*crpt), crp_next);
10488 +               if (crpt != NULL)
10489 +                       list_del(&crpt->crp_next);
10490 +
10491 +               krpt = NULL;
10492 +               if (!list_empty(&crp_ret_kq))
10493 +                       krpt = list_entry(crp_ret_kq.next, typeof(*krpt), krp_next);
10494 +               if (krpt != NULL)
10495 +                       list_del(&krpt->krp_next);
10496 +
10497 +               if (crpt != NULL || krpt != NULL) {
10498 +                       CRYPTO_RETQ_UNLOCK();
10499 +                       /*
10500 +                        * Run callbacks unlocked.
10501 +                        */
10502 +                       if (crpt != NULL)
10503 +                               crpt->crp_callback(crpt);
10504 +                       if (krpt != NULL)
10505 +                               krpt->krp_callback(krpt);
10506 +                       CRYPTO_RETQ_LOCK();
10507 +               } else {
10508 +                       /*
10509 +                        * Nothing more to be processed.  Sleep until we're
10510 +                        * woken because there are more returns to process.
10511 +                        */
10512 +                       dprintk("%s - sleeping\n", __FUNCTION__);
10513 +                       CRYPTO_RETQ_UNLOCK();
10514 +                       wait_event_interruptible(cryptoretproc_wait,
10515 +                                       cryptoretproc == (pid_t) -1 ||
10516 +                                       !list_empty(&crp_ret_q) ||
10517 +                                       !list_empty(&crp_ret_kq));
10518 +                       if (signal_pending (current)) {
10519 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10520 +                               spin_lock_irq(&current->sigmask_lock);
10521 +#endif
10522 +                               flush_signals(current);
10523 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10524 +                               spin_unlock_irq(&current->sigmask_lock);
10525 +#endif
10526 +                       }
10527 +                       CRYPTO_RETQ_LOCK();
10528 +                       dprintk("%s - awake\n", __FUNCTION__);
10529 +                       if (cryptoretproc == (pid_t) -1) {
10530 +                               dprintk("%s - EXITING!\n", __FUNCTION__);
10531 +                               break;
10532 +                       }
10533 +                       cryptostats.cs_rets++;
10534 +               }
10535 +       }
10536 +       CRYPTO_RETQ_UNLOCK();
10537 +       complete_and_exit(&cryptoretproc_exited, 0);
10538 +}
10539 +
10540 +
10541 +#if 0 /* should put this into /proc or something */
10542 +static void
10543 +db_show_drivers(void)
10544 +{
10545 +       int hid;
10546 +
10547 +       db_printf("%12s %4s %4s %8s %2s %2s\n"
10548 +               , "Device"
10549 +               , "Ses"
10550 +               , "Kops"
10551 +               , "Flags"
10552 +               , "QB"
10553 +               , "KB"
10554 +       );
10555 +       for (hid = 0; hid < crypto_drivers_num; hid++) {
10556 +               const struct cryptocap *cap = &crypto_drivers[hid];
10557 +               if (cap->cc_dev == NULL)
10558 +                       continue;
10559 +               db_printf("%-12s %4u %4u %08x %2u %2u\n"
10560 +                   , device_get_nameunit(cap->cc_dev)
10561 +                   , cap->cc_sessions
10562 +                   , cap->cc_koperations
10563 +                   , cap->cc_flags
10564 +                   , cap->cc_qblocked
10565 +                   , cap->cc_kqblocked
10566 +               );
10567 +       }
10568 +}
10569 +
10570 +DB_SHOW_COMMAND(crypto, db_show_crypto)
10571 +{
10572 +       struct cryptop *crp;
10573 +
10574 +       db_show_drivers();
10575 +       db_printf("\n");
10576 +
10577 +       db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
10578 +           "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
10579 +           "Desc", "Callback");
10580 +       TAILQ_FOREACH(crp, &crp_q, crp_next) {
10581 +               db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
10582 +                   , (int) CRYPTO_SESID2HID(crp->crp_sid)
10583 +                   , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
10584 +                   , crp->crp_ilen, crp->crp_olen
10585 +                   , crp->crp_etype
10586 +                   , crp->crp_flags
10587 +                   , crp->crp_desc
10588 +                   , crp->crp_callback
10589 +               );
10590 +       }
10591 +       if (!TAILQ_EMPTY(&crp_ret_q)) {
10592 +               db_printf("\n%4s %4s %4s %8s\n",
10593 +                   "HID", "Etype", "Flags", "Callback");
10594 +               TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
10595 +                       db_printf("%4u %4u %04x %8p\n"
10596 +                           , (int) CRYPTO_SESID2HID(crp->crp_sid)
10597 +                           , crp->crp_etype
10598 +                           , crp->crp_flags
10599 +                           , crp->crp_callback
10600 +                       );
10601 +               }
10602 +       }
10603 +}
10604 +
10605 +DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
10606 +{
10607 +       struct cryptkop *krp;
10608 +
10609 +       db_show_drivers();
10610 +       db_printf("\n");
10611 +
10612 +       db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
10613 +           "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
10614 +       TAILQ_FOREACH(krp, &crp_kq, krp_next) {
10615 +               db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
10616 +                   , krp->krp_op
10617 +                   , krp->krp_status
10618 +                   , krp->krp_iparams, krp->krp_oparams
10619 +                   , krp->krp_crid, krp->krp_hid
10620 +                   , krp->krp_callback
10621 +               );
10622 +       }
10623 +       if (!TAILQ_EMPTY(&crp_ret_q)) {
10624 +               db_printf("%4s %5s %8s %4s %8s\n",
10625 +                   "Op", "Status", "CRID", "HID", "Callback");
10626 +               TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
10627 +                       db_printf("%4u %5u %08x %4u %8p\n"
10628 +                           , krp->krp_op
10629 +                           , krp->krp_status
10630 +                           , krp->krp_crid, krp->krp_hid
10631 +                           , krp->krp_callback
10632 +                       );
10633 +               }
10634 +       }
10635 +}
10636 +#endif
10637 +
10638 +
10639 +static int
10640 +crypto_init(void)
10641 +{
10642 +       int error;
10643 +
10644 +       dprintk("%s(0x%x)\n", __FUNCTION__, (int) crypto_init);
10645 +
10646 +       if (crypto_initted)
10647 +               return 0;
10648 +       crypto_initted = 1;
10649 +
10650 +       spin_lock_init(&crypto_drivers_lock);
10651 +       spin_lock_init(&crypto_q_lock);
10652 +       spin_lock_init(&crypto_ret_q_lock);
10653 +
10654 +       cryptop_zone = kmem_cache_create("cryptop", sizeof(struct cryptop),
10655 +                                      0, SLAB_HWCACHE_ALIGN, NULL
10656 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
10657 +                                      , NULL
10658 +#endif
10659 +                                       );
10660 +
10661 +       cryptodesc_zone = kmem_cache_create("cryptodesc", sizeof(struct cryptodesc),
10662 +                                      0, SLAB_HWCACHE_ALIGN, NULL
10663 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
10664 +                                      , NULL
10665 +#endif
10666 +                                       );
10667 +
10668 +       if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
10669 +               printk("crypto: crypto_init cannot setup crypto zones\n");
10670 +               error = ENOMEM;
10671 +               goto bad;
10672 +       }
10673 +
10674 +       crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
10675 +       crypto_drivers = kmalloc(crypto_drivers_num * sizeof(struct cryptocap),
10676 +                       GFP_KERNEL);
10677 +       if (crypto_drivers == NULL) {
10678 +               printk("crypto: crypto_init cannot setup crypto drivers\n");
10679 +               error = ENOMEM;
10680 +               goto bad;
10681 +       }
10682 +
10683 +       memset(crypto_drivers, 0, crypto_drivers_num * sizeof(struct cryptocap));
10684 +
10685 +       init_completion(&cryptoproc_exited);
10686 +       init_completion(&cryptoretproc_exited);
10687 +
10688 +       cryptoproc = 0; /* to avoid race condition where proc runs first */
10689 +       cryptoproc = kernel_thread(crypto_proc, NULL, CLONE_FS|CLONE_FILES);
10690 +       if (cryptoproc < 0) {
10691 +               error = cryptoproc;
10692 +               printk("crypto: crypto_init cannot start crypto thread; error %d\n",
10693 +                       error);
10694 +               goto bad;
10695 +       }
10696 +
10697 +       cryptoretproc = 0; /* to avoid race condition where proc runs first */
10698 +       cryptoretproc = kernel_thread(crypto_ret_proc, NULL, CLONE_FS|CLONE_FILES);
10699 +       if (cryptoretproc < 0) {
10700 +               error = cryptoretproc;
10701 +               printk("crypto: crypto_init cannot start cryptoret thread; error %d\n",
10702 +                               error);
10703 +               goto bad;
10704 +       }
10705 +
10706 +       return 0;
10707 +bad:
10708 +       crypto_exit();
10709 +       return error;
10710 +}
10711 +
10712 +
10713 +static void
10714 +crypto_exit(void)
10715 +{
10716 +       pid_t p;
10717 +       unsigned long d_flags;
10718 +
10719 +       dprintk("%s()\n", __FUNCTION__);
10720 +
10721 +       /*
10722 +        * Terminate any crypto threads.
10723 +        */
10724 +
10725 +       CRYPTO_DRIVER_LOCK();
10726 +       p = cryptoproc;
10727 +       cryptoproc = (pid_t) -1;
10728 +       kill_proc(p, SIGTERM, 1);
10729 +       wake_up_interruptible(&cryptoproc_wait);
10730 +       CRYPTO_DRIVER_UNLOCK();
10731 +
10732 +       wait_for_completion(&cryptoproc_exited);
10733 +
10734 +       CRYPTO_DRIVER_LOCK();
10735 +       p = cryptoretproc;
10736 +       cryptoretproc = (pid_t) -1;
10737 +       kill_proc(p, SIGTERM, 1);
10738 +       wake_up_interruptible(&cryptoretproc_wait);
10739 +       CRYPTO_DRIVER_UNLOCK();
10740 +
10741 +       wait_for_completion(&cryptoretproc_exited);
10742 +
10743 +       /* XXX flush queues??? */
10744 +
10745 +       /* 
10746 +        * Reclaim dynamically allocated resources.
10747 +        */
10748 +       if (crypto_drivers != NULL)
10749 +               kfree(crypto_drivers);
10750 +
10751 +       if (cryptodesc_zone != NULL)
10752 +               kmem_cache_destroy(cryptodesc_zone);
10753 +       if (cryptop_zone != NULL)
10754 +               kmem_cache_destroy(cryptop_zone);
10755 +}
10756 +
10757 +
10758 +EXPORT_SYMBOL(crypto_newsession);
10759 +EXPORT_SYMBOL(crypto_freesession);
10760 +EXPORT_SYMBOL(crypto_get_driverid);
10761 +EXPORT_SYMBOL(crypto_kregister);
10762 +EXPORT_SYMBOL(crypto_register);
10763 +EXPORT_SYMBOL(crypto_unregister);
10764 +EXPORT_SYMBOL(crypto_unregister_all);
10765 +EXPORT_SYMBOL(crypto_unblock);
10766 +EXPORT_SYMBOL(crypto_dispatch);
10767 +EXPORT_SYMBOL(crypto_kdispatch);
10768 +EXPORT_SYMBOL(crypto_freereq);
10769 +EXPORT_SYMBOL(crypto_getreq);
10770 +EXPORT_SYMBOL(crypto_done);
10771 +EXPORT_SYMBOL(crypto_kdone);
10772 +EXPORT_SYMBOL(crypto_getfeat);
10773 +EXPORT_SYMBOL(crypto_userasymcrypto);
10774 +EXPORT_SYMBOL(crypto_getcaps);
10775 +EXPORT_SYMBOL(crypto_find_driver);
10776 +EXPORT_SYMBOL(crypto_find_device_byhid);
10777 +
10778 +module_init(crypto_init);
10779 +module_exit(crypto_exit);
10780 +
10781 +MODULE_LICENSE("BSD");
10782 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
10783 +MODULE_DESCRIPTION("OCF (OpenBSD Cryptographic Framework)");
10784 --- /dev/null
10785 +++ b/crypto/ocf/criov.c
10786 @@ -0,0 +1,215 @@
10787 +/*      $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $        */
10788 +
10789 +/*
10790 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
10791 + * Copyright (C) 2006-2007 David McCullough
10792 + * Copyright (C) 2004-2005 Intel Corporation.
10793 + * The license and original author are listed below.
10794 + *
10795 + * Copyright (c) 1999 Theo de Raadt
10796 + *
10797 + * Redistribution and use in source and binary forms, with or without
10798 + * modification, are permitted provided that the following conditions
10799 + * are met:
10800 + *
10801 + * 1. Redistributions of source code must retain the above copyright
10802 + *   notice, this list of conditions and the following disclaimer.
10803 + * 2. Redistributions in binary form must reproduce the above copyright
10804 + *   notice, this list of conditions and the following disclaimer in the
10805 + *   documentation and/or other materials provided with the distribution.
10806 + * 3. The name of the author may not be used to endorse or promote products
10807 + *   derived from this software without specific prior written permission.
10808 + *
10809 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
10810 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
10811 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
10812 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
10813 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
10814 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
10815 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
10816 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
10817 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
10818 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10819 + *
10820 +__FBSDID("$FreeBSD: src/sys/opencrypto/criov.c,v 1.5 2006/06/04 22:15:13 pjd Exp $");
10821 + */
10822 +
10823 +#ifndef AUTOCONF_INCLUDED
10824 +#include <linux/config.h>
10825 +#endif
10826 +#include <linux/module.h>
10827 +#include <linux/init.h>
10828 +#include <linux/slab.h>
10829 +#include <linux/uio.h>
10830 +#include <linux/skbuff.h>
10831 +#include <linux/kernel.h>
10832 +#include <linux/mm.h>
10833 +#include <asm/io.h>
10834 +
10835 +#include <uio.h>
10836 +#include <cryptodev.h>
10837 +
10838 +/*
10839 + * This macro is only for avoiding code duplication, as we need to skip
10840 + * given number of bytes in the same way in three functions below.
10841 + */
10842 +#define        CUIO_SKIP()     do {                                            \
10843 +       KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));           \
10844 +       KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));           \
10845 +       while (off > 0) {                                               \
10846 +               KASSERT(iol >= 0, ("%s: empty in skip", __func__));     \
10847 +               if (off < iov->iov_len)                                 \
10848 +                       break;                                          \
10849 +               off -= iov->iov_len;                                    \
10850 +               iol--;                                                  \
10851 +               iov++;                                                  \
10852 +       }                                                               \
10853 +} while (0)
10854 +
10855 +void
10856 +cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
10857 +{
10858 +       struct iovec *iov = uio->uio_iov;
10859 +       int iol = uio->uio_iovcnt;
10860 +       unsigned count;
10861 +
10862 +       CUIO_SKIP();
10863 +       while (len > 0) {
10864 +               KASSERT(iol >= 0, ("%s: empty", __func__));
10865 +               count = min((int)(iov->iov_len - off), len);
10866 +               memcpy(cp, ((caddr_t)iov->iov_base) + off, count);
10867 +               len -= count;
10868 +               cp += count;
10869 +               off = 0;
10870 +               iol--;
10871 +               iov++;
10872 +       }
10873 +}
10874 +
10875 +void
10876 +cuio_copyback(struct uio* uio, int off, int len, caddr_t cp)
10877 +{
10878 +       struct iovec *iov = uio->uio_iov;
10879 +       int iol = uio->uio_iovcnt;
10880 +       unsigned count;
10881 +
10882 +       CUIO_SKIP();
10883 +       while (len > 0) {
10884 +               KASSERT(iol >= 0, ("%s: empty", __func__));
10885 +               count = min((int)(iov->iov_len - off), len);
10886 +               memcpy(((caddr_t)iov->iov_base) + off, cp, count);
10887 +               len -= count;
10888 +               cp += count;
10889 +               off = 0;
10890 +               iol--;
10891 +               iov++;
10892 +       }
10893 +}
10894 +
10895 +/*
10896 + * Return a pointer to iov/offset of location in iovec list.
10897 + */
10898 +struct iovec *
10899 +cuio_getptr(struct uio *uio, int loc, int *off)
10900 +{
10901 +       struct iovec *iov = uio->uio_iov;
10902 +       int iol = uio->uio_iovcnt;
10903 +
10904 +       while (loc >= 0) {
10905 +               /* Normal end of search */
10906 +               if (loc < iov->iov_len) {
10907 +                       *off = loc;
10908 +                       return (iov);
10909 +               }
10910 +
10911 +               loc -= iov->iov_len;
10912 +               if (iol == 0) {
10913 +                       if (loc == 0) {
10914 +                               /* Point at the end of valid data */
10915 +                               *off = iov->iov_len;
10916 +                               return (iov);
10917 +                       } else
10918 +                               return (NULL);
10919 +               } else {
10920 +                       iov++, iol--;
10921 +               }
10922 +       }
10923 +
10924 +       return (NULL);
10925 +}
10926 +
10927 +EXPORT_SYMBOL(cuio_copyback);
10928 +EXPORT_SYMBOL(cuio_copydata);
10929 +EXPORT_SYMBOL(cuio_getptr);
10930 +
10931 +
10932 +static void
10933 +skb_copy_bits_back(struct sk_buff *skb, int offset, caddr_t cp, int len)
10934 +{
10935 +       int i;
10936 +       if (offset < skb_headlen(skb)) {
10937 +               memcpy(skb->data + offset, cp, min_t(int, skb_headlen(skb), len));
10938 +               len -= skb_headlen(skb);
10939 +               cp += skb_headlen(skb);
10940 +       }
10941 +       offset -= skb_headlen(skb);
10942 +       for (i = 0; len > 0 && i < skb_shinfo(skb)->nr_frags; i++) {
10943 +               if (offset < skb_shinfo(skb)->frags[i].size) {
10944 +                       memcpy(page_address(skb_shinfo(skb)->frags[i].page) +
10945 +                                       skb_shinfo(skb)->frags[i].page_offset,
10946 +                                       cp, min_t(int, skb_shinfo(skb)->frags[i].size, len));
10947 +                       len -= skb_shinfo(skb)->frags[i].size;
10948 +                       cp += skb_shinfo(skb)->frags[i].size;
10949 +               }
10950 +               offset -= skb_shinfo(skb)->frags[i].size;
10951 +       }
10952 +}
10953 +
10954 +void
10955 +crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in)
10956 +{
10957 +
10958 +       if ((flags & CRYPTO_F_SKBUF) != 0)
10959 +               skb_copy_bits_back((struct sk_buff *)buf, off, in, size);
10960 +       else if ((flags & CRYPTO_F_IOV) != 0)
10961 +               cuio_copyback((struct uio *)buf, off, size, in);
10962 +       else
10963 +               bcopy(in, buf + off, size);
10964 +}
10965 +
10966 +void
10967 +crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
10968 +{
10969 +
10970 +       if ((flags & CRYPTO_F_SKBUF) != 0)
10971 +               skb_copy_bits((struct sk_buff *)buf, off, out, size);
10972 +       else if ((flags & CRYPTO_F_IOV) != 0)
10973 +               cuio_copydata((struct uio *)buf, off, size, out);
10974 +       else
10975 +               bcopy(buf + off, out, size);
10976 +}
10977 +
10978 +int
10979 +crypto_apply(int flags, caddr_t buf, int off, int len,
10980 +    int (*f)(void *, void *, u_int), void *arg)
10981 +{
10982 +#if 0
10983 +       int error;
10984 +
10985 +       if ((flags & CRYPTO_F_SKBUF) != 0)
10986 +               error = XXXXXX((struct mbuf *)buf, off, len, f, arg);
10987 +       else if ((flags & CRYPTO_F_IOV) != 0)
10988 +               error = cuio_apply((struct uio *)buf, off, len, f, arg);
10989 +       else
10990 +               error = (*f)(arg, buf + off, len);
10991 +       return (error);
10992 +#else
10993 +       KASSERT(0, ("crypto_apply not implemented!\n"));
10994 +#endif
10995 +       return 0;
10996 +}
10997 +
10998 +EXPORT_SYMBOL(crypto_copyback);
10999 +EXPORT_SYMBOL(crypto_copydata);
11000 +EXPORT_SYMBOL(crypto_apply);
11001 +
11002 --- /dev/null
11003 +++ b/crypto/ocf/uio.h
11004 @@ -0,0 +1,54 @@
11005 +#ifndef _OCF_UIO_H_
11006 +#define _OCF_UIO_H_
11007 +
11008 +#include <linux/uio.h>
11009 +
11010 +/*
11011 + * The linux uio.h doesn't have all we need.  To be fully api compatible
11012 + * with the BSD cryptodev,  we need to keep this around.  Perhaps this can
11013 + * be moved back into the linux/uio.h
11014 + *
11015 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
11016 + * Copyright (C) 2006-2007 David McCullough
11017 + * Copyright (C) 2004-2005 Intel Corporation.
11018 + *
11019 + * LICENSE TERMS
11020 + *
11021 + * The free distribution and use of this software in both source and binary
11022 + * form is allowed (with or without changes) provided that:
11023 + *
11024 + *   1. distributions of this source code include the above copyright
11025 + *      notice, this list of conditions and the following disclaimer;
11026 + *
11027 + *   2. distributions in binary form include the above copyright
11028 + *      notice, this list of conditions and the following disclaimer
11029 + *      in the documentation and/or other associated materials;
11030 + *
11031 + *   3. the copyright holder's name is not used to endorse products
11032 + *      built using this software without specific written permission.
11033 + *
11034 + * ALTERNATIVELY, provided that this notice is retained in full, this product
11035 + * may be distributed under the terms of the GNU General Public License (GPL),
11036 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
11037 + *
11038 + * DISCLAIMER
11039 + *
11040 + * This software is provided 'as is' with no explicit or implied warranties
11041 + * in respect of its properties, including, but not limited to, correctness
11042 + * and/or fitness for purpose.
11043 + * ---------------------------------------------------------------------------
11044 + */
11045 +
11046 +struct uio {
11047 +       struct  iovec *uio_iov;
11048 +       int             uio_iovcnt;
11049 +       off_t   uio_offset;
11050 +       int             uio_resid;
11051 +#if 0
11052 +       enum    uio_seg uio_segflg;
11053 +       enum    uio_rw uio_rw;
11054 +       struct  thread *uio_td;
11055 +#endif
11056 +};
11057 +
11058 +#endif
11059 --- /dev/null
11060 +++ b/crypto/ocf/talitos/talitos.c
11061 @@ -0,0 +1,1359 @@
11062 +/*
11063 + * crypto/ocf/talitos/talitos.c
11064 + *
11065 + * An OCF-Linux module that uses Freescale's SEC to do the crypto.
11066 + * Based on crypto/ocf/hifn and crypto/ocf/safe OCF drivers
11067 + *
11068 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
11069 + *
11070 + * This code written by Kim A. B. Phillips <kim.phillips@freescale.com>
11071 + * some code copied from files with the following:
11072 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
11073 + *
11074 + * Redistribution and use in source and binary forms, with or without
11075 + * modification, are permitted provided that the following conditions
11076 + * are met:
11077 + *
11078 + * 1. Redistributions of source code must retain the above copyright
11079 + *    notice, this list of conditions and the following disclaimer.
11080 + * 2. Redistributions in binary form must reproduce the above copyright
11081 + *    notice, this list of conditions and the following disclaimer in the
11082 + *    documentation and/or other materials provided with the distribution.
11083 + * 3. The name of the author may not be used to endorse or promote products
11084 + *    derived from this software without specific prior written permission.
11085 + *
11086 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
11087 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
11088 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
11089 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
11090 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
11091 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
11092 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
11093 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11094 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
11095 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11096 + *
11097 + * ---------------------------------------------------------------------------
11098 + *
11099 + * NOTES:
11100 + *
11101 + * The Freescale SEC (also known as 'talitos') resides on the
11102 + * internal bus, and runs asynchronous to the processor core.  It has
11103 + * a wide gamut of cryptographic acceleration features, including single-
11104 + * pass IPsec (also known as algorithm chaining).  To properly utilize 
11105 + * all of the SEC's performance enhancing features, further reworking 
11106 + * of higher level code (framework, applications) will be necessary.
11107 + *
11108 + * The following table shows which SEC version is present in which devices:
11109 + * 
11110 + * Devices       SEC version
11111 + *
11112 + * 8272, 8248    SEC 1.0
11113 + * 885, 875      SEC 1.2
11114 + * 8555E, 8541E  SEC 2.0
11115 + * 8349E         SEC 2.01
11116 + * 8548E         SEC 2.1
11117 + *
11118 + * The following table shows the features offered by each SEC version:
11119 + *
11120 + *                            Max.   chan-
11121 + * version  Bus I/F       Clock  nels  DEU AESU AFEU MDEU PKEU RNG KEU
11122 + *
11123 + * SEC 1.0  internal 64b  100MHz   4     1    1    1    1    1   1   0
11124 + * SEC 1.2  internal 32b   66MHz   1     1    1    0    1    0   0   0
11125 + * SEC 2.0  internal 64b  166MHz   4     1    1    1    1    1   1   0
11126 + * SEC 2.01 internal 64b  166MHz   4     1    1    1    1    1   1   0
11127 + * SEC 2.1  internal 64b  333MHz   4     1    1    1    1    1   1   1
11128 + *
11129 + * Each execution unit in the SEC has two modes of execution; channel and
11130 + * slave/debug.  This driver employs the channel infrastructure in the
11131 + * device for convenience.  Only the RNG is directly accessed due to the
11132 + * convenience of its random fifo pool.  The relationship between the
11133 + * channels and execution units is depicted in the following diagram:
11134 + *
11135 + *    -------   ------------
11136 + * ---| ch0 |---|          |
11137 + *    -------   |          |
11138 + *              |          |------+-------+-------+-------+------------
11139 + *    -------   |          |      |       |       |       |           |
11140 + * ---| ch1 |---|          |      |       |       |       |           |
11141 + *    -------   |          |   ------  ------  ------  ------      ------
11142 + *              |controller|   |DEU |  |AESU|  |MDEU|  |PKEU| ...  |RNG |
11143 + *    -------   |          |   ------  ------  ------  ------      ------
11144 + * ---| ch2 |---|          |      |       |       |       |           |
11145 + *    -------   |          |      |       |       |       |           |
11146 + *              |          |------+-------+-------+-------+------------
11147 + *    -------   |          |
11148 + * ---| ch3 |---|          |
11149 + *    -------   ------------
11150 + *
11151 + * Channel ch0 may drive an aes operation to the aes unit (AESU),
11152 + * and, at the same time, ch1 may drive a message digest operation
11153 + * to the mdeu. Each channel has an input descriptor FIFO, and the 
11154 + * FIFO can contain, e.g. on the 8541E, up to 24 entries, before
11155 + * a buffer overrun error is triggered. The controller is responsible
11156 + * for fetching the data from descriptor pointers, and passing the 
11157 + * data to the appropriate EUs. The controller also writes the 
11158 + * cryptographic operation's result to memory. The SEC notifies 
11159 + * completion by triggering an interrupt and/or setting the 1st byte 
11160 + * of the hdr field to 0xff.
11161 + *
11162 + * TODO:
11163 + * o support more algorithms
11164 + * o support more versions of the SEC
11165 + * o add support for linux 2.4
11166 + * o scatter-gather (sg) support
11167 + * o add support for public key ops (PKEU)
11168 + * o add statistics
11169 + */
11170 +
11171 +#ifndef AUTOCONF_INCLUDED
11172 +#include <linux/config.h>
11173 +#endif
11174 +#include <linux/module.h>
11175 +#include <linux/init.h>
11176 +#include <linux/interrupt.h>
11177 +#include <linux/spinlock.h>
11178 +#include <linux/random.h>
11179 +#include <linux/skbuff.h>
11180 +#include <asm/scatterlist.h>
11181 +#include <linux/dma-mapping.h>  /* dma_map_single() */
11182 +#include <linux/moduleparam.h>
11183 +
11184 +#include <linux/version.h>
11185 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
11186 +#include <linux/platform_device.h>
11187 +#endif
11188 +
11189 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
11190 +#include <linux/of_platform.h>
11191 +#endif
11192 +
11193 +#include <cryptodev.h>
11194 +#include <uio.h>
11195 +
11196 +#define DRV_NAME "talitos" 
11197 +
11198 +#include "talitos_dev.h"
11199 +#include "talitos_soft.h"
11200 +
11201 +#define read_random(p,l) get_random_bytes(p,l)
11202 +
11203 +const char talitos_driver_name[] = "Talitos OCF";
11204 +const char talitos_driver_version[] = "0.2";
11205 +
11206 +static int talitos_newsession(device_t dev, u_int32_t *sidp,
11207 +                                                               struct cryptoini *cri);
11208 +static int talitos_freesession(device_t dev, u_int64_t tid);
11209 +static int talitos_process(device_t dev, struct cryptop *crp, int hint);
11210 +static void dump_talitos_status(struct talitos_softc *sc);
11211 +static int talitos_submit(struct talitos_softc *sc, struct talitos_desc *td, 
11212 +                                                               int chsel);
11213 +static void talitos_doneprocessing(struct talitos_softc *sc);
11214 +static void talitos_init_device(struct talitos_softc *sc);
11215 +static void talitos_reset_device_master(struct talitos_softc *sc);
11216 +static void talitos_reset_device(struct talitos_softc *sc);
11217 +static void talitos_errorprocessing(struct talitos_softc *sc);
11218 +#ifdef CONFIG_PPC_MERGE
11219 +static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match);
11220 +static int talitos_remove(struct of_device *ofdev);
11221 +#else
11222 +static int talitos_probe(struct platform_device *pdev);
11223 +static int talitos_remove(struct platform_device *pdev);
11224 +#endif
11225 +#ifdef CONFIG_OCF_RANDOMHARVEST
11226 +static int talitos_read_random(void *arg, u_int32_t *buf, int maxwords);
11227 +static void talitos_rng_init(struct talitos_softc *sc);
11228 +#endif
11229 +
11230 +static device_method_t talitos_methods = {
11231 +       /* crypto device methods */
11232 +       DEVMETHOD(cryptodev_newsession, talitos_newsession),
11233 +       DEVMETHOD(cryptodev_freesession,talitos_freesession),
11234 +       DEVMETHOD(cryptodev_process,    talitos_process),
11235 +};
11236 +
11237 +#define debug talitos_debug
11238 +int talitos_debug = 0;
11239 +module_param(talitos_debug, int, 0644);
11240 +MODULE_PARM_DESC(talitos_debug, "Enable debug");
11241 +
11242 +static inline void talitos_write(volatile unsigned *addr, u32 val)
11243 +{
11244 +        out_be32(addr, val);
11245 +}
11246 +
11247 +static inline u32 talitos_read(volatile unsigned *addr)
11248 +{
11249 +        u32 val;
11250 +        val = in_be32(addr);
11251 +        return val;
11252 +}
11253 +
11254 +static void dump_talitos_status(struct talitos_softc *sc)
11255 +{
11256 +       unsigned int v, v_hi, i, *ptr;
11257 +       v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
11258 +       v_hi = talitos_read(sc->sc_base_addr + TALITOS_MCR_HI);
11259 +       printk(KERN_INFO "%s: MCR          0x%08x_%08x\n",
11260 +                       device_get_nameunit(sc->sc_cdev), v, v_hi);
11261 +       v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
11262 +       v_hi = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
11263 +       printk(KERN_INFO "%s: IMR          0x%08x_%08x\n",
11264 +                       device_get_nameunit(sc->sc_cdev), v, v_hi);
11265 +       v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
11266 +       v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
11267 +       printk(KERN_INFO "%s: ISR          0x%08x_%08x\n",
11268 +                       device_get_nameunit(sc->sc_cdev), v, v_hi);
11269 +       for (i = 0; i < sc->sc_num_channels; i++) { 
11270 +               v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET + 
11271 +                       TALITOS_CH_CDPR);
11272 +               v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET + 
11273 +                       TALITOS_CH_CDPR_HI);
11274 +               printk(KERN_INFO "%s: CDPR     ch%d 0x%08x_%08x\n", 
11275 +                               device_get_nameunit(sc->sc_cdev), i, v, v_hi);
11276 +       }
11277 +       for (i = 0; i < sc->sc_num_channels; i++) { 
11278 +               v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET + 
11279 +                       TALITOS_CH_CCPSR);
11280 +               v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET + 
11281 +                       TALITOS_CH_CCPSR_HI);
11282 +               printk(KERN_INFO "%s: CCPSR    ch%d 0x%08x_%08x\n", 
11283 +                               device_get_nameunit(sc->sc_cdev), i, v, v_hi);
11284 +       }
11285 +       ptr = sc->sc_base_addr + TALITOS_CH_DESCBUF;
11286 +       for (i = 0; i < 16; i++) { 
11287 +               v = talitos_read(ptr++); v_hi = talitos_read(ptr++);
11288 +               printk(KERN_INFO "%s: DESCBUF  ch0 0x%08x_%08x (tdp%02d)\n", 
11289 +                               device_get_nameunit(sc->sc_cdev), v, v_hi, i);
11290 +       }
11291 +       return;
11292 +}
11293 +
11294 +
11295 +#ifdef CONFIG_OCF_RANDOMHARVEST
11296 +/* 
11297 + * pull random numbers off the RNG FIFO, not exceeding amount available
11298 + */
11299 +static int
11300 +talitos_read_random(void *arg, u_int32_t *buf, int maxwords)
11301 +{
11302 +       struct talitos_softc *sc = (struct talitos_softc *) arg;
11303 +       int rc;
11304 +       u_int32_t v;
11305 +
11306 +       DPRINTF("%s()\n", __FUNCTION__);
11307 +
11308 +       /* check for things like FIFO underflow */
11309 +       v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
11310 +       if (unlikely(v)) {
11311 +               printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
11312 +                               device_get_nameunit(sc->sc_cdev), v);
11313 +               return 0;
11314 +       }
11315 +       /*
11316 +        * OFL is number of available 64-bit words, 
11317 +        * shift and convert to a 32-bit word count
11318 +        */
11319 +       v = talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI);
11320 +       v = (v & TALITOS_RNGSR_HI_OFL) >> (16 - 1);
11321 +       if (maxwords > v)
11322 +               maxwords = v;
11323 +       for (rc = 0; rc < maxwords; rc++) {
11324 +               buf[rc] = talitos_read(sc->sc_base_addr + 
11325 +                       TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
11326 +       }
11327 +       if (maxwords & 1) {
11328 +               /* 
11329 +                * RNG will complain with an AE in the RNGISR
11330 +                * if we don't complete the pairs of 32-bit reads
11331 +                * to its 64-bit register based FIFO
11332 +                */
11333 +               v = talitos_read(sc->sc_base_addr + 
11334 +                       TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
11335 +       }
11336 +
11337 +       return rc;
11338 +}
11339 +
11340 +static void
11341 +talitos_rng_init(struct talitos_softc *sc)
11342 +{
11343 +       u_int32_t v;
11344 +
11345 +       DPRINTF("%s()\n", __FUNCTION__);
11346 +       /* reset RNG EU */
11347 +       v = talitos_read(sc->sc_base_addr + TALITOS_RNGRCR_HI);
11348 +       v |= TALITOS_RNGRCR_HI_SR;
11349 +       talitos_write(sc->sc_base_addr + TALITOS_RNGRCR_HI, v);
11350 +       while ((talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI) 
11351 +               & TALITOS_RNGSR_HI_RD) == 0)
11352 +                       cpu_relax();
11353 +       /*
11354 +        * we tell the RNG to start filling the RNG FIFO
11355 +        * by writing the RNGDSR 
11356 +        */
11357 +       v = talitos_read(sc->sc_base_addr + TALITOS_RNGDSR_HI);
11358 +       talitos_write(sc->sc_base_addr + TALITOS_RNGDSR_HI, v);
11359 +       /*
11360 +        * 64 bits of data will be pushed onto the FIFO every 
11361 +        * 256 SEC cycles until the FIFO is full.  The RNG then 
11362 +        * attempts to keep the FIFO full.
11363 +        */
11364 +       v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
11365 +       if (v) {
11366 +               printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
11367 +                       device_get_nameunit(sc->sc_cdev), v);
11368 +               return;
11369 +       }
11370 +       /*
11371 +        * n.b. we need to add a FIPS test here - if the RNG is going 
11372 +        * to fail, it's going to fail at reset time
11373 +        */
11374 +       return;
11375 +}
11376 +#endif /* CONFIG_OCF_RANDOMHARVEST */
11377 +
11378 +/*
11379 + * Generate a new software session.
11380 + */
11381 +static int
11382 +talitos_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
11383 +{
11384 +       struct cryptoini *c, *encini = NULL, *macini = NULL;
11385 +       struct talitos_softc *sc = device_get_softc(dev);
11386 +       struct talitos_session *ses = NULL;
11387 +       int sesn;
11388 +
11389 +       DPRINTF("%s()\n", __FUNCTION__);
11390 +       if (sidp == NULL || cri == NULL || sc == NULL) {
11391 +               DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
11392 +               return EINVAL;
11393 +       }
11394 +       for (c = cri; c != NULL; c = c->cri_next) {
11395 +               if (c->cri_alg == CRYPTO_MD5 ||
11396 +                   c->cri_alg == CRYPTO_MD5_HMAC ||
11397 +                   c->cri_alg == CRYPTO_SHA1 ||
11398 +                   c->cri_alg == CRYPTO_SHA1_HMAC ||
11399 +                   c->cri_alg == CRYPTO_NULL_HMAC) {
11400 +                       if (macini)
11401 +                               return EINVAL;
11402 +                       macini = c;
11403 +               } else if (c->cri_alg == CRYPTO_DES_CBC ||
11404 +                   c->cri_alg == CRYPTO_3DES_CBC ||
11405 +                   c->cri_alg == CRYPTO_AES_CBC ||
11406 +                   c->cri_alg == CRYPTO_NULL_CBC) {
11407 +                       if (encini)
11408 +                               return EINVAL;
11409 +                       encini = c;
11410 +               } else {
11411 +                       DPRINTF("UNKNOWN c->cri_alg %d\n", encini->cri_alg);
11412 +                       return EINVAL;
11413 +               }
11414 +       }
11415 +       if (encini == NULL && macini == NULL)
11416 +               return EINVAL;
11417 +       if (encini) {   
11418 +               /* validate key length */
11419 +               switch (encini->cri_alg) {
11420 +               case CRYPTO_DES_CBC:
11421 +                       if (encini->cri_klen != 64)
11422 +                               return EINVAL;
11423 +                       break;
11424 +               case CRYPTO_3DES_CBC:
11425 +                       if (encini->cri_klen != 192) {
11426 +                               return EINVAL;
11427 +                       }
11428 +                       break;
11429 +               case CRYPTO_AES_CBC:
11430 +                       if (encini->cri_klen != 128 &&
11431 +                           encini->cri_klen != 192 &&
11432 +                           encini->cri_klen != 256)
11433 +                               return EINVAL;
11434 +                       break;
11435 +               default:
11436 +                       DPRINTF("UNKNOWN encini->cri_alg %d\n", 
11437 +                               encini->cri_alg);
11438 +                       return EINVAL;
11439 +               }
11440 +       }
11441 +
11442 +       if (sc->sc_sessions == NULL) {
11443 +               ses = sc->sc_sessions = (struct talitos_session *)
11444 +                       kmalloc(sizeof(struct talitos_session), SLAB_ATOMIC);
11445 +               if (ses == NULL)
11446 +                       return ENOMEM;
11447 +               memset(ses, 0, sizeof(struct talitos_session));
11448 +               sesn = 0;
11449 +               sc->sc_nsessions = 1;
11450 +       } else {
11451 +               for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
11452 +                       if (sc->sc_sessions[sesn].ses_used == 0) {
11453 +                               ses = &sc->sc_sessions[sesn];
11454 +                               break;
11455 +                       }
11456 +               }
11457 +
11458 +               if (ses == NULL) {
11459 +                       /* allocating session */
11460 +                       sesn = sc->sc_nsessions;
11461 +                       ses = (struct talitos_session *) kmalloc(
11462 +                               (sesn + 1) * sizeof(struct talitos_session), 
11463 +                               SLAB_ATOMIC);
11464 +                       if (ses == NULL)
11465 +                               return ENOMEM;
11466 +                       memset(ses, 0,
11467 +                               (sesn + 1) * sizeof(struct talitos_session));
11468 +                       memcpy(ses, sc->sc_sessions, 
11469 +                               sesn * sizeof(struct talitos_session));
11470 +                       memset(sc->sc_sessions, 0,
11471 +                               sesn * sizeof(struct talitos_session));
11472 +                       kfree(sc->sc_sessions);
11473 +                       sc->sc_sessions = ses;
11474 +                       ses = &sc->sc_sessions[sesn];
11475 +                       sc->sc_nsessions++;
11476 +               }
11477 +       }
11478 +
11479 +       ses->ses_used = 1;
11480 +
11481 +       if (encini) {
11482 +               /* get an IV */
11483 +               /* XXX may read fewer than requested */
11484 +               read_random(ses->ses_iv, sizeof(ses->ses_iv));
11485 +
11486 +               ses->ses_klen = (encini->cri_klen + 7) / 8;
11487 +               memcpy(ses->ses_key, encini->cri_key, ses->ses_klen);
11488 +               if (macini) {
11489 +                       /* doing hash on top of cipher */
11490 +                       ses->ses_hmac_len = (macini->cri_klen + 7) / 8;
11491 +                       memcpy(ses->ses_hmac, macini->cri_key,
11492 +                               ses->ses_hmac_len);
11493 +               }
11494 +       } else if (macini) {
11495 +               /* doing hash */
11496 +               ses->ses_klen = (macini->cri_klen + 7) / 8;
11497 +               memcpy(ses->ses_key, macini->cri_key, ses->ses_klen);
11498 +       }
11499 +
11500 +       /* back compat way of determining MSC result len */
11501 +       if (macini) {
11502 +               ses->ses_mlen = macini->cri_mlen;
11503 +               if (ses->ses_mlen == 0) {
11504 +                       if (macini->cri_alg == CRYPTO_MD5_HMAC)
11505 +                               ses->ses_mlen = MD5_HASH_LEN;
11506 +                       else
11507 +                               ses->ses_mlen = SHA1_HASH_LEN;
11508 +               }
11509 +       }
11510 +
11511 +       /* really should make up a template td here, 
11512 +        * and only fill things like i/o and direction in process() */
11513 +
11514 +       /* assign session ID */
11515 +       *sidp = TALITOS_SID(sc->sc_num, sesn);
11516 +       return 0;
11517 +}
11518 +
11519 +/*
11520 + * Deallocate a session.
11521 + */
11522 +static int
11523 +talitos_freesession(device_t dev, u_int64_t tid)
11524 +{
11525 +       struct talitos_softc *sc = device_get_softc(dev);
11526 +       int session, ret;
11527 +       u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
11528 +
11529 +       if (sc == NULL)
11530 +               return EINVAL;
11531 +       session = TALITOS_SESSION(sid);
11532 +       if (session < sc->sc_nsessions) {
11533 +               memset(&sc->sc_sessions[session], 0,
11534 +                       sizeof(sc->sc_sessions[session]));
11535 +               ret = 0;
11536 +       } else
11537 +               ret = EINVAL;
11538 +       return ret;
11539 +}
11540 +
11541 +/*
11542 + * launch device processing - it will come back with done notification 
11543 + * in the form of an interrupt and/or HDR_DONE_BITS in header 
11544 + */
11545 +static int 
11546 +talitos_submit(
11547 +       struct talitos_softc *sc,
11548 +       struct talitos_desc *td,
11549 +       int chsel)
11550 +{
11551 +       u_int32_t v;
11552 +
11553 +       v = dma_map_single(NULL, td, sizeof(*td), DMA_TO_DEVICE);
11554 +       talitos_write(sc->sc_base_addr + 
11555 +               chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF, 0);
11556 +       talitos_write(sc->sc_base_addr + 
11557 +               chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF_HI, v);
11558 +       return 0;
11559 +}
11560 +
11561 +static int
11562 +talitos_process(device_t dev, struct cryptop *crp, int hint)
11563 +{
11564 +       int i, err = 0, ivsize;
11565 +       struct talitos_softc *sc = device_get_softc(dev);
11566 +       struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
11567 +       caddr_t iv;
11568 +       struct talitos_session *ses;
11569 +       struct talitos_desc *td;
11570 +       unsigned long flags;
11571 +       /* descriptor mappings */
11572 +       int hmac_key, hmac_data, cipher_iv, cipher_key, 
11573 +               in_fifo, out_fifo, cipher_iv_out;
11574 +       static int chsel = -1;
11575 +
11576 +       DPRINTF("%s()\n", __FUNCTION__);
11577 +
11578 +       if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
11579 +               return EINVAL;
11580 +       }
11581 +       crp->crp_etype = 0;
11582 +       if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
11583 +               return EINVAL;
11584 +       }
11585 +
11586 +       ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];
11587 +
11588 +        /* enter the channel scheduler */ 
11589 +       spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
11590 +
11591 +       /* reuse channel that already had/has requests for the required EU */
11592 +       for (i = 0; i < sc->sc_num_channels; i++) {
11593 +               if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg)
11594 +                       break;
11595 +       }
11596 +       if (i == sc->sc_num_channels) {
11597 +               /*
11598 +                * haven't seen this algo the last sc_num_channels or more
11599 +                * use round robin in this case
11600 +                * nb: sc->sc_num_channels must be power of 2 
11601 +                */
11602 +               chsel = (chsel + 1) & (sc->sc_num_channels - 1);
11603 +       } else {
11604 +               /*
11605 +                * matches channel with same target execution unit; 
11606 +                * use same channel in this case
11607 +                */
11608 +               chsel = i;
11609 +       }
11610 +       sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;
11611 +
11612 +        /* release the channel scheduler lock */ 
11613 +       spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
11614 +
11615 +       /* acquire the selected channel fifo lock */
11616 +       spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags);
11617 +
11618 +       /* find and reserve next available descriptor-cryptop pair */
11619 +       for (i = 0; i < sc->sc_chfifo_len; i++) {
11620 +               if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
11621 +                       /* 
11622 +                        * ensure correct descriptor formation by
11623 +                        * avoiding inadvertently setting "optional" entries
11624 +                        * e.g. not using "optional" dptr2 for MD/HMAC descs
11625 +                        */
11626 +                       memset(&sc->sc_chnfifo[chsel][i].cf_desc,
11627 +                               0, sizeof(*td));
11628 +                       /* reserve it with done notification request bit */
11629 +                       sc->sc_chnfifo[chsel][i].cf_desc.hdr |= 
11630 +                               TALITOS_DONE_NOTIFY;
11631 +                       break;
11632 +               }
11633 +       }
11634 +       spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags);
11635 +
11636 +       if (i == sc->sc_chfifo_len) {
11637 +               /* fifo full */
11638 +               err = ERESTART;
11639 +               goto errout;
11640 +       }
11641 +       
11642 +       td = &sc->sc_chnfifo[chsel][i].cf_desc;
11643 +       sc->sc_chnfifo[chsel][i].cf_crp = crp;
11644 +
11645 +       crd1 = crp->crp_desc;
11646 +       if (crd1 == NULL) {
11647 +               err = EINVAL;
11648 +               goto errout;
11649 +       }
11650 +       crd2 = crd1->crd_next;
11651 +       /* prevent compiler warning */
11652 +       hmac_key = 0;
11653 +       hmac_data = 0;
11654 +       if (crd2 == NULL) {
11655 +               td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU;
11656 +               /* assign descriptor dword ptr mappings for this desc. type */
11657 +               cipher_iv = 1;
11658 +               cipher_key = 2;
11659 +               in_fifo = 3;
11660 +               cipher_iv_out = 5;
11661 +               if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
11662 +                   crd1->crd_alg == CRYPTO_SHA1_HMAC ||
11663 +                   crd1->crd_alg == CRYPTO_SHA1 ||
11664 +                   crd1->crd_alg == CRYPTO_MD5) {
11665 +                       out_fifo = 5;
11666 +                       maccrd = crd1;
11667 +                       enccrd = NULL;
11668 +               } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
11669 +                   crd1->crd_alg == CRYPTO_3DES_CBC ||
11670 +                   crd1->crd_alg == CRYPTO_AES_CBC ||
11671 +                   crd1->crd_alg == CRYPTO_ARC4) {
11672 +                       out_fifo = 4;
11673 +                       maccrd = NULL;
11674 +                       enccrd = crd1;
11675 +               } else {
11676 +                       DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg);
11677 +                       err = EINVAL;
11678 +                       goto errout;
11679 +               }
11680 +       } else {
11681 +               if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) {
11682 +                       td->hdr |= TD_TYPE_IPSEC_ESP;
11683 +               } else {
11684 +                       DPRINTF("unimplemented: multiple descriptor ipsec\n");
11685 +                       err = EINVAL;
11686 +                       goto errout;
11687 +               }
11688 +               /* assign descriptor dword ptr mappings for this desc. type */
11689 +               hmac_key = 0;
11690 +               hmac_data = 1;
11691 +               cipher_iv = 2;
11692 +               cipher_key = 3;
11693 +               in_fifo = 4;
11694 +               out_fifo = 5;
11695 +               cipher_iv_out = 6;
11696 +               if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
11697 +                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
11698 +                     crd1->crd_alg == CRYPTO_MD5 ||
11699 +                     crd1->crd_alg == CRYPTO_SHA1) &&
11700 +                   (crd2->crd_alg == CRYPTO_DES_CBC ||
11701 +                    crd2->crd_alg == CRYPTO_3DES_CBC ||
11702 +                    crd2->crd_alg == CRYPTO_AES_CBC ||
11703 +                    crd2->crd_alg == CRYPTO_ARC4) &&
11704 +                   ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
11705 +                       maccrd = crd1;
11706 +                       enccrd = crd2;
11707 +               } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
11708 +                    crd1->crd_alg == CRYPTO_ARC4 ||
11709 +                    crd1->crd_alg == CRYPTO_3DES_CBC ||
11710 +                    crd1->crd_alg == CRYPTO_AES_CBC) &&
11711 +                   (crd2->crd_alg == CRYPTO_MD5_HMAC ||
11712 +                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
11713 +                     crd2->crd_alg == CRYPTO_MD5 ||
11714 +                     crd2->crd_alg == CRYPTO_SHA1) &&
11715 +                   (crd1->crd_flags & CRD_F_ENCRYPT)) {
11716 +                       enccrd = crd1;
11717 +                       maccrd = crd2;
11718 +               } else {
11719 +                       /* We cannot order the SEC as requested */
11720 +                       printk("%s: cannot do the order\n",
11721 +                                       device_get_nameunit(sc->sc_cdev));
11722 +                       err = EINVAL;
11723 +                       goto errout;
11724 +               }
11725 +       }
11726 +       /* assign in_fifo and out_fifo based on input/output struct type */
11727 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
11728 +               /* using SKB buffers */
11729 +               struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;
11730 +               if (skb_shinfo(skb)->nr_frags) {
11731 +                       printk("%s: skb frags unimplemented\n",
11732 +                                       device_get_nameunit(sc->sc_cdev));
11733 +                       err = EINVAL;
11734 +                       goto errout;
11735 +               }
11736 +               td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data, 
11737 +                       skb->len, DMA_TO_DEVICE);
11738 +               td->ptr[in_fifo].len = skb->len;
11739 +               td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data, 
11740 +                       skb->len, DMA_TO_DEVICE);
11741 +               td->ptr[out_fifo].len = skb->len;
11742 +               td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
11743 +                       skb->len, DMA_TO_DEVICE);
11744 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
11745 +               /* using IOV buffers */
11746 +               struct uio *uiop = (struct uio *)crp->crp_buf;
11747 +               if (uiop->uio_iovcnt > 1) {
11748 +                       printk("%s: iov frags unimplemented\n",
11749 +                                       device_get_nameunit(sc->sc_cdev));
11750 +                       err = EINVAL;
11751 +                       goto errout;
11752 +               }
11753 +               td->ptr[in_fifo].ptr = dma_map_single(NULL,
11754 +                       uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
11755 +               td->ptr[in_fifo].len = crp->crp_ilen;
11756 +               /* crp_olen is never set; always use crp_ilen */
11757 +               td->ptr[out_fifo].ptr = dma_map_single(NULL,
11758 +                       uiop->uio_iov->iov_base,
11759 +                       crp->crp_ilen, DMA_TO_DEVICE);
11760 +               td->ptr[out_fifo].len = crp->crp_ilen;
11761 +       } else {
11762 +               /* using contig buffers */
11763 +               td->ptr[in_fifo].ptr = dma_map_single(NULL,
11764 +                       crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
11765 +               td->ptr[in_fifo].len = crp->crp_ilen;
11766 +               td->ptr[out_fifo].ptr = dma_map_single(NULL,
11767 +                       crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
11768 +               td->ptr[out_fifo].len = crp->crp_ilen;
11769 +       }
11770 +       if (enccrd) {
11771 +               switch (enccrd->crd_alg) {
11772 +               case CRYPTO_3DES_CBC:
11773 +                       td->hdr |= TALITOS_MODE0_DEU_3DES;
11774 +                       /* FALLTHROUGH */
11775 +               case CRYPTO_DES_CBC:
11776 +                       td->hdr |= TALITOS_SEL0_DEU
11777 +                               |  TALITOS_MODE0_DEU_CBC;
11778 +                       if (enccrd->crd_flags & CRD_F_ENCRYPT)
11779 +                               td->hdr |= TALITOS_MODE0_DEU_ENC;
11780 +                       ivsize = 2*sizeof(u_int32_t);
11781 +                       DPRINTF("%cDES ses %d ch %d len %d\n",
11782 +                               (td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1',
11783 +                               (u32)TALITOS_SESSION(crp->crp_sid),
11784 +                               chsel, td->ptr[in_fifo].len);
11785 +                       break;
11786 +               case CRYPTO_AES_CBC:
11787 +                       td->hdr |= TALITOS_SEL0_AESU
11788 +                               |  TALITOS_MODE0_AESU_CBC;
11789 +                       if (enccrd->crd_flags & CRD_F_ENCRYPT)
11790 +                               td->hdr |= TALITOS_MODE0_AESU_ENC;
11791 +                       ivsize = 4*sizeof(u_int32_t);
11792 +                       DPRINTF("AES  ses %d ch %d len %d\n",
11793 +                               (u32)TALITOS_SESSION(crp->crp_sid),
11794 +                               chsel, td->ptr[in_fifo].len);
11795 +                       break;
11796 +               default:
11797 +                       printk("%s: unimplemented enccrd->crd_alg %d\n",
11798 +                                       device_get_nameunit(sc->sc_cdev), enccrd->crd_alg);
11799 +                       err = EINVAL;
11800 +                       goto errout;
11801 +               }
11802 +               /*
11803 +                * Setup encrypt/decrypt state.  When using basic ops
11804 +                * we can't use an inline IV because hash/crypt offset
11805 +                * must be from the end of the IV to the start of the
11806 +                * crypt data and this leaves out the preceding header
11807 +                * from the hash calculation.  Instead we place the IV
11808 +                * in the state record and set the hash/crypt offset to
11809 +                * copy both the header+IV.
11810 +                */
11811 +               if (enccrd->crd_flags & CRD_F_ENCRYPT) {
11812 +                       td->hdr |= TALITOS_DIR_OUTBOUND; 
11813 +                       if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
11814 +                               iv = enccrd->crd_iv;
11815 +                       else
11816 +                               iv = (caddr_t) ses->ses_iv;
11817 +                       if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
11818 +                               crypto_copyback(crp->crp_flags, crp->crp_buf,
11819 +                                   enccrd->crd_inject, ivsize, iv);
11820 +                       }
11821 +               } else {
11822 +                       td->hdr |= TALITOS_DIR_INBOUND; 
11823 +                       if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
11824 +                               iv = enccrd->crd_iv;
11825 +                               bcopy(enccrd->crd_iv, iv, ivsize);
11826 +                       } else {
11827 +                               iv = (caddr_t) ses->ses_iv;
11828 +                               crypto_copydata(crp->crp_flags, crp->crp_buf,
11829 +                                   enccrd->crd_inject, ivsize, iv);
11830 +                       }
11831 +               }
11832 +               td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize, 
11833 +                       DMA_TO_DEVICE);
11834 +               td->ptr[cipher_iv].len = ivsize;
11835 +               /*
11836 +                * we don't need the cipher iv out length/pointer
11837 +                * field to do ESP IPsec. Therefore we set the len field as 0,
11838 +                * which tells the SEC not to do anything with this len/ptr
11839 +                * field. Previously, when length/pointer was pointing to iv,
11840 +                * it gave us corruption of packets.
11841 +                */
11842 +               td->ptr[cipher_iv_out].len = 0;
11843 +       }
11844 +       if (enccrd && maccrd) {
11845 +               /* this is ipsec only for now */
11846 +               td->hdr |= TALITOS_SEL1_MDEU
11847 +                       |  TALITOS_MODE1_MDEU_INIT
11848 +                       |  TALITOS_MODE1_MDEU_PAD;
11849 +               switch (maccrd->crd_alg) {
11850 +                       case    CRYPTO_MD5:     
11851 +                               td->hdr |= TALITOS_MODE1_MDEU_MD5;
11852 +                               break;
11853 +                       case    CRYPTO_MD5_HMAC:        
11854 +                               td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
11855 +                               break;
11856 +                       case    CRYPTO_SHA1:    
11857 +                               td->hdr |= TALITOS_MODE1_MDEU_SHA1;
11858 +                               break;
11859 +                       case    CRYPTO_SHA1_HMAC:       
11860 +                               td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
11861 +                               break;
11862 +                       default:
11863 +                               /* We cannot order the SEC as requested */
11864 +                               printk("%s: cannot do the order\n",
11865 +                                               device_get_nameunit(sc->sc_cdev));
11866 +                               err = EINVAL;
11867 +                               goto errout;
11868 +               }
11869 +               if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
11870 +                  (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
11871 +                       /*
11872 +                        * The offset from hash data to the start of
11873 +                        * crypt data is the difference in the skips.
11874 +                        */
11875 +                       /* ipsec only for now */
11876 +                       td->ptr[hmac_key].ptr = dma_map_single(NULL, 
11877 +                               ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
11878 +                       td->ptr[hmac_key].len = ses->ses_hmac_len;
11879 +                       td->ptr[in_fifo].ptr  += enccrd->crd_skip;
11880 +                       td->ptr[in_fifo].len  =  enccrd->crd_len;
11881 +                       td->ptr[out_fifo].ptr += enccrd->crd_skip;
11882 +                       td->ptr[out_fifo].len =  enccrd->crd_len;
11883 +                       /* bytes of HMAC to append to ciphertext */
11884 +                       td->ptr[out_fifo].extent =  ses->ses_mlen;
11885 +                       td->ptr[hmac_data].ptr += maccrd->crd_skip; 
11886 +                       td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip;
11887 +               }
11888 +               if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
11889 +                       printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n",
11890 +                                       device_get_nameunit(sc->sc_cdev));
11891 +               }
11892 +       }
11893 +       if (!enccrd && maccrd) {
11894 +               /* single MD5 or SHA */
11895 +               td->hdr |= TALITOS_SEL0_MDEU
11896 +                               |  TALITOS_MODE0_MDEU_INIT
11897 +                               |  TALITOS_MODE0_MDEU_PAD;
11898 +               switch (maccrd->crd_alg) {
11899 +                       case    CRYPTO_MD5:     
11900 +                               td->hdr |= TALITOS_MODE0_MDEU_MD5;
11901 +                               DPRINTF("MD5  ses %d ch %d len %d\n",
11902 +                                       (u32)TALITOS_SESSION(crp->crp_sid), 
11903 +                                       chsel, td->ptr[in_fifo].len);
11904 +                               break;
11905 +                       case    CRYPTO_MD5_HMAC:        
11906 +                               td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
11907 +                               break;
11908 +                       case    CRYPTO_SHA1:    
11909 +                               td->hdr |= TALITOS_MODE0_MDEU_SHA1;
11910 +                               DPRINTF("SHA1 ses %d ch %d len %d\n",
11911 +                                       (u32)TALITOS_SESSION(crp->crp_sid), 
11912 +                                       chsel, td->ptr[in_fifo].len);
11913 +                               break;
11914 +                       case    CRYPTO_SHA1_HMAC:       
11915 +                               td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
11916 +                               break;
11917 +                       default:
11918 +                               /* We cannot order the SEC as requested */
11919 +                               DPRINTF("cannot do the order\n");
11920 +                               err = EINVAL;
11921 +                               goto errout;
11922 +               }
11923 +
11924 +               if (crp->crp_flags & CRYPTO_F_IOV)
11925 +                       td->ptr[out_fifo].ptr += maccrd->crd_inject;
11926 +
11927 +               if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
11928 +                  (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
11929 +                       td->ptr[hmac_key].ptr = dma_map_single(NULL, 
11930 +                               ses->ses_hmac, ses->ses_hmac_len, 
11931 +                               DMA_TO_DEVICE);
11932 +                       td->ptr[hmac_key].len = ses->ses_hmac_len;
11933 +               }
11934 +       } 
11935 +       else {
11936 +               /* using process key (session data has duplicate) */
11937 +               td->ptr[cipher_key].ptr = dma_map_single(NULL, 
11938 +                       enccrd->crd_key, (enccrd->crd_klen + 7) / 8, 
11939 +                       DMA_TO_DEVICE);
11940 +               td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
11941 +       }
11942 +       /* descriptor complete - GO! */
11943 +       return talitos_submit(sc, td, chsel);
11944 +
11945 +errout:
11946 +       if (err != ERESTART) {
11947 +               crp->crp_etype = err;
11948 +               crypto_done(crp);
11949 +       }
11950 +       return err;
11951 +}
11952 +
11953 +/* go through all channels' descriptors, notifying OCF what has 
11954 + * _and_hasn't_ successfully completed, and reset the device 
11955 + * (otherwise it's up to decoding desc hdrs!)
11956 + */
11957 +static void talitos_errorprocessing(struct talitos_softc *sc)
11958 +{
11959 +       unsigned long flags;
11960 +       int i, j;
11961 +
11962 +       /* disable further scheduling until under control */
11963 +       spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
11964 +
11965 +       if (debug) dump_talitos_status(sc);
11966 +       /* go through descriptors, try and salvage those successfully done, 
11967 +        * and EIO those that weren't
11968 +        */
11969 +       for (i = 0; i < sc->sc_num_channels; i++) {
11970 +               spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
11971 +               for (j = 0; j < sc->sc_chfifo_len; j++) {
11972 +                       if (sc->sc_chnfifo[i][j].cf_desc.hdr) {
11973 +                               if ((sc->sc_chnfifo[i][j].cf_desc.hdr 
11974 +                                       & TALITOS_HDR_DONE_BITS) 
11975 +                                       != TALITOS_HDR_DONE_BITS) {
11976 +                                       /* this one didn't finish */
11977 +                                       /* signify in crp->etype */
11978 +                                       sc->sc_chnfifo[i][j].cf_crp->crp_etype 
11979 +                                               = EIO;
11980 +                               }
11981 +                       } else
11982 +                               continue; /* free entry */
11983 +                       /* either way, notify ocf */
11984 +                       crypto_done(sc->sc_chnfifo[i][j].cf_crp);
11985 +                       /* and tag it available again
11986 +                        *
11987 +                        * memset to ensure correct descriptor formation by
11988 +                        * avoiding inadvertently setting "optional" entries
11989 +                        * e.g. not using "optional" dptr2 MD/HMAC processing
11990 +                        */
11991 +                       memset(&sc->sc_chnfifo[i][j].cf_desc,
11992 +                               0, sizeof(struct talitos_desc));
11993 +               }
11994 +               spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
11995 +       }
11996 +       /* reset and initialize the SEC h/w device */
11997 +       talitos_reset_device(sc);
11998 +       talitos_init_device(sc);
11999 +#ifdef CONFIG_OCF_RANDOMHARVEST
12000 +       if (sc->sc_exec_units & TALITOS_HAS_EU_RNG)
12001 +               talitos_rng_init(sc);
12002 +#endif
12003 +
12004 +       /* Okay. Stand by. */
12005 +       spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
12006 +
12007 +       return;
12008 +}
12009 +
12010 +/* go through all channels descriptors, notifying OCF what's been done */
12011 +static void talitos_doneprocessing(struct talitos_softc *sc)
12012 +{
12013 +       unsigned long flags;
12014 +       int i, j;
12015 +
12016 +       /* go through descriptors looking for done bits */
12017 +       for (i = 0; i < sc->sc_num_channels; i++) {
12018 +               spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
12019 +               for (j = 0; j < sc->sc_chfifo_len; j++) {
12020 +                       /* descriptor has done bits set? */
12021 +                       if ((sc->sc_chnfifo[i][j].cf_desc.hdr 
12022 +                               & TALITOS_HDR_DONE_BITS) 
12023 +                               == TALITOS_HDR_DONE_BITS) {
12024 +                               /* notify ocf */
12025 +                               crypto_done(sc->sc_chnfifo[i][j].cf_crp);
12026 +                               /* and tag it available again
12027 +                                *
12028 +                                * memset to ensure correct descriptor formation by
12029 +                                * avoiding inadvertently setting "optional" entries
12030 +                                * e.g. not using "optional" dptr2 MD/HMAC processing
12031 +                                */
12032 +                               memset(&sc->sc_chnfifo[i][j].cf_desc,
12033 +                                       0, sizeof(struct talitos_desc));
12034 +                       }
12035 +               }
12036 +               spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
12037 +       }
12038 +       return;
12039 +}
12040 +
12041 +static irqreturn_t
12042 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
12043 +talitos_intr(int irq, void *arg)
12044 +#else
12045 +talitos_intr(int irq, void *arg, struct pt_regs *regs)
12046 +#endif
12047 +{
12048 +       struct talitos_softc *sc = arg;
12049 +       u_int32_t v, v_hi;
12050 +       
12051 +       /* ack */
12052 +       v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
12053 +       v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
12054 +       talitos_write(sc->sc_base_addr + TALITOS_ICR, v);
12055 +       talitos_write(sc->sc_base_addr + TALITOS_ICR_HI, v_hi);
12056 +
12057 +       if (unlikely(v & TALITOS_ISR_ERROR)) {
12058 +               /* Okay, Houston, we've had a problem here. */
12059 +               printk(KERN_DEBUG "%s: got error interrupt - ISR 0x%08x_%08x\n",
12060 +                               device_get_nameunit(sc->sc_cdev), v, v_hi);
12061 +               talitos_errorprocessing(sc);
12062 +       } else
12063 +       if (likely(v & TALITOS_ISR_DONE)) {
12064 +               talitos_doneprocessing(sc);
12065 +       }
12066 +       return IRQ_HANDLED;
12067 +}
12068 +
12069 +/*
12070 + * Initialize registers we need to touch only once.
12071 + */
12072 +static void
12073 +talitos_init_device(struct talitos_softc *sc)
12074 +{
12075 +       u_int32_t v;
12076 +       int i;
12077 +
12078 +       DPRINTF("%s()\n", __FUNCTION__);
12079 +
12080 +       /* init all channels */
12081 +       for (i = 0; i < sc->sc_num_channels; i++) {
12082 +               v = talitos_read(sc->sc_base_addr + 
12083 +                       i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI);
12084 +               v |= TALITOS_CH_CCCR_HI_CDWE
12085 +                 |  TALITOS_CH_CCCR_HI_CDIE;  /* invoke interrupt if done */
12086 +               talitos_write(sc->sc_base_addr + 
12087 +                       i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI, v);
12088 +       }
12089 +       /* enable all interrupts */
12090 +       v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
12091 +       v |= TALITOS_IMR_ALL;
12092 +       talitos_write(sc->sc_base_addr + TALITOS_IMR, v);
12093 +       v = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
12094 +       v |= TALITOS_IMR_HI_ERRONLY;
12095 +       talitos_write(sc->sc_base_addr + TALITOS_IMR_HI, v);
12096 +       return;
12097 +}
12098 +
12099 +/*
12100 + * set the master reset bit on the device.
12101 + */
12102 +static void
12103 +talitos_reset_device_master(struct talitos_softc *sc)
12104 +{
12105 +       u_int32_t v;
12106 +
12107 +       /* Reset the device by writing 1 to MCR:SWR and waiting 'til cleared */
12108 +       v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
12109 +       talitos_write(sc->sc_base_addr + TALITOS_MCR, v | TALITOS_MCR_SWR);
12110 +
12111 +       while (talitos_read(sc->sc_base_addr + TALITOS_MCR) & TALITOS_MCR_SWR)
12112 +               cpu_relax();
12113 +
12114 +       return;
12115 +}
12116 +
12117 +/*
12118 + * Resets the device.  Values in the registers are left as is
12119 + * from the reset (i.e. initial values are assigned elsewhere).
12120 + */
12121 +static void
12122 +talitos_reset_device(struct talitos_softc *sc)
12123 +{
12124 +       u_int32_t v;
12125 +       int i;
12126 +
12127 +       DPRINTF("%s()\n", __FUNCTION__);
12128 +
12129 +       /*
12130 +        * Master reset
12131 +        * errata documentation: warning: certain SEC interrupts 
12132 +        * are not fully cleared by writing the MCR:SWR bit, 
12133 +        * set bit twice to completely reset 
12134 +        */
12135 +       talitos_reset_device_master(sc);        /* once */
12136 +       talitos_reset_device_master(sc);        /* and once again */
12137 +       
12138 +       /* reset all channels */
12139 +       for (i = 0; i < sc->sc_num_channels; i++) {
12140 +               v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
12141 +                       TALITOS_CH_CCCR);
12142 +               talitos_write(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
12143 +                       TALITOS_CH_CCCR, v | TALITOS_CH_CCCR_RESET);
12144 +       }
12145 +}
12146 +
12147 +/* Set up the crypto device structure, private data,
12148 + * and anything else we need before we start */
12149 +#ifdef CONFIG_PPC_MERGE
12150 +static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match)
12151 +#else
12152 +static int talitos_probe(struct platform_device *pdev)
12153 +#endif
12154 +{
12155 +       struct talitos_softc *sc = NULL;
12156 +       struct resource *r;
12157 +#ifdef CONFIG_PPC_MERGE
12158 +       struct device *device = &ofdev->dev;
12159 +       struct device_node *np = ofdev->node;
12160 +       const unsigned int *prop;
12161 +       int err;
12162 +       struct resource res;
12163 +#endif
12164 +       static int num_chips = 0;
12165 +       int rc;
12166 +       int i;
12167 +
12168 +       DPRINTF("%s()\n", __FUNCTION__);
12169 +
12170 +       sc = (struct talitos_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
12171 +       if (!sc)
12172 +               return -ENOMEM;
12173 +       memset(sc, 0, sizeof(*sc));
12174 +
12175 +       softc_device_init(sc, DRV_NAME, num_chips, talitos_methods);
12176 +
12177 +       sc->sc_irq = -1;
12178 +       sc->sc_cid = -1;
12179 +#ifndef CONFIG_PPC_MERGE
12180 +       sc->sc_dev = pdev;
12181 +#endif
12182 +       sc->sc_num = num_chips++;
12183 +
12184 +#ifdef CONFIG_PPC_MERGE
12185 +       dev_set_drvdata(device, sc);
12186 +#else
12187 +       platform_set_drvdata(sc->sc_dev, sc);
12188 +#endif
12189 +
12190 +       /* get the irq line */
12191 +#ifdef CONFIG_PPC_MERGE
12192 +       err = of_address_to_resource(np, 0, &res);
12193 +       if (err)
12194 +               return -EINVAL;
12195 +       r = &res;
12196 +
12197 +       sc->sc_irq = irq_of_parse_and_map(np, 0);
12198 +#else
12199 +       /* get a pointer to the register memory */
12200 +       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
12201 +
12202 +       sc->sc_irq = platform_get_irq(pdev, 0);
12203 +#endif
12204 +       rc = request_irq(sc->sc_irq, talitos_intr, 0,
12205 +                       device_get_nameunit(sc->sc_cdev), sc);
12206 +       if (rc) {
12207 +               printk(KERN_ERR "%s: failed to hook irq %d\n", 
12208 +                               device_get_nameunit(sc->sc_cdev), sc->sc_irq);
12209 +               sc->sc_irq = -1;
12210 +               goto out;
12211 +       }
12212 +
12213 +       sc->sc_base_addr = (ocf_iomem_t) ioremap(r->start, (r->end - r->start));
12214 +       if (!sc->sc_base_addr) {
12215 +               printk(KERN_ERR "%s: failed to ioremap\n",
12216 +                               device_get_nameunit(sc->sc_cdev));
12217 +               goto out;
12218 +       }
12219 +
12220 +       /* figure out our SEC's properties and capabilities */
12221 +       sc->sc_chiprev = (u64)talitos_read(sc->sc_base_addr + TALITOS_ID) << 32
12222 +                | talitos_read(sc->sc_base_addr + TALITOS_ID_HI);
12223 +       DPRINTF("sec id 0x%llx\n", sc->sc_chiprev);
12224 +
12225 +#ifdef CONFIG_PPC_MERGE
12226 +       /* get SEC properties from device tree, defaulting to SEC 2.0 */
12227 +
12228 +       prop = of_get_property(np, "num-channels", NULL);
12229 +       sc->sc_num_channels = prop ? *prop : TALITOS_NCHANNELS_SEC_2_0;
12230 +
12231 +       prop = of_get_property(np, "channel-fifo-len", NULL);
12232 +       sc->sc_chfifo_len = prop ? *prop : TALITOS_CHFIFOLEN_SEC_2_0;
12233 +
12234 +       prop = of_get_property(np, "exec-units-mask", NULL);
12235 +       sc->sc_exec_units = prop ? *prop : TALITOS_HAS_EUS_SEC_2_0;
12236 +
12237 +       prop = of_get_property(np, "descriptor-types-mask", NULL);
12238 +       sc->sc_desc_types = prop ? *prop : TALITOS_HAS_DESCTYPES_SEC_2_0;
12239 +#else
12240 +       /* bulk should go away with openfirmware flat device tree support */
12241 +       if (sc->sc_chiprev & TALITOS_ID_SEC_2_0) {
12242 +               sc->sc_num_channels = TALITOS_NCHANNELS_SEC_2_0;
12243 +               sc->sc_chfifo_len = TALITOS_CHFIFOLEN_SEC_2_0;
12244 +               sc->sc_exec_units = TALITOS_HAS_EUS_SEC_2_0;
12245 +               sc->sc_desc_types = TALITOS_HAS_DESCTYPES_SEC_2_0;
12246 +       } else {
12247 +               printk(KERN_ERR "%s: failed to id device\n",
12248 +                               device_get_nameunit(sc->sc_cdev));
12249 +               goto out;
12250 +       }
12251 +#endif
12252 +
12253 +       /* + 1 is for the meta-channel lock used by the channel scheduler */
12254 +       sc->sc_chnfifolock = (spinlock_t *) kmalloc(
12255 +               (sc->sc_num_channels + 1) * sizeof(spinlock_t), GFP_KERNEL);
12256 +       if (!sc->sc_chnfifolock)
12257 +               goto out;
12258 +       for (i = 0; i < sc->sc_num_channels + 1; i++) {
12259 +               spin_lock_init(&sc->sc_chnfifolock[i]);
12260 +       }
12261 +
12262 +       sc->sc_chnlastalg = (int *) kmalloc(
12263 +               sc->sc_num_channels * sizeof(int), GFP_KERNEL);
12264 +       if (!sc->sc_chnlastalg)
12265 +               goto out;
12266 +       memset(sc->sc_chnlastalg, 0, sc->sc_num_channels * sizeof(int));
12267 +
12268 +       sc->sc_chnfifo = (struct desc_cryptop_pair **) kmalloc(
12269 +               sc->sc_num_channels * sizeof(struct desc_cryptop_pair *), 
12270 +               GFP_KERNEL);
12271 +       if (!sc->sc_chnfifo)
12272 +               goto out;
12273 +       for (i = 0; i < sc->sc_num_channels; i++) {
12274 +               sc->sc_chnfifo[i] = (struct desc_cryptop_pair *) kmalloc(
12275 +                       sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair), 
12276 +                       GFP_KERNEL);
12277 +               if (!sc->sc_chnfifo[i])
12278 +                       goto out;
12279 +               memset(sc->sc_chnfifo[i], 0, 
12280 +                       sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair));
12281 +       }
12282 +
12283 +       /* reset and initialize the SEC h/w device */
12284 +       talitos_reset_device(sc);
12285 +       talitos_init_device(sc);
12286 +
12287 +       sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
12288 +       if (sc->sc_cid < 0) {
12289 +               printk(KERN_ERR "%s: could not get crypto driver id\n",
12290 +                               device_get_nameunit(sc->sc_cdev));
12291 +               goto out;
12292 +       }
12293 +
12294 +       /* register algorithms with the framework */
12295 +       printk("%s:", device_get_nameunit(sc->sc_cdev));
12296 +
12297 +       if (sc->sc_exec_units & TALITOS_HAS_EU_RNG)  {
12298 +               printk(" rng");
12299 +#ifdef CONFIG_OCF_RANDOMHARVEST
12300 +               talitos_rng_init(sc);
12301 +               crypto_rregister(sc->sc_cid, talitos_read_random, sc);
12302 +#endif
12303 +       }
12304 +       if (sc->sc_exec_units & TALITOS_HAS_EU_DEU) {
12305 +               printk(" des/3des");
12306 +               crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
12307 +               crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
12308 +       }
12309 +       if (sc->sc_exec_units & TALITOS_HAS_EU_AESU) {
12310 +               printk(" aes");
12311 +               crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
12312 +       }
12313 +       if (sc->sc_exec_units & TALITOS_HAS_EU_MDEU) {
12314 +               printk(" md5");
12315 +               crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
12316 +               /* HMAC support only with IPsec for now */
12317 +               crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
12318 +               printk(" sha1");
12319 +               crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
12320 +               /* HMAC support only with IPsec for now */
12321 +               crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
12322 +       }
12323 +       printk("\n");
12324 +       return 0;
12325 +
12326 +out:
12327 +#ifndef CONFIG_PPC_MERGE
12328 +       talitos_remove(pdev);
12329 +#endif
12330 +       return -ENOMEM;
12331 +}
12332 +
12333 +#ifdef CONFIG_PPC_MERGE
12334 +static int talitos_remove(struct of_device *ofdev)
12335 +#else
12336 +static int talitos_remove(struct platform_device *pdev)
12337 +#endif
12338 +{
12339 +#ifdef CONFIG_PPC_MERGE
12340 +       struct talitos_softc *sc = dev_get_drvdata(&ofdev->dev);
12341 +#else
12342 +       struct talitos_softc *sc = platform_get_drvdata(pdev);
12343 +#endif
12344 +       int i;
12345 +
12346 +       DPRINTF("%s()\n", __FUNCTION__);
12347 +       if (sc->sc_cid >= 0)
12348 +               crypto_unregister_all(sc->sc_cid);
12349 +       if (sc->sc_chnfifo) {
12350 +               for (i = 0; i < sc->sc_num_channels; i++)
12351 +                       if (sc->sc_chnfifo[i])
12352 +                               kfree(sc->sc_chnfifo[i]);
12353 +               kfree(sc->sc_chnfifo);
12354 +       }
12355 +       if (sc->sc_chnlastalg)
12356 +               kfree(sc->sc_chnlastalg);
12357 +       if (sc->sc_chnfifolock)
12358 +               kfree(sc->sc_chnfifolock);
12359 +       if (sc->sc_irq != -1)
12360 +               free_irq(sc->sc_irq, sc);
12361 +       if (sc->sc_base_addr)
12362 +               iounmap((void *) sc->sc_base_addr);
12363 +       kfree(sc);
12364 +       return 0;
12365 +}
12366 +
12367 +#ifdef CONFIG_PPC_MERGE
12368 +static struct of_device_id talitos_match[] = {
12369 +       {
12370 +               .type = "crypto",
12371 +               .compatible = "talitos",
12372 +       },
12373 +       {},
12374 +};
12375 +
12376 +MODULE_DEVICE_TABLE(of, talitos_match);
12377 +
12378 +static struct of_platform_driver talitos_driver = {
12379 +       .name           = DRV_NAME,
12380 +       .match_table    = talitos_match,
12381 +       .probe          = talitos_probe,
12382 +       .remove         = talitos_remove,
12383 +};
12384 +
12385 +static int __init talitos_init(void)
12386 +{
12387 +       return of_register_platform_driver(&talitos_driver);
12388 +}
12389 +
12390 +static void __exit talitos_exit(void)
12391 +{
12392 +       of_unregister_platform_driver(&talitos_driver);
12393 +}
12394 +#else
12395 +/* Structure for a platform device driver */
12396 +static struct platform_driver talitos_driver = {
12397 +       .probe = talitos_probe,
12398 +       .remove = talitos_remove,
12399 +       .driver = {
12400 +               .name = "fsl-sec2",
12401 +       }
12402 +};
12403 +
12404 +static int __init talitos_init(void)
12405 +{
12406 +       return platform_driver_register(&talitos_driver);
12407 +}
12408 +
12409 +static void __exit talitos_exit(void)
12410 +{
12411 +       platform_driver_unregister(&talitos_driver);
12412 +}
12413 +#endif
12414 +
12415 +module_init(talitos_init);
12416 +module_exit(talitos_exit);
12417 +
12418 +MODULE_LICENSE("Dual BSD/GPL");
12419 +MODULE_AUTHOR("kim.phillips@freescale.com");
12420 +MODULE_DESCRIPTION("OCF driver for Freescale SEC (talitos)");
12421 --- /dev/null
12422 +++ b/crypto/ocf/talitos/talitos_soft.h
12423 @@ -0,0 +1,77 @@
12424 +/*
12425 + * Freescale SEC data structures for integration with ocf-linux
12426 + *
12427 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
12428 + *
12429 + * Redistribution and use in source and binary forms, with or without
12430 + * modification, are permitted provided that the following conditions
12431 + * are met:
12432 + *
12433 + * 1. Redistributions of source code must retain the above copyright
12434 + *    notice, this list of conditions and the following disclaimer.
12435 + * 2. Redistributions in binary form must reproduce the above copyright
12436 + *    notice, this list of conditions and the following disclaimer in the
12437 + *    documentation and/or other materials provided with the distribution.
12438 + * 3. The name of the author may not be used to endorse or promote products
12439 + *    derived from this software without specific prior written permission.
12440 + *
12441 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
12442 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
12443 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
12444 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
12445 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
12446 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
12447 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
12448 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12449 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
12450 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12451 + */
12452 +
12453 +/*
12454 + * paired descriptor and associated crypto operation
12455 + */
12456 +struct desc_cryptop_pair {
12457 +       struct talitos_desc     cf_desc;        /* descriptor ptr */
12458 +       struct cryptop          *cf_crp;        /* cryptop ptr */
12459 +};
12460 +
12461 +/*
12462 + * Holds data specific to a single talitos device.
12463 + */
12464 +struct talitos_softc {
12465 +       softc_device_decl       sc_cdev;
12466 +       struct platform_device  *sc_dev;        /* device backpointer */
12467 +       ocf_iomem_t             sc_base_addr;
12468 +       int                     sc_irq;
12469 +       int                     sc_num;         /* if we have multiple chips */
12470 +       int32_t                 sc_cid;         /* crypto tag */
12471 +       u64                     sc_chiprev;     /* major/minor chip revision */
12472 +       int                     sc_nsessions;
12473 +       struct talitos_session  *sc_sessions;
12474 +       int                     sc_num_channels;/* number of crypto channels */
12475 +       int                     sc_chfifo_len;  /* channel fetch fifo len */
12476 +       int                     sc_exec_units;  /* execution units mask */
12477 +       int                     sc_desc_types;  /* descriptor types mask */
12478 +       /*
12479 +        * mutual exclusion for intra-channel resources, e.g. fetch fifos
12480 +        * the last entry is a meta-channel lock used by the channel scheduler
12481 +        */
12482 +       spinlock_t              *sc_chnfifolock;
12483 +       /* sc_chnlastalg contains last algorithm for that channel */
12484 +       int                     *sc_chnlastalg;
12485 +       /* sc_chnfifo holds pending descriptor--crypto operation pairs */
12486 +       struct desc_cryptop_pair        **sc_chnfifo;
12487 +};
12488 +
12489 +struct talitos_session {
12490 +       u_int32_t       ses_used;
12491 +       u_int32_t       ses_klen;               /* key length in bits */
12492 +       u_int32_t       ses_key[8];             /* DES/3DES/AES key */
12493 +       u_int32_t       ses_hmac[5];            /* hmac inner state */
12494 +       u_int32_t       ses_hmac_len;           /* hmac length */
12495 +       u_int32_t       ses_iv[4];              /* DES/3DES/AES iv */
12496 +       u_int32_t       ses_mlen;               /* desired hash result len (12=ipsec or 16) */
12497 +};
12498 +
12499 +#define        TALITOS_SESSION(sid)    ((sid) & 0x0fffffff)
12500 +#define        TALITOS_SID(crd, sesn)  (((crd) << 28) | ((sesn) & 0x0fffffff))
12501 --- /dev/null
12502 +++ b/crypto/ocf/talitos/talitos_dev.h
12503 @@ -0,0 +1,277 @@
12504 +/*
12505 + * Freescale SEC (talitos) device dependent data structures
12506 + *
12507 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
12508 + *
12509 + * Redistribution and use in source and binary forms, with or without
12510 + * modification, are permitted provided that the following conditions
12511 + * are met:
12512 + *
12513 + * 1. Redistributions of source code must retain the above copyright
12514 + *    notice, this list of conditions and the following disclaimer.
12515 + * 2. Redistributions in binary form must reproduce the above copyright
12516 + *    notice, this list of conditions and the following disclaimer in the
12517 + *    documentation and/or other materials provided with the distribution.
12518 + * 3. The name of the author may not be used to endorse or promote products
12519 + *    derived from this software without specific prior written permission.
12520 + *
12521 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
12522 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
12523 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
12524 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
12525 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
12526 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
12527 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
12528 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12529 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
12530 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12531 + *
12532 + */
12533 +
12534 +/* device ID register values */
12535 +#define TALITOS_ID_SEC_2_0     0x40
12536 +#define TALITOS_ID_SEC_2_1     0x40 /* cross ref with IP block revision reg */
12537 +
12538 +/*
12539 + * following num_channels, channel-fifo-depth, exec-unit-mask, and 
12540 + * descriptor-types-mask are for forward-compatibility with openfirmware
12541 + * flat device trees
12542 + */
12543 +
12544 +/*
12545 + *  num_channels : the number of channels available in each SEC version.
12546 + */
12547 +
12548 +/* n.b. this driver requires these values be a power of 2 */
12549 +#define TALITOS_NCHANNELS_SEC_1_0      4
12550 +#define TALITOS_NCHANNELS_SEC_1_2      1
12551 +#define TALITOS_NCHANNELS_SEC_2_0      4
12552 +#define TALITOS_NCHANNELS_SEC_2_01     4
12553 +#define TALITOS_NCHANNELS_SEC_2_1      4
12554 +#define TALITOS_NCHANNELS_SEC_2_4      4
12555 +
12556 +/*
12557 + *  channel-fifo-depth : The number of descriptor
12558 + *  pointers a channel fetch fifo can hold.
12559 + */
12560 +#define TALITOS_CHFIFOLEN_SEC_1_0      1
12561 +#define TALITOS_CHFIFOLEN_SEC_1_2      1
12562 +#define TALITOS_CHFIFOLEN_SEC_2_0      24
12563 +#define TALITOS_CHFIFOLEN_SEC_2_01     24
12564 +#define TALITOS_CHFIFOLEN_SEC_2_1      24
12565 +#define TALITOS_CHFIFOLEN_SEC_2_4      24
12566 +
12567 +/* 
12568 + *  exec-unit-mask : The bitmask representing what Execution Units (EUs)
12569 + *  are available. EU information should be encoded following the SEC's 
12570 + *  EU_SEL0 bitfield documentation, i.e. as follows:
12571 + * 
12572 + *    bit 31 = set if SEC permits no-EU selection (should be always set)
12573 + *    bit 30 = set if SEC has the ARC4 EU (AFEU)
12574 + *    bit 29 = set if SEC has the des/3des EU (DEU)
12575 + *    bit 28 = set if SEC has the message digest EU (MDEU)
12576 + *    bit 27 = set if SEC has the random number generator EU (RNG)
12577 + *    bit 26 = set if SEC has the public key EU (PKEU)
12578 + *    bit 25 = set if SEC has the aes EU (AESU)
12579 + *    bit 24 = set if SEC has the Kasumi EU (KEU)
12580 + * 
12581 + */
12582 +#define TALITOS_HAS_EU_NONE            (1<<0)
12583 +#define TALITOS_HAS_EU_AFEU            (1<<1)
12584 +#define TALITOS_HAS_EU_DEU             (1<<2)
12585 +#define TALITOS_HAS_EU_MDEU            (1<<3)
12586 +#define TALITOS_HAS_EU_RNG             (1<<4)
12587 +#define TALITOS_HAS_EU_PKEU            (1<<5)
12588 +#define TALITOS_HAS_EU_AESU            (1<<6)
12589 +#define TALITOS_HAS_EU_KEU             (1<<7)
12590 +
12591 +/* the corresponding masks for each SEC version */
12592 +#define TALITOS_HAS_EUS_SEC_1_0                0x7f
12593 +#define TALITOS_HAS_EUS_SEC_1_2                0x4d
12594 +#define TALITOS_HAS_EUS_SEC_2_0                0x7f
12595 +#define TALITOS_HAS_EUS_SEC_2_01       0x7f
12596 +#define TALITOS_HAS_EUS_SEC_2_1                0xff
12597 +#define TALITOS_HAS_EUS_SEC_2_4                0x7f
12598 +
12599 +/*
12600 + *  descriptor-types-mask : The bitmask representing what descriptors
12601 + *  are available. Descriptor type information should be encoded 
12602 + *  following the SEC's Descriptor Header Dword DESC_TYPE field 
12603 + *  documentation, i.e. as follows:
12604 + *
12605 + *    bit 0  = set if SEC supports the aesu_ctr_nonsnoop desc. type
12606 + *    bit 1  = set if SEC supports the ipsec_esp descriptor type
12607 + *    bit 2  = set if SEC supports the common_nonsnoop desc. type
12608 + *    bit 3  = set if SEC supports the 802.11i AES ccmp desc. type
12609 + *    bit 4  = set if SEC supports the hmac_snoop_no_afeu desc. type
12610 + *    bit 5  = set if SEC supports the srtp descriptor type
12611 + *    bit 6  = set if SEC supports the non_hmac_snoop_no_afeu desc.type
12612 + *    bit 7  = set if SEC supports the pkeu_assemble descriptor type
12613 + *    bit 8  = set if SEC supports the aesu_key_expand_output desc.type
12614 + *    bit 9  = set if SEC supports the pkeu_ptmul descriptor type
12615 + *    bit 10 = set if SEC supports the common_nonsnoop_afeu desc. type
12616 + *    bit 11 = set if SEC supports the pkeu_ptadd_dbl descriptor type
12617 + *
12618 + *  ..and so on and so forth.
12619 + */
12620 +#define TALITOS_HAS_DT_AESU_CTR_NONSNOOP       (1<<0)
12621 +#define TALITOS_HAS_DT_IPSEC_ESP               (1<<1)
12622 +#define TALITOS_HAS_DT_COMMON_NONSNOOP         (1<<2)
12623 +
12624 +/* the corresponding masks for each SEC version */
12625 +#define TALITOS_HAS_DESCTYPES_SEC_2_0  0x01010ebf
12626 +#define TALITOS_HAS_DESCTYPES_SEC_2_1  0x012b0ebf
12627 +
12628 +/* 
12629 + * a TALITOS_xxx_HI address points to the low data bits (32-63) of the register
12630 + */
12631 +
12632 +/* global register offset addresses */
12633 +#define TALITOS_ID             0x1020
12634 +#define TALITOS_ID_HI          0x1024
12635 +#define TALITOS_MCR            0x1030          /* master control register */
12636 +#define TALITOS_MCR_HI         0x1038          /* master control register */
12637 +#define TALITOS_MCR_SWR                0x1
12638 +#define TALITOS_IMR            0x1008          /* interrupt mask register */
12639 +#define TALITOS_IMR_ALL                0x00010fff      /* enable all interrupts mask */
12640 +#define TALITOS_IMR_ERRONLY    0x00010aaa      /* enable error interrupts */
12641 +#define TALITOS_IMR_HI         0x100C          /* interrupt mask register */
12642 +#define TALITOS_IMR_HI_ALL     0x00323333      /* enable all interrupts mask */
12643 +#define TALITOS_IMR_HI_ERRONLY 0x00222222      /* enable error interrupts */
12644 +#define TALITOS_ISR            0x1010          /* interrupt status register */
12645 +#define TALITOS_ISR_ERROR      0x00010faa      /* errors mask */
12646 +#define TALITOS_ISR_DONE       0x00000055      /* channel(s) done mask */
12647 +#define TALITOS_ISR_HI         0x1014          /* interrupt status register */
12648 +#define TALITOS_ICR            0x1018          /* interrupt clear register */
12649 +#define TALITOS_ICR_HI         0x101C          /* interrupt clear register */
12650 +
12651 +/* channel register address stride */
12652 +#define TALITOS_CH_OFFSET      0x100
12653 +
12654 +/* channel register offset addresses and bits */
12655 +#define TALITOS_CH_CCCR                0x1108  /* Crypto-Channel Config Register */
12656 +#define TALITOS_CH_CCCR_RESET  0x1     /* Channel Reset bit */
12657 +#define TALITOS_CH_CCCR_HI     0x110c  /* Crypto-Channel Config Register */
12658 +#define TALITOS_CH_CCCR_HI_CDWE        0x10    /* Channel done writeback enable bit */
12659 +#define TALITOS_CH_CCCR_HI_NT  0x4     /* Notification type bit */
12660 +#define TALITOS_CH_CCCR_HI_CDIE        0x2     /* Channel Done Interrupt Enable bit */
12661 +#define TALITOS_CH_CCPSR       0x1110  /* Crypto-Channel Pointer Status Reg */
12662 +#define TALITOS_CH_CCPSR_HI    0x1114  /* Crypto-Channel Pointer Status Reg */
12663 +#define TALITOS_CH_FF          0x1148  /* Fetch FIFO */
12664 +#define TALITOS_CH_FF_HI       0x114c  /* Fetch FIFO's FETCH_ADRS */
12665 +#define TALITOS_CH_CDPR                0x1140  /* Crypto-Channel Pointer Status Reg */
12666 +#define TALITOS_CH_CDPR_HI     0x1144  /* Crypto-Channel Pointer Status Reg */
12667 +#define TALITOS_CH_DESCBUF     0x1180  /* (thru 11bf) Crypto-Channel 
12668 +                                        * Descriptor Buffer (debug) */
12669 +
12670 +/* execution unit register offset addresses and bits */
12671 +#define TALITOS_DEUSR          0x2028  /* DEU status register */
12672 +#define TALITOS_DEUSR_HI       0x202c  /* DEU status register */
12673 +#define TALITOS_DEUISR         0x2030  /* DEU interrupt status register */
12674 +#define TALITOS_DEUISR_HI      0x2034  /* DEU interrupt status register */
12675 +#define TALITOS_DEUICR         0x2038  /* DEU interrupt control register */
12676 +#define TALITOS_DEUICR_HI      0x203c  /* DEU interrupt control register */
12677 +#define TALITOS_AESUISR                0x4030  /* AESU interrupt status register */
12678 +#define TALITOS_AESUISR_HI     0x4034  /* AESU interrupt status register */
12679 +#define TALITOS_AESUICR                0x4038  /* AESU interrupt control register */
12680 +#define TALITOS_AESUICR_HI     0x403c  /* AESU interrupt control register */
12681 +#define TALITOS_MDEUISR                0x6030  /* MDEU interrupt status register */
12682 +#define TALITOS_MDEUISR_HI     0x6034  /* MDEU interrupt status register */
12683 +#define TALITOS_RNGSR          0xa028  /* RNG status register */
12684 +#define TALITOS_RNGSR_HI       0xa02c  /* RNG status register */
12685 +#define TALITOS_RNGSR_HI_RD    0x1     /* RNG Reset done */
12686 +#define TALITOS_RNGSR_HI_OFL   0xff0000/* number of dwords in RNG output FIFO*/
12687 +#define TALITOS_RNGDSR         0xa010  /* RNG data size register */
12688 +#define TALITOS_RNGDSR_HI      0xa014  /* RNG data size register */
12689 +#define TALITOS_RNG_FIFO       0xa800  /* RNG FIFO - pool of random numbers */
12690 +#define TALITOS_RNGISR         0xa030  /* RNG Interrupt status register */
12691 +#define TALITOS_RNGISR_HI      0xa034  /* RNG Interrupt status register */
12692 +#define TALITOS_RNGRCR         0xa018  /* RNG Reset control register */
12693 +#define TALITOS_RNGRCR_HI      0xa01c  /* RNG Reset control register */
12694 +#define TALITOS_RNGRCR_HI_SR   0x1     /* RNG RNGRCR:Software Reset */
12695 +
12696 +/* descriptor pointer entry */
12697 +struct talitos_desc_ptr {
12698 +       u16     len;            /* length */
12699 +       u8      extent;         /* jump (to s/g link table) and extent */
12700 +       u8      res;            /* reserved */
12701 +       u32     ptr;            /* pointer */
12702 +};
12703 +
12704 +/* descriptor */
12705 +struct talitos_desc {
12706 +       u32     hdr;                            /* header */
12707 +       u32     res;                            /* reserved */
12708 +       struct talitos_desc_ptr         ptr[7]; /* ptr/len pair array */
12709 +};
12710 +
12711 +/* talitos descriptor header (hdr) bits */
12712 +
12713 +/* primary execution unit select */
12714 +#define        TALITOS_SEL0_AFEU       0x10000000
12715 +#define        TALITOS_SEL0_DEU        0x20000000
12716 +#define        TALITOS_SEL0_MDEU       0x30000000
12717 +#define        TALITOS_SEL0_RNG        0x40000000
12718 +#define        TALITOS_SEL0_PKEU       0x50000000
12719 +#define        TALITOS_SEL0_AESU       0x60000000
12720 +
12721 +/* primary execution unit mode (MODE0) and derivatives */
12722 +#define        TALITOS_MODE0_AESU_CBC          0x00200000
12723 +#define        TALITOS_MODE0_AESU_ENC          0x00100000
12724 +#define        TALITOS_MODE0_DEU_CBC           0x00400000
12725 +#define        TALITOS_MODE0_DEU_3DES          0x00200000
12726 +#define        TALITOS_MODE0_DEU_ENC           0x00100000
12727 +#define        TALITOS_MODE0_MDEU_INIT         0x01000000      /* init starting regs */
12728 +#define        TALITOS_MODE0_MDEU_HMAC         0x00800000
12729 +#define        TALITOS_MODE0_MDEU_PAD          0x00400000      /* PD */
12730 +#define        TALITOS_MODE0_MDEU_MD5          0x00200000
12731 +#define        TALITOS_MODE0_MDEU_SHA256       0x00100000
12732 +#define        TALITOS_MODE0_MDEU_SHA1         0x00000000      /* SHA-160 */
12733 +#define        TALITOS_MODE0_MDEU_MD5_HMAC     \
12734 +               (TALITOS_MODE0_MDEU_MD5 | TALITOS_MODE0_MDEU_HMAC)
12735 +#define        TALITOS_MODE0_MDEU_SHA256_HMAC  \
12736 +               (TALITOS_MODE0_MDEU_SHA256 | TALITOS_MODE0_MDEU_HMAC)
12737 +#define        TALITOS_MODE0_MDEU_SHA1_HMAC    \
12738 +               (TALITOS_MODE0_MDEU_SHA1 | TALITOS_MODE0_MDEU_HMAC)
12739 +
12740 +/* secondary execution unit select (SEL1) */
12741 +/* it's MDEU or nothing */
12742 +#define        TALITOS_SEL1_MDEU       0x00030000
12743 +
12744 +/* secondary execution unit mode (MODE1) and derivatives */
12745 +#define        TALITOS_MODE1_MDEU_INIT         0x00001000      /* init starting regs */
12746 +#define        TALITOS_MODE1_MDEU_HMAC         0x00000800
12747 +#define        TALITOS_MODE1_MDEU_PAD          0x00000400      /* PD */
12748 +#define        TALITOS_MODE1_MDEU_MD5          0x00000200
12749 +#define        TALITOS_MODE1_MDEU_SHA256       0x00000100
12750 +#define        TALITOS_MODE1_MDEU_SHA1         0x00000000      /* SHA-160 */
12751 +#define        TALITOS_MODE1_MDEU_MD5_HMAC     \
12752 +       (TALITOS_MODE1_MDEU_MD5 | TALITOS_MODE1_MDEU_HMAC)
12753 +#define        TALITOS_MODE1_MDEU_SHA256_HMAC  \
12754 +       (TALITOS_MODE1_MDEU_SHA256 | TALITOS_MODE1_MDEU_HMAC)
12755 +#define        TALITOS_MODE1_MDEU_SHA1_HMAC    \
12756 +       (TALITOS_MODE1_MDEU_SHA1 | TALITOS_MODE1_MDEU_HMAC)
12757 +
12758 +/* direction of overall data flow (DIR) */
12759 +#define        TALITOS_DIR_OUTBOUND    0x00000000
12760 +#define        TALITOS_DIR_INBOUND     0x00000002
12761 +
12762 +/* done notification (DN) */
12763 +#define        TALITOS_DONE_NOTIFY     0x00000001
12764 +
12765 +/* descriptor types */
12766 +/* odd numbers here are valid on SEC2 and greater only (e.g. ipsec_esp) */
12767 +#define TD_TYPE_AESU_CTR_NONSNOOP      (0 << 3)
12768 +#define TD_TYPE_IPSEC_ESP              (1 << 3)
12769 +#define TD_TYPE_COMMON_NONSNOOP_NO_AFEU        (2 << 3)
12770 +#define TD_TYPE_HMAC_SNOOP_NO_AFEU     (4 << 3)
12771 +
12772 +#define TALITOS_HDR_DONE_BITS  0xff000000
12773 +
12774 +#define        DPRINTF(a...)   do { \
12775 +                                               if (debug) { \
12776 +                                                       printk("%s: ", sc ? \
12777 +                                                               device_get_nameunit(sc->sc_cdev) : "talitos"); \
12778 +                                                       printk(a); \
12779 +                                               } \
12780 +                                       } while (0)
12781 --- /dev/null
12782 +++ b/crypto/ocf/random.c
12783 @@ -0,0 +1,317 @@
12784 +/*
12785 + * A system independent way of adding entropy to the kernel's pool
12786 + * this way the drivers can focus on the real work and we can take
12787 + * care of pushing it to the appropriate place in the kernel.
12788 + *
12789 + * This should be fast and callable from timers/interrupts
12790 + *
12791 + * Written by David McCullough <david_mccullough@securecomputing.com>
12792 + * Copyright (C) 2006-2007 David McCullough
12793 + * Copyright (C) 2004-2005 Intel Corporation.
12794 + *
12795 + * LICENSE TERMS
12796 + *
12797 + * The free distribution and use of this software in both source and binary
12798 + * form is allowed (with or without changes) provided that:
12799 + *
12800 + *   1. distributions of this source code include the above copyright
12801 + *      notice, this list of conditions and the following disclaimer;
12802 + *
12803 + *   2. distributions in binary form include the above copyright
12804 + *      notice, this list of conditions and the following disclaimer
12805 + *      in the documentation and/or other associated materials;
12806 + *
12807 + *   3. the copyright holder's name is not used to endorse products
12808 + *      built using this software without specific written permission.
12809 + *
12810 + * ALTERNATIVELY, provided that this notice is retained in full, this product
12811 + * may be distributed under the terms of the GNU General Public License (GPL),
12812 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
12813 + *
12814 + * DISCLAIMER
12815 + *
12816 + * This software is provided 'as is' with no explicit or implied warranties
12817 + * in respect of its properties, including, but not limited to, correctness
12818 + * and/or fitness for purpose.
12819 + */
12820 +
12821 +#ifndef AUTOCONF_INCLUDED
12822 +#include <linux/config.h>
12823 +#endif
12824 +#include <linux/module.h>
12825 +#include <linux/init.h>
12826 +#include <linux/list.h>
12827 +#include <linux/slab.h>
12828 +#include <linux/wait.h>
12829 +#include <linux/sched.h>
12830 +#include <linux/spinlock.h>
12831 +#include <linux/version.h>
12832 +#include <linux/unistd.h>
12833 +#include <linux/poll.h>
12834 +#include <linux/random.h>
12835 +#include <cryptodev.h>
12836 +
12837 +#ifdef CONFIG_OCF_FIPS
12838 +#include "rndtest.h"
12839 +#endif
12840 +
12841 +#ifndef HAS_RANDOM_INPUT_WAIT
12842 +#error "Please do not enable OCF_RANDOMHARVEST unless you have applied patches"
12843 +#endif
12844 +
12845 +/*
12846 + * a hack to access the debug levels from the crypto driver
12847 + */
12848 +extern int crypto_debug;
12849 +#define debug crypto_debug
12850 +
12851 +/*
12852 + * a list of all registered random providers
12853 + */
12854 +static LIST_HEAD(random_ops);
12855 +static int started = 0;
12856 +static int initted = 0;
12857 +
12858 +struct random_op {
12859 +       struct list_head random_list;
12860 +       u_int32_t driverid;
12861 +       int (*read_random)(void *arg, u_int32_t *buf, int len);
12862 +       void *arg;
12863 +};
12864 +
12865 +static int random_proc(void *arg);
12866 +
12867 +static pid_t           randomproc = (pid_t) -1;
12868 +static spinlock_t      random_lock;
12869 +
12870 +/*
12871 + * just init the spin locks
12872 + */
12873 +static int
12874 +crypto_random_init(void)
12875 +{
12876 +       spin_lock_init(&random_lock);
12877 +       initted = 1;
12878 +       return(0);
12879 +}
12880 +
12881 +/*
12882 + * Add the given random reader to our list (if not present)
12883 + * and start the thread (if not already started)
12884 + *
12885 + * we have to assume that driver id is ok for now
12886 + */
12887 +int
12888 +crypto_rregister(
12889 +       u_int32_t driverid,
12890 +       int (*read_random)(void *arg, u_int32_t *buf, int len),
12891 +       void *arg)
12892 +{
12893 +       unsigned long flags;
12894 +       int ret = 0;
12895 +       struct random_op        *rops, *tmp;
12896 +
12897 +       dprintk("%s,%d: %s(0x%x, %p, %p)\n", __FILE__, __LINE__,
12898 +                       __FUNCTION__, driverid, read_random, arg);
12899 +
12900 +       if (!initted)
12901 +               crypto_random_init();
12902 +
12903 +#if 0
12904 +       struct cryptocap        *cap;
12905 +
12906 +       cap = crypto_checkdriver(driverid);
12907 +       if (!cap)
12908 +               return EINVAL;
12909 +#endif
12910 +
12911 +       list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
12912 +               if (rops->driverid == driverid && rops->read_random == read_random)
12913 +                       return EEXIST;
12914 +       }
12915 +
12916 +       rops = (struct random_op *) kmalloc(sizeof(*rops), GFP_KERNEL);
12917 +       if (!rops)
12918 +               return ENOMEM;
12919 +
12920 +       rops->driverid    = driverid;
12921 +       rops->read_random = read_random;
12922 +       rops->arg = arg;
12923 +
12924 +       spin_lock_irqsave(&random_lock, flags);
12925 +       list_add_tail(&rops->random_list, &random_ops);
12926 +       if (!started) {
12927 +               randomproc = kernel_thread(random_proc, NULL, CLONE_FS|CLONE_FILES);
12928 +               if (randomproc < 0) {
12929 +                       ret = randomproc;
12930 +                       printk("crypto: crypto_rregister cannot start random thread; "
12931 +                                       "error %d", ret);
12932 +               } else
12933 +                       started = 1;
12934 +       }
12935 +       spin_unlock_irqrestore(&random_lock, flags);
12936 +
12937 +       return ret;
12938 +}
12939 +EXPORT_SYMBOL(crypto_rregister);
12940 +
12941 +int
12942 +crypto_runregister_all(u_int32_t driverid)
12943 +{
12944 +       struct random_op *rops, *tmp;
12945 +       unsigned long flags;
12946 +
12947 +       dprintk("%s,%d: %s(0x%x)\n", __FILE__, __LINE__, __FUNCTION__, driverid);
12948 +
12949 +       list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
12950 +               if (rops->driverid == driverid) {
12951 +                       list_del(&rops->random_list);
12952 +                       kfree(rops);
12953 +               }
12954 +       }
12955 +
12956 +       spin_lock_irqsave(&random_lock, flags);
12957 +       if (list_empty(&random_ops) && started)
12958 +               kill_proc(randomproc, SIGKILL, 1);
12959 +       spin_unlock_irqrestore(&random_lock, flags);
12960 +       return(0);
12961 +}
12962 +EXPORT_SYMBOL(crypto_runregister_all);
12963 +
12964 +/*
12965 + * while we can add entropy to random.c continue to read random data from
12966 + * the drivers and push it to random.
12967 + */
12968 +static int
12969 +random_proc(void *arg)
12970 +{
12971 +       int n;
12972 +       int wantcnt;
12973 +       int bufcnt = 0;
12974 +       int retval = 0;
12975 +       int *buf = NULL;
12976 +
12977 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
12978 +       daemonize();
12979 +       spin_lock_irq(&current->sigmask_lock);
12980 +       sigemptyset(&current->blocked);
12981 +       recalc_sigpending(current);
12982 +       spin_unlock_irq(&current->sigmask_lock);
12983 +       sprintf(current->comm, "ocf-random");
12984 +#else
12985 +       daemonize("ocf-random");
12986 +       allow_signal(SIGKILL);
12987 +#endif
12988 +
12989 +       (void) get_fs();
12990 +       set_fs(get_ds());
12991 +
12992 +#ifdef CONFIG_OCF_FIPS
12993 +#define NUM_INT (RNDTEST_NBYTES/sizeof(int))
12994 +#else
12995 +#define NUM_INT 32
12996 +#endif
12997 +
12998 +       /*
12999 +        * some devices can transfer their RNG data directly into memory,
13000 +        * so make sure it is device friendly
13001 +        */
13002 +       buf = kmalloc(NUM_INT * sizeof(int), GFP_DMA);
13003 +       if (NULL == buf) {
13004 +               printk("crypto: RNG could not allocate memory\n");
13005 +               retval = -ENOMEM;
13006 +               goto bad_alloc;
13007 +       }
13008 +
13009 +       wantcnt = NUM_INT;   /* start by adding some entropy */
13010 +
13011 +       /*
13012 +        * it's possible due to errors or driver removal that we no longer
13013 +        * have anything to do,  if so exit or we will consume all the CPU
13014 +        * doing nothing
13015 +        */
13016 +       while (!list_empty(&random_ops)) {
13017 +               struct random_op        *rops, *tmp;
13018 +
13019 +#ifdef CONFIG_OCF_FIPS
13020 +               if (wantcnt)
13021 +                       wantcnt = NUM_INT; /* FIPS mode can do 20000 bits or none */
13022 +#endif
13023 +
13024 +               /* see if we can get enough entropy to make the world
13025 +                * a better place.
13026 +                */
13027 +               while (bufcnt < wantcnt && bufcnt < NUM_INT) {
13028 +                       list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
13029 +
13030 +                               n = (*rops->read_random)(rops->arg, &buf[bufcnt],
13031 +                                                        NUM_INT - bufcnt);
13032 +
13033 +                               /* on failure remove the random number generator */
13034 +                               if (n == -1) {
13035 +                                       list_del(&rops->random_list);
13036 +                                       printk("crypto: RNG (driverid=0x%x) failed, disabling\n",
13037 +                                                       rops->driverid);
13038 +                                       kfree(rops);
13039 +                               } else if (n > 0)
13040 +                                       bufcnt += n;
13041 +                       }
13042 +                       /* give up CPU for a bit, just in case as this is a loop */
13043 +                       schedule();
13044 +               }
13045 +
13046 +
13047 +#ifdef CONFIG_OCF_FIPS
13048 +               if (bufcnt > 0 && rndtest_buf((unsigned char *) &buf[0])) {
13049 +                       dprintk("crypto: buffer had fips errors, discarding\n");
13050 +                       bufcnt = 0;
13051 +               }
13052 +#endif
13053 +
13054 +               /*
13055 +                * if we have a certified buffer,  we can send some data
13056 +                * to /dev/random and move along
13057 +                */
13058 +               if (bufcnt > 0) {
13059 +                       /* add what we have */
13060 +                       random_input_words(buf, bufcnt, bufcnt*sizeof(int)*8);
13061 +                       bufcnt = 0;
13062 +               }
13063 +
13064 +               /* give up CPU for a bit so we don't hog while filling */
13065 +               schedule();
13066 +
13067 +               /* wait for needing more */
13068 +               wantcnt = random_input_wait();
13069 +
13070 +               if (wantcnt <= 0)
13071 +                       wantcnt = 0; /* try to get some info again */
13072 +               else
13073 +                       /* round up to one word or we can loop forever */
13074 +                       wantcnt = (wantcnt + (sizeof(int)*8)) / (sizeof(int)*8);
13075 +               if (wantcnt > NUM_INT) {
13076 +                       wantcnt = NUM_INT;
13077 +               }
13078 +
13079 +               if (signal_pending(current)) {
13080 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
13081 +                       spin_lock_irq(&current->sigmask_lock);
13082 +#endif
13083 +                       flush_signals(current);
13084 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
13085 +                       spin_unlock_irq(&current->sigmask_lock);
13086 +#endif
13087 +               }
13088 +       }
13089 +       
13090 +       kfree(buf);
13091 +
13092 +bad_alloc:
13093 +       spin_lock_irq(&random_lock);
13094 +       randomproc = (pid_t) -1;
13095 +       started = 0;
13096 +       spin_unlock_irq(&random_lock);
13097 +
13098 +       return retval;
13099 +}
13100 +
13101 --- /dev/null
13102 +++ b/crypto/ocf/ocf-bench.c
13103 @@ -0,0 +1,436 @@
13104 +/*
13105 + * A loadable module that benchmarks the OCF crypto speed from kernel space.
13106 + *
13107 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
13108 + *
13109 + * LICENSE TERMS
13110 + *
13111 + * The free distribution and use of this software in both source and binary
13112 + * form is allowed (with or without changes) provided that:
13113 + *
13114 + *   1. distributions of this source code include the above copyright
13115 + *      notice, this list of conditions and the following disclaimer;
13116 + *
13117 + *   2. distributions in binary form include the above copyright
13118 + *      notice, this list of conditions and the following disclaimer
13119 + *      in the documentation and/or other associated materials;
13120 + *
13121 + *   3. the copyright holder's name is not used to endorse products
13122 + *      built using this software without specific written permission.
13123 + *
13124 + * ALTERNATIVELY, provided that this notice is retained in full, this product
13125 + * may be distributed under the terms of the GNU General Public License (GPL),
13126 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
13127 + *
13128 + * DISCLAIMER
13129 + *
13130 + * This software is provided 'as is' with no explicit or implied warranties
13131 + * in respect of its properties, including, but not limited to, correctness
13132 + * and/or fitness for purpose.
13133 + */
13134 +
13135 +
13136 +#ifndef AUTOCONF_INCLUDED
13137 +#include <linux/config.h>
13138 +#endif
13139 +#include <linux/module.h>
13140 +#include <linux/init.h>
13141 +#include <linux/list.h>
13142 +#include <linux/slab.h>
13143 +#include <linux/wait.h>
13144 +#include <linux/sched.h>
13145 +#include <linux/spinlock.h>
13146 +#include <linux/version.h>
13147 +#include <linux/interrupt.h>
13148 +#include <cryptodev.h>
13149 +
13150 +#ifdef I_HAVE_AN_XSCALE_WITH_INTEL_SDK
13151 +#define BENCH_IXP_ACCESS_LIB 1
13152 +#endif
13153 +#ifdef BENCH_IXP_ACCESS_LIB
13154 +#include <IxTypes.h>
13155 +#include <IxOsBuffMgt.h>
13156 +#include <IxNpeDl.h>
13157 +#include <IxCryptoAcc.h>
13158 +#include <IxQMgr.h>
13159 +#include <IxOsServices.h>
13160 +#include <IxOsCacheMMU.h>
13161 +#endif
13162 +
13163 +/*
13164 + * support for access lib version 1.4
13165 + */
13166 +#ifndef IX_MBUF_PRIV
13167 +#define IX_MBUF_PRIV(x) ((x)->priv)
13168 +#endif
13169 +
13170 +/*
13171 + * the number of simultaneously active requests
13172 + */
13173 +static int request_q_len = 20;
13174 +module_param(request_q_len, int, 0);
13175 +MODULE_PARM_DESC(request_q_len, "Number of outstanding requests");
13176 +/*
13177 + * how many requests we want to have processed
13178 + */
13179 +static int request_num = 1024;
13180 +module_param(request_num, int, 0);
13181 +MODULE_PARM_DESC(request_num, "run for at least this many requests");
13182 +/*
13183 + * the size of each request
13184 + */
13185 +static int request_size = 1500;
13186 +module_param(request_size, int, 0);
13187 +MODULE_PARM_DESC(request_size, "size of each request");
13188 +
13189 +/*
13190 + * a structure for each request
13191 + */
13192 +typedef struct  {
13193 +       struct work_struct work;
13194 +#ifdef BENCH_IXP_ACCESS_LIB
13195 +       IX_MBUF mbuf;
13196 +#endif
13197 +       unsigned char *buffer;
13198 +} request_t;
13199 +
13200 +static request_t *requests;
13201 +
13202 +static int outstanding;
13203 +static int total;
13204 +
13205 +/*************************************************************************/
13206 +/*
13207 + * OCF benchmark routines
13208 + */
13209 +
13210 +static uint64_t ocf_cryptoid;
13211 +static int ocf_init(void);
13212 +static int ocf_cb(struct cryptop *crp);
13213 +static void ocf_request(void *arg);
13214 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13215 +static void ocf_request_wq(struct work_struct *work);
13216 +#endif
13217 +
13218 +static int
13219 +ocf_init(void)
13220 +{
13221 +       int error;
13222 +       struct cryptoini crie, cria;
13223 +       struct cryptodesc crda, crde;
13224 +
13225 +       memset(&crie, 0, sizeof(crie));
13226 +       memset(&cria, 0, sizeof(cria));
13227 +       memset(&crde, 0, sizeof(crde));
13228 +       memset(&crda, 0, sizeof(crda));
13229 +
13230 +       cria.cri_alg  = CRYPTO_SHA1_HMAC;
13231 +       cria.cri_klen = 20 * 8;
13232 +       cria.cri_key  = "0123456789abcdefghij";
13233 +
13234 +       crie.cri_alg  = CRYPTO_3DES_CBC;
13235 +       crie.cri_klen = 24 * 8;
13236 +       crie.cri_key  = "0123456789abcdefghijklmn";
13237 +
13238 +       crie.cri_next = &cria;
13239 +
13240 +       error = crypto_newsession(&ocf_cryptoid, &crie, 0);
13241 +       if (error) {
13242 +               printk("crypto_newsession failed %d\n", error);
13243 +               return -1;
13244 +       }
13245 +       return 0;
13246 +}
13247 +
13248 +static int
13249 +ocf_cb(struct cryptop *crp)
13250 +{
13251 +       request_t *r = (request_t *) crp->crp_opaque;
13252 +
13253 +       if (crp->crp_etype)
13254 +               printk("Error in OCF processing: %d\n", crp->crp_etype);
13255 +       total++;
13256 +       crypto_freereq(crp);
13257 +       crp = NULL;
13258 +
13259 +       if (total > request_num) {
13260 +               outstanding--;
13261 +               return 0;
13262 +       }
13263 +
13264 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13265 +       INIT_WORK(&r->work, ocf_request_wq);
13266 +#else
13267 +       INIT_WORK(&r->work, ocf_request, r);
13268 +#endif
13269 +       schedule_work(&r->work);
13270 +       return 0;
13271 +}
13272 +
13273 +
13274 +static void
13275 +ocf_request(void *arg)
13276 +{
13277 +       request_t *r = arg;
13278 +       struct cryptop *crp = crypto_getreq(2);
13279 +       struct cryptodesc *crde, *crda;
13280 +
13281 +       if (!crp) {
13282 +               outstanding--;
13283 +               return;
13284 +       }
13285 +
13286 +       crde = crp->crp_desc;
13287 +       crda = crde->crd_next;
13288 +
13289 +       crda->crd_skip = 0;
13290 +       crda->crd_flags = 0;
13291 +       crda->crd_len = request_size;
13292 +       crda->crd_inject = request_size;
13293 +       crda->crd_alg = CRYPTO_SHA1_HMAC;
13294 +       crda->crd_key = "0123456789abcdefghij";
13295 +       crda->crd_klen = 20 * 8;
13296 +
13297 +       crde->crd_skip = 0;
13298 +       crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT;
13299 +       crde->crd_len = request_size;
13300 +       crde->crd_inject = request_size;
13301 +       crde->crd_alg = CRYPTO_3DES_CBC;
13302 +       crde->crd_key = "0123456789abcdefghijklmn";
13303 +       crde->crd_klen = 24 * 8;
13304 +
13305 +       crp->crp_ilen = request_size + 64;
13306 +       crp->crp_flags = CRYPTO_F_CBIMM;
13307 +       crp->crp_buf = (caddr_t) r->buffer;
13308 +       crp->crp_callback = ocf_cb;
13309 +       crp->crp_sid = ocf_cryptoid;
13310 +       crp->crp_opaque = (caddr_t) r;
13311 +       crypto_dispatch(crp);
13312 +}
13313 +
13314 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13315 +static void
13316 +ocf_request_wq(struct work_struct *work)
13317 +{
13318 +       request_t *r = container_of(work, request_t, work);
13319 +       ocf_request(r);
13320 +}
13321 +#endif
13322 +
13323 +/*************************************************************************/
13324 +#ifdef BENCH_IXP_ACCESS_LIB
13325 +/*************************************************************************/
13326 +/*
13327 + * CryptoAcc benchmark routines
13328 + */
13329 +
13330 +static IxCryptoAccCtx ixp_ctx;
13331 +static UINT32 ixp_ctx_id;
13332 +static IX_MBUF ixp_pri;
13333 +static IX_MBUF ixp_sec;
13334 +static int ixp_registered = 0;
13335 +
13336 +static void ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp,
13337 +                                       IxCryptoAccStatus status);
13338 +static void ixp_perform_cb(UINT32 ctx_id, IX_MBUF *sbufp, IX_MBUF *dbufp,
13339 +                                       IxCryptoAccStatus status);
13340 +static void ixp_request(void *arg);
13341 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13342 +static void ixp_request_wq(struct work_struct *work);
13343 +#endif
13344 +
13345 +static int
13346 +ixp_init(void)
13347 +{
13348 +       IxCryptoAccStatus status;
13349 +
13350 +       ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
13351 +       ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13352 +       ixp_ctx.cipherCtx.cipherKeyLen = 24;
13353 +       ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
13354 +       ixp_ctx.cipherCtx.cipherInitialVectorLen = IX_CRYPTO_ACC_DES_IV_64;
13355 +       memcpy(ixp_ctx.cipherCtx.key.cipherKey, "0123456789abcdefghijklmn", 24);
13356 +
13357 +       ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
13358 +       ixp_ctx.authCtx.authDigestLen = 12;
13359 +       ixp_ctx.authCtx.aadLen = 0;
13360 +       ixp_ctx.authCtx.authKeyLen = 20;
13361 +       memcpy(ixp_ctx.authCtx.key.authKey, "0123456789abcdefghij", 20);
13362 +
13363 +       ixp_ctx.useDifferentSrcAndDestMbufs = 0;
13364 +       ixp_ctx.operation = IX_CRYPTO_ACC_OP_ENCRYPT_AUTH ;
13365 +
13366 +       IX_MBUF_MLEN(&ixp_pri)  = IX_MBUF_PKT_LEN(&ixp_pri) = 128;
13367 +       IX_MBUF_MDATA(&ixp_pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
13368 +       IX_MBUF_MLEN(&ixp_sec)  = IX_MBUF_PKT_LEN(&ixp_sec) = 128;
13369 +       IX_MBUF_MDATA(&ixp_sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
13370 +
13371 +       status = ixCryptoAccCtxRegister(&ixp_ctx, &ixp_pri, &ixp_sec,
13372 +                       ixp_register_cb, ixp_perform_cb, &ixp_ctx_id);
13373 +
13374 +       if (IX_CRYPTO_ACC_STATUS_SUCCESS == status) {
13375 +               while (!ixp_registered)
13376 +                       schedule();
13377 +               return ixp_registered < 0 ? -1 : 0;
13378 +       }
13379 +
13380 +       printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
13381 +       return -1;
13382 +}
13383 +
13384 +static void
13385 +ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
13386 +{
13387 +       if (bufp) {
13388 +               IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
13389 +               kfree(IX_MBUF_MDATA(bufp));
13390 +               IX_MBUF_MDATA(bufp) = NULL;
13391 +       }
13392 +
13393 +       if (IX_CRYPTO_ACC_STATUS_WAIT == status)
13394 +               return;
13395 +       if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
13396 +               ixp_registered = 1;
13397 +       else
13398 +               ixp_registered = -1;
13399 +}
13400 +
13401 +static void
13402 +ixp_perform_cb(
13403 +       UINT32 ctx_id,
13404 +       IX_MBUF *sbufp,
13405 +       IX_MBUF *dbufp,
13406 +       IxCryptoAccStatus status)
13407 +{
13408 +       request_t *r = NULL;
13409 +
13410 +       total++;
13411 +       if (total > request_num) {
13412 +               outstanding--;
13413 +               return;
13414 +       }
13415 +
13416 +       if (!sbufp || !(r = IX_MBUF_PRIV(sbufp))) {
13417 +               printk("crappo %p %p\n", sbufp, r);
13418 +               outstanding--;
13419 +               return;
13420 +       }
13421 +
13422 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13423 +       INIT_WORK(&r->work, ixp_request_wq);
13424 +#else
13425 +       INIT_WORK(&r->work, ixp_request, r);
13426 +#endif
13427 +       schedule_work(&r->work);
13428 +}
13429 +
13430 +static void
13431 +ixp_request(void *arg)
13432 +{
13433 +       request_t *r = arg;
13434 +       IxCryptoAccStatus status;
13435 +
13436 +       memset(&r->mbuf, 0, sizeof(r->mbuf));
13437 +       IX_MBUF_MLEN(&r->mbuf) = IX_MBUF_PKT_LEN(&r->mbuf) = request_size + 64;
13438 +       IX_MBUF_MDATA(&r->mbuf) = r->buffer;
13439 +       IX_MBUF_PRIV(&r->mbuf) = r;
13440 +       status = ixCryptoAccAuthCryptPerform(ixp_ctx_id, &r->mbuf, NULL,
13441 +                       0, request_size, 0, request_size, request_size, r->buffer);
13442 +       if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
13443 +               printk("status1 = %d\n", status);
13444 +               outstanding--;
13445 +               return;
13446 +       }
13447 +       return;
13448 +}
13449 +
13450 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13451 +static void
13452 +ixp_request_wq(struct work_struct *work)
13453 +{
13454 +       request_t *r = container_of(work, request_t, work);
13455 +       ixp_request(r);
13456 +}
13457 +#endif
13458 +
13459 +/*************************************************************************/
13460 +#endif /* BENCH_IXP_ACCESS_LIB */
13461 +/*************************************************************************/
13462 +
13463 +int
13464 +ocfbench_init(void)
13465 +{
13466 +       int i, jstart, jstop;
13467 +
13468 +       printk("Crypto Speed tests\n");
13469 +
13470 +       requests = kmalloc(sizeof(request_t) * request_q_len, GFP_KERNEL);
13471 +       if (!requests) {
13472 +               printk("malloc failed\n");
13473 +               return -EINVAL;
13474 +       }
13475 +
13476 +       for (i = 0; i < request_q_len; i++) {
13477 +               /* +128 for return data */
13478 +               requests[i].buffer = kmalloc(request_size + 128, GFP_DMA);
13479 +               if (!requests[i].buffer) {
13480 +                       printk("malloc failed\n");
13481 +                       return -EINVAL;
13482 +               }
13483 +               memset(requests[i].buffer, '0' + i, request_size + 128);
13484 +       }
13485 +
13486 +       /*
13487 +        * OCF benchmark
13488 +        */
13489 +       printk("OCF: testing ...\n");
13490 +       ocf_init();
13491 +       total = outstanding = 0;
13492 +       jstart = jiffies;
13493 +       for (i = 0; i < request_q_len; i++) {
13494 +               outstanding++;
13495 +               ocf_request(&requests[i]);
13496 +       }
13497 +       while (outstanding > 0)
13498 +               schedule();
13499 +       jstop = jiffies;
13500 +
13501 +       printk("OCF: %d requests of %d bytes in %d jiffies\n", total, request_size,
13502 +                       jstop - jstart);
13503 +
13504 +#ifdef BENCH_IXP_ACCESS_LIB
13505 +       /*
13506 +        * IXP benchmark
13507 +        */
13508 +       printk("IXP: testing ...\n");
13509 +       ixp_init();
13510 +       total = outstanding = 0;
13511 +       jstart = jiffies;
13512 +       for (i = 0; i < request_q_len; i++) {
13513 +               outstanding++;
13514 +               ixp_request(&requests[i]);
13515 +       }
13516 +       while (outstanding > 0)
13517 +               schedule();
13518 +       jstop = jiffies;
13519 +
13520 +       printk("IXP: %d requests of %d bytes in %d jiffies\n", total, request_size,
13521 +                       jstop - jstart);
13522 +#endif /* BENCH_IXP_ACCESS_LIB */
13523 +
13524 +       for (i = 0; i < request_q_len; i++)
13525 +               kfree(requests[i].buffer);
13526 +       kfree(requests);
13527 +       return -EINVAL; /* always fail to load so it can be re-run quickly ;-) */
13528 +}
13529 +
13530 +static void __exit ocfbench_exit(void)
13531 +{
13532 +}
13533 +
13534 +module_init(ocfbench_init);
13535 +module_exit(ocfbench_exit);
13536 +
13537 +MODULE_LICENSE("BSD");
13538 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
13539 +MODULE_DESCRIPTION("Benchmark various in-kernel crypto speeds");
13540 --- /dev/null
13541 +++ b/crypto/ocf/ixp4xx/ixp4xx.c
13542 @@ -0,0 +1,1328 @@
13543 +/*
13544 + * An OCF module that uses Intel's IXP CryptoAcc API to do the crypto.
13545 + * This driver requires the IXP400 Access Library that is available
13546 + * from Intel in order to operate (or compile).
13547 + *
13548 + * Written by David McCullough <david_mccullough@securecomputing.com>
13549 + * Copyright (C) 2006-2007 David McCullough
13550 + * Copyright (C) 2004-2005 Intel Corporation.
13551 + *
13552 + * LICENSE TERMS
13553 + *
13554 + * The free distribution and use of this software in both source and binary
13555 + * form is allowed (with or without changes) provided that:
13556 + *
13557 + *   1. distributions of this source code include the above copyright
13558 + *      notice, this list of conditions and the following disclaimer;
13559 + *
13560 + *   2. distributions in binary form include the above copyright
13561 + *      notice, this list of conditions and the following disclaimer
13562 + *      in the documentation and/or other associated materials;
13563 + *
13564 + *   3. the copyright holder's name is not used to endorse products
13565 + *      built using this software without specific written permission.
13566 + *
13567 + * ALTERNATIVELY, provided that this notice is retained in full, this product
13568 + * may be distributed under the terms of the GNU General Public License (GPL),
13569 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
13570 + *
13571 + * DISCLAIMER
13572 + *
13573 + * This software is provided 'as is' with no explicit or implied warranties
13574 + * in respect of its properties, including, but not limited to, correctness
13575 + * and/or fitness for purpose.
13576 + */
13577 +
13578 +#ifndef AUTOCONF_INCLUDED
13579 +#include <linux/config.h>
13580 +#endif
13581 +#include <linux/module.h>
13582 +#include <linux/init.h>
13583 +#include <linux/list.h>
13584 +#include <linux/slab.h>
13585 +#include <linux/sched.h>
13586 +#include <linux/wait.h>
13587 +#include <linux/crypto.h>
13588 +#include <linux/interrupt.h>
13589 +#include <asm/scatterlist.h>
13590 +
13591 +#include <IxTypes.h>
13592 +#include <IxOsBuffMgt.h>
13593 +#include <IxNpeDl.h>
13594 +#include <IxCryptoAcc.h>
13595 +#include <IxQMgr.h>
13596 +#include <IxOsServices.h>
13597 +#include <IxOsCacheMMU.h>
13598 +
13599 +#include <cryptodev.h>
13600 +#include <uio.h>
13601 +
13602 +#ifndef IX_MBUF_PRIV
13603 +#define IX_MBUF_PRIV(x) ((x)->priv)
13604 +#endif
13605 +
13606 +struct ixp_data;
13607 +
13608 +struct ixp_q {
13609 +       struct list_head         ixp_q_list;
13610 +       struct ixp_data         *ixp_q_data;
13611 +       struct cryptop          *ixp_q_crp;
13612 +       struct cryptodesc       *ixp_q_ccrd;
13613 +       struct cryptodesc       *ixp_q_acrd;
13614 +       IX_MBUF                          ixp_q_mbuf;
13615 +       UINT8                           *ixp_hash_dest; /* Location for hash in client buffer */
13616 +       UINT8                           *ixp_hash_src; /* Location of hash in internal buffer */
13617 +       unsigned char            ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
13618 +       unsigned char           *ixp_q_iv;
13619 +};
13620 +
13621 +struct ixp_data {
13622 +       int                                      ixp_registered;        /* is the context registered */
13623 +       int                                      ixp_crd_flags;         /* detect direction changes */
13624 +
13625 +       int                                      ixp_cipher_alg;
13626 +       int                                      ixp_auth_alg;
13627 +
13628 +       UINT32                           ixp_ctx_id;
13629 +       UINT32                           ixp_hash_key_id;       /* used when hashing */
13630 +       IxCryptoAccCtx           ixp_ctx;
13631 +       IX_MBUF                          ixp_pri_mbuf;
13632 +       IX_MBUF                          ixp_sec_mbuf;
13633 +
13634 +       struct work_struct   ixp_pending_work;
13635 +       struct work_struct   ixp_registration_work;
13636 +       struct list_head         ixp_q;                         /* unprocessed requests */
13637 +};
13638 +
13639 +#ifdef __ixp46X
13640 +
13641 +#define        MAX_IOP_SIZE    64      /* words */
13642 +#define        MAX_OOP_SIZE    128
13643 +
13644 +#define        MAX_PARAMS              3
13645 +
13646 +struct ixp_pkq {
13647 +       struct list_head                         pkq_list;
13648 +       struct cryptkop                         *pkq_krp;
13649 +
13650 +       IxCryptoAccPkeEauInOperands      pkq_op;
13651 +       IxCryptoAccPkeEauOpResult        pkq_result;
13652 +
13653 +       UINT32                                           pkq_ibuf0[MAX_IOP_SIZE];
13654 +       UINT32                                           pkq_ibuf1[MAX_IOP_SIZE];
13655 +       UINT32                                           pkq_ibuf2[MAX_IOP_SIZE];
13656 +       UINT32                                           pkq_obuf[MAX_OOP_SIZE];
13657 +};
13658 +
13659 +static LIST_HEAD(ixp_pkq); /* current PK wait list */
13660 +static struct ixp_pkq *ixp_pk_cur;
13661 +static spinlock_t ixp_pkq_lock;
13662 +
13663 +#endif /* __ixp46X */
13664 +
13665 +static int ixp_blocked = 0;
13666 +
13667 +static int32_t                  ixp_id = -1;
13668 +static struct ixp_data **ixp_sessions = NULL;
13669 +static u_int32_t                ixp_sesnum = 0;
13670 +
13671 +static int ixp_process(device_t, struct cryptop *, int);
13672 +static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
13673 +static int ixp_freesession(device_t, u_int64_t);
13674 +#ifdef __ixp46X
13675 +static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
13676 +#endif
13677 +
13678 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
13679 +static kmem_cache_t *qcache;
13680 +#else
13681 +static struct kmem_cache *qcache;
13682 +#endif
13683 +
13684 +#define debug ixp_debug
13685 +static int ixp_debug = 0;
13686 +module_param(ixp_debug, int, 0644);
13687 +MODULE_PARM_DESC(ixp_debug, "Enable debug");
13688 +
13689 +static int ixp_init_crypto = 1;
13690 +module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
13691 +MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");
13692 +
13693 +static void ixp_process_pending(void *arg);
13694 +static void ixp_registration(void *arg);
13695 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13696 +static void ixp_process_pending_wq(struct work_struct *work);
13697 +static void ixp_registration_wq(struct work_struct *work);
13698 +#endif
13699 +
13700 +/*
13701 + * dummy device structure
13702 + */
13703 +
13704 +static struct {
13705 +       softc_device_decl       sc_dev;
13706 +} ixpdev;
13707 +
13708 +static device_method_t ixp_methods = {
13709 +       /* crypto device methods */
13710 +       DEVMETHOD(cryptodev_newsession, ixp_newsession),
13711 +       DEVMETHOD(cryptodev_freesession,ixp_freesession),
13712 +       DEVMETHOD(cryptodev_process,    ixp_process),
13713 +#ifdef __ixp46X
13714 +       DEVMETHOD(cryptodev_kprocess,   ixp_kprocess),
13715 +#endif
13716 +};
13717 +
13718 +/*
13719 + * Generate a new software session.
13720 + */
13721 +static int
13722 +ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
13723 +{
13724 +       struct ixp_data *ixp;
13725 +       u_int32_t i;
13726 +#define AUTH_LEN(cri, def) \
13727 +       (cri->cri_mlen ? cri->cri_mlen : (def))
13728 +
13729 +       dprintk("%s():alg %d\n", __FUNCTION__,cri->cri_alg);
13730 +       if (sid == NULL || cri == NULL) {
13731 +               dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
13732 +               return EINVAL;
13733 +       }
13734 +
13735 +       if (ixp_sessions) {
13736 +               for (i = 1; i < ixp_sesnum; i++)
13737 +                       if (ixp_sessions[i] == NULL)
13738 +                               break;
13739 +       } else
13740 +               i = 1;          /* NB: to silence compiler warning */
13741 +
13742 +       if (ixp_sessions == NULL || i == ixp_sesnum) {
13743 +               struct ixp_data **ixpd;
13744 +
13745 +               if (ixp_sessions == NULL) {
13746 +                       i = 1; /* We leave ixp_sessions[0] empty */
13747 +                       ixp_sesnum = CRYPTO_SW_SESSIONS;
13748 +               } else
13749 +                       ixp_sesnum *= 2;
13750 +
13751 +               ixpd = kmalloc(ixp_sesnum * sizeof(struct ixp_data *), SLAB_ATOMIC);
13752 +               if (ixpd == NULL) {
13753 +                       /* Reset session number */
13754 +                       if (ixp_sesnum == CRYPTO_SW_SESSIONS)
13755 +                               ixp_sesnum = 0;
13756 +                       else
13757 +                               ixp_sesnum /= 2;
13758 +                       dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
13759 +                       return ENOBUFS;
13760 +               }
13761 +               memset(ixpd, 0, ixp_sesnum * sizeof(struct ixp_data *));
13762 +
13763 +               /* Copy existing sessions */
13764 +               if (ixp_sessions) {
13765 +                       memcpy(ixpd, ixp_sessions,
13766 +                           (ixp_sesnum / 2) * sizeof(struct ixp_data *));
13767 +                       kfree(ixp_sessions);
13768 +               }
13769 +
13770 +               ixp_sessions = ixpd;
13771 +       }
13772 +
13773 +       ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
13774 +                       SLAB_ATOMIC);
13775 +       if (ixp_sessions[i] == NULL) {
13776 +               ixp_freesession(NULL, i);
13777 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
13778 +               return ENOBUFS;
13779 +       }
13780 +
13781 +       *sid = i;
13782 +
13783 +       ixp = ixp_sessions[i];
13784 +       memset(ixp, 0, sizeof(*ixp));
13785 +
13786 +       ixp->ixp_cipher_alg = -1;
13787 +       ixp->ixp_auth_alg = -1;
13788 +       ixp->ixp_ctx_id = -1;
13789 +       INIT_LIST_HEAD(&ixp->ixp_q);
13790 +
13791 +       ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;
13792 +
13793 +       while (cri) {
13794 +               switch (cri->cri_alg) {
13795 +               case CRYPTO_DES_CBC:
13796 +                       ixp->ixp_cipher_alg = cri->cri_alg;
13797 +                       ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
13798 +                       ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13799 +                       ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13800 +                       ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
13801 +                       ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
13802 +                                               IX_CRYPTO_ACC_DES_IV_64;
13803 +                       memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13804 +                                       cri->cri_key, (cri->cri_klen + 7) / 8);
13805 +                       break;
13806 +
13807 +               case CRYPTO_3DES_CBC:
13808 +                       ixp->ixp_cipher_alg = cri->cri_alg;
13809 +                       ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
13810 +                       ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13811 +                       ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13812 +                       ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
13813 +                       ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
13814 +                                               IX_CRYPTO_ACC_DES_IV_64;
13815 +                       memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13816 +                                       cri->cri_key, (cri->cri_klen + 7) / 8);
13817 +                       break;
13818 +
13819 +               case CRYPTO_RIJNDAEL128_CBC:
13820 +                       ixp->ixp_cipher_alg = cri->cri_alg;
13821 +                       ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
13822 +                       ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13823 +                       ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13824 +                       ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
13825 +                       ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
13826 +                       memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13827 +                                       cri->cri_key, (cri->cri_klen + 7) / 8);
13828 +                       break;
13829 +
13830 +               case CRYPTO_MD5:
13831 +               case CRYPTO_MD5_HMAC:
13832 +                       ixp->ixp_auth_alg = cri->cri_alg;
13833 +                       ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
13834 +                       ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
13835 +                       ixp->ixp_ctx.authCtx.aadLen = 0;
13836 +                       /* Only MD5_HMAC needs a key */
13837 +                       if (cri->cri_alg == CRYPTO_MD5_HMAC) {
13838 +                               ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
13839 +                               if (ixp->ixp_ctx.authCtx.authKeyLen >
13840 +                                               sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
13841 +                                       printk(
13842 +                                               "ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
13843 +                                                       cri->cri_klen);
13844 +                                       ixp_freesession(NULL, i);
13845 +                                       return EINVAL;
13846 +                               }
13847 +                               memcpy(ixp->ixp_ctx.authCtx.key.authKey,
13848 +                                               cri->cri_key, (cri->cri_klen + 7) / 8);
13849 +                       }
13850 +                       break;
13851 +
13852 +               case CRYPTO_SHA1:
13853 +               case CRYPTO_SHA1_HMAC:
13854 +                       ixp->ixp_auth_alg = cri->cri_alg;
13855 +                       ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
13856 +                       ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
13857 +                       ixp->ixp_ctx.authCtx.aadLen = 0;
13858 +                       /* Only SHA1_HMAC needs a key */
13859 +                       if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
13860 +                               ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
13861 +                               if (ixp->ixp_ctx.authCtx.authKeyLen >
13862 +                                               sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
13863 +                                       printk(
13864 +                                               "ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
13865 +                                                       cri->cri_klen);
13866 +                                       ixp_freesession(NULL, i);
13867 +                                       return EINVAL;
13868 +                               }
13869 +                               memcpy(ixp->ixp_ctx.authCtx.key.authKey,
13870 +                                               cri->cri_key, (cri->cri_klen + 7) / 8);
13871 +                       }
13872 +                       break;
13873 +
13874 +               default:
13875 +                       printk("ixp: unknown algo 0x%x\n", cri->cri_alg);
13876 +                       ixp_freesession(NULL, i);
13877 +                       return EINVAL;
13878 +               }
13879 +               cri = cri->cri_next;
13880 +       }
13881 +
13882 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13883 +       INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
13884 +       INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
13885 +#else
13886 +       INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
13887 +       INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
13888 +#endif
13889 +
13890 +       return 0;
13891 +}
13892 +
13893 +
13894 +/*
13895 + * Free a session.
13896 + */
13897 +static int
13898 +ixp_freesession(device_t dev, u_int64_t tid)
13899 +{
13900 +       u_int32_t sid = CRYPTO_SESID2LID(tid);
13901 +
13902 +       dprintk("%s()\n", __FUNCTION__);
13903 +       if (sid > ixp_sesnum || ixp_sessions == NULL ||
13904 +                       ixp_sessions[sid] == NULL) {
13905 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
13906 +               return EINVAL;
13907 +       }
13908 +
13909 +       /* Silently accept and return */
13910 +       if (sid == 0)
13911 +               return 0;
13912 +
13913 +       if (ixp_sessions[sid]) {
13914 +               if (ixp_sessions[sid]->ixp_ctx_id != -1) {
13915 +                       ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
13916 +                       ixp_sessions[sid]->ixp_ctx_id = -1;
13917 +               }
13918 +
13919 +               flush_scheduled_work();
13920 +
13921 +               kfree(ixp_sessions[sid]);
13922 +       }
13923 +       ixp_sessions[sid] = NULL;
13924 +       if (ixp_blocked) {
13925 +               ixp_blocked = 0;
13926 +               crypto_unblock(ixp_id, CRYPTO_SYMQ);
13927 +       }
13928 +       return 0;
13929 +}
13930 +
13931 +
+/*
+ * callback for when hash processing is complete
+ *
+ * Invoked by the access library when a plain SHA1/MD5 hash request
+ * (queued by ixp_q_process() via ixCryptoAccHashPerform()) finishes.
+ * On success the digest, which the hardware wrote into our internal
+ * bounce buffer, is copied back to the caller's buffer;  on failure
+ * the request is flagged EINVAL.  Either way the bounce buffer is
+ * released and the OCF request completed.
+ */
+
+static void
+ixp_hash_perform_cb(
+	UINT32 hash_key_id,
+	IX_MBUF *bufp,
+	IxCryptoAccStatus status)
+{
+	struct ixp_q *q;
+
+	dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);
+
+	if (bufp == NULL) {
+		printk("ixp: NULL buf in %s\n", __FUNCTION__);
+		return;
+	}
+
+	/* the request context was stashed in the mbuf by ixp_q_process() */
+	q = IX_MBUF_PRIV(bufp);
+	if (q == NULL) {
+		printk("ixp: NULL priv in %s\n", __FUNCTION__);
+		return;
+	}
+
+	if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
+		/* On success, need to copy hash back into original client buffer */
+		/* ixp_hash_src/ixp_hash_dest were set up in ixp_q_process() */
+		memcpy(q->ixp_hash_dest, q->ixp_hash_src,
+				(q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
+					SHA1_HASH_LEN : MD5_HASH_LEN);
+	}
+	else {
+		printk("ixp: hash perform failed status=%d\n", status);
+		q->ixp_q_crp->crp_etype = EINVAL;
+	}
+
+	/* Free internal buffer used for hashing */
+	kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));
+
+	crypto_done(q->ixp_q_crp);
+	kmem_cache_free(qcache, q);
+}
13974 +
13975 +/*
13976 + * setup a request and perform it
13977 + */
13978 +static void
13979 +ixp_q_process(struct ixp_q *q)
13980 +{
13981 +       IxCryptoAccStatus status;
13982 +       struct ixp_data *ixp = q->ixp_q_data;
13983 +       int auth_off = 0;
13984 +       int auth_len = 0;
13985 +       int crypt_off = 0;
13986 +       int crypt_len = 0;
13987 +       int icv_off = 0;
13988 +       char *crypt_func;
13989 +
13990 +       dprintk("%s(%p)\n", __FUNCTION__, q);
13991 +
13992 +       if (q->ixp_q_ccrd) {
13993 +               if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
13994 +                       q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
13995 +               } else {
13996 +                       q->ixp_q_iv = q->ixp_q_iv_data;
13997 +                       crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
13998 +                                       q->ixp_q_ccrd->crd_inject,
13999 +                                       ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
14000 +                                       (caddr_t) q->ixp_q_iv);
14001 +               }
14002 +
14003 +               if (q->ixp_q_acrd) {
14004 +                       auth_off = q->ixp_q_acrd->crd_skip;
14005 +                       auth_len = q->ixp_q_acrd->crd_len;
14006 +                       icv_off  = q->ixp_q_acrd->crd_inject;
14007 +               }
14008 +
14009 +               crypt_off = q->ixp_q_ccrd->crd_skip;
14010 +               crypt_len = q->ixp_q_ccrd->crd_len;
14011 +       } else { /* if (q->ixp_q_acrd) */
14012 +               auth_off = q->ixp_q_acrd->crd_skip;
14013 +               auth_len = q->ixp_q_acrd->crd_len;
14014 +               icv_off  = q->ixp_q_acrd->crd_inject;
14015 +       }
14016 +
14017 +       if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
14018 +               struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
14019 +               if (skb_shinfo(skb)->nr_frags) {
14020 +                       /*
14021 +                        * DAVIDM fix this limitation one day by using
14022 +                        * a buffer pool and chaining,  it is not currently
14023 +                        * needed for current user/kernel space acceleration
14024 +                        */
14025 +                       printk("ixp: Cannot handle fragmented skb's yet !\n");
14026 +                       q->ixp_q_crp->crp_etype = ENOENT;
14027 +                       goto done;
14028 +               }
14029 +               IX_MBUF_MLEN(&q->ixp_q_mbuf) =
14030 +                               IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =  skb->len;
14031 +               IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
14032 +       } else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
14033 +               struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
14034 +               if (uiop->uio_iovcnt != 1) {
14035 +                       /*
14036 +                        * DAVIDM fix this limitation one day by using
14037 +                        * a buffer pool and chaining,  it is not currently
14038 +                        * needed for current user/kernel space acceleration
14039 +                        */
14040 +                       printk("ixp: Cannot handle more than 1 iovec yet !\n");
14041 +                       q->ixp_q_crp->crp_etype = ENOENT;
14042 +                       goto done;
14043 +               }
14044 +               IX_MBUF_MLEN(&q->ixp_q_mbuf) =
14045 +                               IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
14046 +               IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
14047 +       } else /* contig buffer */ {
14048 +               IX_MBUF_MLEN(&q->ixp_q_mbuf)  =
14049 +                               IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
14050 +               IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
14051 +       }
14052 +
14053 +       IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;
14054 +
14055 +       if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
14056 +               /*
14057 +                * For SHA1 and MD5 hash, need to create an internal buffer that is big
14058 +                * enough to hold the original data + the appropriate padding for the
14059 +                * hash algorithm.
14060 +                */
14061 +               UINT8 *tbuf = NULL;
14062 +
14063 +               IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
14064 +                       ((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
14065 +               tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);
14066 +               
14067 +               if (IX_MBUF_MDATA(&q->ixp_q_mbuf) == NULL) {
14068 +                       printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
14069 +                                       IX_MBUF_MLEN(&q->ixp_q_mbuf));
14070 +                       q->ixp_q_crp->crp_etype = ENOMEM;
14071 +                       goto done;
14072 +               }
14073 +               memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);
14074 +
14075 +               /* Set location in client buffer to copy hash into */
14076 +               q->ixp_hash_dest =
14077 +                       &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];
14078 +
14079 +               IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;
14080 +
14081 +               /* Set location in internal buffer for where hash starts */
14082 +               q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];
14083 +
14084 +               crypt_func = "ixCryptoAccHashPerform";
14085 +               status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
14086 +                               &q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
14087 +                               &ixp->ixp_hash_key_id);
14088 +       }
14089 +       else {
14090 +               crypt_func = "ixCryptoAccAuthCryptPerform";
14091 +               status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
14092 +                       NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
14093 +                       q->ixp_q_iv);
14094 +       }
14095 +
14096 +       if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
14097 +               return;
14098 +
14099 +       if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
14100 +               q->ixp_q_crp->crp_etype = ENOMEM;
14101 +               goto done;
14102 +       }
14103 +
14104 +       printk("ixp: %s failed %u\n", crypt_func, status);
14105 +       q->ixp_q_crp->crp_etype = EINVAL;
14106 +
14107 +done:
14108 +       crypto_done(q->ixp_q_crp);
14109 +       kmem_cache_free(qcache, q);
14110 +}
14111 +
14112 +
14113 +/*
14114 + * because we cannot process the Q from the Register callback
14115 + * we do it here on a task Q.
14116 + */
14117 +
14118 +static void
14119 +ixp_process_pending(void *arg)
14120 +{
14121 +       struct ixp_data *ixp = arg;
14122 +       struct ixp_q *q = NULL;
14123 +
14124 +       dprintk("%s(%p)\n", __FUNCTION__, arg);
14125 +
14126 +       if (!ixp)
14127 +               return;
14128 +
14129 +       while (!list_empty(&ixp->ixp_q)) {
14130 +               q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14131 +               list_del(&q->ixp_q_list);
14132 +               ixp_q_process(q);
14133 +       }
14134 +}
14135 +
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+/*
+ * 2.6.20+ workqueue API passes the work_struct rather than a void *
+ * argument;  recover the owning session and call the real handler.
+ */
+static void
+ixp_process_pending_wq(struct work_struct *work)
+{
+	struct ixp_data *ixp = container_of(work, struct ixp_data,
+								ixp_pending_work);
+	ixp_process_pending(ixp);
+}
+#endif
14145 +
+/*
+ * callback for when context registration is complete
+ *
+ * The access library calls this twice per registration:  once with
+ * IX_CRYPTO_ACC_STATUS_WAIT (after consuming the primary buffer) and
+ * once with the final status.  On success the session is marked
+ * registered and the pending-request work is scheduled;  on failure
+ * every queued request is completed with EINVAL.
+ */
+
+static void
+ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
+{
+	int i;
+	struct ixp_data *ixp;
+	struct ixp_q *q;
+
+	dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);
+
+	/*
+	 * free any buffer passed in to this routine
+	 */
+	/* (these are the 128-byte buffers allocated in ixp_registration()) */
+	if (bufp) {
+		IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
+		kfree(IX_MBUF_MDATA(bufp));
+		IX_MBUF_MDATA(bufp) = NULL;
+	}
+
+	/* find the session that owns this hardware context id */
+	for (i = 0; i < ixp_sesnum; i++) {
+		ixp = ixp_sessions[i];
+		if (ixp && ixp->ixp_ctx_id == ctx_id)
+			break;
+	}
+	if (i >= ixp_sesnum) {
+		printk("ixp: invalid context id %d\n", ctx_id);
+		return;
+	}
+
+	if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
+		/* this is normal to free the first of two buffers */
+		dprintk("ixp: register not finished yet.\n");
+		return;
+	}
+
+	if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
+		printk("ixp: register failed 0x%x\n", status);
+		/* registration failed:  fail everything queued on the session */
+		while (!list_empty(&ixp->ixp_q)) {
+			q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
+			list_del(&q->ixp_q_list);
+			q->ixp_q_crp->crp_etype = EINVAL;
+			crypto_done(q->ixp_q_crp);
+			kmem_cache_free(qcache, q);
+		}
+		return;
+	}
+
+	/*
+	 * we are now registered,  we cannot start processing the Q here
+	 * or we get strange errors with AES (DES/3DES seem to be ok).
+	 */
+	ixp->ixp_registered = 1;
+	schedule_work(&ixp->ixp_pending_work);
+}
14203 +
14204 +
14205 +/*
14206 + * callback for when data processing is complete
14207 + */
14208 +
14209 +static void
14210 +ixp_perform_cb(
14211 +       UINT32 ctx_id,
14212 +       IX_MBUF *sbufp,
14213 +       IX_MBUF *dbufp,
14214 +       IxCryptoAccStatus status)
14215 +{
14216 +       struct ixp_q *q;
14217 +
14218 +       dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
14219 +                       dbufp, status);
14220 +
14221 +       if (sbufp == NULL) {
14222 +               printk("ixp: NULL sbuf in ixp_perform_cb\n");
14223 +               return;
14224 +       }
14225 +
14226 +       q = IX_MBUF_PRIV(sbufp);
14227 +       if (q == NULL) {
14228 +               printk("ixp: NULL priv in ixp_perform_cb\n");
14229 +               return;
14230 +       }
14231 +
14232 +       if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14233 +               printk("ixp: perform failed status=%d\n", status);
14234 +               q->ixp_q_crp->crp_etype = EINVAL;
14235 +       }
14236 +
14237 +       crypto_done(q->ixp_q_crp);
14238 +       kmem_cache_free(qcache, q);
14239 +}
14240 +
14241 +
14242 +/*
14243 + * registration is not callable at IRQ time,  so we defer
14244 + * to a task queue,  this routines completes the registration for us
14245 + * when the task queue runs
14246 + *
14247 + * Unfortunately this means we cannot tell OCF that the driver is blocked,
14248 + * we do that on the next request.
14249 + */
14250 +
14251 +static void
14252 +ixp_registration(void *arg)
14253 +{
14254 +       struct ixp_data *ixp = arg;
14255 +       struct ixp_q *q = NULL;
14256 +       IX_MBUF *pri = NULL, *sec = NULL;
14257 +       int status = IX_CRYPTO_ACC_STATUS_SUCCESS;
14258 +
14259 +       if (!ixp) {
14260 +               printk("ixp: ixp_registration with no arg\n");
14261 +               return;
14262 +       }
14263 +
14264 +       if (ixp->ixp_ctx_id != -1) {
14265 +               ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
14266 +               ixp->ixp_ctx_id = -1;
14267 +       }
14268 +
14269 +       if (list_empty(&ixp->ixp_q)) {
14270 +               printk("ixp: ixp_registration with no Q\n");
14271 +               return;
14272 +       }
14273 +
14274 +       /*
14275 +        * setup the primary and secondary buffers
14276 +        */
14277 +       q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14278 +       if (q->ixp_q_acrd) {
14279 +               pri = &ixp->ixp_pri_mbuf;
14280 +               sec = &ixp->ixp_sec_mbuf;
14281 +               IX_MBUF_MLEN(pri)  = IX_MBUF_PKT_LEN(pri) = 128;
14282 +               IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
14283 +               IX_MBUF_MLEN(sec)  = IX_MBUF_PKT_LEN(sec) = 128;
14284 +               IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
14285 +       }
14286 +
14287 +       /* Only need to register if a crypt op or HMAC op */
14288 +       if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
14289 +                               ixp->ixp_auth_alg == CRYPTO_MD5)) {
14290 +               status = ixCryptoAccCtxRegister(
14291 +                                       &ixp->ixp_ctx,
14292 +                                       pri, sec,
14293 +                                       ixp_register_cb,
14294 +                                       ixp_perform_cb,
14295 +                                       &ixp->ixp_ctx_id);
14296 +       }
14297 +       else {
14298 +               /* Otherwise we start processing pending q */
14299 +               schedule_work(&ixp->ixp_pending_work);
14300 +       }
14301 +
14302 +       if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
14303 +               return;
14304 +
14305 +       if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
14306 +               printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
14307 +               ixp_blocked = 1;
14308 +               /* perhaps we should return EGAIN on queued ops ? */
14309 +               return;
14310 +       }
14311 +
14312 +       printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
14313 +       ixp->ixp_ctx_id = -1;
14314 +
14315 +       /*
14316 +        * everything waiting is toasted
14317 +        */
14318 +       while (!list_empty(&ixp->ixp_q)) {
14319 +               q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14320 +               list_del(&q->ixp_q_list);
14321 +               q->ixp_q_crp->crp_etype = ENOENT;
14322 +               crypto_done(q->ixp_q_crp);
14323 +               kmem_cache_free(qcache, q);
14324 +       }
14325 +}
14326 +
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+/*
+ * 2.6.20+ workqueue API wrapper:  recover the owning session from the
+ * embedded work_struct and run the old-style registration handler.
+ */
+static void
+ixp_registration_wq(struct work_struct *work)
+{
+	struct ixp_data *ixp = container_of(work, struct ixp_data,
+								ixp_registration_work);
+	ixp_registration(ixp);
+}
+#endif
14336 +
+/*
+ * Process a request.
+ *
+ * OCF symmetric-request entry point.  Validates the request, matches
+ * its descriptors against the session's configured cipher/auth
+ * algorithms, then either performs it immediately (hardware context
+ * already registered) or queues it and schedules the registration
+ * work.  Returns ERESTART while the driver is blocked (out of
+ * tunnels);  otherwise always returns 0, reporting per-request errors
+ * through crp->crp_etype and crypto_done().
+ */
+static int
+ixp_process(device_t dev, struct cryptop *crp, int hint)
+{
+	struct ixp_data *ixp;
+	unsigned int lid;
+	struct ixp_q *q = NULL;
+	int status;
+
+	dprintk("%s()\n", __FUNCTION__);
+
+	/* Sanity check */
+	if (crp == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	crp->crp_etype = 0;
+
+	/* set by ixp_registration() when the hardware is out of tunnels */
+	if (ixp_blocked)
+		return ERESTART;
+
+	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		crp->crp_etype = EINVAL;
+		goto done;
+	}
+
+	/*
+	 * find the session we are using
+	 */
+
+	lid = crp->crp_sid & 0xffffffff;
+	if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
+			ixp_sessions[lid] == NULL) {
+		crp->crp_etype = ENOENT;
+		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
+		goto done;
+	}
+	ixp = ixp_sessions[lid];
+
+	/*
+	 * setup a new request ready for queuing
+	 */
+	q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
+	if (q == NULL) {
+		dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
+		crp->crp_etype = ENOMEM;
+		goto done;
+	}
+	/*
+	 * save some cycles by only zeroing the important bits
+	 */
+	memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
+	q->ixp_q_ccrd = NULL;
+	q->ixp_q_acrd = NULL;
+	q->ixp_q_crp = crp;
+	q->ixp_q_data = ixp;
+
+	/*
+	 * point the cipher and auth descriptors appropriately
+	 * check that we have something to do
+	 */
+	if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
+		q->ixp_q_ccrd = crp->crp_desc;
+	else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
+		q->ixp_q_acrd = crp->crp_desc;
+	else {
+		crp->crp_etype = ENOENT;
+		dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
+		goto done;
+	}
+	/* at most two descriptors are supported:  one cipher, one auth */
+	if (crp->crp_desc->crd_next) {
+		if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
+			q->ixp_q_ccrd = crp->crp_desc->crd_next;
+		else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
+			q->ixp_q_acrd = crp->crp_desc->crd_next;
+		else {
+			crp->crp_etype = ENOENT;
+			dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
+			goto done;
+		}
+	}
+
+	/*
+	 * If there is a direction change for this context then we mark it as
+	 * unregistered and re-register is for the new direction.  This is not
+	 * a very expensive operation and currently only tends to happen when
+	 * user-space application are doing benchmarks
+	 *
+	 * DM - we should be checking for pending requests before unregistering.
+	 */
+	if (q->ixp_q_ccrd && ixp->ixp_registered &&
+			ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
+		dprintk("%s - detected direction change on session\n", __FUNCTION__);
+		ixp->ixp_registered = 0;
+	}
+
+	/*
+	 * if we are registered,  call straight into the perform code
+	 */
+	if (ixp->ixp_registered) {
+		ixp_q_process(q);
+		return 0;
+	}
+
+	/*
+	 * the only part of the context not set in newsession is the direction
+	 * dependent parts
+	 */
+	if (q->ixp_q_ccrd) {
+		ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
+		if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
+			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
+					IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
+		} else {
+			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
+					IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
+		}
+	} else {
+		/* q->ixp_q_acrd must be set if we are here */
+		ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
+	}
+
+	/* only the first request queued needs to kick the registration work */
+	status = list_empty(&ixp->ixp_q);
+	list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
+	if (status)
+		schedule_work(&ixp->ixp_registration_work);
+	return 0;
+
+done:
+	if (q)
+		kmem_cache_free(qcache, q);
+	crypto_done(crp);
+	return 0;
+}
14475 +
14476 +
14477 +#ifdef __ixp46X
14478 +/*
14479 + * key processing support for the ixp465
14480 + */
14481 +
14482 +
14483 +/*
14484 + * copy a BN (LE) into a buffer (BE) an fill out the op appropriately
14485 + * assume zeroed and only copy bits that are significant
14486 + */
14487 +
14488 +static int
14489 +ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
14490 +{
14491 +       unsigned char *src = (unsigned char *) p->crp_p;
14492 +       unsigned char *dst;
14493 +       int len, bits = p->crp_nbits;
14494 +
14495 +       dprintk("%s()\n", __FUNCTION__);
14496 +
14497 +       if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
14498 +               dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
14499 +                               bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
14500 +               return -1;
14501 +       }
14502 +
14503 +       len = (bits + 31) / 32; /* the number UINT32's needed */
14504 +
14505 +       dst = (unsigned char *) &buf[len];
14506 +       dst--;
14507 +
14508 +       while (bits > 0) {
14509 +               *dst-- = *src++;
14510 +               bits -= 8;
14511 +       }
14512 +
14513 +#if 0 /* no need to zero remaining bits as it is done during request alloc */
14514 +       while (dst > (unsigned char *) buf)
14515 +               *dst-- = '\0';
14516 +#endif
14517 +
14518 +       op->pData = buf;
14519 +       op->dataLen = len;
14520 +       return 0;
14521 +}
14522 +
14523 +/*
14524 + * copy out the result,  be as forgiving as we can about small output buffers
14525 + */
14526 +
14527 +static int
14528 +ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
14529 +{
14530 +       unsigned char *dst = (unsigned char *) p->crp_p;
14531 +       unsigned char *src = (unsigned char *) buf;
14532 +       int len, z, bits = p->crp_nbits;
14533 +
14534 +       dprintk("%s()\n", __FUNCTION__);
14535 +
14536 +       len = op->dataLen * sizeof(UINT32);
14537 +
14538 +       /* skip leading zeroes to be small buffer friendly */
14539 +       z = 0;
14540 +       while (z < len && src[z] == '\0')
14541 +               z++;
14542 +
14543 +       src += len;
14544 +       src--;
14545 +       len -= z;
14546 +
14547 +       while (len > 0 && bits > 0) {
14548 +               *dst++ = *src--;
14549 +               len--;
14550 +               bits -= 8;
14551 +       }
14552 +
14553 +       while (bits > 0) {
14554 +               *dst++ = '\0';
14555 +               bits -= 8;
14556 +       }
14557 +
14558 +       if (len > 0) {
14559 +               dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
14560 +                               __FUNCTION__, len, z, p->crp_nbits / 8);
14561 +               return -1;
14562 +       }
14563 +
14564 +       return 0;
14565 +}
14566 +
14567 +
14568 +/*
14569 + * the parameter offsets for exp_mod
14570 + */
14571 +
14572 +#define IXP_PARAM_BASE 0
14573 +#define IXP_PARAM_EXP  1
14574 +#define IXP_PARAM_MOD  2
14575 +#define IXP_PARAM_RES  3
14576 +
14577 +/*
14578 + * key processing complete callback,  is also used to start processing
14579 + * by passing a NULL for pResult
14580 + */
14581 +
14582 +static void
14583 +ixp_kperform_cb(
14584 +       IxCryptoAccPkeEauOperation operation,
14585 +       IxCryptoAccPkeEauOpResult *pResult,
14586 +       BOOL carryOrBorrow,
14587 +       IxCryptoAccStatus status)
14588 +{
14589 +       struct ixp_pkq *q, *tmp;
14590 +       unsigned long flags;
14591 +
14592 +       dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
14593 +                       carryOrBorrow, status);
14594 +
14595 +       /* handle a completed request */
14596 +       if (pResult) {
14597 +               if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
14598 +                       q = ixp_pk_cur;
14599 +                       if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14600 +                               dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
14601 +                               q->pkq_krp->krp_status = ERANGE; /* could do better */
14602 +                       } else {
14603 +                               /* copy out the result */
14604 +                               if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
14605 +                                               &q->pkq_result, q->pkq_obuf))
14606 +                                       q->pkq_krp->krp_status = ERANGE;
14607 +                       }
14608 +                       crypto_kdone(q->pkq_krp);
14609 +                       kfree(q);
14610 +                       ixp_pk_cur = NULL;
14611 +               } else
14612 +                       printk("%s - callback with invalid result pointer\n", __FUNCTION__);
14613 +       }
14614 +
14615 +       spin_lock_irqsave(&ixp_pkq_lock, flags);
14616 +       if (ixp_pk_cur || list_empty(&ixp_pkq)) {
14617 +               spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14618 +               return;
14619 +       }
14620 +
14621 +       list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {
14622 +
14623 +               list_del(&q->pkq_list);
14624 +               ixp_pk_cur = q;
14625 +
14626 +               spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14627 +
14628 +               status = ixCryptoAccPkeEauPerform(
14629 +                               IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
14630 +                               &q->pkq_op,
14631 +                               ixp_kperform_cb,
14632 +                               &q->pkq_result);
14633 +       
14634 +               if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
14635 +                       dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
14636 +                       return; /* callback will return here for callback */
14637 +               } else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
14638 +                       printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
14639 +               } else {
14640 +                       printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
14641 +                                       __FUNCTION__, status);
14642 +               }
14643 +               q->pkq_krp->krp_status = ERANGE; /* could do better */
14644 +               crypto_kdone(q->pkq_krp);
14645 +               kfree(q);
14646 +               spin_lock_irqsave(&ixp_pkq_lock, flags);
14647 +       }
14648 +       spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14649 +}
14650 +
14651 +
14652 +static int
14653 +ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
14654 +{
14655 +       struct ixp_pkq *q;
14656 +       int rc = 0;
14657 +       unsigned long flags;
14658 +
14659 +       dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
14660 +                       krp->krp_param[IXP_PARAM_BASE].crp_nbits,
14661 +                       krp->krp_param[IXP_PARAM_EXP].crp_nbits,
14662 +                       krp->krp_param[IXP_PARAM_MOD].crp_nbits,
14663 +                       krp->krp_param[IXP_PARAM_RES].crp_nbits);
14664 +
14665 +
14666 +       if (krp->krp_op != CRK_MOD_EXP) {
14667 +               krp->krp_status = EOPNOTSUPP;
14668 +               goto err;
14669 +       }
14670 +
14671 +       q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
14672 +       if (q == NULL) {
14673 +               krp->krp_status = ENOMEM;
14674 +               goto err;
14675 +       }
14676 +
14677 +       /*
14678 +        * The PKE engine does not appear to zero the output buffer
14679 +        * appropriately, so we need to do it all here.
14680 +        */
14681 +       memset(q, 0, sizeof(*q));
14682 +
14683 +       q->pkq_krp = krp;
14684 +       INIT_LIST_HEAD(&q->pkq_list);
14685 +
14686 +       if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
14687 +                       q->pkq_ibuf0))
14688 +               rc = 1;
14689 +       if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
14690 +                               &q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
14691 +               rc = 2;
14692 +       if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
14693 +                               &q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
14694 +               rc = 3;
14695 +
14696 +       if (rc) {
14697 +               kfree(q);
14698 +               krp->krp_status = ERANGE;
14699 +               goto err;
14700 +       }
14701 +
14702 +       q->pkq_result.pData           = q->pkq_obuf;
14703 +       q->pkq_result.dataLen         =
14704 +                       (krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;
14705 +
14706 +       spin_lock_irqsave(&ixp_pkq_lock, flags);
14707 +       list_add_tail(&q->pkq_list, &ixp_pkq);
14708 +       spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14709 +
14710 +       if (!ixp_pk_cur)
14711 +               ixp_kperform_cb(0, NULL, 0, 0);
14712 +       return (0);
14713 +
14714 +err:
14715 +       crypto_kdone(krp);
14716 +       return (0);
14717 +}
14718 +
14719 +
14720 +
14721 +#ifdef CONFIG_OCF_RANDOMHARVEST
14722 +/*
14723 + * We run the random number generator output through SHA so that it
14724 + * is FIPS compliant.
14725 + */
14726 +
14727 +static volatile int sha_done = 0;
14728 +static unsigned char sha_digest[20];
14729 +
14730 +static void
14731 +ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
14732 +{
14733 +       dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
14734 +       if (sha_digest != digest)
14735 +               printk("digest error\n");
14736 +       if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
14737 +               sha_done = 1;
14738 +       else
14739 +               sha_done = -status;
14740 +}
14741 +
14742 +static int
14743 +ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
14744 +{
14745 +       IxCryptoAccStatus status;
14746 +       int i, n, rc;
14747 +
14748 +       dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
14749 +       memset(buf, 0, maxwords * sizeof(*buf));
14750 +       status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
14751 +       if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14752 +               dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
14753 +                               __FUNCTION__, status);
14754 +               return 0;
14755 +       }
14756 +
14757 +       /*
14758 +        * run the random data through SHA to make it look more random
14759 +        */
14760 +
14761 +       n = sizeof(sha_digest); /* process digest bytes at a time */
14762 +
14763 +       rc = 0;
14764 +       for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
14765 +               if ((maxwords - i) * sizeof(*buf) < n)
14766 +                       n = (maxwords - i) * sizeof(*buf);
14767 +               sha_done = 0;
14768 +               status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
14769 +                               (UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
14770 +               if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14771 +                       dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
14772 +                       return -EIO;
14773 +               }
14774 +               while (!sha_done)
14775 +                       schedule();
14776 +               if (sha_done < 0) {
14777 +                       dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
14778 +                       return 0;
14779 +               }
14780 +               memcpy(&buf[i], sha_digest, n);
14781 +               rc += n / sizeof(*buf);
14782 +       }
14783 +
14784 +       return rc;
14785 +}
14786 +#endif /* CONFIG_OCF_RANDOMHARVEST */
14787 +
14788 +#endif /* __ixp46X */
14789 +
14790 +
14791 +
14792 +/*
14793 + * our driver startup and shutdown routines
14794 + */
14795 +
14796 +static int
14797 +ixp_init(void)
14798 +{
14799 +       dprintk("%s(%p)\n", __FUNCTION__, ixp_init);
14800 +
14801 +       if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
14802 +               printk("ixCryptoAccInit failed, assuming already initialised!\n");
14803 +
14804 +       qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
14805 +                               SLAB_HWCACHE_ALIGN, NULL
14806 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
14807 +                               , NULL
14808 +#endif
14809 +                                 );
14810 +       if (!qcache) {
14811 +               printk("failed to create Qcache\n");
14812 +               return -ENOENT;
14813 +       }
14814 +
14815 +       memset(&ixpdev, 0, sizeof(ixpdev));
14816 +       softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);
14817 +
14818 +       ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
14819 +                               CRYPTOCAP_F_HARDWARE);
14820 +       if (ixp_id < 0)
14821 +               panic("IXP/OCF crypto device cannot initialize!");
14822 +
14823 +#define        REGISTER(alg) \
14824 +       crypto_register(ixp_id,alg,0,0)
14825 +
14826 +       REGISTER(CRYPTO_DES_CBC);
14827 +       REGISTER(CRYPTO_3DES_CBC);
14828 +       REGISTER(CRYPTO_RIJNDAEL128_CBC);
14829 +#ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
14830 +       REGISTER(CRYPTO_MD5);
14831 +       REGISTER(CRYPTO_SHA1);
14832 +#endif
14833 +       REGISTER(CRYPTO_MD5_HMAC);
14834 +       REGISTER(CRYPTO_SHA1_HMAC);
14835 +#undef REGISTER
14836 +
14837 +#ifdef __ixp46X
14838 +       spin_lock_init(&ixp_pkq_lock);
14839 +       /*
14840 +        * we do not enable the go fast options here as they can potentially
14841 +        * allow timing based attacks
14842 +        *
14843 +        * http://www.openssl.org/news/secadv_20030219.txt
14844 +        */
14845 +       ixCryptoAccPkeEauExpConfig(0, 0);
14846 +       crypto_kregister(ixp_id, CRK_MOD_EXP, 0);
14847 +#ifdef CONFIG_OCF_RANDOMHARVEST
14848 +       crypto_rregister(ixp_id, ixp_read_random, NULL);
14849 +#endif
14850 +#endif
14851 +
14852 +       return 0;
14853 +}
14854 +
14855 +static void
14856 +ixp_exit(void)
14857 +{
14858 +       dprintk("%s()\n", __FUNCTION__);
14859 +       crypto_unregister_all(ixp_id);
14860 +       ixp_id = -1;
14861 +       kmem_cache_destroy(qcache);
14862 +       qcache = NULL;
14863 +}
14864 +
14865 +module_init(ixp_init);
14866 +module_exit(ixp_exit);
14867 +
14868 +MODULE_LICENSE("Dual BSD/GPL");
14869 +MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
14870 +MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");
14871 --- /dev/null
14872 +++ b/crypto/ocf/cryptodev.c
14873 @@ -0,0 +1,1048 @@
14874 +/*     $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $  */
14875 +
14876 +/*-
14877 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
14878 + * Copyright (C) 2006-2007 David McCullough
14879 + * Copyright (C) 2004-2005 Intel Corporation.
14880 + * The license and original author are listed below.
14881 + *
14882 + * Copyright (c) 2001 Theo de Raadt
14883 + * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
14884 + *
14885 + * Redistribution and use in source and binary forms, with or without
14886 + * modification, are permitted provided that the following conditions
14887 + * are met:
14888 + *
14889 + * 1. Redistributions of source code must retain the above copyright
14890 + *   notice, this list of conditions and the following disclaimer.
14891 + * 2. Redistributions in binary form must reproduce the above copyright
14892 + *   notice, this list of conditions and the following disclaimer in the
14893 + *   documentation and/or other materials provided with the distribution.
14894 + * 3. The name of the author may not be used to endorse or promote products
14895 + *   derived from this software without specific prior written permission.
14896 + *
14897 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14898 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
14899 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
14900 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
14901 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
14902 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
14903 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
14904 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
14905 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
14906 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
14907 + *
14908 + * Effort sponsored in part by the Defense Advanced Research Projects
14909 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
14910 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
14911 + *
14912 +__FBSDID("$FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.34 2007/05/09 19:37:02 gnn Exp $");
14913 + */
14914 +
14915 +#ifndef AUTOCONF_INCLUDED
14916 +#include <linux/config.h>
14917 +#endif
14918 +#include <linux/types.h>
14919 +#include <linux/time.h>
14920 +#include <linux/delay.h>
14921 +#include <linux/list.h>
14922 +#include <linux/init.h>
14923 +#include <linux/sched.h>
14924 +#include <linux/unistd.h>
14925 +#include <linux/module.h>
14926 +#include <linux/wait.h>
14927 +#include <linux/slab.h>
14928 +#include <linux/fs.h>
14929 +#include <linux/dcache.h>
14930 +#include <linux/file.h>
14931 +#include <linux/mount.h>
14932 +#include <linux/miscdevice.h>
14933 +#include <linux/version.h>
14934 +#include <asm/uaccess.h>
14935 +
14936 +#include <cryptodev.h>
14937 +#include <uio.h>
14938 +
14939 +extern asmlinkage long sys_dup(unsigned int fildes);
14940 +
14941 +#define debug cryptodev_debug
14942 +int cryptodev_debug = 0;
14943 +module_param(cryptodev_debug, int, 0644);
14944 +MODULE_PARM_DESC(cryptodev_debug, "Enable cryptodev debug");
14945 +
14946 +struct csession_info {
14947 +       u_int16_t       blocksize;
14948 +       u_int16_t       minkey, maxkey;
14949 +
14950 +       u_int16_t       keysize;
14951 +       /* u_int16_t    hashsize;  */
14952 +       u_int16_t       authsize;
14953 +       /* u_int16_t    ctxsize; */
14954 +};
14955 +
14956 +struct csession {
14957 +       struct list_head        list;
14958 +       u_int64_t       sid;
14959 +       u_int32_t       ses;
14960 +
14961 +       wait_queue_head_t waitq;
14962 +
14963 +       u_int32_t       cipher;
14964 +
14965 +       u_int32_t       mac;
14966 +
14967 +       caddr_t         key;
14968 +       int             keylen;
14969 +       u_char          tmp_iv[EALG_MAX_BLOCK_LEN];
14970 +
14971 +       caddr_t         mackey;
14972 +       int             mackeylen;
14973 +
14974 +       struct csession_info info;
14975 +
14976 +       struct iovec    iovec;
14977 +       struct uio      uio;
14978 +       int             error;
14979 +};
14980 +
14981 +struct fcrypt {
14982 +       struct list_head        csessions;
14983 +       int             sesn;
14984 +};
14985 +
14986 +static struct csession *csefind(struct fcrypt *, u_int);
14987 +static int csedelete(struct fcrypt *, struct csession *);
14988 +static struct csession *cseadd(struct fcrypt *, struct csession *);
14989 +static struct csession *csecreate(struct fcrypt *, u_int64_t,
14990 +               struct cryptoini *crie, struct cryptoini *cria, struct csession_info *);
14991 +static int csefree(struct csession *);
14992 +
14993 +static int cryptodev_op(struct csession *, struct crypt_op *);
14994 +static int cryptodev_key(struct crypt_kop *);
14995 +static int cryptodev_find(struct crypt_find_op *);
14996 +
14997 +static int cryptodev_cb(void *);
14998 +static int cryptodev_open(struct inode *inode, struct file *filp);
14999 +
15000 +/*
15001 + * Check a crypto identifier to see if it requested
15002 + * a valid crid and it's capabilities match.
15003 + */
15004 +static int
15005 +checkcrid(int crid)
15006 +{
15007 +       int hid = crid & ~(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
15008 +       int typ = crid & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
15009 +       int caps = 0;
15010 +       
15011 +       /* if the user hasn't selected a driver, then just call newsession */
15012 +       if (hid == 0 && typ != 0)
15013 +               return 0;
15014 +
15015 +       caps = crypto_getcaps(hid);
15016 +
15017 +       /* didn't find anything with capabilities */
15018 +       if (caps == 0) {
15019 +               dprintk("%s: hid=%x typ=%x not matched\n", __FUNCTION__, hid, typ);
15020 +               return EINVAL;
15021 +       }
15022 +       
15023 +       /* the user didn't specify SW or HW, so the driver is ok */
15024 +       if (typ == 0)
15025 +               return 0;
15026 +
15027 +       /* if the type specified didn't match */
15028 +       if (typ != (caps & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE))) {
15029 +               dprintk("%s: hid=%x typ=%x caps=%x not matched\n", __FUNCTION__,
15030 +                               hid, typ, caps);
15031 +               return EINVAL;
15032 +       }
15033 +
15034 +       return 0;
15035 +}
15036 +
15037 +static int
15038 +cryptodev_op(struct csession *cse, struct crypt_op *cop)
15039 +{
15040 +       struct cryptop *crp = NULL;
15041 +       struct cryptodesc *crde = NULL, *crda = NULL;
15042 +       int error = 0;
15043 +
15044 +       dprintk("%s()\n", __FUNCTION__);
15045 +       if (cop->len > CRYPTO_MAX_DATA_LEN) {
15046 +               dprintk("%s: %d > %d\n", __FUNCTION__, cop->len, CRYPTO_MAX_DATA_LEN);
15047 +               return (E2BIG);
15048 +       }
15049 +
15050 +       if (cse->info.blocksize && (cop->len % cse->info.blocksize) != 0) {
15051 +               dprintk("%s: blocksize=%d len=%d\n", __FUNCTION__, cse->info.blocksize,
15052 +                               cop->len);
15053 +               return (EINVAL);
15054 +       }
15055 +
15056 +       cse->uio.uio_iov = &cse->iovec;
15057 +       cse->uio.uio_iovcnt = 1;
15058 +       cse->uio.uio_offset = 0;
15059 +#if 0
15060 +       cse->uio.uio_resid = cop->len;
15061 +       cse->uio.uio_segflg = UIO_SYSSPACE;
15062 +       cse->uio.uio_rw = UIO_WRITE;
15063 +       cse->uio.uio_td = td;
15064 +#endif
15065 +       cse->uio.uio_iov[0].iov_len = cop->len;
15066 +       if (cse->info.authsize)
15067 +               cse->uio.uio_iov[0].iov_len += cse->info.authsize;
15068 +       cse->uio.uio_iov[0].iov_base = kmalloc(cse->uio.uio_iov[0].iov_len,
15069 +                       GFP_KERNEL);
15070 +
15071 +       if (cse->uio.uio_iov[0].iov_base == NULL) {
15072 +               dprintk("%s: iov_base kmalloc(%d) failed\n", __FUNCTION__,
15073 +                               cse->uio.uio_iov[0].iov_len);
15074 +               return (ENOMEM);
15075 +       }
15076 +
15077 +       crp = crypto_getreq((cse->info.blocksize != 0) + (cse->info.authsize != 0));
15078 +       if (crp == NULL) {
15079 +               dprintk("%s: ENOMEM\n", __FUNCTION__);
15080 +               error = ENOMEM;
15081 +               goto bail;
15082 +       }
15083 +
15084 +       if (cse->info.authsize) {
15085 +               crda = crp->crp_desc;
15086 +               if (cse->info.blocksize)
15087 +                       crde = crda->crd_next;
15088 +       } else {
15089 +               if (cse->info.blocksize)
15090 +                       crde = crp->crp_desc;
15091 +               else {
15092 +                       dprintk("%s: bad request\n", __FUNCTION__);
15093 +                       error = EINVAL;
15094 +                       goto bail;
15095 +               }
15096 +       }
15097 +
15098 +       if ((error = copy_from_user(cse->uio.uio_iov[0].iov_base, cop->src,
15099 +                                       cop->len))) {
15100 +               dprintk("%s: bad copy\n", __FUNCTION__);
15101 +               goto bail;
15102 +       }
15103 +
15104 +       if (crda) {
15105 +               crda->crd_skip = 0;
15106 +               crda->crd_len = cop->len;
15107 +               crda->crd_inject = cop->len;
15108 +
15109 +               crda->crd_alg = cse->mac;
15110 +               crda->crd_key = cse->mackey;
15111 +               crda->crd_klen = cse->mackeylen * 8;
15112 +       }
15113 +
15114 +       if (crde) {
15115 +               if (cop->op == COP_ENCRYPT)
15116 +                       crde->crd_flags |= CRD_F_ENCRYPT;
15117 +               else
15118 +                       crde->crd_flags &= ~CRD_F_ENCRYPT;
15119 +               crde->crd_len = cop->len;
15120 +               crde->crd_inject = 0;
15121 +
15122 +               crde->crd_alg = cse->cipher;
15123 +               crde->crd_key = cse->key;
15124 +               crde->crd_klen = cse->keylen * 8;
15125 +       }
15126 +
15127 +       crp->crp_ilen = cse->uio.uio_iov[0].iov_len;
15128 +       crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
15129 +                      | (cop->flags & COP_F_BATCH);
15130 +       crp->crp_buf = (caddr_t)&cse->uio;
15131 +       crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
15132 +       crp->crp_sid = cse->sid;
15133 +       crp->crp_opaque = (void *)cse;
15134 +
15135 +       if (cop->iv) {
15136 +               if (crde == NULL) {
15137 +                       error = EINVAL;
15138 +                       dprintk("%s no crde\n", __FUNCTION__);
15139 +                       goto bail;
15140 +               }
15141 +               if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
15142 +                       error = EINVAL;
15143 +                       dprintk("%s arc4 with IV\n", __FUNCTION__);
15144 +                       goto bail;
15145 +               }
15146 +               if ((error = copy_from_user(cse->tmp_iv, cop->iv,
15147 +                                               cse->info.blocksize))) {
15148 +                       dprintk("%s bad iv copy\n", __FUNCTION__);
15149 +                       goto bail;
15150 +               }
15151 +               memcpy(crde->crd_iv, cse->tmp_iv, cse->info.blocksize);
15152 +               crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
15153 +               crde->crd_skip = 0;
15154 +       } else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
15155 +               crde->crd_skip = 0;
15156 +       } else if (crde) {
15157 +               crde->crd_flags |= CRD_F_IV_PRESENT;
15158 +               crde->crd_skip = cse->info.blocksize;
15159 +               crde->crd_len -= cse->info.blocksize;
15160 +       }
15161 +
15162 +       if (cop->mac && crda == NULL) {
15163 +               error = EINVAL;
15164 +               dprintk("%s no crda\n", __FUNCTION__);
15165 +               goto bail;
15166 +       }
15167 +
15168 +       /*
15169 +        * Let the dispatch run unlocked, then, interlock against the
15170 +        * callback before checking if the operation completed and going
15171 +        * to sleep.  This insures drivers don't inherit our lock which
15172 +        * results in a lock order reversal between crypto_dispatch forced
15173 +        * entry and the crypto_done callback into us.
15174 +        */
15175 +       error = crypto_dispatch(crp);
15176 +       if (error == 0) {
15177 +               dprintk("%s about to WAIT\n", __FUNCTION__);
15178 +               /*
15179 +                * we really need to wait for driver to complete to maintain
15180 +                * state,  luckily interrupts will be remembered
15181 +                */
15182 +               do {
15183 +                       error = wait_event_interruptible(crp->crp_waitq,
15184 +                                       ((crp->crp_flags & CRYPTO_F_DONE) != 0));
15185 +                       /*
15186 +                        * we can't break out of this loop or we will leave behind
15187 +                        * a huge mess,  however,  staying here means if your driver
15188 +                        * is broken user applications can hang and not be killed.
15189 +                        * The solution,  fix your driver :-)
15190 +                        */
15191 +                       if (error) {
15192 +                               schedule();
15193 +                               error = 0;
15194 +                       }
15195 +               } while ((crp->crp_flags & CRYPTO_F_DONE) == 0);
15196 +               dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
15197 +       }
15198 +
15199 +       if (crp->crp_etype != 0) {
15200 +               error = crp->crp_etype;
15201 +               dprintk("%s error in crp processing\n", __FUNCTION__);
15202 +               goto bail;
15203 +       }
15204 +
15205 +       if (cse->error) {
15206 +               error = cse->error;
15207 +               dprintk("%s error in cse processing\n", __FUNCTION__);
15208 +               goto bail;
15209 +       }
15210 +
15211 +       if (cop->dst && (error = copy_to_user(cop->dst,
15212 +                                       cse->uio.uio_iov[0].iov_base, cop->len))) {
15213 +               dprintk("%s bad dst copy\n", __FUNCTION__);
15214 +               goto bail;
15215 +       }
15216 +
15217 +       if (cop->mac &&
15218 +                       (error=copy_to_user(cop->mac,
15219 +                               (caddr_t)cse->uio.uio_iov[0].iov_base + cop->len,
15220 +                               cse->info.authsize))) {
15221 +               dprintk("%s bad mac copy\n", __FUNCTION__);
15222 +               goto bail;
15223 +       }
15224 +
15225 +bail:
15226 +       if (crp)
15227 +               crypto_freereq(crp);
15228 +       if (cse->uio.uio_iov[0].iov_base)
15229 +               kfree(cse->uio.uio_iov[0].iov_base);
15230 +
15231 +       return (error);
15232 +}
15233 +
15234 +static int
15235 +cryptodev_cb(void *op)
15236 +{
15237 +       struct cryptop *crp = (struct cryptop *) op;
15238 +       struct csession *cse = (struct csession *)crp->crp_opaque;
15239 +       int error;
15240 +
15241 +       dprintk("%s()\n", __FUNCTION__);
15242 +       error = crp->crp_etype;
15243 +       if (error == EAGAIN) {
15244 +               crp->crp_flags &= ~CRYPTO_F_DONE;
15245 +#ifdef NOTYET
15246 +               /*
15247 +                * DAVIDM I am fairly sure that we should turn this into a batch
15248 +                * request to stop bad karma/lockup, revisit
15249 +                */
15250 +               crp->crp_flags |= CRYPTO_F_BATCH;
15251 +#endif
15252 +               return crypto_dispatch(crp);
15253 +       }
15254 +       if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
15255 +               cse->error = error;
15256 +               wake_up_interruptible(&crp->crp_waitq);
15257 +       }
15258 +       return (0);
15259 +}
15260 +
15261 +static int
15262 +cryptodevkey_cb(void *op)
15263 +{
15264 +       struct cryptkop *krp = (struct cryptkop *) op;
15265 +       dprintk("%s()\n", __FUNCTION__);
15266 +       wake_up_interruptible(&krp->krp_waitq);
15267 +       return (0);
15268 +}
15269 +
15270 +static int
15271 +cryptodev_key(struct crypt_kop *kop)
15272 +{
15273 +       struct cryptkop *krp = NULL;
15274 +       int error = EINVAL;
15275 +       int in, out, size, i;
15276 +
15277 +       dprintk("%s()\n", __FUNCTION__);
15278 +       if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) {
15279 +               dprintk("%s params too big\n", __FUNCTION__);
15280 +               return (EFBIG);
15281 +       }
15282 +
15283 +       in = kop->crk_iparams;
15284 +       out = kop->crk_oparams;
15285 +       switch (kop->crk_op) {
15286 +       case CRK_MOD_EXP:
15287 +               if (in == 3 && out == 1)
15288 +                       break;
15289 +               return (EINVAL);
15290 +       case CRK_MOD_EXP_CRT:
15291 +               if (in == 6 && out == 1)
15292 +                       break;
15293 +               return (EINVAL);
15294 +       case CRK_DSA_SIGN:
15295 +               if (in == 5 && out == 2)
15296 +                       break;
15297 +               return (EINVAL);
15298 +       case CRK_DSA_VERIFY:
15299 +               if (in == 7 && out == 0)
15300 +                       break;
15301 +               return (EINVAL);
15302 +       case CRK_DH_COMPUTE_KEY:
15303 +               if (in == 3 && out == 1)
15304 +                       break;
15305 +               return (EINVAL);
15306 +       default:
15307 +               return (EINVAL);
15308 +       }
15309 +
15310 +       krp = (struct cryptkop *)kmalloc(sizeof *krp, GFP_KERNEL);
15311 +       if (!krp)
15312 +               return (ENOMEM);
15313 +       bzero(krp, sizeof *krp);
15314 +       krp->krp_op = kop->crk_op;
15315 +       krp->krp_status = kop->crk_status;
15316 +       krp->krp_iparams = kop->crk_iparams;
15317 +       krp->krp_oparams = kop->crk_oparams;
15318 +       krp->krp_crid = kop->crk_crid;
15319 +       krp->krp_status = 0;
15320 +       krp->krp_flags = CRYPTO_KF_CBIMM;
15321 +       krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
15322 +       init_waitqueue_head(&krp->krp_waitq);
15323 +
15324 +       for (i = 0; i < CRK_MAXPARAM; i++)
15325 +               krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
15326 +       for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
15327 +               size = (krp->krp_param[i].crp_nbits + 7) / 8;
15328 +               if (size == 0)
15329 +                       continue;
15330 +               if (!(krp->krp_param[i].crp_p = (caddr_t) kmalloc(size, GFP_KERNEL))) { error = ENOMEM; goto fail; }
15331 +               if (i >= krp->krp_iparams)
15332 +                       continue;
15333 +               error = copy_from_user(krp->krp_param[i].crp_p,
15334 +                               kop->crk_param[i].crp_p, size);
15335 +               if (error)
15336 +                       goto fail;
15337 +       }
15338 +
15339 +       error = crypto_kdispatch(krp);
15340 +       if (error)
15341 +               goto fail;
15342 +
15343 +       do {
15344 +               error = wait_event_interruptible(krp->krp_waitq,
15345 +                               ((krp->krp_flags & CRYPTO_KF_DONE) != 0));
15346 +               /*
15347 +                * we can't break out of this loop or we will leave behind
15348 +                * a huge mess,  however,  staying here means if your driver
15349 +                * is broken user applications can hang and not be killed.
15350 +                * The solution,  fix your driver :-)
15351 +                */
15352 +               if (error) {
15353 +                       schedule();
15354 +                       error = 0;
15355 +               }
15356 +       } while ((krp->krp_flags & CRYPTO_KF_DONE) == 0);
15357 +
15358 +       dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
15359 +       
15360 +       kop->crk_crid = krp->krp_crid;          /* device that did the work */
15361 +       if (krp->krp_status != 0) {
15362 +               error = krp->krp_status;
15363 +               goto fail;
15364 +       }
15365 +
15366 +       for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) {
15367 +               size = (krp->krp_param[i].crp_nbits + 7) / 8;
15368 +               if (size == 0)
15369 +                       continue;
15370 +               error = copy_to_user(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p,
15371 +                               size);
15372 +               if (error)
15373 +                       goto fail;
15374 +       }
15375 +
15376 +fail:
15377 +       if (krp) {
15378 +               kop->crk_status = krp->krp_status;
15379 +               for (i = 0; i < CRK_MAXPARAM; i++) {
15380 +                       if (krp->krp_param[i].crp_p)
15381 +                               kfree(krp->krp_param[i].crp_p);
15382 +               }
15383 +               kfree(krp);
15384 +       }
15385 +       return (error);
15386 +}
15387 +
15388 +static int
15389 +cryptodev_find(struct crypt_find_op *find)
15390 +{
15391 +       device_t dev;
15392 +
15393 +       if (find->crid != -1) {
15394 +               dev = crypto_find_device_byhid(find->crid);
15395 +               if (dev == NULL)
15396 +                       return (ENOENT);
15397 +               strlcpy(find->name, device_get_nameunit(dev),
15398 +                   sizeof(find->name));
15399 +       } else {
15400 +               find->crid = crypto_find_driver(find->name);
15401 +               if (find->crid == -1)
15402 +                       return (ENOENT);
15403 +       }
15404 +       return (0);
15405 +}
15406 +
15407 +static struct csession *
15408 +csefind(struct fcrypt *fcr, u_int ses)
15409 +{
15410 +       struct csession *cse;
15411 +
15412 +       dprintk("%s()\n", __FUNCTION__);
15413 +       list_for_each_entry(cse, &fcr->csessions, list)
15414 +               if (cse->ses == ses)
15415 +                       return (cse);
15416 +       return (NULL);
15417 +}
15418 +
15419 +static int
15420 +csedelete(struct fcrypt *fcr, struct csession *cse_del)
15421 +{
15422 +       struct csession *cse;
15423 +
15424 +       dprintk("%s()\n", __FUNCTION__);
15425 +       list_for_each_entry(cse, &fcr->csessions, list) {
15426 +               if (cse == cse_del) {
15427 +                       list_del(&cse->list);
15428 +                       return (1);
15429 +               }
15430 +       }
15431 +       return (0);
15432 +}
15433 +       
15434 +static struct csession *
15435 +cseadd(struct fcrypt *fcr, struct csession *cse)
15436 +{
15437 +       dprintk("%s()\n", __FUNCTION__);
15438 +       list_add_tail(&cse->list, &fcr->csessions);
15439 +       cse->ses = fcr->sesn++;
15440 +       return (cse);
15441 +}
15442 +
15443 +static struct csession *
15444 +csecreate(struct fcrypt *fcr, u_int64_t sid, struct cryptoini *crie,
15445 +       struct cryptoini *cria, struct csession_info *info)
15446 +{
15447 +       struct csession *cse;
15448 +
15449 +       dprintk("%s()\n", __FUNCTION__);
15450 +       cse = (struct csession *) kmalloc(sizeof(struct csession), GFP_KERNEL);
15451 +       if (cse == NULL)
15452 +               return NULL;
15453 +       memset(cse, 0, sizeof(struct csession));
15454 +
15455 +       INIT_LIST_HEAD(&cse->list);
15456 +       init_waitqueue_head(&cse->waitq);
15457 +
15458 +       cse->key = crie->cri_key;
15459 +       cse->keylen = crie->cri_klen/8;
15460 +       cse->mackey = cria->cri_key;
15461 +       cse->mackeylen = cria->cri_klen/8;
15462 +       cse->sid = sid;
15463 +       cse->cipher = crie->cri_alg;
15464 +       cse->mac = cria->cri_alg;
15465 +       cse->info = *info;
15466 +       cseadd(fcr, cse);
15467 +       return (cse);
15468 +}
15469 +
15470 +static int
15471 +csefree(struct csession *cse)
15472 +{
15473 +       int error;
15474 +
15475 +       dprintk("%s()\n", __FUNCTION__);
15476 +       error = crypto_freesession(cse->sid);
15477 +       if (cse->key)
15478 +               kfree(cse->key);
15479 +       if (cse->mackey)
15480 +               kfree(cse->mackey);
15481 +       kfree(cse);
15482 +       return(error);
15483 +}
15484 +
15485 +static int
15486 +cryptodev_ioctl(
15487 +       struct inode *inode,
15488 +       struct file *filp,
15489 +       unsigned int cmd,
15490 +       unsigned long arg)
15491 +{
15492 +       struct cryptoini cria = { 0 }, crie = { 0 };
15493 +       struct fcrypt *fcr = filp->private_data;
15494 +       struct csession *cse;
15495 +       struct csession_info info;
15496 +       struct session2_op sop;
15497 +       struct crypt_op cop;
15498 +       struct crypt_kop kop;
15499 +       struct crypt_find_op fop;
15500 +       u_int64_t sid;
15501 +       u_int32_t ses;
15502 +       int feat, fd, error = 0, crid;
15503 +       mm_segment_t fs;
15504 +
15505 +       dprintk("%s(cmd=%x arg=%lx)\n", __FUNCTION__, cmd, arg);
15506 +
15507 +       switch (cmd) {
15508 +
15509 +       case CRIOGET: {
15510 +               dprintk("%s(CRIOGET)\n", __FUNCTION__);
15511 +               fs = get_fs();
15512 +               set_fs(get_ds());
15513 +               for (fd = 0; fd < files_fdtable(current->files)->max_fds; fd++)
15514 +                       if (files_fdtable(current->files)->fd[fd] == filp)
15515 +                               break;
15516 +               fd = sys_dup(fd);
15517 +               set_fs(fs);
15518 +               put_user(fd, (int *) arg);
15519 +               return IS_ERR_VALUE(fd) ? fd : 0;
15520 +               }
15521 +
15522 +#define        CIOCGSESSSTR    (cmd == CIOCGSESSION ? "CIOCGSESSION" : "CIOCGSESSION2")
15523 +       case CIOCGSESSION:
15524 +       case CIOCGSESSION2:
15525 +               dprintk("%s(%s)\n", __FUNCTION__, CIOCGSESSSTR);
15526 +               memset(&crie, 0, sizeof(crie));
15527 +               memset(&cria, 0, sizeof(cria));
15528 +               memset(&info, 0, sizeof(info));
15529 +               memset(&sop, 0, sizeof(sop));
15530 +
15531 +               if (copy_from_user(&sop, (void*)arg, (cmd == CIOCGSESSION) ?
15532 +                                       sizeof(struct session_op) : sizeof(sop))) {
15533 +                       dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15534 +                       error = EFAULT;
15535 +                       goto bail;
15536 +               }
15537 +
15538 +               switch (sop.cipher) {
15539 +               case 0:
15540 +                       dprintk("%s(%s) - no cipher\n", __FUNCTION__, CIOCGSESSSTR);
15541 +                       break;
15542 +               case CRYPTO_NULL_CBC:
15543 +                       info.blocksize = NULL_BLOCK_LEN;
15544 +                       info.minkey = NULL_MIN_KEY_LEN;
15545 +                       info.maxkey = NULL_MAX_KEY_LEN;
15546 +                       break;
15547 +               case CRYPTO_DES_CBC:
15548 +                       info.blocksize = DES_BLOCK_LEN;
15549 +                       info.minkey = DES_MIN_KEY_LEN;
15550 +                       info.maxkey = DES_MAX_KEY_LEN;
15551 +                       break;
15552 +               case CRYPTO_3DES_CBC:
15553 +                       info.blocksize = DES3_BLOCK_LEN;
15554 +                       info.minkey = DES3_MIN_KEY_LEN;
15555 +                       info.maxkey = DES3_MAX_KEY_LEN;
15556 +                       break;
15557 +               case CRYPTO_BLF_CBC:
15558 +                       info.blocksize = BLOWFISH_BLOCK_LEN;
15559 +                       info.minkey = BLOWFISH_MIN_KEY_LEN;
15560 +                       info.maxkey = BLOWFISH_MAX_KEY_LEN;
15561 +                       break;
15562 +               case CRYPTO_CAST_CBC:
15563 +                       info.blocksize = CAST128_BLOCK_LEN;
15564 +                       info.minkey = CAST128_MIN_KEY_LEN;
15565 +                       info.maxkey = CAST128_MAX_KEY_LEN;
15566 +                       break;
15567 +               case CRYPTO_SKIPJACK_CBC:
15568 +                       info.blocksize = SKIPJACK_BLOCK_LEN;
15569 +                       info.minkey = SKIPJACK_MIN_KEY_LEN;
15570 +                       info.maxkey = SKIPJACK_MAX_KEY_LEN;
15571 +                       break;
15572 +               case CRYPTO_AES_CBC:
15573 +                       info.blocksize = AES_BLOCK_LEN;
15574 +                       info.minkey = AES_MIN_KEY_LEN;
15575 +                       info.maxkey = AES_MAX_KEY_LEN;
15576 +                       break;
15577 +               case CRYPTO_ARC4:
15578 +                       info.blocksize = ARC4_BLOCK_LEN;
15579 +                       info.minkey = ARC4_MIN_KEY_LEN;
15580 +                       info.maxkey = ARC4_MAX_KEY_LEN;
15581 +                       break;
15582 +               case CRYPTO_CAMELLIA_CBC:
15583 +                       info.blocksize = CAMELLIA_BLOCK_LEN;
15584 +                       info.minkey = CAMELLIA_MIN_KEY_LEN;
15585 +                       info.maxkey = CAMELLIA_MAX_KEY_LEN;
15586 +                       break;
15587 +               default:
15588 +                       dprintk("%s(%s) - bad cipher\n", __FUNCTION__, CIOCGSESSSTR);
15589 +                       error = EINVAL;
15590 +                       goto bail;
15591 +               }
15592 +
15593 +               switch (sop.mac) {
15594 +               case 0:
15595 +                       dprintk("%s(%s) - no mac\n", __FUNCTION__, CIOCGSESSSTR);
15596 +                       break;
15597 +               case CRYPTO_NULL_HMAC:
15598 +                       info.authsize = NULL_HASH_LEN;
15599 +                       break;
15600 +               case CRYPTO_MD5:
15601 +                       info.authsize = MD5_HASH_LEN;
15602 +                       break;
15603 +               case CRYPTO_SHA1:
15604 +                       info.authsize = SHA1_HASH_LEN;
15605 +                       break;
15606 +               case CRYPTO_SHA2_256:
15607 +                       info.authsize = SHA2_256_HASH_LEN;
15608 +                       break;
15609 +               case CRYPTO_SHA2_384:
15610 +                       info.authsize = SHA2_384_HASH_LEN;
15611 +                       break;
15612 +               case CRYPTO_SHA2_512:
15613 +                       info.authsize = SHA2_512_HASH_LEN;
15614 +                       break;
15615 +               case CRYPTO_RIPEMD160:
15616 +                       info.authsize = RIPEMD160_HASH_LEN;
15617 +                       break;
15618 +               case CRYPTO_MD5_HMAC:
15619 +                       info.authsize = MD5_HASH_LEN;
15620 +                       break;
15621 +               case CRYPTO_SHA1_HMAC:
15622 +                       info.authsize = SHA1_HASH_LEN;
15623 +                       break;
15624 +               case CRYPTO_SHA2_256_HMAC:
15625 +                       info.authsize = SHA2_256_HASH_LEN;
15626 +                       break;
15627 +               case CRYPTO_SHA2_384_HMAC:
15628 +                       info.authsize = SHA2_384_HASH_LEN;
15629 +                       break;
15630 +               case CRYPTO_SHA2_512_HMAC:
15631 +                       info.authsize = SHA2_512_HASH_LEN;
15632 +                       break;
15633 +               case CRYPTO_RIPEMD160_HMAC:
15634 +                       info.authsize = RIPEMD160_HASH_LEN;
15635 +                       break;
15636 +               default:
15637 +                       dprintk("%s(%s) - bad mac\n", __FUNCTION__, CIOCGSESSSTR);
15638 +                       error = EINVAL;
15639 +                       goto bail;
15640 +               }
15641 +
15642 +               if (info.blocksize) {
15643 +                       crie.cri_alg = sop.cipher;
15644 +                       crie.cri_klen = sop.keylen * 8;
15645 +                       if ((info.maxkey && sop.keylen > info.maxkey) ||
15646 +                                       sop.keylen < info.minkey) {
15647 +                               dprintk("%s(%s) - bad key\n", __FUNCTION__, CIOCGSESSSTR);
15648 +                               error = EINVAL;
15649 +                               goto bail;
15650 +                       }
15651 +
15652 +                       crie.cri_key = (u_int8_t *) kmalloc(crie.cri_klen/8+1, GFP_KERNEL);
15653 +                       if (!crie.cri_key || copy_from_user(crie.cri_key, sop.key,
15654 +                                                       crie.cri_klen/8)) {
15655 +                               dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15656 +                               error = EFAULT;
15657 +                               goto bail;
15658 +                       }
15659 +                       if (info.authsize)
15660 +                               crie.cri_next = &cria;
15661 +               }
15662 +
15663 +               if (info.authsize) {
15664 +                       cria.cri_alg = sop.mac;
15665 +                       cria.cri_klen = sop.mackeylen * 8;
15666 +                       if ((info.maxkey && sop.mackeylen > info.maxkey) ||
15667 +                                       sop.mackeylen < info.minkey) {
15668 +                               dprintk("%s(%s) - mackeylen %d\n", __FUNCTION__, CIOCGSESSSTR,
15669 +                                               sop.mackeylen);
15670 +                               error = EINVAL;
15671 +                               goto bail;
15672 +                       }
15673 +
15674 +                       if (cria.cri_klen) {
15675 +                               cria.cri_key = (u_int8_t *) kmalloc(cria.cri_klen/8,GFP_KERNEL);
15676 +                               if (!cria.cri_key || copy_from_user(cria.cri_key, sop.mackey,
15677 +                                                               cria.cri_klen / 8)) {
15678 +                                       dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15679 +                                       error = EFAULT;
15680 +                                       goto bail;
15681 +                               }
15682 +                       }
15683 +               }
15684 +
15685 +               /* NB: CIOGSESSION2 has the crid */
15686 +               if (cmd == CIOCGSESSION2) {
15687 +                       crid = sop.crid;
15688 +                       error = checkcrid(crid);
15689 +                       if (error) {
15690 +                               dprintk("%s(%s) - checkcrid %x\n", __FUNCTION__,
15691 +                                               CIOCGSESSSTR, error);
15692 +                               goto bail;
15693 +                       }
15694 +               } else {
15695 +                       /* allow either HW or SW to be used */
15696 +                       crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
15697 +               }
15698 +               error = crypto_newsession(&sid, (info.blocksize ? &crie : &cria), crid);
15699 +               if (error) {
15700 +                       dprintk("%s(%s) - newsession %d\n",__FUNCTION__,CIOCGSESSSTR,error);
15701 +                       goto bail;
15702 +               }
15703 +
15704 +               cse = csecreate(fcr, sid, &crie, &cria, &info);
15705 +               if (cse == NULL) {
15706 +                       crypto_freesession(sid);
15707 +                       error = EINVAL;
15708 +                       dprintk("%s(%s) - csecreate failed\n", __FUNCTION__, CIOCGSESSSTR);
15709 +                       goto bail;
15710 +               }
15711 +               sop.ses = cse->ses;
15712 +
15713 +               if (cmd == CIOCGSESSION2) {
15714 +                       /* return hardware/driver id */
15715 +                       sop.crid = CRYPTO_SESID2HID(cse->sid);
15716 +               }
15717 +
15718 +               if (copy_to_user((void*)arg, &sop, (cmd == CIOCGSESSION) ?
15719 +                                       sizeof(struct session_op) : sizeof(sop))) {
15720 +                       dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15721 +                       error = EFAULT;
15722 +               }
15723 +bail:
15724 +               if (error) {
15725 +                       dprintk("%s(%s) - bail %d\n", __FUNCTION__, CIOCGSESSSTR, error);
15726 +                       if (crie.cri_key)
15727 +                               kfree(crie.cri_key);
15728 +                       if (cria.cri_key)
15729 +                               kfree(cria.cri_key);
15730 +               }
15731 +               break;
15732 +       case CIOCFSESSION:
15733 +               dprintk("%s(CIOCFSESSION)\n", __FUNCTION__);
15734 +               get_user(ses, (uint32_t*)arg);
15735 +               cse = csefind(fcr, ses);
15736 +               if (cse == NULL) {
15737 +                       error = EINVAL;
15738 +                       dprintk("%s(CIOCFSESSION) - Fail %d\n", __FUNCTION__, error);
15739 +                       break;
15740 +               }
15741 +               csedelete(fcr, cse);
15742 +               error = csefree(cse);
15743 +               break;
15744 +       case CIOCCRYPT:
15745 +               dprintk("%s(CIOCCRYPT)\n", __FUNCTION__);
15746 +               if(copy_from_user(&cop, (void*)arg, sizeof(cop))) {
15747 +                       dprintk("%s(CIOCCRYPT) - bad copy\n", __FUNCTION__);
15748 +                       error = EFAULT;
15749 +                       goto bail;
15750 +               }
15751 +               cse = csefind(fcr, cop.ses);
15752 +               if (cse == NULL) {
15753 +                       error = EINVAL;
15754 +                       dprintk("%s(CIOCCRYPT) - Fail %d\n", __FUNCTION__, error);
15755 +                       break;
15756 +               }
15757 +               error = cryptodev_op(cse, &cop);
15758 +               if(copy_to_user((void*)arg, &cop, sizeof(cop))) {
15759 +                       dprintk("%s(CIOCCRYPT) - bad return copy\n", __FUNCTION__);
15760 +                       error = EFAULT;
15761 +                       goto bail;
15762 +               }
15763 +               break;
15764 +       case CIOCKEY:
15765 +       case CIOCKEY2:
15766 +               dprintk("%s(CIOCKEY)\n", __FUNCTION__);
15767 +               if (!crypto_userasymcrypto)
15768 +                       return (EPERM);         /* XXX compat? */
15769 +               if(copy_from_user(&kop, (void*)arg, sizeof(kop))) {
15770 +                       dprintk("%s(CIOCKEY) - bad copy\n", __FUNCTION__);
15771 +                       error = EFAULT;
15772 +                       goto bail;
15773 +               }
15774 +               if (cmd == CIOCKEY) {
15775 +                       /* NB: crypto core enforces s/w driver use */
15776 +                       kop.crk_crid =
15777 +                           CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
15778 +               }
15779 +               error = cryptodev_key(&kop);
15780 +               if(copy_to_user((void*)arg, &kop, sizeof(kop))) {
15781 +                       dprintk("%s(CIOCGKEY) - bad return copy\n", __FUNCTION__);
15782 +                       error = EFAULT;
15783 +                       goto bail;
15784 +               }
15785 +               break;
15786 +       case CIOCASYMFEAT:
15787 +               dprintk("%s(CIOCASYMFEAT)\n", __FUNCTION__);
15788 +               if (!crypto_userasymcrypto) {
15789 +                       /*
15790 +                        * NB: if user asym crypto operations are
15791 +                        * not permitted return "no algorithms"
15792 +                        * so well-behaved applications will just
15793 +                        * fallback to doing them in software.
15794 +                        */
15795 +                       feat = 0;
15796 +               } else
15797 +                       error = crypto_getfeat(&feat);
15798 +               if (!error) {
15799 +                 error = copy_to_user((void*)arg, &feat, sizeof(feat)) ? EFAULT : 0;
15800 +               }
15801 +               break;
15802 +       case CIOCFINDDEV:
15803 +               if (copy_from_user(&fop, (void*)arg, sizeof(fop))) {
15804 +                       dprintk("%s(CIOCFINDDEV) - bad copy\n", __FUNCTION__);
15805 +                       error = EFAULT;
15806 +                       goto bail;
15807 +               }
15808 +               error = cryptodev_find(&fop);
15809 +               if (copy_to_user((void*)arg, &fop, sizeof(fop))) {
15810 +                       dprintk("%s(CIOCFINDDEV) - bad return copy\n", __FUNCTION__);
15811 +                       error = EFAULT;
15812 +                       goto bail;
15813 +               }
15814 +               break;
15815 +       default:
15816 +               dprintk("%s(unknown ioctl 0x%x)\n", __FUNCTION__, cmd);
15817 +               error = EINVAL;
15818 +               break;
15819 +       }
15820 +       return(-error);
15821 +}
15822 +
15823 +#ifdef HAVE_UNLOCKED_IOCTL
15824 +static long
15825 +cryptodev_unlocked_ioctl(
15826 +       struct file *filp,
15827 +       unsigned int cmd,
15828 +       unsigned long arg)
15829 +{
15830 +       return cryptodev_ioctl(NULL, filp, cmd, arg);
15831 +}
15832 +#endif
15833 +
15834 +static int
15835 +cryptodev_open(struct inode *inode, struct file *filp)
15836 +{
15837 +       struct fcrypt *fcr;
15838 +
15839 +       dprintk("%s()\n", __FUNCTION__);
15840 +       if (filp->private_data) {
15841 +               printk("cryptodev: Private data already exists !\n");
15842 +               return(0);
15843 +       }
15844 +
15845 +       fcr = kmalloc(sizeof(*fcr), GFP_KERNEL);
15846 +       if (!fcr) {
15847 +               dprintk("%s() - malloc failed\n", __FUNCTION__);
15848 +               return(-ENOMEM);
15849 +       }
15850 +       memset(fcr, 0, sizeof(*fcr));
15851 +
15852 +       INIT_LIST_HEAD(&fcr->csessions);
15853 +       filp->private_data = fcr;
15854 +       return(0);
15855 +}
15856 +
15857 +static int
15858 +cryptodev_release(struct inode *inode, struct file *filp)
15859 +{
15860 +       struct fcrypt *fcr = filp->private_data;
15861 +       struct csession *cse, *tmp;
15862 +
15863 +       dprintk("%s()\n", __FUNCTION__);
15864 +       if (!fcr) {
15865 +               printk("cryptodev: No private data on release\n");
15866 +               return(0);
15867 +       }
15868 +
15869 +       list_for_each_entry_safe(cse, tmp, &fcr->csessions, list) {
15870 +               list_del(&cse->list);
15871 +               (void)csefree(cse);
15872 +       }
15873 +       filp->private_data = NULL;
15874 +       kfree(fcr);
15875 +       return(0);
15876 +}
15877 +
15878 +static struct file_operations cryptodev_fops = {
15879 +       .owner = THIS_MODULE,
15880 +       .open = cryptodev_open,
15881 +       .release = cryptodev_release,
15882 +       .ioctl = cryptodev_ioctl,
15883 +#ifdef HAVE_UNLOCKED_IOCTL
15884 +       .unlocked_ioctl = cryptodev_unlocked_ioctl,
15885 +#endif
15886 +};
15887 +
15888 +static struct miscdevice cryptodev = {
15889 +       .minor = CRYPTODEV_MINOR,
15890 +       .name = "crypto",
15891 +       .fops = &cryptodev_fops,
15892 +};
15893 +
15894 +static int __init
15895 +cryptodev_init(void)
15896 +{
15897 +       int rc;
15898 +
15899 +       dprintk("%s(%p)\n", __FUNCTION__, cryptodev_init);
15900 +       rc = misc_register(&cryptodev);
15901 +       if (rc) {
15902 +               printk(KERN_ERR "cryptodev: registration of /dev/crypto failed\n");
15903 +               return(rc);
15904 +       }
15905 +
15906 +       return(0);
15907 +}
15908 +
15909 +static void __exit
15910 +cryptodev_exit(void)
15911 +{
15912 +       dprintk("%s()\n", __FUNCTION__);
15913 +       misc_deregister(&cryptodev);
15914 +}
15915 +
15916 +module_init(cryptodev_init);
15917 +module_exit(cryptodev_exit);
15918 +
15919 +MODULE_LICENSE("BSD");
15920 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
15921 +MODULE_DESCRIPTION("Cryptodev (user interface to OCF)");
15922 --- /dev/null
15923 +++ b/crypto/ocf/cryptodev.h
15924 @@ -0,0 +1,478 @@
15925 +/*     $FreeBSD: src/sys/opencrypto/cryptodev.h,v 1.25 2007/05/09 19:37:02 gnn Exp $   */
15926 +/*     $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $     */
15927 +
15928 +/*-
15929 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
15930 + * Copyright (C) 2006-2007 David McCullough
15931 + * Copyright (C) 2004-2005 Intel Corporation.
15932 + * The license and original author are listed below.
15933 + *
15934 + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
15935 + * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
15936 + *
15937 + * This code was written by Angelos D. Keromytis in Athens, Greece, in
15938 + * February 2000. Network Security Technologies Inc. (NSTI) kindly
15939 + * supported the development of this code.
15940 + *
15941 + * Copyright (c) 2000 Angelos D. Keromytis
15942 + *
15943 + * Permission to use, copy, and modify this software with or without fee
15944 + * is hereby granted, provided that this entire notice is included in
15945 + * all source code copies of any software which is or includes a copy or
15946 + * modification of this software.
15947 + *
15948 + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
15949 + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
15950 + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
15951 + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
15952 + * PURPOSE.
15953 + *
15954 + * Copyright (c) 2001 Theo de Raadt
15955 + *
15956 + * Redistribution and use in source and binary forms, with or without
15957 + * modification, are permitted provided that the following conditions
15958 + * are met:
15959 + *
15960 + * 1. Redistributions of source code must retain the above copyright
15961 + *   notice, this list of conditions and the following disclaimer.
15962 + * 2. Redistributions in binary form must reproduce the above copyright
15963 + *   notice, this list of conditions and the following disclaimer in the
15964 + *   documentation and/or other materials provided with the distribution.
15965 + * 3. The name of the author may not be used to endorse or promote products
15966 + *   derived from this software without specific prior written permission.
15967 + *
15968 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15969 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15970 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
15971 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
15972 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
15973 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
15974 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
15975 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
15976 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
15977 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15978 + *
15979 + * Effort sponsored in part by the Defense Advanced Research Projects
15980 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
15981 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
15982 + *
15983 + */
15984 +
15985 +#ifndef _CRYPTO_CRYPTO_H_
15986 +#define _CRYPTO_CRYPTO_H_
15987 +
15988 +/* Some initial values */
15989 +#define CRYPTO_DRIVERS_INITIAL 4
15990 +#define CRYPTO_SW_SESSIONS     32
15991 +
15992 +/* Hash values */
15993 +#define NULL_HASH_LEN          0
15994 +#define MD5_HASH_LEN           16
15995 +#define SHA1_HASH_LEN          20
15996 +#define RIPEMD160_HASH_LEN     20
15997 +#define SHA2_256_HASH_LEN      32
15998 +#define SHA2_384_HASH_LEN      48
15999 +#define SHA2_512_HASH_LEN      64
16000 +#define MD5_KPDK_HASH_LEN      16
16001 +#define SHA1_KPDK_HASH_LEN     20
16002 +/* Maximum hash algorithm result length */
16003 +#define HASH_MAX_LEN           SHA2_512_HASH_LEN /* Keep this updated */
16004 +
16005 +/* HMAC values */
16006 +#define NULL_HMAC_BLOCK_LEN                    1
16007 +#define MD5_HMAC_BLOCK_LEN                     64
16008 +#define SHA1_HMAC_BLOCK_LEN                    64
16009 +#define RIPEMD160_HMAC_BLOCK_LEN       64
16010 +#define SHA2_256_HMAC_BLOCK_LEN                64
16011 +#define SHA2_384_HMAC_BLOCK_LEN                128
16012 +#define SHA2_512_HMAC_BLOCK_LEN                128
16013 +/* Maximum HMAC block length */
16014 +#define HMAC_MAX_BLOCK_LEN             SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */
16015 +#define HMAC_IPAD_VAL                  0x36
16016 +#define HMAC_OPAD_VAL                  0x5C
16017 +
16018 +/* Encryption algorithm block sizes */
16019 +#define NULL_BLOCK_LEN                 1
16020 +#define DES_BLOCK_LEN                  8
16021 +#define DES3_BLOCK_LEN                 8
16022 +#define BLOWFISH_BLOCK_LEN             8
16023 +#define SKIPJACK_BLOCK_LEN             8
16024 +#define CAST128_BLOCK_LEN              8
16025 +#define RIJNDAEL128_BLOCK_LEN  16
16026 +#define AES_BLOCK_LEN                  RIJNDAEL128_BLOCK_LEN
16027 +#define CAMELLIA_BLOCK_LEN             16
16028 +#define ARC4_BLOCK_LEN                 1
16029 +#define EALG_MAX_BLOCK_LEN             AES_BLOCK_LEN /* Keep this updated */
16030 +
16031 +/* Encryption algorithm min and max key sizes */
16032 +#define NULL_MIN_KEY_LEN               0
16033 +#define NULL_MAX_KEY_LEN               0
16034 +#define DES_MIN_KEY_LEN                        8
16035 +#define DES_MAX_KEY_LEN                        8
16036 +#define DES3_MIN_KEY_LEN               24
16037 +#define DES3_MAX_KEY_LEN               24
16038 +#define BLOWFISH_MIN_KEY_LEN   4
16039 +#define BLOWFISH_MAX_KEY_LEN   56
16040 +#define SKIPJACK_MIN_KEY_LEN   10
16041 +#define SKIPJACK_MAX_KEY_LEN   10
16042 +#define CAST128_MIN_KEY_LEN            5
16043 +#define CAST128_MAX_KEY_LEN            16
16044 +#define RIJNDAEL128_MIN_KEY_LEN        16
16045 +#define RIJNDAEL128_MAX_KEY_LEN        32
16046 +#define AES_MIN_KEY_LEN                        RIJNDAEL128_MIN_KEY_LEN
16047 +#define AES_MAX_KEY_LEN                        RIJNDAEL128_MAX_KEY_LEN
16048 +#define CAMELLIA_MIN_KEY_LEN   16
16049 +#define CAMELLIA_MAX_KEY_LEN   32
16050 +#define ARC4_MIN_KEY_LEN               1
16051 +#define ARC4_MAX_KEY_LEN               256
16052 +
16053 +/* Max size of data that can be processed */
16054 +#define CRYPTO_MAX_DATA_LEN            64*1024 - 1
16055 +
16056 +#define CRYPTO_ALGORITHM_MIN   1
16057 +#define CRYPTO_DES_CBC                 1
16058 +#define CRYPTO_3DES_CBC                        2
16059 +#define CRYPTO_BLF_CBC                 3
16060 +#define CRYPTO_CAST_CBC                        4
16061 +#define CRYPTO_SKIPJACK_CBC            5
16062 +#define CRYPTO_MD5_HMAC                        6
16063 +#define CRYPTO_SHA1_HMAC               7
16064 +#define CRYPTO_RIPEMD160_HMAC  8
16065 +#define CRYPTO_MD5_KPDK                        9
16066 +#define CRYPTO_SHA1_KPDK               10
16067 +#define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */
16068 +#define CRYPTO_AES_CBC                 11 /* 128 bit blocksize -- the same as above */
16069 +#define CRYPTO_ARC4                            12
16070 +#define CRYPTO_MD5                             13
16071 +#define CRYPTO_SHA1                            14
16072 +#define CRYPTO_NULL_HMAC               15
16073 +#define CRYPTO_NULL_CBC                        16
16074 +#define CRYPTO_DEFLATE_COMP            17 /* Deflate compression algorithm */
16075 +#define CRYPTO_SHA2_256_HMAC   18
16076 +#define CRYPTO_SHA2_384_HMAC   19
16077 +#define CRYPTO_SHA2_512_HMAC   20
16078 +#define CRYPTO_CAMELLIA_CBC            21
16079 +#define CRYPTO_SHA2_256                        22
16080 +#define CRYPTO_SHA2_384                        23
16081 +#define CRYPTO_SHA2_512                        24
16082 +#define CRYPTO_RIPEMD160               25
16083 +#define CRYPTO_ALGORITHM_MAX   25 /* Keep updated - see below */
16084 +
16085 +/* Algorithm flags */
16086 +#define CRYPTO_ALG_FLAG_SUPPORTED      0x01 /* Algorithm is supported */
16087 +#define CRYPTO_ALG_FLAG_RNG_ENABLE     0x02 /* Has HW RNG for DH/DSA */
16088 +#define CRYPTO_ALG_FLAG_DSA_SHA                0x04 /* Can do SHA on msg */
16089 +
16090 +/*
16091 + * Crypto driver/device flags.  They can set in the crid
16092 + * parameter when creating a session or submitting a key
16093 + * op to affect the device/driver assigned.  If neither
16094 + * of these are specified then the crid is assumed to hold
16095 + * the driver id of an existing (and suitable) device that
16096 + * must be used to satisfy the request.
16097 + */
16098 +#define CRYPTO_FLAG_HARDWARE   0x01000000      /* hardware accelerated */
16099 +#define CRYPTO_FLAG_SOFTWARE   0x02000000      /* software implementation */
16100 +
16101 +/* NB: deprecated */
16102 +struct session_op {
16103 +       u_int32_t       cipher;         /* ie. CRYPTO_DES_CBC */
16104 +       u_int32_t       mac;            /* ie. CRYPTO_MD5_HMAC */
16105 +
16106 +       u_int32_t       keylen;         /* cipher key */
16107 +       caddr_t         key;
16108 +       int             mackeylen;      /* mac key */
16109 +       caddr_t         mackey;
16110 +
16111 +       u_int32_t       ses;            /* returns: session # */ 
16112 +};
16113 +
16114 +struct session2_op {
16115 +       u_int32_t       cipher;         /* ie. CRYPTO_DES_CBC */
16116 +       u_int32_t       mac;            /* ie. CRYPTO_MD5_HMAC */
16117 +
16118 +       u_int32_t       keylen;         /* cipher key */
16119 +       caddr_t         key;
16120 +       int             mackeylen;      /* mac key */
16121 +       caddr_t         mackey;
16122 +
16123 +       u_int32_t       ses;            /* returns: session # */ 
16124 +       int             crid;           /* driver id + flags (rw) */
16125 +       int             pad[4];         /* for future expansion */
16126 +};
16127 +
16128 +struct crypt_op {
16129 +       u_int32_t       ses;
16130 +       u_int16_t       op;             /* i.e. COP_ENCRYPT */
16131 +#define COP_NONE       0
16132 +#define COP_ENCRYPT    1
16133 +#define COP_DECRYPT    2
16134 +       u_int16_t       flags;
16135 +#define        COP_F_BATCH     0x0008          /* Batch op if possible */
16136 +       u_int           len;
16137 +       caddr_t         src, dst;       /* become iov[] inside kernel */
16138 +       caddr_t         mac;            /* must be big enough for chosen MAC */
16139 +       caddr_t         iv;
16140 +};
16141 +
16142 +/*
16143 + * Parameters for looking up a crypto driver/device by
16144 + * device name or by id.  The latter are returned for
16145 + * created sessions (crid) and completed key operations.
16146 + */
16147 +struct crypt_find_op {
16148 +       int             crid;           /* driver id + flags */
16149 +       char            name[32];       /* device/driver name */
16150 +};
16151 +
16152 +/* bignum parameter, in packed bytes, ... */
16153 +struct crparam {
16154 +       caddr_t         crp_p;
16155 +       u_int           crp_nbits;
16156 +};
16157 +
16158 +#define CRK_MAXPARAM   8
16159 +
16160 +struct crypt_kop {
16161 +       u_int           crk_op;         /* ie. CRK_MOD_EXP or other */
16162 +       u_int           crk_status;     /* return status */
16163 +       u_short         crk_iparams;    /* # of input parameters */
16164 +       u_short         crk_oparams;    /* # of output parameters */
16165 +       u_int           crk_crid;       /* NB: only used by CIOCKEY2 (rw) */
16166 +       struct crparam  crk_param[CRK_MAXPARAM];
16167 +};
16168 +#define CRK_ALGORITM_MIN       0
16169 +#define CRK_MOD_EXP            0
16170 +#define CRK_MOD_EXP_CRT                1
16171 +#define CRK_DSA_SIGN           2
16172 +#define CRK_DSA_VERIFY         3
16173 +#define CRK_DH_COMPUTE_KEY     4
16174 +#define CRK_ALGORITHM_MAX      4 /* Keep updated - see below */
16175 +
16176 +#define CRF_MOD_EXP            (1 << CRK_MOD_EXP)
16177 +#define CRF_MOD_EXP_CRT                (1 << CRK_MOD_EXP_CRT)
16178 +#define CRF_DSA_SIGN           (1 << CRK_DSA_SIGN)
16179 +#define CRF_DSA_VERIFY         (1 << CRK_DSA_VERIFY)
16180 +#define CRF_DH_COMPUTE_KEY     (1 << CRK_DH_COMPUTE_KEY)
16181 +
16182 +/*
16183 + * done against open of /dev/crypto, to get a cloned descriptor.
16184 + * Please use F_SETFD against the cloned descriptor.
16185 + */
16186 +#define CRIOGET                _IOWR('c', 100, u_int32_t)
16187 +#define CRIOASYMFEAT   CIOCASYMFEAT
16188 +#define CRIOFINDDEV    CIOCFINDDEV
16189 +
16190 +/* the following are done against the cloned descriptor */
16191 +#define CIOCGSESSION   _IOWR('c', 101, struct session_op)
16192 +#define CIOCFSESSION   _IOW('c', 102, u_int32_t)
16193 +#define CIOCCRYPT      _IOWR('c', 103, struct crypt_op)
16194 +#define CIOCKEY                _IOWR('c', 104, struct crypt_kop)
16195 +#define CIOCASYMFEAT   _IOR('c', 105, u_int32_t)
16196 +#define CIOCGSESSION2  _IOWR('c', 106, struct session2_op)
16197 +#define CIOCKEY2       _IOWR('c', 107, struct crypt_kop)
16198 +#define CIOCFINDDEV    _IOWR('c', 108, struct crypt_find_op)
16199 +
16200 +struct cryptotstat {
16201 +       struct timespec acc;            /* total accumulated time */
16202 +       struct timespec min;            /* min time */
16203 +       struct timespec max;            /* max time */
16204 +       u_int32_t       count;          /* number of observations */
16205 +};
16206 +
16207 +struct cryptostats {
16208 +       u_int32_t       cs_ops;         /* symmetric crypto ops submitted */
16209 +       u_int32_t       cs_errs;        /* symmetric crypto ops that failed */
16210 +       u_int32_t       cs_kops;        /* asymmetric/key ops submitted */
16211 +       u_int32_t       cs_kerrs;       /* asymmetric/key ops that failed */
16212 +       u_int32_t       cs_intrs;       /* crypto swi thread activations */
16213 +       u_int32_t       cs_rets;        /* crypto return thread activations */
16214 +       u_int32_t       cs_blocks;      /* symmetric op driver block */
16215 +       u_int32_t       cs_kblocks;     /* asymmetric/key op driver block */
16216 +       /*
16217 +        * When CRYPTO_TIMING is defined at compile time and the
16218 +        * sysctl debug.crypto is set to 1, the crypto system will
16219 +        * accumulate statistics about how long it takes to process
16220 +        * crypto requests at various points during processing.
16221 +        */
16222 +       struct cryptotstat cs_invoke;   /* crypto_dispatch -> crypto_invoke */
16223 +       struct cryptotstat cs_done;     /* crypto_invoke -> crypto_done */
16224 +       struct cryptotstat cs_cb;       /* crypto_done -> callback */
16225 +       struct cryptotstat cs_finis;    /* callback -> callback return */
16226 +
16227 +       u_int32_t       cs_drops;               /* crypto ops dropped due to congestion */
16228 +};
16229 +
16230 +#ifdef __KERNEL__
16231 +
16232 +/* Standard initialization structure beginning */
16233 +struct cryptoini {
16234 +       int             cri_alg;        /* Algorithm to use */
16235 +       int             cri_klen;       /* Key length, in bits */
16236 +       int             cri_mlen;       /* Number of bytes we want from the
16237 +                                          entire hash. 0 means all. */
16238 +       caddr_t         cri_key;        /* key to use */
16239 +       u_int8_t        cri_iv[EALG_MAX_BLOCK_LEN];     /* IV to use */
16240 +       struct cryptoini *cri_next;
16241 +};
16242 +
16243 +/* Describe boundaries of a single crypto operation */
16244 +struct cryptodesc {
16245 +       int             crd_skip;       /* How many bytes to ignore from start */
16246 +       int             crd_len;        /* How many bytes to process */
16247 +       int             crd_inject;     /* Where to inject results, if applicable */
16248 +       int             crd_flags;
16249 +
16250 +#define CRD_F_ENCRYPT          0x01    /* Set when doing encryption */
16251 +#define CRD_F_IV_PRESENT       0x02    /* When encrypting, IV is already in
16252 +                                          place, so don't copy. */
16253 +#define CRD_F_IV_EXPLICIT      0x04    /* IV explicitly provided */
16254 +#define CRD_F_DSA_SHA_NEEDED   0x08    /* Compute SHA-1 of buffer for DSA */
16255 +#define CRD_F_KEY_EXPLICIT     0x10    /* Key explicitly provided */
16256 +#define CRD_F_COMP             0x0f    /* Set when doing compression */
16257 +
16258 +       struct cryptoini        CRD_INI; /* Initialization/context data */
16259 +#define crd_iv         CRD_INI.cri_iv
16260 +#define crd_key                CRD_INI.cri_key
16261 +#define crd_alg                CRD_INI.cri_alg
16262 +#define crd_klen       CRD_INI.cri_klen
16263 +
16264 +       struct cryptodesc *crd_next;
16265 +};
16266 +
16267 +/* Structure describing complete operation */
16268 +struct cryptop {
16269 +       struct list_head crp_next;
16270 +       wait_queue_head_t crp_waitq;
16271 +
16272 +       u_int64_t       crp_sid;        /* Session ID */
16273 +       int             crp_ilen;       /* Input data total length */
16274 +       int             crp_olen;       /* Result total length */
16275 +
16276 +       int             crp_etype;      /*
16277 +                                        * Error type (zero means no error).
16278 +                                        * All error codes except EAGAIN
16279 +                                        * indicate possible data corruption (as in,
16280 +                                        * the data have been touched). On all
16281 +                                        * errors, the crp_sid may have changed
16282 +                                        * (reset to a new one), so the caller
16283 +                                        * should always check and use the new
16284 +                                        * value on future requests.
16285 +                                        */
16286 +       int             crp_flags;
16287 +
16288 +#define CRYPTO_F_SKBUF         0x0001  /* Input/output are skbuf chains */
16289 +#define CRYPTO_F_IOV           0x0002  /* Input/output are uio */
16290 +#define CRYPTO_F_REL           0x0004  /* Must return data in same place */
16291 +#define CRYPTO_F_BATCH         0x0008  /* Batch op if possible */
16292 +#define CRYPTO_F_CBIMM         0x0010  /* Do callback immediately */
16293 +#define CRYPTO_F_DONE          0x0020  /* Operation completed */
16294 +#define CRYPTO_F_CBIFSYNC      0x0040  /* Do CBIMM if op is synchronous */
16295 +
16296 +       caddr_t         crp_buf;        /* Data to be processed */
16297 +       caddr_t         crp_opaque;     /* Opaque pointer, passed along */
16298 +       struct cryptodesc *crp_desc;    /* Linked list of processing descriptors */
16299 +
16300 +       int (*crp_callback)(struct cryptop *); /* Callback function */
16301 +};
16302 +
16303 +#define CRYPTO_BUF_CONTIG      0x0
16304 +#define CRYPTO_BUF_IOV         0x1
16305 +#define CRYPTO_BUF_SKBUF               0x2
16306 +
16307 +#define CRYPTO_OP_DECRYPT      0x0
16308 +#define CRYPTO_OP_ENCRYPT      0x1
16309 +
16310 +/*
16311 + * Hints passed to process methods.
16312 + */
16313 +#define CRYPTO_HINT_MORE       0x1     /* more ops coming shortly */
16314 +
16315 +struct cryptkop {
16316 +       struct list_head krp_next;
16317 +       wait_queue_head_t krp_waitq;
16318 +
16319 +       int             krp_flags;
16320 +#define CRYPTO_KF_DONE         0x0001  /* Operation completed */
16321 +#define CRYPTO_KF_CBIMM                0x0002  /* Do callback immediately */
16322 +
16323 +       u_int           krp_op;         /* ie. CRK_MOD_EXP or other */
16324 +       u_int           krp_status;     /* return status */
16325 +       u_short         krp_iparams;    /* # of input parameters */
16326 +       u_short         krp_oparams;    /* # of output parameters */
16327 +       u_int           krp_crid;       /* desired device, etc. */
16328 +       u_int32_t       krp_hid;
16329 +       struct crparam  krp_param[CRK_MAXPARAM];        /* kvm */
16330 +       int             (*krp_callback)(struct cryptkop *);
16331 +};
16332 +
16333 +#include <ocf-compat.h>
16334 +
16335 +/*
16336 + * Session ids are 64 bits.  The lower 32 bits contain a "local id" which
16337 + * is a driver-private session identifier.  The upper 32 bits contain a
16338 + * "hardware id" used by the core crypto code to identify the driver and
16339 + * a copy of the driver's capabilities that can be used by client code to
16340 + * optimize operation.
16341 + */
16342 +#define CRYPTO_SESID2HID(_sid) (((_sid) >> 32) & 0x00ffffff)
16343 +#define CRYPTO_SESID2CAPS(_sid)        (((_sid) >> 32) & 0xff000000)
16344 +#define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff)
16345 +
16346 +extern int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard);
16347 +extern int crypto_freesession(u_int64_t sid);
16348 +#define CRYPTOCAP_F_HARDWARE   CRYPTO_FLAG_HARDWARE
16349 +#define CRYPTOCAP_F_SOFTWARE   CRYPTO_FLAG_SOFTWARE
16350 +#define CRYPTOCAP_F_SYNC       0x04000000      /* operates synchronously */
16351 +extern int32_t crypto_get_driverid(device_t dev, int flags);
16352 +extern int crypto_find_driver(const char *);
16353 +extern device_t crypto_find_device_byhid(int hid);
16354 +extern int crypto_getcaps(int hid);
16355 +extern int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
16356 +           u_int32_t flags);
16357 +extern int crypto_kregister(u_int32_t, int, u_int32_t);
16358 +extern int crypto_unregister(u_int32_t driverid, int alg);
16359 +extern int crypto_unregister_all(u_int32_t driverid);
16360 +extern int crypto_dispatch(struct cryptop *crp);
16361 +extern int crypto_kdispatch(struct cryptkop *);
16362 +#define CRYPTO_SYMQ    0x1
16363 +#define CRYPTO_ASYMQ   0x2
16364 +extern int crypto_unblock(u_int32_t, int);
16365 +extern void crypto_done(struct cryptop *crp);
16366 +extern void crypto_kdone(struct cryptkop *);
16367 +extern int crypto_getfeat(int *);
16368 +
16369 +extern void crypto_freereq(struct cryptop *crp);
16370 +extern struct cryptop *crypto_getreq(int num);
16371 +
16372 +extern  int crypto_usercrypto;      /* userland may do crypto requests */
16373 +extern  int crypto_userasymcrypto;  /* userland may do asym crypto reqs */
16374 +extern  int crypto_devallowsoft;    /* only use hardware crypto */
16375 +
16376 +/*
16377 + * random number support,  crypto_unregister_all will unregister
16378 + */
16379 +extern int crypto_rregister(u_int32_t driverid,
16380 +               int (*read_random)(void *arg, u_int32_t *buf, int len), void *arg);
16381 +extern int crypto_runregister_all(u_int32_t driverid);
16382 +
16383 +/*
16384 + * Crypto-related utility routines used mainly by drivers.
16385 + *
16386 + * XXX these don't really belong here; but for now they're
16387 + *     kept apart from the rest of the system.
16388 + */
16389 +struct uio;
16390 +extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
16391 +extern void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp);
16392 +extern struct iovec *cuio_getptr(struct uio *uio, int loc, int *off);
16393 +
16394 +extern void crypto_copyback(int flags, caddr_t buf, int off, int size,
16395 +           caddr_t in);
16396 +extern void crypto_copydata(int flags, caddr_t buf, int off, int size,
16397 +           caddr_t out);
16398 +extern int crypto_apply(int flags, caddr_t buf, int off, int len,
16399 +           int (*f)(void *, void *, u_int), void *arg);
16400 +
16401 +#endif /* __KERNEL__ */
16402 +#endif /* _CRYPTO_CRYPTO_H_ */
16403 --- /dev/null
16404 +++ b/crypto/ocf/ocfnull/ocfnull.c
16405 @@ -0,0 +1,203 @@
16406 +/*
16407 + * An OCF module for determining the cost of crypto versus the cost of
16408 + * IPSec processing outside of OCF.  This modules gives us the effect of
16409 + * zero cost encryption,  of course you will need to run it at both ends
16410 + * since it does no crypto at all.
16411 + *
16412 + * Written by David McCullough <david_mccullough@securecomputing.com>
16413 + * Copyright (C) 2006-2007 David McCullough 
16414 + *
16415 + * LICENSE TERMS
16416 + *
16417 + * The free distribution and use of this software in both source and binary
16418 + * form is allowed (with or without changes) provided that:
16419 + *
16420 + *   1. distributions of this source code include the above copyright
16421 + *      notice, this list of conditions and the following disclaimer;
16422 + *
16423 + *   2. distributions in binary form include the above copyright
16424 + *      notice, this list of conditions and the following disclaimer
16425 + *      in the documentation and/or other associated materials;
16426 + *
16427 + *   3. the copyright holder's name is not used to endorse products
16428 + *      built using this software without specific written permission.
16429 + *
16430 + * ALTERNATIVELY, provided that this notice is retained in full, this product
16431 + * may be distributed under the terms of the GNU General Public License (GPL),
16432 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
16433 + *
16434 + * DISCLAIMER
16435 + *
16436 + * This software is provided 'as is' with no explicit or implied warranties
16437 + * in respect of its properties, including, but not limited to, correctness
16438 + * and/or fitness for purpose.
16439 + */
16440 +
16441 +#ifndef AUTOCONF_INCLUDED
16442 +#include <linux/config.h>
16443 +#endif
16444 +#include <linux/module.h>
16445 +#include <linux/init.h>
16446 +#include <linux/list.h>
16447 +#include <linux/slab.h>
16448 +#include <linux/sched.h>
16449 +#include <linux/wait.h>
16450 +#include <linux/crypto.h>
16451 +#include <linux/interrupt.h>
16452 +
16453 +#include <cryptodev.h>
16454 +#include <uio.h>
16455 +
16456 +static int32_t                  null_id = -1;
16457 +static u_int32_t                null_sesnum = 0;
16458 +
16459 +static int null_process(device_t, struct cryptop *, int);
16460 +static int null_newsession(device_t, u_int32_t *, struct cryptoini *);
16461 +static int null_freesession(device_t, u_int64_t);
16462 +
16463 +#define debug ocfnull_debug
16464 +int ocfnull_debug = 0;
16465 +module_param(ocfnull_debug, int, 0644);
16466 +MODULE_PARM_DESC(ocfnull_debug, "Enable debug");
16467 +
16468 +/*
16469 + * dummy device structure
16470 + */
16471 +
16472 +static struct {
16473 +       softc_device_decl       sc_dev;
16474 +} nulldev;
16475 +
16476 +static device_method_t null_methods = {
16477 +       /* crypto device methods */
16478 +       DEVMETHOD(cryptodev_newsession, null_newsession),
16479 +       DEVMETHOD(cryptodev_freesession,null_freesession),
16480 +       DEVMETHOD(cryptodev_process,    null_process),
16481 +};
16482 +
16483 +/*
16484 + * Generate a new software session.
16485 + */
16486 +static int
16487 +null_newsession(device_t arg, u_int32_t *sid, struct cryptoini *cri)
16488 +{
16489 +       dprintk("%s()\n", __FUNCTION__);
16490 +       if (sid == NULL || cri == NULL) {
16491 +               dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
16492 +               return EINVAL;
16493 +       }
16494 +
16495 +       if (null_sesnum == 0)
16496 +               null_sesnum++;
16497 +       *sid = null_sesnum++;
16498 +       return 0;
16499 +}
16500 +
16501 +
16502 +/*
16503 + * Free a session.
16504 + */
16505 +static int
16506 +null_freesession(device_t arg, u_int64_t tid)
16507 +{
16508 +       u_int32_t sid = CRYPTO_SESID2LID(tid);
16509 +
16510 +       dprintk("%s()\n", __FUNCTION__);
16511 +       if (sid > null_sesnum) {
16512 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
16513 +               return EINVAL;
16514 +       }
16515 +
16516 +       /* Silently accept and return */
16517 +       if (sid == 0)
16518 +               return 0;
16519 +       return 0;
16520 +}
16521 +
16522 +
16523 +/*
16524 + * Process a request.
16525 + */
16526 +static int
16527 +null_process(device_t arg, struct cryptop *crp, int hint)
16528 +{
16529 +       unsigned int lid;
16530 +
16531 +       dprintk("%s()\n", __FUNCTION__);
16532 +
16533 +       /* Sanity check */
16534 +       if (crp == NULL) {
16535 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
16536 +               return EINVAL;
16537 +       }
16538 +
16539 +       crp->crp_etype = 0;
16540 +
16541 +       if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
16542 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
16543 +               crp->crp_etype = EINVAL;
16544 +               goto done;
16545 +       }
16546 +
16547 +       /*
16548 +        * find the session we are using
16549 +        */
16550 +
16551 +       lid = crp->crp_sid & 0xffffffff;
16552 +       if (lid >= null_sesnum || lid == 0) {
16553 +               crp->crp_etype = ENOENT;
16554 +               dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
16555 +               goto done;
16556 +       }
16557 +
16558 +done:
16559 +       crypto_done(crp);
16560 +       return 0;
16561 +}
16562 +
16563 +
16564 +/*
16565 + * our driver startup and shutdown routines
16566 + */
16567 +
16568 +static int
16569 +null_init(void)
16570 +{
16571 +       dprintk("%s(%p)\n", __FUNCTION__, null_init);
16572 +
16573 +       memset(&nulldev, 0, sizeof(nulldev));
16574 +       softc_device_init(&nulldev, "ocfnull", 0, null_methods);
16575 +
16576 +       null_id = crypto_get_driverid(softc_get_device(&nulldev),
16577 +                               CRYPTOCAP_F_HARDWARE);
16578 +       if (null_id < 0)
16579 +               panic("ocfnull: crypto device cannot initialize!");
16580 +
16581 +#define        REGISTER(alg) \
16582 +       crypto_register(null_id,alg,0,0)
16583 +       REGISTER(CRYPTO_DES_CBC);
16584 +       REGISTER(CRYPTO_3DES_CBC);
16585 +       REGISTER(CRYPTO_RIJNDAEL128_CBC);
16586 +       REGISTER(CRYPTO_MD5);
16587 +       REGISTER(CRYPTO_SHA1);
16588 +       REGISTER(CRYPTO_MD5_HMAC);
16589 +       REGISTER(CRYPTO_SHA1_HMAC);
16590 +#undef REGISTER
16591 +
16592 +       return 0;
16593 +}
16594 +
16595 +static void
16596 +null_exit(void)
16597 +{
16598 +       dprintk("%s()\n", __FUNCTION__);
16599 +       crypto_unregister_all(null_id);
16600 +       null_id = -1;
16601 +}
16602 +
16603 +module_init(null_init);
16604 +module_exit(null_exit);
16605 +
16606 +MODULE_LICENSE("Dual BSD/GPL");
16607 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
16608 +MODULE_DESCRIPTION("ocfnull - claims a lot but does nothing");
16609 --- /dev/null
16610 +++ b/crypto/ocf/cryptosoft.c
16611 @@ -0,0 +1,898 @@
16612 +/*
16613 + * An OCF module that uses the linux kernel cryptoapi, based on the
16614 + * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
16615 + * but is mostly unrecognisable,
16616 + *
16617 + * Written by David McCullough <david_mccullough@securecomputing.com>
16618 + * Copyright (C) 2004-2007 David McCullough
16619 + * Copyright (C) 2004-2005 Intel Corporation.
16620 + *
16621 + * LICENSE TERMS
16622 + *
16623 + * The free distribution and use of this software in both source and binary
16624 + * form is allowed (with or without changes) provided that:
16625 + *
16626 + *   1. distributions of this source code include the above copyright
16627 + *      notice, this list of conditions and the following disclaimer;
16628 + *
16629 + *   2. distributions in binary form include the above copyright
16630 + *      notice, this list of conditions and the following disclaimer
16631 + *      in the documentation and/or other associated materials;
16632 + *
16633 + *   3. the copyright holder's name is not used to endorse products
16634 + *      built using this software without specific written permission.
16635 + *
16636 + * ALTERNATIVELY, provided that this notice is retained in full, this product
16637 + * may be distributed under the terms of the GNU General Public License (GPL),
16638 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
16639 + *
16640 + * DISCLAIMER
16641 + *
16642 + * This software is provided 'as is' with no explicit or implied warranties
16643 + * in respect of its properties, including, but not limited to, correctness
16644 + * and/or fitness for purpose.
16645 + * ---------------------------------------------------------------------------
16646 + */
16647 +
16648 +#ifndef AUTOCONF_INCLUDED
16649 +#include <linux/config.h>
16650 +#endif
16651 +#include <linux/module.h>
16652 +#include <linux/init.h>
16653 +#include <linux/list.h>
16654 +#include <linux/slab.h>
16655 +#include <linux/sched.h>
16656 +#include <linux/wait.h>
16657 +#include <linux/crypto.h>
16658 +#include <linux/mm.h>
16659 +#include <linux/skbuff.h>
16660 +#include <linux/random.h>
16661 +#include <asm/scatterlist.h>
16662 +
16663 +#include <cryptodev.h>
16664 +#include <uio.h>
16665 +
16666 +struct {
16667 +       softc_device_decl       sc_dev;
16668 +} swcr_softc;
16669 +
16670 +#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
16671 +
16672 +/* Software session entry */
16673 +
16674 +#define SW_TYPE_CIPHER         0
16675 +#define SW_TYPE_HMAC           1
16676 +#define SW_TYPE_AUTH2          2
16677 +#define SW_TYPE_HASH           3
16678 +#define SW_TYPE_COMP           4
16679 +#define SW_TYPE_BLKCIPHER      5
16680 +
16681 +struct swcr_data {
16682 +       int                                     sw_type;
16683 +       int                                     sw_alg;
16684 +       struct crypto_tfm       *sw_tfm;
16685 +       union {
16686 +               struct {
16687 +                       char *sw_key;
16688 +                       int  sw_klen;
16689 +                       int  sw_mlen;
16690 +               } hmac;
16691 +               void *sw_comp_buf;
16692 +       } u;
16693 +       struct swcr_data        *sw_next;
16694 +};
16695 +
16696 +#ifndef CRYPTO_TFM_MODE_CBC
16697 +/*
16698 + * As of linux-2.6.21 this is no longer defined, and presumably no longer
16699 + * needed to be passed into the crypto core code.
16700 + */
16701 +#define        CRYPTO_TFM_MODE_CBC     0
16702 +#define        CRYPTO_TFM_MODE_ECB     0
16703 +#endif
16704 +
16705 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
16706 +       /*
16707 +        * Linux 2.6.19 introduced a new Crypto API, setup macro's to convert new
16708 +        * API into old API.
16709 +        */
16710 +
16711 +       /* Symmetric/Block Cipher */
16712 +       struct blkcipher_desc
16713 +       {
16714 +               struct crypto_tfm *tfm;
16715 +               void *info;
16716 +       };
16717 +       #define ecb(X)                                                          #X
16718 +       #define cbc(X)                                                          #X
16719 +       #define crypto_has_blkcipher(X, Y, Z)           crypto_alg_available(X, 0)
16720 +       #define crypto_blkcipher_cast(X)                        X
16721 +       #define crypto_blkcipher_tfm(X)                         X
16722 +       #define crypto_alloc_blkcipher(X, Y, Z)         crypto_alloc_tfm(X, mode)
16723 +       #define crypto_blkcipher_ivsize(X)                      crypto_tfm_alg_ivsize(X)
16724 +       #define crypto_blkcipher_blocksize(X)           crypto_tfm_alg_blocksize(X)
16725 +       #define crypto_blkcipher_setkey(X, Y, Z)        crypto_cipher_setkey(X, Y, Z)
16726 +       #define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
16727 +                               crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
16728 +       #define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
16729 +                               crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
16730 +
16731 +       /* Hash/HMAC/Digest */
16732 +       struct hash_desc
16733 +       {
16734 +               struct crypto_tfm *tfm;
16735 +       };
16736 +       #define hmac(X)                                                 #X
16737 +       #define crypto_has_hash(X, Y, Z)                crypto_alg_available(X, 0)
16738 +       #define crypto_hash_cast(X)                             X
16739 +       #define crypto_hash_tfm(X)                              X
16740 +       #define crypto_alloc_hash(X, Y, Z)              crypto_alloc_tfm(X, mode)
16741 +       #define crypto_hash_digestsize(X)               crypto_tfm_alg_digestsize(X)
16742 +       #define crypto_hash_digest(W, X, Y, Z)  \
16743 +                               crypto_digest_digest((W)->tfm, X, sg_num, Z)
16744 +
16745 +       /* Asymmetric Cipher */
16746 +       #define crypto_has_cipher(X, Y, Z)              crypto_alg_available(X, 0)
16747 +
16748 +       /* Compression */
16749 +       #define crypto_has_comp(X, Y, Z)                crypto_alg_available(X, 0)
16750 +       #define crypto_comp_tfm(X)                              X
16751 +       #define crypto_comp_cast(X)                             X
16752 +       #define crypto_alloc_comp(X, Y, Z)              crypto_alloc_tfm(X, mode)
16753 +#else
16754 +       #define ecb(X)  "ecb(" #X ")"
16755 +       #define cbc(X)  "cbc(" #X ")"
16756 +       #define hmac(X) "hmac(" #X ")"
16757 +#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
16758 +
16759 +struct crypto_details
16760 +{
16761 +       char *alg_name;
16762 +       int mode;
16763 +       int sw_type;
16764 +};
16765 +
16766 +/*
16767 + * This needs to be kept updated with CRYPTO_xxx list (cryptodev.h).
16768 + * If the Algorithm is not supported, then insert a {NULL, 0, 0} entry.
16769 + *
16770 + * IMPORTANT: The index to the array IS CRYPTO_xxx.
16771 + */
16772 +static struct crypto_details crypto_details[CRYPTO_ALGORITHM_MAX + 1] = {
16773 +       { NULL,              0,                   0 },
16774 +       /* CRYPTO_xxx index starts at 1 */
16775 +       { cbc(des),          CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16776 +       { cbc(des3_ede),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16777 +       { cbc(blowfish),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16778 +       { cbc(cast5),        CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16779 +       { cbc(skipjack),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16780 +       { hmac(md5),         0,                   SW_TYPE_HMAC },
16781 +       { hmac(sha1),        0,                   SW_TYPE_HMAC },
16782 +       { hmac(ripemd160),   0,                   SW_TYPE_HMAC },
16783 +       { "md5-kpdk??",      0,                   SW_TYPE_HASH },
16784 +       { "sha1-kpdk??",     0,                   SW_TYPE_HASH },
16785 +       { cbc(aes),          CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16786 +       { ecb(arc4),         CRYPTO_TFM_MODE_ECB, SW_TYPE_BLKCIPHER },
16787 +       { "md5",             0,                   SW_TYPE_HASH },
16788 +       { "sha1",            0,                   SW_TYPE_HASH },
16789 +       { hmac(digest_null), 0,                   SW_TYPE_HMAC },
16790 +       { cbc(cipher_null),  CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16791 +       { "deflate",         0,                   SW_TYPE_COMP },
16792 +       { hmac(sha256),      0,                   SW_TYPE_HMAC },
16793 +       { hmac(sha384),      0,                   SW_TYPE_HMAC },
16794 +       { hmac(sha512),      0,                   SW_TYPE_HMAC },
16795 +       { cbc(camellia),     CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16796 +       { "sha256",          0,                   SW_TYPE_HASH },
16797 +       { "sha384",          0,                   SW_TYPE_HASH },
16798 +       { "sha512",          0,                   SW_TYPE_HASH },
16799 +       { "ripemd160",       0,                   SW_TYPE_HASH },
16800 +};
16801 +
16802 +int32_t swcr_id = -1;
16803 +module_param(swcr_id, int, 0444);
16804 +MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");
16805 +
16806 +int swcr_fail_if_compression_grows = 1;
16807 +module_param(swcr_fail_if_compression_grows, int, 0644);
16808 +MODULE_PARM_DESC(swcr_fail_if_compression_grows,
16809 +                "Treat compression that results in more data as a failure");
16810 +
16811 +static struct swcr_data **swcr_sessions = NULL;
16812 +static u_int32_t swcr_sesnum = 0;
16813 +
16814 +static int swcr_process(device_t, struct cryptop *, int);
16815 +static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
16816 +static int swcr_freesession(device_t, u_int64_t);
16817 +
16818 +static device_method_t swcr_methods = {
16819 +       /* crypto device methods */
16820 +       DEVMETHOD(cryptodev_newsession, swcr_newsession),
16821 +       DEVMETHOD(cryptodev_freesession,swcr_freesession),
16822 +       DEVMETHOD(cryptodev_process,    swcr_process),
16823 +};
16824 +
16825 +#define debug swcr_debug
16826 +int swcr_debug = 0;
16827 +module_param(swcr_debug, int, 0644);
16828 +MODULE_PARM_DESC(swcr_debug, "Enable debug");
16829 +
16830 +/*
16831 + * Generate a new software session.
16832 + */
16833 +static int
16834 +swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
16835 +{
16836 +       struct swcr_data **swd;
16837 +       u_int32_t i;
16838 +       int error;
16839 +       char *algo;
16840 +       int mode, sw_type;
16841 +
16842 +       dprintk("%s()\n", __FUNCTION__);
16843 +       if (sid == NULL || cri == NULL) {
16844 +               dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
16845 +               return EINVAL;
16846 +       }
16847 +
16848 +       if (swcr_sessions) {
16849 +               for (i = 1; i < swcr_sesnum; i++)
16850 +                       if (swcr_sessions[i] == NULL)
16851 +                               break;
16852 +       } else
16853 +               i = 1;          /* NB: to silence compiler warning */
16854 +
16855 +       if (swcr_sessions == NULL || i == swcr_sesnum) {
16856 +               if (swcr_sessions == NULL) {
16857 +                       i = 1; /* We leave swcr_sessions[0] empty */
16858 +                       swcr_sesnum = CRYPTO_SW_SESSIONS;
16859 +               } else
16860 +                       swcr_sesnum *= 2;
16861 +
16862 +               swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
16863 +               if (swd == NULL) {
16864 +                       /* Reset session number */
16865 +                       if (swcr_sesnum == CRYPTO_SW_SESSIONS)
16866 +                               swcr_sesnum = 0;
16867 +                       else
16868 +                               swcr_sesnum /= 2;
16869 +                       dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16870 +                       return ENOBUFS;
16871 +               }
16872 +               memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
16873 +
16874 +               /* Copy existing sessions */
16875 +               if (swcr_sessions) {
16876 +                       memcpy(swd, swcr_sessions,
16877 +                           (swcr_sesnum / 2) * sizeof(struct swcr_data *));
16878 +                       kfree(swcr_sessions);
16879 +               }
16880 +
16881 +               swcr_sessions = swd;
16882 +       }
16883 +
16884 +       swd = &swcr_sessions[i];
16885 +       *sid = i;
16886 +
16887 +       while (cri) {
16888 +               *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
16889 +                               SLAB_ATOMIC);
16890 +               if (*swd == NULL) {
16891 +                       swcr_freesession(NULL, i);
16892 +                       dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16893 +                       return ENOBUFS;
16894 +               }
16895 +               memset(*swd, 0, sizeof(struct swcr_data));
16896 +
16897 +               if (cri->cri_alg > CRYPTO_ALGORITHM_MAX) {
16898 +                       printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
16899 +                       swcr_freesession(NULL, i);
16900 +                       return EINVAL;
16901 +               }
16902 +
16903 +               algo = crypto_details[cri->cri_alg].alg_name;
16904 +               if (!algo || !*algo) {
16905 +                       printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
16906 +                       swcr_freesession(NULL, i);
16907 +                       return EINVAL;
16908 +               }
16909 +
16910 +               mode = crypto_details[cri->cri_alg].mode;
16911 +               sw_type = crypto_details[cri->cri_alg].sw_type;
16912 +
16913 +               /* Algorithm specific configuration */
16914 +               switch (cri->cri_alg) {
16915 +               case CRYPTO_NULL_CBC:
16916 +                       cri->cri_klen = 0; /* make it work with crypto API */
16917 +                       break;
16918 +               default:
16919 +                       break;
16920 +               }
16921 +
16922 +               if (sw_type == SW_TYPE_BLKCIPHER) {
16923 +                       dprintk("%s crypto_alloc_blkcipher(%s, 0x%x)\n", __FUNCTION__,
16924 +                                       algo, mode);
16925 +
16926 +                       (*swd)->sw_tfm = crypto_blkcipher_tfm(
16927 +                                                               crypto_alloc_blkcipher(algo, 0,
16928 +                                                                       CRYPTO_ALG_ASYNC));
16929 +                       if (!(*swd)->sw_tfm) {
16930 +                               dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s,0x%x)\n",
16931 +                                               algo,mode);
16932 +                               swcr_freesession(NULL, i);
16933 +                               return EINVAL;
16934 +                       }
16935 +
16936 +                       if (debug) {
16937 +                               dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
16938 +                                               __FUNCTION__,cri->cri_klen,(cri->cri_klen + 7)/8);
16939 +                               for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
16940 +                               {
16941 +                                       dprintk("%s0x%x", (i % 8) ? " " : "\n    ",cri->cri_key[i]);
16942 +                               }
16943 +                               dprintk("\n");
16944 +                       }
16945 +                       error = crypto_blkcipher_setkey(
16946 +                                               crypto_blkcipher_cast((*swd)->sw_tfm), cri->cri_key,
16947 +                                                       (cri->cri_klen + 7) / 8);
16948 +                       if (error) {
16949 +                               printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
16950 +                                               (*swd)->sw_tfm->crt_flags);
16951 +                               swcr_freesession(NULL, i);
16952 +                               return error;
16953 +                       }
16954 +               } else if (sw_type == SW_TYPE_HMAC || sw_type == SW_TYPE_HASH) {
16955 +                       dprintk("%s crypto_alloc_hash(%s, 0x%x)\n", __FUNCTION__,
16956 +                                       algo, mode);
16957 +
16958 +                       (*swd)->sw_tfm = crypto_hash_tfm(
16959 +                                                               crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
16960 +
16961 +                       if (!(*swd)->sw_tfm) {
16962 +                               dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
16963 +                                               algo, mode);
16964 +                               swcr_freesession(NULL, i);
16965 +                               return EINVAL;
16966 +                       }
16967 +
16968 +                       (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
16969 +                       (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
16970 +                               SLAB_ATOMIC);
16971 +                       if ((*swd)->u.hmac.sw_key == NULL) {
16972 +                               swcr_freesession(NULL, i);
16973 +                               dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16974 +                               return ENOBUFS;
16975 +                       }
16976 +                       memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
16977 +                       if (cri->cri_mlen) {
16978 +                               (*swd)->u.hmac.sw_mlen = cri->cri_mlen;
16979 +                       } else {
16980 +                               (*swd)->u.hmac.sw_mlen =
16981 +                                               crypto_hash_digestsize(
16982 +                                                               crypto_hash_cast((*swd)->sw_tfm));
16983 +                       }
16984 +               } else if (sw_type == SW_TYPE_COMP) {
16985 +                       (*swd)->sw_tfm = crypto_comp_tfm(
16986 +                                       crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
16987 +                       if (!(*swd)->sw_tfm) {
16988 +                               dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
16989 +                                               algo, mode);
16990 +                               swcr_freesession(NULL, i);
16991 +                               return EINVAL;
16992 +                       }
16993 +                       (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
16994 +                       if ((*swd)->u.sw_comp_buf == NULL) {
16995 +                               swcr_freesession(NULL, i);
16996 +                               dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16997 +                               return ENOBUFS;
16998 +                       }
16999 +               } else {
17000 +                       printk("cryptosoft: Unhandled sw_type %d\n", sw_type);
17001 +                       swcr_freesession(NULL, i);
17002 +                       return EINVAL;
17003 +               }
17004 +
17005 +               (*swd)->sw_alg = cri->cri_alg;
17006 +               (*swd)->sw_type = sw_type;
17007 +
17008 +               cri = cri->cri_next;
17009 +               swd = &((*swd)->sw_next);
17010 +       }
17011 +       return 0;
17012 +}
17013 +
17014 +/*
17015 + * Free a session.
17016 + */
17017 +static int
17018 +swcr_freesession(device_t dev, u_int64_t tid)
17019 +{
17020 +       struct swcr_data *swd;
17021 +       u_int32_t sid = CRYPTO_SESID2LID(tid);
17022 +
17023 +       dprintk("%s()\n", __FUNCTION__);
17024 +       if (sid > swcr_sesnum || swcr_sessions == NULL ||
17025 +                       swcr_sessions[sid] == NULL) {
17026 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17027 +               return(EINVAL);
17028 +       }
17029 +
17030 +       /* Silently accept and return */
17031 +       if (sid == 0)
17032 +               return(0);
17033 +
17034 +       while ((swd = swcr_sessions[sid]) != NULL) {
17035 +               swcr_sessions[sid] = swd->sw_next;
17036 +               if (swd->sw_tfm)
17037 +                       crypto_free_tfm(swd->sw_tfm);
17038 +               if (swd->sw_type == SW_TYPE_COMP) {
17039 +                       if (swd->u.sw_comp_buf)
17040 +                               kfree(swd->u.sw_comp_buf);
17041 +               } else {
17042 +                       if (swd->u.hmac.sw_key)
17043 +                               kfree(swd->u.hmac.sw_key);
17044 +               }
17045 +               kfree(swd);
17046 +       }
17047 +       return 0;
17048 +}
17049 +
17050 +/*
17051 + * Process a software request.
17052 + */
17053 +static int
17054 +swcr_process(device_t dev, struct cryptop *crp, int hint)
17055 +{
17056 +       struct cryptodesc *crd;
17057 +       struct swcr_data *sw;
17058 +       u_int32_t lid;
17059 +#define SCATTERLIST_MAX 16
17060 +       struct scatterlist sg[SCATTERLIST_MAX];
17061 +       int sg_num, sg_len, skip;
17062 +       struct sk_buff *skb = NULL;
17063 +       struct uio *uiop = NULL;
17064 +
17065 +       dprintk("%s()\n", __FUNCTION__);
17066 +       /* Sanity check */
17067 +       if (crp == NULL) {
17068 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17069 +               return EINVAL;
17070 +       }
17071 +
17072 +       crp->crp_etype = 0;
17073 +
17074 +       if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
17075 +               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17076 +               crp->crp_etype = EINVAL;
17077 +               goto done;
17078 +       }
17079 +
17080 +       lid = crp->crp_sid & 0xffffffff;
17081 +       if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
17082 +                       swcr_sessions[lid] == NULL) {
17083 +               crp->crp_etype = ENOENT;
17084 +               dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
17085 +               goto done;
17086 +       }
17087 +
17088 +       /*
17089 +        * do some error checking outside of the loop for SKB and IOV processing
17090 +        * this leaves us with valid skb or uiop pointers for later
17091 +        */
17092 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
17093 +               skb = (struct sk_buff *) crp->crp_buf;
17094 +               if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
17095 +                       printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
17096 +                                       skb_shinfo(skb)->nr_frags);
17097 +                       goto done;
17098 +               }
17099 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
17100 +               uiop = (struct uio *) crp->crp_buf;
17101 +               if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
17102 +                       printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
17103 +                                       uiop->uio_iovcnt);
17104 +                       goto done;
17105 +               }
17106 +       }
17107 +
17108 +       /* Go through crypto descriptors, processing as we go */
17109 +       for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
17110 +               /*
17111 +                * Find the crypto context.
17112 +                *
17113 +                * XXX Note that the logic here prevents us from having
17114 +                * XXX the same algorithm multiple times in a session
17115 +                * XXX (or rather, we can but it won't give us the right
17116 +                * XXX results). To do that, we'd need some way of differentiating
17117 +                * XXX between the various instances of an algorithm (so we can
17118 +                * XXX locate the correct crypto context).
17119 +                */
17120 +               for (sw = swcr_sessions[lid]; sw && sw->sw_alg != crd->crd_alg;
17121 +                               sw = sw->sw_next)
17122 +                       ;
17123 +
17124 +               /* No such context ? */
17125 +               if (sw == NULL) {
17126 +                       crp->crp_etype = EINVAL;
17127 +                       dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17128 +                       goto done;
17129 +               }
17130 +
17131 +               skip = crd->crd_skip;
17132 +
17133 +               /*
17134 +                * setup the SG list skip from the start of the buffer
17135 +                */
17136 +               memset(sg, 0, sizeof(sg));
17137 +               if (crp->crp_flags & CRYPTO_F_SKBUF) {
17138 +                       int i, len;
17139 +
17140 +                       sg_num = 0;
17141 +                       sg_len = 0;
17142 +
17143 +                       if (skip < skb_headlen(skb)) {
17144 +                               len = skb_headlen(skb) - skip;
17145 +                               if (len + sg_len > crd->crd_len)
17146 +                                       len = crd->crd_len - sg_len;
17147 +                               sg_set_page(&sg[sg_num],
17148 +                                       virt_to_page(skb->data + skip), len,
17149 +                                       offset_in_page(skb->data + skip));
17150 +                               sg_len += len;
17151 +                               sg_num++;
17152 +                               skip = 0;
17153 +                       } else
17154 +                               skip -= skb_headlen(skb);
17155 +
17156 +                       for (i = 0; sg_len < crd->crd_len &&
17157 +                                               i < skb_shinfo(skb)->nr_frags &&
17158 +                                               sg_num < SCATTERLIST_MAX; i++) {
17159 +                               if (skip < skb_shinfo(skb)->frags[i].size) {
17160 +                                       len = skb_shinfo(skb)->frags[i].size - skip;
17161 +                                       if (len + sg_len > crd->crd_len)
17162 +                                               len = crd->crd_len - sg_len;
17163 +                                       sg_set_page(&sg[sg_num],
17164 +                                               skb_shinfo(skb)->frags[i].page,
17165 +                                               len,
17166 +                                               skb_shinfo(skb)->frags[i].page_offset + skip);
17167 +                                       sg_len += len;
17168 +                                       sg_num++;
17169 +                                       skip = 0;
17170 +                               } else
17171 +                                       skip -= skb_shinfo(skb)->frags[i].size;
17172 +                       }
17173 +               } else if (crp->crp_flags & CRYPTO_F_IOV) {
17174 +                       int len;
17175 +
17176 +                       sg_len = 0;
17177 +                       for (sg_num = 0; sg_len <= crd->crd_len &&
17178 +                                       sg_num < uiop->uio_iovcnt &&
17179 +                                       sg_num < SCATTERLIST_MAX; sg_num++) {
17180 +                               if (skip <= uiop->uio_iov[sg_num].iov_len) {
17181 +                                       len = uiop->uio_iov[sg_num].iov_len - skip;
17182 +                                       if (len + sg_len > crd->crd_len)
17183 +                                               len = crd->crd_len - sg_len;
17184 +                                       sg_set_page(&sg[sg_num],
17185 +                                               virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
17186 +                                               len,
17187 +                                               offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
17188 +                                       sg_len += len;
17189 +                                       skip = 0;
17190 +                               } else 
17191 +                                       skip -= uiop->uio_iov[sg_num].iov_len;
17192 +                       }
17193 +               } else {
17194 +                       sg_len = (crp->crp_ilen - skip);
17195 +                       if (sg_len > crd->crd_len)
17196 +                               sg_len = crd->crd_len;
17197 +                       sg_set_page(&sg[0], virt_to_page(crp->crp_buf + skip),
17198 +                               sg_len, offset_in_page(crp->crp_buf + skip));
17199 +                       sg_num = 1;
17200 +               }
17201 +
17202 +
17203 +               switch (sw->sw_type) {
17204 +               case SW_TYPE_BLKCIPHER: {
17205 +                       unsigned char iv[EALG_MAX_BLOCK_LEN];
17206 +                       unsigned char *ivp = iv;
17207 +                       int ivsize = 
17208 +                               crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
17209 +                       struct blkcipher_desc desc;
17210 +
17211 +                       if (sg_len < crypto_blkcipher_blocksize(
17212 +                                       crypto_blkcipher_cast(sw->sw_tfm))) {
17213 +                               crp->crp_etype = EINVAL;
17214 +                               dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
17215 +                                               sg_len, crypto_blkcipher_blocksize(
17216 +                                                       crypto_blkcipher_cast(sw->sw_tfm)));
17217 +                               goto done;
17218 +                       }
17219 +
17220 +                       if (ivsize > sizeof(iv)) {
17221 +                               crp->crp_etype = EINVAL;
17222 +                               dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17223 +                               goto done;
17224 +                       }
17225 +
17226 +                       if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
17227 +                               int i, error;
17228 +
17229 +                               if (debug) {
17230 +                                       dprintk("%s key:", __FUNCTION__);
17231 +                                       for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
17232 +                                               dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
17233 +                                                               crd->crd_key[i]);
17234 +                                       dprintk("\n");
17235 +                               }
17236 +                               error = crypto_blkcipher_setkey(
17237 +                                                       crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
17238 +                                                       (crd->crd_klen + 7) / 8);
17239 +                               if (error) {
17240 +                                       dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
17241 +                                                       error, sw->sw_tfm->crt_flags);
17242 +                                       crp->crp_etype = -error;
17243 +                               }
17244 +                       }
17245 +
17246 +                       memset(&desc, 0, sizeof(desc));
17247 +                       desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
17248 +
17249 +                       if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
17250 +
17251 +                               if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
17252 +                                       ivp = crd->crd_iv;
17253 +                               } else {
17254 +                                       get_random_bytes(ivp, ivsize);
17255 +                               }
17256 +                               /*
17257 +                                * do we have to copy the IV back to the buffer ?
17258 +                                */
17259 +                               if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
17260 +                                       crypto_copyback(crp->crp_flags, crp->crp_buf,
17261 +                                                       crd->crd_inject, ivsize, (caddr_t)ivp);
17262 +                               }
17263 +                               desc.info = ivp;
17264 +                               crypto_blkcipher_encrypt_iv(&desc, sg, sg, sg_len);
17265 +
17266 +                       } else { /*decrypt */
17267 +
17268 +                               if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
17269 +                                       ivp = crd->crd_iv;
17270 +                               } else {
17271 +                                       crypto_copydata(crp->crp_flags, crp->crp_buf,
17272 +                                                       crd->crd_inject, ivsize, (caddr_t)ivp);
17273 +                               }
17274 +                               desc.info = ivp;
17275 +                               crypto_blkcipher_decrypt_iv(&desc, sg, sg, sg_len);
17276 +                       }
17277 +                       } break;
17278 +               case SW_TYPE_HMAC:
17279 +               case SW_TYPE_HASH:
17280 +                       {
17281 +                       char result[HASH_MAX_LEN];
17282 +                       struct hash_desc desc;
17283 +
17284 +                       /* check we have room for the result */
17285 +                       if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
17286 +                               dprintk(
17287 +                       "cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d digestsize=%d\n",
17288 +                                               crp->crp_ilen, crd->crd_skip + sg_len, crd->crd_inject,
17289 +                                               sw->u.hmac.sw_mlen);
17290 +                               crp->crp_etype = EINVAL;
17291 +                               goto done;
17292 +                       }
17293 +
17294 +                       memset(&desc, 0, sizeof(desc));
17295 +                       desc.tfm = crypto_hash_cast(sw->sw_tfm);
17296 +
17297 +                       memset(result, 0, sizeof(result));
17298 +
17299 +                       if (sw->sw_type == SW_TYPE_HMAC) {
17300 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
17301 +                               crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
17302 +                                               sg, sg_num, result);
17303 +#else
17304 +                               crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
17305 +                                               sw->u.hmac.sw_klen);
17306 +                               crypto_hash_digest(&desc, sg, sg_len, result);
17307 +#endif /* #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
17308 +                               
17309 +                       } else { /* SW_TYPE_HASH */
17310 +                               crypto_hash_digest(&desc, sg, sg_len, result);
17311 +                       }
17312 +
17313 +                       crypto_copyback(crp->crp_flags, crp->crp_buf,
17314 +                                       crd->crd_inject, sw->u.hmac.sw_mlen, result);
17315 +                       }
17316 +                       break;
17317 +
17318 +               case SW_TYPE_COMP: {
17319 +                       void *ibuf = NULL;
17320 +                       void *obuf = sw->u.sw_comp_buf;
17321 +                       int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
17322 +                       int ret = 0;
17323 +
17324 +                       /*
17325 +                        * we need to use an additional copy if there is more than one
17326 +                        * input chunk since the kernel comp routines do not handle
17327 +                        * SG yet.  Otherwise we just use the input buffer as is.
17328 +                        * Rather than allocate another buffer we just split the tmp
17329 +                        * buffer we already have.
17330 +                        * Perhaps we should just use zlib directly ?
17331 +                        */
17332 +                       if (sg_num > 1) {
17333 +                               int blk;
17334 +
17335 +                               ibuf = obuf;
17336 +                               for (blk = 0; blk < sg_num; blk++) {
17337 +                                       memcpy(obuf, sg_virt(&sg[blk]),
17338 +                                                       sg[blk].length);
17339 +                                       obuf += sg[blk].length;
17340 +                               }
17341 +                               olen -= sg_len;
17342 +                       } else
17343 +                               ibuf = sg_virt(&sg[0]);
17344 +
17345 +                       if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
17346 +                               ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
17347 +                                               ibuf, ilen, obuf, &olen);
17348 +                               if (!ret && olen > crd->crd_len) {
17349 +                                       dprintk("cryptosoft: ERANGE compress %d into %d\n",
17350 +                                                       crd->crd_len, olen);
17351 +                                       if (swcr_fail_if_compression_grows)
17352 +                                               ret = ERANGE;
17353 +                               }
17354 +                       } else { /* decompress */
17355 +                               ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
17356 +                                               ibuf, ilen, obuf, &olen);
17357 +                               if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
17358 +                                       dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
17359 +                                                       "space for %d,at offset %d\n",
17360 +                                                       crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
17361 +                                       ret = ETOOSMALL;
17362 +                               }
17363 +                       }
17364 +                       if (ret)
17365 +                               dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);
17366 +
17367 +                       /*
17368 +                        * on success copy result back,
17369 +                        * linux crypto API returns -errno,  we need to fix that
17370 +                        */
17371 +                       crp->crp_etype = ret < 0 ? -ret : ret;
17372 +                       if (ret == 0) {
17373 +                               /* copy back the result and return its size */
17374 +                               crypto_copyback(crp->crp_flags, crp->crp_buf,
17375 +                                               crd->crd_inject, olen, obuf);
17376 +                               crp->crp_olen = olen;
17377 +                       }
17378 +
17379 +
17380 +                       } break;
17381 +
17382 +               default:
17383 +                       /* Unknown/unsupported algorithm */
17384 +                       dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17385 +                       crp->crp_etype = EINVAL;
17386 +                       goto done;
17387 +               }
17388 +       }
17389 +
17390 +done:
17391 +       crypto_done(crp);
17392 +       return 0;
17393 +}
17394 +
17395 +static int
17396 +cryptosoft_init(void)
17397 +{
17398 +       int i, sw_type, mode;
17399 +       char *algo;
17400 +
17401 +       dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);
17402 +
17403 +       softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);
17404 +
17405 +       swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
17406 +                       CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
17407 +       if (swcr_id < 0) {
17408 +               printk("Software crypto device cannot initialize!");
17409 +               return -ENODEV;
17410 +       }
17411 +
17412 +#define        REGISTER(alg) \
17413 +               crypto_register(swcr_id, alg, 0,0);
17414 +
17415 +       for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; ++i)
17416 +       {
17417 +               
17418 +               algo = crypto_details[i].alg_name;
17419 +               if (!algo || !*algo)
17420 +               {
17421 +                       dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
17422 +                       continue;
17423 +               }
17424 +
17425 +               mode = crypto_details[i].mode;
17426 +               sw_type = crypto_details[i].sw_type;
17427 +
17428 +               switch (sw_type)
17429 +               {
17430 +                       case SW_TYPE_CIPHER:
17431 +                               if (crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC))
17432 +                               {
17433 +                                       REGISTER(i);
17434 +                               }
17435 +                               else
17436 +                               {
17437 +                                       dprintk("%s:CIPHER algorithm %d:'%s' not supported\n",
17438 +                                                               __FUNCTION__, i, algo);
17439 +                               }
17440 +                               break;
17441 +                       case SW_TYPE_HMAC:
17442 +                               if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
17443 +                               {
17444 +                                       REGISTER(i);
17445 +                               }
17446 +                               else
17447 +                               {
17448 +                                       dprintk("%s:HMAC algorithm %d:'%s' not supported\n",
17449 +                                                               __FUNCTION__, i, algo);
17450 +                               }
17451 +                               break;
17452 +                       case SW_TYPE_HASH:
17453 +                               if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
17454 +                               {
17455 +                                       REGISTER(i);
17456 +                               }
17457 +                               else
17458 +                               {
17459 +                                       dprintk("%s:HASH algorithm %d:'%s' not supported\n",
17460 +                                                               __FUNCTION__, i, algo);
17461 +                               }
17462 +                               break;
17463 +                       case SW_TYPE_COMP:
17464 +                               if (crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC))
17465 +                               {
17466 +                                       REGISTER(i);
17467 +                               }
17468 +                               else
17469 +                               {
17470 +                                       dprintk("%s:COMP algorithm %d:'%s' not supported\n",
17471 +                                                               __FUNCTION__, i, algo);
17472 +                               }
17473 +                               break;
17474 +                       case SW_TYPE_BLKCIPHER:
17475 +                               if (crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC))
17476 +                               {
17477 +                                       REGISTER(i);
17478 +                               }
17479 +                               else
17480 +                               {
17481 +                                       dprintk("%s:BLKCIPHER algorithm %d:'%s' not supported\n",
17482 +                                                               __FUNCTION__, i, algo);
17483 +                               }
17484 +                               break;
17485 +                       default:
17486 +                               dprintk(
17487 +                               "%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
17488 +                                       __FUNCTION__, sw_type, i, algo);
17489 +                               break;
17490 +               }
17491 +       }
17492 +
17493 +       return(0);
17494 +}
17495 +
17496 +static void
17497 +cryptosoft_exit(void)
17498 +{
17499 +       dprintk("%s()\n", __FUNCTION__);
17500 +       crypto_unregister_all(swcr_id);
17501 +       swcr_id = -1;
17502 +}
17503 +
17504 +module_init(cryptosoft_init);
17505 +module_exit(cryptosoft_exit);
17506 +
17507 +MODULE_LICENSE("Dual BSD/GPL");
17508 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
17509 +MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");
17510 --- /dev/null
17511 +++ b/crypto/ocf/rndtest.c
17512 @@ -0,0 +1,300 @@
17513 +/*     $OpenBSD$       */
17514 +
17515 +/*
17516 + * OCF/Linux port done by David McCullough <david_mccullough@securecomputing.com>
17517 + * Copyright (C) 2006-2007 David McCullough
17518 + * Copyright (C) 2004-2005 Intel Corporation.
17519 + * The license and original author are listed below.
17520 + *
17521 + * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
17522 + * All rights reserved.
17523 + *
17524 + * Redistribution and use in source and binary forms, with or without
17525 + * modification, are permitted provided that the following conditions
17526 + * are met:
17527 + * 1. Redistributions of source code must retain the above copyright
17528 + *    notice, this list of conditions and the following disclaimer.
17529 + * 2. Redistributions in binary form must reproduce the above copyright
17530 + *    notice, this list of conditions and the following disclaimer in the
17531 + *    documentation and/or other materials provided with the distribution.
17532 + * 3. All advertising materials mentioning features or use of this software
17533 + *    must display the following acknowledgement:
17534 + *     This product includes software developed by Jason L. Wright
17535 + * 4. The name of the author may not be used to endorse or promote products
17536 + *    derived from this software without specific prior written permission.
17537 + *
17538 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17539 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17540 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17541 + * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
17542 + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
17543 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
17544 + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
17545 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
17546 + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
17547 + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
17548 + * POSSIBILITY OF SUCH DAMAGE.
17549 + */
17550 +
17551 +#ifndef AUTOCONF_INCLUDED
17552 +#include <linux/config.h>
17553 +#endif
17554 +#include <linux/module.h>
17555 +#include <linux/list.h>
17556 +#include <linux/wait.h>
17557 +#include <linux/time.h>
17558 +#include <linux/version.h>
17559 +#include <linux/unistd.h>
17560 +#include <linux/kernel.h>
17561 +#include <linux/string.h>
17562 +#include <linux/time.h>
17563 +#include <cryptodev.h>
17564 +#include "rndtest.h"
17565 +
17566 +static struct rndtest_stats rndstats;
17567 +
17568 +static void rndtest_test(struct rndtest_state *);
17569 +
17570 +/* The tests themselves */
17571 +static int rndtest_monobit(struct rndtest_state *);
17572 +static int rndtest_runs(struct rndtest_state *);
17573 +static int rndtest_longruns(struct rndtest_state *);
17574 +static int rndtest_chi_4(struct rndtest_state *);
17575 +
17576 +static int rndtest_runs_check(struct rndtest_state *, int, int *);
17577 +static void rndtest_runs_record(struct rndtest_state *, int, int *);
17578 +
17579 +static const struct rndtest_testfunc {
17580 +       int (*test)(struct rndtest_state *);
17581 +} rndtest_funcs[] = {
17582 +       { rndtest_monobit },
17583 +       { rndtest_runs },
17584 +       { rndtest_chi_4 },
17585 +       { rndtest_longruns },
17586 +};
17587 +
17588 +#define        RNDTEST_NTESTS  (sizeof(rndtest_funcs)/sizeof(rndtest_funcs[0]))
17589 +
17590 +static void
17591 +rndtest_test(struct rndtest_state *rsp)
17592 +{
17593 +       int i, rv = 0;
17594 +
17595 +       rndstats.rst_tests++;
17596 +       for (i = 0; i < RNDTEST_NTESTS; i++)
17597 +               rv |= (*rndtest_funcs[i].test)(rsp);
17598 +       rsp->rs_discard = (rv != 0);
17599 +}
17600 +
17601 +
17602 +extern int crypto_debug;
17603 +#define rndtest_verbose 2
17604 +#define rndtest_report(rsp, failure, fmt, a...) \
17605 +       { if (failure || crypto_debug) { printk("rng_test: " fmt "\n", a); } else; }
17606 +
17607 +#define        RNDTEST_MONOBIT_MINONES 9725
17608 +#define        RNDTEST_MONOBIT_MAXONES 10275
17609 +
17610 +static int
17611 +rndtest_monobit(struct rndtest_state *rsp)
17612 +{
17613 +       int i, ones = 0, j;
17614 +       u_int8_t r;
17615 +
17616 +       for (i = 0; i < RNDTEST_NBYTES; i++) {
17617 +               r = rsp->rs_buf[i];
17618 +               for (j = 0; j < 8; j++, r <<= 1)
17619 +                       if (r & 0x80)
17620 +                               ones++;
17621 +       }
17622 +       if (ones > RNDTEST_MONOBIT_MINONES &&
17623 +           ones < RNDTEST_MONOBIT_MAXONES) {
17624 +               if (rndtest_verbose > 1)
17625 +                       rndtest_report(rsp, 0, "monobit pass (%d < %d < %d)",
17626 +                           RNDTEST_MONOBIT_MINONES, ones,
17627 +                           RNDTEST_MONOBIT_MAXONES);
17628 +               return (0);
17629 +       } else {
17630 +               if (rndtest_verbose)
17631 +                       rndtest_report(rsp, 1,
17632 +                           "monobit failed (%d ones)", ones);
17633 +               rndstats.rst_monobit++;
17634 +               return (-1);
17635 +       }
17636 +}
17637 +
17638 +#define        RNDTEST_RUNS_NINTERVAL  6
17639 +
17640 +static const struct rndtest_runs_tabs {
17641 +       u_int16_t min, max;
17642 +} rndtest_runs_tab[] = {
17643 +       { 2343, 2657 },
17644 +       { 1135, 1365 },
17645 +       { 542, 708 },
17646 +       { 251, 373 },
17647 +       { 111, 201 },
17648 +       { 111, 201 },
17649 +};
17650 +
17651 +static int
17652 +rndtest_runs(struct rndtest_state *rsp)
17653 +{
17654 +       int i, j, ones, zeros, rv = 0;
17655 +       int onei[RNDTEST_RUNS_NINTERVAL], zeroi[RNDTEST_RUNS_NINTERVAL];
17656 +       u_int8_t c;
17657 +
17658 +       bzero(onei, sizeof(onei));
17659 +       bzero(zeroi, sizeof(zeroi));
17660 +       ones = zeros = 0;
17661 +       for (i = 0; i < RNDTEST_NBYTES; i++) {
17662 +               c = rsp->rs_buf[i];
17663 +               for (j = 0; j < 8; j++, c <<= 1) {
17664 +                       if (c & 0x80) {
17665 +                               ones++;
17666 +                               rndtest_runs_record(rsp, zeros, zeroi);
17667 +                               zeros = 0;
17668 +                       } else {
17669 +                               zeros++;
17670 +                               rndtest_runs_record(rsp, ones, onei);
17671 +                               ones = 0;
17672 +                       }
17673 +               }
17674 +       }
17675 +       rndtest_runs_record(rsp, ones, onei);
17676 +       rndtest_runs_record(rsp, zeros, zeroi);
17677 +
17678 +       rv |= rndtest_runs_check(rsp, 0, zeroi);
17679 +       rv |= rndtest_runs_check(rsp, 1, onei);
17680 +
17681 +       if (rv)
17682 +               rndstats.rst_runs++;
17683 +
17684 +       return (rv);
17685 +}
17686 +
17687 +static void
17688 +rndtest_runs_record(struct rndtest_state *rsp, int len, int *intrv)
17689 +{
17690 +       if (len == 0)
17691 +               return;
17692 +       if (len > RNDTEST_RUNS_NINTERVAL)
17693 +               len = RNDTEST_RUNS_NINTERVAL;
17694 +       len -= 1;
17695 +       intrv[len]++;
17696 +}
17697 +
17698 +static int
17699 +rndtest_runs_check(struct rndtest_state *rsp, int val, int *src)
17700 +{
17701 +       int i, rv = 0;
17702 +
17703 +       for (i = 0; i < RNDTEST_RUNS_NINTERVAL; i++) {
17704 +               if (src[i] < rndtest_runs_tab[i].min ||
17705 +                   src[i] > rndtest_runs_tab[i].max) {
17706 +                       rndtest_report(rsp, 1,
17707 +                           "%s interval %d failed (%d, %d-%d)",
17708 +                           val ? "ones" : "zeros",
17709 +                           i + 1, src[i], rndtest_runs_tab[i].min,
17710 +                           rndtest_runs_tab[i].max);
17711 +                       rv = -1;
17712 +               } else {
17713 +                       rndtest_report(rsp, 0,
17714 +                           "runs pass %s interval %d (%d < %d < %d)",
17715 +                           val ? "ones" : "zeros",
17716 +                           i + 1, rndtest_runs_tab[i].min, src[i],
17717 +                           rndtest_runs_tab[i].max);
17718 +               }
17719 +       }
17720 +       return (rv);
17721 +}
17722 +
17723 +static int
17724 +rndtest_longruns(struct rndtest_state *rsp)
17725 +{
17726 +       int i, j, ones = 0, zeros = 0, maxones = 0, maxzeros = 0;
17727 +       u_int8_t c;
17728 +
17729 +       for (i = 0; i < RNDTEST_NBYTES; i++) {
17730 +               c = rsp->rs_buf[i];
17731 +               for (j = 0; j < 8; j++, c <<= 1) {
17732 +                       if (c & 0x80) {
17733 +                               zeros = 0;
17734 +                               ones++;
17735 +                               if (ones > maxones)
17736 +                                       maxones = ones;
17737 +                       } else {
17738 +                               ones = 0;
17739 +                               zeros++;
17740 +                               if (zeros > maxzeros)
17741 +                                       maxzeros = zeros;
17742 +                       }
17743 +               }
17744 +       }
17745 +
17746 +       if (maxones < 26 && maxzeros < 26) {
17747 +               rndtest_report(rsp, 0, "longruns pass (%d ones, %d zeros)",
17748 +                       maxones, maxzeros);
17749 +               return (0);
17750 +       } else {
17751 +               rndtest_report(rsp, 1, "longruns fail (%d ones, %d zeros)",
17752 +                       maxones, maxzeros);
17753 +               rndstats.rst_longruns++;
17754 +               return (-1);
17755 +       }
17756 +}
17757 +
17758 +/*
17759 + * chi^2 test over 4 bits: (this is called the poker test in FIPS 140-2,
17760 + * but it is really the chi^2 test over 4 bits (the poker test as described
17761 + * by Knuth vol 2 is something different, and I take him as authoritative
17762 + * on nomenclature over NIST).
17763 + */
17764 +#define        RNDTEST_CHI4_K  16
17765 +#define        RNDTEST_CHI4_K_MASK     (RNDTEST_CHI4_K - 1)
17766 +
17767 +/*
17768 + * The unnormalized values are used so that we don't have to worry about
17769 + * fractional precision.  The "real" value is found by:
17770 + *     (V - 1562500) * (16 / 5000) = Vn   (where V is the unnormalized value)
17771 + */
17772 +#define        RNDTEST_CHI4_VMIN       1563181         /* 2.1792 */
17773 +#define        RNDTEST_CHI4_VMAX       1576929         /* 46.1728 */
17774 +
17775 +static int
17776 +rndtest_chi_4(struct rndtest_state *rsp)
17777 +{
17778 +       unsigned int freq[RNDTEST_CHI4_K], i, sum;
17779 +
17780 +       for (i = 0; i < RNDTEST_CHI4_K; i++)
17781 +               freq[i] = 0;
17782 +
17783 +       /* Get number of occurrences of each 4 bit pattern */
17784 +       for (i = 0; i < RNDTEST_NBYTES; i++) {
17785 +               freq[(rsp->rs_buf[i] >> 4) & RNDTEST_CHI4_K_MASK]++;
17786 +               freq[(rsp->rs_buf[i] >> 0) & RNDTEST_CHI4_K_MASK]++;
17787 +       }
17788 +
17789 +       for (i = 0, sum = 0; i < RNDTEST_CHI4_K; i++)
17790 +               sum += freq[i] * freq[i];
17791 +
17792 +       if (sum >= 1563181 && sum <= 1576929) {
17793 +               rndtest_report(rsp, 0, "chi^2(4): pass (sum %u)", sum);
17794 +               return (0);
17795 +       } else {
17796 +               rndtest_report(rsp, 1, "chi^2(4): failed (sum %u)", sum);
17797 +               rndstats.rst_chi++;
17798 +               return (-1);
17799 +       }
17800 +}
17801 +
17802 +int
17803 +rndtest_buf(unsigned char *buf)
17804 +{
17805 +       struct rndtest_state rsp;
17806 +
17807 +       memset(&rsp, 0, sizeof(rsp));
17808 +       rsp.rs_buf = buf;
17809 +       rndtest_test(&rsp);
17810 +       return(rsp.rs_discard);
17811 +}
17812 +
17813 --- /dev/null
17814 +++ b/crypto/ocf/rndtest.h
17815 @@ -0,0 +1,54 @@
17816 +/*     $FreeBSD: src/sys/dev/rndtest/rndtest.h,v 1.1 2003/03/11 22:54:44 sam Exp $     */
17817 +/*     $OpenBSD$       */
17818 +
17819 +/*
17820 + * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
17821 + * All rights reserved.
17822 + *
17823 + * Redistribution and use in source and binary forms, with or without
17824 + * modification, are permitted provided that the following conditions
17825 + * are met:
17826 + * 1. Redistributions of source code must retain the above copyright
17827 + *    notice, this list of conditions and the following disclaimer.
17828 + * 2. Redistributions in binary form must reproduce the above copyright
17829 + *    notice, this list of conditions and the following disclaimer in the
17830 + *    documentation and/or other materials provided with the distribution.
17831 + * 3. All advertising materials mentioning features or use of this software
17832 + *    must display the following acknowledgement:
17833 + *     This product includes software developed by Jason L. Wright
17834 + * 4. The name of the author may not be used to endorse or promote products
17835 + *    derived from this software without specific prior written permission.
17836 + *
17837 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17838 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17839 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17840 + * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
17841 + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
17842 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
17843 + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
17844 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
17845 + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
17846 + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
17847 + * POSSIBILITY OF SUCH DAMAGE.
17848 + */
17849 +
17850 +
17851 +/* Some of the tests depend on these values */
17852 +#define        RNDTEST_NBYTES  2500
17853 +#define        RNDTEST_NBITS   (8 * RNDTEST_NBYTES)
17854 +
17855 +struct rndtest_state {
17856 +       int             rs_discard;     /* discard/accept random data */
17857 +       u_int8_t        *rs_buf;
17858 +};
17859 +
17860 +struct rndtest_stats {
17861 +       u_int32_t       rst_discard;    /* number of bytes discarded */
17862 +       u_int32_t       rst_tests;      /* number of test runs */
17863 +       u_int32_t       rst_monobit;    /* monobit test failures */
17864 +       u_int32_t       rst_runs;       /* 0/1 runs failures */
17865 +       u_int32_t       rst_longruns;   /* longruns failures */
17866 +       u_int32_t       rst_chi;        /* chi^2 failures */
17867 +};
17868 +
17869 +extern int rndtest_buf(unsigned char *buf);
17870 --- /dev/null
17871 +++ b/crypto/ocf/ocf-compat.h
17872 @@ -0,0 +1,270 @@
17873 +#ifndef _BSD_COMPAT_H_
17874 +#define _BSD_COMPAT_H_ 1
17875 +/****************************************************************************/
17876 +/*
17877 + * Provide compat routines for older linux kernels and BSD kernels
17878 + *
17879 + * Written by David McCullough <david_mccullough@securecomputing.com>
17880 + * Copyright (C) 2007 David McCullough <david_mccullough@securecomputing.com>
17881 + *
17882 + * LICENSE TERMS
17883 + *
17884 + * The free distribution and use of this software in both source and binary
17885 + * form is allowed (with or without changes) provided that:
17886 + *
17887 + *   1. distributions of this source code include the above copyright
17888 + *      notice, this list of conditions and the following disclaimer;
17889 + *
17890 + *   2. distributions in binary form include the above copyright
17891 + *      notice, this list of conditions and the following disclaimer
17892 + *      in the documentation and/or other associated materials;
17893 + *
17894 + *   3. the copyright holder's name is not used to endorse products
17895 + *      built using this software without specific written permission.
17896 + *
17897 + * ALTERNATIVELY, provided that this notice is retained in full, this file
17898 + * may be distributed under the terms of the GNU General Public License (GPL),
17899 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
17900 + *
17901 + * DISCLAIMER
17902 + *
17903 + * This software is provided 'as is' with no explicit or implied warranties
17904 + * in respect of its properties, including, but not limited to, correctness
17905 + * and/or fitness for purpose.
17906 + */
17907 +/****************************************************************************/
17908 +#ifdef __KERNEL__
17909 +/*
17910 + * fake some BSD driver interface stuff specifically for OCF use
17911 + */
17912 +
17913 +typedef struct ocf_device *device_t;
17914 +
17915 +typedef struct {
17916 +       int (*cryptodev_newsession)(device_t dev, u_int32_t *sidp, struct cryptoini *cri);
17917 +       int (*cryptodev_freesession)(device_t dev, u_int64_t tid);
17918 +       int (*cryptodev_process)(device_t dev, struct cryptop *crp, int hint);
17919 +       int (*cryptodev_kprocess)(device_t dev, struct cryptkop *krp, int hint);
17920 +} device_method_t;
17921 +#define DEVMETHOD(id, func)    id: func
17922 +
17923 +struct ocf_device {
17924 +       char name[32];          /* the driver name */
17925 +       char nameunit[32];      /* the driver name + HW instance */
17926 +       int  unit;
17927 +       device_method_t methods;
17928 +       void *softc;
17929 +};
17930 +
17931 +#define CRYPTODEV_NEWSESSION(dev, sid, cri) \
17932 +       ((*(dev)->methods.cryptodev_newsession)(dev,sid,cri))
17933 +#define CRYPTODEV_FREESESSION(dev, sid) \
17934 +       ((*(dev)->methods.cryptodev_freesession)(dev, sid))
17935 +#define CRYPTODEV_PROCESS(dev, crp, hint) \
17936 +       ((*(dev)->methods.cryptodev_process)(dev, crp, hint))
17937 +#define CRYPTODEV_KPROCESS(dev, krp, hint) \
17938 +       ((*(dev)->methods.cryptodev_kprocess)(dev, krp, hint))
17939 +
17940 +#define device_get_name(dev)   ((dev)->name)
17941 +#define device_get_nameunit(dev)       ((dev)->nameunit)
17942 +#define device_get_unit(dev)   ((dev)->unit)
17943 +#define device_get_softc(dev)  ((dev)->softc)
17944 +
17945 +#define        softc_device_decl \
17946 +               struct ocf_device _device; \
17947 +               device_t
17948 +
17949 +#define        softc_device_init(_sc, _name, _unit, _methods) \
17950 +       if (1) {\
17951 +       strncpy((_sc)->_device.name, _name, sizeof((_sc)->_device.name) - 1); \
17952 +       snprintf((_sc)->_device.nameunit, sizeof((_sc)->_device.name), "%s%d", _name, _unit); \
17953 +       (_sc)->_device.unit = _unit; \
17954 +       (_sc)->_device.methods = _methods; \
17955 +       (_sc)->_device.softc = (void *) _sc; \
17956 +       *(device_t *)((softc_get_device(_sc))+1) = &(_sc)->_device; \
17957 +       } else
17958 +
17959 +#define        softc_get_device(_sc)   (&(_sc)->_device)
17960 +
17961 +/*
17962 + * iomem support for 2.4 and 2.6 kernels
17963 + */
17964 +#include <linux/version.h>
17965 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
17966 +#define ocf_iomem_t    unsigned long
17967 +
17968 +/*
17969 + * implement simple workqueue like support for older kernels
17970 + */
17971 +
17972 +#include <linux/tqueue.h>
17973 +
17974 +#define work_struct tq_struct
17975 +
17976 +#define INIT_WORK(wp, fp, ap) \
17977 +       do { \
17978 +               (wp)->sync = 0; \
17979 +               (wp)->routine = (fp); \
17980 +               (wp)->data = (ap); \
17981 +       } while (0)
17982 +
17983 +#define schedule_work(wp) \
17984 +       do { \
17985 +               queue_task((wp), &tq_immediate); \
17986 +               mark_bh(IMMEDIATE_BH); \
17987 +       } while (0)
17988 +
17989 +#define flush_scheduled_work() run_task_queue(&tq_immediate)
17990 +
17991 +#else
17992 +#define ocf_iomem_t    void __iomem *
17993 +
17994 +#include <linux/workqueue.h>
17995 +
17996 +#endif
17997 +
17998 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
17999 +#include <linux/fdtable.h>
18000 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
18001 +#define files_fdtable(files)   (files)
18002 +#endif
18003 +
18004 +#ifdef MODULE_PARM
18005 +#undef module_param    /* just in case */
18006 +#define        module_param(a,b,c)             MODULE_PARM(a,"i")
18007 +#endif
18008 +
18009 +#define bzero(s,l)             memset(s,0,l)
18010 +#define bcopy(s,d,l)   memcpy(d,s,l)
18011 +#define bcmp(x, y, l)  memcmp(x,y,l)
18012 +
18013 +#define MIN(x,y)       ((x) < (y) ? (x) : (y))
18014 +
18015 +#define device_printf(dev, a...) ({ \
18016 +                               printk("%s: ", device_get_nameunit(dev)); printk(a); \
18017 +                       })
18018 +
18019 +#undef printf
18020 +#define printf(fmt...) printk(fmt)
18021 +
18022 +#define KASSERT(c,p)   if (!(c)) { printk p ; } else
18023 +
18024 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
18025 +#define ocf_daemonize(str) \
18026 +       daemonize(); \
18027 +       spin_lock_irq(&current->sigmask_lock); \
18028 +       sigemptyset(&current->blocked); \
18029 +       recalc_sigpending(current); \
18030 +       spin_unlock_irq(&current->sigmask_lock); \
18031 +       sprintf(current->comm, str);
18032 +#else
18033 +#define ocf_daemonize(str) daemonize(str);
18034 +#endif
18035 +
18036 +#define        TAILQ_INSERT_TAIL(q,d,m) list_add_tail(&(d)->m, (q))
18037 +#define        TAILQ_EMPTY(q)  list_empty(q)
18038 +#define        TAILQ_FOREACH(v, q, m) list_for_each_entry(v, q, m)
18039 +
18040 +#define read_random(p,l) get_random_bytes(p,l)
18041 +
18042 +#define DELAY(x)       ((x) > 2000 ? mdelay((x)/1000) : udelay(x))
18043 +#define strtoul simple_strtoul
18044 +
18045 +#define pci_get_vendor(dev)    ((dev)->vendor)
18046 +#define pci_get_device(dev)    ((dev)->device)
18047 +
18048 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
18049 +#define pci_set_consistent_dma_mask(dev, mask) (0)
18050 +#endif
18051 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
18052 +#define pci_dma_sync_single_for_cpu pci_dma_sync_single
18053 +#endif
18054 +
18055 +#ifndef DMA_32BIT_MASK
18056 +#define DMA_32BIT_MASK  0x00000000ffffffffULL
18057 +#endif
18058 +
18059 +#define htole32(x)     cpu_to_le32(x)
18060 +#define htobe32(x)     cpu_to_be32(x)
18061 +#define htole16(x)     cpu_to_le16(x)
18062 +#define htobe16(x)     cpu_to_be16(x)
18063 +
18064 +/* older kernels don't have these */
18065 +
18066 +#ifndef IRQ_NONE
18067 +#define IRQ_NONE
18068 +#define IRQ_HANDLED
18069 +#define irqreturn_t void
18070 +#endif
18071 +#ifndef IRQF_SHARED
18072 +#define IRQF_SHARED    SA_SHIRQ
18073 +#endif
18074 +
18075 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
18076 +# define strlcpy(dest,src,len) \
18077 +               ({strncpy(dest,src,(len)-1); ((char *)dest)[(len)-1] = '\0'; })
18078 +#endif
18079 +
18080 +#ifndef MAX_ERRNO
18081 +#define MAX_ERRNO      4095
18082 +#endif
18083 +#ifndef IS_ERR_VALUE
18084 +#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
18085 +#endif
18086 +
18087 +/*
18088 + * common debug for all
18089 + */
18090 +#if 1
18091 +#define dprintk(a...)  do { if (debug) printk(a); } while(0)
18092 +#else
18093 +#define dprintk(a...)
18094 +#endif
18095 +
18096 +#ifndef SLAB_ATOMIC
18097 +/* Changed in 2.6.20, must use GFP_ATOMIC now */
18098 +#define        SLAB_ATOMIC     GFP_ATOMIC
18099 +#endif
18100 +
18101 +/*
18102 + * need some additional support for older kernels */
18103 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,2)
18104 +#define pci_register_driver_compat(driver, rc) \
18105 +       do { \
18106 +               if ((rc) > 0) { \
18107 +                       (rc) = 0; \
18108 +               } else if (rc == 0) { \
18109 +                       (rc) = -ENODEV; \
18110 +               } else { \
18111 +                       pci_unregister_driver(driver); \
18112 +               } \
18113 +       } while (0)
18114 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
18115 +#define pci_register_driver_compat(driver,rc) ((rc) = (rc) < 0 ? (rc) : 0)
18116 +#else
18117 +#define pci_register_driver_compat(driver,rc)
18118 +#endif
18119 +
18120 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
18121 +
18122 +#include <asm/scatterlist.h>
18123 +
18124 +static inline void sg_set_page(struct scatterlist *sg,  struct page *page,
18125 +                              unsigned int len, unsigned int offset)
18126 +{
18127 +       sg->page = page;
18128 +       sg->offset = offset;
18129 +       sg->length = len;
18130 +}
18131 +
18132 +static inline void *sg_virt(struct scatterlist *sg)
18133 +{
18134 +       return page_address(sg->page) + sg->offset;
18135 +}
18136 +
18137 +#endif
18138 +
18139 +#endif /* __KERNEL__ */
18140 +
18141 +/****************************************************************************/
18142 +#endif /* _BSD_COMPAT_H_ */
18143 --- /dev/null
18144 +++ b/crypto/ocf/ep80579/icp_asym.c
18145 @@ -0,0 +1,1375 @@
18146 +/***************************************************************************
18147 + *
18148 + * This file is provided under a dual BSD/GPLv2 license.  When using or 
18149 + *   redistributing this file, you may do so under either license.
18150 + * 
18151 + *   GPL LICENSE SUMMARY
18152 + * 
18153 + *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
18154 + * 
18155 + *   This program is free software; you can redistribute it and/or modify 
18156 + *   it under the terms of version 2 of the GNU General Public License as
18157 + *   published by the Free Software Foundation.
18158 + * 
18159 + *   This program is distributed in the hope that it will be useful, but 
18160 + *   WITHOUT ANY WARRANTY; without even the implied warranty of 
18161 + *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
18162 + *   General Public License for more details.
18163 + * 
18164 + *   You should have received a copy of the GNU General Public License 
18165 + *   along with this program; if not, write to the Free Software 
18166 + *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18167 + *   The full GNU General Public License is included in this distribution 
18168 + *   in the file called LICENSE.GPL.
18169 + * 
18170 + *   Contact Information:
18171 + *   Intel Corporation
18172 + * 
18173 + *   BSD LICENSE 
18174 + * 
18175 + *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
18176 + *   All rights reserved.
18177 + * 
18178 + *   Redistribution and use in source and binary forms, with or without 
18179 + *   modification, are permitted provided that the following conditions 
18180 + *   are met:
18181 + * 
18182 + *     * Redistributions of source code must retain the above copyright 
18183 + *       notice, this list of conditions and the following disclaimer.
18184 + *     * Redistributions in binary form must reproduce the above copyright 
18185 + *       notice, this list of conditions and the following disclaimer in 
18186 + *       the documentation and/or other materials provided with the 
18187 + *       distribution.
18188 + *     * Neither the name of Intel Corporation nor the names of its 
18189 + *       contributors may be used to endorse or promote products derived 
18190 + *       from this software without specific prior written permission.
18191 + * 
18192 + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
18193 + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
18194 + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
18195 + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
18196 + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
18197 + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
18198 + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
18199 + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
18200 + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
18201 + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
18202 + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
18203 + * 
18204 + * 
18205 + *  version: Security.L.1.0.130
18206 + *
18207 + ***************************************************************************/
18208 +
18209 +#include "icp_ocf.h"
18210 +
18211 +/*The following define values (containing the word 'INDEX') are used to find
18212 +the index of each input buffer of the crypto_kop struct (see OCF cryptodev.h).
18213 +These values were found through analysis of the OCF OpenSSL patch. If the
18214 +calling program uses different input buffer positions, these defines will have
18215 +to be changed.*/
18216 +
18217 +/*DIFFIE HELLMAN buffer index values*/
18218 +#define ICP_DH_KRP_PARAM_PRIME_INDEX                           (0)
18219 +#define ICP_DH_KRP_PARAM_BASE_INDEX                            (1)
18220 +#define ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX                   (2)
18221 +#define ICP_DH_KRP_PARAM_RESULT_INDEX                          (3)
18222 +
18223 +/*MOD EXP buffer index values*/
18224 +#define ICP_MOD_EXP_KRP_PARAM_BASE_INDEX                       (0)
18225 +#define ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX                   (1)
18226 +#define ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX                    (2)
18227 +#define ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX                     (3)
18228 +
18229 +#define SINGLE_BYTE_VALUE                                      (4)
18230 +
18231 +/*MOD EXP CRT buffer index values*/
18232 +#define ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX                        (0)
18233 +#define ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX                        (1)
18234 +#define ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX                      (2)
18235 +#define ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX            (3)
18236 +#define ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX            (4)
18237 +#define ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX             (5)
18238 +#define ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX                 (6)
18239 +
18240 +/*DSA sign buffer index values*/
18241 +#define ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX                      (0)
18242 +#define ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX                   (1)
18243 +#define ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX                   (2)
18244 +#define ICP_DSA_SIGN_KRP_PARAM_G_INDEX                         (3)
18245 +#define ICP_DSA_SIGN_KRP_PARAM_X_INDEX                         (4)
18246 +#define ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX                  (5)
18247 +#define ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX                  (6)
18248 +
18249 +/*DSA verify buffer index values*/
18250 +#define ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX                    (0)
18251 +#define ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX                 (1)
18252 +#define ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX                 (2)
18253 +#define ICP_DSA_VERIFY_KRP_PARAM_G_INDEX                       (3)
18254 +#define ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX                  (4)
18255 +#define ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX                   (5)
18256 +#define ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX                   (6)
18257 +
18258 +/*DSA sign prime Q vs random number K size check values*/
18259 +#define DONT_RUN_LESS_THAN_CHECK                               (0)
18260 +#define FAIL_A_IS_GREATER_THAN_B                               (1)
18261 +#define FAIL_A_IS_EQUAL_TO_B                                   (1)
18262 +#define SUCCESS_A_IS_LESS_THAN_B                               (0)
18263 +#define DSA_SIGN_RAND_GEN_VAL_CHECK_MAX_ITERATIONS             (500)
18264 +
18265 +/* We need to set a cryptokp success value just in case it is set or allocated
18266 +   and not set to zero outside of this module */
18267 +#define CRYPTO_OP_SUCCESS                                      (0)
18268 +
18269 +static int icp_ocfDrvDHComputeKey(struct cryptkop *krp);
18270 +
18271 +static int icp_ocfDrvModExp(struct cryptkop *krp);
18272 +
18273 +static int icp_ocfDrvModExpCRT(struct cryptkop *krp);
18274 +
18275 +static int
18276 +icp_ocfDrvCheckALessThanB(CpaFlatBuffer * pK, CpaFlatBuffer * pQ, int *doCheck);
18277 +
18278 +static int icp_ocfDrvDsaSign(struct cryptkop *krp);
18279 +
18280 +static int icp_ocfDrvDsaVerify(struct cryptkop *krp);
18281 +
18282 +static void
18283 +icp_ocfDrvDhP1CallBack(void *callbackTag,
18284 +                      CpaStatus status,
18285 +                      void *pOpData, CpaFlatBuffer * pLocalOctetStringPV);
18286 +
18287 +static void
18288 +icp_ocfDrvModExpCallBack(void *callbackTag,
18289 +                        CpaStatus status,
18290 +                        void *pOpData, CpaFlatBuffer * pResult);
18291 +
18292 +static void
18293 +icp_ocfDrvModExpCRTCallBack(void *callbackTag,
18294 +                           CpaStatus status,
18295 +                           void *pOpData, CpaFlatBuffer * pOutputData);
18296 +
18297 +static void
18298 +icp_ocfDrvDsaVerifyCallBack(void *callbackTag,
18299 +                           CpaStatus status,
18300 +                           void *pOpData, CpaBoolean verifyStatus);
18301 +
18302 +static void
18303 +icp_ocfDrvDsaRSSignCallBack(void *callbackTag,
18304 +                           CpaStatus status,
18305 +                           void *pOpData,
18306 +                           CpaBoolean protocolStatus,
18307 +                           CpaFlatBuffer * pR, CpaFlatBuffer * pS);
18308 +
18309 +/* Name        : icp_ocfDrvPkeProcess
18310 + *
18311 + * Description : This function will choose which PKE process to follow
18312 + * based on the input arguments
18313 + */
18314 +int icp_ocfDrvPkeProcess(device_t dev, struct cryptkop *krp, int hint)
18315 +{
18316 +       CpaStatus lacStatus = CPA_STATUS_SUCCESS;
18317 +
18318 +       if (NULL == krp) {
18319 +               DPRINTK("%s(): Invalid input parameters, cryptkop = %p\n",
18320 +                       __FUNCTION__, krp);
18321 +               return EINVAL;
18322 +       }
18323 +
18324 +       if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
18325 +               krp->krp_status = ECANCELED;
18326 +               return ECANCELED;
18327 +       }
18328 +
18329 +       switch (krp->krp_op) {
18330 +       case CRK_DH_COMPUTE_KEY:
18331 +               DPRINTK("%s() doing DH_COMPUTE_KEY\n", __FUNCTION__);
18332 +               lacStatus = icp_ocfDrvDHComputeKey(krp);
18333 +               if (CPA_STATUS_SUCCESS != lacStatus) {
18334 +                       EPRINTK("%s(): icp_ocfDrvDHComputeKey failed "
18335 +                               "(%d).\n", __FUNCTION__, lacStatus);
18336 +                       krp->krp_status = ECANCELED;
18337 +                       return ECANCELED;
18338 +               }
18339 +
18340 +               break;
18341 +
18342 +       case CRK_MOD_EXP:
18343 +               DPRINTK("%s() doing MOD_EXP \n", __FUNCTION__);
18344 +               lacStatus = icp_ocfDrvModExp(krp);
18345 +               if (CPA_STATUS_SUCCESS != lacStatus) {
18346 +                       EPRINTK("%s(): icp_ocfDrvModExp failed (%d).\n",
18347 +                               __FUNCTION__, lacStatus);
18348 +                       krp->krp_status = ECANCELED;
18349 +                       return ECANCELED;
18350 +               }
18351 +
18352 +               break;
18353 +
18354 +       case CRK_MOD_EXP_CRT:
18355 +               DPRINTK("%s() doing MOD_EXP_CRT \n", __FUNCTION__);
18356 +               lacStatus = icp_ocfDrvModExpCRT(krp);
18357 +               if (CPA_STATUS_SUCCESS != lacStatus) {
18358 +                       EPRINTK("%s(): icp_ocfDrvModExpCRT "
18359 +                               "failed (%d).\n", __FUNCTION__, lacStatus);
18360 +                       krp->krp_status = ECANCELED;
18361 +                       return ECANCELED;
18362 +               }
18363 +
18364 +               break;
18365 +
18366 +       case CRK_DSA_SIGN:
18367 +               DPRINTK("%s() doing DSA_SIGN \n", __FUNCTION__);
18368 +               lacStatus = icp_ocfDrvDsaSign(krp);
18369 +               if (CPA_STATUS_SUCCESS != lacStatus) {
18370 +                       EPRINTK("%s(): icp_ocfDrvDsaSign "
18371 +                               "failed (%d).\n", __FUNCTION__, lacStatus);
18372 +                       krp->krp_status = ECANCELED;
18373 +                       return ECANCELED;
18374 +               }
18375 +
18376 +               break;
18377 +
18378 +       case CRK_DSA_VERIFY:
18379 +               DPRINTK("%s() doing DSA_VERIFY \n", __FUNCTION__);
18380 +               lacStatus = icp_ocfDrvDsaVerify(krp);
18381 +               if (CPA_STATUS_SUCCESS != lacStatus) {
18382 +                       EPRINTK("%s(): icp_ocfDrvDsaVerify "
18383 +                               "failed (%d).\n", __FUNCTION__, lacStatus);
18384 +                       krp->krp_status = ECANCELED;
18385 +                       return ECANCELED;
18386 +               }
18387 +
18388 +               break;
18389 +
18390 +       default:
18391 +               EPRINTK("%s(): Asymettric function not "
18392 +                       "supported (%d).\n", __FUNCTION__, krp->krp_op);
18393 +               krp->krp_status = EOPNOTSUPP;
18394 +               return EOPNOTSUPP;
18395 +       }
18396 +
18397 +       return ICP_OCF_DRV_STATUS_SUCCESS;
18398 +}
18399 +
18400 +/* Name        : icp_ocfDrvSwapBytes
18401 + *
18402 + * Description : This function is used to swap the byte order of a buffer.
18403 + * It has been seen that in general we are passed little endian byte order
18404 + * buffers, but LAC only accepts big endian byte order buffers.
18405 + */
18406 +static void inline
18407 +icp_ocfDrvSwapBytes(u_int8_t * num, u_int32_t buff_len_bytes)
18408 +{
18409 +
18410 +       int i;
18411 +       u_int8_t *end_ptr;
18412 +       u_int8_t hold_val;
18413 +
18414 +       end_ptr = num + (buff_len_bytes - 1);
18415 +       buff_len_bytes = buff_len_bytes >> 1;
18416 +       for (i = 0; i < buff_len_bytes; i++) {
18417 +               hold_val = *num;
18418 +               *num = *end_ptr;
18419 +               num++;
18420 +               *end_ptr = hold_val;
18421 +               end_ptr--;
18422 +       }
18423 +}
18424 +
18425 +/* Name        : icp_ocfDrvDHComputeKey
18426 + *
18427 + * Description : This function will map Diffie Hellman calls from OCF
18428 + * to the LAC API. OCF uses this function for Diffie Hellman Phase1 and
18429 + * Phase2. LAC has a separate Diffie Hellman Phase2 call, however both phases
18430 + * break down to a modular exponentiation.
18431 + */
18432 +static int icp_ocfDrvDHComputeKey(struct cryptkop *krp)
18433 +{
18434 +       CpaStatus lacStatus = CPA_STATUS_SUCCESS;
18435 +       void *callbackTag = NULL;
18436 +       CpaCyDhPhase1KeyGenOpData *pPhase1OpData = NULL;
18437 +       CpaFlatBuffer *pLocalOctetStringPV = NULL;
18438 +       uint32_t dh_prime_len_bytes = 0, dh_prime_len_bits = 0;
18439 +
18440 +       /* Input checks - check prime is a multiple of 8 bits to allow for
18441 +          allocation later */
18442 +       dh_prime_len_bits =
18443 +           (krp->krp_param[ICP_DH_KRP_PARAM_PRIME_INDEX].crp_nbits);
18444 +
18445 +       /* LAC can reject prime lengths based on prime key sizes, we just
18446 +          need to make sure we can allocate space for the base and
18447 +          exponent buffers correctly */
18448 +       if ((dh_prime_len_bits % NUM_BITS_IN_BYTE) != 0) {
18449 +               APRINTK("%s(): Warning Prime number buffer size is not a "
18450 +                       "multiple of 8 bits\n", __FUNCTION__);
18451 +       }
18452 +
18453 +       /* Result storage space should be the same size as the prime as this
18454 +          value can take up the same amount of storage space */
18455 +       if (dh_prime_len_bits !=
18456 +           krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_nbits) {
18457 +               DPRINTK("%s(): Return Buffer must be the same size "
18458 +                       "as the Prime buffer\n", __FUNCTION__);
18459 +               krp->krp_status = EINVAL;
18460 +               return EINVAL;
18461 +       }
18462 +       /* Switch to size in bytes */
18463 +       BITS_TO_BYTES(dh_prime_len_bytes, dh_prime_len_bits);
18464 +
18465 +       callbackTag = krp;
18466 +
18467 +       pPhase1OpData = kmem_cache_zalloc(drvDH_zone, GFP_KERNEL);
18468 +       if (NULL == pPhase1OpData) {
18469 +               APRINTK("%s():Failed to get memory for key gen data\n",
18470 +                       __FUNCTION__);
18471 +               krp->krp_status = ENOMEM;
18472 +               return ENOMEM;
18473 +       }
18474 +
18475 +       pLocalOctetStringPV = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
18476 +       if (NULL == pLocalOctetStringPV) {
18477 +               APRINTK("%s():Failed to get memory for pLocalOctetStringPV\n",
18478 +                       __FUNCTION__);
18479 +               kmem_cache_free(drvDH_zone, pPhase1OpData);
18480 +               krp->krp_status = ENOMEM;
18481 +               return ENOMEM;
18482 +       }
18483 +
18484 +       /* Link parameters */
18485 +       pPhase1OpData->primeP.pData =
18486 +           krp->krp_param[ICP_DH_KRP_PARAM_PRIME_INDEX].crp_p;
18487 +
18488 +       pPhase1OpData->primeP.dataLenInBytes = dh_prime_len_bytes;
18489 +
18490 +       icp_ocfDrvSwapBytes(pPhase1OpData->primeP.pData, dh_prime_len_bytes);
18491 +
18492 +       pPhase1OpData->baseG.pData =
18493 +           krp->krp_param[ICP_DH_KRP_PARAM_BASE_INDEX].crp_p;
18494 +
18495 +       BITS_TO_BYTES(pPhase1OpData->baseG.dataLenInBytes,
18496 +                     krp->krp_param[ICP_DH_KRP_PARAM_BASE_INDEX].crp_nbits);
18497 +
18498 +       icp_ocfDrvSwapBytes(pPhase1OpData->baseG.pData,
18499 +                           pPhase1OpData->baseG.dataLenInBytes);
18500 +
18501 +       pPhase1OpData->privateValueX.pData =
18502 +           krp->krp_param[ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX].crp_p;
18503 +
18504 +       BITS_TO_BYTES(pPhase1OpData->privateValueX.dataLenInBytes,
18505 +                     krp->krp_param[ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX].
18506 +                     crp_nbits);
18507 +
18508 +       icp_ocfDrvSwapBytes(pPhase1OpData->privateValueX.pData,
18509 +                           pPhase1OpData->privateValueX.dataLenInBytes);
18510 +
18511 +       /* Output parameters */
18512 +       pLocalOctetStringPV->pData =
18513 +           krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_p;
18514 +
18515 +       BITS_TO_BYTES(pLocalOctetStringPV->dataLenInBytes,
18516 +                     krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_nbits);
18517 +
18518 +       lacStatus = cpaCyDhKeyGenPhase1(CPA_INSTANCE_HANDLE_SINGLE,
18519 +                                       icp_ocfDrvDhP1CallBack,
18520 +                                       callbackTag, pPhase1OpData,
18521 +                                       pLocalOctetStringPV);
18522 +
18523 +       if (CPA_STATUS_SUCCESS != lacStatus) {
18524 +               EPRINTK("%s(): DH Phase 1 Key Gen failed (%d).\n",
18525 +                       __FUNCTION__, lacStatus);
18526 +               icp_ocfDrvFreeFlatBuffer(pLocalOctetStringPV);
18527 +               kmem_cache_free(drvDH_zone, pPhase1OpData);
18528 +       }
18529 +
18530 +       return lacStatus;
18531 +}
18532 +
18533 +/* Name        : icp_ocfDrvModExp
18534 + *
18535 + * Description : This function will map ordinary Modular Exponentiation calls
18536 + * from OCF to the LAC API.
18537 + *
18538 + */
18539 +static int icp_ocfDrvModExp(struct cryptkop *krp)
18540 +{
18541 +       CpaStatus lacStatus = CPA_STATUS_SUCCESS;
18542 +       void *callbackTag = NULL;
18543 +       CpaCyLnModExpOpData *pModExpOpData = NULL;
18544 +       CpaFlatBuffer *pResult = NULL;
18545 +
18546 +       if ((krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_nbits %
18547 +            NUM_BITS_IN_BYTE) != 0) {
18548 +               DPRINTK("%s(): Warning - modulus buffer size (%d) is not a "
18549 +                       "multiple of 8 bits\n", __FUNCTION__,
18550 +                       krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].
18551 +                       crp_nbits);
18552 +       }
18553 +
18554 +       /* Result storage space should be the same size as the prime as this
18555 +          value can take up the same amount of storage space */
18556 +       if (krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_nbits >
18557 +           krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].crp_nbits) {
18558 +               APRINTK("%s(): Return Buffer size must be the same or"
18559 +                       " greater than the Modulus buffer\n", __FUNCTION__);
18560 +               krp->krp_status = EINVAL;
18561 +               return EINVAL;
18562 +       }
18563 +
18564 +       callbackTag = krp;
18565 +
18566 +       pModExpOpData = kmem_cache_zalloc(drvLnModExp_zone, GFP_KERNEL);
18567 +       if (NULL == pModExpOpData) {
18568 +               APRINTK("%s():Failed to get memory for key gen data\n",
18569 +                       __FUNCTION__);
18570 +               krp->krp_status = ENOMEM;
18571 +               return ENOMEM;
18572 +       }
18573 +
18574 +       pResult = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
18575 +       if (NULL == pResult) {
18576 +               APRINTK("%s():Failed to get memory for ModExp result\n",
18577 +                       __FUNCTION__);
18578 +               kmem_cache_free(drvLnModExp_zone, pModExpOpData);
18579 +               krp->krp_status = ENOMEM;
18580 +               return ENOMEM;
18581 +       }
18582 +
18583 +       /* Link parameters */
18584 +       pModExpOpData->modulus.pData =
18585 +           krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_p;
18586 +       BITS_TO_BYTES(pModExpOpData->modulus.dataLenInBytes,
18587 +                     krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].
18588 +                     crp_nbits);
18589 +
18590 +       icp_ocfDrvSwapBytes(pModExpOpData->modulus.pData,
18591 +                           pModExpOpData->modulus.dataLenInBytes);
18592 +
18593 +       /*OCF patch to Openswan Pluto regularly sends the base value as 2
18594 +          bits in size. In this case, it has been found it is better to
18595 +          use the base size memory space as the input buffer (if the number
18596 +          is in bits is less than a byte, the number of bits is the input
18597 +          value) */
18598 +       if (krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_nbits <
18599 +           NUM_BITS_IN_BYTE) {
18600 +               DPRINTK("%s : base is small (%d)\n", __FUNCTION__, krp->
18601 +                       krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_nbits);
18602 +               pModExpOpData->base.dataLenInBytes = SINGLE_BYTE_VALUE;
18603 +               pModExpOpData->base.pData =
18604 +                   (uint8_t *) & (krp->
18605 +                                  krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
18606 +                                  crp_nbits);
18607 +               *((uint32_t *) pModExpOpData->base.pData) =
18608 +                   htonl(*((uint32_t *) pModExpOpData->base.pData));
18609 +
18610 +       } else {
18611 +
18612 +               DPRINTK("%s : base is big (%d)\n", __FUNCTION__, krp->
18613 +                       krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_nbits);
18614 +               pModExpOpData->base.pData =
18615 +                   krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_p;
18616 +               BITS_TO_BYTES(pModExpOpData->base.dataLenInBytes,
18617 +                             krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
18618 +                             crp_nbits);
18619 +               icp_ocfDrvSwapBytes(pModExpOpData->base.pData,
18620 +                                   pModExpOpData->base.dataLenInBytes);
18621 +       }
18622 +
18623 +       pModExpOpData->exponent.pData =
18624 +           krp->krp_param[ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX].crp_p;
18625 +       BITS_TO_BYTES(pModExpOpData->exponent.dataLenInBytes,
18626 +                     krp->krp_param[ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX].
18627 +                     crp_nbits);
18628 +
18629 +       icp_ocfDrvSwapBytes(pModExpOpData->exponent.pData,
18630 +                           pModExpOpData->exponent.dataLenInBytes);
18631 +       /* Output parameters */
18632 +       pResult->pData =
18633 +           krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].crp_p,
18634 +           BITS_TO_BYTES(pResult->dataLenInBytes,
18635 +                         krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].
18636 +                         crp_nbits);
18637 +
18638 +       lacStatus = cpaCyLnModExp(CPA_INSTANCE_HANDLE_SINGLE,
18639 +                                 icp_ocfDrvModExpCallBack,
18640 +                                 callbackTag, pModExpOpData, pResult);
18641 +
18642 +       if (CPA_STATUS_SUCCESS != lacStatus) {
18643 +               EPRINTK("%s(): Mod Exp Operation failed (%d).\n",
18644 +                       __FUNCTION__, lacStatus);
18645 +               krp->krp_status = ECANCELED;
18646 +               icp_ocfDrvFreeFlatBuffer(pResult);
18647 +               kmem_cache_free(drvLnModExp_zone, pModExpOpData);
18648 +       }
18649 +
18650 +       return lacStatus;
18651 +}
18652 +
18653 +/* Name        : icp_ocfDrvModExpCRT
18654 + *
18655 + * Description : This function will map ordinary Modular Exponentiation Chinese
18656 + * Remainder Theorem implementation calls from OCF to the LAC API.
18657 + *
18658 + * Note : Mod Exp CRT for this driver is accelerated through LAC RSA type 2
18659 + * decrypt operation. Therefore P and Q input values must always be prime
18660 + * numbers. Although basic primality checks are done in LAC, it is up to the
18661 + * user to do any correct prime number checking before passing the inputs.
18662 + */
18663 +
18664 +static int icp_ocfDrvModExpCRT(struct cryptkop *krp)
18665 +{
18666 +       CpaStatus lacStatus = CPA_STATUS_SUCCESS;
18667 +       CpaCyRsaDecryptOpData *rsaDecryptOpData = NULL;
18668 +       void *callbackTag = NULL;
18669 +       CpaFlatBuffer *pOutputData = NULL;
18670 +
18671 +       /*Parameter input checks are all done by LAC, no need to repeat
18672 +          them here. */
18673 +       callbackTag = krp;
18674 +
18675 +       rsaDecryptOpData = kmem_cache_zalloc(drvRSADecrypt_zone, GFP_KERNEL);
18676 +       if (NULL == rsaDecryptOpData) {
18677 +               APRINTK("%s():Failed to get memory"
18678 +                       " for MOD EXP CRT Op data struct\n", __FUNCTION__);
18679 +               krp->krp_status = ENOMEM;
18680 +               return ENOMEM;
18681 +       }
18682 +
18683 +       rsaDecryptOpData->pRecipientPrivateKey
18684 +           = kmem_cache_zalloc(drvRSAPrivateKey_zone, GFP_KERNEL);
18685 +       if (NULL == rsaDecryptOpData->pRecipientPrivateKey) {
18686 +               APRINTK("%s():Failed to get memory for MOD EXP CRT"
18687 +                       " private key values struct\n", __FUNCTION__);
18688 +               kmem_cache_free(drvRSADecrypt_zone, rsaDecryptOpData);
18689 +               krp->krp_status = ENOMEM;
18690 +               return ENOMEM;
18691 +       }
18692 +
18693 +       rsaDecryptOpData->pRecipientPrivateKey->
18694 +           version = CPA_CY_RSA_VERSION_TWO_PRIME;
18695 +       rsaDecryptOpData->pRecipientPrivateKey->
18696 +           privateKeyRepType = CPA_CY_RSA_PRIVATE_KEY_REP_TYPE_2;
18697 +
18698 +       pOutputData = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
18699 +       if (NULL == pOutputData) {
18700 +               APRINTK("%s():Failed to get memory"
18701 +                       " for MOD EXP CRT output data\n", __FUNCTION__);
18702 +               kmem_cache_free(drvRSAPrivateKey_zone,
18703 +                               rsaDecryptOpData->pRecipientPrivateKey);
18704 +               kmem_cache_free(drvRSADecrypt_zone, rsaDecryptOpData);
18705 +               krp->krp_status = ENOMEM;
18706 +               return ENOMEM;
18707 +       }
18708 +
18709 +       rsaDecryptOpData->pRecipientPrivateKey->
18710 +           version = CPA_CY_RSA_VERSION_TWO_PRIME;
18711 +       rsaDecryptOpData->pRecipientPrivateKey->
18712 +           privateKeyRepType = CPA_CY_RSA_PRIVATE_KEY_REP_TYPE_2;
18713 +
18714 +       /* Link parameters */
18715 +       rsaDecryptOpData->inputData.pData =
18716 +           krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX].crp_p;
18717 +       BITS_TO_BYTES(rsaDecryptOpData->inputData.dataLenInBytes,
18718 +                     krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX].
18719 +                     crp_nbits);
18720 +
18721 +       icp_ocfDrvSwapBytes(rsaDecryptOpData->inputData.pData,
18722 +                           rsaDecryptOpData->inputData.dataLenInBytes);
18723 +
18724 +       rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.prime1P.pData =
18725 +           krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX].crp_p;
18726 +       BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
18727 +                     prime1P.dataLenInBytes,
18728 +                     krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX].
18729 +                     crp_nbits);
18730 +
18731 +       icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
18732 +                           privateKeyRep2.prime1P.pData,
18733 +                           rsaDecryptOpData->pRecipientPrivateKey->
18734 +                           privateKeyRep2.prime1P.dataLenInBytes);
18735 +
18736 +       rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.prime2Q.pData =
18737 +           krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX].crp_p;
18738 +       BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
18739 +                     prime2Q.dataLenInBytes,
18740 +                     krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX].
18741 +                     crp_nbits);
18742 +
18743 +       icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
18744 +                           privateKeyRep2.prime2Q.pData,
18745 +                           rsaDecryptOpData->pRecipientPrivateKey->
18746 +                           privateKeyRep2.prime2Q.dataLenInBytes);
18747 +
18748 +       rsaDecryptOpData->pRecipientPrivateKey->
18749 +           privateKeyRep2.exponent1Dp.pData =
18750 +           krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX].crp_p;
18751 +       BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
18752 +                     exponent1Dp.dataLenInBytes,
18753 +                     krp->
18754 +                     krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX].
18755 +                     crp_nbits);
18756 +
18757 +       icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
18758 +                           privateKeyRep2.exponent1Dp.pData,
18759 +                           rsaDecryptOpData->pRecipientPrivateKey->
18760 +                           privateKeyRep2.exponent1Dp.dataLenInBytes);
18761 +
18762 +       rsaDecryptOpData->pRecipientPrivateKey->
18763 +           privateKeyRep2.exponent2Dq.pData =
18764 +           krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX].crp_p;
18765 +       BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->
18766 +                     privateKeyRep2.exponent2Dq.dataLenInBytes,
18767 +                     krp->
18768 +                     krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX].
18769 +                     crp_nbits);
18770 +
18771 +       icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
18772 +                           privateKeyRep2.exponent2Dq.pData,
18773 +                           rsaDecryptOpData->pRecipientPrivateKey->
18774 +                           privateKeyRep2.exponent2Dq.dataLenInBytes);
18775 +
18776 +       rsaDecryptOpData->pRecipientPrivateKey->
18777 +           privateKeyRep2.coefficientQInv.pData =
18778 +           krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX].crp_p;
18779 +       BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->
18780 +                     privateKeyRep2.coefficientQInv.dataLenInBytes,
18781 +                     krp->
18782 +                     krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX].
18783 +                     crp_nbits);
18784 +
18785 +       icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
18786 +                           privateKeyRep2.coefficientQInv.pData,
18787 +                           rsaDecryptOpData->pRecipientPrivateKey->
18788 +                           privateKeyRep2.coefficientQInv.dataLenInBytes);
18789 +
18790 +       /* Output Parameter */
18791 +       pOutputData->pData =
18792 +           krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX].crp_p;
18793 +       BITS_TO_BYTES(pOutputData->dataLenInBytes,
18794 +                     krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX].
18795 +                     crp_nbits);
18796 +
18797 +       lacStatus = cpaCyRsaDecrypt(CPA_INSTANCE_HANDLE_SINGLE,
18798 +                                   icp_ocfDrvModExpCRTCallBack,
18799 +                                   callbackTag, rsaDecryptOpData, pOutputData);
18800 +
18801 +       if (CPA_STATUS_SUCCESS != lacStatus) {
18802 +               EPRINTK("%s(): Mod Exp CRT Operation failed (%d).\n",
18803 +                       __FUNCTION__, lacStatus);
18804 +               krp->krp_status = ECANCELED;
18805 +               icp_ocfDrvFreeFlatBuffer(pOutputData);
18806 +               kmem_cache_free(drvRSAPrivateKey_zone,
18807 +                               rsaDecryptOpData->pRecipientPrivateKey);
18808 +               kmem_cache_free(drvRSADecrypt_zone, rsaDecryptOpData);
18809 +       }
18810 +
18811 +       return lacStatus;
18812 +}
18813 +
18814 +/* Name        : icp_ocfDrvCheckALessThanB
18815 + *
18816 + * Description : This function will check whether the first argument is less
18817 + * than the second. It is used to check whether the DSA RS sign Random K
18818 + * value is less than the Prime Q value (as defined in the specification)
18819 + *
18820 + */
18821 +static int
18822 +icp_ocfDrvCheckALessThanB(CpaFlatBuffer * pK, CpaFlatBuffer * pQ, int *doCheck)
18823 +{
18824 +
18825 +       uint8_t *MSB_K = pK->pData;
18826 +       uint8_t *MSB_Q = pQ->pData;
18827 +       uint32_t buffer_lengths_in_bytes = pQ->dataLenInBytes;
18828 +
18829 +       if (DONT_RUN_LESS_THAN_CHECK == *doCheck) {
18830 +               return FAIL_A_IS_GREATER_THAN_B;
18831 +       }
18832 +
18833 +/*Check MSBs
18834 +if A == B, check next MSB
18835 +if A > B, return A_IS_GREATER_THAN_B
18836 +if A < B, return A_IS_LESS_THAN_B (success)
18837 +*/
18838 +       while (*MSB_K == *MSB_Q) {
18839 +               MSB_K++;
18840 +               MSB_Q++;
18841 +
18842 +               buffer_lengths_in_bytes--;
18843 +               if (0 == buffer_lengths_in_bytes) {
18844 +                       DPRINTK("%s() Buffers have equal value!!\n",
18845 +                               __FUNCTION__);
18846 +                       return FAIL_A_IS_EQUAL_TO_B;
18847 +               }
18848 +
18849 +       }
18850 +
18851 +       if (*MSB_K < *MSB_Q) {
18852 +               return SUCCESS_A_IS_LESS_THAN_B;
18853 +       } else {
18854 +               return FAIL_A_IS_GREATER_THAN_B;
18855 +       }
18856 +
18857 +}
18858 +
18859 +/* Name        : icp_ocfDrvDsaSign
18860 + *
18861 + * Description : This function will map DSA RS Sign from OCF to the LAC API.
18862 + *
18863 + * NOTE: From looking at OCF patch to OpenSSL and even the number of input
18864 + * parameters, OCF expects us to generate the random seed value. This value
18865 + * is generated and passed to LAC, however the number is discarded in the
18866 + * callback and not returned to the user.
18867 + */
18868 +static int icp_ocfDrvDsaSign(struct cryptkop *krp)
18869 +{
18870 +       CpaStatus lacStatus = CPA_STATUS_SUCCESS;
18871 +       CpaCyDsaRSSignOpData *dsaRsSignOpData = NULL;
18872 +       void *callbackTag = NULL;
18873 +       CpaCyRandGenOpData randGenOpData;
18874 +       int primeQSizeInBytes = 0;
18875 +       int doCheck = 0;
18876 +       CpaFlatBuffer randData;
18877 +       CpaBoolean protocolStatus = CPA_FALSE;
18878 +       CpaFlatBuffer *pR = NULL;
18879 +       CpaFlatBuffer *pS = NULL;
18880 +
18881 +       callbackTag = krp;
18882 +
18883 +       BITS_TO_BYTES(primeQSizeInBytes,
18884 +                     krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].
18885 +                     crp_nbits);
18886 +
18887 +       if (DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES != primeQSizeInBytes) {
18888 +               APRINTK("%s(): DSA PRIME Q size not equal to the "
18889 +                       "FIPS defined 20bytes, = %d\n",
18890 +                       __FUNCTION__, primeQSizeInBytes);
18891 +               krp->krp_status = EDOM;
18892 +               return EDOM;
18893 +       }
18894 +
18895 +       dsaRsSignOpData = kmem_cache_zalloc(drvDSARSSign_zone, GFP_KERNEL);
18896 +       if (NULL == dsaRsSignOpData) {
18897 +               APRINTK("%s():Failed to get memory"
18898 +                       " for DSA RS Sign Op data struct\n", __FUNCTION__);
18899 +               krp->krp_status = ENOMEM;
18900 +               return ENOMEM;
18901 +       }
18902 +
18903 +       dsaRsSignOpData->K.pData =
18904 +           kmem_cache_alloc(drvDSARSSignKValue_zone, GFP_ATOMIC);
18905 +
18906 +       if (NULL == dsaRsSignOpData->K.pData) {
18907 +               APRINTK("%s():Failed to get memory"
18908 +                       " for DSA RS Sign Op Random value\n", __FUNCTION__);
18909 +               kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
18910 +               krp->krp_status = ENOMEM;
18911 +               return ENOMEM;
18912 +       }
18913 +
18914 +       pR = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
18915 +       if (NULL == pR) {
18916 +               APRINTK("%s():Failed to get memory"
18917 +                       " for DSA signature R\n", __FUNCTION__);
18918 +               kmem_cache_free(drvDSARSSignKValue_zone,
18919 +                               dsaRsSignOpData->K.pData);
18920 +               kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
18921 +               krp->krp_status = ENOMEM;
18922 +               return ENOMEM;
18923 +       }
18924 +
18925 +       pS = kmem_cache_zalloc(drvFlatBuffer_zone, GFP_KERNEL);
18926 +       if (NULL == pS) {
18927 +               APRINTK("%s():Failed to get memory"
18928 +                       " for DSA signature S\n", __FUNCTION__);
18929 +               icp_ocfDrvFreeFlatBuffer(pR);
18930 +               kmem_cache_free(drvDSARSSignKValue_zone,
18931 +                               dsaRsSignOpData->K.pData);
18932 +               kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
18933 +               krp->krp_status = ENOMEM;
18934 +               return ENOMEM;
18935 +       }
18936 +
18937 +       /*link prime number parameter for ease of processing */
18938 +       dsaRsSignOpData->P.pData =
18939 +           krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX].crp_p;
18940 +       BITS_TO_BYTES(dsaRsSignOpData->P.dataLenInBytes,
18941 +                     krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX].
18942 +                     crp_nbits);
18943 +
18944 +       icp_ocfDrvSwapBytes(dsaRsSignOpData->P.pData,
18945 +                           dsaRsSignOpData->P.dataLenInBytes);
18946 +
18947 +       dsaRsSignOpData->Q.pData =
18948 +           krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].crp_p;
18949 +       BITS_TO_BYTES(dsaRsSignOpData->Q.dataLenInBytes,
18950 +                     krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].
18951 +                     crp_nbits);
18952 +
18953 +       icp_ocfDrvSwapBytes(dsaRsSignOpData->Q.pData,
18954 +                           dsaRsSignOpData->Q.dataLenInBytes);
18955 +
18956 +       /*generate a random number with buffer size equal to prime value Q,
18957 +          but with a value less than Q */
18958 +       dsaRsSignOpData->K.dataLenInBytes = dsaRsSignOpData->Q.dataLenInBytes;
18959 +
18960 +       randGenOpData.generateBits = CPA_TRUE;
18961 +       randGenOpData.lenInBytes = dsaRsSignOpData->K.dataLenInBytes;
18962 +
18963 +       icp_ocfDrvPtrAndLenToFlatBuffer(dsaRsSignOpData->K.pData,
18964 +                                       dsaRsSignOpData->K.dataLenInBytes,
18965 +                                       &randData);
18966 +
18967 +       doCheck = 0;
18968 +       while (icp_ocfDrvCheckALessThanB(&(dsaRsSignOpData->K),
18969 +                                        &(dsaRsSignOpData->Q), &doCheck)) {
18970 +
18971 +               if (CPA_STATUS_SUCCESS
18972 +                   != cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
18973 +                                   NULL, NULL, &randGenOpData, &randData)) {
18974 +                       APRINTK("%s(): ERROR - Failed to generate DSA RS Sign K"
18975 +                               "value\n", __FUNCTION__);
18976 +                       icp_ocfDrvFreeFlatBuffer(pS);
18977 +                       icp_ocfDrvFreeFlatBuffer(pR);
18978 +                       kmem_cache_free(drvDSARSSignKValue_zone,
18979 +                                       dsaRsSignOpData->K.pData);
18980 +                       kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
18981 +                       krp->krp_status = EAGAIN;
18982 +                       return EAGAIN;
18983 +               }
18984 +
18985 +               doCheck++;
18986 +               if (DSA_SIGN_RAND_GEN_VAL_CHECK_MAX_ITERATIONS == doCheck) {
18987 +                       APRINTK("%s(): ERROR - Failed to find DSA RS Sign K "
18988 +                               "value less than Q value\n", __FUNCTION__);
18989 +                       icp_ocfDrvFreeFlatBuffer(pS);
18990 +                       icp_ocfDrvFreeFlatBuffer(pR);
18991 +                       kmem_cache_free(drvDSARSSignKValue_zone,
18992 +                                       dsaRsSignOpData->K.pData);
18993 +                       kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
18994 +                       krp->krp_status = EAGAIN;
18995 +                       return EAGAIN;
18996 +               }
18997 +
18998 +       }
18999 +       /*Rand Data - no need to swap bytes for pK */
19000 +
19001 +       /* Link parameters */
19002 +       dsaRsSignOpData->G.pData =
19003 +           krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_G_INDEX].crp_p;
19004 +       BITS_TO_BYTES(dsaRsSignOpData->G.dataLenInBytes,
19005 +                     krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_G_INDEX].crp_nbits);
19006 +
19007 +       icp_ocfDrvSwapBytes(dsaRsSignOpData->G.pData,
19008 +                           dsaRsSignOpData->G.dataLenInBytes);
19009 +
19010 +       dsaRsSignOpData->X.pData =
19011 +           krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_X_INDEX].crp_p;
19012 +       BITS_TO_BYTES(dsaRsSignOpData->X.dataLenInBytes,
19013 +                     krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_X_INDEX].crp_nbits);
19014 +       icp_ocfDrvSwapBytes(dsaRsSignOpData->X.pData,
19015 +                           dsaRsSignOpData->X.dataLenInBytes);
19016 +
19017 +       dsaRsSignOpData->M.pData =
19018 +           krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX].crp_p;
19019 +       BITS_TO_BYTES(dsaRsSignOpData->M.dataLenInBytes,
19020 +                     krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX].
19021 +                     crp_nbits);
19022 +       icp_ocfDrvSwapBytes(dsaRsSignOpData->M.pData,
19023 +                           dsaRsSignOpData->M.dataLenInBytes);
19024 +
19025 +       /* Output Parameters */
19026 +       pS->pData = krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX].crp_p;
19027 +       BITS_TO_BYTES(pS->dataLenInBytes,
19028 +                     krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX].
19029 +                     crp_nbits);
19030 +
19031 +       pR->pData = krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX].crp_p;
19032 +       BITS_TO_BYTES(pR->dataLenInBytes,
19033 +                     krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX].
19034 +                     crp_nbits);
19035 +
19036 +       lacStatus = cpaCyDsaSignRS(CPA_INSTANCE_HANDLE_SINGLE,
19037 +                                  icp_ocfDrvDsaRSSignCallBack,
19038 +                                  callbackTag, dsaRsSignOpData,
19039 +                                  &protocolStatus, pR, pS);
19040 +
19041 +       if (CPA_STATUS_SUCCESS != lacStatus) {
19042 +               EPRINTK("%s(): DSA RS Sign Operation failed (%d).\n",
19043 +                       __FUNCTION__, lacStatus);
19044 +               krp->krp_status = ECANCELED;
19045 +               icp_ocfDrvFreeFlatBuffer(pS);
19046 +               icp_ocfDrvFreeFlatBuffer(pR);
19047 +               kmem_cache_free(drvDSARSSignKValue_zone,
19048 +                               dsaRsSignOpData->K.pData);
19049 +               kmem_cache_free(drvDSARSSign_zone, dsaRsSignOpData);
19050 +       }
19051 +
19052 +       return lacStatus;
19053 +}
19054 +
19055 +/* Name        : icp_ocfDrvDsaVerify
19056 + *
19057 + * Description : This function will map DSA RS Verify from OCF to the LAC API.
19058 + *
19059 + */
19060 +static int icp_ocfDrvDsaVerify(struct cryptkop *krp)
19061 +{
19062 +       CpaStatus lacStatus = CPA_STATUS_SUCCESS;
19063 +       CpaCyDsaVerifyOpData *dsaVerifyOpData = NULL;
19064 +       void *callbackTag = NULL;
19065 +       CpaBoolean verifyStatus = CPA_FALSE;
19066 +
19067 +       callbackTag = krp;
19068 +
19069 +       dsaVerifyOpData = kmem_cache_zalloc(drvDSAVerify_zone, GFP_KERNEL);
19070 +       if (NULL == dsaVerifyOpData) {
19071 +               APRINTK("%s():Failed to get memory"
19072 +                       " for DSA Verify Op data struct\n", __FUNCTION__);
19073 +               krp->krp_status = ENOMEM;
19074 +               return ENOMEM;
19075 +       }
19076 +
19077 +       /* Link parameters */
19078 +       dsaVerifyOpData->P.pData =
19079 +           krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX].crp_p;
19080 +       BITS_TO_BYTES(dsaVerifyOpData->P.dataLenInBytes,
19081 +                     krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX].
19082 +                     crp_nbits);
19083 +       icp_ocfDrvSwapBytes(dsaVerifyOpData->P.pData,
19084 +                           dsaVerifyOpData->P.dataLenInBytes);
19085 +
19086 +       dsaVerifyOpData->Q.pData =
19087 +           krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX].crp_p;
19088 +       BITS_TO_BYTES(dsaVerifyOpData->Q.dataLenInBytes,
19089 +                     krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX].
19090 +                     crp_nbits);
19091 +       icp_ocfDrvSwapBytes(dsaVerifyOpData->Q.pData,
19092 +                           dsaVerifyOpData->Q.dataLenInBytes);
19093 +
19094 +       dsaVerifyOpData->G.pData =
19095 +           krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_G_INDEX].crp_p;
19096 +       BITS_TO_BYTES(dsaVerifyOpData->G.dataLenInBytes,
19097 +                     krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_G_INDEX].
19098 +                     crp_nbits);
19099 +       icp_ocfDrvSwapBytes(dsaVerifyOpData->G.pData,
19100 +                           dsaVerifyOpData->G.dataLenInBytes);
19101 +
19102 +       dsaVerifyOpData->Y.pData =
19103 +           krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX].crp_p;
19104 +       BITS_TO_BYTES(dsaVerifyOpData->Y.dataLenInBytes,
19105 +                     krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX].
19106 +                     crp_nbits);
19107 +       icp_ocfDrvSwapBytes(dsaVerifyOpData->Y.pData,
19108 +                           dsaVerifyOpData->Y.dataLenInBytes);
19109 +
19110 +       dsaVerifyOpData->M.pData =
19111 +           krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX].crp_p;
19112 +       BITS_TO_BYTES(dsaVerifyOpData->M.dataLenInBytes,
19113 +                     krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX].
19114 +                     crp_nbits);
19115 +       icp_ocfDrvSwapBytes(dsaVerifyOpData->M.pData,
19116 +                           dsaVerifyOpData->M.dataLenInBytes);
19117 +
19118 +       dsaVerifyOpData->R.pData =
19119 +           krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX].crp_p;
19120 +       BITS_TO_BYTES(dsaVerifyOpData->R.dataLenInBytes,
19121 +                     krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX].
19122 +                     crp_nbits);
19123 +       icp_ocfDrvSwapBytes(dsaVerifyOpData->R.pData,
19124 +                           dsaVerifyOpData->R.dataLenInBytes);
19125 +
19126 +       dsaVerifyOpData->S.pData =
19127 +           krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX].crp_p;
19128 +       BITS_TO_BYTES(dsaVerifyOpData->S.dataLenInBytes,
19129 +                     krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX].
19130 +                     crp_nbits);
19131 +       icp_ocfDrvSwapBytes(dsaVerifyOpData->S.pData,
19132 +                           dsaVerifyOpData->S.dataLenInBytes);
19133 +
19134 +       lacStatus = cpaCyDsaVerify(CPA_INSTANCE_HANDLE_SINGLE,
19135 +                                  icp_ocfDrvDsaVerifyCallBack,
19136 +                                  callbackTag, dsaVerifyOpData, &verifyStatus);
19137 +
19138 +       if (CPA_STATUS_SUCCESS != lacStatus) {
19139 +               EPRINTK("%s(): DSA Verify Operation failed (%d).\n",
19140 +                       __FUNCTION__, lacStatus);
19141 +               kmem_cache_free(drvDSAVerify_zone, dsaVerifyOpData);
19142 +               krp->krp_status = ECANCELED;
19143 +       }
19144 +
19145 +       return lacStatus;
19146 +}
19147 +
19148 +/* Name        : icp_ocfDrvReadRandom
19149 + *
19150 + * Description : This function will map RNG functionality calls from OCF
19151 + * to the LAC API.
19152 + */
19153 +int icp_ocfDrvReadRandom(void *arg, uint32_t * buf, int maxwords)
19154 +{
19155 +       CpaStatus lacStatus = CPA_STATUS_SUCCESS;
19156 +       CpaCyRandGenOpData randGenOpData;
19157 +       CpaFlatBuffer randData;
19158 +
19159 +       if (NULL == buf) {
19160 +               APRINTK("%s(): Invalid input parameters\n", __FUNCTION__);
19161 +               return EINVAL;
19162 +       }
19163 +
19164 +       /* maxwords here is number of integers to generate data for */
19165 +       randGenOpData.generateBits = CPA_TRUE;
19166 +
19167 +       randGenOpData.lenInBytes = maxwords * sizeof(uint32_t);
19168 +
19169 +       icp_ocfDrvPtrAndLenToFlatBuffer((Cpa8U *) buf,
19170 +                                       randGenOpData.lenInBytes, &randData);
19171 +
19172 +       lacStatus = cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
19173 +                                NULL, NULL, &randGenOpData, &randData);
19174 +       if (CPA_STATUS_SUCCESS != lacStatus) {
19175 +               EPRINTK("%s(): icp_LacSymRandGen failed (%d). \n",
19176 +                       __FUNCTION__, lacStatus);
19177 +               return RETURN_RAND_NUM_GEN_FAILED;
19178 +       }
19179 +
19180 +       return randGenOpData.lenInBytes / sizeof(uint32_t);
19181 +}
19182 +
19183 +/* Name        : icp_ocfDrvDhP1Callback
19184 + *
19185 + * Description : When this function returns it signifies that the LAC
19186 + * component has completed the DH operation.
19187 + */
19188 +static void
19189 +icp_ocfDrvDhP1CallBack(void *callbackTag,
19190 +                      CpaStatus status,
19191 +                      void *pOpData, CpaFlatBuffer * pLocalOctetStringPV)
19192 +{
19193 +       struct cryptkop *krp = NULL;
19194 +       CpaCyDhPhase1KeyGenOpData *pPhase1OpData = NULL;
19195 +
19196 +       if (NULL == callbackTag) {
19197 +               DPRINTK("%s(): Invalid input parameters - "
19198 +                       "callbackTag data is NULL\n", __FUNCTION__);
19199 +               return;
19200 +       }
19201 +       krp = (struct cryptkop *)callbackTag;
19202 +
19203 +       if (NULL == pOpData) {
19204 +               DPRINTK("%s(): Invalid input parameters - "
19205 +                       "Operation Data is NULL\n", __FUNCTION__);
19206 +               krp->krp_status = ECANCELED;
19207 +               crypto_kdone(krp);
19208 +               return;
19209 +       }
19210 +       pPhase1OpData = (CpaCyDhPhase1KeyGenOpData *) pOpData;
19211 +
19212 +       if (NULL == pLocalOctetStringPV) {
19213 +               DPRINTK("%s(): Invalid input parameters - "
19214 +                       "pLocalOctetStringPV Data is NULL\n", __FUNCTION__);
19215 +               memset(pPhase1OpData, 0, sizeof(CpaCyDhPhase1KeyGenOpData));
19216 +               kmem_cache_free(drvDH_zone, pPhase1OpData);
19217 +               krp->krp_status = ECANCELED;
19218 +               crypto_kdone(krp);
19219 +               return;
19220 +       }
19221 +
19222 +       if (CPA_STATUS_SUCCESS == status) {
19223 +               krp->krp_status = CRYPTO_OP_SUCCESS;
19224 +       } else {
19225 +               APRINTK("%s(): Diffie Hellman Phase1 Key Gen failed - "
19226 +                       "Operation Status = %d\n", __FUNCTION__, status);
19227 +               krp->krp_status = ECANCELED;
19228 +       }
19229 +
19230 +       icp_ocfDrvSwapBytes(pLocalOctetStringPV->pData,
19231 +                           pLocalOctetStringPV->dataLenInBytes);
19232 +
19233 +       icp_ocfDrvFreeFlatBuffer(pLocalOctetStringPV);
19234 +       memset(pPhase1OpData, 0, sizeof(CpaCyDhPhase1KeyGenOpData));
19235 +       kmem_cache_free(drvDH_zone, pPhase1OpData);
19236 +
19237 +       crypto_kdone(krp);
19238 +
19239 +       return;
19240 +}
19241 +
19242 +/* Name        : icp_ocfDrvModExpCallBack
19243 + *
19244 + * Description : When this function returns it signifies that the LAC
19245 + * component has completed the Mod Exp operation.
19246 + */
19247 +static void
19248 +icp_ocfDrvModExpCallBack(void *callbackTag,
19249 +                        CpaStatus status,
19250 +                        void *pOpdata, CpaFlatBuffer * pResult)
19251 +{
19252 +       struct cryptkop *krp = NULL;
19253 +       CpaCyLnModExpOpData *pLnModExpOpData = NULL;
19254 +
19255 +       if (NULL == callbackTag) {
19256 +               DPRINTK("%s(): Invalid input parameters - "
19257 +                       "callbackTag data is NULL\n", __FUNCTION__);
19258 +               return;
19259 +       }
19260 +       krp = (struct cryptkop *)callbackTag;
19261 +
19262 +       if (NULL == pOpdata) {
19263 +               DPRINTK("%s(): Invalid Mod Exp input parameters - "
19264 +                       "Operation Data is NULL\n", __FUNCTION__);
19265 +               krp->krp_status = ECANCELED;
19266 +               crypto_kdone(krp);
19267 +               return;
19268 +       }
19269 +       pLnModExpOpData = (CpaCyLnModExpOpData *) pOpdata;
19270 +
19271 +       if (NULL == pResult) {
19272 +               DPRINTK("%s(): Invalid input parameters - "
19273 +                       "pResult data is NULL\n", __FUNCTION__);
19274 +               krp->krp_status = ECANCELED;
19275 +               memset(pLnModExpOpData, 0, sizeof(CpaCyLnModExpOpData));
19276 +               kmem_cache_free(drvLnModExp_zone, pLnModExpOpData);
19277 +               crypto_kdone(krp);
19278 +               return;
19279 +       }
19280 +
19281 +       if (CPA_STATUS_SUCCESS == status) {
19282 +               krp->krp_status = CRYPTO_OP_SUCCESS;
19283 +       } else {
19284 +               APRINTK("%s(): LAC Mod Exp Operation failed - "
19285 +                       "Operation Status = %d\n", __FUNCTION__, status);
19286 +               krp->krp_status = ECANCELED;
19287 +       }
19288 +
19289 +       icp_ocfDrvSwapBytes(pResult->pData, pResult->dataLenInBytes);
19290 +
19291 +       /*switch base size value back to original */
19292 +       if (pLnModExpOpData->base.pData ==
19293 +           (uint8_t *) & (krp->
19294 +                          krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
19295 +                          crp_nbits)) {
19296 +               *((uint32_t *) pLnModExpOpData->base.pData) =
19297 +                   ntohl(*((uint32_t *) pLnModExpOpData->base.pData));
19298 +       }
19299 +       icp_ocfDrvFreeFlatBuffer(pResult);
19300 +       memset(pLnModExpOpData, 0, sizeof(CpaCyLnModExpOpData));
19301 +       kmem_cache_free(drvLnModExp_zone, pLnModExpOpData);
19302 +
19303 +       crypto_kdone(krp);
19304 +
19305 +       return;
19306 +
19307 +}
19308 +
19309 +/* Name        : icp_ocfDrvModExpCRTCallBack
19310 + *
19311 + * Description : When this function returns it signifies that the LAC
19312 + * component has completed the Mod Exp CRT operation.
19313 + */
19314 +static void
19315 +icp_ocfDrvModExpCRTCallBack(void *callbackTag,
19316 +                           CpaStatus status,
19317 +                           void *pOpData, CpaFlatBuffer * pOutputData)
19318 +{
19319 +       struct cryptkop *krp = NULL;
19320 +       CpaCyRsaDecryptOpData *pDecryptData = NULL;
19321 +
19322 +       if (NULL == callbackTag) {
19323 +               DPRINTK("%s(): Invalid input parameters - "
19324 +                       "callbackTag data is NULL\n", __FUNCTION__);
19325 +               return;
19326 +       }
19327 +
19328 +       krp = (struct cryptkop *)callbackTag;
19329 +
19330 +       if (NULL == pOpData) {
19331 +               DPRINTK("%s(): Invalid input parameters - "
19332 +                       "Operation Data is NULL\n", __FUNCTION__);
19333 +               krp->krp_status = ECANCELED;
19334 +               crypto_kdone(krp);
19335 +               return;
19336 +       }
19337 +       pDecryptData = (CpaCyRsaDecryptOpData *) pOpData;
19338 +
19339 +       if (NULL == pOutputData) {
19340 +               DPRINTK("%s(): Invalid input parameter - "
19341 +                       "pOutputData is NULL\n", __FUNCTION__);
19342 +               memset(pDecryptData->pRecipientPrivateKey, 0,
19343 +                      sizeof(CpaCyRsaPrivateKey));
19344 +               kmem_cache_free(drvRSAPrivateKey_zone,
19345 +                               pDecryptData->pRecipientPrivateKey);
19346 +               memset(pDecryptData, 0, sizeof(CpaCyRsaDecryptOpData));
19347 +               kmem_cache_free(drvRSADecrypt_zone, pDecryptData);
19348 +               krp->krp_status = ECANCELED;
19349 +               crypto_kdone(krp);
19350 +               return;
19351 +       }
19352 +
19353 +       if (CPA_STATUS_SUCCESS == status) {
19354 +               krp->krp_status = CRYPTO_OP_SUCCESS;
19355 +       } else {
19356 +               APRINTK("%s(): LAC Mod Exp CRT operation failed - "
19357 +                       "Operation Status = %d\n", __FUNCTION__, status);
19358 +               krp->krp_status = ECANCELED;
19359 +       }
19360 +
19361 +       icp_ocfDrvSwapBytes(pOutputData->pData, pOutputData->dataLenInBytes);
19362 +
19363 +       icp_ocfDrvFreeFlatBuffer(pOutputData);
19364 +       memset(pDecryptData->pRecipientPrivateKey, 0,
19365 +              sizeof(CpaCyRsaPrivateKey));
19366 +       kmem_cache_free(drvRSAPrivateKey_zone,
19367 +                       pDecryptData->pRecipientPrivateKey);
19368 +       memset(pDecryptData, 0, sizeof(CpaCyRsaDecryptOpData));
19369 +       kmem_cache_free(drvRSADecrypt_zone, pDecryptData);
19370 +
19371 +       crypto_kdone(krp);
19372 +
19373 +       return;
19374 +}
19375 +
19376 +/* Name        : icp_ocfDrvDsaRSSignCallBack
19377 + *
19378 + * Description : When this function returns it signifies that the LAC
19379 + * component has completed the DSA RS sign operation.
19380 + */
19381 +static void
19382 +icp_ocfDrvDsaRSSignCallBack(void *callbackTag,
19383 +                           CpaStatus status,
19384 +                           void *pOpData,
19385 +                           CpaBoolean protocolStatus,
19386 +                           CpaFlatBuffer * pR, CpaFlatBuffer * pS)
19387 +{
19388 +       struct cryptkop *krp = NULL;
19389 +       CpaCyDsaRSSignOpData *pSignData = NULL;
19390 +
19391 +       if (NULL == callbackTag) {
19392 +               DPRINTK("%s(): Invalid input parameters - "
19393 +                       "callbackTag data is NULL\n", __FUNCTION__);
19394 +               return;
19395 +       }
19396 +
19397 +       krp = (struct cryptkop *)callbackTag;
19398 +
19399 +       if (NULL == pOpData) {
19400 +               DPRINTK("%s(): Invalid input parameters - "
19401 +                       "Operation Data is NULL\n", __FUNCTION__);
19402 +               krp->krp_status = ECANCELED;
19403 +               crypto_kdone(krp);
19404 +               return;
19405 +       }
19406 +       pSignData = (CpaCyDsaRSSignOpData *) pOpData;
19407 +
19408 +       if (NULL == pR) {
19409 +               DPRINTK("%s(): Invalid input parameter - "
19410 +                       "pR sign is NULL\n", __FUNCTION__);
19411 +               icp_ocfDrvFreeFlatBuffer(pS);
19412 +               kmem_cache_free(drvDSARSSign_zone, pSignData);
19413 +               krp->krp_status = ECANCELED;
19414 +               crypto_kdone(krp);
19415 +               return;
19416 +       }
19417 +
19418 +       if (NULL == pS) {
19419 +               DPRINTK("%s(): Invalid input parameter - "
19420 +                       "pS sign is NULL\n", __FUNCTION__);
19421 +               icp_ocfDrvFreeFlatBuffer(pR);
19422 +               kmem_cache_free(drvDSARSSign_zone, pSignData);
19423 +               krp->krp_status = ECANCELED;
19424 +               crypto_kdone(krp);
19425 +               return;
19426 +       }
19427 +
19428 +       if (CPA_STATUS_SUCCESS != status) {
19429 +               APRINTK("%s(): LAC DSA RS Sign operation failed - "
19430 +                       "Operation Status = %d\n", __FUNCTION__, status);
19431 +               krp->krp_status = ECANCELED;
19432 +       } else {
19433 +               krp->krp_status = CRYPTO_OP_SUCCESS;
19434 +
19435 +               if (CPA_TRUE != protocolStatus) {
19436 +                       DPRINTK("%s(): LAC DSA RS Sign operation failed due "
19437 +                               "to protocol error\n", __FUNCTION__);
19438 +                       krp->krp_status = EIO;
19439 +               }
19440 +       }
19441 +
19442 +       /* Swap bytes only when the callback status is successful and
19443 +          protocolStatus is set to true */
19444 +       if (CPA_STATUS_SUCCESS == status && CPA_TRUE == protocolStatus) {
19445 +               icp_ocfDrvSwapBytes(pR->pData, pR->dataLenInBytes);
19446 +               icp_ocfDrvSwapBytes(pS->pData, pS->dataLenInBytes);
19447 +       }
19448 +
19449 +       icp_ocfDrvFreeFlatBuffer(pR);
19450 +       icp_ocfDrvFreeFlatBuffer(pS);
19451 +       memset(pSignData->K.pData, 0, pSignData->K.dataLenInBytes);
19452 +       kmem_cache_free(drvDSARSSignKValue_zone, pSignData->K.pData);
19453 +       memset(pSignData, 0, sizeof(CpaCyDsaRSSignOpData));
19454 +       kmem_cache_free(drvDSARSSign_zone, pSignData);
19455 +       crypto_kdone(krp);
19456 +
19457 +       return;
19458 +}
19459 +
19460 +/* Name        : icp_ocfDrvDsaVerifyCallback
19461 + *
19462 + * Description : When this function returns it signifies that the LAC
19463 + * component has completed the DSA Verify operation.
19464 + */
19465 +static void
19466 +icp_ocfDrvDsaVerifyCallBack(void *callbackTag,
19467 +                           CpaStatus status,
19468 +                           void *pOpData, CpaBoolean verifyStatus)
19469 +{
19470 +
19471 +       struct cryptkop *krp = NULL;
19472 +       CpaCyDsaVerifyOpData *pVerData = NULL;
19473 +
19474 +       if (NULL == callbackTag) {
19475 +               DPRINTK("%s(): Invalid input parameters - "
19476 +                       "callbackTag data is NULL\n", __FUNCTION__);
19477 +               return;
19478 +       }
19479 +
19480 +       krp = (struct cryptkop *)callbackTag;
19481 +
19482 +       if (NULL == pOpData) {
19483 +               DPRINTK("%s(): Invalid input parameters - "
19484 +                       "Operation Data is NULL\n", __FUNCTION__);
19485 +               krp->krp_status = ECANCELED;
19486 +               crypto_kdone(krp);
19487 +               return;
19488 +       }
19489 +       pVerData = (CpaCyDsaVerifyOpData *) pOpData;
19490 +
19491 +       if (CPA_STATUS_SUCCESS != status) {
19492 +               APRINTK("%s(): LAC DSA Verify operation failed - "
19493 +                       "Operation Status = %d\n", __FUNCTION__, status);
19494 +               krp->krp_status = ECANCELED;
19495 +       } else {
19496 +               krp->krp_status = CRYPTO_OP_SUCCESS;
19497 +
19498 +               if (CPA_TRUE != verifyStatus) {
19499 +                       DPRINTK("%s(): DSA signature invalid\n", __FUNCTION__);
19500 +                       krp->krp_status = EIO;
19501 +               }
19502 +       }
19503 +
19504 +       /* Swap bytes only when the callback status is successful and
19505 +          verifyStatus is set to true */
19506 +       /*Just swapping back the key values for now. Possibly all
19507 +          swapped buffers need to be reverted */
19508 +       if (CPA_STATUS_SUCCESS == status && CPA_TRUE == verifyStatus) {
19509 +               icp_ocfDrvSwapBytes(pVerData->R.pData,
19510 +                                   pVerData->R.dataLenInBytes);
19511 +               icp_ocfDrvSwapBytes(pVerData->S.pData,
19512 +                                   pVerData->S.dataLenInBytes);
19513 +       }
19514 +
19515 +       memset(pVerData, 0, sizeof(CpaCyDsaVerifyOpData));
19516 +       kmem_cache_free(drvDSAVerify_zone, pVerData);
19517 +       crypto_kdone(krp);
19518 +
19519 +       return;
19520 +}
19521 --- /dev/null
19522 +++ b/crypto/ocf/ep80579/icp_common.c
19523 @@ -0,0 +1,891 @@
19524 +/***************************************************************************
19525 + *
19526 + * This file is provided under a dual BSD/GPLv2 license.  When using or 
19527 + *   redistributing this file, you may do so under either license.
19528 + * 
19529 + *   GPL LICENSE SUMMARY
19530 + * 
19531 + *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
19532 + * 
19533 + *   This program is free software; you can redistribute it and/or modify 
19534 + *   it under the terms of version 2 of the GNU General Public License as
19535 + *   published by the Free Software Foundation.
19536 + * 
19537 + *   This program is distributed in the hope that it will be useful, but 
19538 + *   WITHOUT ANY WARRANTY; without even the implied warranty of 
19539 + *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
19540 + *   General Public License for more details.
19541 + * 
19542 + *   You should have received a copy of the GNU General Public License 
19543 + *   along with this program; if not, write to the Free Software 
19544 + *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19545 + *   The full GNU General Public License is included in this distribution 
19546 + *   in the file called LICENSE.GPL.
19547 + * 
19548 + *   Contact Information:
19549 + *   Intel Corporation
19550 + * 
19551 + *   BSD LICENSE 
19552 + * 
19553 + *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
19554 + *   All rights reserved.
19555 + * 
19556 + *   Redistribution and use in source and binary forms, with or without 
19557 + *   modification, are permitted provided that the following conditions 
19558 + *   are met:
19559 + * 
19560 + *     * Redistributions of source code must retain the above copyright 
19561 + *       notice, this list of conditions and the following disclaimer.
19562 + *     * Redistributions in binary form must reproduce the above copyright 
19563 + *       notice, this list of conditions and the following disclaimer in 
19564 + *       the documentation and/or other materials provided with the 
19565 + *       distribution.
19566 + *     * Neither the name of Intel Corporation nor the names of its 
19567 + *       contributors may be used to endorse or promote products derived 
19568 + *       from this software without specific prior written permission.
19569 + * 
19570 + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
19571 + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
19572 + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
19573 + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
19574 + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
19575 + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
19576 + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
19577 + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
19578 + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
19579 + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
19580 + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19581 + * 
19582 + * 
19583 + *  version: Security.L.1.0.130
19584 + *
19585 + ***************************************************************************/
19586 +
19587 +/*
19588 + * An OCF module that uses Intel® QuickAssist Integrated Accelerator to do the 
19589 + * crypto.
19590 + *
19591 + * This driver requires the ICP Access Library that is available from Intel in
19592 + * order to operate.
19593 + */
19594 +
19595 +#include "icp_ocf.h"
19596 +
19597 +#define ICP_OCF_COMP_NAME                      "ICP_OCF"
19598 +#define ICP_OCF_VER_MAIN                       (2)
19599 +#define ICP_OCF_VER_MJR                                (0)
19600 +#define ICP_OCF_VER_MNR                        (0)
19601 +
19602 +#define MAX_DEREG_RETRIES                      (100)
19603 +#define DEFAULT_DEREG_RETRIES                  (10)
19604 +#define DEFAULT_DEREG_DELAY_IN_JIFFIES         (10)
19605 +
19606 +/* This defines the maximum number of sessions possible between OCF
19607 +   and the OCF Tolapai Driver. If set to zero, there is no limit. */
19608 +#define DEFAULT_OCF_TO_DRV_MAX_SESSION_COUNT   (0)
19609 +#define NUM_SUPPORTED_CAPABILITIES             (21)
19610 +
19611 +/*Slabs zones*/
19612 +struct kmem_cache *drvSessionData_zone = NULL;
19613 +struct kmem_cache *drvOpData_zone = NULL;
19614 +struct kmem_cache *drvDH_zone = NULL;
19615 +struct kmem_cache *drvLnModExp_zone = NULL;
19616 +struct kmem_cache *drvRSADecrypt_zone = NULL;
19617 +struct kmem_cache *drvRSAPrivateKey_zone = NULL;
19618 +struct kmem_cache *drvDSARSSign_zone = NULL;
19619 +struct kmem_cache *drvDSARSSignKValue_zone = NULL;
19620 +struct kmem_cache *drvDSAVerify_zone = NULL;
19621 +
19622 +/*Slab zones for flatbuffers and bufferlist*/
19623 +struct kmem_cache *drvFlatBuffer_zone = NULL;
19624 +
19625 +static int icp_ocfDrvInit(void);
19626 +static void icp_ocfDrvExit(void);
19627 +static void icp_ocfDrvFreeCaches(void);
19628 +static void icp_ocfDrvDeferedFreeLacSessionProcess(void *arg);
19629 +
19630 +int32_t icp_ocfDrvDriverId = INVALID_DRIVER_ID;
19631 +
19632 +/* Module parameter - gives the number of times LAC deregistration shall be
19633 +   re-tried */
19634 +int num_dereg_retries = DEFAULT_DEREG_RETRIES;
19635 +
19636 +/* Module parameter - gives the delay time in jiffies before a LAC session 
19637 +   shall be attempted to be deregistered again */
19638 +int dereg_retry_delay_in_jiffies = DEFAULT_DEREG_DELAY_IN_JIFFIES;
19639 +
19640 +/* Module parameter - gives the maximum number of sessions possible between
19641 +   OCF and the OCF Tolapai Driver. If set to zero, there is no limit.*/
19642 +int max_sessions = DEFAULT_OCF_TO_DRV_MAX_SESSION_COUNT;
19643 +
19644 +/* This is set when the module is removed from the system, no further
19645 +   processing can take place if this is set */
19646 +atomic_t icp_ocfDrvIsExiting = ATOMIC_INIT(0);
19647 +
19648 +/* This is used to show how many lac sessions were not deregistered*/
19649 +atomic_t lac_session_failed_dereg_count = ATOMIC_INIT(0);
19650 +
19651 +/* This is used to track the number of registered sessions between OCF and
19652 + * and the OCF Tolapai driver, when max_session is set to value other than
19653 + * zero. This ensures that the max_session set for the OCF and the driver
19654 + * is equal to the LAC registered sessions */
19655 +atomic_t num_ocf_to_drv_registered_sessions = ATOMIC_INIT(0);
19656 +
19657 +/* Head of linked list used to store session data */
19658 +struct list_head icp_ocfDrvGlobalSymListHead;
19659 +struct list_head icp_ocfDrvGlobalSymListHead_FreeMemList;
19660 +
19661 +spinlock_t icp_ocfDrvSymSessInfoListSpinlock = SPIN_LOCK_UNLOCKED;
19662 +rwlock_t icp_kmem_cache_destroy_alloc_lock = RW_LOCK_UNLOCKED;
19663 +
19664 +struct workqueue_struct *icp_ocfDrvFreeLacSessionWorkQ;
19665 +
19666 +struct icp_drvBuffListInfo defBuffListInfo;
19667 +
19668 +static struct {
19669 +       softc_device_decl sc_dev;
19670 +} icpDev;
19671 +
19672 +static device_method_t icp_methods = {
19673 +       /* crypto device methods */
19674 +       DEVMETHOD(cryptodev_newsession, icp_ocfDrvNewSession),
19675 +       DEVMETHOD(cryptodev_freesession, icp_ocfDrvFreeLACSession),
19676 +       DEVMETHOD(cryptodev_process, icp_ocfDrvSymProcess),
19677 +       DEVMETHOD(cryptodev_kprocess, icp_ocfDrvPkeProcess),
19678 +};
19679 +
19680 +module_param(num_dereg_retries, int, S_IRUGO);
19681 +module_param(dereg_retry_delay_in_jiffies, int, S_IRUGO);
19682 +module_param(max_sessions, int, S_IRUGO);
19683 +
19684 +MODULE_PARM_DESC(num_dereg_retries,
19685 +                "Number of times to retry LAC Sym Session Deregistration. "
19686 +                "Default 10, Max 100");
19687 +MODULE_PARM_DESC(dereg_retry_delay_in_jiffies, "Delay in jiffies "
19688 +                "(added to a schedule() function call) before a LAC Sym "
19689 +                "Session Dereg is retried. Default 10");
19690 +MODULE_PARM_DESC(max_sessions, "This sets the maximum number of sessions "
19691 +                "between OCF and this driver. If this value is set to zero, "
19692 +                "max session count checking is disabled. Default is zero(0)");
19693 +
19694 +/* Name        : icp_ocfDrvInit
19695 + *
19696 + * Description : This function will register all the symmetric and asymmetric
19697 + * functionality that will be accelerated by the hardware. It will also
19698 + * get a unique driver ID from the OCF and initialise all slab caches
19699 + */
19700 +static int __init icp_ocfDrvInit(void)
19701 +{
19702 +       int ocfStatus = 0;
19703 +
19704 +       IPRINTK("=== %s ver %d.%d.%d ===\n", ICP_OCF_COMP_NAME,
19705 +               ICP_OCF_VER_MAIN, ICP_OCF_VER_MJR, ICP_OCF_VER_MNR);
19706 +
19707 +       if (MAX_DEREG_RETRIES < num_dereg_retries) {
19708 +               EPRINTK("Session deregistration retry count set to greater "
19709 +                       "than %d", MAX_DEREG_RETRIES);
19710 +               return -1;
19711 +       }
19712 +
19713 +       /* Initialize and Start the Cryptographic component */
19714 +       if (CPA_STATUS_SUCCESS !=
19715 +           cpaCyStartInstance(CPA_INSTANCE_HANDLE_SINGLE)) {
19716 +               EPRINTK("Failed to initialize and start the instance "
19717 +                       "of the Cryptographic component.\n");
19718 +               return -1;
19719 +       }
19720 +
19721 +       /* Set the default size of BufferList to allocate */
19722 +       memset(&defBuffListInfo, 0, sizeof(struct icp_drvBuffListInfo));
19723 +       if (ICP_OCF_DRV_STATUS_SUCCESS !=
19724 +           icp_ocfDrvBufferListMemInfo(ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS,
19725 +                                       &defBuffListInfo)) {
19726 +               EPRINTK("Failed to get bufferlist memory info.\n");
19727 +               return -1;
19728 +       }
19729 +
19730 +       /*Register OCF Tolapai Driver with OCF */
19731 +       memset(&icpDev, 0, sizeof(icpDev));
19732 +       softc_device_init(&icpDev, "icp", 0, icp_methods);
19733 +
19734 +       icp_ocfDrvDriverId = crypto_get_driverid(softc_get_device(&icpDev),
19735 +                                                CRYPTOCAP_F_HARDWARE);
19736 +
19737 +       if (icp_ocfDrvDriverId < 0) {
19738 +               EPRINTK("%s : ICP driver failed to register with OCF!\n",
19739 +                       __FUNCTION__);
19740 +               return -ENODEV;
19741 +       }
19742 +
19743 +       /*Create all the slab caches used by the OCF Tolapai Driver */
19744 +       drvSessionData_zone =
19745 +           ICP_CACHE_CREATE("ICP Session Data", struct icp_drvSessionData);
19746 +       ICP_CACHE_NULL_CHECK(drvSessionData_zone);
19747 +
19748 +       /* 
19749 +        * Allocation of the OpData includes the allocation space for meta data.
19750 +        * The memory after the opData structure is reserved for this meta data.
19751 +        */
19752 +       drvOpData_zone =
19753 +           kmem_cache_create("ICP Op Data", sizeof(struct icp_drvOpData) +
19754 +                   defBuffListInfo.metaSize ,0, SLAB_HWCACHE_ALIGN, NULL, NULL);
19755 +
19756 +
19757 +       ICP_CACHE_NULL_CHECK(drvOpData_zone);
19758 +
19759 +       drvDH_zone = ICP_CACHE_CREATE("ICP DH data", CpaCyDhPhase1KeyGenOpData);
19760 +       ICP_CACHE_NULL_CHECK(drvDH_zone);
19761 +
19762 +       drvLnModExp_zone =
19763 +           ICP_CACHE_CREATE("ICP ModExp data", CpaCyLnModExpOpData);
19764 +       ICP_CACHE_NULL_CHECK(drvLnModExp_zone);
19765 +
19766 +       drvRSADecrypt_zone =
19767 +           ICP_CACHE_CREATE("ICP RSA decrypt data", CpaCyRsaDecryptOpData);
19768 +       ICP_CACHE_NULL_CHECK(drvRSADecrypt_zone);
19769 +
19770 +       drvRSAPrivateKey_zone =
19771 +           ICP_CACHE_CREATE("ICP RSA private key data", CpaCyRsaPrivateKey);
19772 +       ICP_CACHE_NULL_CHECK(drvRSAPrivateKey_zone);
19773 +
19774 +       drvDSARSSign_zone =
19775 +           ICP_CACHE_CREATE("ICP DSA Sign", CpaCyDsaRSSignOpData);
19776 +       ICP_CACHE_NULL_CHECK(drvDSARSSign_zone);
19777 +
19778 +       /*too awkward to use a macro here */
19779 +       drvDSARSSignKValue_zone =
19780 +           kmem_cache_create("ICP DSA Sign Rand Val",
19781 +                             DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES, 0,
19782 +                             SLAB_HWCACHE_ALIGN, NULL, NULL);
19783 +       ICP_CACHE_NULL_CHECK(drvDSARSSignKValue_zone);
19784 +
19785 +       drvDSAVerify_zone =
19786 +           ICP_CACHE_CREATE("ICP DSA Verify", CpaCyDsaVerifyOpData);
19787 +       ICP_CACHE_NULL_CHECK(drvDSAVerify_zone);
19788 +
19789 +       drvFlatBuffer_zone =
19790 +           ICP_CACHE_CREATE("ICP Flat Buffers", CpaFlatBuffer);
19791 +       ICP_CACHE_NULL_CHECK(drvFlatBuffer_zone);
19792 +
19793 +       /* Register the ICP symmetric crypto support. */
19794 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_NULL_CBC);
19795 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_DES_CBC);
19796 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_3DES_CBC);
19797 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_AES_CBC);
19798 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_ARC4);
19799 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_MD5);
19800 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_MD5_HMAC);
19801 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA1);
19802 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA1_HMAC);
19803 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_256);
19804 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_256_HMAC);
19805 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_384);
19806 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_384_HMAC);
19807 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_512);
19808 +       ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(CRYPTO_SHA2_512_HMAC);
19809 +
19810 +       /* Register the ICP asymmetric algorithm support */
19811 +       ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_DH_COMPUTE_KEY);
19812 +       ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_MOD_EXP);
19813 +       ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_MOD_EXP_CRT);
19814 +       ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_DSA_SIGN);
19815 +       ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(CRK_DSA_VERIFY);
19816 +
19817 +       /* Register the ICP random number generator support */
19818 +       if (OCF_REGISTRATION_STATUS_SUCCESS ==
19819 +           crypto_rregister(icp_ocfDrvDriverId, icp_ocfDrvReadRandom, NULL)) {
19820 +               ocfStatus++;
19821 +       }
19822 +
19823 +       if (OCF_ZERO_FUNCTIONALITY_REGISTERED == ocfStatus) {
19824 +               DPRINTK("%s: Failed to register any device capabilities\n",
19825 +                       __FUNCTION__);
19826 +               icp_ocfDrvFreeCaches();
19827 +               icp_ocfDrvDriverId = INVALID_DRIVER_ID;
19828 +               return -ECANCELED;
19829 +       }
19830 +
19831 +       DPRINTK("%s: Registered %d of %d device capabilities\n",
19832 +               __FUNCTION__, ocfStatus, NUM_SUPPORTED_CAPABILITIES);
19833 +
19834 +/*Session data linked list used during module exit*/
19835 +       INIT_LIST_HEAD(&icp_ocfDrvGlobalSymListHead);
19836 +       INIT_LIST_HEAD(&icp_ocfDrvGlobalSymListHead_FreeMemList);
19837 +
19838 +       icp_ocfDrvFreeLacSessionWorkQ =
19839 +           create_singlethread_workqueue("ocfLacDeregWorkQueue");
19840 +
19841 +       return 0;
19842 +}
19843 +
19844 +/* Name        : icp_ocfDrvExit
19845 + *
19846 + * Description : This function will deregister all the symmetric sessions
19847 + * registered with the LAC component. It will also deregister all symmetric
19848 + * and asymmetric functionality that can be accelerated by the hardware via OCF
19849 + * and random number generation if it is enabled.
19850 + */
19851 +static void icp_ocfDrvExit(void)
19852 +{
19853 +       CpaStatus lacStatus = CPA_STATUS_SUCCESS;
19854 +       struct icp_drvSessionData *sessionData = NULL;
19855 +       struct icp_drvSessionData *tempSessionData = NULL;
19856 +       int i, remaining_delay_time_in_jiffies = 0;
19857 +       /* There is a possibility of a process or new session command being   */
19858 +       /* sent before this variable is incremented. The aim of this variable */
19859 +       /* is to stop a loop of calls creating a deadlock situation which     */
19860 +       /* would prevent the driver from exiting.                             */
19861 +
19862 +       atomic_inc(&icp_ocfDrvIsExiting);
19863 +
19864 +       /*Existing sessions will be routed to another driver after these calls */
19865 +       crypto_unregister_all(icp_ocfDrvDriverId);
19866 +       crypto_runregister_all(icp_ocfDrvDriverId);
19867 +
19868 +       /*If any sessions are waiting to be deregistered, do that. This also 
19869 +          flushes the work queue */
19870 +       destroy_workqueue(icp_ocfDrvFreeLacSessionWorkQ);
19871 +
19872 +       /*ENTER CRITICAL SECTION */
19873 +       spin_lock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
19874 +       list_for_each_entry_safe(tempSessionData, sessionData,
19875 +                                &icp_ocfDrvGlobalSymListHead, listNode) {
19876 +               for (i = 0; i < num_dereg_retries; i++) {
19877 +                       /*No harm if bad input - LAC will handle error cases */
19878 +                       if (ICP_SESSION_RUNNING == tempSessionData->inUse) {
19879 +                               lacStatus =
19880 +                                   cpaCySymRemoveSession
19881 +                                   (CPA_INSTANCE_HANDLE_SINGLE,
19882 +                                    tempSessionData->sessHandle);
19883 +                               if (CPA_STATUS_SUCCESS == lacStatus) {
19884 +                                       /* Successfully deregistered */
19885 +                                       break;
19886 +                               } else if (CPA_STATUS_RETRY != lacStatus) {
19887 +                                       atomic_inc
19888 +                                           (&lac_session_failed_dereg_count);
19889 +                                       break;
19890 +                               }
19891 +
19892 +                               /*schedule_timeout returns the time left for completion if 
19893 +                                * this task is set to TASK_INTERRUPTIBLE */
19894 +                               remaining_delay_time_in_jiffies =
19895 +                                   dereg_retry_delay_in_jiffies;
19896 +                               while (0 > remaining_delay_time_in_jiffies) {
19897 +                                       remaining_delay_time_in_jiffies =
19898 +                                           schedule_timeout
19899 +                                           (remaining_delay_time_in_jiffies);
19900 +                               }
19901 +
19902 +                               DPRINTK
19903 +                                   ("%s(): Retry %d to deregistrate the session\n",
19904 +                                    __FUNCTION__, i);
19905 +                       }
19906 +               }
19907 +
19908 +               /*remove from current list */
19909 +               list_del(&(tempSessionData->listNode));
19910 +               /*add to free mem linked list */
19911 +               list_add(&(tempSessionData->listNode),
19912 +                        &icp_ocfDrvGlobalSymListHead_FreeMemList);
19913 +
19914 +       }
19915 +
19916 +       /*EXIT CRITICAL SECTION */
19917 +       spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
19918 +
19919 +       /*set back to initial values */
19920 +       sessionData = NULL;
19921 +       /*still have a reference in our list! */
19922 +       tempSessionData = NULL;
19923 +       /*free memory */
19924 +       list_for_each_entry_safe(tempSessionData, sessionData,
19925 +                                &icp_ocfDrvGlobalSymListHead_FreeMemList,
19926 +                                listNode) {
19927 +
19928 +               list_del(&(tempSessionData->listNode));
19929 +               /* Free allocated CpaCySymSessionCtx */
19930 +               if (NULL != tempSessionData->sessHandle) {
19931 +                       kfree(tempSessionData->sessHandle);
19932 +               }
19933 +               memset(tempSessionData, 0, sizeof(struct icp_drvSessionData));
19934 +               kmem_cache_free(drvSessionData_zone, tempSessionData);
19935 +       }
19936 +
19937 +       if (0 != atomic_read(&lac_session_failed_dereg_count)) {
19938 +               DPRINTK("%s(): %d LAC sessions were not deregistered "
19939 +                       "correctly. This is not a clean exit! \n",
19940 +                       __FUNCTION__,
19941 +                       atomic_read(&lac_session_failed_dereg_count));
19942 +       }
19943 +
19944 +       icp_ocfDrvFreeCaches();
19945 +       icp_ocfDrvDriverId = INVALID_DRIVER_ID;
19946 +
19947 +       /* Shutdown the Cryptographic component */
19948 +       lacStatus = cpaCyStopInstance(CPA_INSTANCE_HANDLE_SINGLE);
19949 +       if (CPA_STATUS_SUCCESS != lacStatus) {
19950 +               DPRINTK("%s(): Failed to stop instance of the "
19951 +                       "Cryptographic component.(status == %d)\n",
19952 +                       __FUNCTION__, lacStatus);
19953 +       }
19954 +
19955 +}
19956 +
19957 +/* Name        : icp_ocfDrvFreeCaches
19958 + *
19959 + * Description : This function deregisters all slab caches
19960 + */
19961 +static void icp_ocfDrvFreeCaches(void)
19962 +{
19963 +       if (atomic_read(&icp_ocfDrvIsExiting) != CPA_TRUE) {
19964 +               atomic_set(&icp_ocfDrvIsExiting, 1);
19965 +       }
19966 +
19967 +       /*Sym Zones */
19968 +       ICP_CACHE_DESTROY(drvSessionData_zone);
19969 +       ICP_CACHE_DESTROY(drvOpData_zone);
19970 +
19971 +       /*Asym zones */
19972 +       ICP_CACHE_DESTROY(drvDH_zone);
19973 +       ICP_CACHE_DESTROY(drvLnModExp_zone);
19974 +       ICP_CACHE_DESTROY(drvRSADecrypt_zone);
19975 +       ICP_CACHE_DESTROY(drvRSAPrivateKey_zone);
19976 +       ICP_CACHE_DESTROY(drvDSARSSignKValue_zone);
19977 +       ICP_CACHE_DESTROY(drvDSARSSign_zone);
19978 +       ICP_CACHE_DESTROY(drvDSAVerify_zone);
19979 +
19980 +       /*FlatBuffer and BufferList Zones */
19981 +       ICP_CACHE_DESTROY(drvFlatBuffer_zone);
19982 +
19983 +}
19984 +
19985 +/* Name        : icp_ocfDrvDeregRetry
19986 + *
19987 + * Description : This function will try to farm the session deregistration
19988 + * off to a work queue. If it fails, nothing more can be done and it
19989 + * returns an error
19990 + */
19991 +
19992 +int icp_ocfDrvDeregRetry(CpaCySymSessionCtx sessionToDeregister)
19993 +{
19994 +       struct icp_ocfDrvFreeLacSession *workstore = NULL;
19995 +
19996 +       DPRINTK("%s(): Retry - Deregistering session (%p)\n",
19997 +               __FUNCTION__, sessionToDeregister);
19998 +
19999 +       /*make sure the session is not available to be allocated during this
20000 +          process */
20001 +       atomic_inc(&lac_session_failed_dereg_count);
20002 +
20003 +       /*Farm off to work queue */
20004 +       workstore =
20005 +           kmalloc(sizeof(struct icp_ocfDrvFreeLacSession), GFP_ATOMIC);
20006 +       if (NULL == workstore) {
20007 +               DPRINTK("%s(): unable to free session - no memory available "
20008 +                       "for work queue\n", __FUNCTION__);
20009 +               return ENOMEM;
20010 +       }
20011 +
20012 +       workstore->sessionToDeregister = sessionToDeregister;
20013 +
20014 +       INIT_WORK(&(workstore->work), icp_ocfDrvDeferedFreeLacSessionProcess,
20015 +                 workstore);
20016 +       queue_work(icp_ocfDrvFreeLacSessionWorkQ, &(workstore->work));
20017 +
20018 +       return ICP_OCF_DRV_STATUS_SUCCESS;
20019 +
20020 +}
20021 +
20022 +/* Name        : icp_ocfDrvDeferedFreeLacSessionProcess
20023 + *
20024 + * Description : This function will retry (module input parameter)
20025 + * 'num_dereg_retries' times to deregister any symmetric session that receives a
20026 + * CPA_STATUS_RETRY message from the LAC component. This function is run in
20027 + * Thread context because it is called from a worker thread
20028 + */
20029 +static void icp_ocfDrvDeferedFreeLacSessionProcess(void *arg)
20030 +{
20031 +       struct icp_ocfDrvFreeLacSession *workstore = NULL;
20032 +       CpaCySymSessionCtx sessionToDeregister = NULL;
20033 +       int i = 0;
20034 +       int remaining_delay_time_in_jiffies = 0;
20035 +       CpaStatus lacStatus = CPA_STATUS_SUCCESS;
20036 +
20037 +       workstore = (struct icp_ocfDrvFreeLacSession *)arg;
20038 +       if (NULL == workstore) {
20039 +               DPRINTK("%s() function called with null parameter \n",
20040 +                       __FUNCTION__);
20041 +               return;
20042 +       }
20043 +
20044 +       sessionToDeregister = workstore->sessionToDeregister;
20045 +       kfree(workstore);
20046 +
20047 +       /*if exiting, give deregistration one more blast only */
20048 +       if (atomic_read(&icp_ocfDrvIsExiting) == CPA_TRUE) {
20049 +               lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
20050 +                                                 sessionToDeregister);
20051 +
20052 +               if (lacStatus != CPA_STATUS_SUCCESS) {
20053 +                       DPRINTK("%s() Failed to Dereg LAC session %p "
20054 +                               "during module exit\n", __FUNCTION__,
20055 +                               sessionToDeregister);
20056 +                       return;
20057 +               }
20058 +
20059 +               atomic_dec(&lac_session_failed_dereg_count);
20060 +               return;
20061 +       }
20062 +
20063 +       for (i = 0; i <= num_dereg_retries; i++) {
20064 +               lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
20065 +                                                 sessionToDeregister);
20066 +
20067 +               if (lacStatus == CPA_STATUS_SUCCESS) {
20068 +                       atomic_dec(&lac_session_failed_dereg_count);
20069 +                       return;
20070 +               }
20071 +               if (lacStatus != CPA_STATUS_RETRY) {
20072 +                       DPRINTK("%s() Failed to deregister session - lacStatus "
20073 +                               " = %d", __FUNCTION__, lacStatus);
20074 +                       break;
20075 +               }
20076 +
20077 +               /*schedule_timeout returns the time left for completion if this
20078 +                  task is set to TASK_INTERRUPTIBLE */
20079 +               remaining_delay_time_in_jiffies = dereg_retry_delay_in_jiffies;
20080 +               while (0 > remaining_delay_time_in_jiffies) {
20081 +                       remaining_delay_time_in_jiffies =
20082 +                           schedule_timeout(remaining_delay_time_in_jiffies);
20083 +               }
20084 +
20085 +       }
20086 +
20087 +       DPRINTK("%s(): Unable to deregister session\n", __FUNCTION__);
20088 +       DPRINTK("%s(): Number of unavailable LAC sessions = %d\n", __FUNCTION__,
20089 +               atomic_read(&lac_session_failed_dereg_count));
20090 +}
20091 +
20092 +/* Name        : icp_ocfDrvPtrAndLenToFlatBuffer 
20093 + *
20094 + * Description : This function converts a "pointer and length" buffer 
20095 + * structure to Fredericksburg Flat Buffer (CpaFlatBuffer) format.
20096 + *
20097 + * This function assumes that the data passed in are valid.
20098 + */
20099 +inline void
20100 +icp_ocfDrvPtrAndLenToFlatBuffer(void *pData, uint32_t len,
20101 +                               CpaFlatBuffer * pFlatBuffer)
20102 +{
20103 +       pFlatBuffer->pData = pData;
20104 +       pFlatBuffer->dataLenInBytes = len;
20105 +}
20106 +
20107 +/* Name        : icp_ocfDrvSingleSkBuffToFlatBuffer 
20108 + *
20109 + * Description : This function converts a single socket buffer (sk_buff)
20110 + * structure to a Fredericksburg Flat Buffer (CpaFlatBuffer) format.
20111 + *
20112 + * This function assumes that the data passed in are valid.
20113 + */
20114 +static inline void
20115 +icp_ocfDrvSingleSkBuffToFlatBuffer(struct sk_buff *pSkb,
20116 +                                  CpaFlatBuffer * pFlatBuffer)
20117 +{
20118 +       pFlatBuffer->pData = pSkb->data;
20119 +       pFlatBuffer->dataLenInBytes = skb_headlen(pSkb);
20120 +}
20121 +
20122 +/* Name        : icp_ocfDrvSkBuffToBufferList 
20123 + *
20124 + * Description : This function converts a socket buffer (sk_buff) structure to
20125 + * Fredericksburg Scatter/Gather (CpaBufferList) buffer format.
20126 + *
20127 + * This function assumes that the bufferlist has been allocated with the correct
20128 + * number of buffer arrays.
20129 + * 
20130 + */
20131 +inline int
20132 +icp_ocfDrvSkBuffToBufferList(struct sk_buff *pSkb, CpaBufferList * bufferList)
20133 +{
20134 +       CpaFlatBuffer *curFlatBuffer = NULL;
20135 +       char *skbuffPageAddr = NULL;
20136 +       struct sk_buff *pCurFrag = NULL;
20137 +       struct skb_shared_info *pShInfo = NULL;
20138 +       uint32_t page_offset = 0, i = 0;
20139 +
20140 +       DPRINTK("%s(): Entry Point\n", __FUNCTION__);
20141 +
20142 +       /*
20143 +        * In all cases, the first skb needs to be translated to FlatBuffer.
20144 +        * Perform a buffer translation for the first skbuff
20145 +        */
20146 +       curFlatBuffer = bufferList->pBuffers;
20147 +       icp_ocfDrvSingleSkBuffToFlatBuffer(pSkb, curFlatBuffer);
20148 +
20149 +       /* Set the userData to point to the original sk_buff */
20150 +       bufferList->pUserData = (void *)pSkb;
20151 +
20152 +       /* We now know we'll have at least one element in the SGL */
20153 +       bufferList->numBuffers = 1;
20154 +
20155 +       if (0 == skb_is_nonlinear(pSkb)) {
20156 +               /* Is a linear buffer - therefore it's a single skbuff */
20157 +               DPRINTK("%s(): Exit Point\n", __FUNCTION__);
20158 +               return ICP_OCF_DRV_STATUS_SUCCESS;
20159 +       }
20160 +
20161 +       curFlatBuffer++;
20162 +       pShInfo = skb_shinfo(pSkb);
20163 +       if (pShInfo->frag_list != NULL && pShInfo->nr_frags != 0) {
20164 +               EPRINTK("%s():"
20165 +                       "Translation for a combination of frag_list "
20166 +                       "and frags[] array not supported!\n", __FUNCTION__);
20167 +               return ICP_OCF_DRV_STATUS_FAIL;
20168 +       } else if (pShInfo->frag_list != NULL) {
20169 +               /*
20170 +                * Non linear skbuff supported through frag_list 
20171 +                * Perform translation for each fragment (sk_buff)
20172 +                * in the frag_list of the first sk_buff.
20173 +                */
20174 +               for (pCurFrag = pShInfo->frag_list;
20175 +                    pCurFrag != NULL; pCurFrag = pCurFrag->next) {
20176 +                       icp_ocfDrvSingleSkBuffToFlatBuffer(pCurFrag,
20177 +                                                          curFlatBuffer);
20178 +                       curFlatBuffer++;
20179 +                       bufferList->numBuffers++;
20180 +               }
20181 +       } else if (pShInfo->nr_frags != 0) {
20182 +               /*
20183 +                * Perform translation for each fragment in frags array
20184 +                * and add to the BufferList
20185 +                */
20186 +               for (i = 0; i < pShInfo->nr_frags; i++) {
20187 +                       /* Get the page address and offset of this frag */
20188 +                       skbuffPageAddr = (char *)pShInfo->frags[i].page;
20189 +                       page_offset = pShInfo->frags[i].page_offset;
20190 +
20191 +                       /* Convert a pointer and length to a flat buffer */
20192 +                       icp_ocfDrvPtrAndLenToFlatBuffer(skbuffPageAddr +
20193 +                                                       page_offset,
20194 +                                                       pShInfo->frags[i].size,
20195 +                                                       curFlatBuffer);
20196 +                       curFlatBuffer++;
20197 +                       bufferList->numBuffers++;
20198 +               }
20199 +       } else {
20200 +               EPRINTK("%s():" "Could not recognize skbuff fragments!\n",
20201 +                       __FUNCTION__);
20202 +               return ICP_OCF_DRV_STATUS_FAIL;
20203 +       }
20204 +
20205 +       DPRINTK("%s(): Exit Point\n", __FUNCTION__);
20206 +       return ICP_OCF_DRV_STATUS_SUCCESS;
20207 +}
20208 +
20209 +/* Name        : icp_ocfDrvBufferListToSkBuff 
20210 + *
20211 + * Description : This function converts a Fredericksburg Scatter/Gather 
20212 + * (CpaBufferList) buffer format to socket buffer structure.
20213 + */
20214 +inline int
20215 +icp_ocfDrvBufferListToSkBuff(CpaBufferList * bufferList, struct sk_buff **skb)
20216 +{
20217 +       DPRINTK("%s(): Entry Point\n", __FUNCTION__);
20218 +
20219 +       /* Retrieve the original skbuff */
20220 +       *skb = (struct sk_buff *)bufferList->pUserData;
20221 +       if (NULL == *skb) {
20222 +               EPRINTK("%s():"
20223 +                       "Error on converting from a BufferList. "
20224 +                       "The BufferList does not contain an sk_buff.\n",
20225 +                       __FUNCTION__);
20226 +               return ICP_OCF_DRV_STATUS_FAIL;
20227 +       }
20228 +       DPRINTK("%s(): Exit Point\n", __FUNCTION__);
20229 +       return ICP_OCF_DRV_STATUS_SUCCESS;
20230 +}
20231 +
20232 +/* Name        : icp_ocfDrvPtrAndLenToBufferList
20233 + *
20234 + * Description : This function converts a "pointer and length" buffer
20235 + * structure to Fredericksburg Scatter/Gather Buffer (CpaBufferList) format.
20236 + *
20237 + * This function assumes that the data passed in are valid.
20238 + */
20239 +inline void
20240 +icp_ocfDrvPtrAndLenToBufferList(void *pDataIn, uint32_t length,
20241 +                               CpaBufferList * pBufferList)
20242 +{
20243 +       pBufferList->numBuffers = 1;
20244 +       pBufferList->pBuffers->pData = pDataIn;
20245 +       pBufferList->pBuffers->dataLenInBytes = length;
20246 +}
20247 +
20248 +/* Name        : icp_ocfDrvBufferListToPtrAndLen
20249 + *
20250 + * Description : This function converts Fredericksburg Scatter/Gather Buffer
20251 + * (CpaBufferList) format to a "pointer and length" buffer structure.
20252 + *
20253 + * This function assumes that the data passed in are valid.
20254 + */
20255 +inline void
20256 +icp_ocfDrvBufferListToPtrAndLen(CpaBufferList * pBufferList,
20257 +                               void **ppDataOut, uint32_t * pLength)
20258 +{
20259 +       *ppDataOut = pBufferList->pBuffers->pData;
20260 +       *pLength = pBufferList->pBuffers->dataLenInBytes;
20261 +}
20262 +
20263 +/* Name        : icp_ocfDrvBufferListMemInfo
20264 + *
20265 + * Description : This function will set the number of flat buffers in 
20266 + * bufferlist, the size of memory to allocate for the pPrivateMetaData 
20267 + * member of the CpaBufferList.
20268 + */
20269 +int
20270 +icp_ocfDrvBufferListMemInfo(uint16_t numBuffers,
20271 +                           struct icp_drvBuffListInfo *buffListInfo)
20272 +{
20273 +       buffListInfo->numBuffers = numBuffers;
20274 +
20275 +       if (CPA_STATUS_SUCCESS !=
20276 +           cpaCyBufferListGetMetaSize(CPA_INSTANCE_HANDLE_SINGLE,
20277 +                                      buffListInfo->numBuffers,
20278 +                                      &(buffListInfo->metaSize))) {
20279 +               EPRINTK("%s() Failed to get buffer list meta size.\n",
20280 +                       __FUNCTION__);
20281 +               return ICP_OCF_DRV_STATUS_FAIL;
20282 +       }
20283 +
20284 +       return ICP_OCF_DRV_STATUS_SUCCESS;
20285 +}
20286 +
20287 +/* Name        : icp_ocfDrvGetSkBuffFrags
20288 + *
20289 + * Description : This function will determine the number of 
20290 + * fragments in a socket buffer(sk_buff).
20291 + */
20292 +inline uint16_t icp_ocfDrvGetSkBuffFrags(struct sk_buff * pSkb)
20293 +{
20294 +       uint16_t numFrags = 0;
20295 +       struct sk_buff *pCurFrag = NULL;
20296 +       struct skb_shared_info *pShInfo = NULL;
20297 +
20298 +       if (NULL == pSkb)
20299 +               return 0;
20300 +
20301 +       numFrags = 1;
20302 +       if (0 == skb_is_nonlinear(pSkb)) {
20303 +               /* Linear buffer - it's a single skbuff */
20304 +               return numFrags;
20305 +       }
20306 +
20307 +       pShInfo = skb_shinfo(pSkb);
20308 +       if (NULL != pShInfo->frag_list && 0 != pShInfo->nr_frags) {
20309 +               EPRINTK("%s(): Combination of frag_list "
20310 +                       "and frags[] array not supported!\n", __FUNCTION__);
20311 +               return 0;
20312 +       } else if (0 != pShInfo->nr_frags) {
20313 +               numFrags += pShInfo->nr_frags;
20314 +               return numFrags;
20315 +       } else if (NULL != pShInfo->frag_list) {
20316 +               for (pCurFrag = pShInfo->frag_list;
20317 +                    pCurFrag != NULL; pCurFrag = pCurFrag->next) {
20318 +                       numFrags++;
20319 +               }
20320 +               return numFrags;
20321 +       } else {
20322 +               return 0;
20323 +       }
20324 +}
20325 +
20326 +/* Name        : icp_ocfDrvFreeFlatBuffer
20327 + *
20328 + * Description : This function will deallocate flat buffer.
20329 + */
20330 +inline void icp_ocfDrvFreeFlatBuffer(CpaFlatBuffer * pFlatBuffer)
20331 +{
20332 +       if (pFlatBuffer != NULL) {
20333 +               memset(pFlatBuffer, 0, sizeof(CpaFlatBuffer));
20334 +               kmem_cache_free(drvFlatBuffer_zone, pFlatBuffer);
20335 +       }
20336 +}
20337 +
20338 +/* Name        : icp_ocfDrvAllocMetaData
20339 + *
20340 + * Description : This function will allocate memory for the
20341 + * pPrivateMetaData member of CpaBufferList.
20342 + */
20343 +inline int
20344 +icp_ocfDrvAllocMetaData(CpaBufferList * pBufferList,
20345 +        const struct icp_drvOpData *pOpData)
20346 +{
20347 +       Cpa32U metaSize = 0;
20348 +
20349 +       if (pBufferList->numBuffers <= ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS){
20350 +           void *pOpDataStartAddr = (void *)pOpData;
20351 +
20352 +           if (0 == defBuffListInfo.metaSize) {
20353 +                       pBufferList->pPrivateMetaData = NULL;
20354 +                       return ICP_OCF_DRV_STATUS_SUCCESS;
20355 +               }
20356 +               /*
20357 +                * The meta data allocation has been included as part of the 
20358 +                * op data.  It has been pre-allocated in memory just after the
20359 +                * icp_drvOpData structure.
20360 +                */
20361 +               pBufferList->pPrivateMetaData = pOpDataStartAddr +
20362 +                       sizeof(struct icp_drvOpData);
20363 +       } else {
20364 +               if (CPA_STATUS_SUCCESS !=
20365 +                   cpaCyBufferListGetMetaSize(CPA_INSTANCE_HANDLE_SINGLE,
20366 +                                              pBufferList->numBuffers,
20367 +                                              &metaSize)) {
20368 +                       EPRINTK("%s() Failed to get buffer list meta size.\n",
20369 +                               __FUNCTION__);
20370 +                       return ICP_OCF_DRV_STATUS_FAIL;
20371 +               }
20372 +
20373 +               if (0 == metaSize) {
20374 +                       pBufferList->pPrivateMetaData = NULL;
20375 +                       return ICP_OCF_DRV_STATUS_SUCCESS;
20376 +               }
20377 +
20378 +               pBufferList->pPrivateMetaData = kmalloc(metaSize, GFP_ATOMIC);
20379 +       }
20380 +       if (NULL == pBufferList->pPrivateMetaData) {
20381 +               EPRINTK("%s() Failed to allocate pPrivateMetaData.\n",
20382 +                       __FUNCTION__);
20383 +               return ICP_OCF_DRV_STATUS_FAIL;
20384 +       }
20385 +
20386 +       return ICP_OCF_DRV_STATUS_SUCCESS;
20387 +}
20388 +
20389 +/* Name        : icp_ocfDrvFreeMetaData
20390 + *
20391 + * Description : This function will deallocate pPrivateMetaData memory.
20392 + */
20393 +inline void icp_ocfDrvFreeMetaData(CpaBufferList * pBufferList)
20394 +{
20395 +       if (NULL == pBufferList->pPrivateMetaData) {
20396 +               return;
20397 +       }
20398 +
20399 +       /*
20400 +        * Only free the meta data if the BufferList has more than 
20401 +        * ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS number of buffers.
20402 +        * Otherwise, the meta data shall be freed when the icp_drvOpData is
20403 +        * freed.
20404 +        */
20405 +       if (ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS < pBufferList->numBuffers){
20406 +               kfree(pBufferList->pPrivateMetaData);
20407 +       }
20408 +}
20409 +
20410 +module_init(icp_ocfDrvInit);
20411 +module_exit(icp_ocfDrvExit);
20412 +MODULE_LICENSE("Dual BSD/GPL");
20413 +MODULE_AUTHOR("Intel");
20414 +MODULE_DESCRIPTION("OCF Driver for Intel Quick Assist crypto acceleration");
20415 --- /dev/null
20416 +++ b/crypto/ocf/ep80579/icp_ocf.h
20417 @@ -0,0 +1,363 @@
20418 +/***************************************************************************
20419 + *
20420 + * This file is provided under a dual BSD/GPLv2 license.  When using or 
20421 + *   redistributing this file, you may do so under either license.
20422 + * 
20423 + *   GPL LICENSE SUMMARY
20424 + * 
20425 + *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
20426 + * 
20427 + *   This program is free software; you can redistribute it and/or modify 
20428 + *   it under the terms of version 2 of the GNU General Public License as
20429 + *   published by the Free Software Foundation.
20430 + * 
20431 + *   This program is distributed in the hope that it will be useful, but 
20432 + *   WITHOUT ANY WARRANTY; without even the implied warranty of 
20433 + *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
20434 + *   General Public License for more details.
20435 + * 
20436 + *   You should have received a copy of the GNU General Public License 
20437 + *   along with this program; if not, write to the Free Software 
20438 + *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20439 + *   The full GNU General Public License is included in this distribution 
20440 + *   in the file called LICENSE.GPL.
20441 + * 
20442 + *   Contact Information:
20443 + *   Intel Corporation
20444 + * 
20445 + *   BSD LICENSE 
20446 + * 
20447 + *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
20448 + *   All rights reserved.
20449 + * 
20450 + *   Redistribution and use in source and binary forms, with or without 
20451 + *   modification, are permitted provided that the following conditions 
20452 + *   are met:
20453 + * 
20454 + *     * Redistributions of source code must retain the above copyright 
20455 + *       notice, this list of conditions and the following disclaimer.
20456 + *     * Redistributions in binary form must reproduce the above copyright 
20457 + *       notice, this list of conditions and the following disclaimer in 
20458 + *       the documentation and/or other materials provided with the 
20459 + *       distribution.
20460 + *     * Neither the name of Intel Corporation nor the names of its 
20461 + *       contributors may be used to endorse or promote products derived 
20462 + *       from this software without specific prior written permission.
20463 + * 
20464 + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
20465 + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
20466 + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
20467 + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
20468 + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
20469 + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
20470 + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
20471 + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
20472 + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
20473 + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
20474 + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
20475 + * 
20476 + * 
20477 + *  version: Security.L.1.0.130
20478 + *
20479 + ***************************************************************************/
20480 +
20481 +/*
20482 + * OCF drv driver header file for the Intel ICP processor.
20483 + */
20484 +
20485 +#ifndef ICP_OCF_H
20486 +#define ICP_OCF_H
20487 +
20488 +#include <linux/crypto.h>
20489 +#include <linux/delay.h>
20490 +#include <linux/skbuff.h>
20491 +
20492 +#include "cryptodev.h"
20493 +#include "uio.h"
20494 +
20495 +#include "cpa.h"
20496 +#include "cpa_cy_im.h"
20497 +#include "cpa_cy_sym.h"
20498 +#include "cpa_cy_rand.h"
20499 +#include "cpa_cy_dh.h"
20500 +#include "cpa_cy_rsa.h"
20501 +#include "cpa_cy_ln.h"
20502 +#include "cpa_cy_common.h"
20503 +#include "cpa_cy_dsa.h"
20504 +
20505 +#define NUM_BITS_IN_BYTE (8)
20506 +#define NUM_BITS_IN_BYTE_MINUS_ONE (NUM_BITS_IN_BYTE -1)
20507 +#define INVALID_DRIVER_ID (-1)
20508 +#define RETURN_RAND_NUM_GEN_FAILED (-1)
20509 +
20510 +/*This define means that only one operation can be chained to another
20511 +(resulting in one chain of two operations)*/
20512 +#define MAX_NUM_OF_CHAINED_OPS (1)
20513 +/*This is the max block cipher initialisation vector*/
20514 +#define MAX_IV_LEN_IN_BYTES (20)
20515 +/*This is used to check whether the OCF to this driver session limit has
20516 +  been disabled*/
20517 +#define NO_OCF_TO_DRV_MAX_SESSIONS             (0)
20518 +
20519 +/*OCF values mapped here*/
20520 +#define ICP_SHA1_DIGEST_SIZE_IN_BYTES          (SHA1_HASH_LEN)
20521 +#define ICP_SHA256_DIGEST_SIZE_IN_BYTES        (SHA2_256_HASH_LEN)
20522 +#define ICP_SHA384_DIGEST_SIZE_IN_BYTES        (SHA2_384_HASH_LEN)
20523 +#define ICP_SHA512_DIGEST_SIZE_IN_BYTES        (SHA2_512_HASH_LEN)
20524 +#define ICP_MD5_DIGEST_SIZE_IN_BYTES           (MD5_HASH_LEN)
20525 +#define ARC4_COUNTER_LEN                       (ARC4_BLOCK_LEN)
20526 +
20527 +#define OCF_REGISTRATION_STATUS_SUCCESS        (0)
20528 +#define OCF_ZERO_FUNCTIONALITY_REGISTERED      (0)
20529 +#define ICP_OCF_DRV_NO_CRYPTO_PROCESS_ERROR    (0)
20530 +#define ICP_OCF_DRV_STATUS_SUCCESS             (0)
20531 +#define ICP_OCF_DRV_STATUS_FAIL                (1)
20532 +
20533 +/*Turn on/off debug options*/
20534 +#define ICP_OCF_PRINT_DEBUG_MESSAGES           (0)
20535 +#define ICP_OCF_PRINT_KERN_ALERT               (1)
20536 +#define ICP_OCF_PRINT_KERN_ERRS                        (1)
20537 +
20538 +/*DSA Prime Q size in bytes (as defined in the standard) */
20539 +#define DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES       (20)
20540 +
20541 +/*MACRO DEFINITIONS*/
20542 +
20543 +#define BITS_TO_BYTES(bytes, bits)                                     \
20544 +       bytes = (bits + NUM_BITS_IN_BYTE_MINUS_ONE) / NUM_BITS_IN_BYTE
20545 +
20546 +#define ICP_CACHE_CREATE(cache_ID, cache_name)                                 \
20547 +       kmem_cache_create(cache_ID, sizeof(cache_name),0,               \
20548 +               SLAB_HWCACHE_ALIGN, NULL, NULL);
20549 +
20550 +#define ICP_CACHE_NULL_CHECK(slab_zone)                                        \
20551 +{                                                                      \
20552 +       if(NULL == slab_zone){                                          \
20553 +               icp_ocfDrvFreeCaches();                                 \
20554 +               EPRINTK("%s() line %d: Not enough memory!\n",           \
20555 +                       __FUNCTION__, __LINE__);                        \
20556 +               return ENOMEM;                                          \
20557 +       }                                                               \
20558 +}
20559 +
20560 +#define ICP_CACHE_DESTROY(slab_zone)                                   \
20561 +{                                                                       \
20562 +        if(NULL != slab_zone){                                         \
20563 +                kmem_cache_destroy(slab_zone);                         \
20564 +                slab_zone = NULL;                                      \
20565 +        }                                                              \
20566 +}
20567 +
20568 +#define ICP_REGISTER_SYM_FUNCTIONALITY_WITH_OCF(alg)                   \
20569 +{                                                                      \
20570 +       if(OCF_REGISTRATION_STATUS_SUCCESS ==                           \
20571 +               crypto_register(icp_ocfDrvDriverId,                     \
20572 +                                   alg,                                \
20573 +                                   0,                                  \
20574 +                                   0)) {                               \
20575 +               ocfStatus++;                                            \
20576 +       }                                                               \
20577 +}
20578 +
20579 +#define ICP_REGISTER_ASYM_FUNCTIONALITY_WITH_OCF(alg)                  \
20580 +{                                                                      \
20581 +       if(OCF_REGISTRATION_STATUS_SUCCESS ==                           \
20582 +               crypto_kregister(icp_ocfDrvDriverId,                    \
20583 +                                     alg,                              \
20584 +                                     0)){                              \
20585 +               ocfStatus++;                                            \
20586 +       }                                                               \
20587 +}
20588 +
20589 +#if ICP_OCF_PRINT_DEBUG_MESSAGES == 1
20590 +#define DPRINTK(args...)      \
20591 +{                            \
20592 +                printk(args); \
20593 +}
20594 +
20595 +#else                          //ICP_OCF_PRINT_DEBUG_MESSAGES == 1
20596 +
20597 +#define DPRINTK(args...)
20598 +
20599 +#endif                         //ICP_OCF_PRINT_DEBUG_MESSAGES == 1
20600 +
20601 +#if ICP_OCF_PRINT_KERN_ALERT == 1
20602 +#define APRINTK(args...)                                               \
20603 +{                                                                      \
20604 +       printk(KERN_ALERT args);                                                \
20605 +}
20606 +
20607 +#else                          //ICP_OCF_PRINT_KERN_ALERT == 1
20608 +
20609 +#define APRINTK(args...)
20610 +
20611 +#endif                         //ICP_OCF_PRINT_KERN_ALERT == 1
20612 +
20613 +#if ICP_OCF_PRINT_KERN_ERRS == 1
20614 +#define EPRINTK(args...)      \
20615 +{                            \
20616 +       printk(KERN_ERR args); \
20617 +}
20618 +
20619 +#else                          //ICP_OCF_PRINT_KERN_ERRS == 1
20620 +
20621 +#define EPRINTK(args...)
20622 +
20623 +#endif                         //ICP_OCF_PRINT_KERN_ERRS == 1
20624 +
20625 +#define IPRINTK(args...)      \
20626 +{                            \
20627 +      printk(KERN_INFO args); \
20628 +}
20629 +
20630 +/*END OF MACRO DEFINITIONS*/
20631 +
20632 +typedef enum {
20633 +       ICP_OCF_DRV_ALG_CIPHER = 0,
20634 +       ICP_OCF_DRV_ALG_HASH
20635 +} icp_ocf_drv_alg_type_t;
20636 +
20637 +/* These are all defined in icp_common.c */
20638 +extern atomic_t lac_session_failed_dereg_count;
20639 +extern atomic_t icp_ocfDrvIsExiting;
20640 +extern atomic_t num_ocf_to_drv_registered_sessions;
20641 +
20642 +/*These are user inputs used in icp_sym.c and icp_common.c
20643 +  They are instantiated in icp_common.c*/
20644 +extern int max_sessions;
20645 +
20646 +extern int32_t icp_ocfDrvDriverId;
20647 +extern struct list_head icp_ocfDrvGlobalSymListHead;
20648 +extern struct list_head icp_ocfDrvGlobalSymListHead_FreeMemList;
20649 +extern struct workqueue_struct *icp_ocfDrvFreeLacSessionWorkQ;
20650 +extern spinlock_t icp_ocfDrvSymSessInfoListSpinlock;
20651 +extern rwlock_t icp_kmem_cache_destroy_alloc_lock;
20652 +
20653 +/*Slab zones for symmetric functionality, instantiated in icp_common.c*/
20654 +extern struct kmem_cache *drvSessionData_zone;
20655 +extern struct kmem_cache *drvOpData_zone;
20656 +
20657 +/*Slab zones for asymmetric functionality, instantiated in icp_common.c*/
20658 +extern struct kmem_cache *drvDH_zone;
20659 +extern struct kmem_cache *drvLnModExp_zone;
20660 +extern struct kmem_cache *drvRSADecrypt_zone;
20661 +extern struct kmem_cache *drvRSAPrivateKey_zone;
20662 +extern struct kmem_cache *drvDSARSSign_zone;
20663 +extern struct kmem_cache *drvDSARSSignKValue_zone;
20664 +extern struct kmem_cache *drvDSAVerify_zone;
20665 +
20666 +/*Slab zones for flatbuffers and bufferlist*/
20667 +extern struct kmem_cache *drvFlatBuffer_zone;
20668 +
20669 +#define ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS     (16)
20670 +
20671 +struct icp_drvBuffListInfo {
20672 +       Cpa16U numBuffers;
20673 +       Cpa32U metaSize;
20674 +       Cpa32U metaOffset;
20675 +       Cpa32U buffListSize;
20676 +};
20677 +extern struct icp_drvBuffListInfo defBuffListInfo;
20678 +
20679 +/*
20680 +* This struct is used to keep a reference to the relevant node in the list
20681 +* of sessionData structs, to the buffer type required by OCF and to the OCF
20682 +* provided crp struct that needs to be returned. All this info is needed in
20683 +* the callback function.
20684 +*
20685 +* IV can sometimes be stored in non-contiguous memory (e.g. skbuff
20686 +* linked/frag list), therefore a contiguous memory space for the IV data must be
20687 +* created and passed to LAC
20688 +*
20689 +*/
20690 +struct icp_drvOpData {
20691 +       CpaCySymOpData lacOpData;
20692 +       uint32_t digestSizeInBytes;
20693 +       struct cryptop *crp;
20694 +       uint8_t bufferType;
20695 +       uint8_t ivData[MAX_IV_LEN_IN_BYTES];
20696 +       uint16_t numBufferListArray;
20697 +       CpaBufferList srcBuffer;
20698 +       CpaFlatBuffer bufferListArray[ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS];
20699 +       CpaBoolean verifyResult;
20700 +};
20701 +/*Values used to derisk chances of performs being called against
20702 +deregistered sessions (for which the slab page has been reclaimed)
20703 +This is not a fix - since page frames are reclaimed from a slab, one cannot
20704 +rely on that memory not being re-used by another app.*/
20705 +typedef enum {
20706 +       ICP_SESSION_INITIALISED = 0x5C5C5C,
20707 +       ICP_SESSION_RUNNING = 0x005C00,
20708 +       ICP_SESSION_DEREGISTERED = 0xC5C5C5
20709 +} usage_derisk;
20710 +
20711 +/*
20712 +This is the OCF<->OCF_DRV session object:
20713 +
20714 +1.The first member is a listNode. These session objects are added to a linked
20715 +  list in order to make it easier to remove them all at session exit time.
20716 +2.The second member is used to give the session object state and derisk the
20717 +  possibility of OCF batch calls executing against a deregistered session (as
20718 +  described above).
20719 +3.The third member is a LAC<->OCF_DRV session handle (initialised with the first
20720 +  perform request for that session).
20721 +4.The fourth is the LAC session context. All the parameters for this structure
20722 +  are only known when the first perform request for this session occurs. That is
20723 +  why the OCF Tolapai Driver only registers a new LAC session at perform time
20724 +*/
20725 +struct icp_drvSessionData {
20726 +       struct list_head listNode;
20727 +       usage_derisk inUse;
20728 +       CpaCySymSessionCtx sessHandle;
20729 +       CpaCySymSessionSetupData lacSessCtx;
20730 +};
20731 +
20732 +/* This struct is required for deferred session
20733 + deregistration as a work queue function can
20734 + only have one argument*/
20735 +struct icp_ocfDrvFreeLacSession {
20736 +       CpaCySymSessionCtx sessionToDeregister;
20737 +       struct work_struct work;
20738 +};
20739 +
20740 +int icp_ocfDrvNewSession(device_t dev, uint32_t * sild, struct cryptoini *cri);
20741 +
20742 +int icp_ocfDrvFreeLACSession(device_t dev, uint64_t sid);
20743 +
20744 +int icp_ocfDrvSymProcess(device_t dev, struct cryptop *crp, int hint);
20745 +
20746 +int icp_ocfDrvPkeProcess(device_t dev, struct cryptkop *krp, int hint);
20747 +
20748 +int icp_ocfDrvReadRandom(void *arg, uint32_t * buf, int maxwords);
20749 +
20750 +int icp_ocfDrvDeregRetry(CpaCySymSessionCtx sessionToDeregister);
20751 +
20752 +int icp_ocfDrvSkBuffToBufferList(struct sk_buff *skb,
20753 +                                CpaBufferList * bufferList);
20754 +
20755 +int icp_ocfDrvBufferListToSkBuff(CpaBufferList * bufferList,
20756 +                                struct sk_buff **skb);
20757 +
20758 +void icp_ocfDrvPtrAndLenToFlatBuffer(void *pData, uint32_t len,
20759 +                                    CpaFlatBuffer * pFlatBuffer);
20760 +
20761 +void icp_ocfDrvPtrAndLenToBufferList(void *pDataIn, uint32_t length,
20762 +                                    CpaBufferList * pBufferList);
20763 +
20764 +void icp_ocfDrvBufferListToPtrAndLen(CpaBufferList * pBufferList,
20765 +                                    void **ppDataOut, uint32_t * pLength);
20766 +
20767 +int icp_ocfDrvBufferListMemInfo(uint16_t numBuffers,
20768 +                               struct icp_drvBuffListInfo *buffListInfo);
20769 +
20770 +uint16_t icp_ocfDrvGetSkBuffFrags(struct sk_buff *pSkb);
20771 +
20772 +void icp_ocfDrvFreeFlatBuffer(CpaFlatBuffer * pFlatBuffer);
20773 +
20774 +int icp_ocfDrvAllocMetaData(CpaBufferList * pBufferList, 
20775 +                const struct icp_drvOpData *pOpData);
20776 +
20777 +void icp_ocfDrvFreeMetaData(CpaBufferList * pBufferList);
20778 +
20779 +#endif
20780 +/* ICP_OCF_H */
20781 --- /dev/null
20782 +++ b/crypto/ocf/ep80579/icp_sym.c
20783 @@ -0,0 +1,1382 @@
20784 +/***************************************************************************
20785 + *
20786 + * This file is provided under a dual BSD/GPLv2 license.  When using or 
20787 + *   redistributing this file, you may do so under either license.
20788 + * 
20789 + *   GPL LICENSE SUMMARY
20790 + * 
20791 + *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
20792 + * 
20793 + *   This program is free software; you can redistribute it and/or modify 
20794 + *   it under the terms of version 2 of the GNU General Public License as
20795 + *   published by the Free Software Foundation.
20796 + * 
20797 + *   This program is distributed in the hope that it will be useful, but 
20798 + *   WITHOUT ANY WARRANTY; without even the implied warranty of 
20799 + *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
20800 + *   General Public License for more details.
20801 + * 
20802 + *   You should have received a copy of the GNU General Public License 
20803 + *   along with this program; if not, write to the Free Software 
20804 + *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20805 + *   The full GNU General Public License is included in this distribution 
20806 + *   in the file called LICENSE.GPL.
20807 + * 
20808 + *   Contact Information:
20809 + *   Intel Corporation
20810 + * 
20811 + *   BSD LICENSE 
20812 + * 
20813 + *   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
20814 + *   All rights reserved.
20815 + * 
20816 + *   Redistribution and use in source and binary forms, with or without 
20817 + *   modification, are permitted provided that the following conditions 
20818 + *   are met:
20819 + * 
20820 + *     * Redistributions of source code must retain the above copyright 
20821 + *       notice, this list of conditions and the following disclaimer.
20822 + *     * Redistributions in binary form must reproduce the above copyright 
20823 + *       notice, this list of conditions and the following disclaimer in 
20824 + *       the documentation and/or other materials provided with the 
20825 + *       distribution.
20826 + *     * Neither the name of Intel Corporation nor the names of its 
20827 + *       contributors may be used to endorse or promote products derived 
20828 + *       from this software without specific prior written permission.
20829 + * 
20830 + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
20831 + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
20832 + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
20833 + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
20834 + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
20835 + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
20836 + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
20837 + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
20838 + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
20839 + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
20840 + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
20841 + * 
20842 + * 
20843 + *  version: Security.L.1.0.130
20844 + *
20845 + ***************************************************************************/
20846 +/*
20847 + * An OCF module that uses the API for Intel® QuickAssist Technology to do the
20848 + * cryptography.
20849 + *
20850 + * This driver requires the ICP Access Library that is available from Intel in
20851 + * order to operate.
20852 + */
20853 +
20854 +#include "icp_ocf.h"
20855 +
20856 +/*This is the call back function for all symmetric cryptographic processes.
20857 +  Its main functionality is to free driver crypto operation structure and to 
20858 +  call back to OCF*/
20859 +static void
20860 +icp_ocfDrvSymCallBack(void *callbackTag,
20861 +                     CpaStatus status,
20862 +                     const CpaCySymOp operationType,
20863 +                     void *pOpData,
20864 +                     CpaBufferList * pDstBuffer, CpaBoolean verifyResult);
20865 +
20866 +/*This function is used to extract crypto processing information from the OCF
20867 +  inputs, so as that it may be passed onto LAC*/
20868 +static int
20869 +icp_ocfDrvProcessDataSetup(struct icp_drvOpData *drvOpData,
20870 +                          struct cryptodesc *crp_desc);
20871 +
20872 +/*This function checks whether the crp_desc argument pertains to a digest or a
20873 +  cipher operation*/
20874 +static int icp_ocfDrvAlgCheck(struct cryptodesc *crp_desc);
20875 +
20876 +/*This function copies all the passed in session context information and stores
20877 +  it in a LAC context structure*/
20878 +static int
20879 +icp_ocfDrvAlgorithmSetup(struct cryptoini *cri,
20880 +                        CpaCySymSessionSetupData * lacSessCtx);
20881 +
20882 +/*This top level function is used to find a pointer to where a digest is 
20883 +  stored/needs to be inserted. */
20884 +static uint8_t *icp_ocfDrvDigestPointerFind(struct icp_drvOpData *drvOpData,
20885 +                                           struct cryptodesc *crp_desc);
20886 +
20887 +/*This function is called when a digest pointer has to be found within a
20888 +  SKBUFF.*/
20889 +static inline uint8_t *icp_ocfDrvSkbuffDigestPointerFind(struct icp_drvOpData
20890 +                                                        *drvOpData,
20891 +                                                        int offsetInBytes,
20892 +                                                        uint32_t
20893 +                                                        digestSizeInBytes);
20894 +
20895 +/*The following two functions are called if the SKBUFF digest pointer is not 
20896 +  positioned in the linear portion of the buffer (i.e. it is in a linked SKBUFF
20897 +   or page fragment).*/
20898 +/*This function takes care of the page fragment case.*/
20899 +static inline uint8_t *icp_ocfDrvDigestSkbNRFragsCheck(struct sk_buff *skb,
20900 +                                                      struct skb_shared_info
20901 +                                                      *skb_shared,
20902 +                                                      int offsetInBytes,
20903 +                                                      uint32_t
20904 +                                                      digestSizeInBytes);
20905 +
20906 +/*This function takes care of the linked list case.*/
20907 +static inline uint8_t *icp_ocfDrvDigestSkbFragListCheck(struct sk_buff *skb,
20908 +                                                       struct skb_shared_info
20909 +                                                       *skb_shared,
20910 +                                                       int offsetInBytes,
20911 +                                                       uint32_t
20912 +                                                       digestSizeInBytes);
20913 +
20914 +/*This function is used to free an OCF->OCF_DRV session object*/
20915 +static void icp_ocfDrvFreeOCFSession(struct icp_drvSessionData *sessionData);
20916 +
20917 +/*max IOV buffs supported in a UIO structure*/
20918 +#define NUM_IOV_SUPPORTED              (1)
20919 +
20920 +/* Name        : icp_ocfDrvSymCallBack
20921 + *
20922 + * Description : When this function returns it signifies that the LAC
20923 + * component has completed the relevant symmetric operation. 
20924 + *
20925 + * Notes : The callbackTag is a pointer to an icp_drvOpData. This memory
20926 + * object was passed to LAC for the cryptographic processing and contains all
20927 + * the relevant information for cleaning up buffer handles etc. so that the
20928 + * OCF Tolapai Driver portion of this crypto operation can be fully completed.
20929 + */
20930 +static void
20931 +icp_ocfDrvSymCallBack(void *callbackTag,
20932 +                     CpaStatus status,
20933 +                     const CpaCySymOp operationType,
20934 +                     void *pOpData,
20935 +                     CpaBufferList * pDstBuffer, CpaBoolean verifyResult)
20936 +{
20937 +       struct cryptop *crp = NULL;
20938 +       struct icp_drvOpData *temp_drvOpData =
20939 +           (struct icp_drvOpData *)callbackTag;
20940 +       uint64_t *tempBasePtr = NULL;
20941 +       uint32_t tempLen = 0;
20942 +
20943 +       if (NULL == temp_drvOpData) {
20944 +               DPRINTK("%s(): The callback from the LAC component"
20945 +                       " has failed due to Null userOpaque data"
20946 +                       "(status == %d).\n", __FUNCTION__, status);
20947 +               DPRINTK("%s(): Unable to call OCF back! \n", __FUNCTION__);
20948 +               return;
20949 +       }
20950 +
20951 +       crp = temp_drvOpData->crp;
20952 +       crp->crp_etype = ICP_OCF_DRV_NO_CRYPTO_PROCESS_ERROR;
20953 +
20954 +       if (NULL == pOpData) {
20955 +               DPRINTK("%s(): The callback from the LAC component"
20956 +                       " has failed due to Null Symmetric Op data"
20957 +                       "(status == %d).\n", __FUNCTION__, status);
20958 +               crp->crp_etype = ECANCELED;
20959 +               crypto_done(crp);
20960 +               return;
20961 +       }
20962 +
20963 +       if (NULL == pDstBuffer) {
20964 +               DPRINTK("%s(): The callback from the LAC component"
20965 +                       " has failed due to Null Dst Bufferlist data"
20966 +                       "(status == %d).\n", __FUNCTION__, status);
20967 +               crp->crp_etype = ECANCELED;
20968 +               crypto_done(crp);
20969 +               return;
20970 +       }
20971 +
20972 +       if (CPA_STATUS_SUCCESS == status) {
20973 +
20974 +               if (temp_drvOpData->bufferType == CRYPTO_F_SKBUF) {
20975 +                       if (ICP_OCF_DRV_STATUS_SUCCESS !=
20976 +                           icp_ocfDrvBufferListToSkBuff(pDstBuffer,
20977 +                                                        (struct sk_buff **)
20978 +                                                        &(crp->crp_buf))) {
20979 +                               EPRINTK("%s(): BufferList to SkBuff "
20980 +                                       "conversion error.\n", __FUNCTION__);
20981 +                               crp->crp_etype = EPERM;
20982 +                       }
20983 +               } else {
20984 +                       icp_ocfDrvBufferListToPtrAndLen(pDstBuffer,
20985 +                                                       (void **)&tempBasePtr,
20986 +                                                       &tempLen);
20987 +                       crp->crp_olen = (int)tempLen;
20988 +               }
20989 +
20990 +       } else {
20991 +               DPRINTK("%s(): The callback from the LAC component has failed"
20992 +                       "(status == %d).\n", __FUNCTION__, status);
20993 +
20994 +               crp->crp_etype = ECANCELED;
20995 +       }
20996 +
20997 +       if (temp_drvOpData->numBufferListArray >
20998 +           ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
20999 +               kfree(pDstBuffer->pBuffers);
21000 +       }
21001 +       icp_ocfDrvFreeMetaData(pDstBuffer);
21002 +       kmem_cache_free(drvOpData_zone, temp_drvOpData);
21003 +
21004 +       /* Invoke the OCF callback function */
21005 +       crypto_done(crp);
21006 +
21007 +       return;
21008 +}
21009 +
21010 +/* Name        : icp_ocfDrvNewSession 
21011 + *
21012 + * Description : This function will create a new Driver<->OCF session
21013 + *
21014 + * Notes : LAC session registration happens during the first perform call.
21015 + * That is the first time we know all information about a given session.
21016 + */
21017 +int icp_ocfDrvNewSession(device_t dev, uint32_t * sid, struct cryptoini *cri)
21018 +{
21019 +       struct icp_drvSessionData *sessionData = NULL;
21020 +       uint32_t delete_session = 0;
21021 +
21022 +       /* The SID passed in should be our driver ID. We can return the     */
21023 +       /* local ID (LID) which is a unique identifier which we can use     */
21024 +       /* to differentiate between the encrypt/decrypt LAC session handles */
21025 +       if (NULL == sid) {
21026 +               EPRINTK("%s(): Invalid input parameters - NULL sid.\n",
21027 +                       __FUNCTION__);
21028 +               return EINVAL;
21029 +       }
21030 +
21031 +       if (NULL == cri) {
21032 +               EPRINTK("%s(): Invalid input parameters - NULL cryptoini.\n",
21033 +                       __FUNCTION__);
21034 +               return EINVAL;
21035 +       }
21036 +
21037 +       if (icp_ocfDrvDriverId != *sid) {
21038 +               EPRINTK("%s(): Invalid input parameters - bad driver ID\n",
21039 +                       __FUNCTION__);
21040 +               EPRINTK("\t sid = 0x08%p \n \t cri = 0x08%p \n", sid, cri);
21041 +               return EINVAL;
21042 +       }
21043 +
21044 +       sessionData = kmem_cache_zalloc(drvSessionData_zone, GFP_ATOMIC);
21045 +       if (NULL == sessionData) {
21046 +               DPRINTK("%s():No memory for Session Data\n", __FUNCTION__);
21047 +               return ENOMEM;
21048 +       }
21049 +
21050 +       /*ENTER CRITICAL SECTION */
21051 +       spin_lock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
21052 +       /*put this check in the spinlock so no new sessions can be added to the
21053 +          linked list when we are exiting */
21054 +       if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
21055 +               delete_session++;
21056 +
21057 +       } else if (NO_OCF_TO_DRV_MAX_SESSIONS != max_sessions) {
21058 +               if (atomic_read(&num_ocf_to_drv_registered_sessions) >=
21059 +                   (max_sessions -
21060 +                    atomic_read(&lac_session_failed_dereg_count))) {
21061 +                       delete_session++;
21062 +               } else {
21063 +                       atomic_inc(&num_ocf_to_drv_registered_sessions);
21064 +                       /* Add to session data linked list */
21065 +                       list_add(&(sessionData->listNode),
21066 +                                &icp_ocfDrvGlobalSymListHead);
21067 +               }
21068 +
21069 +       } else if (NO_OCF_TO_DRV_MAX_SESSIONS == max_sessions) {
21070 +               list_add(&(sessionData->listNode),
21071 +                        &icp_ocfDrvGlobalSymListHead);
21072 +       }
21073 +
21074 +       sessionData->inUse = ICP_SESSION_INITIALISED;
21075 +
21076 +       /*EXIT CRITICAL SECTION */
21077 +       spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
21078 +
21079 +       if (delete_session) {
21080 +               DPRINTK("%s():No Session handles available\n", __FUNCTION__);
21081 +               kmem_cache_free(drvSessionData_zone, sessionData);
21082 +               return EPERM;
21083 +       }
21084 +
21085 +       if (ICP_OCF_DRV_STATUS_SUCCESS !=
21086 +           icp_ocfDrvAlgorithmSetup(cri, &(sessionData->lacSessCtx))) {
21087 +               DPRINTK("%s():algorithm not supported\n", __FUNCTION__);
21088 +               icp_ocfDrvFreeOCFSession(sessionData);
21089 +               return EINVAL;
21090 +       }
21091 +
21092 +       if (cri->cri_next) {
21093 +               if (cri->cri_next->cri_next != NULL) {
21094 +                       DPRINTK("%s():only two chained algorithms supported\n",
21095 +                               __FUNCTION__);
21096 +                       icp_ocfDrvFreeOCFSession(sessionData);
21097 +                       return EPERM;
21098 +               }
21099 +
21100 +               if (ICP_OCF_DRV_STATUS_SUCCESS !=
21101 +                   icp_ocfDrvAlgorithmSetup(cri->cri_next,
21102 +                                            &(sessionData->lacSessCtx))) {
21103 +                       DPRINTK("%s():second algorithm not supported\n",
21104 +                               __FUNCTION__);
21105 +                       icp_ocfDrvFreeOCFSession(sessionData);
21106 +                       return EINVAL;
21107 +               }
21108 +
21109 +               sessionData->lacSessCtx.symOperation =
21110 +                   CPA_CY_SYM_OP_ALGORITHM_CHAINING;
21111 +       }
21112 +
21113 +       *sid = (uint32_t) sessionData;
21114 +
21115 +       return ICP_OCF_DRV_STATUS_SUCCESS;
21116 +}
21117 +
21118 +/* Name        : icp_ocfDrvAlgorithmSetup
21119 + *
21120 + * Description : This function builds the session context data from the
21121 + * information supplied through OCF. Algorithm chain order and whether the
21122 + * session is Encrypt/Decrypt can only be found out at perform time however, so
21123 + * the session is registered with LAC at that time.
21124 + */
21125 +static int
21126 +icp_ocfDrvAlgorithmSetup(struct cryptoini *cri,
21127 +                        CpaCySymSessionSetupData * lacSessCtx)
21128 +{
21129 +
21130 +       lacSessCtx->sessionPriority = CPA_CY_PRIORITY_NORMAL;
21131 +
21132 +       switch (cri->cri_alg) {
21133 +
21134 +       case CRYPTO_NULL_CBC:
21135 +               DPRINTK("%s(): NULL CBC\n", __FUNCTION__);
21136 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
21137 +               lacSessCtx->cipherSetupData.cipherAlgorithm =
21138 +                   CPA_CY_SYM_CIPHER_NULL;
21139 +               lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
21140 +                   cri->cri_klen / NUM_BITS_IN_BYTE;
21141 +               lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
21142 +               break;
21143 +
21144 +       case CRYPTO_DES_CBC:
21145 +               DPRINTK("%s(): DES CBC\n", __FUNCTION__);
21146 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
21147 +               lacSessCtx->cipherSetupData.cipherAlgorithm =
21148 +                   CPA_CY_SYM_CIPHER_DES_CBC;
21149 +               lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
21150 +                   cri->cri_klen / NUM_BITS_IN_BYTE;
21151 +               lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
21152 +               break;
21153 +
21154 +       case CRYPTO_3DES_CBC:
21155 +               DPRINTK("%s(): 3DES CBC\n", __FUNCTION__);
21156 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
21157 +               lacSessCtx->cipherSetupData.cipherAlgorithm =
21158 +                   CPA_CY_SYM_CIPHER_3DES_CBC;
21159 +               lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
21160 +                   cri->cri_klen / NUM_BITS_IN_BYTE;
21161 +               lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
21162 +               break;
21163 +
21164 +       case CRYPTO_AES_CBC:
21165 +               DPRINTK("%s(): AES CBC\n", __FUNCTION__);
21166 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
21167 +               lacSessCtx->cipherSetupData.cipherAlgorithm =
21168 +                   CPA_CY_SYM_CIPHER_AES_CBC;
21169 +               lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
21170 +                   cri->cri_klen / NUM_BITS_IN_BYTE;
21171 +               lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
21172 +               break;
21173 +
21174 +       case CRYPTO_ARC4:
21175 +               DPRINTK("%s(): ARC4\n", __FUNCTION__);
21176 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
21177 +               lacSessCtx->cipherSetupData.cipherAlgorithm =
21178 +                   CPA_CY_SYM_CIPHER_ARC4;
21179 +               lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
21180 +                   cri->cri_klen / NUM_BITS_IN_BYTE;
21181 +               lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
21182 +               break;
21183 +
21184 +       case CRYPTO_SHA1:
21185 +               DPRINTK("%s(): SHA1\n", __FUNCTION__);
21186 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21187 +               lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
21188 +               lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
21189 +               lacSessCtx->hashSetupData.digestResultLenInBytes =
21190 +                   (cri->cri_mlen ?
21191 +                    cri->cri_mlen : ICP_SHA1_DIGEST_SIZE_IN_BYTES);
21192 +
21193 +               break;
21194 +
21195 +       case CRYPTO_SHA1_HMAC:
21196 +               DPRINTK("%s(): SHA1_HMAC\n", __FUNCTION__);
21197 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21198 +               lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
21199 +               lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
21200 +               lacSessCtx->hashSetupData.digestResultLenInBytes =
21201 +                   (cri->cri_mlen ?
21202 +                    cri->cri_mlen : ICP_SHA1_DIGEST_SIZE_IN_BYTES);
21203 +               lacSessCtx->hashSetupData.authModeSetupData.authKey =
21204 +                   cri->cri_key;
21205 +               lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
21206 +                   cri->cri_klen / NUM_BITS_IN_BYTE;
21207 +               lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
21208 +
21209 +               break;
21210 +
21211 +       case CRYPTO_SHA2_256:
21212 +               DPRINTK("%s(): SHA256\n", __FUNCTION__);
21213 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21214 +               lacSessCtx->hashSetupData.hashAlgorithm =
21215 +                   CPA_CY_SYM_HASH_SHA256;
21216 +               lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
21217 +               lacSessCtx->hashSetupData.digestResultLenInBytes =
21218 +                   (cri->cri_mlen ?
21219 +                    cri->cri_mlen : ICP_SHA256_DIGEST_SIZE_IN_BYTES);
21220 +
21221 +               break;
21222 +
21223 +       case CRYPTO_SHA2_256_HMAC:
21224 +               DPRINTK("%s(): SHA256_HMAC\n", __FUNCTION__);
21225 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21226 +               lacSessCtx->hashSetupData.hashAlgorithm =
21227 +                   CPA_CY_SYM_HASH_SHA256;
21228 +               lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
21229 +               lacSessCtx->hashSetupData.digestResultLenInBytes =
21230 +                   (cri->cri_mlen ?
21231 +                    cri->cri_mlen : ICP_SHA256_DIGEST_SIZE_IN_BYTES);
21232 +               lacSessCtx->hashSetupData.authModeSetupData.authKey =
21233 +                   cri->cri_key;
21234 +               lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
21235 +                   cri->cri_klen / NUM_BITS_IN_BYTE;
21236 +               lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
21237 +
21238 +               break;
21239 +
21240 +       case CRYPTO_SHA2_384:
21241 +               DPRINTK("%s(): SHA384\n", __FUNCTION__);
21242 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21243 +               lacSessCtx->hashSetupData.hashAlgorithm =
21244 +                   CPA_CY_SYM_HASH_SHA384;
21245 +               lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
21246 +               lacSessCtx->hashSetupData.digestResultLenInBytes =
21247 +                   (cri->cri_mlen ?
21248 +                    cri->cri_mlen : ICP_SHA384_DIGEST_SIZE_IN_BYTES);
21249 +
21250 +               break;
21251 +
21252 +       case CRYPTO_SHA2_384_HMAC:
21253 +               DPRINTK("%s(): SHA384_HMAC\n", __FUNCTION__);
21254 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21255 +               lacSessCtx->hashSetupData.hashAlgorithm =
21256 +                   CPA_CY_SYM_HASH_SHA384;
21257 +               lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
21258 +               lacSessCtx->hashSetupData.digestResultLenInBytes =
21259 +                   (cri->cri_mlen ?
21260 +                    cri->cri_mlen : ICP_SHA384_DIGEST_SIZE_IN_BYTES);
21261 +               lacSessCtx->hashSetupData.authModeSetupData.authKey =
21262 +                   cri->cri_key;
21263 +               lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
21264 +                   cri->cri_klen / NUM_BITS_IN_BYTE;
21265 +               lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
21266 +
21267 +               break;
21268 +
21269 +       case CRYPTO_SHA2_512:
21270 +               DPRINTK("%s(): SHA512\n", __FUNCTION__);
21271 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21272 +               lacSessCtx->hashSetupData.hashAlgorithm =
21273 +                   CPA_CY_SYM_HASH_SHA512;
21274 +               lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
21275 +               lacSessCtx->hashSetupData.digestResultLenInBytes =
21276 +                   (cri->cri_mlen ?
21277 +                    cri->cri_mlen : ICP_SHA512_DIGEST_SIZE_IN_BYTES);
21278 +
21279 +               break;
21280 +
21281 +       case CRYPTO_SHA2_512_HMAC:
21282 +               DPRINTK("%s(): SHA512_HMAC\n", __FUNCTION__);
21283 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21284 +               lacSessCtx->hashSetupData.hashAlgorithm =
21285 +                   CPA_CY_SYM_HASH_SHA512;
21286 +               lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
21287 +               lacSessCtx->hashSetupData.digestResultLenInBytes =
21288 +                   (cri->cri_mlen ?
21289 +                    cri->cri_mlen : ICP_SHA512_DIGEST_SIZE_IN_BYTES);
21290 +               lacSessCtx->hashSetupData.authModeSetupData.authKey =
21291 +                   cri->cri_key;
21292 +               lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
21293 +                   cri->cri_klen / NUM_BITS_IN_BYTE;
21294 +               lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
21295 +
21296 +               break;
21297 +
21298 +       case CRYPTO_MD5:
21299 +               DPRINTK("%s(): MD5\n", __FUNCTION__);
21300 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21301 +               lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
21302 +               lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
21303 +               lacSessCtx->hashSetupData.digestResultLenInBytes =
21304 +                   (cri->cri_mlen ?
21305 +                    cri->cri_mlen : ICP_MD5_DIGEST_SIZE_IN_BYTES);
21306 +
21307 +               break;
21308 +
21309 +       case CRYPTO_MD5_HMAC:
21310 +               DPRINTK("%s(): MD5_HMAC\n", __FUNCTION__);
21311 +               lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
21312 +               lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
21313 +               lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
21314 +               lacSessCtx->hashSetupData.digestResultLenInBytes =
21315 +                   (cri->cri_mlen ?
21316 +                    cri->cri_mlen : ICP_MD5_DIGEST_SIZE_IN_BYTES);
21317 +               lacSessCtx->hashSetupData.authModeSetupData.authKey =
21318 +                   cri->cri_key;
21319 +               lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
21320 +                   cri->cri_klen / NUM_BITS_IN_BYTE;
21321 +               lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
21322 +
21323 +               break;
21324 +
21325 +       default:
21326 +               DPRINTK("%s(): ALG Setup FAIL\n", __FUNCTION__);
21327 +               return ICP_OCF_DRV_STATUS_FAIL;
21328 +       }
21329 +
21330 +       return ICP_OCF_DRV_STATUS_SUCCESS;
21331 +}
21332 +
21333 +/* Name        : icp_ocfDrvFreeOCFSession
21334 + *
21335 + * Description : This function deletes all existing Session data representing
21336 + * the Cryptographic session established between OCF and this driver. This
21337 + * also includes freeing the memory allocated for the session context. The
21338 + * session object is also removed from the session linked list.
21339 + */
21340 +static void icp_ocfDrvFreeOCFSession(struct icp_drvSessionData *sessionData)
21341 +{
21342 +
21343 +       sessionData->inUse = ICP_SESSION_DEREGISTERED;
21344 +
21345 +       /*ENTER CRITICAL SECTION */
21346 +       spin_lock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
21347 +
21348 +       if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
21349 +               /*If the Driver is exiting, allow that process to
21350 +                  handle any deletions */
21351 +               /*EXIT CRITICAL SECTION */
21352 +               spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
21353 +               return;
21354 +       }
21355 +
21356 +       atomic_dec(&num_ocf_to_drv_registered_sessions);
21357 +
21358 +       list_del(&(sessionData->listNode));
21359 +
21360 +       /*EXIT CRITICAL SECTION */
21361 +       spin_unlock_bh(&icp_ocfDrvSymSessInfoListSpinlock);
21362 +
21363 +       if (NULL != sessionData->sessHandle) {
21364 +               kfree(sessionData->sessHandle);
21365 +       }
21366 +       kmem_cache_free(drvSessionData_zone, sessionData);
21367 +}
21368 +
21369 +/* Name        : icp_ocfDrvFreeLACSession
21370 + *
21371 + * Description : This attempts to deregister a LAC session. If it fails, the
21372 + * deregistation retry function is called.
21373 + */
21374 +int icp_ocfDrvFreeLACSession(device_t dev, uint64_t sid)
21375 +{
21376 +       CpaCySymSessionCtx sessionToDeregister = NULL;
21377 +       struct icp_drvSessionData *sessionData = NULL;
21378 +       CpaStatus lacStatus = CPA_STATUS_SUCCESS;
21379 +       int retval = 0;
21380 +
21381 +       sessionData = (struct icp_drvSessionData *)CRYPTO_SESID2LID(sid);
21382 +       if (NULL == sessionData) {
21383 +               EPRINTK("%s(): OCF Free session called with Null Session ID.\n",
21384 +                       __FUNCTION__);
21385 +               return EINVAL;
21386 +       }
21387 +
21388 +       sessionToDeregister = sessionData->sessHandle;
21389 +
21390 +       if (ICP_SESSION_INITIALISED == sessionData->inUse) {
21391 +               DPRINTK("%s() Session not registered with LAC\n", __FUNCTION__);
21392 +       } else if (NULL == sessionData->sessHandle) {
21393 +               EPRINTK
21394 +                   ("%s(): OCF Free session called with Null Session Handle.\n",
21395 +                    __FUNCTION__);
21396 +               return EINVAL;
21397 +       } else {
21398 +               lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
21399 +                                                 sessionToDeregister);
21400 +               if (CPA_STATUS_RETRY == lacStatus) {
21401 +                       if (ICP_OCF_DRV_STATUS_SUCCESS !=
21402 +                           icp_ocfDrvDeregRetry(&sessionToDeregister)) {
21403 +                               /* the retry function increments the 
21404 +                                  dereg failed count */
21405 +                               DPRINTK("%s(): LAC failed to deregister the "
21406 +                                       "session. (localSessionId= %p)\n",
21407 +                                       __FUNCTION__, sessionToDeregister);
21408 +                               retval = EPERM;
21409 +                       }
21410 +
21411 +               } else if (CPA_STATUS_SUCCESS != lacStatus) {
21412 +                       DPRINTK("%s(): LAC failed to deregister the session. "
21413 +                               "localSessionId= %p, lacStatus = %d\n",
21414 +                               __FUNCTION__, sessionToDeregister, lacStatus);
21415 +                       atomic_inc(&lac_session_failed_dereg_count);
21416 +                       retval = EPERM;
21417 +               }
21418 +       }
21419 +
21420 +       icp_ocfDrvFreeOCFSession(sessionData);
21421 +       return retval;
21422 +
21423 +}
21424 +
21425 +/* Name        : icp_ocfDrvAlgCheck 
21426 + *
21427 + * Description : This function checks whether the cryptodesc argument pertains
21428 + * to a sym or hash function
21429 + */
21430 +static int icp_ocfDrvAlgCheck(struct cryptodesc *crp_desc)
21431 +{
21432 +
21433 +       if (crp_desc->crd_alg == CRYPTO_3DES_CBC ||
21434 +           crp_desc->crd_alg == CRYPTO_AES_CBC ||
21435 +           crp_desc->crd_alg == CRYPTO_DES_CBC ||
21436 +           crp_desc->crd_alg == CRYPTO_NULL_CBC ||
21437 +           crp_desc->crd_alg == CRYPTO_ARC4) {
21438 +               return ICP_OCF_DRV_ALG_CIPHER;
21439 +       }
21440 +
21441 +       return ICP_OCF_DRV_ALG_HASH;
21442 +}
21443 +
21444 +/* Name        : icp_ocfDrvSymProcess 
21445 + *
21446 + * Description : This function will map symmetric functionality calls from OCF
21447 + * to the LAC API. It will also allocate memory to store the session context.
21448 + * 
21449 + * Notes: If it is the first perform call for a given session, then a LAC
21450 + * session is registered. After the session is registered, no checks as
21451 + * to whether session paramaters have changed (e.g. alg chain order) are
21452 + * done.
21453 + */
21454 +int icp_ocfDrvSymProcess(device_t dev, struct cryptop *crp, int hint)
21455 +{
21456 +       struct icp_drvSessionData *sessionData = NULL;
21457 +       struct icp_drvOpData *drvOpData = NULL;
21458 +       CpaStatus lacStatus = CPA_STATUS_SUCCESS;
21459 +       Cpa32U sessionCtxSizeInBytes = 0;
21460 +       uint16_t numBufferListArray = 0;
21461 +
21462 +       if (NULL == crp) {
21463 +               DPRINTK("%s(): Invalid input parameters, cryptop is NULL\n",
21464 +                       __FUNCTION__);
21465 +               return EINVAL;
21466 +       }
21467 +
21468 +       if (NULL == crp->crp_desc) {
21469 +               DPRINTK("%s(): Invalid input parameters, no crp_desc attached "
21470 +                       "to crp\n", __FUNCTION__);
21471 +               crp->crp_etype = EINVAL;
21472 +               return EINVAL;
21473 +       }
21474 +
21475 +       if (NULL == crp->crp_buf) {
21476 +               DPRINTK("%s(): Invalid input parameters, no buffer attached "
21477 +                       "to crp\n", __FUNCTION__);
21478 +               crp->crp_etype = EINVAL;
21479 +               return EINVAL;
21480 +       }
21481 +
21482 +       if (CPA_TRUE == atomic_read(&icp_ocfDrvIsExiting)) {
21483 +               crp->crp_etype = EFAULT;
21484 +               return EFAULT;
21485 +       }
21486 +
21487 +       sessionData = (struct icp_drvSessionData *)
21488 +           (CRYPTO_SESID2LID(crp->crp_sid));
21489 +       if (NULL == sessionData) {
21490 +               DPRINTK("%s(): Invalid input parameters, Null Session ID \n",
21491 +                       __FUNCTION__);
21492 +               crp->crp_etype = EINVAL;
21493 +               return EINVAL;
21494 +       }
21495 +
21496 +/*If we get a request against a deregisted session, cancel operation*/
21497 +       if (ICP_SESSION_DEREGISTERED == sessionData->inUse) {
21498 +               DPRINTK("%s(): Session ID %d was deregistered \n",
21499 +                       __FUNCTION__, (int)(CRYPTO_SESID2LID(crp->crp_sid)));
21500 +               crp->crp_etype = EFAULT;
21501 +               return EFAULT;
21502 +       }
21503 +
21504 +/*If none of the session states are set, then the session structure was either
21505 +  not initialised properly or we are reading from a freed memory area (possible
21506 +  due to OCF batch mode not removing queued requests against deregistered 
21507 +  sessions*/
21508 +       if (ICP_SESSION_INITIALISED != sessionData->inUse &&
21509 +           ICP_SESSION_RUNNING != sessionData->inUse) {
21510 +               DPRINTK("%s(): Session - ID %d - not properly initialised or "
21511 +                       "memory freed back to the kernel \n",
21512 +                       __FUNCTION__, (int)(CRYPTO_SESID2LID(crp->crp_sid)));
21513 +               crp->crp_etype = EINVAL;
21514 +               return EINVAL;
21515 +       }
21516 +
21517 +       /*For the below checks, remember error checking is already done in LAC.
21518 +          We're not validating inputs subsequent to registration */
21519 +       if (sessionData->inUse == ICP_SESSION_INITIALISED) {
21520 +               DPRINTK("%s(): Initialising session\n", __FUNCTION__);
21521 +
21522 +               if (NULL != crp->crp_desc->crd_next) {
21523 +                       if (ICP_OCF_DRV_ALG_CIPHER ==
21524 +                           icp_ocfDrvAlgCheck(crp->crp_desc)) {
21525 +
21526 +                               sessionData->lacSessCtx.algChainOrder =
21527 +                                   CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
21528 +
21529 +                               if (crp->crp_desc->crd_flags & CRD_F_ENCRYPT) {
21530 +                                       sessionData->lacSessCtx.cipherSetupData.
21531 +                                           cipherDirection =
21532 +                                           CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
21533 +                               } else {
21534 +                                       sessionData->lacSessCtx.cipherSetupData.
21535 +                                           cipherDirection =
21536 +                                           CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
21537 +                               }
21538 +                       } else {
21539 +                               sessionData->lacSessCtx.algChainOrder =
21540 +                                   CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
21541 +
21542 +                               if (crp->crp_desc->crd_next->crd_flags &
21543 +                                   CRD_F_ENCRYPT) {
21544 +                                       sessionData->lacSessCtx.cipherSetupData.
21545 +                                           cipherDirection =
21546 +                                           CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
21547 +                               } else {
21548 +                                       sessionData->lacSessCtx.cipherSetupData.
21549 +                                           cipherDirection =
21550 +                                           CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
21551 +                               }
21552 +
21553 +                       }
21554 +
21555 +               } else if (ICP_OCF_DRV_ALG_CIPHER ==
21556 +                          icp_ocfDrvAlgCheck(crp->crp_desc)) {
21557 +                       if (crp->crp_desc->crd_flags & CRD_F_ENCRYPT) {
21558 +                               sessionData->lacSessCtx.cipherSetupData.
21559 +                                   cipherDirection =
21560 +                                   CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
21561 +                       } else {
21562 +                               sessionData->lacSessCtx.cipherSetupData.
21563 +                                   cipherDirection =
21564 +                                   CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
21565 +                       }
21566 +
21567 +               }
21568 +
21569 +               /*No action required for standalone Auth here */
21570 +
21571 +               /* Allocate memory for SymSessionCtx before the Session Registration */
21572 +               lacStatus =
21573 +                   cpaCySymSessionCtxGetSize(CPA_INSTANCE_HANDLE_SINGLE,
21574 +                                             &(sessionData->lacSessCtx),
21575 +                                             &sessionCtxSizeInBytes);
21576 +               if (CPA_STATUS_SUCCESS != lacStatus) {
21577 +                       EPRINTK("%s(): cpaCySymSessionCtxGetSize failed - %d\n",
21578 +                               __FUNCTION__, lacStatus);
21579 +                       return EINVAL;
21580 +               }
21581 +               sessionData->sessHandle =
21582 +                   kmalloc(sessionCtxSizeInBytes, GFP_ATOMIC);
21583 +               if (NULL == sessionData->sessHandle) {
21584 +                       EPRINTK
21585 +                           ("%s(): Failed to get memory for SymSessionCtx\n",
21586 +                            __FUNCTION__);
21587 +                       return ENOMEM;
21588 +               }
21589 +
21590 +               lacStatus = cpaCySymInitSession(CPA_INSTANCE_HANDLE_SINGLE,
21591 +                                               icp_ocfDrvSymCallBack,
21592 +                                               &(sessionData->lacSessCtx),
21593 +                                               sessionData->sessHandle);
21594 +
21595 +               if (CPA_STATUS_SUCCESS != lacStatus) {
21596 +                       EPRINTK("%s(): cpaCySymInitSession failed -%d \n",
21597 +                               __FUNCTION__, lacStatus);
21598 +                       return EFAULT;
21599 +               }
21600 +
21601 +               sessionData->inUse = ICP_SESSION_RUNNING;
21602 +       }
21603 +
21604 +       drvOpData = kmem_cache_zalloc(drvOpData_zone, GFP_ATOMIC);
21605 +       if (NULL == drvOpData) {
21606 +               EPRINTK("%s():Failed to get memory for drvOpData\n",
21607 +                       __FUNCTION__);
21608 +               crp->crp_etype = ENOMEM;
21609 +               return ENOMEM;
21610 +       }
21611 +
21612 +       drvOpData->lacOpData.pSessionCtx = sessionData->sessHandle;
21613 +       drvOpData->digestSizeInBytes = sessionData->lacSessCtx.hashSetupData.
21614 +           digestResultLenInBytes;
21615 +       drvOpData->crp = crp;
21616 +
21617 +       /* Set the default buffer list array memory allocation */
21618 +       drvOpData->srcBuffer.pBuffers = drvOpData->bufferListArray;
21619 +       drvOpData->numBufferListArray = ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS;
21620 +
21621 +       /* 
21622 +        * Allocate buffer list array memory allocation if the
21623 +        * data fragment is more than the default allocation
21624 +        */
21625 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
21626 +               numBufferListArray = icp_ocfDrvGetSkBuffFrags((struct sk_buff *)
21627 +                                                             crp->crp_buf);
21628 +               if (ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS < numBufferListArray) {
21629 +                       DPRINTK("%s() numBufferListArray more than default\n",
21630 +                               __FUNCTION__);
21631 +                       drvOpData->srcBuffer.pBuffers = NULL;
21632 +                       drvOpData->srcBuffer.pBuffers =
21633 +                           kmalloc(numBufferListArray *
21634 +                                   sizeof(CpaFlatBuffer), GFP_ATOMIC);
21635 +                       if (NULL == drvOpData->srcBuffer.pBuffers) {
21636 +                               EPRINTK("%s() Failed to get memory for "
21637 +                                       "pBuffers\n", __FUNCTION__);
21638 +                               kmem_cache_free(drvOpData_zone, drvOpData);
21639 +                               crp->crp_etype = ENOMEM;
21640 +                               return ENOMEM;
21641 +                       }
21642 +                       drvOpData->numBufferListArray = numBufferListArray;
21643 +               }
21644 +       }
21645 +
21646 +       /*
21647 +        * Check the type of buffer structure we got and convert it into
21648 +        * CpaBufferList format.
21649 +        */
21650 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
21651 +               if (ICP_OCF_DRV_STATUS_SUCCESS !=
21652 +                   icp_ocfDrvSkBuffToBufferList((struct sk_buff *)crp->crp_buf,
21653 +                                                &(drvOpData->srcBuffer))) {
21654 +                       EPRINTK("%s():Failed to translate from SK_BUF "
21655 +                               "to bufferlist\n", __FUNCTION__);
21656 +                       crp->crp_etype = EINVAL;
21657 +                       goto err;
21658 +               }
21659 +
21660 +               drvOpData->bufferType = CRYPTO_F_SKBUF;
21661 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
21662 +               /* OCF only supports IOV of one entry. */
21663 +               if (NUM_IOV_SUPPORTED ==
21664 +                   ((struct uio *)(crp->crp_buf))->uio_iovcnt) {
21665 +
21666 +                       icp_ocfDrvPtrAndLenToBufferList(((struct uio *)(crp->
21667 +                                                                       crp_buf))->
21668 +                                                       uio_iov[0].iov_base,
21669 +                                                       ((struct uio *)(crp->
21670 +                                                                       crp_buf))->
21671 +                                                       uio_iov[0].iov_len,
21672 +                                                       &(drvOpData->
21673 +                                                         srcBuffer));
21674 +
21675 +                       drvOpData->bufferType = CRYPTO_F_IOV;
21676 +
21677 +               } else {
21678 +                       DPRINTK("%s():Unable to handle IOVs with lengths of "
21679 +                               "greater than one!\n", __FUNCTION__);
21680 +                       crp->crp_etype = EINVAL;
21681 +                       goto err;
21682 +               }
21683 +
21684 +       } else {
21685 +               icp_ocfDrvPtrAndLenToBufferList(crp->crp_buf,
21686 +                                               crp->crp_ilen,
21687 +                                               &(drvOpData->srcBuffer));
21688 +
21689 +               drvOpData->bufferType = CRYPTO_BUF_CONTIG;
21690 +       }
21691 +
21692 +       if (ICP_OCF_DRV_STATUS_SUCCESS !=
21693 +           icp_ocfDrvProcessDataSetup(drvOpData, drvOpData->crp->crp_desc)) {
21694 +               crp->crp_etype = EINVAL;
21695 +               goto err;
21696 +       }
21697 +
21698 +       if (drvOpData->crp->crp_desc->crd_next != NULL) {
21699 +               if (icp_ocfDrvProcessDataSetup(drvOpData, drvOpData->crp->
21700 +                                              crp_desc->crd_next)) {
21701 +                       crp->crp_etype = EINVAL;
21702 +                       goto err;
21703 +               }
21704 +
21705 +       }
21706 +
21707 +       /* Allocate srcBuffer's private meta data */
21708 +       if (ICP_OCF_DRV_STATUS_SUCCESS !=
21709 +           icp_ocfDrvAllocMetaData(&(drvOpData->srcBuffer), drvOpData)) {
21710 +               EPRINTK("%s() icp_ocfDrvAllocMetaData failed\n", __FUNCTION__);
21711 +               memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
21712 +               crp->crp_etype = EINVAL;
21713 +               goto err;
21714 +       }
21715 +
21716 +       /* Perform "in-place" crypto operation */
21717 +       lacStatus = cpaCySymPerformOp(CPA_INSTANCE_HANDLE_SINGLE,
21718 +                                     (void *)drvOpData,
21719 +                                     &(drvOpData->lacOpData),
21720 +                                     &(drvOpData->srcBuffer),
21721 +                                     &(drvOpData->srcBuffer),
21722 +                                     &(drvOpData->verifyResult));
21723 +       if (CPA_STATUS_RETRY == lacStatus) {
21724 +               DPRINTK("%s(): cpaCySymPerformOp retry, lacStatus = %d\n",
21725 +                       __FUNCTION__, lacStatus);
21726 +               memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
21727 +               crp->crp_etype = EINVAL;
21728 +               goto err;
21729 +       }
21730 +       if (CPA_STATUS_SUCCESS != lacStatus) {
21731 +               EPRINTK("%s(): cpaCySymPerformOp failed, lacStatus = %d\n",
21732 +                       __FUNCTION__, lacStatus);
21733 +               memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
21734 +               crp->crp_etype = EINVAL;
21735 +               goto err;
21736 +       }
21737 +
21738 +       return 0;               //OCF success status value
21739 +
21740 +      err:
21741 +       if (drvOpData->numBufferListArray > ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
21742 +               kfree(drvOpData->srcBuffer.pBuffers);
21743 +       }
21744 +       icp_ocfDrvFreeMetaData(&(drvOpData->srcBuffer));
21745 +       kmem_cache_free(drvOpData_zone, drvOpData);
21746 +
21747 +       return crp->crp_etype;
21748 +}
21749 +
21750 +/* Name        : icp_ocfDrvProcessDataSetup
21751 + *
21752 + * Description : This function will setup all the cryptographic operation data
21753 + *               that is required by LAC to execute the operation.
21754 + */
21755 +static int icp_ocfDrvProcessDataSetup(struct icp_drvOpData *drvOpData,
21756 +                                     struct cryptodesc *crp_desc)
21757 +{
21758 +       CpaCyRandGenOpData randGenOpData;
21759 +       CpaFlatBuffer randData;
21760 +
21761 +       drvOpData->lacOpData.packetType = CPA_CY_SYM_PACKET_TYPE_FULL;
21762 +
21763 +       /* Convert from the cryptop to the ICP LAC crypto parameters */
21764 +       switch (crp_desc->crd_alg) {
21765 +       case CRYPTO_NULL_CBC:
21766 +               drvOpData->lacOpData.
21767 +                   cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
21768 +               drvOpData->lacOpData.
21769 +                   messageLenToCipherInBytes = crp_desc->crd_len;
21770 +               drvOpData->verifyResult = CPA_FALSE;
21771 +               drvOpData->lacOpData.ivLenInBytes = NULL_BLOCK_LEN;
21772 +               break;
21773 +       case CRYPTO_DES_CBC:
21774 +               drvOpData->lacOpData.
21775 +                   cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
21776 +               drvOpData->lacOpData.
21777 +                   messageLenToCipherInBytes = crp_desc->crd_len;
21778 +               drvOpData->verifyResult = CPA_FALSE;
21779 +               drvOpData->lacOpData.ivLenInBytes = DES_BLOCK_LEN;
21780 +               break;
21781 +       case CRYPTO_3DES_CBC:
21782 +               drvOpData->lacOpData.
21783 +                   cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
21784 +               drvOpData->lacOpData.
21785 +                   messageLenToCipherInBytes = crp_desc->crd_len;
21786 +               drvOpData->verifyResult = CPA_FALSE;
21787 +               drvOpData->lacOpData.ivLenInBytes = DES3_BLOCK_LEN;
21788 +               break;
21789 +       case CRYPTO_ARC4:
21790 +               drvOpData->lacOpData.
21791 +                   cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
21792 +               drvOpData->lacOpData.
21793 +                   messageLenToCipherInBytes = crp_desc->crd_len;
21794 +               drvOpData->verifyResult = CPA_FALSE;
21795 +               drvOpData->lacOpData.ivLenInBytes = ARC4_COUNTER_LEN;
21796 +               break;
21797 +       case CRYPTO_AES_CBC:
21798 +               drvOpData->lacOpData.
21799 +                   cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
21800 +               drvOpData->lacOpData.
21801 +                   messageLenToCipherInBytes = crp_desc->crd_len;
21802 +               drvOpData->verifyResult = CPA_FALSE;
21803 +               drvOpData->lacOpData.ivLenInBytes = RIJNDAEL128_BLOCK_LEN;
21804 +               break;
21805 +       case CRYPTO_SHA1:
21806 +       case CRYPTO_SHA1_HMAC:
21807 +       case CRYPTO_SHA2_256:
21808 +       case CRYPTO_SHA2_256_HMAC:
21809 +       case CRYPTO_SHA2_384:
21810 +       case CRYPTO_SHA2_384_HMAC:
21811 +       case CRYPTO_SHA2_512:
21812 +       case CRYPTO_SHA2_512_HMAC:
21813 +       case CRYPTO_MD5:
21814 +       case CRYPTO_MD5_HMAC:
21815 +               drvOpData->lacOpData.
21816 +                   hashStartSrcOffsetInBytes = crp_desc->crd_skip;
21817 +               drvOpData->lacOpData.
21818 +                   messageLenToHashInBytes = crp_desc->crd_len;
21819 +               drvOpData->lacOpData.
21820 +                   pDigestResult =
21821 +                   icp_ocfDrvDigestPointerFind(drvOpData, crp_desc);
21822 +
21823 +               if (NULL == drvOpData->lacOpData.pDigestResult) {
21824 +                       DPRINTK("%s(): ERROR - could not calculate "
21825 +                               "Digest Result memory address\n", __FUNCTION__);
21826 +                       return ICP_OCF_DRV_STATUS_FAIL;
21827 +               }
21828 +
21829 +               drvOpData->lacOpData.digestVerify = CPA_FALSE;
21830 +               break;
21831 +       default:
21832 +               DPRINTK("%s(): Crypto process error - algorithm not "
21833 +                       "found \n", __FUNCTION__);
21834 +               return ICP_OCF_DRV_STATUS_FAIL;
21835 +       }
21836 +
21837 +       /* Figure out what the IV is supposed to be */
21838 +       if ((crp_desc->crd_alg == CRYPTO_DES_CBC) ||
21839 +           (crp_desc->crd_alg == CRYPTO_3DES_CBC) ||
21840 +           (crp_desc->crd_alg == CRYPTO_AES_CBC)) {
21841 +               /*ARC4 doesn't use an IV */
21842 +               if (crp_desc->crd_flags & CRD_F_IV_EXPLICIT) {
21843 +                       /* Explicit IV provided to OCF */
21844 +                       drvOpData->lacOpData.pIv = crp_desc->crd_iv;
21845 +               } else {
21846 +                       /* IV is not explicitly provided to OCF */
21847 +
21848 +                       /* Point the LAC OP Data IV pointer to our allocated
21849 +                          storage location for this session. */
21850 +                       drvOpData->lacOpData.pIv = drvOpData->ivData;
21851 +
21852 +                       if ((crp_desc->crd_flags & CRD_F_ENCRYPT) &&
21853 +                           ((crp_desc->crd_flags & CRD_F_IV_PRESENT) == 0)) {
21854 +
21855 +                               /* Encrypting - need to create IV */
21856 +                               randGenOpData.generateBits = CPA_TRUE;
21857 +                               randGenOpData.lenInBytes = MAX_IV_LEN_IN_BYTES;
21858 +
21859 +                               icp_ocfDrvPtrAndLenToFlatBuffer((Cpa8U *)
21860 +                                                               drvOpData->
21861 +                                                               ivData,
21862 +                                                               MAX_IV_LEN_IN_BYTES,
21863 +                                                               &randData);
21864 +
21865 +                               if (CPA_STATUS_SUCCESS !=
21866 +                                   cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
21867 +                                                NULL, NULL,
21868 +                                                &randGenOpData, &randData)) {
21869 +                                       DPRINTK("%s(): ERROR - Failed to"
21870 +                                               " generate"
21871 +                                               " Initialisation Vector\n",
21872 +                                               __FUNCTION__);
21873 +                                       return ICP_OCF_DRV_STATUS_FAIL;
21874 +                               }
21875 +
21876 +                               crypto_copyback(drvOpData->crp->
21877 +                                               crp_flags,
21878 +                                               drvOpData->crp->crp_buf,
21879 +                                               crp_desc->crd_inject,
21880 +                                               drvOpData->lacOpData.
21881 +                                               ivLenInBytes,
21882 +                                               (caddr_t) (drvOpData->lacOpData.
21883 +                                                          pIv));
21884 +                       } else {
21885 +                               /* Reading IV from buffer */
21886 +                               crypto_copydata(drvOpData->crp->
21887 +                                               crp_flags,
21888 +                                               drvOpData->crp->crp_buf,
21889 +                                               crp_desc->crd_inject,
21890 +                                               drvOpData->lacOpData.
21891 +                                               ivLenInBytes,
21892 +                                               (caddr_t) (drvOpData->lacOpData.
21893 +                                                          pIv));
21894 +                       }
21895 +
21896 +               }
21897 +
21898 +       }
21899 +
21900 +       return ICP_OCF_DRV_STATUS_SUCCESS;
21901 +}
21902 +
21903 +/* Name        : icp_ocfDrvDigestPointerFind
21904 + *
21905 + * Description : This function is used to find the memory address of where the
21906 + * digest information shall be stored in. Input buffer types are an skbuff, iov
21907 + * or flat buffer. The address is found using the buffer data start address and
21908 + * an offset.
21909 + *
21910 + * Note: In the case of a linux skbuff, the digest address may exist within
21911 + * a memory space linked to from the start buffer. These linked memory spaces
21912 + * must be traversed by the data length offset in order to find the digest start
21913 + * address. Whether there is enough space for the digest must also be checked.
21914 + */
21915 +
21916 +static uint8_t *icp_ocfDrvDigestPointerFind(struct icp_drvOpData *drvOpData,
21917 +                                           struct cryptodesc *crp_desc)
21918 +{
21919 +
21920 +       int offsetInBytes = crp_desc->crd_inject;
21921 +       uint32_t digestSizeInBytes = drvOpData->digestSizeInBytes;
21922 +       uint8_t *flat_buffer_base = NULL;
21923 +       int flat_buffer_length = 0;
21924 +       struct sk_buff *skb;
21925 +
21926 +       if (drvOpData->crp->crp_flags & CRYPTO_F_SKBUF) {
21927 +               /*check if enough overall space to store hash */
21928 +               skb = (struct sk_buff *)(drvOpData->crp->crp_buf);
21929 +
21930 +               if (skb->len < (offsetInBytes + digestSizeInBytes)) {
21931 +                       DPRINTK("%s() Not enough space for Digest"
21932 +                               " payload after the offset (%d), "
21933 +                               "digest size (%d) \n", __FUNCTION__,
21934 +                               offsetInBytes, digestSizeInBytes);
21935 +                       return NULL;
21936 +               }
21937 +
21938 +               return icp_ocfDrvSkbuffDigestPointerFind(drvOpData,
21939 +                                                        offsetInBytes,
21940 +                                                        digestSizeInBytes);
21941 +
21942 +       } else {
21943 +               /* IOV or flat buffer */
21944 +               if (drvOpData->crp->crp_flags & CRYPTO_F_IOV) {
21945 +                       /*single IOV check has already been done */
21946 +                       flat_buffer_base = ((struct uio *)
21947 +                                           (drvOpData->crp->crp_buf))->
21948 +                           uio_iov[0].iov_base;
21949 +                       flat_buffer_length = ((struct uio *)
21950 +                                             (drvOpData->crp->crp_buf))->
21951 +                           uio_iov[0].iov_len;
21952 +               } else {
21953 +                       flat_buffer_base = (uint8_t *) drvOpData->crp->crp_buf;
21954 +                       flat_buffer_length = drvOpData->crp->crp_ilen;
21955 +               }
21956 +
21957 +               if (flat_buffer_length < (offsetInBytes + digestSizeInBytes)) {
21958 +                       DPRINTK("%s() Not enough space for Digest "
21959 +                               "(IOV/Flat Buffer) \n", __FUNCTION__);
21960 +                       return NULL;
21961 +               } else {
21962 +                       return (uint8_t *) (flat_buffer_base + offsetInBytes);
21963 +               }
21964 +       }
21965 +       DPRINTK("%s() Should not reach this point\n", __FUNCTION__);
21966 +       return NULL;
21967 +}
21968 +
21969 +/* Name        : icp_ocfDrvSkbuffDigestPointerFind
21970 + *
21971 + * Description : This function is used by icp_ocfDrvDigestPointerFind to
21972 + * process the non-linear portion of the skbuff; it dispatches to the page
21973 + * fragment or frag_list helper depending on the fragmentation type found
21974 + */
21975 +static inline uint8_t *icp_ocfDrvSkbuffDigestPointerFind(struct icp_drvOpData
21976 +                                                        *drvOpData,
21977 +                                                        int offsetInBytes,
21978 +                                                        uint32_t
21979 +                                                        digestSizeInBytes)
21980 +{
21981 +
21982 +       struct sk_buff *skb = NULL;
21983 +       struct skb_shared_info *skb_shared = NULL;
21984 +
21985 +       uint32_t skbuffisnonlinear = 0;
21986 +
21987 +       uint32_t skbheadlen = 0;
21988 +
21989 +       skb = (struct sk_buff *)(drvOpData->crp->crp_buf);
21990 +       skbuffisnonlinear = skb_is_nonlinear(skb);
21991 +
21992 +       skbheadlen = skb_headlen(skb);
21993 +
21994 +       /*Linear skb checks */
21995 +       if (skbheadlen > offsetInBytes) {
21996 +
21997 +               if (skbheadlen >= (offsetInBytes + digestSizeInBytes)) {
21998 +                       return (uint8_t *) (skb->data + offsetInBytes);
21999 +               } else {
22000 +                       DPRINTK("%s() Auth payload stretches "
22001 +                               "accross contiguous memory\n", __FUNCTION__);
22002 +                       return NULL;
22003 +               }
22004 +       } else {
22005 +               if (skbuffisnonlinear) {
22006 +                       offsetInBytes -= skbheadlen;
22007 +               } else {
22008 +                       DPRINTK("%s() Offset outside of buffer boundaries\n",
22009 +                               __FUNCTION__);
22010 +                       return NULL;
22011 +               }
22012 +       }
22013 +
22014 +       /*Non Linear checks. Use skb_shinfo(): skb->end is an offset, not
22015 +          a pointer, when NET_SKBUFF_DATA_USES_OFFSET is set (64-bit) */
22016 +       skb_shared = skb_shinfo(skb);
22017 +       if (unlikely(NULL == skb_shared)) {
22018 +               DPRINTK("%s() skbuff shared info stucture is NULL! \n",
22019 +                       __FUNCTION__);
22020 +               return NULL;
22021 +       } else if ((0 != skb_shared->nr_frags) &&
22022 +                  (skb_shared->frag_list != NULL)) {
22023 +               DPRINTK("%s() skbuff nr_frags AND "
22024 +                       "frag_list not supported \n", __FUNCTION__);
22025 +               return NULL;
22026 +       }
22027 +
22028 +       /*TCP segmentation more likely than IP fragmentation */
22029 +       if (likely(0 != skb_shared->nr_frags)) {
22030 +               return icp_ocfDrvDigestSkbNRFragsCheck(skb, skb_shared,
22031 +                                                      offsetInBytes,
22032 +                                                      digestSizeInBytes);
22033 +       } else if (skb_shared->frag_list != NULL) {
22034 +               return icp_ocfDrvDigestSkbFragListCheck(skb, skb_shared,
22035 +                                                       offsetInBytes,
22036 +                                                       digestSizeInBytes);
22037 +       } else {
22038 +               DPRINTK("%s() skbuff is non-linear but does not show any "
22039 +                       "linked data\n", __FUNCTION__);
22040 +               return NULL;
22041 +       }
22042 +}
22043 +
22044 +/* Name        : icp_ocfDrvDigestSkbNRFragsCheck
22045 + *
22046 + * Description : This function is used by icp_ocfDrvSkbuffDigestPointerFind to
22047 + * process the non-linear portion of the skbuff, if the fragmentation type is
22048 + * page fragments
22049 + */
22050 +static inline uint8_t *icp_ocfDrvDigestSkbNRFragsCheck(struct sk_buff *skb,
22051 +                                                      struct skb_shared_info
22052 +                                                      *skb_shared,
22053 +                                                      int offsetInBytes,
22054 +                                                      uint32_t
22055 +                                                      digestSizeInBytes)
22056 +{
22057 +       int i = 0;
22058 +       /*nr_frags is a fragment count, bounded by MAX_SKB_FRAGS */
22059 +       if (MAX_SKB_FRAGS < skb_shared->nr_frags) {
22060 +               DPRINTK("%s error processing skbuff "
22061 +                       "page frame -- MAX FRAGS exceeded \n", __FUNCTION__);
22062 +               return NULL;
22063 +       }
22064 +
22065 +       for (i = 0; i < skb_shared->nr_frags; i++) {
22066 +
22067 +               if (offsetInBytes >= skb_shared->frags[i].size) {
22068 +                       /*offset still greater than data position */
22069 +                       offsetInBytes -= skb_shared->frags[i].size;
22070 +               } else {
22071 +                       /* found frag with hash start; page assumed lowmem */
22072 +
22073 +                       if (NULL == skb_shared->frags[i].page) {
22074 +                               DPRINTK("%s() Linked page is NULL!\n",
22075 +                                       __FUNCTION__);
22076 +                               return NULL;
22077 +                       }
22078 +
22079 +                       if (offsetInBytes + digestSizeInBytes >
22080 +                           skb_shared->frags[i].size) {
22081 +                               DPRINTK("%s() Auth payload stretches accross "
22082 +                                       "contiguous memory\n", __FUNCTION__);
22083 +                               return NULL;
22084 +                       } else {
22085 +                               return (uint8_t *)
22086 +                                   page_address(skb_shared->frags[i].page)
22087 +                                   + skb_shared->frags[i].page_offset
22088 +                                   + offsetInBytes;
22089 +                       }
22090 +               }
22091 +               /*only possible if internal page sizes are set wrong */
22092 +               if (offsetInBytes < 0) {
22093 +                       DPRINTK("%s error processing skbuff page frame "
22094 +                               "-- offset calculation \n", __FUNCTION__);
22095 +                       return NULL;
22096 +               }
22097 +       }
22098 +       /*only possible if internal page sizes are set wrong */
22099 +       DPRINTK("%s error processing skbuff page frame "
22100 +               "-- ran out of page fragments, remaining offset = %d \n",
22101 +               __FUNCTION__, offsetInBytes);
22102 +       return NULL;
22103 +
22104 +}
22105 +
22106 +/* Name        : icp_ocfDrvDigestSkbFragListCheck
22107 + *
22108 + * Description : This function is used by icp_ocfDrvSkbuffDigestPointerFind to 
22109 + * process the non-linear portion of the skbuff, if the fragmentation type is 
22110 + * a linked list
22111 + * 
22112 + */
22113 +static inline uint8_t *icp_ocfDrvDigestSkbFragListCheck(struct sk_buff *skb,
22114 +                                                       struct skb_shared_info
22115 +                                                       *skb_shared,
22116 +                                                       int offsetInBytes,
22117 +                                                       uint32_t
22118 +                                                       digestSizeInBytes)
22119 +{
22120 +
22121 +       struct sk_buff *skb_list = skb_shared->frag_list;
22122 +       /*check added for readability */
22123 +       if (NULL == skb_list) {
22124 +               DPRINTK("%s error processing skbuff "
22125 +                       "-- no more list! \n", __FUNCTION__);
22126 +               return NULL;
22127 +       }
22128 +
22129 +       for (; skb_list; skb_list = skb_list->next) {
22130 +               if (NULL == skb_list) {
22131 +                       DPRINTK("%s error processing skbuff "
22132 +                               "-- no more list! \n", __FUNCTION__);
22133 +                       return NULL;
22134 +               }
22135 +
22136 +               if (offsetInBytes >= skb_list->len) {
22137 +                       offsetInBytes -= skb_list->len;
22138 +
22139 +               } else {
22140 +                       if (offsetInBytes + digestSizeInBytes > skb_list->len) {
22141 +                               DPRINTK("%s() Auth payload stretches accross "
22142 +                                       "contiguous memory\n", __FUNCTION__);
22143 +                               return NULL;
22144 +                       } else {
22145 +                               return (uint8_t *)
22146 +                                   (skb_list->data + offsetInBytes);
22147 +                       }
22148 +
22149 +               }
22150 +
22151 +               /*This check is only needed if internal skb_list length values
22152 +                  are set wrong. */
22153 +               if (0 > offsetInBytes) {
22154 +                       DPRINTK("%s() error processing skbuff object -- offset "
22155 +                               "calculation \n", __FUNCTION__);
22156 +                       return NULL;
22157 +               }
22158 +
22159 +       }
22160 +
22161 +       /*catch all for unusual for-loop exit. 
22162 +          This code should never be reached */
22163 +       DPRINTK("%s() Catch-All hit! Process error.\n", __FUNCTION__);
22164 +       return NULL;
22165 +}
22166 --- /dev/null
22167 +++ b/crypto/ocf/pasemi/pasemi.c
22168 @@ -0,0 +1,1009 @@
22169 +/*
22170 + * Copyright (C) 2007 PA Semi, Inc
22171 + *
22172 + * Driver for the PA Semi PWRficient DMA Crypto Engine
22173 + *
22174 + * This program is free software; you can redistribute it and/or modify
22175 + * it under the terms of the GNU General Public License version 2 as
22176 + * published by the Free Software Foundation.
22177 + *
22178 + * This program is distributed in the hope that it will be useful,
22179 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
22180 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
22181 + * GNU General Public License for more details.
22182 + *
22183 + * You should have received a copy of the GNU General Public License
22184 + * along with this program; if not, write to the Free Software
22185 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
22186 + */
22187 +
22188 +#ifndef AUTOCONF_INCLUDED
22189 +#include <linux/config.h>
22190 +#endif
22191 +#include <linux/module.h>
22192 +#include <linux/init.h>
22193 +#include <linux/interrupt.h>
22194 +#include <linux/timer.h>
22195 +#include <linux/random.h>
22196 +#include <linux/skbuff.h>
22197 +#include <asm/scatterlist.h>
22198 +#include <linux/moduleparam.h>
22199 +#include <linux/pci.h>
22200 +#include <cryptodev.h>
22201 +#include <uio.h>
22202 +#include "pasemi_fnu.h"
22203 +
22204 +#define DRV_NAME "pasemi"
22205 +
22206 +#define TIMER_INTERVAL 1000
22207 +
22208 +static void __devexit pasemi_dma_remove(struct pci_dev *pdev);
22209 +static struct pasdma_status volatile * dma_status;
22210 +
22211 +static int debug;
22212 +module_param(debug, int, 0644);
22213 +MODULE_PARM_DESC(debug, "Enable debug");
22214 +
22215 +static void pasemi_desc_start(struct pasemi_desc *desc, u64 hdr)
22216 +{
22217 +       desc->postop = 0;
22218 +       desc->quad[0] = hdr;
22219 +       desc->quad_cnt = 1;
22220 +       desc->size = 1;
22221 +}
22222 +
22223 +static void pasemi_desc_build(struct pasemi_desc *desc, u64 val)
22224 +{
22225 +       desc->quad[desc->quad_cnt++] = val;
22226 +       desc->size = (desc->quad_cnt + 1) / 2;
22227 +}
22228 +
22229 +static void pasemi_desc_hdr(struct pasemi_desc *desc, u64 hdr)
22230 +{
22231 +       desc->quad[0] |= hdr;
22232 +}
22233 +
22234 +static int pasemi_desc_size(struct pasemi_desc *desc)
22235 +{
22236 +       return desc->size;
22237 +}
22238 +
22239 +static void pasemi_ring_add_desc(
22240 +                                struct pasemi_fnu_txring *ring,
22241 +                                struct pasemi_desc *desc,
22242 +                                struct cryptop *crp) {
22243 +       int i;
22244 +       int ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
22245 +
22246 +       TX_DESC_INFO(ring, ring->next_to_fill).desc_size = desc->size;
22247 +       TX_DESC_INFO(ring, ring->next_to_fill).desc_postop = desc->postop;
22248 +       TX_DESC_INFO(ring, ring->next_to_fill).cf_crp = crp;
22249 +
22250 +       for (i = 0; i < desc->quad_cnt; i += 2) {
22251 +               ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
22252 +               ring->desc[ring_index] = desc->quad[i];
22253 +               ring->desc[ring_index + 1] = desc->quad[i + 1];
22254 +               ring->next_to_fill++;
22255 +       }
22256 +
22257 +       if (desc->quad_cnt & 1)
22258 +               ring->desc[ring_index + 1] = 0;
22259 +}
22260 +
22261 +static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr)
22262 +{
22263 +       out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index),
22264 +                incr);
22265 +}
22266 +
22267 +/*
22268 + * Generate a new software session.
22269 + */
22270 +static int
22271 +pasemi_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
22272 +{
22273 +       struct cryptoini *c, *encini = NULL, *macini = NULL;
22274 +       struct pasemi_softc *sc = device_get_softc(dev);
22275 +       struct pasemi_session *ses = NULL, **sespp;
22276 +       int sesn, blksz = 0;
22277 +       u64 ccmd = 0;
22278 +       unsigned long flags;
22279 +       struct pasemi_desc init_desc;
22280 +       struct pasemi_fnu_txring *txring;
22281 +
22282 +       DPRINTF("%s()\n", __FUNCTION__);
22283 +       if (sidp == NULL || cri == NULL || sc == NULL) {
22284 +               DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
22285 +               return -EINVAL;
22286 +       }
22287 +       for (c = cri; c != NULL; c = c->cri_next) {
22288 +               if (ALG_IS_SIG(c->cri_alg)) {
22289 +                       if (macini)
22290 +                               return -EINVAL;
22291 +                       macini = c;
22292 +               } else if (ALG_IS_CIPHER(c->cri_alg)) {
22293 +                       if (encini)
22294 +                               return -EINVAL;
22295 +                       encini = c;
22296 +               } else {
22297 +                       DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
22298 +                       return -EINVAL;
22299 +               }
22300 +       }
22301 +       if (encini == NULL && macini == NULL)
22302 +               return -EINVAL;
22303 +       if (encini) {
22304 +               /* validate key length */
22305 +               switch (encini->cri_alg) {
22306 +               case CRYPTO_DES_CBC:
22307 +                       if (encini->cri_klen != 64)
22308 +                               return -EINVAL;
22309 +                       ccmd = DMA_CALGO_DES;
22310 +                       break;
22311 +               case CRYPTO_3DES_CBC:
22312 +                       if (encini->cri_klen != 192)
22313 +                               return -EINVAL;
22314 +                       ccmd = DMA_CALGO_3DES;
22315 +                       break;
22316 +               case CRYPTO_AES_CBC:
22317 +                       if (encini->cri_klen != 128 &&
22318 +                           encini->cri_klen != 192 &&
22319 +                           encini->cri_klen != 256)
22320 +                               return -EINVAL;
22321 +                       ccmd = DMA_CALGO_AES;
22322 +                       break;
22323 +               case CRYPTO_ARC4:
22324 +                       if (encini->cri_klen != 128)
22325 +                               return -EINVAL;
22326 +                       ccmd = DMA_CALGO_ARC;
22327 +                       break;
22328 +               default:
22329 +                       DPRINTF("UNKNOWN encini->cri_alg %d\n",
22330 +                               encini->cri_alg);
22331 +                       return -EINVAL;
22332 +               }
22333 +       }
22334 +
22335 +       if (macini) {
22336 +               switch (macini->cri_alg) {
22337 +               case CRYPTO_MD5:
22338 +               case CRYPTO_MD5_HMAC:
22339 +                       blksz = 16;
22340 +                       break;
22341 +               case CRYPTO_SHA1:
22342 +               case CRYPTO_SHA1_HMAC:
22343 +                       blksz = 20;
22344 +                       break;
22345 +               default:
22346 +                       DPRINTF("UNKNOWN macini->cri_alg %d\n",
22347 +                               macini->cri_alg);
22348 +                       return -EINVAL;
22349 +               }
22350 +               if (((macini->cri_klen + 7) / 8) > blksz) {
22351 +                       DPRINTF("key length %d bigger than blksize %d not supported\n",
22352 +                               ((macini->cri_klen + 7) / 8), blksz);
22353 +                       return -EINVAL;
22354 +               }
22355 +       }
22356 +
22357 +       for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
22358 +               if (sc->sc_sessions[sesn] == NULL) {
22359 +                       sc->sc_sessions[sesn] = (struct pasemi_session *)
22360 +                               kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
22361 +                       ses = sc->sc_sessions[sesn];
22362 +                       break;
22363 +               } else if (sc->sc_sessions[sesn]->used == 0) {
22364 +                       ses = sc->sc_sessions[sesn];
22365 +                       break;
22366 +               }
22367 +       }
22368 +
22369 +       if (ses == NULL) {
22370 +               sespp = (struct pasemi_session **)
22371 +                       kzalloc(sc->sc_nsessions * 2 *
22372 +                               sizeof(struct pasemi_session *), GFP_ATOMIC);
22373 +               if (sespp == NULL)
22374 +                       return -ENOMEM;
22375 +               memcpy(sespp, sc->sc_sessions,
22376 +                      sc->sc_nsessions * sizeof(struct pasemi_session *));
22377 +               kfree(sc->sc_sessions);
22378 +               sc->sc_sessions = sespp;
22379 +               sesn = sc->sc_nsessions;
22380 +               ses = sc->sc_sessions[sesn] = (struct pasemi_session *)
22381 +                       kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
22382 +               if (ses == NULL)
22383 +                       return -ENOMEM;
22384 +               sc->sc_nsessions *= 2;
22385 +       }
22386 +
22387 +       ses->used = 1;
22388 +
22389 +       ses->dma_addr = pci_map_single(sc->dma_pdev, (void *) ses->civ,
22390 +                                      sizeof(struct pasemi_session), DMA_TO_DEVICE);
22391 +
22392 +       /* enter the channel scheduler */
22393 +       spin_lock_irqsave(&sc->sc_chnlock, flags);
22394 +
22395 +       /* ARC4 has to be processed by the even channel */
22396 +       if (encini && (encini->cri_alg == CRYPTO_ARC4))
22397 +               ses->chan = sc->sc_lastchn & ~1;
22398 +       else
22399 +               ses->chan = sc->sc_lastchn;
22400 +       sc->sc_lastchn = (sc->sc_lastchn + 1) % sc->sc_num_channels;
22401 +
22402 +       spin_unlock_irqrestore(&sc->sc_chnlock, flags);
22403 +
22404 +       txring = &sc->tx[ses->chan];
22405 +
22406 +       if (encini) {
22407 +               ses->ccmd = ccmd;
22408 +
22409 +               /* get an IV */
22410 +               /* XXX may read fewer than requested */
22411 +               get_random_bytes(ses->civ, sizeof(ses->civ));
22412 +
22413 +               ses->keysz = (encini->cri_klen - 63) / 64;
22414 +               memcpy(ses->key, encini->cri_key, (ses->keysz + 1) * 8);
22415 +
22416 +               pasemi_desc_start(&init_desc,
22417 +                                 XCT_CTRL_HDR(ses->chan, (encini && macini) ? 0x68 : 0x40, DMA_FN_CIV0));
22418 +               pasemi_desc_build(&init_desc,
22419 +                                 XCT_FUN_SRC_PTR((encini && macini) ? 0x68 : 0x40, ses->dma_addr));
22420 +       }
22421 +       if (macini) {
22422 +               if (macini->cri_alg == CRYPTO_MD5_HMAC ||
22423 +                   macini->cri_alg == CRYPTO_SHA1_HMAC)
22424 +                       memcpy(ses->hkey, macini->cri_key, blksz);
22425 +               else {
22426 +                       /* Load initialization constants (RFC 1321, 3174) */
22427 +                       ses->hiv[0] = 0x67452301efcdab89ULL;
22428 +                       ses->hiv[1] = 0x98badcfe10325476ULL;
22429 +                       ses->hiv[2] = 0xc3d2e1f000000000ULL;
22430 +               }
22431 +               ses->hseq = 0ULL;
22432 +       }
22433 +
22434 +       spin_lock_irqsave(&txring->fill_lock, flags);
22435 +
22436 +       if (((txring->next_to_fill + pasemi_desc_size(&init_desc)) -
22437 +            txring->next_to_clean) > TX_RING_SIZE) {
22438 +               spin_unlock_irqrestore(&txring->fill_lock, flags);
22439 +               return ERESTART;
22440 +       }
22441 +
22442 +       if (encini) {
22443 +               pasemi_ring_add_desc(txring, &init_desc, NULL);
22444 +               pasemi_ring_incr(sc, ses->chan,
22445 +                                pasemi_desc_size(&init_desc));
22446 +       }
22447 +
22448 +       txring->sesn = sesn;
22449 +       spin_unlock_irqrestore(&txring->fill_lock, flags);
22450 +
22451 +       *sidp = PASEMI_SID(sesn);
22452 +       return 0;
22453 +}
22454 +
22455 +/*
22456 + * Deallocate a session.
22457 + */
22458 +static int
22459 +pasemi_freesession(device_t dev, u_int64_t tid)
22460 +{
22461 +       struct pasemi_softc *sc = device_get_softc(dev);
22462 +       int session;
22463 +       u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
22464 +
22465 +       DPRINTF("%s()\n", __FUNCTION__);
22466 +
22467 +       if (sc == NULL)
22468 +               return -EINVAL;
22469 +       session = PASEMI_SESSION(sid);
22470 +       if (session >= sc->sc_nsessions || !sc->sc_sessions[session])
22471 +               return -EINVAL;
22472 +
22473 +       pci_unmap_single(sc->dma_pdev,
22474 +                        sc->sc_sessions[session]->dma_addr,
22475 +                        sizeof(struct pasemi_session), DMA_TO_DEVICE);
22476 +       memset(sc->sc_sessions[session], 0,
22477 +              sizeof(struct pasemi_session));
22478 +
22479 +       return 0;
22480 +}
22481 +
22482 +static int
22483 +pasemi_process(device_t dev, struct cryptop *crp, int hint)
22484 +{
22485 +
22486 +       int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
22487 +       struct pasemi_softc *sc = device_get_softc(dev);
22488 +       struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
22489 +       caddr_t ivp;
22490 +       struct pasemi_desc init_desc, work_desc;
22491 +       struct pasemi_session *ses;
22492 +       struct sk_buff *skb;
22493 +       struct uio *uiop;
22494 +       unsigned long flags;
22495 +       struct pasemi_fnu_txring *txring;
22496 +
22497 +       DPRINTF("%s()\n", __FUNCTION__);
22498 +
22499 +       if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
22500 +               return -EINVAL;
22501 +
22502 +       crp->crp_etype = 0;
22503 +       if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
22504 +               return -EINVAL;
22505 +
22506 +       ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];
22507 +
22508 +       crd1 = crp->crp_desc;
22509 +       if (crd1 == NULL) {
22510 +               err = -EINVAL;
22511 +               goto errout;
22512 +       }
22513 +       crd2 = crd1->crd_next;
22514 +
22515 +       if (ALG_IS_SIG(crd1->crd_alg)) {
22516 +               maccrd = crd1;
22517 +               if (crd2 == NULL)
22518 +                       enccrd = NULL;
22519 +               else if (ALG_IS_CIPHER(crd2->crd_alg) &&
22520 +                        (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
22521 +                       enccrd = crd2;
22522 +               else
22523 +                       goto erralg;
22524 +       } else if (ALG_IS_CIPHER(crd1->crd_alg)) {
22525 +               enccrd = crd1;
22526 +               if (crd2 == NULL)
22527 +                       maccrd = NULL;
22528 +               else if (ALG_IS_SIG(crd2->crd_alg) &&
22529 +                        (crd1->crd_flags & CRD_F_ENCRYPT))
22530 +                       maccrd = crd2;
22531 +               else
22532 +                       goto erralg;
22533 +       } else
22534 +               goto erralg;
22535 +
22536 +       chsel = ses->chan;
22537 +
22538 +       txring = &sc->tx[chsel];
22539 +
22540 +       if (enccrd && !maccrd) {
22541 +               if (enccrd->crd_alg == CRYPTO_ARC4)
22542 +                       reinit = 1;
22543 +               reinit_size = 0x40;
22544 +               srclen = crp->crp_ilen;
22545 +
22546 +               pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
22547 +                                 | XCT_FUN_FUN(chsel));
22548 +               if (enccrd->crd_flags & CRD_F_ENCRYPT)
22549 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
22550 +               else
22551 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
22552 +       } else if (enccrd && maccrd) {
22553 +               if (enccrd->crd_alg == CRYPTO_ARC4)
22554 +                       reinit = 1;
22555 +               reinit_size = 0x68;
22556 +
22557 +               if (enccrd->crd_flags & CRD_F_ENCRYPT) {
22558 +                       /* Encrypt -> Authenticate */
22559 +                       pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
22560 +                                         | XCT_FUN_A | XCT_FUN_FUN(chsel));
22561 +                       srclen = maccrd->crd_skip + maccrd->crd_len;
22562 +               } else {
22563 +                       /* Authenticate -> Decrypt */
22564 +                       pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
22565 +                                         | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
22566 +                       pasemi_desc_build(&work_desc, 0);
22567 +                       pasemi_desc_build(&work_desc, 0);
22568 +                       pasemi_desc_build(&work_desc, 0);
22569 +                       work_desc.postop = PASEMI_CHECK_SIG;
22570 +                       srclen = crp->crp_ilen;
22571 +               }
22572 +
22573 +               pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
22574 +               pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
22575 +       } else if (!enccrd && maccrd) {
22576 +               srclen = maccrd->crd_len;
22577 +
22578 +               pasemi_desc_start(&init_desc,
22579 +                                 XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
22580 +               pasemi_desc_build(&init_desc,
22581 +                                 XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));
22582 +
22583 +               pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
22584 +                                 | XCT_FUN_A | XCT_FUN_FUN(chsel));
22585 +       }
22586 +
22587 +       if (enccrd) {
22588 +               switch (enccrd->crd_alg) {
22589 +               case CRYPTO_3DES_CBC:
22590 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
22591 +                                       XCT_FUN_BCM_CBC);
22592 +                       ivsize = sizeof(u64);
22593 +                       break;
22594 +               case CRYPTO_DES_CBC:
22595 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
22596 +                                       XCT_FUN_BCM_CBC);
22597 +                       ivsize = sizeof(u64);
22598 +                       break;
22599 +               case CRYPTO_AES_CBC:
22600 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
22601 +                                       XCT_FUN_BCM_CBC);
22602 +                       ivsize = 2 * sizeof(u64);
22603 +                       break;
22604 +               case CRYPTO_ARC4:
22605 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
22606 +                       ivsize = 0;
22607 +                       break;
22608 +               default:
22609 +                       printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
22610 +                              enccrd->crd_alg);
22611 +                       err = -EINVAL;
22612 +                       goto errout;
22613 +               }
22614 +
22615 +               ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
22616 +               if (enccrd->crd_flags & CRD_F_ENCRYPT) {
22617 +                       if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
22618 +                               memcpy(ivp, enccrd->crd_iv, ivsize);
22619 +                       /* If IV is not present in the buffer already, it has to be copied there */
22620 +                       if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
22621 +                               crypto_copyback(crp->crp_flags, crp->crp_buf,
22622 +                                               enccrd->crd_inject, ivsize, ivp);
22623 +               } else {
22624 +                       if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
22625 +                               /* IV is provided explicitly in descriptor */
22626 +                               memcpy(ivp, enccrd->crd_iv, ivsize);
22627 +                       else
22628 +                               /* IV is provided in the packet */
22629 +                               crypto_copydata(crp->crp_flags, crp->crp_buf,
22630 +                                               enccrd->crd_inject, ivsize,
22631 +                                               ivp);
22632 +               }
22633 +       }
22634 +
22635 +       if (maccrd) {
22636 +               switch (maccrd->crd_alg) {
22637 +               case CRYPTO_MD5:
22638 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
22639 +                                       XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
22640 +                       break;
22641 +               case CRYPTO_SHA1:
22642 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
22643 +                                       XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
22644 +                       break;
22645 +               case CRYPTO_MD5_HMAC:
22646 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
22647 +                                       XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
22648 +                       break;
22649 +               case CRYPTO_SHA1_HMAC:
22650 +                       pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
22651 +                                       XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
22652 +                       break;
22653 +               default:
22654 +                       printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
22655 +                              maccrd->crd_alg);
22656 +                       err = -EINVAL;
22657 +                       goto errout;
22658 +               }
22659 +       }
22660 +
22661 +       if (crp->crp_flags & CRYPTO_F_SKBUF) {
22662 +               /* using SKB buffers */
22663 +               skb = (struct sk_buff *)crp->crp_buf;
22664 +               if (skb_shinfo(skb)->nr_frags) {
22665 +                       printk(DRV_NAME ": skb frags unimplemented\n");
22666 +                       err = -EINVAL;
22667 +                       goto errout;
22668 +               }
22669 +               pasemi_desc_build(
22670 +                       &work_desc,
22671 +                       XCT_FUN_DST_PTR(skb->len, pci_map_single(
22672 +                                               sc->dma_pdev, skb->data,
22673 +                                               skb->len, DMA_TO_DEVICE)));
22674 +               pasemi_desc_build(
22675 +                       &work_desc,
22676 +                       XCT_FUN_SRC_PTR(
22677 +                               srclen, pci_map_single(
22678 +                                       sc->dma_pdev, skb->data,
22679 +                                       srclen, DMA_TO_DEVICE)));
22680 +               pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
22681 +       } else if (crp->crp_flags & CRYPTO_F_IOV) {
22682 +               /* using IOV buffers */
22683 +               uiop = (struct uio *)crp->crp_buf;
22684 +               if (uiop->uio_iovcnt > 1) {
22685 +                       printk(DRV_NAME ": iov frags unimplemented\n");
22686 +                       err = -EINVAL;
22687 +                       goto errout;
22688 +               }
22689 +
22690 +               /* crp_olen is never set; always use crp_ilen */
22691 +               pasemi_desc_build(
22692 +                       &work_desc,
22693 +                       XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
22694 +                                               sc->dma_pdev,
22695 +                                               uiop->uio_iov->iov_base,
22696 +                                               crp->crp_ilen, DMA_TO_DEVICE)));
22697 +               pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
22698 +
22699 +               pasemi_desc_build(
22700 +                       &work_desc,
22701 +                       XCT_FUN_SRC_PTR(srclen, pci_map_single(
22702 +                                               sc->dma_pdev,
22703 +                                               uiop->uio_iov->iov_base,
22704 +                                               srclen, DMA_TO_DEVICE)));
22705 +       } else {
22706 +               /* using contig buffers */
22707 +               pasemi_desc_build(
22708 +                       &work_desc,
22709 +                       XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
22710 +                                               sc->dma_pdev,
22711 +                                               crp->crp_buf,
22712 +                                               crp->crp_ilen, DMA_TO_DEVICE)));
22713 +               pasemi_desc_build(
22714 +                       &work_desc,
22715 +                       XCT_FUN_SRC_PTR(srclen, pci_map_single(
22716 +                                               sc->dma_pdev,
22717 +                                               crp->crp_buf, srclen,
22718 +                                               DMA_TO_DEVICE)));
22719 +               pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
22720 +       }
22721 +
22722 +       spin_lock_irqsave(&txring->fill_lock, flags);
22723 +
22724 +       if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
22725 +               txring->sesn = PASEMI_SESSION(crp->crp_sid);
22726 +               reinit = 1;
22727 +       }
22728 +
22729 +       if (enccrd) {
22730 +               pasemi_desc_start(&init_desc,
22731 +                                 XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
22732 +               pasemi_desc_build(&init_desc,
22733 +                                 XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
22734 +       }
22735 +
22736 +       if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
22737 +             pasemi_desc_size(&work_desc)) -
22738 +            txring->next_to_clean) > TX_RING_SIZE) {
22739 +               spin_unlock_irqrestore(&txring->fill_lock, flags);
22740 +               err = ERESTART;
22741 +               goto errout;
22742 +       }
22743 +
22744 +       pasemi_ring_add_desc(txring, &init_desc, NULL);
22745 +       pasemi_ring_add_desc(txring, &work_desc, crp);
22746 +
22747 +       pasemi_ring_incr(sc, chsel,
22748 +                        pasemi_desc_size(&init_desc) +
22749 +                        pasemi_desc_size(&work_desc));
22750 +
22751 +       spin_unlock_irqrestore(&txring->fill_lock, flags);
22752 +
22753 +       mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);
22754 +
22755 +       return 0;
22756 +
22757 +erralg:
22758 +       printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
22759 +              crd1->crd_alg, crd2->crd_alg);
22760 +       err = -EINVAL;
22761 +
22762 +errout:
22763 +       if (err != ERESTART) {
22764 +               crp->crp_etype = err;
22765 +               crypto_done(crp);
22766 +       }
22767 +       return err;
22768 +}
22769 +
22770 +static int pasemi_clean_tx(struct pasemi_softc *sc, int chan)
22771 +{
22772 +       int i, j, ring_idx;
22773 +       struct pasemi_fnu_txring *ring = &sc->tx[chan];
22774 +       u16 delta_cnt;
22775 +       int flags, loops = 10;
22776 +       int desc_size;
22777 +       struct cryptop *crp;
22778 +
22779 +       spin_lock_irqsave(&ring->clean_lock, flags);
22780 +
22781 +       while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan]
22782 +                            & PAS_STATUS_PCNT_M) - ring->total_pktcnt)
22783 +              && loops--) {
22784 +
22785 +               for (i = 0; i < delta_cnt; i++) {
22786 +                       desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size;
22787 +                       crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp;
22788 +                       if (crp) {
22789 +                               ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
22790 +                               if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) {
22791 +                                       /* Need to make sure signature matched,
22792 +                                        * if not - return error */
22793 +                                       if (!(ring->desc[ring_idx + 1] & (1ULL << 63)))
22794 +                                               crp->crp_etype = -EINVAL;
22795 +                               }
22796 +                               crypto_done(TX_DESC_INFO(ring,
22797 +                                                        ring->next_to_clean).cf_crp);
22798 +                               TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL;
22799 +                               pci_unmap_single(
22800 +                                       sc->dma_pdev,
22801 +                                       XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]),
22802 +                                       PCI_DMA_TODEVICE);
22803 +
22804 +                               ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0;
22805 +
22806 +                               ring->next_to_clean++;
22807 +                               for (j = 1; j < desc_size; j++) {
22808 +                                       ring_idx = 2 *
22809 +                                               (ring->next_to_clean &
22810 +                                                (TX_RING_SIZE-1));
22811 +                                       pci_unmap_single(
22812 +                                               sc->dma_pdev,
22813 +                                               XCT_PTR_ADDR_LEN(ring->desc[ring_idx]),
22814 +                                               PCI_DMA_TODEVICE);
22815 +                                       if (ring->desc[ring_idx + 1])
22816 +                                               pci_unmap_single(
22817 +                                                       sc->dma_pdev,
22818 +                                                       XCT_PTR_ADDR_LEN(
22819 +                                                               ring->desc[
22820 +                                                                       ring_idx + 1]),
22821 +                                                       PCI_DMA_TODEVICE);
22822 +                                       ring->desc[ring_idx] =
22823 +                                               ring->desc[ring_idx + 1] = 0;
22824 +                                       ring->next_to_clean++;
22825 +                               }
22826 +                       } else {
22827 +                               for (j = 0; j < desc_size; j++) {
22828 +                                       ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
22829 +                                       ring->desc[ring_idx] =
22830 +                                               ring->desc[ring_idx + 1] = 0;
22831 +                                       ring->next_to_clean++;
22832 +                               }
22833 +                       }
22834 +               }
22835 +
22836 +               ring->total_pktcnt += delta_cnt;
22837 +       }
22838 +       spin_unlock_irqrestore(&ring->clean_lock, flags);
22839 +
22840 +       return 0;
22841 +}
22842 +
22843 +static void sweepup_tx(struct pasemi_softc *sc)
22844 +{
22845 +       int i;
22846 +
22847 +       for (i = 0; i < sc->sc_num_channels; i++)
22848 +               pasemi_clean_tx(sc, i);
22849 +}
22850 +
22851 +static irqreturn_t pasemi_intr(int irq, void *arg, struct pt_regs *regs)
22852 +{
22853 +       struct pasemi_softc *sc = arg;
22854 +       unsigned int reg;
22855 +       int chan = irq - sc->base_irq;
22856 +       int chan_index = sc->base_chan + chan;
22857 +       u64 stat = dma_status->tx_sta[chan_index];
22858 +
22859 +       DPRINTF("%s()\n", __FUNCTION__);
22860 +
22861 +       if (!(stat & PAS_STATUS_CAUSE_M))
22862 +               return IRQ_NONE;
22863 +
22864 +       pasemi_clean_tx(sc, chan);
22865 +
22866 +       stat = dma_status->tx_sta[chan_index];
22867 +
22868 +       reg = PAS_IOB_DMA_TXCH_RESET_PINTC |
22869 +               PAS_IOB_DMA_TXCH_RESET_PCNT(sc->tx[chan].total_pktcnt);
22870 +
22871 +       if (stat & PAS_STATUS_SOFT)
22872 +               reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
22873 +
22874 +       out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), reg);
22875 +
22876 +
22877 +       return IRQ_HANDLED;
22878 +}
22879 +
22880 +static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan)
22881 +{
22882 +       u32 val;
22883 +       int chan_index = chan + sc->base_chan;
22884 +       int ret;
22885 +       struct pasemi_fnu_txring *ring;
22886 +
22887 +       ring = &sc->tx[chan];
22888 +
22889 +       spin_lock_init(&ring->fill_lock);
22890 +       spin_lock_init(&ring->clean_lock);
22891 +
22892 +       ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) *
22893 +                                 TX_RING_SIZE, GFP_KERNEL);
22894 +       if (!ring->desc_info)
22895 +               return -ENOMEM;
22896 +
22897 +       /* Allocate descriptors */
22898 +       ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev,
22899 +                                       TX_RING_SIZE *
22900 +                                       2 * sizeof(u64),
22901 +                                       &ring->dma, GFP_KERNEL);
22902 +       if (!ring->desc)
22903 +               return -ENOMEM;
22904 +
22905 +       memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64));
22906 +
22907 +       out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30);
22908 +
22909 +       ring->total_pktcnt = 0;
22910 +
22911 +       out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index),
22912 +                PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
22913 +
22914 +       val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
22915 +       val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
22916 +
22917 +       out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val);
22918 +
22919 +       out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index),
22920 +                PAS_DMA_TXCHAN_CFG_TY_FUNC |
22921 +                PAS_DMA_TXCHAN_CFG_TATTR(chan) |
22922 +                PAS_DMA_TXCHAN_CFG_WT(2));
22923 +
22924 +       /* enable tx channel */
22925 +       out_le32(sc->dma_regs +
22926 +                PAS_DMA_TXCHAN_TCMDSTA(chan_index),
22927 +                PAS_DMA_TXCHAN_TCMDSTA_EN);
22928 +
22929 +       out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index),
22930 +                PAS_IOB_DMA_TXCH_CFG_CNTTH(1000));
22931 +
22932 +       ring->next_to_fill = 0;
22933 +       ring->next_to_clean = 0;
22934 +
22935 +       snprintf(ring->irq_name, sizeof(ring->irq_name),
22936 +                "%s%d", "crypto", chan);
22937 +
22938 +       ring->irq = irq_create_mapping(NULL, sc->base_irq + chan);
22939 +       ret = request_irq(ring->irq, (irq_handler_t)
22940 +                         pasemi_intr, IRQF_DISABLED, ring->irq_name, sc);
22941 +       if (ret) {
22942 +               printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n",
22943 +                      ring->irq, ret);
22944 +               ring->irq = -1;
22945 +               return ret;
22946 +       }
22947 +
22948 +       setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc);
22949 +
22950 +       return 0;
22951 +}
22952 +
22953 +static device_method_t pasemi_methods = {
22954 +       /* crypto device methods */
22955 +       DEVMETHOD(cryptodev_newsession,         pasemi_newsession),
22956 +       DEVMETHOD(cryptodev_freesession,        pasemi_freesession),
22957 +       DEVMETHOD(cryptodev_process,            pasemi_process),
22958 +};
22959 +
22960 +/* Set up the crypto device structure, private data,
22961 + * and anything else we need before we start */
22962 +
22963 +static int __devinit
22964 +pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
22965 +{
22966 +       struct pasemi_softc *sc;
22967 +       int ret, i;
22968 +
22969 +       DPRINTF("%s()\n", __FUNCTION__);
22970 +
22971 +       sc = kzalloc(sizeof(*sc), GFP_KERNEL);
22972 +       if (!sc)
22973 +               return -ENOMEM;
22974 +
22975 +       softc_device_init(sc, DRV_NAME, 1, pasemi_methods);
22976 +
22977 +       pci_set_drvdata(pdev, sc);
22978 +
22979 +       spin_lock_init(&sc->sc_chnlock);
22980 +
22981 +       sc->sc_sessions = (struct pasemi_session **)
22982 +               kzalloc(PASEMI_INITIAL_SESSIONS *
22983 +                       sizeof(struct pasemi_session *), GFP_ATOMIC);
22984 +       if (sc->sc_sessions == NULL) {
22985 +               ret = -ENOMEM;
22986 +               goto out;
22987 +       }
22988 +
22989 +       sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
22990 +       sc->sc_lastchn = 0;
22991 +       sc->base_irq = pdev->irq + 6;
22992 +       sc->base_chan = 6;
22993 +       sc->sc_cid = -1;
22994 +       sc->dma_pdev = pdev;
22995 +
22996 +       sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
22997 +       if (!sc->iob_pdev) {
22998 +               dev_err(&pdev->dev, "Can't find I/O Bridge\n");
22999 +               ret = -ENODEV;
23000 +               goto out;
23001 +       }
23002 +
23003 +       /* This is hardcoded and ugly, but we have some firmware versions
23004 +        * who don't provide the register space in the device tree. Luckily
23005 +        * they are at well-known locations so we can just do the math here.
23006 +        */
23007 +       sc->dma_regs =
23008 +               ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
23009 +       sc->iob_regs =
23010 +               ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
23011 +       if (!sc->dma_regs || !sc->iob_regs) {
23012 +               dev_err(&pdev->dev, "Can't map registers\n");
23013 +               ret = -ENODEV;
23014 +               goto out;
23015 +       }
23016 +
23017 +       dma_status = __ioremap(0xfd800000, 0x1000, 0);
23018 +       if (!dma_status) {
23019 +               ret = -ENODEV;
23020 +               dev_err(&pdev->dev, "Can't map dmastatus space\n");
23021 +               goto out;
23022 +       }
23023 +
23024 +       sc->tx = (struct pasemi_fnu_txring *)
23025 +               kzalloc(sizeof(struct pasemi_fnu_txring)
23026 +                       * 8, GFP_KERNEL);
23027 +       if (!sc->tx) {
23028 +               ret = -ENOMEM;
23029 +               goto out;
23030 +       }
23031 +
23032 +       /* Initialize the h/w */
23033 +       out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
23034 +                (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
23035 +                 PAS_DMA_COM_CFG_FWF));
23036 +       out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);
23037 +
23038 +       for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
23039 +               sc->sc_num_channels++;
23040 +               ret = pasemi_dma_setup_tx_resources(sc, i);
23041 +               if (ret)
23042 +                       goto out;
23043 +       }
23044 +
23045 +       sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
23046 +                                        CRYPTOCAP_F_HARDWARE);
23047 +       if (sc->sc_cid < 0) {
23048 +               printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
23049 +               ret = -ENXIO;
23050 +               goto out;
23051 +       }
23052 +
23053 +       /* register algorithms with the framework */
23054 +       printk(DRV_NAME ":");
23055 +
23056 +       crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
23057 +       crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
23058 +       crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
23059 +       crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
23060 +       crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
23061 +       crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
23062 +       crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
23063 +       crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
23064 +
23065 +       return 0;
23066 +
23067 +out:
23068 +       pasemi_dma_remove(pdev);
23069 +       return ret;
23070 +}
23071 +
23072 +#define MAX_RETRIES 5000
23073 +
23074 +static void pasemi_free_tx_resources(struct pasemi_softc *sc, int chan)
23075 +{
23076 +       struct pasemi_fnu_txring *ring = &sc->tx[chan];
23077 +       int chan_index = chan + sc->base_chan;
23078 +       int retries;
23079 +       u32 stat;
23080 +
23081 +       /* Stop the channel */
23082 +       out_le32(sc->dma_regs +
23083 +                PAS_DMA_TXCHAN_TCMDSTA(chan_index),
23084 +                PAS_DMA_TXCHAN_TCMDSTA_ST);
23085 +
23086 +       for (retries = 0; retries < MAX_RETRIES; retries++) {
23087 +               stat = in_le32(sc->dma_regs +
23088 +                              PAS_DMA_TXCHAN_TCMDSTA(chan_index));
23089 +               if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
23090 +                       break;
23091 +               cond_resched();
23092 +       }
23093 +
23094 +       if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
23095 +               dev_err(&sc->dma_pdev->dev, "Failed to stop tx channel %d\n",
23096 +                       chan_index);
23097 +
23098 +       /* Disable the channel */
23099 +       out_le32(sc->dma_regs +
23100 +                PAS_DMA_TXCHAN_TCMDSTA(chan_index),
23101 +                0);
23102 +
23103 +       if (ring->desc_info)
23104 +               kfree((void *) ring->desc_info);
23105 +       if (ring->desc)
23106 +               dma_free_coherent(&sc->dma_pdev->dev,
23107 +                                 TX_RING_SIZE *
23108 +                                 2 * sizeof(u64),
23109 +                                 (void *) ring->desc, ring->dma);
23110 +       if (ring->irq != -1)
23111 +               free_irq(ring->irq, sc);
23112 +
23113 +       del_timer(&ring->crypto_timer);
23114 +}
23115 +
23116 +static void __devexit pasemi_dma_remove(struct pci_dev *pdev)
23117 +{
23118 +       struct pasemi_softc *sc = pci_get_drvdata(pdev);
23119 +       int i;
23120 +
23121 +       DPRINTF("%s()\n", __FUNCTION__);
23122 +
23123 +       if (sc->sc_cid >= 0) {
23124 +               crypto_unregister_all(sc->sc_cid);
23125 +       }
23126 +
23127 +       if (sc->tx) {
23128 +               for (i = 0; i < sc->sc_num_channels; i++)
23129 +                       pasemi_free_tx_resources(sc, i);
23130 +
23131 +               kfree(sc->tx);
23132 +       }
23133 +       if (sc->sc_sessions) {
23134 +               for (i = 0; i < sc->sc_nsessions; i++)
23135 +                       kfree(sc->sc_sessions[i]);
23136 +               kfree(sc->sc_sessions);
23137 +       }
23138 +       if (sc->iob_pdev)
23139 +               pci_dev_put(sc->iob_pdev);
23140 +       if (sc->dma_regs)
23141 +               iounmap(sc->dma_regs);
23142 +       if (sc->iob_regs)
23143 +               iounmap(sc->iob_regs);
23144 +       kfree(sc);
23145 +}
23146 +
23147 +static struct pci_device_id pasemi_dma_pci_tbl[] = {
23148 +       { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa007) }, { /* terminator */ },
23149 +};
23150 +
23151 +MODULE_DEVICE_TABLE(pci, pasemi_dma_pci_tbl);
23152 +
23153 +static struct pci_driver pasemi_dma_driver = {
23154 +       .name           = "pasemi_dma",
23155 +       .id_table       = pasemi_dma_pci_tbl,
23156 +       .probe          = pasemi_dma_probe,
23157 +       .remove         = __devexit_p(pasemi_dma_remove),
23158 +};
23159 +
23160 +static void __exit pasemi_dma_cleanup_module(void)
23161 +{
23162 +       pci_unregister_driver(&pasemi_dma_driver);
23163 +       __iounmap(dma_status);
23164 +       dma_status = NULL;
23165 +}
23166 +
23167 +int pasemi_dma_init_module(void)
23168 +{
23169 +       return pci_register_driver(&pasemi_dma_driver);
23170 +}
23171 +
23172 +module_init(pasemi_dma_init_module);
23173 +module_exit(pasemi_dma_cleanup_module);
23174 +
23175 +MODULE_LICENSE("Dual BSD/GPL");
23176 +MODULE_AUTHOR("Egor Martovetsky egor@pasemi.com");
23177 +MODULE_DESCRIPTION("OCF driver for PA Semi PWRficient DMA Crypto Engine");
23178 --- /dev/null
23179 +++ b/crypto/ocf/pasemi/pasemi_fnu.h
23180 @@ -0,0 +1,410 @@
23181 +/*
23182 + * Copyright (C) 2007 PA Semi, Inc
23183 + *
23184 + * Driver for the PA Semi PWRficient DMA Crypto Engine, soft state and
23185 + * hardware register layouts.
23186 + *
23187 + * This program is free software; you can redistribute it and/or modify
23188 + * it under the terms of the GNU General Public License version 2 as
23189 + * published by the Free Software Foundation.
23190 + *
23191 + * This program is distributed in the hope that it will be useful,
23192 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
23193 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
23194 + * GNU General Public License for more details.
23195 + *
23196 + * You should have received a copy of the GNU General Public License
23197 + * along with this program; if not, write to the Free Software
23198 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
23199 + */
23200 +
23201 +#ifndef PASEMI_FNU_H
23202 +#define PASEMI_FNU_H
23203 +
23204 +#include <linux/spinlock.h>
23205 +
23206 +#define        PASEMI_SESSION(sid)     ((sid) & 0xffffffff)
23207 +#define        PASEMI_SID(sesn)        ((sesn) & 0xffffffff)
23208 +#define        DPRINTF(a...)   if (debug) { printk(DRV_NAME ": " a); }
23209 +
23210 +/* Must be a power of two */
23211 +#define RX_RING_SIZE 512
23212 +#define TX_RING_SIZE 512
23213 +#define TX_DESC(ring, num)     ((ring)->desc[2 * (num & (TX_RING_SIZE-1))])
23214 +#define TX_DESC_INFO(ring, num)        ((ring)->desc_info[(num) & (TX_RING_SIZE-1)])
23215 +#define MAX_DESC_SIZE 8
23216 +#define PASEMI_INITIAL_SESSIONS 10
23217 +#define PASEMI_FNU_CHANNELS 8
23218 +
23219 +/* DMA descriptor */
23220 +struct pasemi_desc {
23221 +       u64 quad[2*MAX_DESC_SIZE];
23222 +       int quad_cnt;
23223 +       int size;
23224 +       int postop;
23225 +};
23226 +
23227 +/*
23228 + * Holds per descriptor data
23229 + */
23230 +struct pasemi_desc_info {
23231 +       int                     desc_size;
23232 +       int                     desc_postop;
23233 +#define PASEMI_CHECK_SIG 0x1
23234 +
23235 +       struct cryptop          *cf_crp;
23236 +};
23237 +
23238 +/*
23239 + * Holds per channel data
23240 + */
23241 +struct pasemi_fnu_txring {
23242 +       volatile u64            *desc;
23243 +       volatile struct
23244 +       pasemi_desc_info        *desc_info;
23245 +       dma_addr_t              dma;
23246 +       struct timer_list       crypto_timer;
23247 +       spinlock_t              fill_lock;
23248 +       spinlock_t              clean_lock;
23249 +       unsigned int            next_to_fill;
23250 +       unsigned int            next_to_clean;
23251 +       u16                     total_pktcnt;
23252 +       int                     irq;
23253 +       int                     sesn;
23254 +       char                    irq_name[10];
23255 +};
23256 +
23257 +/*
23258 + * Holds data specific to a single pasemi device.
23259 + */
23260 +struct pasemi_softc {
23261 +       softc_device_decl       sc_cdev;
23262 +       struct pci_dev          *dma_pdev;      /* device backpointer */
23263 +       struct pci_dev          *iob_pdev;      /* device backpointer */
23264 +       void __iomem            *dma_regs;
23265 +       void __iomem            *iob_regs;
23266 +       int                     base_irq;
23267 +       int                     base_chan;
23268 +       int32_t                 sc_cid;         /* crypto tag */
23269 +       int                     sc_nsessions;
23270 +       struct pasemi_session   **sc_sessions;
23271 +       int                     sc_num_channels;/* number of crypto channels */
23272 +
23273 +       /* pointer to the array of txring datastructures, one txring per channel */
23274 +       struct pasemi_fnu_txring *tx;
23275 +
23276 +       /*
23277 +        * mutual exclusion for the channel scheduler
23278 +        */
23279 +       spinlock_t              sc_chnlock;
23280 +       /* last channel used, for now use round-robin to allocate channels */
23281 +       int                     sc_lastchn;
23282 +};
23283 +
23284 +struct pasemi_session {
23285 +       u64 civ[2];
23286 +       u64 keysz;
23287 +       u64 key[4];
23288 +       u64 ccmd;
23289 +       u64 hkey[4];
23290 +       u64 hseq;
23291 +       u64 giv[2];
23292 +       u64 hiv[4];
23293 +
23294 +       int used;
23295 +       dma_addr_t      dma_addr;
23296 +       int chan;
23297 +};
23298 +
23299 +/* status register layout in IOB region, at 0xfd800000 */
23300 +struct pasdma_status {
23301 +       u64 rx_sta[64];
23302 +       u64 tx_sta[20];
23303 +};
23304 +
23305 +#define ALG_IS_CIPHER(alg) ((alg == CRYPTO_DES_CBC)            || \
23306 +                               (alg == CRYPTO_3DES_CBC)        || \
23307 +                               (alg == CRYPTO_AES_CBC)         || \
23308 +                               (alg == CRYPTO_ARC4)            || \
23309 +                               (alg == CRYPTO_NULL_CBC))
23310 +
23311 +#define ALG_IS_SIG(alg) ((alg == CRYPTO_MD5)                   || \
23312 +                               (alg == CRYPTO_MD5_HMAC)        || \
23313 +                               (alg == CRYPTO_SHA1)            || \
23314 +                               (alg == CRYPTO_SHA1_HMAC)       || \
23315 +                               (alg == CRYPTO_NULL_HMAC))
23316 +
23317 +enum {
23318 +       PAS_DMA_COM_TXCMD = 0x100,      /* Transmit Command Register  */
23319 +       PAS_DMA_COM_TXSTA = 0x104,      /* Transmit Status Register   */
23320 +       PAS_DMA_COM_RXCMD = 0x108,      /* Receive Command Register   */
23321 +       PAS_DMA_COM_RXSTA = 0x10c,      /* Receive Status Register    */
23322 +       PAS_DMA_COM_CFG   = 0x114,      /* DMA Configuration Register */
23323 +};
23324 +
23325 +/* All these registers live in the PCI configuration space for the DMA PCI
23326 + * device. Use the normal PCI config access functions for them.
23327 + */
23328 +
23329 +#define PAS_DMA_COM_CFG_FWF    0x18000000
23330 +
23331 +#define PAS_DMA_COM_TXCMD_EN   0x00000001 /* enable */
23332 +#define PAS_DMA_COM_TXSTA_ACT  0x00000001 /* active */
23333 +#define PAS_DMA_COM_RXCMD_EN   0x00000001 /* enable */
23334 +#define PAS_DMA_COM_RXSTA_ACT  0x00000001 /* active */
23335 +
23336 +#define _PAS_DMA_TXCHAN_STRIDE 0x20    /* Size per channel             */
23337 +#define _PAS_DMA_TXCHAN_TCMDSTA        0x300   /* Command / Status             */
23338 +#define _PAS_DMA_TXCHAN_CFG    0x304   /* Configuration                */
23339 +#define _PAS_DMA_TXCHAN_DSCRBU 0x308   /* Descriptor BU Allocation     */
23340 +#define _PAS_DMA_TXCHAN_INCR   0x310   /* Descriptor increment         */
23341 +#define _PAS_DMA_TXCHAN_CNT    0x314   /* Descriptor count/offset      */
23342 +#define _PAS_DMA_TXCHAN_BASEL  0x318   /* Descriptor ring base (low)   */
23343 +#define _PAS_DMA_TXCHAN_BASEU  0x31c   /*                      (high)  */
23344 +#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
23345 +#define    PAS_DMA_TXCHAN_TCMDSTA_EN   0x00000001      /* Enabled */
23346 +#define    PAS_DMA_TXCHAN_TCMDSTA_ST   0x00000002      /* Stop interface */
23347 +#define    PAS_DMA_TXCHAN_TCMDSTA_ACT  0x00010000      /* Active */
23348 +#define PAS_DMA_TXCHAN_CFG(c)     (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
23349 +#define    PAS_DMA_TXCHAN_CFG_TY_FUNC  0x00000002      /* Type = function */
23350 +#define    PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000      /* Type = interface */
23351 +#define    PAS_DMA_TXCHAN_CFG_TATTR_M  0x0000003c
23352 +#define    PAS_DMA_TXCHAN_CFG_TATTR_S  2
23353 +#define    PAS_DMA_TXCHAN_CFG_TATTR(x) (((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
23354 +                                        PAS_DMA_TXCHAN_CFG_TATTR_M)
23355 +#define    PAS_DMA_TXCHAN_CFG_WT_M     0x000001c0
23356 +#define    PAS_DMA_TXCHAN_CFG_WT_S     6
23357 +#define    PAS_DMA_TXCHAN_CFG_WT(x)    (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
23358 +                                        PAS_DMA_TXCHAN_CFG_WT_M)
23359 +#define    PAS_DMA_TXCHAN_CFG_LPSQ_FAST        0x00000400
23360 +#define    PAS_DMA_TXCHAN_CFG_LPDQ_FAST        0x00000800
23361 +#define    PAS_DMA_TXCHAN_CFG_CF       0x00001000      /* Clean first line */
23362 +#define    PAS_DMA_TXCHAN_CFG_CL       0x00002000      /* Clean last line */
23363 +#define    PAS_DMA_TXCHAN_CFG_UP       0x00004000      /* update tx descr when sent */
23364 +#define PAS_DMA_TXCHAN_INCR(c)    (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
23365 +#define PAS_DMA_TXCHAN_BASEL(c)   (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
23366 +#define    PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0
23367 +#define    PAS_DMA_TXCHAN_BASEL_BRBL_S 0
23368 +#define    PAS_DMA_TXCHAN_BASEL_BRBL(x)        (((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
23369 +                                        PAS_DMA_TXCHAN_BASEL_BRBL_M)
23370 +#define PAS_DMA_TXCHAN_BASEU(c)   (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
23371 +#define    PAS_DMA_TXCHAN_BASEU_BRBH_M 0x00000fff
23372 +#define    PAS_DMA_TXCHAN_BASEU_BRBH_S 0
23373 +#define    PAS_DMA_TXCHAN_BASEU_BRBH(x)        (((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
23374 +                                        PAS_DMA_TXCHAN_BASEU_BRBH_M)
23375 +/* # of cache lines worth of buffer ring */
23376 +#define    PAS_DMA_TXCHAN_BASEU_SIZ_M  0x3fff0000
23377 +#define    PAS_DMA_TXCHAN_BASEU_SIZ_S  16              /* 0 = 16K */
23378 +#define    PAS_DMA_TXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
23379 +                                        PAS_DMA_TXCHAN_BASEU_SIZ_M)
23380 +
23381 +#define    PAS_STATUS_PCNT_M           0x000000000000ffffull
23382 +#define    PAS_STATUS_PCNT_S           0
23383 +#define    PAS_STATUS_DCNT_M           0x00000000ffff0000ull
23384 +#define    PAS_STATUS_DCNT_S           16
23385 +#define    PAS_STATUS_BPCNT_M          0x0000ffff00000000ull
23386 +#define    PAS_STATUS_BPCNT_S          32
23387 +#define    PAS_STATUS_CAUSE_M          0xf000000000000000ull
23388 +#define    PAS_STATUS_TIMER            0x1000000000000000ull
23389 +#define    PAS_STATUS_ERROR            0x2000000000000000ull
23390 +#define    PAS_STATUS_SOFT             0x4000000000000000ull
23391 +#define    PAS_STATUS_INT              0x8000000000000000ull
23392 +
23393 +#define PAS_IOB_DMA_RXCH_CFG(i)                (0x1100 + (i)*4)
23394 +#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_M                0x00000fff
23395 +#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_S                0
23396 +#define    PAS_IOB_DMA_RXCH_CFG_CNTTH(x)       (((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
23397 +                                                PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
23398 +#define PAS_IOB_DMA_TXCH_CFG(i)                (0x1200 + (i)*4)
23399 +#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_M                0x00000fff
23400 +#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_S                0
23401 +#define    PAS_IOB_DMA_TXCH_CFG_CNTTH(x)       (((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
23402 +                                                PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
23403 +#define PAS_IOB_DMA_RXCH_STAT(i)       (0x1300 + (i)*4)
23404 +#define    PAS_IOB_DMA_RXCH_STAT_INTGEN        0x00001000
23405 +#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_M      0x00000fff
23406 +#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_S      0
23407 +#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL(x)     (((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
23408 +                                                PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
23409 +#define PAS_IOB_DMA_TXCH_STAT(i)       (0x1400 + (i)*4)
23410 +#define    PAS_IOB_DMA_TXCH_STAT_INTGEN        0x00001000
23411 +#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_M      0x00000fff
23412 +#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_S      0
23413 +#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL(x)     (((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
23414 +                                                PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
23415 +#define PAS_IOB_DMA_RXCH_RESET(i)      (0x1500 + (i)*4)
23416 +#define    PAS_IOB_DMA_RXCH_RESET_PCNT_M       0xffff0000
23417 +#define    PAS_IOB_DMA_RXCH_RESET_PCNT_S       16
23418 +#define    PAS_IOB_DMA_RXCH_RESET_PCNT(x)      (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
23419 +                                                PAS_IOB_DMA_RXCH_RESET_PCNT_M)
23420 +#define    PAS_IOB_DMA_RXCH_RESET_PCNTRST      0x00000020
23421 +#define    PAS_IOB_DMA_RXCH_RESET_DCNTRST      0x00000010
23422 +#define    PAS_IOB_DMA_RXCH_RESET_TINTC                0x00000008
23423 +#define    PAS_IOB_DMA_RXCH_RESET_DINTC                0x00000004
23424 +#define    PAS_IOB_DMA_RXCH_RESET_SINTC                0x00000002
23425 +#define    PAS_IOB_DMA_RXCH_RESET_PINTC                0x00000001
23426 +#define PAS_IOB_DMA_TXCH_RESET(i)      (0x1600 + (i)*4)
23427 +#define    PAS_IOB_DMA_TXCH_RESET_PCNT_M       0xffff0000
23428 +#define    PAS_IOB_DMA_TXCH_RESET_PCNT_S       16
23429 +#define    PAS_IOB_DMA_TXCH_RESET_PCNT(x)      (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
23430 +                                                PAS_IOB_DMA_TXCH_RESET_PCNT_M)
23431 +#define    PAS_IOB_DMA_TXCH_RESET_PCNTRST      0x00000020
23432 +#define    PAS_IOB_DMA_TXCH_RESET_DCNTRST      0x00000010
23433 +#define    PAS_IOB_DMA_TXCH_RESET_TINTC                0x00000008
23434 +#define    PAS_IOB_DMA_TXCH_RESET_DINTC                0x00000004
23435 +#define    PAS_IOB_DMA_TXCH_RESET_SINTC                0x00000002
23436 +#define    PAS_IOB_DMA_TXCH_RESET_PINTC                0x00000001
23437 +
23438 +#define PAS_IOB_DMA_COM_TIMEOUTCFG             0x1700
23439 +#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M   0x00ffffff
23440 +#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S   0
23441 +#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x)  (((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
23442 +                                                PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
23443 +
23444 +/* Transmit descriptor fields */
23445 +#define        XCT_MACTX_T             0x8000000000000000ull
23446 +#define        XCT_MACTX_ST            0x4000000000000000ull
23447 +#define XCT_MACTX_NORES                0x0000000000000000ull
23448 +#define XCT_MACTX_8BRES                0x1000000000000000ull
23449 +#define XCT_MACTX_24BRES       0x2000000000000000ull
23450 +#define XCT_MACTX_40BRES       0x3000000000000000ull
23451 +#define XCT_MACTX_I            0x0800000000000000ull
23452 +#define XCT_MACTX_O            0x0400000000000000ull
23453 +#define XCT_MACTX_E            0x0200000000000000ull
23454 +#define XCT_MACTX_VLAN_M       0x0180000000000000ull
23455 +#define XCT_MACTX_VLAN_NOP     0x0000000000000000ull
23456 +#define XCT_MACTX_VLAN_REMOVE  0x0080000000000000ull
23457 +#define XCT_MACTX_VLAN_INSERT   0x0100000000000000ull
23458 +#define XCT_MACTX_VLAN_REPLACE  0x0180000000000000ull
23459 +#define XCT_MACTX_CRC_M                0x0060000000000000ull
23460 +#define XCT_MACTX_CRC_NOP      0x0000000000000000ull
23461 +#define XCT_MACTX_CRC_INSERT   0x0020000000000000ull
23462 +#define XCT_MACTX_CRC_PAD      0x0040000000000000ull
23463 +#define XCT_MACTX_CRC_REPLACE  0x0060000000000000ull
23464 +#define XCT_MACTX_SS           0x0010000000000000ull
23465 +#define XCT_MACTX_LLEN_M       0x00007fff00000000ull
23466 +#define XCT_MACTX_LLEN_S       32ull
23467 +#define XCT_MACTX_LLEN(x)      ((((long)(x)) << XCT_MACTX_LLEN_S) & \
23468 +                                XCT_MACTX_LLEN_M)
23469 +#define XCT_MACTX_IPH_M                0x00000000f8000000ull
23470 +#define XCT_MACTX_IPH_S                27ull
23471 +#define XCT_MACTX_IPH(x)       ((((long)(x)) << XCT_MACTX_IPH_S) & \
23472 +                                XCT_MACTX_IPH_M)
23473 +#define XCT_MACTX_IPO_M                0x0000000007c00000ull
23474 +#define XCT_MACTX_IPO_S                22ull
23475 +#define XCT_MACTX_IPO(x)       ((((long)(x)) << XCT_MACTX_IPO_S) & \
23476 +                                XCT_MACTX_IPO_M)
23477 +#define XCT_MACTX_CSUM_M       0x0000000000000060ull
23478 +#define XCT_MACTX_CSUM_NOP     0x0000000000000000ull
23479 +#define XCT_MACTX_CSUM_TCP     0x0000000000000040ull
23480 +#define XCT_MACTX_CSUM_UDP     0x0000000000000060ull
23481 +#define XCT_MACTX_V6           0x0000000000000010ull
23482 +#define XCT_MACTX_C            0x0000000000000004ull
23483 +#define XCT_MACTX_AL2          0x0000000000000002ull
23484 +
23485 +#define XCT_PTR_T              0x8000000000000000ull
23486 +#define XCT_PTR_LEN_M          0x7ffff00000000000ull
23487 +#define XCT_PTR_LEN_S          44
23488 +#define XCT_PTR_LEN(x)         ((((long)(x)) << XCT_PTR_LEN_S) & \
23489 +                                XCT_PTR_LEN_M)
23490 +#define XCT_PTR_ADDR_M         0x00000fffffffffffull
23491 +#define XCT_PTR_ADDR_S         0
23492 +#define XCT_PTR_ADDR(x)                ((((long)(x)) << XCT_PTR_ADDR_S) & \
23493 +                                XCT_PTR_ADDR_M)
23494 +
23495 +/* Function descriptor fields */
23496 +#define        XCT_FUN_T               0x8000000000000000ull
23497 +#define        XCT_FUN_ST              0x4000000000000000ull
23498 +#define XCT_FUN_NORES          0x0000000000000000ull
23499 +#define XCT_FUN_8BRES          0x1000000000000000ull
23500 +#define XCT_FUN_24BRES         0x2000000000000000ull
23501 +#define XCT_FUN_40BRES         0x3000000000000000ull
23502 +#define XCT_FUN_I              0x0800000000000000ull
23503 +#define XCT_FUN_O              0x0400000000000000ull
23504 +#define XCT_FUN_E              0x0200000000000000ull
23505 +#define XCT_FUN_FUN_S          54
23506 +#define XCT_FUN_FUN_M          0x01c0000000000000ull
23507 +#define XCT_FUN_FUN(num)       ((((long)(num)) << XCT_FUN_FUN_S) & \
23508 +                               XCT_FUN_FUN_M)
23509 +#define XCT_FUN_CRM_NOP                0x0000000000000000ull
23510 +#define XCT_FUN_CRM_SIG                0x0008000000000000ull
23511 +#define XCT_FUN_CRM_ENC                0x0010000000000000ull
23512 +#define XCT_FUN_CRM_DEC                0x0018000000000000ull
23513 +#define XCT_FUN_CRM_SIG_ENC    0x0020000000000000ull
23514 +#define XCT_FUN_CRM_ENC_SIG    0x0028000000000000ull
23515 +#define XCT_FUN_CRM_SIG_DEC    0x0030000000000000ull
23516 +#define XCT_FUN_CRM_DEC_SIG    0x0038000000000000ull
23517 +#define XCT_FUN_LLEN_M         0x0007ffff00000000ull
23518 +#define XCT_FUN_LLEN_S         32ULL
23519 +#define XCT_FUN_LLEN(x)                ((((long)(x)) << XCT_FUN_LLEN_S) & \
23520 +                                XCT_FUN_LLEN_M)
23521 +#define XCT_FUN_SHL_M          0x00000000f8000000ull
23522 +#define XCT_FUN_SHL_S          27ull
23523 +#define XCT_FUN_SHL(x)         ((((long)(x)) << XCT_FUN_SHL_S) & \
23524 +                                XCT_FUN_SHL_M)
23525 +#define XCT_FUN_CHL_M          0x0000000007c00000ull
23526 +#define XCT_FUN_CHL_S          22ull
23527 +#define XCT_FUN_CHL(x)         ((((long)(x)) << XCT_FUN_CHL_S) & \
23528 +                                XCT_FUN_CHL_M)
23529 +#define XCT_FUN_HSZ_M          0x00000000003c0000ull
23530 +#define XCT_FUN_HSZ_S          18ull
23531 +#define XCT_FUN_HSZ(x)         ((((long)(x)) << XCT_FUN_HSZ_S) & \
23532 +                                XCT_FUN_HSZ_M)
23533 +#define XCT_FUN_ALG_DES                0x0000000000000000ull
23534 +#define XCT_FUN_ALG_3DES       0x0000000000008000ull
23535 +#define XCT_FUN_ALG_AES                0x0000000000010000ull
23536 +#define XCT_FUN_ALG_ARC                0x0000000000018000ull
23537 +#define XCT_FUN_ALG_KASUMI     0x0000000000020000ull
23538 +#define XCT_FUN_BCM_ECB                0x0000000000000000ull
23539 +#define XCT_FUN_BCM_CBC                0x0000000000001000ull
23540 +#define XCT_FUN_BCM_CFB                0x0000000000002000ull
23541 +#define XCT_FUN_BCM_OFB                0x0000000000003000ull
23542 +#define XCT_FUN_BCM_CNT                0x0000000000003800ull
23543 +#define XCT_FUN_BCM_KAS_F8     0x0000000000002800ull
23544 +#define XCT_FUN_BCM_KAS_F9     0x0000000000001800ull
23545 +#define XCT_FUN_BCP_NO_PAD     0x0000000000000000ull
23546 +#define XCT_FUN_BCP_ZRO                0x0000000000000200ull
23547 +#define XCT_FUN_BCP_PL         0x0000000000000400ull
23548 +#define XCT_FUN_BCP_INCR       0x0000000000000600ull
23549 +#define XCT_FUN_SIG_MD5                (0ull << 4)
23550 +#define XCT_FUN_SIG_SHA1       (2ull << 4)
23551 +#define XCT_FUN_SIG_HMAC_MD5   (8ull << 4)
23552 +#define XCT_FUN_SIG_HMAC_SHA1  (10ull << 4)
23553 +#define XCT_FUN_A              0x0000000000000008ull
23554 +#define XCT_FUN_C              0x0000000000000004ull
23555 +#define XCT_FUN_AL2            0x0000000000000002ull
23556 +#define XCT_FUN_SE             0x0000000000000001ull
23557 +
23558 +#define XCT_FUN_SRC_PTR(len, addr)     (XCT_PTR_LEN(len) | XCT_PTR_ADDR(addr))
23559 +#define XCT_FUN_DST_PTR(len, addr)     (XCT_FUN_SRC_PTR(len, addr) | \
23560 +                                       0x8000000000000000ull)
23561 +
23562 +#define XCT_CTRL_HDR_FUN_NUM_M         0x01c0000000000000ull
23563 +#define XCT_CTRL_HDR_FUN_NUM_S         54
23564 +#define XCT_CTRL_HDR_LEN_M             0x0007ffff00000000ull
23565 +#define XCT_CTRL_HDR_LEN_S             32
23566 +#define XCT_CTRL_HDR_REG_M             0x00000000000000ffull
23567 +#define XCT_CTRL_HDR_REG_S             0
23568 +
23569 +#define XCT_CTRL_HDR(funcN,len,reg)    (0x9400000000000000ull | \
23570 +                       ((((long)(funcN)) << XCT_CTRL_HDR_FUN_NUM_S) \
23571 +                       & XCT_CTRL_HDR_FUN_NUM_M) | \
23572 +                       ((((long)(len)) << \
23573 +                       XCT_CTRL_HDR_LEN_S) & XCT_CTRL_HDR_LEN_M) | \
23574 +                       ((((long)(reg)) << \
23575 +                       XCT_CTRL_HDR_REG_S) & XCT_CTRL_HDR_REG_M))
23576 +
23577 +/* Function config command options */
23578 +#define        DMA_CALGO_DES                   0x00
23579 +#define        DMA_CALGO_3DES                  0x01
23580 +#define        DMA_CALGO_AES                   0x02
23581 +#define        DMA_CALGO_ARC                   0x03
23582 +
23583 +#define DMA_FN_CIV0                    0x02
23584 +#define DMA_FN_CIV1                    0x03
23585 +#define DMA_FN_HKEY0                   0x0a
23586 +
23587 +#define XCT_PTR_ADDR_LEN(ptr)          ((ptr) & XCT_PTR_ADDR_M), \
23588 +                       (((ptr) & XCT_PTR_LEN_M) >> XCT_PTR_LEN_S)
23589 +
23590 +#endif /* PASEMI_FNU_H */