1 diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
2 index 2fb0fdf..9ba9759 100644
3 --- a/drivers/crypto/Kconfig
4 +++ b/drivers/crypto/Kconfig
5 @@ -436,4 +436,21 @@ config CRYPTO_DEV_QCE
6 hardware. To compile this driver as a module, choose M here. The
7 module will be called qcrypto.
9 +config CRYPTO_DEV_SUNXI_SS
10 + tristate "Support for Allwinner Security System cryptographic accelerator"
11 + depends on ARCH_SUNXI
16 + select CRYPTO_BLKCIPHER
18 + Some Allwinner SoCs have a crypto accelerator named the
19 + Security System. Select this if you want to use it.
20 + The Security System handles AES/DES/3DES ciphers in CBC mode
21 + and SHA1 and MD5 hash algorithms.
23 + To compile this driver as a module, choose M here: the module
24 + will be called sunxi-ss.
27 diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
28 index 3924f93..856545c 100644
29 --- a/drivers/crypto/Makefile
30 +++ b/drivers/crypto/Makefile
31 @@ -25,3 +25,4 @@ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
32 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
33 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
34 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
35 +obj-$(CONFIG_CRYPTO_DEV_SUNXI_SS) += sunxi-ss/
36 diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/sunxi-ss/Makefile
38 index 0000000..8bb287d
40 +++ b/drivers/crypto/sunxi-ss/Makefile
42 +obj-$(CONFIG_CRYPTO_DEV_SUNXI_SS) += sunxi-ss.o
43 +sunxi-ss-y += sunxi-ss-core.o sunxi-ss-hash.o sunxi-ss-cipher.o
44 diff --git a/drivers/crypto/sunxi-ss/sunxi-ss-cipher.c b/drivers/crypto/sunxi-ss/sunxi-ss-cipher.c
46 index 0000000..8d0416e
48 +++ b/drivers/crypto/sunxi-ss/sunxi-ss-cipher.c
51 + * sunxi-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
53 + * Copyright (C) 2013-2014 Corentin LABBE <clabbe.montjoie@gmail.com>
55 + * This file adds support for the AES cipher with 128/192/256-bit
56 + * keysizes in CBC mode.
57 + * It also adds support for DES and 3DES in CBC mode.
59 + * You can find the datasheet in Documentation/arm/sunxi/README
61 + * This program is free software; you can redistribute it and/or modify
62 + * it under the terms of the GNU General Public License as published by
63 + * the Free Software Foundation; either version 2 of the License, or
64 + * (at your option) any later version.
66 +#include "sunxi-ss.h"
68 +extern struct sunxi_ss_ctx *ss;
70 +static int sunxi_ss_cipher(struct ablkcipher_request *areq, u32 mode)
72 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
73 + struct sunxi_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
74 + const char *cipher_type;
76 + if (areq->nbytes == 0)
79 + if (areq->info == NULL) {
80 + dev_err(ss->dev, "ERROR: Empty IV\n");
84 + if (areq->src == NULL || areq->dst == NULL) {
85 + dev_err(ss->dev, "ERROR: Some SGs are NULL\n");
89 + cipher_type = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm));
91 + if (strcmp("cbc(aes)", cipher_type) == 0) {
92 + mode |= SS_OP_AES | SS_CBC | SS_ENABLED | op->keymode;
93 + return sunxi_ss_aes_poll(areq, mode);
96 + if (strcmp("cbc(des)", cipher_type) == 0) {
97 + mode |= SS_OP_DES | SS_CBC | SS_ENABLED | op->keymode;
98 + return sunxi_ss_des_poll(areq, mode);
101 + if (strcmp("cbc(des3_ede)", cipher_type) == 0) {
102 + mode |= SS_OP_3DES | SS_CBC | SS_ENABLED | op->keymode;
103 + return sunxi_ss_des_poll(areq, mode);
106 + dev_err(ss->dev, "ERROR: Cipher %s not handled\n", cipher_type);
110 +int sunxi_ss_cipher_encrypt(struct ablkcipher_request *areq)
112 + return sunxi_ss_cipher(areq, SS_ENCRYPTION);
115 +int sunxi_ss_cipher_decrypt(struct ablkcipher_request *areq)
117 + return sunxi_ss_cipher(areq, SS_DECRYPTION);
120 +int sunxi_ss_cipher_init(struct crypto_tfm *tfm)
122 + struct sunxi_tfm_ctx *op = crypto_tfm_ctx(tfm);
124 + memset(op, 0, sizeof(struct sunxi_tfm_ctx));
129 + * Optimized function for the case where we have only one SG,
130 + * so we can use kmap_atomic
132 +static int sunxi_ss_aes_poll_atomic(struct ablkcipher_request *areq)
135 + struct scatterlist *in_sg = areq->src;
136 + struct scatterlist *out_sg = areq->dst;
139 + unsigned int ileft = areq->nbytes;
140 + unsigned int oleft = areq->nbytes;
148 + src_addr = kmap_atomic(sg_page(in_sg)) + in_sg->offset;
149 + if (src_addr == NULL) {
150 + dev_err(ss->dev, "kmap_atomic error for src SG\n");
151 + writel(0, ss->base + SS_CTL);
152 + mutex_unlock(&ss->lock);
156 + dst_addr = kmap_atomic(sg_page(out_sg)) + out_sg->offset;
157 + if (dst_addr == NULL) {
158 + dev_err(ss->dev, "kmap_atomic error for dst SG\n");
159 + writel(0, ss->base + SS_CTL);
160 + kunmap_atomic(src_addr);
161 + mutex_unlock(&ss->lock);
165 + src32 = (u32 *)src_addr;
166 + dst32 = (u32 *)dst_addr;
167 + ileft = areq->nbytes / 4;
168 + oleft = areq->nbytes / 4;
171 + if (ileft > 0 && rx_cnt > 0) {
172 + todo = min(rx_cnt, ileft);
175 + writel_relaxed(*src32++,
179 + } while (todo > 0);
182 + todo = min(tx_cnt, oleft);
185 + *dst32++ = readl_relaxed(ss->base +
188 + } while (todo > 0);
190 + spaces = readl_relaxed(ss->base + SS_FCSR);
191 + rx_cnt = SS_RXFIFO_SPACES(spaces);
192 + tx_cnt = SS_TXFIFO_SPACES(spaces);
193 + } while (oleft > 0);
194 + writel(0, ss->base + SS_CTL);
195 + kunmap_atomic(src_addr);
196 + kunmap_atomic(dst_addr);
197 + mutex_unlock(&ss->lock);
201 +int sunxi_ss_aes_poll(struct ablkcipher_request *areq, u32 mode)
204 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
205 + struct sunxi_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
206 + unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
207 + /* when activating SS, the default FIFO space is 32 */
212 + struct scatterlist *in_sg = areq->src;
213 + struct scatterlist *out_sg = areq->dst;
216 + unsigned int ileft = areq->nbytes;
217 + unsigned int oleft = areq->nbytes;
218 + unsigned int sgileft = areq->src->length;
219 + unsigned int sgoleft = areq->dst->length;
224 + mutex_lock(&ss->lock);
226 + for (i = 0; i < op->keylen; i += 4)
227 + writel(*(op->key + i/4), ss->base + SS_KEY0 + i);
229 + if (areq->info != NULL) {
230 + for (i = 0; i < 4 && i < ivsize / 4; i++) {
231 + v = *(u32 *)(areq->info + i * 4);
232 + writel(v, ss->base + SS_IV0 + i * 4);
235 + writel(mode, ss->base + SS_CTL);
237 + /* If we have only one SG, we can use kmap_atomic */
238 + if (sg_next(in_sg) == NULL && sg_next(out_sg) == NULL)
239 + return sunxi_ss_aes_poll_atomic(areq);
242 + * If we have more than one SG, we cannot use kmap_atomic since
243 + * we would hold the mapping too long
245 + src_addr = kmap(sg_page(in_sg)) + in_sg->offset;
246 + if (src_addr == NULL) {
247 + dev_err(ss->dev, "KMAP error for src SG\n");
248 + mutex_unlock(&ss->lock);
251 + dst_addr = kmap(sg_page(out_sg)) + out_sg->offset;
252 + if (dst_addr == NULL) {
253 + kunmap(sg_page(in_sg));
254 + dev_err(ss->dev, "KMAP error for dst SG\n");
255 + mutex_unlock(&ss->lock);
258 + src32 = (u32 *)src_addr;
259 + dst32 = (u32 *)dst_addr;
260 + ileft = areq->nbytes / 4;
261 + oleft = areq->nbytes / 4;
262 + sgileft = in_sg->length / 4;
263 + sgoleft = out_sg->length / 4;
265 + spaces = readl_relaxed(ss->base + SS_FCSR);
266 + rx_cnt = SS_RXFIFO_SPACES(spaces);
267 + tx_cnt = SS_TXFIFO_SPACES(spaces);
268 + todo = min3(rx_cnt, ileft, sgileft);
274 + writel_relaxed(*src32++, ss->base + SS_RXFIFO);
277 + if (in_sg != NULL && sgileft == 0 && ileft > 0) {
278 + kunmap(sg_page(in_sg));
279 + in_sg = sg_next(in_sg);
280 + while (in_sg != NULL && in_sg->length == 0)
281 + in_sg = sg_next(in_sg);
282 + if (in_sg != NULL && ileft > 0) {
283 + src_addr = kmap(sg_page(in_sg)) + in_sg->offset;
284 + if (src_addr == NULL) {
285 + dev_err(ss->dev, "ERROR: KMAP for src SG\n");
286 + mutex_unlock(&ss->lock);
290 + sgileft = in_sg->length / 4;
293 + /* do not test oleft since when oleft == 0 we have finished */
294 + todo = min3(tx_cnt, oleft, sgoleft);
300 + *dst32++ = readl_relaxed(ss->base + SS_TXFIFO);
303 + if (out_sg != NULL && sgoleft == 0 && oleft >= 0) {
304 + kunmap(sg_page(out_sg));
305 + out_sg = sg_next(out_sg);
306 + while (out_sg != NULL && out_sg->length == 0)
307 + out_sg = sg_next(out_sg);
308 + if (out_sg != NULL && oleft > 0) {
309 + dst_addr = kmap(sg_page(out_sg)) +
311 + if (dst_addr == NULL) {
312 + dev_err(ss->dev, "KMAP error\n");
313 + mutex_unlock(&ss->lock);
317 + sgoleft = out_sg->length / 4;
320 + } while (oleft > 0);
322 + writel_relaxed(0, ss->base + SS_CTL);
323 + mutex_unlock(&ss->lock);
328 + * Pure CPU way of doing DES/3DES with SS
329 + * Since DES and 3DES SGs could be smaller than 4 bytes, I use sg_copy_to_buffer
330 + * to linearize them.
331 + * The problem with that is that I allocate (2 x areq->nbytes) for buf_in/buf_out
332 + * TODO: change this system; I need to support modes other than CBC where len
333 + * is not a multiple of 4, and the linearization hack uses too much memory
334 + * SGsrc -> buf_in -> SS -> buf_out -> SGdst
336 +int sunxi_ss_des_poll(struct ablkcipher_request *areq, u32 mode)
339 + size_t nb_in_sg_tx, nb_in_sg_rx;
341 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
342 + struct sunxi_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
343 + unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
349 + struct scatterlist *in_sg = areq->src;
350 + struct scatterlist *out_sg = areq->dst;
353 + * if we have only SGs with sizes that are multiples of 4,
354 + * we can use the SS AES function
356 + while (in_sg != NULL && no_chunk == 1) {
357 + if ((in_sg->length % 4) != 0)
359 + in_sg = sg_next(in_sg);
361 + while (out_sg != NULL && no_chunk == 1) {
362 + if ((out_sg->length % 4) != 0)
364 + out_sg = sg_next(out_sg);
368 + return sunxi_ss_aes_poll(areq, mode);
371 + out_sg = areq->dst;
373 + nb_in_sg_rx = sg_nents(in_sg);
374 + nb_in_sg_tx = sg_nents(out_sg);
377 + * buf_in and buf_out are allocated only one time
378 + * then we keep the buffer until driver end
379 + * the allocation can only grow more
380 + * we do not reduce it for simplification
382 + mutex_lock(&ss->bufin_lock);
383 + if (ss->buf_in == NULL) {
384 + ss->buf_in = kmalloc(areq->nbytes, GFP_KERNEL);
385 + ss->buf_in_size = areq->nbytes;
387 + if (areq->nbytes > ss->buf_in_size) {
389 + ss->buf_in = kmalloc(areq->nbytes, GFP_KERNEL);
390 + ss->buf_in_size = areq->nbytes;
393 + if (ss->buf_in == NULL) {
394 + ss->buf_in_size = 0;
395 + mutex_unlock(&ss->bufin_lock);
396 + dev_err(ss->dev, "Unable to allocate pages.\n");
399 + mutex_lock(&ss->bufout_lock);
400 + if (ss->buf_out == NULL) {
401 + ss->buf_out = kmalloc(areq->nbytes, GFP_KERNEL);
402 + if (ss->buf_out == NULL) {
403 + ss->buf_out_size = 0;
404 + mutex_unlock(&ss->bufin_lock);
405 + mutex_unlock(&ss->bufout_lock);
406 + dev_err(ss->dev, "Unable to allocate pages.\n");
409 + ss->buf_out_size = areq->nbytes;
411 + if (areq->nbytes > ss->buf_out_size) {
412 + kfree(ss->buf_out);
413 + ss->buf_out = kmalloc(areq->nbytes, GFP_KERNEL);
414 + if (ss->buf_out == NULL) {
415 + ss->buf_out_size = 0;
416 + mutex_unlock(&ss->bufin_lock);
417 + mutex_unlock(&ss->bufout_lock);
418 + dev_err(ss->dev, "Unable to allocate pages.\n");
421 + ss->buf_out_size = areq->nbytes;
425 + sg_copy_to_buffer(areq->src, nb_in_sg_rx, ss->buf_in, areq->nbytes);
429 + mutex_lock(&ss->lock);
431 + for (i = 0; i < op->keylen; i += 4)
432 + writel(*(op->key + i/4), ss->base + SS_KEY0 + i);
433 + if (areq->info != NULL) {
434 + for (i = 0; i < 4 && i < ivsize / 4; i++) {
435 + v = *(u32 *)(areq->info + i * 4);
436 + writel(v, ss->base + SS_IV0 + i * 4);
439 + writel(mode, ss->base + SS_CTL);
442 + if (rx_cnt == 0 || tx_cnt == 0) {
443 + spaces = readl(ss->base + SS_FCSR);
444 + rx_cnt = SS_RXFIFO_SPACES(spaces);
445 + tx_cnt = SS_TXFIFO_SPACES(spaces);
447 + if (rx_cnt > 0 && ir < areq->nbytes) {
449 + value = *(u32 *)(ss->buf_in + ir);
450 + writel(value, ss->base + SS_RXFIFO);
453 + } while (rx_cnt > 0 && ir < areq->nbytes);
455 + if (tx_cnt > 0 && it < areq->nbytes) {
457 + value = readl(ss->base + SS_TXFIFO);
458 + *(u32 *)(ss->buf_out + it) = value;
461 + } while (tx_cnt > 0 && it < areq->nbytes);
463 + if (ir == areq->nbytes) {
464 + mutex_unlock(&ss->bufin_lock);
467 + } while (it < areq->nbytes);
469 + writel(0, ss->base + SS_CTL);
470 + mutex_unlock(&ss->lock);
473 + * a simple optimization: since we don't need the hardware for this copy,
474 + * we release the lock and do the copy. With that we gain 5-10% perf
476 + sg_copy_from_buffer(areq->dst, nb_in_sg_tx, ss->buf_out, areq->nbytes);
478 + mutex_unlock(&ss->bufout_lock);
482 +/* check and set the AES key, prepare the mode to be used */
483 +int sunxi_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
484 + unsigned int keylen)
486 + struct sunxi_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
490 + op->keymode = SS_AES_128BITS;
493 + op->keymode = SS_AES_192BITS;
496 + op->keymode = SS_AES_256BITS;
499 + dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
500 + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
503 + op->keylen = keylen;
504 + memcpy(op->key, key, keylen);
508 +/* check and set the DES key, prepare the mode to be used */
509 +int sunxi_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
510 + unsigned int keylen)
512 + struct sunxi_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
514 + if (keylen != DES_KEY_SIZE) {
515 + dev_err(ss->dev, "Invalid keylen %u\n", keylen);
516 + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
519 + op->keylen = keylen;
520 + memcpy(op->key, key, keylen);
524 +/* check and set the 3DES key, prepare the mode to be used */
525 +int sunxi_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
526 + unsigned int keylen)
528 + struct sunxi_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
530 + if (keylen != 3 * DES_KEY_SIZE) {
531 + dev_err(ss->dev, "Invalid keylen %u\n", keylen);
532 + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
535 + op->keylen = keylen;
536 + memcpy(op->key, key, keylen);
539 diff --git a/drivers/crypto/sunxi-ss/sunxi-ss-core.c b/drivers/crypto/sunxi-ss/sunxi-ss-core.c
541 index 0000000..e66d7e2
543 +++ b/drivers/crypto/sunxi-ss/sunxi-ss-core.c
546 + * sunxi-ss-core.c - hardware cryptographic accelerator for Allwinner A20 SoC
548 + * Copyright (C) 2013-2014 Corentin LABBE <clabbe.montjoie@gmail.com>
550 + * Core file which registers crypto algorithms supported by the SS.
552 + * You can find a link to the datasheet in Documentation/arm/sunxi/README
554 + * This program is free software; you can redistribute it and/or modify
555 + * it under the terms of the GNU General Public License as published by
556 + * the Free Software Foundation; either version 2 of the License, or
557 + * (at your option) any later version.
559 +#include <linux/clk.h>
560 +#include <linux/crypto.h>
561 +#include <linux/io.h>
562 +#include <linux/module.h>
563 +#include <linux/of.h>
564 +#include <linux/platform_device.h>
565 +#include <crypto/scatterwalk.h>
566 +#include <linux/scatterlist.h>
567 +#include <linux/interrupt.h>
568 +#include <linux/delay.h>
570 +#include "sunxi-ss.h"
572 +struct sunxi_ss_ctx *ss;
575 + * General notes for the whole driver:
577 + * After each request the device must be disabled with a write of 0 in SS_CTL
579 + * For performance reasons, we use writel_relaxed/readl_relaxed for all
580 + * operations on the RX and TX FIFOs and also SS_FCSR,
581 + * except for the last write on the TX FIFO.
582 + * For all other registers, we use writel/readl.
583 + * See http://permalink.gmane.org/gmane.linux.ports.arm.kernel/117644
584 + * and http://permalink.gmane.org/gmane.linux.ports.arm.kernel/117640
587 +static struct ahash_alg sunxi_md5_alg = {
588 + .init = sunxi_hash_init,
589 + .update = sunxi_hash_update,
590 + .final = sunxi_hash_final,
591 + .finup = sunxi_hash_finup,
592 + .digest = sunxi_hash_digest,
594 + .digestsize = MD5_DIGEST_SIZE,
597 + .cra_driver_name = "md5-sunxi-ss",
598 + .cra_priority = 300,
599 + .cra_alignmask = 3,
600 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
601 + .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
602 + .cra_ctxsize = sizeof(struct sunxi_req_ctx),
603 + .cra_module = THIS_MODULE,
604 + .cra_type = &crypto_ahash_type,
605 + .cra_init = sunxi_hash_crainit
610 +static struct ahash_alg sunxi_sha1_alg = {
611 + .init = sunxi_hash_init,
612 + .update = sunxi_hash_update,
613 + .final = sunxi_hash_final,
614 + .finup = sunxi_hash_finup,
615 + .digest = sunxi_hash_digest,
617 + .digestsize = SHA1_DIGEST_SIZE,
619 + .cra_name = "sha1",
620 + .cra_driver_name = "sha1-sunxi-ss",
621 + .cra_priority = 300,
622 + .cra_alignmask = 3,
623 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
624 + .cra_blocksize = SHA1_BLOCK_SIZE,
625 + .cra_ctxsize = sizeof(struct sunxi_req_ctx),
626 + .cra_module = THIS_MODULE,
627 + .cra_type = &crypto_ahash_type,
628 + .cra_init = sunxi_hash_crainit
633 +static struct crypto_alg sunxi_cipher_algs[] = {
635 + .cra_name = "cbc(aes)",
636 + .cra_driver_name = "cbc-aes-sunxi-ss",
637 + .cra_priority = 300,
638 + .cra_blocksize = AES_BLOCK_SIZE,
639 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
640 + .cra_ctxsize = sizeof(struct sunxi_tfm_ctx),
641 + .cra_module = THIS_MODULE,
642 + .cra_alignmask = 3,
643 + .cra_type = &crypto_ablkcipher_type,
644 + .cra_init = sunxi_ss_cipher_init,
647 + .min_keysize = AES_MIN_KEY_SIZE,
648 + .max_keysize = AES_MAX_KEY_SIZE,
649 + .ivsize = AES_BLOCK_SIZE,
650 + .setkey = sunxi_ss_aes_setkey,
651 + .encrypt = sunxi_ss_cipher_encrypt,
652 + .decrypt = sunxi_ss_cipher_decrypt,
656 + .cra_name = "cbc(des)",
657 + .cra_driver_name = "cbc-des-sunxi-ss",
658 + .cra_priority = 300,
659 + .cra_blocksize = DES_BLOCK_SIZE,
660 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
661 + .cra_ctxsize = sizeof(struct sunxi_req_ctx),
662 + .cra_module = THIS_MODULE,
663 + .cra_alignmask = 3,
664 + .cra_type = &crypto_ablkcipher_type,
665 + .cra_init = sunxi_ss_cipher_init,
666 + .cra_u.ablkcipher = {
667 + .min_keysize = DES_KEY_SIZE,
668 + .max_keysize = DES_KEY_SIZE,
669 + .ivsize = DES_BLOCK_SIZE,
670 + .setkey = sunxi_ss_des_setkey,
671 + .encrypt = sunxi_ss_cipher_encrypt,
672 + .decrypt = sunxi_ss_cipher_decrypt,
675 + .cra_name = "cbc(des3_ede)",
676 + .cra_driver_name = "cbc-des3-sunxi-ss",
677 + .cra_priority = 300,
678 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
679 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
680 + .cra_ctxsize = sizeof(struct sunxi_req_ctx),
681 + .cra_module = THIS_MODULE,
682 + .cra_alignmask = 3,
683 + .cra_type = &crypto_ablkcipher_type,
684 + .cra_init = sunxi_ss_cipher_init,
685 + .cra_u.ablkcipher = {
686 + .min_keysize = DES3_EDE_KEY_SIZE,
687 + .max_keysize = DES3_EDE_KEY_SIZE,
688 + .ivsize = DES3_EDE_BLOCK_SIZE,
689 + .setkey = sunxi_ss_des3_setkey,
690 + .encrypt = sunxi_ss_cipher_encrypt,
691 + .decrypt = sunxi_ss_cipher_decrypt,
696 +static int sunxi_ss_probe(struct platform_device *pdev)
698 + struct resource *res;
702 + const unsigned long cr_ahb = 24 * 1000 * 1000;
703 + const unsigned long cr_mod = 150 * 1000 * 1000;
705 + if (!pdev->dev.of_node)
708 + ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
712 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
713 + ss->base = devm_ioremap_resource(&pdev->dev, res);
714 + if (IS_ERR(ss->base)) {
715 + dev_err(&pdev->dev, "Cannot request MMIO\n");
716 + return PTR_ERR(ss->base);
719 + ss->ssclk = devm_clk_get(&pdev->dev, "mod");
720 + if (IS_ERR(ss->ssclk)) {
721 + err = PTR_ERR(ss->ssclk);
722 + dev_err(&pdev->dev, "Cannot get SS clock err=%d\n", err);
725 + dev_dbg(&pdev->dev, "clock ss acquired\n");
727 + ss->busclk = devm_clk_get(&pdev->dev, "ahb");
728 + if (IS_ERR(ss->busclk)) {
729 + err = PTR_ERR(ss->busclk);
730 + dev_err(&pdev->dev, "Cannot get AHB SS clock err=%d\n", err);
733 + dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");
735 + /* Enable both clocks */
736 + err = clk_prepare_enable(ss->busclk);
738 + dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
741 + err = clk_prepare_enable(ss->ssclk);
743 + dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n");
744 + clk_disable_unprepare(ss->busclk);
749 + * Check that the clocks have the correct rates given in the datasheet
750 + * and try to set the clock to the maximum allowed
752 + err = clk_set_rate(ss->ssclk, cr_mod);
754 + dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n");
755 + clk_disable_unprepare(ss->ssclk);
756 + clk_disable_unprepare(ss->busclk);
760 + cr = clk_get_rate(ss->busclk);
762 + dev_dbg(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
763 + cr, cr / 1000000, cr_ahb);
765 + dev_warn(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
766 + cr, cr / 1000000, cr_ahb);
768 + cr = clk_get_rate(ss->ssclk);
771 + dev_info(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
772 + cr, cr / 1000000, cr_mod);
774 + dev_dbg(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
775 + cr, cr / 1000000, cr_mod);
777 + dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n",
778 + cr, cr / 1000000, cr_mod);
781 + * The datasheet names it "Die Bonding ID";
782 + * I expect it to be a sort of Security System revision number.
783 + * Since the A80 seems to have another version of the SS,
784 + * this info could be useful
786 + writel(SS_ENABLED, ss->base + SS_CTL);
787 + v = readl(ss->base + SS_CTL);
790 + dev_info(&pdev->dev, "Die ID %d\n", v);
791 + writel(0, ss->base + SS_CTL);
793 + ss->dev = &pdev->dev;
795 + mutex_init(&ss->lock);
796 + mutex_init(&ss->bufin_lock);
797 + mutex_init(&ss->bufout_lock);
799 + err = crypto_register_ahash(&sunxi_md5_alg);
802 + err = crypto_register_ahash(&sunxi_sha1_alg);
805 + err = crypto_register_algs(sunxi_cipher_algs,
806 + ARRAY_SIZE(sunxi_cipher_algs));
808 + goto error_ciphers;
812 + crypto_unregister_ahash(&sunxi_sha1_alg);
814 + crypto_unregister_ahash(&sunxi_md5_alg);
816 + clk_disable_unprepare(ss->ssclk);
817 + clk_disable_unprepare(ss->busclk);
821 +static int __exit sunxi_ss_remove(struct platform_device *pdev)
823 + if (!pdev->dev.of_node)
826 + crypto_unregister_ahash(&sunxi_md5_alg);
827 + crypto_unregister_ahash(&sunxi_sha1_alg);
828 + crypto_unregister_algs(sunxi_cipher_algs,
829 + ARRAY_SIZE(sunxi_cipher_algs));
831 + if (ss->buf_in != NULL)
833 + if (ss->buf_out != NULL)
834 + kfree(ss->buf_out);
836 + writel(0, ss->base + SS_CTL);
837 + clk_disable_unprepare(ss->busclk);
838 + clk_disable_unprepare(ss->ssclk);
842 +static const struct of_device_id a20ss_crypto_of_match_table[] = {
843 + { .compatible = "allwinner,sun7i-a20-crypto" },
846 +MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
848 +static struct platform_driver sunxi_ss_driver = {
849 + .probe = sunxi_ss_probe,
850 + .remove = __exit_p(sunxi_ss_remove),
852 + .owner = THIS_MODULE,
853 + .name = "sunxi-ss",
854 + .of_match_table = a20ss_crypto_of_match_table,
858 +module_platform_driver(sunxi_ss_driver);
860 +MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
861 +MODULE_LICENSE("GPL");
862 +MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
863 diff --git a/drivers/crypto/sunxi-ss/sunxi-ss-hash.c b/drivers/crypto/sunxi-ss/sunxi-ss-hash.c
865 index 0000000..ec8758f
867 +++ b/drivers/crypto/sunxi-ss/sunxi-ss-hash.c
870 + * sunxi-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC
872 + * Copyright (C) 2013-2014 Corentin LABBE <clabbe.montjoie@gmail.com>
874 + * This file adds support for MD5 and SHA1.
876 + * You can find the datasheet in Documentation/arm/sunxi/README
878 + * This program is free software; you can redistribute it and/or modify
879 + * it under the terms of the GNU General Public License as published by
880 + * the Free Software Foundation; either version 2 of the License, or
881 + * (at your option) any later version.
883 +#include "sunxi-ss.h"
885 +/* This is a totally arbitrary value */
886 +#define SS_TIMEOUT 100
888 +extern struct sunxi_ss_ctx *ss;
890 +int sunxi_hash_crainit(struct crypto_tfm *tfm)
892 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
893 + sizeof(struct sunxi_req_ctx));
897 +/* sunxi_hash_init: initialize request context */
898 +int sunxi_hash_init(struct ahash_request *areq)
900 + const char *hash_type;
901 + struct sunxi_req_ctx *op = ahash_request_ctx(areq);
903 + memset(op, 0, sizeof(struct sunxi_req_ctx));
905 + hash_type = crypto_tfm_alg_name(areq->base.tfm);
907 + if (strcmp(hash_type, "sha1") == 0)
908 + op->mode = SS_OP_SHA1;
909 + if (strcmp(hash_type, "md5") == 0)
910 + op->mode = SS_OP_MD5;
919 +inline void ss_writer(const u32 v)
923 + writel(v, ss->base + SS_RXFIFO);
925 + while (rx_cnt == 0) {
926 + spaces = readl_relaxed(ss->base + SS_FCSR);
927 + rx_cnt = SS_RXFIFO_SPACES(spaces);
931 +inline void ss_writer_relaxed(const u32 v)
935 + writel_relaxed(v, ss->base + SS_RXFIFO);
937 + while (rx_cnt == 0) {
938 + spaces = readl_relaxed(ss->base + SS_FCSR);
939 + rx_cnt = SS_RXFIFO_SPACES(spaces);
944 + * sunxi_hash_update: update hash engine
946 + * Could be used for both SHA1 and MD5
947 + * Write data in steps of 32 bits and put them in the SS.
949 + * Since we cannot leave partial data and hash state in the engine,
950 + * we need to get the hash state at the end of this function.
951 + * After some work, I have found that we can get the hash state every 64 bytes
953 + * So the first task is to get the number of bytes to write to the SS modulo 64
954 + * The extra bytes will go to two different destination:
955 + * op->wait for full 32bits word
956 + * op->wb (waiting bytes) for partial 32 bits word
957 + * So we can have up to (64/4)-1 op->wait words and 0/1/2/3 bytes in wb
959 + * So at the beginning of update()
960 + * if op->nwait * 4 + areq->nbytes < 64
961 + * => all data written to wait buffers and end=0
962 + * if not, write all nwait to the device and position end to complete to 64 bytes
965 + * update1 60 bytes => nwait=15
966 + * update2 60 bytes => need one more word to have 64 bytes
968 + * so write all data from op->wait and one word of the SGs
969 + * write the remaining data into op->wait
970 + * final state op->nwait=14
972 +int sunxi_hash_update(struct ahash_request *areq)
975 + unsigned int i = 0;
977 + * i is the total bytes read from SGs, to be compared to areq->nbytes
978 + * i is important because we cannot rely on SG length since the sum of
979 + * SG->length could be greater than areq->nbytes
982 + struct sunxi_req_ctx *op = ahash_request_ctx(areq);
983 + struct scatterlist *in_sg;
984 + unsigned int in_i = 0; /* advancement in the current SG */
987 + * end is the position when we need to stop writing to the device,
988 + * to be compared to i
993 + dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x bw=%u ww=%u",
994 + __func__, crypto_tfm_alg_name(areq->base.tfm),
995 + op->byte_count, areq->nbytes, op->mode,
996 + op->nbw, op->nwait);
998 + if (areq->nbytes == 0)
1001 + end = ((areq->nbytes + op->nwait * 4 + op->nbw) / 64) * 64
1002 + - op->nbw - op->nwait * 4;
1004 + if (end > areq->nbytes || areq->nbytes - end > 63) {
1005 + dev_err(ss->dev, "ERROR: Bound error %llu %u\n",
1006 + end, areq->nbytes);
1010 + if (op->nwait > 0 && end > 0) {
1011 + /* a previous update was done */
1012 + for (i = 0; i < op->nwait; i++) {
1013 + ss_writer(op->wait[i]);
1014 + op->byte_count += 4;
1019 + mutex_lock(&ss->lock);
1021 + * if some data have been processed before,
1022 + * we need to restore the partial hash state
1024 + if (op->byte_count > 0) {
1025 + ivmode = SS_IV_ARBITRARY;
1026 + for (i = 0; i < 5; i++)
1027 + writel(op->hash[i], ss->base + SS_IV0 + i * 4);
1029 + /* Enable the device */
1030 + writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
1035 + in_sg = areq->src;
1036 + src_addr = kmap(sg_page(in_sg)) + in_sg->offset;
1037 + if (src_addr == NULL) {
1038 + mutex_unlock(&ss->lock);
1039 + dev_err(ss->dev, "ERROR: Cannot kmap source buffer\n");
1044 + * step 1: if some bytes remain from the last SG,
1045 + * try to complete them to 4 and send that word
1047 + if (op->nbw > 0) {
1048 + while (op->nbw < 4 && i < areq->nbytes &&
1049 + in_i < in_sg->length) {
1050 + op->wb |= (*(u8 *)(src_addr + in_i))
1052 + dev_dbg(ss->dev, "%s Complete w=%d wb=%x\n",
1053 + __func__, op->nbw, op->wb);
1058 + if (op->nbw == 4) {
1060 + ss_writer(op->wb);
1061 + op->byte_count += 4;
1063 + op->wait[op->nwait] = op->wb;
1065 + dev_dbg(ss->dev, "%s Keep %u bytes after %llu\n",
1066 + __func__, op->nwait, end);
1072 + /* step 2, main loop, read data 4bytes at a time */
1073 + while (i < areq->nbytes && in_i < in_sg->length) {
1074 + /* how many bytes we can read, (we need 4) */
1075 + in_r = min(in_sg->length - in_i, areq->nbytes - i);
1077 + /* Not enough data to write to the device */
1079 + while (in_r > 0) {
1080 + op->wb |= (*(u8 *)(src_addr + in_i))
1082 + dev_dbg(ss->dev, "%s ending bw=%d wb=%x\n",
1083 + __func__, op->nbw, op->wb);
1091 + v = *(u32 *)(src_addr + in_i);
1093 + /* last write must be done without relaxed */
1097 + ss_writer_relaxed(v);
1099 + op->byte_count += 4;
1102 + op->wait[op->nwait] = v;
1106 + dev_dbg(ss->dev, "%s Keep word ww=%u after %llu\n",
1107 + __func__, op->nwait, end);
1108 + if (op->nwait > 15) {
1109 + dev_err(ss->dev, "FATAL: Cannot enqueue more, bug?\n");
1110 + writel(0, ss->base + SS_CTL);
1111 + mutex_unlock(&ss->lock);
1117 + /* Nothing more to read in this SG */
1118 + if (in_i == in_sg->length) {
1119 + kunmap(sg_page(in_sg));
1121 + in_sg = sg_next(in_sg);
1122 + } while (in_sg != NULL && in_sg->length == 0);
1124 + if (in_sg != NULL) {
1125 + src_addr = kmap(sg_page(in_sg)) + in_sg->offset;
1126 + if (src_addr == NULL) {
1127 + mutex_unlock(&ss->lock);
1128 + dev_err(ss->dev, "ERROR: Cannot kmap source buffer\n");
1133 + } while (in_sg != NULL && i < areq->nbytes);
1135 + /* ask the device to finish the hashing */
1136 + writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
1139 + v = readl(ss->base + SS_CTL);
1141 + } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
1142 + if (i >= SS_TIMEOUT) {
1143 + dev_err(ss->dev, "ERROR: %s hash end timeout after %d loop, CTL=%x\n",
1145 + writel(0, ss->base + SS_CTL);
1146 + mutex_unlock(&ss->lock);
1150 + /* get the partial hash */
1151 + if (op->mode == SS_OP_SHA1) {
1152 + for (i = 0; i < 5; i++)
1153 + op->hash[i] = readl(ss->base + SS_MD0 + i * 4);
1155 + for (i = 0; i < 4; i++)
1156 + op->hash[i] = readl(ss->base + SS_MD0 + i * 4);
1159 + writel(0, ss->base + SS_CTL);
1160 + mutex_unlock(&ss->lock);
1165 + * sunxi_hash_final: finalize hashing operation
1167 + * If we have some remaining bytes, we write them.
1168 + * Then ask the SS for finalizing the hashing operation
1170 +int sunxi_hash_final(struct ahash_request *areq)
1172 + u32 v, ivmode = 0;
1175 + unsigned int index, padlen;
1177 + struct sunxi_req_ctx *op = ahash_request_ctx(areq);
1179 + dev_dbg(ss->dev, "%s byte=%llu len=%u mode=%x bw=%u %x h=%x ww=%u",
1180 + __func__, op->byte_count, areq->nbytes, op->mode,
1181 + op->nbw, op->wb, op->hash[0], op->nwait);
1183 + mutex_lock(&ss->lock);
1187 + * if we have already written something,
1188 + * restore the partial hash state
1190 + if (op->byte_count > 0) {
1191 + ivmode = SS_IV_ARBITRARY;
1192 + for (i = 0; i < 5; i++)
1193 + writel(op->hash[i], ss->base + SS_IV0 + i * 4);
1195 + writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
1197 + /* write the remaining words of the wait buffer */
1198 + if (op->nwait > 0) {
1199 + for (i = 0; i < op->nwait; i++) {
1202 + op->byte_count += 4;
1203 + dev_dbg(ss->dev, "%s write %llu i=%u %x\n",
1204 + __func__, op->byte_count, i, v);
1209 + /* write the remaining bytes of the nbw buffer */
1210 + if (op->nbw > 0) {
1211 + op->wb |= ((1 << 7) << (op->nbw * 8));
1212 + ss_writer(op->wb);
1214 + ss_writer((1 << 7));
1218 + * number of spaces to pad to obtain 64 bytes minus 8 (size) minus 4 (final 1)
1219 + * I take the operations from other md5/sha1 implementations
1222 + /* we have already sent 4 more bytes, of which nbw are data */
1223 + if (op->mode == SS_OP_MD5) {
1224 + index = (op->byte_count + 4) & 0x3f;
1225 + op->byte_count += op->nbw;
1227 + zeros = (120 - index) / 4;
1229 + zeros = (56 - index) / 4;
1231 + op->byte_count += op->nbw;
1232 + index = op->byte_count & 0x3f;
1233 + padlen = (index < 56) ? (56 - index) : ((64+56) - index);
1234 + zeros = (padlen - 1) / 4;
1236 + for (i = 0; i < zeros; i++)
1239 + /* write the length of data */
1240 + if (op->mode == SS_OP_SHA1) {
1241 + bits = cpu_to_be64(op->byte_count << 3);
1242 + ss_writer(bits & 0xffffffff);
1243 + ss_writer((bits >> 32) & 0xffffffff);
1245 + ss_writer((op->byte_count << 3) & 0xffffffff);
1246 + ss_writer((op->byte_count >> 29) & 0xffffffff);
1249 + /* Tell the SS to stop the hashing */
1250 + writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
1253 + * Wait for SS to finish the hash.
1254 + * The timeout could happen only in case of bad overclocking
1259 + v = readl(ss->base + SS_CTL);
1261 + } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
1262 + if (i >= SS_TIMEOUT) {
1263 + dev_err(ss->dev, "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
1264 + i, SS_TIMEOUT, v, areq->nbytes);
1265 + writel(0, ss->base + SS_CTL);
1266 + mutex_unlock(&ss->lock);
1270 + /* Get the hash from the device */
1271 + if (op->mode == SS_OP_SHA1) {
1272 + for (i = 0; i < 5; i++) {
1273 + v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
1274 + memcpy(areq->result + i * 4, &v, 4);
1277 + for (i = 0; i < 4; i++) {
1278 + v = readl(ss->base + SS_MD0 + i * 4);
1279 + memcpy(areq->result + i * 4, &v, 4);
1282 + writel(0, ss->base + SS_CTL);
1283 + mutex_unlock(&ss->lock);
1287 +/* sunxi_hash_finup: finalize hashing operation after an update */
1288 +int sunxi_hash_finup(struct ahash_request *areq)
1292 + err = sunxi_hash_update(areq);
1296 + return sunxi_hash_final(areq);
1299 +/* combo of init/update/final functions */
1300 +int sunxi_hash_digest(struct ahash_request *areq)
1304 + err = sunxi_hash_init(areq);
1308 + err = sunxi_hash_update(areq);
1312 + return sunxi_hash_final(areq);
1314 diff --git a/drivers/crypto/sunxi-ss/sunxi-ss.h b/drivers/crypto/sunxi-ss/sunxi-ss.h
1315 new file mode 100644
1316 index 0000000..331e75b
1318 +++ b/drivers/crypto/sunxi-ss/sunxi-ss.h
1321 + * sunxi-ss.c - hardware cryptographic accelerator for Allwinner A20 SoC
1323 + * Copyright (C) 2013-2014 Corentin LABBE <clabbe.montjoie@gmail.com>
1325 + * Support AES cipher with 128,192,256 bits keysize.
1326 + * Support MD5 and SHA1 hash algorithms.
1327 + * Support DES and 3DES
1329 + * You could find the datasheet in Documentation/arm/sunxi/README
1331 + * Licensed under the GPL-2.
1334 +#include <linux/clk.h>
1335 +#include <linux/crypto.h>
1336 +#include <linux/io.h>
1337 +#include <linux/module.h>
1338 +#include <linux/of.h>
1339 +#include <linux/platform_device.h>
1340 +#include <crypto/scatterwalk.h>
1341 +#include <linux/scatterlist.h>
1342 +#include <linux/interrupt.h>
1343 +#include <linux/delay.h>
1344 +#include <crypto/md5.h>
1345 +#include <crypto/sha.h>
1346 +#include <crypto/hash.h>
1347 +#include <crypto/internal/hash.h>
1348 +#include <crypto/aes.h>
1349 +#include <crypto/des.h>
1350 +#include <crypto/internal/rng.h>
1352 +#define SS_CTL 0x00
1353 +#define SS_KEY0 0x04
1354 +#define SS_KEY1 0x08
1355 +#define SS_KEY2 0x0C
1356 +#define SS_KEY3 0x10
1357 +#define SS_KEY4 0x14
1358 +#define SS_KEY5 0x18
1359 +#define SS_KEY6 0x1C
1360 +#define SS_KEY7 0x20
1362 +#define SS_IV0 0x24
1363 +#define SS_IV1 0x28
1364 +#define SS_IV2 0x2C
1365 +#define SS_IV3 0x30
1367 +#define SS_CNT0 0x34
1368 +#define SS_CNT1 0x38
1369 +#define SS_CNT2 0x3C
1370 +#define SS_CNT3 0x40
1372 +#define SS_FCSR 0x44
1373 +#define SS_ICSR 0x48
1375 +#define SS_MD0 0x4C
1376 +#define SS_MD1 0x50
1377 +#define SS_MD2 0x54
1378 +#define SS_MD3 0x58
1379 +#define SS_MD4 0x5C
1381 +#define SS_RXFIFO 0x200
1382 +#define SS_TXFIFO 0x204
1384 +/* SS_CTL configuration values */
1386 +/* PRNG generator mode - bit 15 */
1387 +#define SS_PRNG_ONESHOT (0 << 15)
1388 +#define SS_PRNG_CONTINUE (1 << 15)
1390 +/* IV mode for hash */
1391 +#define SS_IV_ARBITRARY (1 << 14)
1393 +/* SS operation mode - bits 12-13 */
1394 +#define SS_ECB (0 << 12)
1395 +#define SS_CBC (1 << 12)
1396 +#define SS_CNT (2 << 12)
1398 +/* Counter width for CNT mode - bits 10-11 */
1399 +#define SS_CNT_16BITS (0 << 10)
1400 +#define SS_CNT_32BITS (1 << 10)
1401 +#define SS_CNT_64BITS (2 << 10)
1403 +/* Key size for AES - bits 8-9 */
1404 +#define SS_AES_128BITS (0 << 8)
1405 +#define SS_AES_192BITS (1 << 8)
1406 +#define SS_AES_256BITS (2 << 8)
1408 +/* Operation direction - bit 7 */
1409 +#define SS_ENCRYPTION (0 << 7)
1410 +#define SS_DECRYPTION (1 << 7)
1412 +/* SS Method - bits 4-6 */
1413 +#define SS_OP_AES (0 << 4)
1414 +#define SS_OP_DES (1 << 4)
1415 +#define SS_OP_3DES (2 << 4)
1416 +#define SS_OP_SHA1 (3 << 4)
1417 +#define SS_OP_MD5 (4 << 4)
1418 +#define SS_OP_PRNG (5 << 4)
1420 +/* Data end bit - bit 2 */
1421 +#define SS_DATA_END (1 << 2)
1423 +/* PRNG start bit - bit 1 */
1424 +#define SS_PRNG_START (1 << 1)
1426 +/* SS Enable bit - bit 0 */
1427 +#define SS_DISABLED (0 << 0)
1428 +#define SS_ENABLED (1 << 0)
1430 +/* SS_FCSR configuration values */
1431 +/* RX FIFO status - bit 30 */
1432 +#define SS_RXFIFO_FREE (1 << 30)
1434 +/* RX FIFO empty spaces - bits 24-29 */
1435 +#define SS_RXFIFO_SPACES(val) (((val) >> 24) & 0x3f)
1437 +/* TX FIFO status - bit 22 */
1438 +#define SS_TXFIFO_AVAILABLE (1 << 22)
1440 +/* TX FIFO available spaces - bits 16-21 */
1441 +#define SS_TXFIFO_SPACES(val) (((val) >> 16) & 0x3f)
1443 +#define SS_RXFIFO_EMP_INT_PENDING (1 << 10)
1444 +#define SS_TXFIFO_AVA_INT_PENDING (1 << 8)
1445 +#define SS_RXFIFO_EMP_INT_ENABLE (1 << 2)
1446 +#define SS_TXFIFO_AVA_INT_ENABLE (1 << 0)
1448 +/* SS_ICSR configuration values */
1449 +#define SS_ICS_DRQ_ENABLE (1 << 4)
1451 +struct sunxi_ss_ctx {
1452 + void __iomem *base;
1454 + struct clk *busclk;
1455 + struct clk *ssclk;
1456 + struct device *dev;
1457 + struct resource *res;
1458 + void *buf_in; /* pointer to data to be uploaded to the device */
1459 + size_t buf_in_size; /* size of buf_in */
1461 + size_t buf_out_size;
1462 + struct mutex lock; /* control the use of the device */
1463 + struct mutex bufout_lock; /* control the use of buf_out */
1464 + struct mutex bufin_lock; /* control the use of buf_in */
1467 +struct sunxi_tfm_ctx {
1468 + u32 key[AES_MAX_KEY_SIZE / 4];/* divided by sizeof(u32) */
1473 +struct sunxi_req_ctx {
1475 + u64 byte_count; /* number of bytes "uploaded" to the device */
1476 + u32 wb; /* a partial word waiting to be completed and
1477 + uploaded to the device */
1478 + /* number of bytes to be uploaded in the wb word */
1482 + unsigned int nwait;
1485 +#define SS_SEED_LEN (192/8)
1486 +#define SS_DATA_LEN (160/8)
1488 +struct prng_context {
1489 + u32 seed[SS_SEED_LEN/4];
1490 + unsigned int slen;
1493 +int sunxi_hash_crainit(struct crypto_tfm *tfm);
1494 +int sunxi_hash_init(struct ahash_request *areq);
1495 +int sunxi_hash_update(struct ahash_request *areq);
1496 +int sunxi_hash_final(struct ahash_request *areq);
1497 +int sunxi_hash_finup(struct ahash_request *areq);
1498 +int sunxi_hash_digest(struct ahash_request *areq);
1499 +int sunxi_hash_export(struct ahash_request *areq, void *out);
1500 +int sunxi_hash_import(struct ahash_request *areq, const void *in);
1502 +int sunxi_ss_aes_poll(struct ablkcipher_request *areq, u32 mode);
1503 +int sunxi_ss_des_poll(struct ablkcipher_request *areq, u32 mode);
1504 +int sunxi_ss_cipher_init(struct crypto_tfm *tfm);
1505 +int sunxi_ss_cipher_encrypt(struct ablkcipher_request *areq);
1506 +int sunxi_ss_cipher_decrypt(struct ablkcipher_request *areq);
1507 +int sunxi_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1508 + unsigned int keylen);
1509 +int sunxi_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1510 + unsigned int keylen);
1511 +int sunxi_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1512 + unsigned int keylen);