Mirror of git://projects.qi-hardware.com/openwrt-xburst.git (synced 2024-11-24 20:53:07 +02:00)
Commit 18a076fccd
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@24526 3c298f89-4303-0410-b956-a3cf2f4a3e73
--- a/drivers/crypto/Kconfig
|
|
+++ b/drivers/crypto/Kconfig
|
|
@@ -243,4 +243,75 @@
|
|
OMAP processors have SHA1/MD5 hw accelerator. Select this if you
|
|
want to use the OMAP module for SHA1/MD5 algorithms.
|
|
|
|
+config CRYPTO_DEV_LANTIQ
|
|
+ bool "Support for Lantiq crypto engine"
|
|
+ select CRYPTO_ALGAPI
|
|
+ default y
|
|
+ help
|
|
+ Will support the Lantiq crypto hardware (DEU).
|
|
+ If unsure, say N.
|
|
+
|
|
+menuconfig CRYPTO_DEV_LANTIQ_DES
|
|
+ bool "Lantiq crypto hardware for DES algorithm"
|
|
+ depends on CRYPTO_DEV_LANTIQ
|
|
+ select CRYPTO_BLKCIPHER
|
|
+ default y
|
|
+ help
|
|
+ Use crypto hardware for DES/3DES algorithm.
|
|
+ If unsure say N.
|
|
+
|
|
+menuconfig CRYPTO_DEV_LANTIQ_AES
|
|
+ bool "Lantiq crypto hardware for AES algorithm"
|
|
+ depends on CRYPTO_DEV_LANTIQ
|
|
+ select CRYPTO_BLKCIPHER
|
|
+ default y
|
|
+ help
|
|
+ Use crypto hardware for AES algorithm.
|
|
+ If unsure say N.
|
|
+
|
|
+menuconfig CRYPTO_DEV_LANTIQ_ARC4
|
|
+ bool "Lantiq crypto hardware for ARC4 algorithm"
|
|
+ depends on (CRYPTO_DEV_LANTIQ && IFXMIPS_AR9)
|
|
+ select CRYPTO_BLKCIPHER
|
|
+ default y
|
|
+ help
|
|
+ Use crypto hardware for ARC4 algorithm.
|
|
+ If unsure say N.
|
|
+
|
|
+menuconfig CRYPTO_DEV_LANTIQ_MD5
|
|
+ bool "Lantiq crypto hardware for MD5 algorithm"
|
|
+ depends on CRYPTO_DEV_LANTIQ
|
|
+ select CRYPTO_BLKCIPHER
|
|
+ default y
|
|
+ help
|
|
+ Use crypto hardware for MD5 algorithm.
|
|
+ If unsure say N.
|
|
+
|
|
+menuconfig CRYPTO_DEV_LANTIQ_SHA1
|
|
+ bool "Lantiq crypto hardware for SHA1 algorithm"
|
|
+ depends on CRYPTO_DEV_LANTIQ
|
|
+ select CRYPTO_BLKCIPHER
|
|
+ default y
|
|
+ help
|
|
+ Use crypto hardware for SHA1 algorithm.
|
|
+ If unsure say N.
|
|
+
|
|
+menuconfig CRYPTO_DEV_LANTIQ_SHA1_HMAC
|
|
+ bool "Lantiq crypto hardware for SHA1_HMAC algorithm"
|
|
+ depends on (CRYPTO_DEV_LANTIQ && IFXMIPS_AR9)
|
|
+ select CRYPTO_BLKCIPHER
|
|
+ default y
|
|
+ help
|
|
+ Use crypto hardware for SHA1_HMAC algorithm.
|
|
+ If unsure say N.
|
|
+
|
|
+menuconfig CRYPTO_DEV_LANTIQ_MD5_HMAC
|
|
+ bool "Lantiq crypto hardware for MD5_HMAC algorithms"
|
|
+ depends on (CRYPTO_DEV_LANTIQ && IFXMIPS_AR9)
|
|
+ select CRYPTO_BLKCIPHER
|
|
+ default y
|
|
+ help
|
|
+ Use crypto hardware for MD5_HMAC algorithm.
|
|
+ If unsure say N.
|
|
+
|
|
endif # CRYPTO_HW
|
|
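For reference, the symbols introduced above surface in the kernel configuration with the usual CONFIG_ prefix, exactly as the Makefile hunk below consumes them. A minimal .config fragment enabling the core driver plus the DES and AES paths might look like the following (an illustrative sketch, not part of the patch):

CONFIG_CRYPTO_DEV_LANTIQ=y
CONFIG_CRYPTO_DEV_LANTIQ_DES=y
CONFIG_CRYPTO_DEV_LANTIQ_AES=y
# CONFIG_CRYPTO_DEV_LANTIQ_ARC4 is not set

Because the options are declared bool rather than tristate, enabling them builds the hardware support into the kernel image rather than as loadable modules.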
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/Makefile
|
|
@@ -0,0 +1,11 @@
|
|
+obj-$(CONFIG_CRYPTO_DEV_LANTIQ) += deu.o
|
|
+obj-$(CONFIG_CRYPTO_DEV_LANTIQ) += deu_falcon.o
|
|
+obj-$(CONFIG_CRYPTO_DEV_LANTIQ) += deu_danube.o
|
|
+obj-$(CONFIG_CRYPTO_DEV_LANTIQ) += deu_ar9.o
|
|
+obj-$(CONFIG_CRYPTO_DEV_LANTIQ_DES) += des.o
|
|
+obj-$(CONFIG_CRYPTO_DEV_LANTIQ_AES) += aes.o
|
|
+obj-$(CONFIG_CRYPTO_DEV_LANTIQ_ARC4) += arc4.o
|
|
+obj-$(CONFIG_CRYPTO_DEV_LANTIQ_SHA1) += sha1.o
|
|
+obj-$(CONFIG_CRYPTO_DEV_LANTIQ_SHA1_HMAC) += sha1_hmac.o
|
|
+obj-$(CONFIG_CRYPTO_DEV_LANTIQ_MD5) += md5.o
|
|
+obj-$(CONFIG_CRYPTO_DEV_LANTIQ_MD5_HMAC) += md5_hmac.o
|
|
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/aes.c
|
|
@@ -0,0 +1,1029 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus
|
|
+ */
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup API
|
|
+ \brief Lantiq DEU driver module
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file aes.c
|
|
+ \ingroup LQ_DEU
|
|
+ \brief AES Encryption Driver main file
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_AES_FUNCTIONS LQ_AES_FUNCTIONS
|
|
+ \ingroup LQ_DEU
|
|
+ \brief Lantiq AES driver Functions
|
|
+*/
|
|
+
|
|
+#include <linux/version.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/crypto.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/delay.h>
|
|
+#include <asm/byteorder.h>
|
|
+#include <crypto/algapi.h>
|
|
+#include "deu.h"
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_DMA
|
|
+# include "deu_dma.h"
|
|
+#endif
|
|
+
|
|
+static spinlock_t cipher_lock;
|
|
+
|
|
+/* Definition of constants */
|
|
+
|
|
+#define AES_MIN_KEY_SIZE 16
|
|
+#define AES_MAX_KEY_SIZE 32
|
|
+#define AES_BLOCK_SIZE 16
|
|
+#define CTR_RFC3686_NONCE_SIZE 4
|
|
+#define CTR_RFC3686_IV_SIZE 8
|
|
+#define CTR_RFC3686_MAX_KEY_SIZE (AES_MAX_KEY_SIZE \
|
|
+ + CTR_RFC3686_NONCE_SIZE)
|
|
+
|
|
+struct aes_ctx {
|
|
+ int key_length;
|
|
+ u32 buf[AES_MAX_KEY_SIZE];
|
|
+ u8 nonce[CTR_RFC3686_NONCE_SIZE];
|
|
+};
|
|
+
|
|
+/** \fn int aes_set_key(struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief sets the AES keys
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param in_key input key
|
|
+ * \param key_len key lengths of 16, 24 and 32 bytes supported
|
|
+ * \return -EINVAL - bad key length, 0 - SUCCESS
|
|
+*/
|
|
+static int aes_set_key(struct crypto_tfm *tfm,
|
|
+ const u8 *in_key,
|
|
+ unsigned int key_len)
|
|
+{
|
|
+ struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
|
+ u32 *flags = &tfm->crt_flags;
|
|
+
|
|
+ DPRINTF(0, "ctx @%p, key_len %d\n", ctx, key_len);
|
|
+
|
|
+ if (key_len != 16 && key_len != 24 && key_len != 32) {
|
|
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ ctx->key_length = key_len;
|
|
+ memcpy((u8 *)(ctx->buf), in_key, key_len);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#ifndef CONFIG_CRYPTO_DEV_DMA
|
|
+/** \fn void deu_aes(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, size_t nbytes, int encdec, int mode)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief main interface to AES hardware
|
|
+ * \param ctx_arg crypto algo context
|
|
+ * \param out_arg output bytestream
|
|
+ * \param in_arg input bytestream
|
|
+ * \param iv_arg initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param mode operation mode such as ecb, cbc, ctr
|
|
+ *
|
|
+*/
|
|
+static void deu_aes(void *ctx_arg,
|
|
+ u8 *out_arg,
|
|
+ const u8 *in_arg,
|
|
+ u8 *iv_arg,
|
|
+ size_t nbytes,
|
|
+ int encdec,
|
|
+ int mode)
|
|
+#else
|
|
+
|
|
+/** \fn void deu_aes_core(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, size_t nbytes, int encdec, int mode)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief main interface to AES hardware
|
|
+ * \param ctx_arg crypto algo context
|
|
+ * \param out_arg output bytestream
|
|
+ * \param in_arg input bytestream
|
|
+ * \param iv_arg initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param mode operation mode such as ecb, cbc, ctr
|
|
+ *
|
|
+*/
|
|
+static void deu_aes_core(void *ctx_arg,
|
|
+ u8 *out_arg,
|
|
+ const u8 *in_arg,
|
|
+ u8 *iv_arg,
|
|
+ size_t nbytes,
|
|
+ int encdec,
|
|
+ int mode)
|
|
+#endif
|
|
+
|
|
+{
|
|
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
|
|
+ volatile struct deu_aes *aes = (volatile struct deu_aes *)AES_START;
|
|
+ struct aes_ctx *ctx = (struct aes_ctx *)ctx_arg;
|
|
+ u32 *in_key = ctx->buf;
|
|
+ ulong flag;
|
|
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
|
|
+ int key_len = ctx->key_length;
|
|
+
|
|
+#ifndef CONFIG_CRYPTO_DEV_DMA
|
|
+ int i = 0;
|
|
+ int byte_cnt = nbytes;
|
|
+#else
|
|
+ volatile struct deu_dma *dma = (struct deu_dma *)LQ_DEU_DMA_CON;
|
|
+ struct dma_device_info *dma_device = lq_deu[0].dma_device;
|
|
+ /* struct deu_drv_priv *deu_priv =
|
|
+ * (struct deu_drv_priv *)dma_device->priv; */
|
|
+ int wlen = 0;
|
|
+ u32 *outcopy = NULL;
|
|
+ u32 *dword_mem_aligned_in = NULL;
|
|
+
|
|
+# ifdef CONFIG_CRYPTO_DEV_POLL_DMA
|
|
+ u32 timeout = 0;
|
|
+ u32 *out_dma = NULL;
|
|
+# endif
|
|
+#endif
|
|
+
|
|
+ DPRINTF(0, "ctx @%p, mode %d, encdec %d\n", ctx, mode, encdec);
|
|
+
|
|
+ CRTCL_SECT_START;
|
|
+
|
|
+ /* 128, 192 or 256 bit key length */
|
|
+ aes->ctrl.K = key_len / 8 - 2;
|
|
+ if (key_len == 128 / 8) {
|
|
+ aes->K3R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 0));
|
|
+ aes->K2R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 1));
|
|
+ aes->K1R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 2));
|
|
+ aes->K0R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 3));
|
|
+ }
|
|
+ else if (key_len == 192 / 8) {
|
|
+ aes->K5R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 0));
|
|
+ aes->K4R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 1));
|
|
+ aes->K3R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 2));
|
|
+ aes->K2R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 3));
|
|
+ aes->K1R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 4));
|
|
+ aes->K0R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 5));
|
|
+ }
|
|
+ else if (key_len == 256 / 8) {
|
|
+ aes->K7R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 0));
|
|
+ aes->K6R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 1));
|
|
+ aes->K5R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 2));
|
|
+ aes->K4R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 3));
|
|
+ aes->K3R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 4));
|
|
+ aes->K2R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 5));
|
|
+ aes->K1R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 6));
|
|
+ aes->K0R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 7));
|
|
+ }
|
|
+ else {
|
|
+ CRTCL_SECT_END;
|
|
+ return; /* -EINVAL; */
|
|
+ }
|
|
+
|
|
+ /* let HW pre-process DEcryption key in any case (even if
|
|
+ ENcryption is used). Key Valid (KV) bit is then only
|
|
+ checked in decryption routine! */
|
|
+ aes->ctrl.PNK = 1;
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_DMA
|
|
+ while (aes->ctrl.BUS) {
|
|
+ /* this will not take long */
|
|
+ }
|
|
+ AES_DMA_MISC_CONFIG();
|
|
+#endif
|
|
+
|
|
+ aes->ctrl.E_D = !encdec; /* encryption */
|
|
+ aes->ctrl.O = mode; /* 0 ECB 1 CBC 2 OFB 3 CFB 4 CTR */
|
|
+ aes->ctrl.SM = 1; /* start after writing input register */
|
|
+ aes->ctrl.DAU = 0; /* Disable Automatic Update of init
|
|
+ vector */
|
|
+ aes->ctrl.ARS = 1; /* Autostart Select - write to IHR */
|
|
+
|
|
+ /* aes->ctrl.F = 128; */ /* default; only for CFB and OFB modes;
|
|
+ change only for
|
|
+ customer-specific apps */
|
|
+ if (mode > 0) {
|
|
+ aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *)iv_arg);
|
|
+ aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 1));
|
|
+ aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 2));
|
|
+ aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 3));
|
|
+ };
|
|
+
|
|
+#ifndef CONFIG_CRYPTO_DEV_DMA
|
|
+ i = 0;
|
|
+ while (byte_cnt >= 16) {
|
|
+ aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *)in_arg + (i * 4) + 0));
|
|
+ aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *)in_arg + (i * 4) + 1));
|
|
+ aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *)in_arg + (i * 4) + 2));
|
|
+ /* start crypto */
|
|
+ aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *)in_arg + (i * 4) + 3));
|
|
+
|
|
+ while (aes->ctrl.BUS) {
|
|
+ /* this will not take long */
|
|
+ }
|
|
+
|
|
+ *((volatile u32 *)out_arg + (i * 4) + 0) = aes->OD3R;
|
|
+ *((volatile u32 *)out_arg + (i * 4) + 1) = aes->OD2R;
|
|
+ *((volatile u32 *)out_arg + (i * 4) + 2) = aes->OD1R;
|
|
+ *((volatile u32 *)out_arg + (i * 4) + 3) = aes->OD0R;
|
|
+
|
|
+ i++;
|
|
+ byte_cnt -= 16;
|
|
+ }
|
|
+#else /* dma */
|
|
+ /* Prepare Rx buf length used in dma pseudo interrupt */
|
|
+ /* deu_priv->deu_rx_buf = out_arg; */
|
|
+ /* deu_priv->deu_rx_len = nbytes; */
|
|
+
|
|
+ /* memory alignment issue */
|
|
+ dword_mem_aligned_in = (u32 *)DEU_DWORD_REORDERING(in_arg,
|
|
+ aes_buff_in,
|
|
+ BUFFER_IN, nbytes);
|
|
+
|
|
+ dma->ctrl.ALGO = 1; /* AES */
|
|
+ dma->ctrl.BS = 0;
|
|
+ aes->ctrl.DAU = 0;
|
|
+ dma->ctrl.EN = 1;
|
|
+
|
|
+ while (aes->ctrl.BUS) {
|
|
+ /* wait for AES to be ready */
|
|
+ };
|
|
+
|
|
+ wlen = dma_device_write(dma_device, (u8 *)dword_mem_aligned_in,
|
|
+ nbytes, NULL);
|
|
+ if (wlen != nbytes) {
|
|
+ dma->ctrl.EN = 0;
|
|
+ CRTCL_SECT_END;
|
|
+ printk(KERN_ERR "[%s %s %d]: dma_device_write fail!\n",
|
|
+ __FILE__, __func__, __LINE__);
|
|
+ return; /* -EINVAL; */
|
|
+ }
|
|
+
|
|
+ WAIT_AES_DMA_READY();
|
|
+
|
|
+# ifdef CONFIG_CRYPTO_DEV_POLL_DMA
|
|
+ outcopy = (u32 *)DEU_DWORD_REORDERING(out_arg, aes_buff_out,
|
|
+ BUFFER_OUT, nbytes);
|
|
+
|
|
+ /* polling DMA rx channel */
|
|
+ while ((dma_device_read(dma_device, (u8 **)&out_dma, NULL)) == 0) {
|
|
+ timeout++;
|
|
+
|
|
+ if (timeout >= 333000) {
|
|
+ dma->ctrl.EN = 0;
|
|
+ CRTCL_SECT_END;
|
|
+ printk (KERN_ERR "[%s %s %d]: timeout!!\n",
|
|
+ __FILE__, __func__, __LINE__);
|
|
+ return; /* -EINVAL; */
|
|
+ }
|
|
+ }
|
|
+
|
|
+ WAIT_AES_DMA_READY();
|
|
+
|
|
+ AES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes);
|
|
+
|
|
+# else /* not working at the moment.. */
|
|
+ CRTCL_SECT_END;
|
|
+
|
|
+ /* sleep and wait for Rx finished */
|
|
+ DEU_WAIT_EVENT(deu_priv->deu_thread_wait, DEU_EVENT,
|
|
+ deu_priv->deu_event_flags);
|
|
+
|
|
+ CRTCL_SECT_START;
|
|
+# endif
|
|
+
|
|
+#endif /* dma */
|
|
+
|
|
+ /* tc.chen : copy iv_arg back */
|
|
+ if (mode > 0) {
|
|
+ *((u32 *)iv_arg) = DEU_ENDIAN_SWAP(*((u32 *)iv_arg));
|
|
+ *((u32 *)iv_arg + 1) = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 1));
|
|
+ *((u32 *)iv_arg + 2) = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 2));
|
|
+ *((u32 *)iv_arg + 3) = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 3));
|
|
+ }
|
|
+
|
|
+ CRTCL_SECT_END;
|
|
+}
|
|
+
|
|
+/** \fn int ctr_rfc3686_aes_set_key(struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief sets RFC3686 key
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param in_key input key
|
|
+ * \param key_len key lengths of 20, 28 and 36 bytes supported; the last 4 bytes are the nonce
|
|
+ * \return 0 - SUCCESS
|
|
+ * -EINVAL - bad key length
|
|
+*/
|
|
+static int ctr_rfc3686_aes_set_key(struct crypto_tfm *tfm,
|
|
+ const uint8_t *in_key,
|
|
+ unsigned int key_len)
|
|
+{
|
|
+ struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
|
+ u32 *flags = &tfm->crt_flags;
|
|
+
|
|
+ memcpy(ctx->nonce, in_key + (key_len - CTR_RFC3686_NONCE_SIZE),
|
|
+ CTR_RFC3686_NONCE_SIZE);
|
|
+
|
|
+ key_len -= CTR_RFC3686_NONCE_SIZE; /* remove 4 bytes of nonce */
|
|
+
|
|
+ if (key_len != 16 && key_len != 24 && key_len != 32) {
|
|
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ ctx->key_length = key_len;
|
|
+
|
|
+ memcpy((u8 *)(ctx->buf), in_key, key_len);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/** \fn void deu_aes(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief main interface with DEU hardware in DMA mode
|
|
+ * \param ctx_arg crypto algo context
|
|
+ * \param out_arg output bytestream
|
|
+ * \param in_arg input bytestream
|
|
+ * \param iv_arg initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param mode operation mode such as ecb, cbc, ctr
|
|
+*/
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_DMA
|
|
+static void deu_aes(void *ctx_arg,
|
|
+ u8 *out_arg,
|
|
+ const u8 *in_arg,
|
|
+ u8 *iv_arg,
|
|
+ u32 nbytes,
|
|
+ int encdec,
|
|
+ int mode)
|
|
+{
|
|
+ u32 remain = nbytes;
|
|
+ u32 inc;
|
|
+
|
|
+ while (remain > 0) {
|
|
+ if (remain >= DEU_MAX_PACKET_SIZE)
|
|
+ inc = DEU_MAX_PACKET_SIZE;
|
|
+ else
|
|
+ inc = remain;
|
|
+
|
|
+ remain -= inc;
|
|
+
|
|
+ deu_aes_core(ctx_arg, out_arg, in_arg, iv_arg, inc, encdec,
|
|
+ mode);
|
|
+
|
|
+ out_arg += inc;
|
|
+ in_arg += inc;
|
|
+ }
|
|
+}
|
|
+#endif
|
|
+
|
|
+/* definitions from linux/include/crypto.h:
|
|
+#define CRYPTO_TFM_MODE_ECB 0x00000001
|
|
+#define CRYPTO_TFM_MODE_CBC 0x00000002
|
|
+#define CRYPTO_TFM_MODE_CFB 0x00000004
|
|
+#define CRYPTO_TFM_MODE_CTR 0x00000008
|
|
+#define CRYPTO_TFM_MODE_OFB 0x00000010
|
|
+but hardware definition: 0 ECB 1 CBC 2 OFB 3 CFB 4 CTR */
|
|
+
|
|
+/** \fn void deu_aes_ecb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief sets AES hardware to ECB mode
|
|
+ * \param ctx crypto algo context
|
|
+ * \param dst output bytestream
|
|
+ * \param src input bytestream
|
|
+ * \param iv initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param inplace not used
|
|
+*/
|
|
+static void deu_aes_ecb(void *ctx,
|
|
+ uint8_t *dst,
|
|
+ const uint8_t *src,
|
|
+ uint8_t *iv,
|
|
+ size_t nbytes,
|
|
+ int encdec,
|
|
+ int inplace)
|
|
+{
|
|
+ deu_aes(ctx, dst, src, NULL, nbytes, encdec, 0);
|
|
+}
|
|
+
|
|
+/** \fn void deu_aes_cbc(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief sets AES hardware to CBC mode
|
|
+ * \param ctx crypto algo context
|
|
+ * \param dst output bytestream
|
|
+ * \param src input bytestream
|
|
+ * \param iv initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param inplace not used
|
|
+*/
|
|
+static void deu_aes_cbc(void *ctx,
|
|
+ uint8_t *dst,
|
|
+ const uint8_t *src,
|
|
+ uint8_t *iv,
|
|
+ size_t nbytes,
|
|
+ int encdec,
|
|
+ int inplace)
|
|
+{
|
|
+ deu_aes(ctx, dst, src, iv, nbytes, encdec, 1);
|
|
+}
|
|
+
|
|
+#if 0
|
|
+/** \fn void deu_aes_ofb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief sets AES hardware to OFB mode
|
|
+ * \param ctx crypto algo context
|
|
+ * \param dst output bytestream
|
|
+ * \param src input bytestream
|
|
+ * \param iv initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param inplace not used
|
|
+*/
|
|
+static void deu_aes_ofb(void *ctx,
|
|
+ uint8_t *dst,
|
|
+ const uint8_t *src,
|
|
+ uint8_t *iv,
|
|
+ size_t nbytes,
|
|
+ int encdec,
|
|
+ int inplace)
|
|
+{
|
|
+ deu_aes(ctx, dst, src, iv, nbytes, encdec, 2);
|
|
+}
|
|
+
|
|
+/** \fn void deu_aes_cfb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief sets AES hardware to CFB mode
|
|
+ * \param ctx crypto algo context
|
|
+ * \param dst output bytestream
|
|
+ * \param src input bytestream
|
|
+ * \param iv initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param inplace not used
|
|
+*/
|
|
+static void deu_aes_cfb(void *ctx,
|
|
+ uint8_t *dst,
|
|
+ const uint8_t *src,
|
|
+ uint8_t *iv,
|
|
+ size_t nbytes,
|
|
+ int encdec,
|
|
+ int inplace)
|
|
+{
|
|
+ deu_aes(ctx, dst, src, iv, nbytes, encdec, 3);
|
|
+}
|
|
+#endif
|
|
+
|
|
+/** \fn void deu_aes_ctr(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief sets AES hardware to CTR mode
|
|
+ * \param ctx crypto algo context
|
|
+ * \param dst output bytestream
|
|
+ * \param src input bytestream
|
|
+ * \param iv initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param inplace not used
|
|
+*/
|
|
+static void deu_aes_ctr(void *ctx,
|
|
+ uint8_t *dst,
|
|
+ const uint8_t *src,
|
|
+ uint8_t *iv,
|
|
+ size_t nbytes,
|
|
+ int encdec,
|
|
+ int inplace)
|
|
+{
|
|
+ deu_aes(ctx, dst, src, iv, nbytes, encdec, 4);
|
|
+}
|
|
+
|
|
+/** \fn void aes_encrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief encrypt AES_BLOCK_SIZE of data
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param out output bytestream
|
|
+ * \param in input bytestream
|
|
+*/
|
|
+static void aes_encrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
|
|
+{
|
|
+ struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
|
+ deu_aes(ctx, out, in, NULL, AES_BLOCK_SIZE, CRYPTO_DIR_ENCRYPT, 0);
|
|
+}
|
|
+
|
|
+/** \fn void aes_decrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief decrypt AES_BLOCK_SIZE of data
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param out output bytestream
|
|
+ * \param in input bytestream
|
|
+*/
|
|
+static void aes_decrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
|
|
+{
|
|
+ struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
|
+ deu_aes(ctx, out, in, NULL, AES_BLOCK_SIZE, CRYPTO_DIR_DECRYPT, 0);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * \brief AES function mappings
|
|
+*/
|
|
+static struct crypto_alg aes_alg = {
|
|
+ .cra_name = "aes",
|
|
+ .cra_driver_name = "lq_deu-aes",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
|
+ .cra_blocksize = AES_BLOCK_SIZE,
|
|
+ .cra_ctxsize = sizeof(struct aes_ctx),
|
|
+ .cra_module = THIS_MODULE,
|
|
+ .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
|
|
+ .cra_u = {
|
|
+ .cipher = {
|
|
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
|
|
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
|
|
+ .cia_setkey = aes_set_key,
|
|
+ .cia_encrypt = aes_encrypt,
|
|
+ .cia_decrypt = aes_decrypt,
|
|
+ }
|
|
+ }
|
|
+};
|
|
+
|
|
+/** \fn int ecb_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief ECB AES encrypt using linux crypto blkcipher
|
|
+ * \param desc blkcipher descriptor
|
|
+ * \param dst output scatterlist
|
|
+ * \param src input scatterlist
|
|
+ * \param nbytes data size in bytes
|
|
+ * \return err
|
|
+*/
|
|
+static int ecb_aes_encrypt(struct blkcipher_desc *desc,
|
|
+ struct scatterlist *dst,
|
|
+ struct scatterlist *src,
|
|
+ unsigned int nbytes)
|
|
+{
|
|
+ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
|
+ struct blkcipher_walk walk;
|
|
+ int err;
|
|
+
|
|
+ blkcipher_walk_init(&walk, dst, src, nbytes);
|
|
+ err = blkcipher_walk_virt(desc, &walk);
|
|
+
|
|
+ while ((nbytes = walk.nbytes)) {
|
|
+ nbytes -= (nbytes % AES_BLOCK_SIZE);
|
|
+ deu_aes_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ NULL, nbytes, CRYPTO_DIR_ENCRYPT, 0);
|
|
+ nbytes &= AES_BLOCK_SIZE - 1;
|
|
+ err = blkcipher_walk_done(desc, &walk, nbytes);
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+/** \fn int ecb_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief ECB AES decrypt using linux crypto blkcipher
|
|
+ * \param desc blkcipher descriptor
|
|
+ * \param dst output scatterlist
|
|
+ * \param src input scatterlist
|
|
+ * \param nbytes data size in bytes
|
|
+ * \return err
|
|
+*/
|
|
+static int ecb_aes_decrypt(struct blkcipher_desc *desc,
|
|
+ struct scatterlist *dst,
|
|
+ struct scatterlist *src,
|
|
+ unsigned int nbytes)
|
|
+{
|
|
+ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
|
+ struct blkcipher_walk walk;
|
|
+ int err;
|
|
+
|
|
+ blkcipher_walk_init(&walk, dst, src, nbytes);
|
|
+ err = blkcipher_walk_virt(desc, &walk);
|
|
+
|
|
+ while ((nbytes = walk.nbytes)) {
|
|
+ nbytes -= (nbytes % AES_BLOCK_SIZE);
|
|
+ deu_aes_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ NULL, nbytes, CRYPTO_DIR_DECRYPT, 0);
|
|
+ nbytes &= AES_BLOCK_SIZE - 1;
|
|
+ err = blkcipher_walk_done(desc, &walk, nbytes);
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * \brief AES function mappings
|
|
+*/
|
|
+static struct crypto_alg ecb_aes_alg = {
|
|
+ .cra_name = "ecb(aes)",
|
|
+ .cra_driver_name = "lq_deu-ecb(aes)",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
|
+ .cra_blocksize = AES_BLOCK_SIZE,
|
|
+ .cra_ctxsize = sizeof(struct aes_ctx),
|
|
+ .cra_type = &crypto_blkcipher_type,
|
|
+ .cra_module = THIS_MODULE,
|
|
+ .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
|
|
+ .cra_u = {
|
|
+ .blkcipher = {
|
|
+ .min_keysize = AES_MIN_KEY_SIZE,
|
|
+ .max_keysize = AES_MAX_KEY_SIZE,
|
|
+ .setkey = aes_set_key,
|
|
+ .encrypt = ecb_aes_encrypt,
|
|
+ .decrypt = ecb_aes_decrypt,
|
|
+ }
|
|
+ }
|
|
+};
|
|
+
|
|
+/** \fn int cbc_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief CBC AES encrypt using linux crypto blkcipher
|
|
+ * \param desc blkcipher descriptor
|
|
+ * \param dst output scatterlist
|
|
+ * \param src input scatterlist
|
|
+ * \param nbytes data size in bytes
|
|
+ * \return err
|
|
+*/
|
|
+static int cbc_aes_encrypt(struct blkcipher_desc *desc,
|
|
+ struct scatterlist *dst,
|
|
+ struct scatterlist *src,
|
|
+ unsigned int nbytes)
|
|
+{
|
|
+ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
|
+ struct blkcipher_walk walk;
|
|
+ int err;
|
|
+
|
|
+ blkcipher_walk_init(&walk, dst, src, nbytes);
|
|
+ err = blkcipher_walk_virt(desc, &walk);
|
|
+
|
|
+ while ((nbytes = walk.nbytes)) {
|
|
+ u8 *iv = walk.iv;
|
|
+ nbytes -= (nbytes % AES_BLOCK_SIZE);
|
|
+ deu_aes_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
|
|
+ nbytes &= AES_BLOCK_SIZE - 1;
|
|
+ err = blkcipher_walk_done(desc, &walk, nbytes);
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+/** \fn int cbc_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief CBC AES decrypt using linux crypto blkcipher
|
|
+ * \param desc blkcipher descriptor
|
|
+ * \param dst output scatterlist
|
|
+ * \param src input scatterlist
|
|
+ * \param nbytes data size in bytes
|
|
+ * \return err
|
|
+*/
|
|
+static int cbc_aes_decrypt(struct blkcipher_desc *desc,
|
|
+ struct scatterlist *dst,
|
|
+ struct scatterlist *src,
|
|
+ unsigned int nbytes)
|
|
+{
|
|
+ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
|
+ struct blkcipher_walk walk;
|
|
+ int err;
|
|
+
|
|
+ blkcipher_walk_init(&walk, dst, src, nbytes);
|
|
+ err = blkcipher_walk_virt(desc, &walk);
|
|
+
|
|
+ while ((nbytes = walk.nbytes)) {
|
|
+ u8 *iv = walk.iv;
|
|
+ nbytes -= (nbytes % AES_BLOCK_SIZE);
|
|
+ deu_aes_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
|
|
+ nbytes &= AES_BLOCK_SIZE - 1;
|
|
+ err = blkcipher_walk_done(desc, &walk, nbytes);
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * \brief AES function mappings
|
|
+*/
|
|
+static struct crypto_alg cbc_aes_alg = {
|
|
+ .cra_name = "cbc(aes)",
|
|
+ .cra_driver_name = "lq_deu-cbc(aes)",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
|
+ .cra_blocksize = AES_BLOCK_SIZE,
|
|
+ .cra_ctxsize = sizeof(struct aes_ctx),
|
|
+ .cra_type = &crypto_blkcipher_type,
|
|
+ .cra_module = THIS_MODULE,
|
|
+ .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
|
|
+ .cra_u = {
|
|
+ .blkcipher = {
|
|
+ .min_keysize = AES_MIN_KEY_SIZE,
|
|
+ .max_keysize = AES_MAX_KEY_SIZE,
|
|
+ .ivsize = AES_BLOCK_SIZE,
|
|
+ .setkey = aes_set_key,
|
|
+ .encrypt = cbc_aes_encrypt,
|
|
+ .decrypt = cbc_aes_decrypt,
|
|
+ }
|
|
+ }
|
|
+};
|
|
+
|
|
+/** \fn int ctr_basic_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief Counter mode AES encrypt using linux crypto blkcipher
|
|
+ * \param desc blkcipher descriptor
|
|
+ * \param dst output scatterlist
|
|
+ * \param src input scatterlist
|
|
+ * \param nbytes data size in bytes
|
|
+ * \return err
|
|
+*/
|
|
+static int ctr_basic_aes_encrypt(struct blkcipher_desc *desc,
|
|
+ struct scatterlist *dst,
|
|
+ struct scatterlist *src,
|
|
+ unsigned int nbytes)
|
|
+{
|
|
+ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
|
+ struct blkcipher_walk walk;
|
|
+ int err;
|
|
+
|
|
+ blkcipher_walk_init(&walk, dst, src, nbytes);
|
|
+ err = blkcipher_walk_virt(desc, &walk);
|
|
+
|
|
+ while ((nbytes = walk.nbytes)) {
|
|
+ u8 *iv = walk.iv;
|
|
+ nbytes -= (nbytes % AES_BLOCK_SIZE);
|
|
+ deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
|
|
+ nbytes &= AES_BLOCK_SIZE - 1;
|
|
+ err = blkcipher_walk_done(desc, &walk, nbytes);
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+/** \fn int ctr_basic_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief Counter mode AES decrypt using linux crypto blkcipher
|
|
+ * \param desc blkcipher descriptor
|
|
+ * \param dst output scatterlist
|
|
+ * \param src input scatterlist
|
|
+ * \param nbytes data size in bytes
|
|
+ * \return err
|
|
+*/
|
|
+static int ctr_basic_aes_decrypt(struct blkcipher_desc *desc,
|
|
+ struct scatterlist *dst,
|
|
+ struct scatterlist *src,
|
|
+ unsigned int nbytes)
|
|
+{
|
|
+ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
|
+ struct blkcipher_walk walk;
|
|
+ int err;
|
|
+
|
|
+ blkcipher_walk_init(&walk, dst, src, nbytes);
|
|
+ err = blkcipher_walk_virt(desc, &walk);
|
|
+
|
|
+ while ((nbytes = walk.nbytes)) {
|
|
+ u8 *iv = walk.iv;
|
|
+ nbytes -= (nbytes % AES_BLOCK_SIZE);
|
|
+ deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
|
|
+ nbytes &= AES_BLOCK_SIZE - 1;
|
|
+ err = blkcipher_walk_done(desc, &walk, nbytes);
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * \brief AES function mappings
|
|
+*/
|
|
+static struct crypto_alg ctr_basic_aes_alg = {
|
|
+ .cra_name = "ctr(aes)",
|
|
+ .cra_driver_name = "lq_deu-ctr(aes)",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
|
+ .cra_blocksize = AES_BLOCK_SIZE,
|
|
+ .cra_ctxsize = sizeof(struct aes_ctx),
|
|
+ .cra_type = &crypto_blkcipher_type,
|
|
+ .cra_module = THIS_MODULE,
|
|
+ .cra_list = LIST_HEAD_INIT(ctr_basic_aes_alg.cra_list),
|
|
+ .cra_u = {
|
|
+ .blkcipher = {
|
|
+ .min_keysize = AES_MIN_KEY_SIZE,
|
|
+ .max_keysize = AES_MAX_KEY_SIZE,
|
|
+ .ivsize = AES_BLOCK_SIZE,
|
|
+ .setkey = aes_set_key,
|
|
+ .encrypt = ctr_basic_aes_encrypt,
|
|
+ .decrypt = ctr_basic_aes_decrypt,
|
|
+ }
|
|
+ }
|
|
+};
|
|
+
|
|
+/** \fn int ctr_rfc3686_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief Counter mode AES (rfc3686) encrypt using linux crypto blkcipher
|
|
+ * \param desc blkcipher descriptor
|
|
+ * \param dst output scatterlist
|
|
+ * \param src input scatterlist
|
|
+ * \param nbytes data size in bytes
|
|
+ * \return err
|
|
+*/
|
|
+static int ctr_rfc3686_aes_encrypt(struct blkcipher_desc *desc,
|
|
+ struct scatterlist *dst,
|
|
+ struct scatterlist *src,
|
|
+ unsigned int nbytes)
|
|
+{
|
|
+ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
|
+ struct blkcipher_walk walk;
|
|
+ int err;
|
|
+ u8 rfc3686_iv[16];
|
|
+
|
|
+ blkcipher_walk_init(&walk, dst, src, nbytes);
|
|
+ err = blkcipher_walk_virt(desc, &walk);
|
|
+
|
|
+ /* set up counter block */
|
|
+ memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
|
|
+ memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, walk.iv,
|
|
+ CTR_RFC3686_IV_SIZE);
|
|
+
|
|
+ /* initialize counter portion of counter block */
|
|
+ *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
|
|
+ cpu_to_be32(1);
|
|
+
|
|
+ while ((nbytes = walk.nbytes)) {
|
|
+ nbytes -= (nbytes % AES_BLOCK_SIZE);
|
|
+ deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ rfc3686_iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
|
|
+ nbytes &= AES_BLOCK_SIZE - 1;
|
|
+ err = blkcipher_walk_done(desc, &walk, nbytes);
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+/** \fn int ctr_rfc3686_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief Counter mode AES (rfc3686) decrypt using linux crypto blkcipher
|
|
+ * \param desc blkcipher descriptor
|
|
+ * \param dst output scatterlist
|
|
+ * \param src input scatterlist
|
|
+ * \param nbytes data size in bytes
|
|
+ * \return err
|
|
+*/
|
|
+static int ctr_rfc3686_aes_decrypt(struct blkcipher_desc *desc,
|
|
+ struct scatterlist *dst,
|
|
+ struct scatterlist *src,
|
|
+ unsigned int nbytes)
|
|
+{
|
|
+ struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
|
+ struct blkcipher_walk walk;
|
|
+ int err;
|
|
+ u8 rfc3686_iv[16];
|
|
+
|
|
+ blkcipher_walk_init(&walk, dst, src, nbytes);
|
|
+ err = blkcipher_walk_virt(desc, &walk);
|
|
+
|
|
+ /* set up counter block */
|
|
+ memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
|
|
+ memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, walk.iv,
|
|
+ CTR_RFC3686_IV_SIZE);
|
|
+
|
|
+ /* initialize counter portion of counter block */
|
|
+ *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
|
|
+ cpu_to_be32(1);
|
|
+
|
|
+ while ((nbytes = walk.nbytes)) {
|
|
+ nbytes -= (nbytes % AES_BLOCK_SIZE);
|
|
+ deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ rfc3686_iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
|
|
+ nbytes &= AES_BLOCK_SIZE - 1;
|
|
+ err = blkcipher_walk_done(desc, &walk, nbytes);
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * \brief AES function mappings
|
|
+*/
|
|
+static struct crypto_alg ctr_rfc3686_aes_alg = {
|
|
+ .cra_name = "rfc3686(ctr(aes))",
|
|
+ .cra_driver_name = "lq_deu-ctr-rfc3686(aes)",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
|
+ .cra_blocksize = AES_BLOCK_SIZE,
|
|
+ .cra_ctxsize = sizeof(struct aes_ctx),
|
|
+ .cra_type = &crypto_blkcipher_type,
|
|
+ .cra_module = THIS_MODULE,
|
|
+ .cra_list = LIST_HEAD_INIT(ctr_rfc3686_aes_alg.cra_list),
|
|
+ .cra_u = {
|
|
+ .blkcipher = {
|
|
+ .min_keysize = AES_MIN_KEY_SIZE,
|
|
+ .max_keysize = CTR_RFC3686_MAX_KEY_SIZE,
|
|
+ .ivsize = CTR_RFC3686_IV_SIZE,
|
|
+ .setkey = ctr_rfc3686_aes_set_key,
|
|
+ .encrypt = ctr_rfc3686_aes_encrypt,
|
|
+ .decrypt = ctr_rfc3686_aes_decrypt,
|
|
+ }
|
|
+ }
|
|
+};
|
|
+
|
|
+/** \fn int lq_deu_init_aes (void)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief function to initialize AES driver
|
|
+ * \return ret
|
|
+*/
|
|
+int lq_deu_init_aes(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if ((ret = crypto_register_alg(&aes_alg)))
|
|
+ goto aes_err;
|
|
+
|
|
+ if ((ret = crypto_register_alg(&ecb_aes_alg)))
|
|
+ goto ecb_aes_err;
|
|
+
|
|
+ if ((ret = crypto_register_alg(&cbc_aes_alg)))
|
|
+ goto cbc_aes_err;
|
|
+
|
|
+ if ((ret = crypto_register_alg(&ctr_basic_aes_alg)))
|
|
+ goto ctr_basic_aes_err;
|
|
+
|
|
+ if ((ret = crypto_register_alg(&ctr_rfc3686_aes_alg)))
|
|
+ goto ctr_rfc3686_aes_err;
|
|
+
|
|
+ deu_aes_chip_init();
|
|
+
|
|
+ CRTCL_SECT_INIT;
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_DMA
|
|
+ if (ALLOCATE_MEMORY(BUFFER_IN, AES_ALGO) < 0) {
|
|
+ printk(KERN_ERR "[%s %s %d]: malloc memory fail!\n",
|
|
+ __FILE__, __func__, __LINE__);
|
|
+ goto ctr_rfc3686_aes_err;
|
|
+ }
|
|
+ if (ALLOCATE_MEMORY(BUFFER_OUT, AES_ALGO) < 0) {
|
|
+ printk(KERN_ERR "[%s %s %d]: malloc memory fail!\n",
|
|
+ __FILE__, __func__, __LINE__);
|
|
+ goto ctr_rfc3686_aes_err;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ printk(KERN_NOTICE "Lantiq DEU AES initialized%s.\n",
|
|
+ disable_deudma ? "" : " (DMA)");
|
|
+ return ret;
|
|
+
|
|
+ctr_rfc3686_aes_err:
|
|
+ crypto_unregister_alg(&ctr_rfc3686_aes_alg);
|
|
+ printk(KERN_ERR "Lantiq ctr_rfc3686_aes initialization failed!\n");
|
|
+ return ret;
|
|
+ctr_basic_aes_err:
|
|
+ crypto_unregister_alg(&ctr_basic_aes_alg);
|
|
+ printk(KERN_ERR "Lantiq ctr_basic_aes initialization failed!\n");
|
|
+ return ret;
|
|
+cbc_aes_err:
|
|
+ crypto_unregister_alg(&cbc_aes_alg);
|
|
+ printk(KERN_ERR "Lantiq cbc_aes initialization failed!\n");
|
|
+ return ret;
|
|
+ecb_aes_err:
|
|
+ crypto_unregister_alg(&ecb_aes_alg);
|
|
+ printk(KERN_ERR "Lantiq aes initialization failed!\n");
|
|
+ return ret;
|
|
+aes_err:
|
|
+ printk(KERN_ERR "Lantiq DEU AES initialization failed!\n");
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/** \fn void lq_deu_fini_aes(void)
|
|
+ * \ingroup LQ_AES_FUNCTIONS
|
|
+ * \brief unregister aes driver
|
|
+*/
|
|
+void lq_deu_fini_aes(void)
|
|
+{
|
|
+ crypto_unregister_alg(&aes_alg);
|
|
+ crypto_unregister_alg(&ecb_aes_alg);
|
|
+ crypto_unregister_alg(&cbc_aes_alg);
|
|
+ crypto_unregister_alg(&ctr_basic_aes_alg);
|
|
+ crypto_unregister_alg(&ctr_rfc3686_aes_alg);
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_DMA
|
|
+ FREE_MEMORY(aes_buff_in);
|
|
+ FREE_MEMORY(aes_buff_out);
|
|
+#endif
|
|
+}
|
|
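The file above only registers the algorithms; a consumer reaches them through the generic kernel crypto API of that era. The sketch below is not part of the patch: key, IV and buffer values are placeholders, the function name is hypothetical, and which backend the API actually selects depends on algorithm priorities. It shows how "cbc(aes)" would be requested through the old blkcipher interface:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical consumer: CBC-AES encrypt one 16-byte block in place. */
static int example_cbc_aes_encrypt(void)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	u8 key[16] = { 0 };	/* placeholder key */
	u8 iv[16]  = { 0 };	/* placeholder IV */
	u8 buf[16] = { 0 };	/* plaintext in, ciphertext out */
	int ret;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_blkcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out;

	crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));

	desc.tfm   = tfm;
	desc.flags = 0;

	sg_init_one(&sg, buf, sizeof(buf));
	ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));
out:
	crypto_free_blkcipher(tfm);
	return ret;
}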
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/arc4.c
|
|
@@ -0,0 +1,397 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus
|
|
+ */
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup API
|
|
+ \brief Lantiq DEU driver module
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file arc4.c
|
|
+ \ingroup LQ_DEU
|
|
+ \brief ARC4 encryption DEU driver file
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_ARC4_FUNCTIONS LQ_ARC4_FUNCTIONS
|
|
+ \ingroup LQ_DEU
|
|
+ \brief Lantiq DEU driver functions
|
|
+*/
|
|
+
|
|
+#include <linux/version.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/crypto.h>
|
|
+#include <crypto/algapi.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <asm/byteorder.h>
|
|
+#include <linux/delay.h>
|
|
+
|
|
+#ifdef CONFIG_SOL_LANTIQ_XWAY
|
|
+
|
|
+#include "deu.h"
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+
|
|
+static spinlock_t cipher_lock;
|
|
+
|
|
+/* Preprocessor declarations */
|
|
+#define ARC4_MIN_KEY_SIZE 1
|
|
+/* #define ARC4_MAX_KEY_SIZE 256 */
|
|
+#define ARC4_MAX_KEY_SIZE 16
|
|
+#define ARC4_BLOCK_SIZE 1
|
|
+
|
|
+/*
|
|
+ * \brief arc4 private structure
|
|
+*/
|
|
+struct arc4_ctx {
|
|
+ int key_length;
|
|
+ u8 buf[120];
|
|
+};
|
|
+
|
|
+/** \fn static void deu_arc4(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
|
|
+ \ingroup LQ_ARC4_FUNCTIONS
|
|
+ \brief main interface to ARC4 hardware
|
|
+ \param ctx_arg crypto algo context
|
|
+ \param out_arg output bytestream
|
|
+ \param in_arg input bytestream
|
|
+ \param iv_arg initialization vector
|
|
+ \param nbytes length of bytestream
|
|
+ \param encdec 1 for encrypt; 0 for decrypt
|
|
+ \param mode operation mode such as ecb, cbc, ctr
|
|
+*/
|
|
+static void deu_arc4(void *ctx_arg,
|
|
+ u8 *out_arg,
|
|
+ const u8 *in_arg,
|
|
+ u8 *iv_arg,
|
|
+ u32 nbytes,
|
|
+ int encdec,
|
|
+ int mode)
|
|
+{
|
|
+ volatile struct deu_arc4 *arc4 = (struct deu_arc4 *) ARC4_START;
|
|
+ int i = 0;
|
|
+ ulong flag;
|
|
+
|
|
+#if 1 /* need to handle nbytes not multiple of 16 */
|
|
+ volatile u32 tmp_array32[4];
|
|
+ volatile u8 *tmp_ptr8;
|
|
+ int remaining_bytes, j;
|
|
+#endif
|
|
+
|
|
+ CRTCL_SECT_START;
|
|
+
|
|
+ arc4->IDLEN = nbytes;
|
|
+
|
|
+#if 1
|
|
+ while (i < nbytes) {
|
|
+ arc4->ID3R = *((u32 *) in_arg + (i>>2) + 0);
|
|
+ arc4->ID2R = *((u32 *) in_arg + (i>>2) + 1);
|
|
+ arc4->ID1R = *((u32 *) in_arg + (i>>2) + 2);
|
|
+ arc4->ID0R = *((u32 *) in_arg + (i>>2) + 3);
|
|
+
|
|
+ arc4->ctrl.GO = 1;
|
|
+
|
|
+ while (arc4->ctrl.BUS) {
|
|
+ /* this will not take long */ }
|
|
+
|
|
+#if 1
|
|
+ /* need to handle nbytes not multiple of 16 */
|
|
+ tmp_array32[0] = arc4->OD3R;
|
|
+ tmp_array32[1] = arc4->OD2R;
|
|
+ tmp_array32[2] = arc4->OD1R;
|
|
+ tmp_array32[3] = arc4->OD0R;
|
|
+
|
|
+ remaining_bytes = nbytes - i;
|
|
+ if (remaining_bytes > 16)
|
|
+ remaining_bytes = 16;
|
|
+
|
|
+ tmp_ptr8 = (u8 *)&tmp_array32[0];
|
|
+ for (j = 0; j < remaining_bytes; j++)
|
|
+ *out_arg++ = *tmp_ptr8++;
|
|
+#else
|
|
+ *((u32 *) out_arg + (i>>2) + 0) = arc4->OD3R;
|
|
+ *((u32 *) out_arg + (i>>2) + 1) = arc4->OD2R;
|
|
+ *((u32 *) out_arg + (i>>2) + 2) = arc4->OD1R;
|
|
+ *((u32 *) out_arg + (i>>2) + 3) = arc4->OD0R;
|
|
+#endif
|
|
+
|
|
+ i += 16;
|
|
+ }
|
|
+#else /* dma */
|
|
+
|
|
+#endif /* dma */
|
|
+
|
|
+ CRTCL_SECT_END;
|
|
+}
|
|
+
|
|
+/** \fn arc4_chip_init(void)
|
|
+ \ingroup LQ_ARC4_FUNCTIONS
|
|
+ \brief initialize arc4 hardware
|
|
+*/
|
|
+static void arc4_chip_init(void)
|
|
+{
|
|
+ /* do nothing */
|
|
+}
|
|
+
|
|
+/** \fn static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
|
|
+ \ingroup LQ_ARC4_FUNCTIONS
|
|
+ \brief sets ARC4 key
|
|
+ \param tfm linux crypto algo transform
|
|
+ \param in_key input key
|
|
+ \param key_len key lengths less than or equal to 16 bytes supported
|
|
+*/
|
|
+static int arc4_set_key(struct crypto_tfm *tfm,
|
|
+ const u8 *inkey,
|
|
+ unsigned int key_len)
|
|
+{
|
|
+ /* struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); */
|
|
+ volatile struct deu_arc4 *arc4 = (struct deu_arc4 *) ARC4_START;
|
|
+
|
|
+ u32 *in_key = (u32 *)inkey;
|
|
+
|
|
+ /* must program all bits at one go?!!! */
|
|
+#if 1
|
|
+ /* #ifndef CONFIG_CRYPTO_DEV_VR9_DMA */
|
|
+ *LQ_ARC4_CON = ( (1<<31) | ((key_len - 1)<<27) | (1<<26) | (3<<16) );
|
|
+ /* NDC=1,ENDI=1,GO=0,KSAE=1,SM=0 */
|
|
+
|
|
+ arc4->K3R = *((u32 *) in_key + 0);
|
|
+ arc4->K2R = *((u32 *) in_key + 1);
|
|
+ arc4->K1R = *((u32 *) in_key + 2);
|
|
+ arc4->K0R = *((u32 *) in_key + 3);
|
|
+#else /* dma */
|
|
+ *AMAZONS_ARC4_CON = ( (1<<31) | ((key_len - 1)<<27) | (1<<26) | (3<<16) | (1<<4) );
|
|
+ /* NDC=1,ENDI=1,GO=0,KSAE=1,SM=1 */
|
|
+
|
|
+ arc4->K3R = *((u32 *) in_key + 0);
|
|
+ arc4->K2R = *((u32 *) in_key + 1);
|
|
+ arc4->K1R = *((u32 *) in_key + 2);
|
|
+ arc4->K0R = *((u32 *) in_key + 3);
|
|
+
|
|
+#if 0
|
|
+ arc4->K3R = deu_endian_swap(*((u32 *) in_key + 0));
|
|
+ arc4->K2R = deu_endian_swap(*((u32 *) in_key + 1));
|
|
+ arc4->K1R = deu_endian_swap(*((u32 *) in_key + 2));
|
|
+ arc4->K0R = deu_endian_swap(*((u32 *) in_key + 3));
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
+#if 0 /* arc4 is an ugly state machine, KSAE can only be set once per session */
|
|
+ ctx->key_length = key_len;
|
|
+
|
|
+ memcpy((u8 *)(ctx->buf), in_key, key_len);
|
|
+#endif
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/** \fn static void deu_arc4_ecb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
|
|
+ \ingroup LQ_ARC4_FUNCTIONS
|
|
+ \brief sets ARC4 hardware to ECB mode
|
|
+ \param ctx crypto algo context
|
|
+ \param dst output bytestream
|
|
+ \param src input bytestream
|
|
+ \param iv initialization vector
|
|
+ \param nbytes length of bytestream
|
|
+ \param encdec 1 for encrypt; 0 for decrypt
|
|
+ \param inplace not used
|
|
+*/
|
|
+static void deu_arc4_ecb(void *ctx,
|
|
+ uint8_t *dst,
|
|
+ const uint8_t *src,
|
|
+ uint8_t *iv,
|
|
+ size_t nbytes,
|
|
+ int encdec,
|
|
+ int inplace)
|
|
+{
|
|
+ deu_arc4(ctx, dst, src, NULL, nbytes, encdec, 0);
|
|
+}
|
|
+
|
|
+/** \fn static void arc4_crypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
|
+ \ingroup LQ_ARC4_FUNCTIONS
|
|
+ \brief encrypt/decrypt ARC4_BLOCK_SIZE of data
|
|
+ \param tfm linux crypto algo transform
|
|
+ \param out output bytestream
|
|
+ \param in input bytestream
|
|
+*/
|
|
+static void arc4_crypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
|
+{
|
|
+ struct arc4_ctx *ctx = crypto_tfm_ctx(tfm);
|
|
+
|
|
+ deu_arc4(ctx, out, in, NULL, ARC4_BLOCK_SIZE,
|
|
+ CRYPTO_DIR_DECRYPT, CRYPTO_TFM_MODE_ECB);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * \brief ARC4 function mappings
|
|
+*/
|
|
+static struct crypto_alg arc4_alg = {
|
|
+ .cra_name = "arc4",
|
|
+ .cra_driver_name = "lq_deu-arc4",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
|
+ .cra_blocksize = ARC4_BLOCK_SIZE,
|
|
+ .cra_ctxsize = sizeof(struct arc4_ctx),
|
|
+ .cra_module = THIS_MODULE,
|
|
+ .cra_list = LIST_HEAD_INIT(arc4_alg.cra_list),
|
|
+ .cra_u = {
|
|
+ .cipher = {
|
|
+ .cia_min_keysize = ARC4_MIN_KEY_SIZE,
|
|
+ .cia_max_keysize = ARC4_MAX_KEY_SIZE,
|
|
+ .cia_setkey = arc4_set_key,
|
|
+ .cia_encrypt = arc4_crypt,
|
|
+ .cia_decrypt = arc4_crypt,
|
|
+ }
|
|
+ }
|
|
+};
|
|
+
|
|
+/** \fn static int ecb_arc4_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
|
|
+ \ingroup LQ_ARC4_FUNCTIONS
|
|
+ \brief ECB ARC4 encrypt using linux crypto blkcipher
|
|
+ \param desc blkcipher descriptor
|
|
+ \param dst output scatterlist
|
|
+ \param src input scatterlist
|
|
+ \param nbytes data size in bytes
|
|
+*/
|
|
+static int ecb_arc4_encrypt(struct blkcipher_desc *desc,
|
|
+ struct scatterlist *dst,
|
|
+ struct scatterlist *src,
|
|
+ unsigned int nbytes)
|
|
+{
|
|
+ struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
|
+ struct blkcipher_walk walk;
|
|
+ int err;
|
|
+
|
|
+ DPRINTF(1, "\n");
|
|
+ blkcipher_walk_init(&walk, dst, src, nbytes);
|
|
+ err = blkcipher_walk_virt(desc, &walk);
|
|
+
|
|
+ while ((nbytes = walk.nbytes)) {
|
|
+ deu_arc4_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ NULL, nbytes, CRYPTO_DIR_ENCRYPT, 0);
|
|
+ nbytes &= ARC4_BLOCK_SIZE - 1;
|
|
+ err = blkcipher_walk_done(desc, &walk, nbytes);
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+/** \fn static int ecb_arc4_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
|
|
+ \ingroup LQ_ARC4_FUNCTIONS
|
|
+ \brief ECB ARC4 decrypt using linux crypto blkcipher
|
|
+ \param desc blkcipher descriptor
|
|
+ \param dst output scatterlist
|
|
+ \param src input scatterlist
|
|
+ \param nbytes data size in bytes
|
|
+*/
|
|
+static int ecb_arc4_decrypt(struct blkcipher_desc *desc,
|
|
+ struct scatterlist *dst,
|
|
+ struct scatterlist *src,
|
|
+ unsigned int nbytes)
|
|
+{
|
|
+ struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
|
+ struct blkcipher_walk walk;
|
|
+ int err;
|
|
+
|
|
+ DPRINTF(1, "\n");
|
|
+ blkcipher_walk_init(&walk, dst, src, nbytes);
|
|
+ err = blkcipher_walk_virt(desc, &walk);
|
|
+
|
|
+ while ((nbytes = walk.nbytes)) {
|
|
+ deu_arc4_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ NULL, nbytes, CRYPTO_DIR_DECRYPT, 0);
|
|
+ nbytes &= ARC4_BLOCK_SIZE - 1;
|
|
+ err = blkcipher_walk_done(desc, &walk, nbytes);
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * \brief ARC4 function mappings
|
|
+*/
|
|
+static struct crypto_alg ecb_arc4_alg = {
|
|
+ .cra_name = "ecb(arc4)",
|
|
+ .cra_driver_name = "lq_deu-ecb(arc4)",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
|
+ .cra_blocksize = ARC4_BLOCK_SIZE,
|
|
+ .cra_ctxsize = sizeof(struct arc4_ctx),
|
|
+ .cra_type = &crypto_blkcipher_type,
|
|
+ .cra_module = THIS_MODULE,
|
|
+ .cra_list = LIST_HEAD_INIT(ecb_arc4_alg.cra_list),
|
|
+ .cra_u = {
|
|
+ .blkcipher = {
|
|
+ .min_keysize = ARC4_MIN_KEY_SIZE,
|
|
+ .max_keysize = ARC4_MAX_KEY_SIZE,
|
|
+ .setkey = arc4_set_key,
|
|
+ .encrypt = ecb_arc4_encrypt,
|
|
+ .decrypt = ecb_arc4_decrypt,
|
|
+ }
|
|
+ }
|
|
+};
|
|
+
|
|
+/** \fn int lq_deu_init_arc4(void)
|
|
+ \ingroup LQ_ARC4_FUNCTIONS
|
|
+ \brief initialize arc4 driver
|
|
+*/
|
|
+int lq_deu_init_arc4(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if ((ret = crypto_register_alg(&arc4_alg)))
|
|
+ goto arc4_err;
|
|
+
|
|
+ if ((ret = crypto_register_alg(&ecb_arc4_alg)))
|
|
+ goto ecb_arc4_err;
|
|
+
|
|
+ arc4_chip_init();
|
|
+
|
|
+ CRTCL_SECT_INIT;
|
|
+
|
|
+ printk(KERN_NOTICE "Lantiq DEU ARC4 initialized %s.\n",
|
|
+ disable_deudma ? "" : " (DMA)");
|
|
+ return ret;
|
|
+
|
|
+arc4_err:
|
|
+ crypto_unregister_alg(&arc4_alg);
|
|
+ printk(KERN_ERR "Lantiq arc4 initialization failed!\n");
|
|
+ return ret;
|
|
+ecb_arc4_err:
|
|
+ crypto_unregister_alg(&ecb_arc4_alg);
|
|
+ printk(KERN_ERR "Lantiq ecb_arc4 initialization failed!\n");
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/** \fn void lq_deu_fini_arc4(void)
|
|
+ \ingroup LQ_ARC4_FUNCTIONS
|
|
+ \brief unregister arc4 driver
|
|
+*/
|
|
+void lq_deu_fini_arc4(void)
|
|
+{
|
|
+ crypto_unregister_alg(&arc4_alg);
|
|
+ crypto_unregister_alg(&ecb_arc4_alg);
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/des.c
|
|
@@ -0,0 +1,929 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus
|
|
+ */
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup API
|
|
+ \brief Lantiq DEU driver
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file des.c
|
|
+ \ingroup LQ_DEU
|
|
+ \brief DES encryption DEU driver file
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DES_FUNCTIONS LQ_DES_FUNCTIONS
|
|
+ \ingroup LQ_DEU
|
|
+ \brief Lantiq DES Encryption functions
|
|
+*/
|
|
+
|
|
+#include <linux/version.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/crypto.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/delay.h>
|
|
+#include <asm/byteorder.h>
|
|
+#include <crypto/algapi.h>
|
|
+
|
|
+#ifdef CONFIG_SOL_LANTIQ_XWAY
|
|
+
|
|
+#include "deu.h"
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_DMA
|
|
+# include "deu_dma.h"
|
|
+#endif
|
|
+
|
|
+static spinlock_t cipher_lock;
|
|
+
|
|
+/* Preprocessor declarations */
|
|
+#define DES_KEY_SIZE 8
|
|
+#define DES_EXPKEY_WORDS 32
|
|
+#define DES_BLOCK_SIZE 8
|
|
+#define DES3_EDE_KEY_SIZE (3 * DES_KEY_SIZE)
|
|
+#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS)
|
|
+#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE
|
|
+
|
|
+struct des_ctx {
|
|
+ int controlr_M;
|
|
+ int key_length;
|
|
+ u8 iv[DES_BLOCK_SIZE];
|
|
+ u32 expkey[DES3_EDE_EXPKEY_WORDS];
|
|
+};
|
|
+
|
|
+/** \fn int des_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief sets DES key
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param key input key
|
|
+ * \param key_len key length
|
|
+*/
|
|
+static int des_setkey(struct crypto_tfm *tfm,
|
|
+ const u8 *key,
|
|
+ unsigned int key_len)
|
|
+{
|
|
+ struct des_ctx *ctx = crypto_tfm_ctx(tfm);
|
|
+
|
|
+ DPRINTF(0, "ctx @%p, key_len %d %d\n", ctx, key_len);
|
|
+
|
|
+ ctx->controlr_M = 0; /* des */
|
|
+ ctx->key_length = key_len;
|
|
+
|
|
+ memcpy((u8 *)(ctx->expkey), key, key_len);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+/** \fn void deu_des(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief main interface to DES hardware
|
|
+ * \param ctx_arg crypto algo context
|
|
+ * \param out_arg output bytestream
|
|
+ * \param in_arg input bytestream
|
|
+ * \param iv_arg initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param mode operation mode such as ecb, cbc
|
|
+*/
|
|
+
|
|
+static void deu_des(void *ctx_arg,
|
|
+ u8 *out_arg,
|
|
+ const u8 *in_arg,
|
|
+ u8 *iv_arg,
|
|
+ u32 nbytes,
|
|
+ int encdec,
|
|
+ int mode)
|
|
+#else
|
|
+/** \fn void deu_des_core(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief main interface to DES hardware
|
|
+ * \param ctx_arg crypto algo context
|
|
+ * \param out_arg output bytestream
|
|
+ * \param in_arg input bytestream
|
|
+ * \param iv_arg initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param mode operation mode such as ecb, cbc
|
|
+*/
|
|
+static void deu_des_core(void *ctx_arg,
|
|
+ u8 *out_arg,
|
|
+ const u8 *in_arg,
|
|
+ u8 *iv_arg,
|
|
+ u32 nbytes,
|
|
+ int encdec,
|
|
+ int mode)
|
|
+#endif
|
|
+{
|
|
+ volatile struct deu_des *des = (struct deu_des *) DES_3DES_START;
|
|
+ struct des_ctx *dctx = ctx_arg;
|
|
+ u32 *key = dctx->expkey;
|
|
+ ulong flag;
|
|
+
|
|
+#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+ int i = 0;
|
|
+ int nblocks = 0;
|
|
+#else
|
|
+ volatile struct deu_dma *dma = (struct deu_dma *) LQ_DEU_DMA_CON;
|
|
+ struct dma_device_info *dma_device = lq_deu[0].dma_device;
|
|
+ /* struct deu_drv_priv *deu_priv =
|
|
+ * (struct deu_drv_priv *)dma_device->priv; */
|
|
+ int wlen = 0;
|
|
+ u32 *outcopy = NULL;
|
|
+ u32 *dword_mem_aligned_in = NULL;
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_POLL_DMA
|
|
+ u32 timeout = 0;
|
|
+ u32 *out_dma = NULL;
|
|
+#endif
|
|
+
|
|
+#endif
|
|
+
|
|
+ DPRINTF(0, "ctx @%p, mode %d, encdec %d\n", dctx, mode, encdec);
|
|
+
|
|
+ CRTCL_SECT_START;
|
|
+
|
|
+ des->ctrl.E_D = !encdec; /* encryption */
|
|
+ des->ctrl.O = mode; /* 0 ECB, 1 CBC, 2 OFB, 3 CFB, 4 CTR */
|
|
+ des->ctrl.SM = 1; /* start after writing input register */
|
|
+ des->ctrl.DAU = 0; /* Disable Automatic Update of init vect */
|
|
+ des->ctrl.ARS = 1; /* Autostart Select - write to IHR */
|
|
+
|
|
+ des->ctrl.M = dctx->controlr_M;
|
|
+ /* write keys */
|
|
+ if (dctx->controlr_M == 0) {
|
|
+ /* DES mode */
|
|
+ des->K1HR = DEU_ENDIAN_SWAP(*((u32 *) key + 0));
|
|
+ des->K1LR = DEU_ENDIAN_SWAP(*((u32 *) key + 1));
|
|
+#ifdef CRYPTO_DEBUG
|
|
+ printk("key1: %x\n", (*((u32 *) key + 0)));
|
|
+ printk("key2: %x\n", (*((u32 *) key + 1)));
|
|
+#endif
|
|
+ } else {
|
|
+ /* 3DES mode (EDE-x) */
|
|
+ switch (dctx->key_length) {
|
|
+ case 24:
|
|
+ des->K3HR = DEU_ENDIAN_SWAP(*((u32 *) key + 4));
|
|
+ des->K3LR = DEU_ENDIAN_SWAP(*((u32 *) key + 5));
|
|
+ /* no break; */
|
|
+ case 16:
|
|
+ des->K2HR = DEU_ENDIAN_SWAP(*((u32 *) key + 2));
|
|
+ des->K2LR = DEU_ENDIAN_SWAP(*((u32 *) key + 3));
|
|
+ /* no break; */
|
|
+ case 8:
|
|
+ des->K1HR = DEU_ENDIAN_SWAP(*((u32 *) key + 0));
|
|
+ des->K1LR = DEU_ENDIAN_SWAP(*((u32 *) key + 1));
|
|
+ break;
|
|
+ default:
|
|
+ CRTCL_SECT_END;
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* write init vector (not required for ECB mode) */
|
|
+ if (mode > 0) {
|
|
+ des->IVHR = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
|
|
+ des->IVLR = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
|
|
+ }
|
|
+
|
|
+#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+ nblocks = nbytes / 4;
|
|
+
|
|
+ for (i = 0; i < nblocks; i += 2) {
|
|
+ /* wait for busy bit to clear */
|
|
+
|
|
+ /*--- Workaround ---------------------------------------------
|
|
+ do a dummy read to the busy flag because it is not raised
|
|
+ early enough in CFB/OFB 3DES modes */
|
|
+#ifdef CRYPTO_DEBUG
|
|
+ printk("ihr: %x\n", (*((u32 *) in_arg + i)));
|
|
+ printk("ilr: %x\n", (*((u32 *) in_arg + 1 + i)));
|
|
+#endif
|
|
+ des->IHR = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + i));
|
|
+ /* start crypto */
|
|
+ des->ILR = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + 1 + i));
|
|
+
|
|
+ while (des->ctrl.BUS) {
|
|
+ /* this will not take long */
|
|
+ }
|
|
+
|
|
+ *((u32 *) out_arg + 0 + i) = des->OHR;
|
|
+ *((u32 *) out_arg + 1 + i) = des->OLR;
|
|
+
|
|
+#ifdef CRYPTO_DEBUG
|
|
+ printk("ohr: %x\n", (*((u32 *) out_arg + i)));
|
|
+ printk("olr: %x\n", (*((u32 *) out_arg + 1 + i)));
|
|
+#endif
|
|
+ }
|
|
+
|
|
+#else /* dma mode */
|
|
+
|
|
+ /* Prepare Rx buf length used in dma pseudo interrupt */
|
|
+ /* deu_priv->deu_rx_buf = out_arg; */
|
|
+ /* deu_priv->deu_rx_len = nbytes; */
|
|
+
|
|
+ /* memory alignment issue */
|
|
+ dword_mem_aligned_in = (u32 *) DEU_DWORD_REORDERING(in_arg, des_buff_in,
|
|
+ BUFFER_IN, nbytes);
|
|
+
|
|
+ dma->ctrl.ALGO = 0; /* DES */
|
|
+ des->ctrl.DAU = 0;
|
|
+ dma->ctrl.BS = 0;
|
|
+ dma->ctrl.EN = 1;
|
|
+
|
|
+ while (des->ctrl.BUS) {
|
|
+ /* wait for DES hardware to be ready */
|
|
+ };
|
|
+
|
|
+ wlen = dma_device_write(dma_device, (u8 *) dword_mem_aligned_in, nbytes,
|
|
+ NULL);
|
|
+ if (wlen != nbytes) {
|
|
+ dma->ctrl.EN = 0;
|
|
+ CRTCL_SECT_END;
|
|
+ printk(KERN_ERR "[%s %s %d]: dma_device_write fail!\n",
|
|
+ __FILE__, __func__, __LINE__);
|
|
+ return; /* -EINVAL; */
|
|
+ }
|
|
+
|
|
+ WAIT_DES_DMA_READY();
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_POLL_DMA
|
|
+ outcopy = (u32 *) DEU_DWORD_REORDERING(out_arg, des_buff_out,
|
|
+ BUFFER_OUT, nbytes);
|
|
+
|
|
+ /* polling DMA rx channel */
|
|
+ while ((dma_device_read(dma_device, (u8 **) &out_dma, NULL)) == 0) {
|
|
+ timeout++;
|
|
+
|
|
+ if (timeout >= 333000) {
|
|
+ dma->ctrl.EN = 0;
|
|
+ CRTCL_SECT_END;
|
|
+ printk(KERN_ERR "[%s %s %d]: timeout!!\n",
|
|
+ __FILE__, __func__, __LINE__);
|
|
+ return; /* -EINVAL; */
|
|
+ }
|
|
+ }
|
|
+
|
|
+ WAIT_DES_DMA_READY();
|
|
+
|
|
+ DES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes);
|
|
+#else
|
|
+ CRTCL_SECT_END; /* Sleep and wait for Rx finished */
|
|
+ DEU_WAIT_EVENT(deu_priv->deu_thread_wait, DEU_EVENT,
|
|
+ deu_priv->deu_event_flags);
|
|
+ CRTCL_SECT_START;
|
|
+#endif
|
|
+
|
|
+#endif /* dma mode */
|
|
+
|
|
+ if (mode > 0) {
|
|
+ *(u32 *) iv_arg = DEU_ENDIAN_SWAP(des->IVHR);
|
|
+ *((u32 *) iv_arg + 1) = DEU_ENDIAN_SWAP(des->IVLR);
|
|
+ };
|
|
+
|
|
+ CRTCL_SECT_END;
|
|
+}
|
|
+
|
|
+/* definitions from linux/include/crypto.h:
|
|
+#define CRYPTO_TFM_MODE_ECB 0x00000001
|
|
+#define CRYPTO_TFM_MODE_CBC 0x00000002
|
|
+#define CRYPTO_TFM_MODE_CFB 0x00000004
|
|
+#define CRYPTO_TFM_MODE_CTR 0x00000008
|
|
+#define CRYPTO_TFM_MODE_OFB 0x00000010
|
|
+but hardware definition: 0 ECB 1 CBC 2 OFB 3 CFB 4 CTR */
|
|
+
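+/*
+ * Illustrative sketch (hypothetical helper, not used by this driver):
+ * one way to translate the CRYPTO_TFM_MODE_* flag values quoted above
+ * into the hardware encoding; the cipher entry points below pass the
+ * hardware value directly instead.
+ */
+#if 0
+static int deu_mode_from_tfm_flags(u32 tfm_mode)
+{
+ switch (tfm_mode) {
+ case 0x00000001: return 0; /* ECB */
+ case 0x00000002: return 1; /* CBC */
+ case 0x00000010: return 2; /* OFB */
+ case 0x00000004: return 3; /* CFB */
+ case 0x00000008: return 4; /* CTR */
+ default: return -EINVAL;
+ }
+}
+#endif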
|
|
+/** \fn void deu_des(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief main interface to DES hardware
|
|
+ * \param ctx_arg crypto algo context
|
|
+ * \param out_arg output bytestream
|
|
+ * \param in_arg input bytestream
|
|
+ * \param iv_arg initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param mode operation mode such as ecb, cbc
|
|
+*/
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+static void deu_des(void *ctx_arg,
|
|
+ u8 *out_arg,
|
|
+ const u8 *in_arg,
|
|
+ u8 *iv_arg,
|
|
+ u32 nbytes,
|
|
+ int encdec,
|
|
+ int mode)
|
|
+{
|
|
+ u32 remain = nbytes;
|
|
+ u32 inc;
|
|
+
|
|
+ DPRINTF(0, "\n");
|
|
+
|
|
+ while (remain > 0) {
|
|
+ if (remain >= DEU_MAX_PACKET_SIZE)
|
|
+ inc = DEU_MAX_PACKET_SIZE;
|
|
+ else
|
|
+ inc = remain;
|
|
+
|
|
+ remain -= inc;
|
|
+
|
|
+ deu_des_core(ctx_arg, out_arg, in_arg, iv_arg, inc, encdec,
|
|
+ mode);
|
|
+
|
|
+ out_arg += inc;
|
|
+ in_arg += inc;
|
|
+ }
|
|
+}
|
|
+#endif
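+/*
+ * Example of the chunking above: a request of
+ * (2 * DEU_MAX_PACKET_SIZE + 16) bytes is issued as three
+ * deu_des_core() calls of DEU_MAX_PACKET_SIZE, DEU_MAX_PACKET_SIZE
+ * and 16 bytes.
+ */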
|
|
+
|
|
+/** \fn void deu_des_ecb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief sets DES hardware to ECB mode
|
|
+ * \param ctx crypto algo context
|
|
+ * \param dst output bytestream
|
|
+ * \param src input bytestream
|
|
+ * \param iv initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param inplace not used
|
|
+*/
|
|
+
|
|
+static void deu_des_ecb(void *ctx,
|
|
+ uint8_t *dst,
|
|
+ const uint8_t *src,
|
|
+ uint8_t *iv,
|
|
+ size_t nbytes,
|
|
+ int encdec,
|
|
+ int inplace)
|
|
+{
|
|
+ DPRINTF(0, "ctx @%p\n", ctx);
|
|
+ deu_des(ctx, dst, src, NULL, nbytes, encdec, 0);
|
|
+}
|
|
+
|
|
+/** \fn void deu_des_cbc(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief sets DES hardware to CBC mode
|
|
+ * \param ctx crypto algo context
|
|
+ * \param dst output bytestream
|
|
+ * \param src input bytestream
|
|
+ * \param iv initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param inplace not used
|
|
+*/
|
|
+static void deu_des_cbc(void *ctx,
|
|
+ uint8_t *dst,
|
|
+ const uint8_t *src,
|
|
+ uint8_t *iv,
|
|
+ size_t nbytes,
|
|
+ int encdec,
|
|
+ int inplace)
|
|
+{
|
|
+ DPRINTF(0, "ctx @%p\n", ctx);
|
|
+ deu_des(ctx, dst, src, iv, nbytes, encdec, 1);
|
|
+}
|
|
+
|
|
+#if 0
|
|
+/** \fn void deu_des_ofb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief sets DES hardware to OFB mode
|
|
+ * \param ctx crypto algo context
|
|
+ * \param dst output bytestream
|
|
+ * \param src input bytestream
|
|
+ * \param iv initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param inplace not used
|
|
+*/
|
|
+static void deu_des_ofb(void *ctx,
|
|
+ uint8_t *dst,
|
|
+ const uint8_t *src,
|
|
+ uint8_t *iv,
|
|
+ size_t nbytes,
|
|
+ int encdec,
|
|
+ int inplace)
|
|
+{
|
|
+ DPRINTF(0, "ctx @%p\n", ctx);
|
|
+ deu_des(ctx, dst, src, iv, nbytes, encdec, 2);
|
|
+}
|
|
+
|
|
+/** \fn void deu_des_cfb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
|
|
+ \ingroup LQ_DES_FUNCTIONS
|
|
+ \brief sets DES hardware to CFB mode
|
|
+ \param ctx crypto algo context
|
|
+ \param dst output bytestream
|
|
+ \param src input bytestream
|
|
+ \param iv initialization vector
|
|
+ \param nbytes length of bytestream
|
|
+ \param encdec 1 for encrypt; 0 for decrypt
|
|
+ \param inplace not used
|
|
+*/
|
|
+static void deu_des_cfb(void *ctx,
|
|
+ uint8_t *dst,
|
|
+ const uint8_t *src,
|
|
+ uint8_t *iv,
|
|
+ size_t nbytes,
|
|
+ int encdec,
|
|
+ int inplace)
|
|
+{
|
|
+ DPRINTF(0, "ctx @%p\n", ctx);
|
|
+ deu_des(ctx, dst, src, iv, nbytes, encdec, 3);
|
|
+}
|
|
+
|
|
+/** \fn void deu_des_ctr(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief sets DES hardware to CTR mode
|
|
+ * \param ctx crypto algo context
|
|
+ * \param dst output bytestream
|
|
+ * \param src input bytestream
|
|
+ * \param iv initialization vector
|
|
+ * \param nbytes length of bytestream
|
|
+ * \param encdec 1 for encrypt; 0 for decrypt
|
|
+ * \param inplace not used
|
|
+*/
|
|
+static void deu_des_ctr(void *ctx,
|
|
+ uint8_t *dst,
|
|
+ const uint8_t *src,
|
|
+ uint8_t *iv,
|
|
+ size_t nbytes,
|
|
+ int encdec,
|
|
+ int inplace)
|
|
+{
|
|
+ DPRINTF(0, "ctx @%p\n", ctx);
|
|
+ deu_des(ctx, dst, src, iv, nbytes, encdec, 4);
|
|
+}
|
|
+#endif
|
|
+
|
|
+/** \fn void des_encrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief encrypt DES_BLOCK_SIZE of data
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param out output bytestream
|
|
+ * \param in input bytestream
|
|
+*/
|
|
+static void des_encrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
|
|
+{
|
|
+ struct des_ctx *ctx = crypto_tfm_ctx(tfm);
|
|
+ DPRINTF(0, "ctx @%p\n", ctx);
|
|
+ deu_des(ctx, out, in, NULL, DES_BLOCK_SIZE, CRYPTO_DIR_ENCRYPT, 0);
|
|
+}
|
|
+
|
|
+/** \fn void des_decrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief decrypt DES_BLOCK_SIZE of data
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param out output bytestream
|
|
+ * \param in input bytestream
|
|
+*/
|
|
+static void des_decrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
|
|
+{
|
|
+ struct des_ctx *ctx = crypto_tfm_ctx(tfm);
|
|
+ DPRINTF(0, "ctx @%p\n", ctx);
|
|
+ deu_des(ctx, out, in, NULL, DES_BLOCK_SIZE, CRYPTO_DIR_DECRYPT, 0);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * \brief RFC2451:
|
|
+ *
|
|
+ * For DES-EDE3, there is no known need to reject weak or
|
|
+ * complementation keys. Any weakness is obviated by the use of
|
|
+ * multiple keys.
|
|
+ *
|
|
+ * However, if the first two or last two independent 64-bit keys are
|
|
+ * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
|
|
+ * same as DES. Implementers MUST reject keys that exhibit this
|
|
+ * property.
|
|
+ *
|
|
+ */
|
|
+
|
|
+/** \fn int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief sets 3DES key
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param key input key
|
|
+ * \param key_len key length
|
|
+*/
|
|
+static int des3_ede_setkey(struct crypto_tfm *tfm,
|
|
+ const u8 *key,
|
|
+ unsigned int key_len)
|
|
+{
|
|
+ struct des_ctx *ctx = crypto_tfm_ctx(tfm);
|
|
+
|
|
+ DPRINTF(0, "ctx @%p, key_len %d\n", ctx, key_len);
|
|
+
|
|
+ ctx->controlr_M = key_len / 8 + 1; /* 3DES EDE1 / EDE2 / EDE3 Mode */
|
|
+ ctx->key_length = key_len;
|
|
+
|
|
+ memcpy((u8 *)(ctx->expkey), key, key_len);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
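+/*
+ * Usage sketch (hypothetical caller, not part of this driver): once the
+ * "des3_ede" cipher below is registered, it can be driven through the
+ * generic single-block cipher API, which ends up in des3_ede_setkey()
+ * and des_encrypt() above.
+ */
+#if 0
+static int des3_ede_usage_example(void)
+{
+ u8 key[DES3_EDE_KEY_SIZE] = { 0 }; /* all-zero key, for illustration only */
+ u8 in[DES_BLOCK_SIZE] = { 0 }, out[DES_BLOCK_SIZE];
+ struct crypto_cipher *tfm = crypto_alloc_cipher("des3_ede", 0, 0);
+
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+ crypto_cipher_setkey(tfm, key, sizeof(key));
+ crypto_cipher_encrypt_one(tfm, out, in); /* one DES_BLOCK_SIZE block */
+ crypto_free_cipher(tfm);
+ return 0;
+}
+#endif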
|
|
+/*
|
|
+ * \brief DES function mappings
|
|
+*/
|
|
+static struct crypto_alg des_alg = {
|
|
+ .cra_name = "des",
|
|
+ .cra_driver_name = "lq_deu-des",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
|
+ .cra_blocksize = DES_BLOCK_SIZE,
|
|
+ .cra_ctxsize = sizeof(struct des_ctx),
|
|
+ .cra_module = THIS_MODULE,
|
|
+ .cra_alignmask = 3,
|
|
+ .cra_list = LIST_HEAD_INIT(des_alg.cra_list),
|
|
+ .cra_u = {
|
|
+ .cipher = {
|
|
+ .cia_min_keysize = DES_KEY_SIZE,
|
|
+ .cia_max_keysize = DES_KEY_SIZE,
|
|
+ .cia_setkey = des_setkey,
|
|
+ .cia_encrypt = des_encrypt,
|
|
+ .cia_decrypt = des_decrypt
|
|
+ }
|
|
+ }
|
|
+};
|
|
+
|
|
+/*
|
|
+ * \brief DES function mappings
|
|
+*/
|
|
+static struct crypto_alg des3_ede_alg = {
|
|
+ .cra_name = "des3_ede",
|
|
+ .cra_driver_name = "lq_deu-des3_ede",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
|
+ .cra_blocksize = DES_BLOCK_SIZE,
|
|
+ .cra_ctxsize = sizeof(struct des_ctx),
|
|
+ .cra_module = THIS_MODULE,
|
|
+ .cra_alignmask = 3,
|
|
+ .cra_list = LIST_HEAD_INIT(des3_ede_alg.cra_list),
|
|
+ .cra_u = {
|
|
+ .cipher = {
|
|
+ .cia_min_keysize = DES_KEY_SIZE,
|
|
+ .cia_max_keysize = DES3_EDE_KEY_SIZE,
|
|
+ .cia_setkey = des3_ede_setkey,
|
|
+ .cia_encrypt = des_encrypt,
|
|
+ .cia_decrypt = des_decrypt
|
|
+ }
|
|
+ }
|
|
+};
|
|
+
|
|
+/** \fn int ecb_des_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief ECB DES encrypt using linux crypto blkcipher
|
|
+ * \param desc blkcipher descriptor
|
|
+ * \param dst output scatterlist
|
|
+ * \param src input scatterlist
|
|
+ * \param nbytes data size in bytes
|
|
+*/
|
|
+static int ecb_des_encrypt(struct blkcipher_desc *desc,
|
|
+ struct scatterlist *dst,
|
|
+ struct scatterlist *src,
|
|
+ unsigned int nbytes)
|
|
+{
|
|
+ struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
|
+ struct blkcipher_walk walk;
|
|
+ int err;
|
|
+
|
|
+ DPRINTF(0, "ctx @%p\n", ctx);
|
|
+
|
|
+ blkcipher_walk_init(&walk, dst, src, nbytes);
|
|
+ err = blkcipher_walk_virt(desc, &walk);
|
|
+
|
|
+ while ((nbytes = walk.nbytes)) {
|
|
+ nbytes -= (nbytes % DES_BLOCK_SIZE);
|
|
+ deu_des_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ NULL, nbytes, CRYPTO_DIR_ENCRYPT, 0);
|
|
+ nbytes &= DES_BLOCK_SIZE - 1;
|
|
+ err = blkcipher_walk_done(desc, &walk, nbytes);
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+/** \fn int ecb_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief ECB DES decrypt using linux crypto blkcipher
|
|
+ * \param desc blkcipher descriptor
|
|
+ * \param dst output scatterlist
|
|
+ * \param src input scatterlist
|
|
+ * \param nbytes data size in bytes
|
|
+ * \return err
|
|
+*/
|
|
+static int ecb_des_decrypt(struct blkcipher_desc *desc,
|
|
+ struct scatterlist *dst,
|
|
+ struct scatterlist *src,
|
|
+ unsigned int nbytes)
|
|
+{
|
|
+ struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
|
+ struct blkcipher_walk walk;
|
|
+ int err;
|
|
+
|
|
+ DPRINTF(0, "ctx @%p\n", ctx);
|
|
+
|
|
+ blkcipher_walk_init(&walk, dst, src, nbytes);
|
|
+ err = blkcipher_walk_virt(desc, &walk);
|
|
+
|
|
+ while ((nbytes = walk.nbytes)) {
|
|
+ nbytes -= (nbytes % DES_BLOCK_SIZE);
|
|
+ deu_des_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ NULL, nbytes, CRYPTO_DIR_DECRYPT, 0);
|
|
+ nbytes &= DES_BLOCK_SIZE - 1;
|
|
+ err = blkcipher_walk_done(desc, &walk, nbytes);
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * \brief DES function mappings
|
|
+*/
|
|
+static struct crypto_alg ecb_des_alg = {
|
|
+ .cra_name = "ecb(des)",
|
|
+ .cra_driver_name = "lq_deu-ecb(des)",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
|
+ .cra_blocksize = DES_BLOCK_SIZE,
|
|
+ .cra_ctxsize = sizeof(struct des_ctx),
|
|
+ .cra_type = &crypto_blkcipher_type,
|
|
+ .cra_module = THIS_MODULE,
|
|
+ .cra_list = LIST_HEAD_INIT(ecb_des_alg.cra_list),
|
|
+ .cra_u = {
|
|
+ .blkcipher = {
|
|
+ .min_keysize = DES_KEY_SIZE,
|
|
+ .max_keysize = DES_KEY_SIZE,
|
|
+ .setkey = des_setkey,
|
|
+ .encrypt = ecb_des_encrypt,
|
|
+ .decrypt = ecb_des_decrypt,
|
|
+ }
|
|
+ }
|
|
+};
|
|
+
|
|
+/*
|
|
+ * \brief DES function mappings
|
|
+*/
|
|
+static struct crypto_alg ecb_des3_ede_alg = {
|
|
+ .cra_name = "ecb(des3_ede)",
|
|
+ .cra_driver_name = "lq_deu-ecb(des3_ede)",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
|
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
|
+ .cra_ctxsize = sizeof(struct des_ctx),
|
|
+ .cra_type = &crypto_blkcipher_type,
|
|
+ .cra_module = THIS_MODULE,
|
|
+ .cra_list = LIST_HEAD_INIT(ecb_des3_ede_alg.cra_list),
|
|
+ .cra_u = {
|
|
+ .blkcipher = {
|
|
+ .min_keysize = DES3_EDE_KEY_SIZE,
|
|
+ .max_keysize = DES3_EDE_KEY_SIZE,
|
|
+ .setkey = des3_ede_setkey,
|
|
+ .encrypt = ecb_des_encrypt,
|
|
+ .decrypt = ecb_des_decrypt,
|
|
+ }
|
|
+ }
|
|
+};
|
|
+
|
|
+/** \fn int cbc_des_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief CBC DES encrypt using linux crypto blkcipher
|
|
+ * \param desc blkcipher descriptor
|
|
+ * \param dst output scatterlist
|
|
+ * \param src input scatterlist
|
|
+ * \param nbytes data size in bytes
|
|
+ * \return err
|
|
+*/
|
|
+static int cbc_des_encrypt(struct blkcipher_desc *desc,
|
|
+ struct scatterlist *dst,
|
|
+ struct scatterlist *src,
|
|
+ unsigned int nbytes)
|
|
+{
|
|
+ struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
|
+ struct blkcipher_walk walk;
|
|
+ int err;
|
|
+
|
|
+ DPRINTF(0, "ctx @%p\n", ctx);
|
|
+
|
|
+ blkcipher_walk_init(&walk, dst, src, nbytes);
|
|
+ err = blkcipher_walk_virt(desc, &walk);
|
|
+
|
|
+ while ((nbytes = walk.nbytes)) {
|
|
+ u8 *iv = walk.iv;
|
|
+ /* printk("iv = %08x\n", *(u32 *)iv); */
|
|
+ nbytes -= (nbytes % DES_BLOCK_SIZE);
|
|
+ deu_des_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
|
|
+ nbytes &= DES_BLOCK_SIZE - 1;
|
|
+ err = blkcipher_walk_done(desc, &walk, nbytes);
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+/** \fn int cbc_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief CBC DES decrypt using linux crypto blkcipher
|
|
+ * \param desc blkcipher descriptor
|
|
+ * \param dst output scatterlist
|
|
+ * \param src input scatterlist
|
|
+ * \param nbytes data size in bytes
|
|
+ * \return err
|
|
+*/
|
|
+static int cbc_des_decrypt(struct blkcipher_desc *desc,
|
|
+ struct scatterlist *dst,
|
|
+ struct scatterlist *src,
|
|
+ unsigned int nbytes)
|
|
+{
|
|
+ struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
|
+ struct blkcipher_walk walk;
|
|
+ int err;
|
|
+
|
|
+ DPRINTF(0, "ctx @%p\n", ctx);
|
|
+
|
|
+ blkcipher_walk_init(&walk, dst, src, nbytes);
|
|
+ err = blkcipher_walk_virt(desc, &walk);
|
|
+
|
|
+ while ((nbytes = walk.nbytes)) {
|
|
+ u8 *iv = walk.iv;
|
|
+ /* printk("iv = %08x\n", *(u32 *)iv); */
|
|
+ nbytes -= (nbytes % DES_BLOCK_SIZE);
|
|
+ deu_des_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
|
+ iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
|
|
+ nbytes &= DES_BLOCK_SIZE - 1;
|
|
+ err = blkcipher_walk_done(desc, &walk, nbytes);
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * \brief DES function mappings
|
|
+*/
|
|
+static struct crypto_alg cbc_des_alg = {
|
|
+ .cra_name = "cbc(des)",
|
|
+ .cra_driver_name = "lq_deu-cbc(des)",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
|
+ .cra_blocksize = DES_BLOCK_SIZE,
|
|
+ .cra_ctxsize = sizeof(struct des_ctx),
|
|
+ .cra_type = &crypto_blkcipher_type,
|
|
+ .cra_module = THIS_MODULE,
|
|
+ .cra_list = LIST_HEAD_INIT(cbc_des_alg.cra_list),
|
|
+ .cra_u = {
|
|
+ .blkcipher = {
|
|
+ .min_keysize = DES_KEY_SIZE,
|
|
+ .max_keysize = DES_KEY_SIZE,
|
|
+ .ivsize = DES_BLOCK_SIZE,
|
|
+ .setkey = des_setkey,
|
|
+ .encrypt = cbc_des_encrypt,
|
|
+ .decrypt = cbc_des_decrypt,
|
|
+ }
|
|
+ }
|
|
+};
|
|
+
|
|
+/*
|
|
+ * \brief DES function mappings
|
|
+*/
|
|
+static struct crypto_alg cbc_des3_ede_alg = {
|
|
+ .cra_name = "cbc(des3_ede)",
|
|
+ .cra_driver_name = "lq_deu-cbc(des3_ede)",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
|
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
|
+ .cra_ctxsize = sizeof(struct des_ctx),
|
|
+ .cra_type = &crypto_blkcipher_type,
|
|
+ .cra_module = THIS_MODULE,
|
|
+ .cra_list = LIST_HEAD_INIT(cbc_des3_ede_alg.cra_list),
|
|
+ .cra_u = {
|
|
+ .blkcipher = {
|
|
+ .min_keysize = DES3_EDE_KEY_SIZE,
|
|
+ .max_keysize = DES3_EDE_KEY_SIZE,
|
|
+ .ivsize = DES_BLOCK_SIZE,
|
|
+ .setkey = des3_ede_setkey,
|
|
+ .encrypt = cbc_des_encrypt,
|
|
+ .decrypt = cbc_des_decrypt,
|
|
+ }
|
|
+ }
|
|
+};
|
|
+
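+/*
+ * Usage sketch (hypothetical caller, not part of this driver): the
+ * blkcipher algorithms above are reached through the generic kernel
+ * crypto API, e.g. for "cbc(des3_ede)":
+ */
+#if 0
+static int cbc_des3_ede_usage_example(void)
+{
+ u8 key[DES3_EDE_KEY_SIZE] = { 0 };
+ u8 iv[DES_BLOCK_SIZE] = { 0 };
+ u8 buf[4 * DES_BLOCK_SIZE] = { 0 };
+ struct scatterlist sg;
+ struct blkcipher_desc desc;
+ struct crypto_blkcipher *tfm = crypto_alloc_blkcipher("cbc(des3_ede)", 0, 0);
+
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+ desc.tfm = tfm;
+ desc.flags = 0;
+ crypto_blkcipher_setkey(tfm, key, sizeof(key));
+ crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));
+ sg_init_one(&sg, buf, sizeof(buf));
+ crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf)); /* in-place, ends up in cbc_des_encrypt() */
+ crypto_free_blkcipher(tfm);
+ return 0;
+}
+#endif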
|
|
+/** \fn int lq_deu_init_des(void)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief initialize des driver
|
|
+*/
|
|
+int lq_deu_init_des(void)
|
|
+{
|
|
+ int ret = 0;
|
|
+
|
|
+ ret = crypto_register_alg(&des_alg);
|
|
+ if (ret < 0)
|
|
+ goto des_err;
|
|
+
|
|
+ ret = crypto_register_alg(&ecb_des_alg);
|
|
+ if (ret < 0)
|
|
+ goto ecb_des_err;
|
|
+
|
|
+ ret = crypto_register_alg(&cbc_des_alg);
|
|
+ if (ret < 0)
|
|
+ goto cbc_des_err;
|
|
+
|
|
+ ret = crypto_register_alg(&des3_ede_alg);
|
|
+ if (ret < 0)
|
|
+ goto des3_ede_err;
|
|
+
|
|
+ ret = crypto_register_alg(&ecb_des3_ede_alg);
|
|
+ if (ret < 0)
|
|
+ goto ecb_des3_ede_err;
|
|
+
|
|
+ ret = crypto_register_alg(&cbc_des3_ede_alg);
|
|
+ if (ret < 0)
|
|
+ goto cbc_des3_ede_err;
|
|
+
|
|
+ deu_des_chip_init();
|
|
+
|
|
+ CRTCL_SECT_INIT;
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+ if (ALLOCATE_MEMORY(BUFFER_IN, DES_ALGO) < 0) {
|
|
+ printk(KERN_ERR "[%s %s %d]: malloc memory fail!\n",
|
|
+ __FILE__, __func__, __LINE__);
|
|
+ ret = -ENOMEM;
|
|
+ goto deu_mem_err;
|
|
+ }
|
|
+ if (ALLOCATE_MEMORY(BUFFER_OUT, DES_ALGO) < 0) {
|
|
+ printk(KERN_ERR "[%s %s %d]: malloc memory fail!\n",
|
|
+ __FILE__, __func__, __LINE__);
|
|
+ ret = -ENOMEM;
|
|
+ goto deu_mem_err;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ printk(KERN_NOTICE "Lantiq DEU DES initialized%s.\n",
|
|
+ disable_deudma ? "" : " (DMA)");
|
|
+ return ret;
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+deu_mem_err:
|
|
+ crypto_unregister_alg(&cbc_des3_ede_alg);
|
|
+#endif
|
|
+cbc_des3_ede_err:
|
|
+ crypto_unregister_alg(&ecb_des3_ede_alg);
|
|
+ecb_des3_ede_err:
|
|
+ crypto_unregister_alg(&des3_ede_alg);
|
|
+des3_ede_err:
|
|
+ crypto_unregister_alg(&cbc_des_alg);
|
|
+cbc_des_err:
|
|
+ crypto_unregister_alg(&ecb_des_alg);
|
|
+ecb_des_err:
|
|
+ crypto_unregister_alg(&des_alg);
|
|
+des_err:
|
|
+ printk(KERN_ERR "Lantiq DEU DES initialization failed!\n");
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/** \fn void lq_deu_fini_des(void)
|
|
+ * \ingroup LQ_DES_FUNCTIONS
|
|
+ * \brief unregister des driver
|
|
+*/
|
|
+void lq_deu_fini_des(void)
|
|
+{
|
|
+ crypto_unregister_alg(&des_alg);
|
|
+ crypto_unregister_alg(&ecb_des_alg);
|
|
+ crypto_unregister_alg(&cbc_des_alg);
|
|
+ crypto_unregister_alg(&des3_ede_alg);
|
|
+ crypto_unregister_alg(&ecb_des3_ede_alg);
|
|
+ crypto_unregister_alg(&cbc_des3_ede_alg);
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+ FREE_MEMORY(des_buff_in);
|
|
+ FREE_MEMORY(des_buff_out);
|
|
+#endif /* CONFIG_CRYPTO_DEV_LANTIQ_DMA */
|
|
+}
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/deu.c
|
|
@@ -0,0 +1,195 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus
|
|
+ */
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup API
|
|
+ \brief Lantiq DEU driver module
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file deu.c
|
|
+ \ingroup LQ_DEU
|
|
+ \brief main DEU driver file
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU_FUNCTIONS LQ_DEU_FUNCTIONS
|
|
+ \ingroup LQ_DEU
|
|
+ \brief Lantiq DEU functions
|
|
+*/
|
|
+
|
|
+#include <linux/version.h>
|
|
+#if defined(CONFIG_MODVERSIONS)
|
|
+#define MODVERSIONS
|
|
+#include <linux/modversions.h>
|
|
+#endif
|
|
+#include <linux/module.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/crypto.h>
|
|
+#include <linux/proc_fs.h>
|
|
+#include <linux/fs.h> /* Stuff about file systems that we need */
|
|
+#include <asm/byteorder.h>
|
|
+
|
|
+#if 0
|
|
+#ifdef CONFIG_SOC_LANTIQ_XWAY
|
|
+# include <lq_pmu.h>
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+#include "deu.h"
|
|
+
|
|
+struct lq_crypto_priv lq_crypto_ops;
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+int disable_deudma = 0;
|
|
+#else
|
|
+int disable_deudma = 1;
|
|
+#endif /* CONFIG_CRYPTO_DEV_LANTIQ_DMA */
|
|
+
|
|
+#ifdef CRYPTO_DEBUG
|
|
+char deu_debug_level = 3;
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_MODULE
|
|
+# define STATIC static
|
|
+#else
|
|
+# define STATIC
|
|
+#endif
|
|
+
|
|
+/** \fn int lq_deu_init(void)
|
|
+ * \ingroup LQ_DEU_FUNCTIONS
|
|
+ * \brief link all modules that have been selected in kernel config for Lantiq HW crypto support
|
|
+ * \return ret
|
|
+*/
|
|
+int lq_deu_init(void)
|
|
+{
|
|
+ int ret = -ENOSYS;
|
|
+ u32 config;
|
|
+
|
|
+ printk(KERN_INFO "Lantiq crypto hardware driver version %s\n",
|
|
+ LQ_DEU_DRV_VERSION);
|
|
+
|
|
+ config = deu_chip_init();
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+ deu_dma_init();
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_AES)
|
|
+ if (config & LQ_DEU_ID_AES) {
|
|
+ if ((ret = lq_deu_init_aes())) {
|
|
+ printk(KERN_ERR "Lantiq AES initialization failed!\n");
|
|
+ }
|
|
+ } else {
|
|
+ printk(KERN_ERR "Lantiq AES not supported!\n");
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#ifdef CONFIG_SOC_LANTIQ_XWAY
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_DES)
|
|
+ if (config & LQ_DEU_ID_DES) {
|
|
+ if ((ret = lq_deu_init_des())) {
|
|
+ printk(KERN_ERR "Lantiq DES initialization failed!\n");
|
|
+ }
|
|
+ } else {
|
|
+ printk(KERN_ERR "Lantiq DES not supported!\n");
|
|
+ }
|
|
+#endif
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_ARC4) && defined(CONFIG_CRYPTO_DEV_LANTIQ_DMA)
|
|
+ if ((ret = lq_deu_init_arc4())) {
|
|
+ printk(KERN_ERR "Lantiq ARC4 initialization failed!\n");
|
|
+ }
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_SHA1)
|
|
+ if (config & LQ_DEU_ID_HASH) {
|
|
+ if ((ret = lq_deu_init_sha1())) {
|
|
+ printk(KERN_ERR "Lantiq SHA1 initialization failed!\n");
|
|
+ }
|
|
+ } else {
|
|
+ printk(KERN_ERR "Lantiq SHA1 not supported!\n");
|
|
+ }
|
|
+#endif
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_MD5)
|
|
+ if (config & LQ_DEU_ID_HASH) {
|
|
+ if ((ret = lq_deu_init_md5())) {
|
|
+ printk(KERN_ERR "Lantiq MD5 initialization failed!\n");
|
|
+ }
|
|
+ } else {
|
|
+ printk(KERN_ERR "Lantiq MD5 not supported!\n");
|
|
+ }
|
|
+#endif
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_SHA1_HMAC)
|
|
+ if ((ret = lq_deu_init_sha1_hmac())) {
|
|
+ printk(KERN_ERR "Lantiq SHA1_HMAC initialization failed!\n");
|
|
+ }
|
|
+#endif
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_MD5_HMAC)
|
|
+ if ((ret = lq_deu_init_md5_hmac())) {
|
|
+ printk(KERN_ERR "Lantiq MD5_HMAC initialization failed!\n");
|
|
+ }
|
|
+#endif
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/** \fn void lq_deu_exit(void)
|
|
+ * \ingroup LQ_DEU_FUNCTIONS
|
|
+ * \brief remove the loaded crypto algorithms
|
|
+*/
|
|
+void lq_deu_exit(void)
|
|
+{
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_AES)
|
|
+ lq_deu_fini_aes();
|
|
+#endif
|
|
+#ifdef CONFIG_SOC_LANTIQ_XWAY
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_DES)
|
|
+ lq_deu_fini_des();
|
|
+#endif
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_ARC4) \
|
|
+ && defined(CONFIG_CRYPTO_DEV_LANTIQ_DMA)
|
|
+ lq_deu_fini_arc4();
|
|
+#endif
|
|
+#endif
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_SHA1)
|
|
+ lq_deu_fini_sha1();
|
|
+#endif
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_MD5)
|
|
+ lq_deu_fini_md5();
|
|
+#endif
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_SHA1_HMAC)
|
|
+ lq_deu_fini_sha1_hmac();
|
|
+#endif
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_MD5_HMAC)
|
|
+ lq_deu_fini_md5_hmac();
|
|
+#endif
|
|
+
|
|
+ printk("DEU has exited successfully\n");
|
|
+
|
|
+#if defined(CONFIG_CRYPTO_DEV_LANTIQ_DMA)
|
|
+ deu_dma_exit();
|
|
+ printk("DMA has deregistered successfully\n");
|
|
+#endif
|
|
+}
|
|
+
|
|
+EXPORT_SYMBOL(lq_deu_init);
|
|
+EXPORT_SYMBOL(lq_deu_exit);
|
|
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/deu.h
|
|
@@ -0,0 +1,248 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus
|
|
+ */
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup API
|
|
+ \brief Lantiq DEU driver module
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file deu.h
|
|
+ \brief Main DEU driver header file
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU_DEFINITIONS LQ_DEU_DEFINITIONS
|
|
+ \ingroup LQ_DEU
|
|
+ \brief Lantiq DEU definitions
|
|
+*/
|
|
+
|
|
+
|
|
+#ifndef DEU_H
|
|
+#define DEU_H
|
|
+
|
|
+#undef CRYPTO_DEBUG
|
|
+
|
|
+#define LQ_DEU_DRV_VERSION "1.0.1"
|
|
+
|
|
+#if defined(CONFIG_LANTIQ_DANUBE)
|
|
+# include "deu_danube.h"
|
|
+#elif defined(CONFIG_LANTIQ_AR9)
|
|
+# include "deu_ar9.h"
|
|
+#elif defined(CONFIG_SOC_LANTIQ_FALCON)
|
|
+# include "deu_falcon.h"
|
|
+#else
|
|
+//# error "Unknown platform"
|
|
+# include "deu_danube.h"
|
|
+#endif
|
|
+
|
|
+struct lq_crypto_priv {
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+ u32 *des_buff_in;
|
|
+ u32 *des_buff_out;
|
|
+ u32 *aes_buff_in;
|
|
+ u32 *aes_buff_out;
|
|
+
|
|
+ int (*dma_init)(void);
|
|
+ void (*dma_exit)(void);
|
|
+ u32 (*dma_align)(const u8 *, u32 *, int, int);
|
|
+ void (*aes_dma_memcpy)(u32 *, u32 *, u8 *, int);
|
|
+ void (*des_dma_memcpy)(u32 *, u32 *, u8 *, int);
|
|
+ int (*aes_dma_malloc)(int);
|
|
+ int (*des_dma_malloc)(int);
|
|
+ void (*dma_free)(u32 *);
|
|
+#endif
|
|
+
|
|
+ u32 (*endian_swap)(u32);
|
|
+ u32 (*input_swap)(u32);
|
|
+ void (*aes_chip_init)(void);
|
|
+ void (*des_chip_init)(void);
|
|
+ u32 (*chip_init)(void);
|
|
+};
|
|
+
|
|
+extern struct lq_crypto_priv lq_crypto_ops;
|
|
+
|
|
+#define LQ_DEU_ALIGNMENT 16
|
|
+
|
|
+#define PFX "lq_deu: "
|
|
+
|
|
+#define LQ_DEU_CRA_PRIORITY 300
|
|
+#define LQ_DEU_COMPOSITE_PRIORITY 400
|
|
+
|
|
+#define CRYPTO_DIR_ENCRYPT 1
|
|
+#define CRYPTO_DIR_DECRYPT 0
|
|
+
|
|
+#define CRTCL_SECT_INIT spin_lock_init(&cipher_lock)
|
|
+#define CRTCL_SECT_START spin_lock_irqsave(&cipher_lock, flag)
|
|
+#define CRTCL_SECT_END spin_unlock_irqrestore(&cipher_lock, flag)
|
|
+
|
|
+#define LQ_DEU_ID_REV 0x00001F
|
|
+#define LQ_DEU_ID_ID 0x00FF00
|
|
+#define LQ_DEU_ID_DMA 0x010000
|
|
+#define LQ_DEU_ID_HASH 0x020000
|
|
+#define LQ_DEU_ID_AES 0x040000
|
|
+#define LQ_DEU_ID_3DES 0x080000
|
|
+#define LQ_DEU_ID_DES 0x100000
|
|
+
|
|
+extern int disable_deudma;
|
|
+
|
|
+int lq_deu_init(void);
|
|
+void lq_deu_exit(void);
|
|
+
|
|
+int lq_deu_init_des(void);
|
|
+int lq_deu_init_aes(void);
|
|
+int lq_deu_init_arc4(void);
|
|
+int lq_deu_init_sha1(void);
|
|
+int lq_deu_init_md5(void);
|
|
+int lq_deu_init_sha1_hmac(void);
|
|
+int lq_deu_init_md5_hmac(void);
|
|
+
|
|
+void lq_deu_fini_des(void);
|
|
+void lq_deu_fini_aes(void);
|
|
+void lq_deu_fini_arc4(void);
|
|
+void lq_deu_fini_sha1(void);
|
|
+void lq_deu_fini_md5(void);
|
|
+void lq_deu_fini_sha1_hmac(void);
|
|
+void lq_deu_fini_md5_hmac(void);
|
|
+
|
|
+/* board specific functions */
|
|
+/* { */
|
|
+static inline u32 deu_chip_init(void)
|
|
+{
|
|
+ return lq_crypto_ops.chip_init();
|
|
+}
|
|
+
|
|
+static inline void deu_des_chip_init(void)
|
|
+{
|
|
+ lq_crypto_ops.des_chip_init();
|
|
+}
|
|
+
|
|
+static inline void deu_aes_chip_init(void)
|
|
+{
|
|
+ lq_crypto_ops.aes_chip_init();
|
|
+}
|
|
+
|
|
+static inline u32 deu_input_swap(u32 input)
|
|
+{
|
|
+ return lq_crypto_ops.input_swap(input);
|
|
+}
|
|
+
|
|
+static inline u32 deu_endian_swap(u32 input)
|
|
+{
|
|
+ return lq_crypto_ops.endian_swap(input);
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+static inline int deu_aes_dma_malloc(int value)
|
|
+{
|
|
+ return lq_crypto_ops.aes_dma_malloc(value);
|
|
+}
|
|
+
|
|
+static inline int deu_des_dma_malloc(int value)
|
|
+{
|
|
+ return lq_crypto_ops.des_dma_malloc(value);
|
|
+}
|
|
+
|
|
+static inline u32 *deu_dma_align(const u8 *arg,
|
|
+ u32 *buff_alloc,
|
|
+ int in_out,
|
|
+ int nbytes)
|
|
+{
|
|
+ return lq_crypto_ops.dma_align(arg, buff_alloc, in_out, nbytes);
|
|
+}
|
|
+
|
|
+static inline void deu_aes_dma_memcpy(u32 *outcopy,
|
|
+ u32 *out_dma,
|
|
+ u8 *out_arg,
|
|
+ int nbytes)
|
|
+{
|
|
+ lq_crypto_ops.aes_dma_memcpy(outcopy, out_dma, out_arg, nbytes);
|
|
+}
|
|
+
|
|
+static inline void deu_des_dma_memcpy(u32 *outcopy,
|
|
+ u32 *out_dma,
|
|
+ u8 *out_arg,
|
|
+ int nbytes)
|
|
+{
|
|
+ lq_crypto_ops.des_dma_memcpy(outcopy, out_dma, out_arg, nbytes);
|
|
+}
|
|
+
|
|
+static inline void deu_dma_free(u32 *addr)
|
|
+{
|
|
+ lq_crypto_ops.dma_free(addr);
|
|
+}
|
|
+
|
|
+static inline int deu_dma_init(void)
|
|
+{
|
|
+ return lq_crypto_ops.dma_init();
|
|
+}
|
|
+
|
|
+static inline void deu_dma_exit(void)
|
|
+{
|
|
+ lq_crypto_ops.dma_exit();
|
|
+}
|
|
+#endif
|
|
+
|
|
+/* } */
|
|
+
|
|
+#define DEU_WAKELIST_INIT(queue) \
|
|
+ init_waitqueue_head(&queue)
|
|
+
|
|
+#define DEU_WAIT_EVENT_TIMEOUT(queue, event, flags, timeout) \
|
|
+ do { \
|
|
+ wait_event_interruptible_timeout((queue), \
|
|
+ test_bit((event), \
|
|
+ &(flags)), (timeout)); \
|
|
+ clear_bit((event), &(flags)); \
|
|
+ }while (0)
|
|
+
|
|
+
|
|
+#define DEU_WAKEUP_EVENT(queue, event, flags) \
|
|
+ do { \
|
|
+ set_bit((event), &(flags)); \
|
|
+ wake_up_interruptible(&(queue)); \
|
|
+ }while (0)
|
|
+
|
|
+#define DEU_WAIT_EVENT(queue, event, flags) \
|
|
+ do { \
|
|
+ wait_event_interruptible(queue, \
|
|
+ test_bit((event), &(flags))); \
|
|
+ clear_bit((event), &(flags)); \
|
|
+ }while (0)
|
|
+
|
|
+struct deu_drv_priv {
|
|
+ wait_queue_head_t deu_thread_wait;
|
|
+#define DEU_EVENT 1
|
|
+ volatile long deu_event_flags;
|
|
+ u8 *deu_rx_buf;
|
|
+ u32 deu_rx_len;
|
|
+};
|
|
+
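+/*
+ * Usage sketch (illustrative only): the DMA receive path signals
+ * completion with DEU_WAKEUP_EVENT() while the requesting context
+ * sleeps in DEU_WAIT_EVENT(), as the DES/AES DMA code in this patch
+ * does with the fields of struct deu_drv_priv above.
+ */
+#if 0
+static void example_rx_complete(struct deu_drv_priv *priv)
+{
+ /* e.g. called once received data has been placed in deu_rx_buf */
+ DEU_WAKEUP_EVENT(priv->deu_thread_wait, DEU_EVENT,
+ priv->deu_event_flags);
+}
+
+static void example_wait_for_rx(struct deu_drv_priv *priv)
+{
+ /* requester side: block until the completion above fires */
+ DEU_WAIT_EVENT(priv->deu_thread_wait, DEU_EVENT,
+ priv->deu_event_flags);
+}
+#endif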
|
|
+#ifdef CRYPTO_DEBUG
|
|
+extern char deu_debug_level;
|
|
+# define DPRINTF(level, format, args...) \
|
|
+ if (level < deu_debug_level) \
|
|
+ printk(KERN_INFO "[%s %s %d]: " format, \
|
|
+ __FILE__, __func__, __LINE__, ##args)
|
|
+#else
|
|
+# define DPRINTF(level, format, args...) do { } while(0)
|
|
+#endif
|
|
+
|
|
+#endif /* DEU_H */
|
|
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/deu_ar9.c
|
|
@@ -0,0 +1,327 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus
|
|
+ */
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/errno.h>
|
|
+#include <asm/io.h> /* dma_cache_inv */
|
|
+#include <linux/platform_device.h>
|
|
+
|
|
+#ifdef CONFIG_SOC_LANTIQ_XWAY
|
|
+
|
|
+#include "deu.h"
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup API
|
|
+ \brief Lantiq DEU driver module
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file deu_ar9.c
|
|
+ \brief Lantiq DEU board specific driver file for ar9
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \defgroup BOARD_SPECIFIC_FUNCTIONS LQ_BOARD_SPECIFIC_FUNCTIONS
|
|
+ \ingroup LQ_DEU
|
|
+ \brief board specific functions
|
|
+*/
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+struct lq_deu_device lq_deu[1];
|
|
+
|
|
+static u8 *g_dma_page_ptr = NULL;
|
|
+static u8 *g_dma_block = NULL;
|
|
+static u8 *g_dma_block2 = NULL;
|
|
+
|
|
+/** \fn int dma_init(void)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief Initialize DMA for DEU usage. DMA specific registers are
|
|
+ * initialized here, including a pointer to the device, memory
|
|
+ * space for the device and DEU-DMA descriptors
|
|
+ * \return -1: fail, 0: SUCCESS
|
|
+*/
|
|
+static int dma_init(void)
|
|
+{
|
|
+ volatile struct deu_dma *dma = (struct deu_dma *) LQ_DEU_DMA_CON;
|
|
+ struct dma_device_info *dma_device = NULL;
|
|
+ int i = 0;
|
|
+
|
|
+ struct dma_device_info *deu_dma_device_ptr;
|
|
+
|
|
+ /* get one free page and share between g_dma_block and g_dma_block2 */
|
|
+ printk("PAGE_SIZE = %ld\n", PAGE_SIZE);
|
|
+ /* need 16-byte alignment memory block */
|
|
+ g_dma_page_ptr = (u8 *)__get_free_page(GFP_KERNEL);
|
|
+ /* need 16-byte alignment memory block */
|
|
+ g_dma_block = g_dma_page_ptr;
|
|
+ /* need 16-byte alignment memory block */
|
|
+ g_dma_block2 = (u8 *)(g_dma_page_ptr + (PAGE_SIZE >> 1));
|
|
+
|
|
+ /* deu_dma_priv_init(); */
|
|
+
|
|
+ deu_dma_device_ptr = dma_device_reserve("DEU");
|
|
+ if (!deu_dma_device_ptr) {
|
|
+ printk("DEU: reserve DMA fail!\n");
|
|
+ return -1;
|
|
+ }
|
|
+ lq_deu[0].dma_device = deu_dma_device_ptr;
|
|
+
|
|
+ dma_device = deu_dma_device_ptr;
|
|
+ /* dma_device->priv = &deu_dma_priv; */
|
|
+ dma_device->buffer_alloc = &deu_dma_buffer_alloc;
|
|
+ dma_device->buffer_free = &deu_dma_buffer_free;
|
|
+ dma_device->intr_handler = &deu_dma_intr_handler;
|
|
+
|
|
+ dma_device->tx_endianness_mode = LQ_DMA_ENDIAN_TYPE3;
|
|
+ dma_device->rx_endianness_mode = LQ_DMA_ENDIAN_TYPE3;
|
|
+ dma_device->port_num = 1;
|
|
+ dma_device->tx_burst_len = 2;
|
|
+ dma_device->rx_burst_len = 2;
|
|
+ dma_device->max_rx_chan_num = 1;
|
|
+ dma_device->max_tx_chan_num = 1;
|
|
+ dma_device->port_packet_drop_enable = 0;
|
|
+
|
|
+ for (i = 0; i < dma_device->max_rx_chan_num; i++) {
|
|
+ dma_device->rx_chan[i]->packet_size = DEU_MAX_PACKET_SIZE;
|
|
+ dma_device->rx_chan[i]->desc_len = 1;
|
|
+ dma_device->rx_chan[i]->control = LQ_DMA_CH_ON;
|
|
+ dma_device->rx_chan[i]->byte_offset = 0;
|
|
+ dma_device->rx_chan[i]->chan_poll_enable = 1;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < dma_device->max_tx_chan_num; i++) {
|
|
+ dma_device->tx_chan[i]->control = LQ_DMA_CH_ON;
|
|
+ dma_device->tx_chan[i]->desc_len = 1;
|
|
+ dma_device->tx_chan[i]->chan_poll_enable = 1;
|
|
+ }
|
|
+
|
|
+ dma_device->current_tx_chan = 0;
|
|
+ dma_device->current_rx_chan = 0;
|
|
+
|
|
+ i = dma_device_register(dma_device);
|
|
+ for (i = 0; i < dma_device->max_rx_chan_num; i++) {
|
|
+ (dma_device->rx_chan[i])->open(dma_device->rx_chan[i]);
|
|
+ }
|
|
+
|
|
+ dma->ctrl.BS = 0;
|
|
+ dma->ctrl.RXCLS = 0;
|
|
+ dma->ctrl.EN = 1;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/** \fn u32 *dma_align(const u8 *arg, u32 *buffer_alloc, int in_buff, int nbytes)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief Not used for AR9
|
|
+ * \param arg Pointer to the input / output memory address
|
|
+ * \param buffer_alloc A pointer to the buffer
|
|
+ * \param in_buff Input (if == 1) or Output (if == 0) buffer
|
|
+ * \param nbytes Number of bytes of data
|
|
+*/
|
|
+static u32 *dma_align(const u8 *arg, u32 *buffer_alloc, int in_buff, int nbytes)
|
|
+{
|
|
+ return (u32 *) arg;
|
|
+}
|
|
+
|
|
+/** \fn void aes_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief copy the DMA data to the memory address space for AES
|
|
+ * \param outcopy Not used
|
|
+ * \param out_dma A pointer to the memory address that stores the DMA data
|
|
+ * \param out_arg The pointer to the memory address that needs to be copied to
|
|
+ * \param nbytes Number of bytes of data
|
|
+*/
|
|
+static void aes_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
|
|
+{
|
|
+ memcpy(out_arg, out_dma, nbytes);
|
|
+}
|
|
+
|
|
+/** \fn void des_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief copy the DMA data to the memory address space for DES
|
|
+ * \param outcopy Not used
|
|
+ * \param out_dma A pointer to the memory address that stores the DMA data
|
|
+ * \param out_arg The pointer to the memory address that needs to be copied to
|
|
+ * \param nbytes Number of bytes of data
|
|
+*/
|
|
+static void des_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
|
|
+{
|
|
+ memcpy(out_arg, out_dma, nbytes);
|
|
+}
|
|
+
|
|
+/** \fn dma_exit(void)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief unregister dma devices after exit
|
|
+*/
|
|
+static void dma_exit(void)
|
|
+{
|
|
+ if (g_dma_page_ptr)
|
|
+ free_page((u32) g_dma_page_ptr);
|
|
+ dma_device_release(lq_deu[0].dma_device);
|
|
+ dma_device_unregister(lq_deu[0].dma_device);
|
|
+}
|
|
+#endif /* CONFIG_CRYPTO_DEV_LANTIQ_DMA */
|
|
+
|
|
+/** \fn u32 endian_swap(u32 input)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief Swap data given to the function
|
|
+ * \param input Data input to be swapped
|
|
+ * \return either the swapped data or the input data depending on whether it is in DMA mode or FPI mode
|
|
+*/
|
|
+static u32 endian_swap(u32 input)
|
|
+{
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+ u8 *ptr = (u8 *)&input;
|
|
+ return ((ptr[3] << 24) | (ptr[2] << 16) | (ptr[1] << 8) | ptr[0]);
|
|
+#else
|
|
+ return input;
|
|
+#endif
|
|
+}
|
|
+
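+/*
+ * Worked example: on the big-endian MIPS core, endian_swap(0x11223344)
+ * in DMA mode returns 0x44332211; in FPI mode the value is returned
+ * unchanged.
+ */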
|
|
+/** \fn u32 input_swap(u32 input)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief Not used
|
|
+ * \return input
|
|
+*/
|
|
+static u32 input_swap(u32 input)
|
|
+{
|
|
+ return input;
|
|
+}
|
|
+
|
|
+/** \fn void aes_chip_init(void)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief initialize AES hardware
|
|
+*/
|
|
+static void aes_chip_init(void)
|
|
+{
|
|
+ volatile struct deu_aes *aes = (struct deu_aes *) AES_START;
|
|
+
|
|
+ aes->ctrl.SM = 1;
|
|
+#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+ aes->ctrl.ARS = 1;
|
|
+#else
|
|
+ aes->ctrl.NDC = 1; /* to write to ENDI */
|
|
+ asm("sync");
|
|
+ aes->ctrl.ENDI = 0;
|
|
+ asm("sync");
|
|
+ aes->ctrl.ARS = 0; /* 0 for dma */
|
|
+ asm("sync");
|
|
+#endif
|
|
+}
|
|
+
|
|
+/** \fn void des_chip_init(void)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief initialize DES hardware
|
|
+*/
|
|
+static void des_chip_init(void)
|
|
+{
|
|
+ volatile struct deu_des *des = (struct deu_des *) DES_3DES_START;
|
|
+
|
|
+#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+ /* start crypto engine with write to ILR */
|
|
+ des->ctrl.SM = 1;
|
|
+ asm("sync");
|
|
+ des->ctrl.ARS = 1;
|
|
+#else
|
|
+ des->ctrl.SM = 1;
|
|
+ des->ctrl.NDC = 1;
|
|
+ asm("sync");
|
|
+ des->ctrl.ENDI = 0;
|
|
+ asm("sync");
|
|
+ des->ctrl.ARS = 0; /* 0 for dma */
|
|
+
|
|
+#endif
|
|
+}
|
|
+
|
|
+static u32 chip_init(void)
|
|
+{
|
|
+ volatile struct deu_clk_ctrl *clc = (struct deu_clk_ctrl *) LQ_DEU_CLK;
|
|
+
|
|
+#if 0
|
|
+ lq_pmu_enable(1<<20);
|
|
+#endif
|
|
+
|
|
+ clc->FSOE = 0;
|
|
+ clc->SBWE = 0;
|
|
+ clc->SPEN = 0;
|
|
+ clc->SBWE = 0;
|
|
+ clc->DISS = 0;
|
|
+ clc->DISR = 0;
|
|
+
|
|
+ return *LQ_DEU_ID;
|
|
+}
|
|
+
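+/*
+ * Illustrative sketch (hypothetical helper): the value returned by
+ * chip_init() is the DEU ID register, which lq_deu_init() tests
+ * against the LQ_DEU_ID_* masks from deu.h, e.g.:
+ */
+#if 0
+static void example_report_features(u32 id)
+{
+ printk(KERN_INFO "DEU rev %u%s%s%s\n",
+ id & LQ_DEU_ID_REV,
+ (id & LQ_DEU_ID_AES) ? " AES" : "",
+ (id & LQ_DEU_ID_DES) ? " DES" : "",
+ (id & LQ_DEU_ID_HASH) ? " HASH" : "");
+}
+#endif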
|
|
+static int lq_crypto_probe(struct platform_device *pdev)
|
|
+{
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+ lq_crypto_ops.dma_init = dma_init;
|
|
+ lq_crypto_ops.dma_exit = dma_exit;
|
|
+ lq_crypto_ops.aes_dma_memcpy = aes_dma_memcpy;
|
|
+ lq_crypto_ops.des_dma_memcpy = des_dma_memcpy;
|
|
+ lq_crypto_ops.aes_dma_malloc = aes_dma_malloc;
|
|
+ lq_crypto_ops.des_dma_malloc = des_dma_malloc;
|
|
+ lq_crypto_ops.dma_align = dma_align;
|
|
+ lq_crypto_ops.dma_free = dma_free;
|
|
+#endif
|
|
+
|
|
+ lq_crypto_ops.endian_swap = endian_swap;
|
|
+ lq_crypto_ops.input_swap = input_swap;
|
|
+ lq_crypto_ops.aes_chip_init = aes_chip_init;
|
|
+ lq_crypto_ops.des_chip_init = des_chip_init;
|
|
+ lq_crypto_ops.chip_init = chip_init;
|
|
+
|
|
+ printk("lq_ar9_deu: driver loaded!\n");
|
|
+
|
|
+ lq_deu_init();
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int lq_crypto_remove(struct platform_device *pdev)
|
|
+{
|
|
+ lq_deu_exit();
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct platform_driver lq_crypto = {
|
|
+ .probe = lq_crypto_probe,
|
|
+ .remove = lq_crypto_remove,
|
|
+ .driver = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .name = "lq_ar9_deu"
|
|
+ }
|
|
+};
|
|
+
|
|
+static int __init lq_crypto_init(void)
|
|
+{
|
|
+ return platform_driver_register(&lq_crypto);
|
|
+}
|
|
+module_init(lq_crypto_init);
|
|
+
|
|
+static void __exit lq_crypto_exit(void)
|
|
+{
|
|
+ platform_driver_unregister(&lq_crypto);
|
|
+}
|
|
+module_exit(lq_crypto_exit);
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/deu_ar9.h
|
|
@@ -0,0 +1,291 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus / Infineon Technologies
|
|
+ */
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup API
|
|
+ \brief DEU driver module
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU_DEFINITIONS LQ_DEU_DEFINITIONS
|
|
+ \ingroup LQ_DEU
|
|
+ \brief Lantiq DEU definitions
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file deu_ar9.h
|
|
+ \brief DEU driver header file
|
|
+*/
|
|
+
|
|
+
|
|
+#ifndef DEU_AR9_H
|
|
+#define DEU_AR9_H
|
|
+
|
|
+#define LQ_DEU_BASE_ADDR (KSEG1 | 0x1E103100)
|
|
+#define LQ_DEU_CLK ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0000))
|
|
+#define LQ_DEU_ID ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0008))
|
|
+#define LQ_DES_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0010))
|
|
+#define LQ_AES_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0050))
|
|
+#define LQ_HASH_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x00B0))
|
|
+#define LQ_ARC4_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0100))
|
|
+
|
|
+#define ARC4_START LQ_ARC4_CON
|
|
+#define DES_3DES_START LQ_DES_CON
|
|
+#define HASH_START LQ_HASH_CON
|
|
+#define AES_START LQ_AES_CON
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+# include "deu_dma.h"
|
|
+# define DEU_DWORD_REORDERING(ptr, buffer, in_out, bytes) \
|
|
+ deu_dma_align(ptr, buffer, in_out, bytes)
|
|
+# define AES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes) \
|
|
+ deu_aes_dma_memcpy(outcopy, out_dma, out_arg, nbytes)
|
|
+# define DES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes) \
|
|
+ deu_des_dma_memcpy(outcopy, out_dma, out_arg, nbytes)
|
|
+# define BUFFER_IN 1
|
|
+# define BUFFER_OUT 0
|
|
+# define AES_ALGO 1
|
|
+# define DES_ALGO 0
|
|
+# define ALLOCATE_MEMORY(val, type) 1
|
|
+# define FREE_MEMORY(buff)
|
|
+extern struct lq_deu_device lq_deu[1];
|
|
+#endif /* CONFIG_CRYPTO_DEV_LANTIQ_DMA */
|
|
+
|
|
+/* SHA CONSTANTS */
|
|
+#define HASH_CON_VALUE 0x0700002C
|
|
+
|
|
+#define INPUT_ENDIAN_SWAP(input) deu_input_swap(input)
|
|
+#define DEU_ENDIAN_SWAP(input) deu_endian_swap(input)
|
|
+#define DELAY_PERIOD 10
|
|
+#define FIND_DEU_CHIP_VERSION chip_version()
|
|
+
|
|
+#define WAIT_AES_DMA_READY() \
|
|
+ do { \
|
|
+ int i; \
|
|
+ volatile struct deu_dma *dma = \
|
|
+ (struct deu_dma *) LQ_DEU_DMA_CON; \
|
|
+ volatile struct deu_aes *aes = \
|
|
+ (volatile struct deu_aes *) AES_START; \
|
|
+ for (i = 0; i < 10; i++) \
|
|
+ udelay(DELAY_PERIOD); \
|
|
+ while (dma->ctrl.BSY) {}; \
|
|
+ while (aes->ctrl.BUS) {}; \
|
|
+ } while (0)
|
|
+
|
|
+#define WAIT_DES_DMA_READY() \
|
|
+ do { \
|
|
+ int i; \
|
|
+ volatile struct deu_dma *dma = \
|
|
+ (struct deu_dma *) LQ_DEU_DMA_CON; \
|
|
+ volatile struct deu_des *des = \
|
|
+ (struct deu_des *) DES_3DES_START; \
|
|
+ for (i = 0; i < 10; i++) \
|
|
+ udelay(DELAY_PERIOD); \
|
|
+ while (dma->ctrl.BSY) {}; \
|
|
+ while (des->ctrl.BUS) {}; \
|
|
+ } while (0)
|
|
+
|
|
+#define AES_DMA_MISC_CONFIG() \
|
|
+ do { \
|
|
+ volatile struct deu_aes *aes = \
|
|
+ (volatile struct deu_aes *) AES_START; \
|
|
+ aes->ctrl.KRE = 1; \
|
|
+ aes->ctrl.GO = 1; \
|
|
+ } while(0)
|
|
+
|
|
+#define SHA_HASH_INIT \
|
|
+ do { \
|
|
+ volatile struct deu_hash *hash = \
|
|
+ (struct deu_hash *) HASH_START; \
|
|
+ hash->ctrl.SM = 1; \
|
|
+ hash->ctrl.ALGO = 0; \
|
|
+ hash->ctrl.INIT = 1; \
|
|
+ } while(0)
|
|
+
|
|
+/* DEU Common Structures for AR9*/
|
|
+
|
|
+struct deu_clk_ctrl {
|
|
+ u32 Res:26;
|
|
+ u32 FSOE:1;
|
|
+ u32 SBWE:1;
|
|
+ u32 EDIS:1;
|
|
+ u32 SPEN:1;
|
|
+ u32 DISS:1;
|
|
+ u32 DISR:1;
|
|
+};
|
|
+
|
|
+struct deu_des {
|
|
+ struct deu_des_ctrl { /* 10h */
|
|
+ u32 KRE:1;
|
|
+ u32 reserved1:5;
|
|
+ u32 GO:1;
|
|
+ u32 STP:1;
|
|
+ u32 Res2:6;
|
|
+ u32 NDC:1;
|
|
+ u32 ENDI:1;
|
|
+ u32 Res3:2;
|
|
+ u32 F:3;
|
|
+ u32 O:3;
|
|
+ u32 BUS:1;
|
|
+ u32 DAU:1;
|
|
+ u32 ARS:1;
|
|
+ u32 SM:1;
|
|
+ u32 E_D:1;
|
|
+ u32 M:3;
|
|
+ } ctrl;
|
|
+
|
|
+ u32 IHR; /* 14h */
|
|
+ u32 ILR; /* 18h */
|
|
+ u32 K1HR; /* 1c */
|
|
+ u32 K1LR;
|
|
+ u32 K2HR;
|
|
+ u32 K2LR;
|
|
+ u32 K3HR;
|
|
+ u32 K3LR; /* 30h */
|
|
+ u32 IVHR; /* 34h */
|
|
+ u32 IVLR; /* 38 */
|
|
+ u32 OHR; /* 3c */
|
|
+ u32 OLR; /* 40 */
|
|
+};
|
|
+
|
|
+struct deu_aes {
|
|
+ struct deu_aes_ctrl {
|
|
+ u32 KRE:1;
|
|
+ u32 reserved1:4;
|
|
+ u32 PNK:1;
|
|
+ u32 GO:1;
|
|
+ u32 STP:1;
|
|
+ u32 reserved2:6;
|
|
+ u32 NDC:1;
|
|
+ u32 ENDI:1;
|
|
+ u32 reserved3:2;
|
|
+ u32 F:3; /* fbs */
|
|
+ u32 O:3; /* om */
|
|
+ u32 BUS:1; /* bsy */
|
|
+ u32 DAU:1;
|
|
+ u32 ARS:1;
|
|
+ u32 SM:1;
|
|
+ u32 E_D:1;
|
|
+ u32 KV:1;
|
|
+ u32 K:2; /* KL */
|
|
+ } ctrl;
|
|
+
|
|
+ u32 ID3R; /* 80h */
|
|
+ u32 ID2R; /* 84h */
|
|
+ u32 ID1R; /* 88h */
|
|
+ u32 ID0R; /* 8Ch */
|
|
+ u32 K7R; /* 90h */
|
|
+ u32 K6R; /* 94h */
|
|
+ u32 K5R; /* 98h */
|
|
+ u32 K4R; /* 9Ch */
|
|
+ u32 K3R; /* A0h */
|
|
+ u32 K2R; /* A4h */
|
|
+ u32 K1R; /* A8h */
|
|
+ u32 K0R; /* ACh */
|
|
+ u32 IV3R; /* B0h */
|
|
+ u32 IV2R; /* B4h */
|
|
+ u32 IV1R; /* B8h */
|
|
+ u32 IV0R; /* BCh */
|
|
+ u32 OD3R; /* D4h */
|
|
+ u32 OD2R; /* D8h */
|
|
+ u32 OD1R; /* DCh */
|
|
+ u32 OD0R; /* E0h */
|
|
+};
|
|
+
|
|
+struct deu_arc4 {
|
|
+ struct arc4_controlr {
|
|
+ u32 KRE:1;
|
|
+ u32 KLEN:4;
|
|
+ u32 KSAE:1;
|
|
+ u32 GO:1;
|
|
+ u32 STP:1;
|
|
+ u32 reserved1:6;
|
|
+ u32 NDC:1;
|
|
+ u32 ENDI:1;
|
|
+ u32 reserved2:8;
|
|
+ u32 BUS:1; /* bsy */
|
|
+ u32 reserved3:1;
|
|
+ u32 ARS:1;
|
|
+ u32 SM:1;
|
|
+ u32 reserved4:4;
|
|
+ } ctrl;
|
|
+
|
|
+ u32 K3R; /* 104h */
|
|
+ u32 K2R; /* 108h */
|
|
+ u32 K1R; /* 10Ch */
|
|
+ u32 K0R; /* 110h */
|
|
+ u32 IDLEN; /* 114h */
|
|
+ u32 ID3R; /* 118h */
|
|
+ u32 ID2R; /* 11Ch */
|
|
+ u32 ID1R; /* 120h */
|
|
+ u32 ID0R; /* 124h */
|
|
+ u32 OD3R; /* 128h */
|
|
+ u32 OD2R; /* 12Ch */
|
|
+ u32 OD1R; /* 130h */
|
|
+ u32 OD0R; /* 134h */
|
|
+};
|
|
+
|
|
+struct deu_hash {
|
|
+ struct deu_hash_ctrl {
|
|
+ u32 reserved1:5;
|
|
+ u32 KHS:1;
|
|
+ u32 GO:1;
|
|
+ u32 INIT:1;
|
|
+ u32 reserved2:6;
|
|
+ u32 NDC:1;
|
|
+ u32 ENDI:1;
|
|
+ u32 reserved3:7;
|
|
+ u32 DGRY:1;
|
|
+ u32 BSY:1;
|
|
+ u32 reserved4:1;
|
|
+ u32 IRCL:1;
|
|
+ u32 SM:1;
|
|
+ u32 KYUE:1;
|
|
+ u32 HMEN:1;
|
|
+ u32 SSEN:1;
|
|
+ u32 ALGO:1;
|
|
+ } ctrl;
|
|
+
|
|
+ u32 MR; /* B4h */
|
|
+ u32 D1R; /* B8h */
|
|
+ u32 D2R; /* BCh */
|
|
+ u32 D3R; /* C0h */
|
|
+ u32 D4R; /* C4h */
|
|
+ u32 D5R; /* C8h */
|
|
+ u32 dummy; /* CCh */
|
|
+ u32 KIDX; /* D0h */
|
|
+ u32 KEY; /* D4h */
|
|
+ u32 DBN; /* D8h */
|
|
+};
|
|
+
|
|
+struct deu_dma {
|
|
+ struct deu_dma_ctrl {
|
|
+ u32 reserved1:22;
|
|
+ u32 BS:2;
|
|
+ u32 BSY:1;
|
|
+ u32 reserved2:1;
|
|
+ u32 ALGO:2;
|
|
+ u32 RXCLS:2;
|
|
+ u32 reserved3:1;
|
|
+ u32 EN:1;
|
|
+ } ctrl;
|
|
+};
|
|
+
|
|
+#endif /* DEU_AR9_H */
|
|
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/deu_danube.c
|
|
@@ -0,0 +1,484 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus
|
|
+ */
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/errno.h>
|
|
+#include <asm/io.h> /* dma_cache_inv */
|
|
+#include <linux/platform_device.h>
|
|
+
|
|
+#ifdef CONFIG_SOC_LANTIQ_XWAY
|
|
+
|
|
+#include "deu.h"
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup API
|
|
+ \brief DEU driver module
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file deu_danube.c
|
|
+ \ingroup LQ_DEU
|
|
+ \brief board specific DEU driver file for danube
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \defgroup BOARD_SPECIFIC_FUNCTIONS LQ_BOARD_SPECIFIC_FUNCTIONS
|
|
+ \ingroup LQ_DEU
|
|
+ \brief board specific DEU functions
|
|
+*/
|
|
+
|
|
+static int danube_pre_1_4;
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+u32 *des_buff_in = NULL;
|
|
+u32 *des_buff_out = NULL;
|
|
+u32 *aes_buff_in = NULL;
|
|
+u32 *aes_buff_out = NULL;
|
|
+
|
|
+struct lq_deu_device lq_deu[1];
|
|
+
|
|
+static u8 *g_dma_page_ptr = NULL;
|
|
+static u8 *g_dma_block = NULL;
|
|
+static u8 *g_dma_block2 = NULL;
|
|
+
|
|
+/** \fn int dma_init(void)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief Initialize DMA for DEU usage. DMA specific registers are
|
|
+ * initialized here, including a pointer to the device, memory
|
|
+ * space for the device and DEU-DMA descriptors
|
|
+ * \return -1 if fail, otherwise return 0
|
|
+*/
|
|
+static int dma_init(void)
|
|
+{
|
|
+ struct dma_device_info *dma_device = NULL;
|
|
+ int i = 0;
|
|
+ volatile struct deu_dma *dma = (struct deu_dma *) LQ_DEU_DMA_CON;
|
|
+ struct dma_device_info *deu_dma_device_ptr;
|
|
+
|
|
+ /* get one free page and share between g_dma_block and g_dma_block2 */
|
|
+ printk("PAGE_SIZE = %ld\n", PAGE_SIZE);
|
|
+ /* need 16-byte alignment memory block */
|
|
+ g_dma_page_ptr = (u8 *)__get_free_page(GFP_KERNEL);
|
|
+ /* need 16-byte alignment memory block */
|
|
+ g_dma_block = g_dma_page_ptr;
|
|
+ /* need 16-byte alignment memory block */
|
|
+ g_dma_block2 = (u8 *)(g_dma_page_ptr + (PAGE_SIZE >> 1));
|
|
+
|
|
+ deu_dma_device_ptr = dma_device_reserve("DEU");
|
|
+ if (!deu_dma_device_ptr) {
|
|
+ printk("DEU: reserve DMA fail!\n");
|
|
+ return -1;
|
|
+ }
|
|
+ lq_deu[0].dma_device = deu_dma_device_ptr;
|
|
+ dma_device = deu_dma_device_ptr;
|
|
+ /* dma_device->priv = &deu_dma_priv; */
|
|
+ dma_device->buffer_alloc = &deu_dma_buffer_alloc;
|
|
+ dma_device->buffer_free = &deu_dma_buffer_free;
|
|
+ dma_device->intr_handler = &deu_dma_intr_handler;
|
|
+ dma_device->tx_endianness_mode = LQ_DMA_ENDIAN_TYPE3;
|
|
+ dma_device->rx_endianness_mode = LQ_DMA_ENDIAN_TYPE3;
|
|
+ dma_device->port_num = 1;
|
|
+ dma_device->tx_burst_len = 4;
|
|
+ dma_device->max_rx_chan_num = 1;
|
|
+ dma_device->max_tx_chan_num = 1;
|
|
+ dma_device->port_packet_drop_enable = 0;
|
|
+
|
|
+ for (i = 0; i < dma_device->max_rx_chan_num; i++) {
|
|
+ dma_device->rx_chan[i]->packet_size = DEU_MAX_PACKET_SIZE;
|
|
+ dma_device->rx_chan[i]->desc_len = 1;
|
|
+ dma_device->rx_chan[i]->control = LQ_DMA_CH_ON;
|
|
+ dma_device->rx_chan[i]->byte_offset = 0;
|
|
+ dma_device->rx_chan[i]->chan_poll_enable = 1;
|
|
+
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < dma_device->max_tx_chan_num; i++) {
|
|
+ dma_device->tx_chan[i]->control = LQ_DMA_CH_ON;
|
|
+ dma_device->tx_chan[i]->desc_len = 1;
|
|
+ dma_device->tx_chan[i]->chan_poll_enable = 1;
|
|
+ }
|
|
+
|
|
+ dma_device->current_tx_chan = 0;
|
|
+ dma_device->current_rx_chan = 0;
|
|
+
|
|
+ dma_device_register(dma_device);
|
|
+ for (i = 0; i < dma_device->max_rx_chan_num; i++) {
|
|
+ (dma_device->rx_chan[i])->open(dma_device->rx_chan[i]);
|
|
+ }
|
|
+
|
|
+ dma->ctrl.BS = 0;
|
|
+ dma->ctrl.RXCLS = 0;
|
|
+ dma->ctrl.EN = 1;
|
|
+
|
|
+
|
|
+ *LQ_DMA_PS = 1;
|
|
+
|
|
+ /* DANUBE PRE 1.4 SOFTWARE FIX */
|
|
+ if (danube_pre_1_4)
|
|
+ *LQ_DMA_PCTRL = 0x14;
|
|
+ else
|
|
+ *LQ_DMA_PCTRL = 0xF14;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/** \fn u32 *dma_align(const u8 *arg, u32 *buffer_alloc, int in_buff, int nbytes)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief A fix to align misaligned addresses for Danube version 1.3 chips,
|
|
+ * which have memory alignment issues.
|
|
+ * \param arg Pointer to the input / output memory address
|
|
+ * \param buffer_alloc A pointer to the buffer
|
|
+ * \param in_buff Input (if == 1) or Output (if == 0) buffer
|
|
+ * \param nbytes Number of bytes of data
|
|
+ * \return arg if the address is already aligned, otherwise buffer_alloc
|
|
+*/
|
|
+static u32 *dma_align(const u8 *arg, u32 *buffer_alloc, int in_buff, int nbytes)
|
|
+{
|
|
+ if (danube_pre_1_4) {
|
|
+ /* for input buffer */
|
|
+ if (in_buff) {
|
|
+ if (((u32) arg) & 0xF) {
|
|
+ memcpy(buffer_alloc, arg, nbytes);
|
|
+ return (u32 *) buffer_alloc;
|
|
+ } else {
|
|
+ return (u32 *) arg;
|
|
+ }
|
|
+ }
|
|
+ else {
|
|
+ /* for output buffer */
|
|
+ if (((u32) arg) & 0x3)
|
|
+ return buffer_alloc;
|
|
+ else
|
|
+ return (u32 *) arg;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return (u32 *) arg;
|
|
+}
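+
+/*
+ * Illustrative sketch only, not part of this driver: how a cipher path
+ * is expected to combine dma_align() with the bounce buffer set up by
+ * aes_dma_malloc(BUFFER_IN). The helper name is hypothetical.
+ */
+#if 0
+static u32 *aes_input_for_dma(const u8 *arg, int nbytes)
+{
+	/* On pre-1.4 Danube an unaligned source is copied into
+	 * aes_buff_in first; otherwise arg is returned unchanged. */
+	return dma_align(arg, aes_buff_in, BUFFER_IN, nbytes);
+}
+#endif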
|
|
+
|
|
+/** \fn void aes_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief copy the DMA data to the memory address space for AES. The reordering
|
|
+ * of the four 32-bit words in each 16-byte block is done only for Danube version 1.3 (FIX). Otherwise,
|
|
+ * it is a direct memory copy to the out_arg pointer
|
|
+ * \param outcopy Pointer to the address to store swapped copy
|
|
+ * \param out_dma A pointer to the memory address that stores the DMA data
|
|
+ * \param out_arg The pointer to the memory address that needs to be copied to
|
|
+ * \param nbytes Number of bytes of data
|
|
+*/
|
|
+static void aes_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
|
|
+{
|
|
+ int i = 0;
|
|
+ int x = 0;
|
|
+
|
|
+ /* DANUBE PRE 1.4 SOFTWARE FIX */
|
|
+ if (danube_pre_1_4) {
|
|
+ for (i = 0; i < (nbytes / 4); i++) {
|
|
+ x = i ^ 0x3;
|
|
+ outcopy[i] = out_dma[x];
|
|
+
|
|
+ }
|
|
+ if (((u32) out_arg) & 0x3) {
|
|
+ memcpy((u8 *)out_arg, outcopy, nbytes);
|
|
+ }
|
|
+ } else {
|
|
+ memcpy(out_arg, out_dma, nbytes);
|
|
+ }
|
|
+}
|
|
+
|
|
+/** \fn void des_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief copy the DMA data to the memory address space for DES. The swapping
|
|
+ * of the two 32-bit words in each 8-byte block is done only for Danube version 1.3 (FIX). Otherwise,
|
|
+ * it is a direct memory copy to the out_arg pointer
|
|
+ * \param outcopy Pointer to the address to store swapped copy
|
|
+ * \param out_dma A pointer to the memory address that stores the DMA data
|
|
+ * \param out_arg The pointer to the memory address that needs to be copied to
|
|
+ * \param nbytes Number of bytes of data
|
|
+*/
|
|
+static void des_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
|
|
+{
|
|
+ int i = 0;
|
|
+ int x = 0;
|
|
+
|
|
+ /* DANUBE PRE 1.4 SOFTWARE FIX */
|
|
+ if (danube_pre_1_4) {
|
|
+ for (i = 0; i < (nbytes / 4); i++) {
|
|
+ x = i ^ 1;
|
|
+ outcopy[i] = out_dma[x];
|
|
+
|
|
+ }
|
|
+ if (((u32) out_arg) & 0x3) {
|
|
+ memcpy((u8 *)out_arg, outcopy, nbytes);
|
|
+ }
|
|
+ } else {
|
|
+ memcpy(out_arg, out_dma, nbytes);
|
|
+ }
|
|
+}
|
|
+
|
|
+/** \fn int des_dma_malloc(int value)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief allocates memory to the necessary memory input/output buffer
|
|
+ * location, used during the DES algorithm DMA transfer (memory
|
|
+ * alignment issues)
|
|
+ * \param value determines whether the function call is for an
|
|
+ * input buffer or for an output buffer memory allocation
|
|
+*/
|
|
+static int des_dma_malloc(int value)
|
|
+{
|
|
+ if (danube_pre_1_4) {
|
|
+ if (value == BUFFER_IN) {
|
|
+ des_buff_in = kmalloc(DEU_MAX_PACKET_SIZE, GFP_ATOMIC);
|
|
+ if (!des_buff_in)
|
|
+ return -1;
|
|
+ else
|
|
+ return 0;
|
|
+ }
|
|
+ else {
|
|
+ des_buff_out = kmalloc(DEU_MAX_PACKET_SIZE, GFP_ATOMIC);
|
|
+ if (!des_buff_out)
|
|
+ return -1;
|
|
+ else
|
|
+ return 0;
|
|
+ }
|
|
+ } else {
|
|
+ return 0;
|
|
+ }
|
|
+}
|
|
+
|
|
+/** \fn int aes_dma_malloc(int value)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief allocates memory to the necessary memory input/output buffer
|
|
+ * location, used during the AES algorithm DMA transfer (memory
|
|
+ * alignment issues)
|
|
+ * \param value determines whether the function call is for an
|
|
+ * input buffer or for an output buffer memory allocation
|
|
+*/
|
|
+static int aes_dma_malloc(int value)
|
|
+{
|
|
+ if (danube_pre_1_4) {
|
|
+ if (value == BUFFER_IN) {
|
|
+ aes_buff_in = kmalloc(DEU_MAX_PACKET_SIZE, GFP_ATOMIC);
|
|
+ if (!aes_buff_in)
|
|
+ return -1;
|
|
+ else
|
|
+ return 0;
|
|
+ }
|
|
+ else {
|
|
+ aes_buff_out = kmalloc(DEU_MAX_PACKET_SIZE, GFP_ATOMIC);
|
|
+ if (!aes_buff_out)
|
|
+ return -1;
|
|
+ else
|
|
+ return 0;
|
|
+ }
|
|
+ } else {
|
|
+ return 0;
|
|
+ }
|
|
+}
|
|
+
|
|
+/** \fn void dma_free(u32 *addr)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief frees previously allocated memory
|
|
+ * \param addr memory address of the buffer that needs to be freed
|
|
+*/
|
|
+static void dma_free(u32 *addr)
|
|
+{
|
|
+ if (addr)
|
|
+ kfree(addr);
|
|
+ return;
|
|
+}
|
|
+
|
|
+/** \fn dma_exit(void)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief unregister dma devices after exit
|
|
+*/
|
|
+static void dma_exit(void)
|
|
+{
|
|
+ if (g_dma_page_ptr)
|
|
+ free_page((u32) g_dma_page_ptr);
|
|
+ dma_device_release(lq_deu[0].dma_device);
|
|
+ dma_device_unregister(lq_deu[0].dma_device);
|
|
+}
|
|
+#endif /* CONFIG_CRYPTO_DEV_LANTIQ_DMA */
|
|
+
|
|
+/** \fn u32 endian_swap(u32 input)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief no swap needed on Danube; returns the input unchanged
|
|
+ * \param input Data input to be swapped
|
|
+ * \return input
|
|
+*/
|
|
+static u32 endian_swap(u32 input)
|
|
+{
|
|
+ return input;
|
|
+}
|
|
+
|
|
+/** \fn u32 input_swap(u32 input)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief Swap the input data if the current chip is Danube version
|
|
+ * 1.4 and do nothing to the data if the current chip is
|
|
+ * Danube version 1.3
|
|
+ * \param input data that needs to be swapped
|
|
+ * \return input or swapped input
|
|
+*/
|
|
+static u32 input_swap(u32 input)
|
|
+{
|
|
+ if (!danube_pre_1_4) {
|
|
+ u8 *ptr = (u8 *)&input;
|
|
+ return ((ptr[3] << 24) | (ptr[2] << 16) | (ptr[1] << 8) | ptr[0]);
|
|
+ } else {
|
|
+ return input;
|
|
+ }
|
|
+}
|
|
+
|
|
+/** \fn void aes_chip_init(void)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief initialize AES hardware
|
|
+*/
|
|
+static void aes_chip_init(void)
|
|
+{
|
|
+ volatile struct deu_aes *aes = (struct deu_aes *) AES_START;
|
|
+
|
|
+#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+ /* start crypto engine with write to ILR */
|
|
+ aes->ctrl.SM = 1;
|
|
+ aes->ctrl.ARS = 1;
|
|
+#else
|
|
+ aes->ctrl.SM = 1;
|
|
+ aes->ctrl.ARS = 1; /* 0 for dma */
|
|
+#endif
|
|
+}
|
|
+
|
|
+/** \fn void des_chip_init(void)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief initialize DES hardware
|
|
+*/
|
|
+static void des_chip_init(void)
|
|
+{
|
|
+ volatile struct deu_des *des = (struct deu_des *) DES_3DES_START;
|
|
+
|
|
+#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+ /* start crypto engine with write to ILR */
|
|
+ des->ctrl.SM = 1;
|
|
+ des->ctrl.ARS = 1;
|
|
+#else
|
|
+ des->ctrl.SM = 1;
|
|
+ des->ctrl.ARS = 1; /* 0 for dma */
|
|
+#endif
|
|
+}
|
|
+
|
|
+/** \fn void deu_chip_version(void)
|
|
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
|
|
+ * \brief To find the version of the chip by looking at the chip ID
|
|
+ * Sets the global danube_pre_1_4 to 1 if the chip is a Danube earlier than v1.4.
|
|
+*/
|
|
+static void deu_chip_version(void)
|
|
+{
|
|
+ /* DANUBE PRE 1.4 SOFTWARE FIX */
|
|
+ int chip_id = 0;
|
|
+ chip_id = *LQ_MPS_CHIPID;
|
|
+ chip_id >>= 28;
|
|
+
|
|
+ if (chip_id >= 4) {
|
|
+ danube_pre_1_4 = 0;
|
|
+		printk("Danube Chip ver. 1.4 or later detected.\n");
|
|
+ }
|
|
+ else {
|
|
+ danube_pre_1_4 = 1;
|
|
+		printk("Danube Chip ver. 1.3 or below detected.\n");
|
|
+ }
|
|
+}
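+
+/*
+ * Equivalent check, shown only as a sketch: deu_danube.h already
+ * provides LQ_MPS_CHIPID_VERSION_GET() for the same >> 28 extraction
+ * done above. The helper name is hypothetical.
+ */
+#if 0
+static inline int danube_chip_is_pre_1_4(void)
+{
+	return LQ_MPS_CHIPID_VERSION_GET(*LQ_MPS_CHIPID) < 4;
+}
+#endif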
|
|
+
|
|
+static u32 chip_init(void)
|
|
+{
|
|
+ volatile struct deu_clk_ctrl *clc = (struct deu_clk_ctrl *) LQ_DEU_CLK;
|
|
+
|
|
+#if 0
|
|
+ lq_pmu_enable(1<<20);
|
|
+#endif
|
|
+
|
|
+ deu_chip_version();
|
|
+
|
|
+ clc->FSOE = 0;
|
|
+ clc->SBWE = 0;
|
|
+ clc->SPEN = 0;
|
|
+ clc->SBWE = 0;
|
|
+ clc->DISS = 0;
|
|
+ clc->DISR = 0;
|
|
+
|
|
+ return *LQ_DEU_ID;
|
|
+}
|
|
+
|
|
+static int lq_crypto_probe(struct platform_device *pdev)
|
|
+{
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
|
|
+ lq_crypto_ops.dma_init = dma_init;
|
|
+ lq_crypto_ops.dma_exit = dma_exit;
|
|
+ lq_crypto_ops.aes_dma_memcpy = aes_dma_memcpy;
|
|
+ lq_crypto_ops.des_dma_memcpy = des_dma_memcpy;
|
|
+ lq_crypto_ops.aes_dma_malloc = aes_dma_malloc;
|
|
+ lq_crypto_ops.des_dma_malloc = des_dma_malloc;
|
|
+ lq_crypto_ops.dma_align = dma_align;
|
|
+ lq_crypto_ops.dma_free = dma_free;
|
|
+#endif
|
|
+
|
|
+ lq_crypto_ops.endian_swap = endian_swap;
|
|
+ lq_crypto_ops.input_swap = input_swap;
|
|
+ lq_crypto_ops.aes_chip_init = aes_chip_init;
|
|
+ lq_crypto_ops.des_chip_init = des_chip_init;
|
|
+ lq_crypto_ops.chip_init = chip_init;
|
|
+
|
|
+ printk("lq_danube_deu: driver loaded!\n");
|
|
+
|
|
+ lq_deu_init();
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int lq_crypto_remove(struct platform_device *pdev)
|
|
+{
|
|
+ lq_deu_exit();
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct platform_driver lq_crypto = {
|
|
+ .probe = lq_crypto_probe,
|
|
+ .remove = lq_crypto_remove,
|
|
+ .driver = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .name = "lq_danube_deu"
|
|
+ }
|
|
+};
|
|
+
|
|
+static int __init lq_crypto_init(void)
|
|
+{
|
|
+ return platform_driver_register(&lq_crypto);
|
|
+}
|
|
+module_init(lq_crypto_init);
|
|
+
|
|
+static void __exit lq_crypto_exit(void)
|
|
+{
|
|
+ platform_driver_unregister(&lq_crypto);
|
|
+}
|
|
+module_exit(lq_crypto_exit);
|
|
+
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/deu_danube.h
|
|
@@ -0,0 +1,255 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus / Infineon Technologies
|
|
+ */
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup API
|
|
+ \brief DEU driver module
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file deu_danube.h
|
|
+ \brief board specific driver header file for danube
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \defgroup BOARD_SPECIFIC_FUNCTIONS LQ_BOARD_SPECIFIC_FUNCTIONS
|
|
+ \ingroup LQ_DEU
|
|
+ \brief board specific DEU header files
|
|
+*/
|
|
+
|
|
+#ifndef DEU_DANUBE_H
|
|
+#define DEU_DANUBE_H
|
|
+
|
|
+#define LQ_DEU_BASE_ADDR (KSEG1 | 0x1E103100)
|
|
+#define LQ_DEU_CLK ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0000))
|
|
+#define LQ_DEU_ID ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0008))
|
|
+#define LQ_DES_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0010))
|
|
+#define LQ_AES_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0050))
|
|
+#define LQ_HASH_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x00B0))
|
|
+#define LQ_ARC4_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0100))
|
|
+
|
|
+#define ARC4_START LQ_ARC4_CON
|
|
+#define DES_3DES_START LQ_DES_CON
|
|
+#define HASH_START LQ_HASH_CON
|
|
+#define AES_START LQ_AES_CON
|
|
+
|
|
+#define LQ_MPS (KSEG1 | 0x1F107000)
|
|
+#define LQ_MPS_CHIPID ((volatile u32*)(LQ_MPS + 0x0344))
|
|
+#define LQ_MPS_CHIPID_VERSION_GET(value) (((value) >> 28) & 0xF)
|
|
+#define LQ_MPS_CHIPID_VERSION_SET(value) (((value) & 0xF) << 28)
|
|
+#define LQ_MPS_CHIPID_PARTNUM_GET(value) (((value) >> 12) & 0xFFFF)
|
|
+#define LQ_MPS_CHIPID_PARTNUM_SET(value) (((value) & 0xFFFF) << 12)
|
|
+#define LQ_MPS_CHIPID_MANID_GET(value) (((value) >> 1) & 0x7FF)
|
|
+#define LQ_MPS_CHIPID_MANID_SET(value) (((value) & 0x7FF) << 1)
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_DMA
|
|
+# define DEU_DWORD_REORDERING(ptr, buffer, in_out, bytes) \
|
|
+ deu_dma_align(ptr, buffer, in_out, bytes)
|
|
+# define AES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes) \
|
|
+ deu_aes_dma_memcpy(outcopy, out_dma, out_arg, nbytes)
|
|
+# define DES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes) \
|
|
+ deu_des_dma_memcpy(outcopy, out_dma, out_arg, nbytes)
|
|
+# define BUFFER_IN 1
|
|
+# define BUFFER_OUT 0
|
|
+# define DELAY_PERIOD 9
|
|
+# define AES_ALGO 1
|
|
+# define DES_ALGO 0
|
|
+# define FREE_MEMORY(buff) deu_dma_free(buff)
|
|
+# define ALLOCATE_MEMORY(val, type) type ? \
|
|
+ deu_aes_dma_malloc(val) : \
|
|
+ deu_des_dma_malloc(val)
|
|
+#endif /* CONFIG_CRYPTO_DEV_DMA */
|
|
+
|
|
+#define INPUT_ENDIAN_SWAP(input) deu_input_swap(input)
|
|
+#define DEU_ENDIAN_SWAP(input) deu_endian_swap(input)
|
|
+#define AES_DMA_MISC_CONFIG()
|
|
+
|
|
+#define WAIT_AES_DMA_READY() \
|
|
+ do { \
|
|
+ int i; \
|
|
+ volatile struct deu_dma *dma = \
|
|
+ (struct deu_dma *) LQ_DEU_DMA_CON; \
|
|
+ volatile struct deu_aes *aes = \
|
|
+ (volatile struct deu_aes *) AES_START; \
|
|
+ for (i = 0; i < 10; i++) \
|
|
+ udelay(DELAY_PERIOD); \
|
|
+ while (dma->ctrl.BSY) {}; \
|
|
+ while (aes->ctrl.BUS) {}; \
|
|
+ } while (0)
|
|
+
|
|
+#define WAIT_DES_DMA_READY() \
|
|
+ do { \
|
|
+ int i; \
|
|
+ volatile struct deu_dma *dma = \
|
|
+ (struct deu_dma *) LQ_DEU_DMA_CON; \
|
|
+ volatile struct deu_des *des = \
|
|
+ (struct deu_des *) DES_3DES_START; \
|
|
+ for (i = 0; i < 10; i++) \
|
|
+ udelay(DELAY_PERIOD); \
|
|
+ while (dma->ctrl.BSY) {}; \
|
|
+ while (des->ctrl.BUS) {}; \
|
|
+ } while (0)
|
|
+
|
|
+#define SHA_HASH_INIT \
|
|
+ do { \
|
|
+ volatile struct deu_hash *hash = \
|
|
+ (struct deu_hash *) HASH_START; \
|
|
+ hash->ctrl.SM = 1; \
|
|
+ hash->ctrl.ALGO = 0; \
|
|
+ hash->ctrl.INIT = 1; \
|
|
+ } while(0)
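+
+/* ALGO = 0 selects SHA-1 here; the MD5 path (md5.c) programs ALGO = 1 instead. */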
|
|
+
|
|
+/* DEU STRUCTURES */
|
|
+
|
|
+struct deu_clk_ctrl {
|
|
+ u32 Res:26;
|
|
+ u32 FSOE:1;
|
|
+ u32 SBWE:1;
|
|
+ u32 EDIS:1;
|
|
+ u32 SPEN:1;
|
|
+ u32 DISS:1;
|
|
+ u32 DISR:1;
|
|
+};
|
|
+
|
|
+struct deu_des {
|
|
+ struct deu_des_ctrl {
|
|
+ u32 KRE:1;
|
|
+ u32 reserved1:5;
|
|
+ u32 GO:1;
|
|
+ u32 STP:1;
|
|
+ u32 Res2:6;
|
|
+ u32 NDC:1;
|
|
+ u32 ENDI:1;
|
|
+ u32 Res3:2;
|
|
+ u32 F:3;
|
|
+ u32 O:3;
|
|
+ u32 BUS:1;
|
|
+ u32 DAU:1;
|
|
+ u32 ARS:1;
|
|
+ u32 SM:1;
|
|
+ u32 E_D:1;
|
|
+ u32 M:3;
|
|
+ } ctrl;
|
|
+
|
|
+ u32 IHR;
|
|
+ u32 ILR;
|
|
+ u32 K1HR;
|
|
+ u32 K1LR;
|
|
+ u32 K2HR;
|
|
+ u32 K2LR;
|
|
+ u32 K3HR;
|
|
+ u32 K3LR;
|
|
+ u32 IVHR;
|
|
+ u32 IVLR;
|
|
+ u32 OHR;
|
|
+ u32 OLR;
|
|
+};
|
|
+
|
|
+struct deu_aes {
|
|
+ struct deu_aes_ctrl {
|
|
+ u32 KRE:1;
|
|
+ u32 reserved1:4;
|
|
+ u32 PNK:1;
|
|
+ u32 GO:1;
|
|
+ u32 STP:1;
|
|
+ u32 reserved2:6;
|
|
+ u32 NDC:1;
|
|
+ u32 ENDI:1;
|
|
+ u32 reserved3:2;
|
|
+ u32 F:3; /* fbs */
|
|
+ u32 O:3; /* om */
|
|
+ u32 BUS:1; /* bsy */
|
|
+ u32 DAU:1;
|
|
+ u32 ARS:1;
|
|
+ u32 SM:1;
|
|
+ u32 E_D:1;
|
|
+ u32 KV:1;
|
|
+ u32 K:2; /* KL */
|
|
+ } ctrl;
|
|
+
|
|
+ u32 ID3R; /* 80h */
|
|
+ u32 ID2R; /* 84h */
|
|
+ u32 ID1R; /* 88h */
|
|
+ u32 ID0R; /* 8Ch */
|
|
+ u32 K7R; /* 90h */
|
|
+ u32 K6R; /* 94h */
|
|
+ u32 K5R; /* 98h */
|
|
+ u32 K4R; /* 9Ch */
|
|
+ u32 K3R; /* A0h */
|
|
+ u32 K2R; /* A4h */
|
|
+ u32 K1R; /* A8h */
|
|
+ u32 K0R; /* ACh */
|
|
+ u32 IV3R; /* B0h */
|
|
+ u32 IV2R; /* B4h */
|
|
+ u32 IV1R; /* B8h */
|
|
+ u32 IV0R; /* BCh */
|
|
+ u32 OD3R; /* D4h */
|
|
+ u32 OD2R; /* D8h */
|
|
+ u32 OD1R; /* DCh */
|
|
+ u32 OD0R; /* E0h */
|
|
+};
|
|
+
|
|
+struct deu_hash {
|
|
+ struct deu_hash_ctrl {
|
|
+ u32 reserved1:5;
|
|
+ u32 KHS:1;
|
|
+ u32 GO:1;
|
|
+ u32 INIT:1;
|
|
+ u32 reserved2:6;
|
|
+ u32 NDC:1;
|
|
+ u32 ENDI:1;
|
|
+ u32 reserved3:7;
|
|
+ u32 DGRY:1;
|
|
+ u32 BSY:1;
|
|
+ u32 reserved4:1;
|
|
+ u32 IRCL:1;
|
|
+ u32 SM:1;
|
|
+ u32 KYUE:1;
|
|
+ u32 HMEN:1;
|
|
+ u32 SSEN:1;
|
|
+ u32 ALGO:1;
|
|
+ } ctrl;
|
|
+
|
|
+ u32 MR; /* B4h */
|
|
+ u32 D1R; /* B8h */
|
|
+ u32 D2R; /* BCh */
|
|
+ u32 D3R; /* C0h */
|
|
+ u32 D4R; /* C4h */
|
|
+ u32 D5R; /* C8h */
|
|
+ u32 dummy; /* CCh */
|
|
+ u32 KIDX; /* D0h */
|
|
+ u32 KEY; /* D4h */
|
|
+ u32 DBN; /* D8h */
|
|
+};
|
|
+
|
|
+struct deu_dma {
|
|
+ struct deu_dma_ctrl {
|
|
+ u32 reserved1:22;
|
|
+ u32 BS:2;
|
|
+ u32 BSY:1;
|
|
+ u32 reserved2:1;
|
|
+ u32 ALGO:2;
|
|
+ u32 RXCLS:2;
|
|
+ u32 reserved3:1;
|
|
+ u32 EN:1;
|
|
+ } ctrl;
|
|
+};
|
|
+
|
|
+#endif /* DEU_DANUBE_H */
|
|
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/deu_dma.c
|
|
@@ -0,0 +1,147 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus
|
|
+ */
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup LQ_API
|
|
+ \brief Lantiq DEU driver module
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file deu_dma.c
|
|
+ \ingroup LQ_DEU
|
|
+ \brief DMA DEU driver file
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DMA_FUNCTIONS LQ_DMA_FUNCTIONS
|
|
+ \ingroup LQ_DEU
|
|
+ \brief DMA DEU driver functions
|
|
+*/
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/delay.h>
|
|
+#include <asm/io.h>
|
|
+#include "deu.h"
|
|
+#include "deu_dma.h"
|
|
+
|
|
+/* extern struct deu_drv_priv deu_dma_priv; */
|
|
+
|
|
+/** \fn int deu_dma_intr_handler(struct dma_device_info *dma_dev, int status)
|
|
+ * \ingroup LQ_DMA_FUNCTIONS
|
|
+ * \brief callback function for DEU DMA interrupt
|
|
+ * \param dma_dev dma device
|
|
+ * \param status not used
|
|
+*/
|
|
+int deu_dma_intr_handler(struct dma_device_info *dma_dev, int status)
|
|
+{
|
|
+#if 0
|
|
|
|
|
|
+ u8 *buf;
|
|
+ int len = 0;
|
|
+
|
|
+ struct deu_drv_priv *deu_priv = (struct deu_drv_priv *)dma_dev->priv;
|
|
+ /* printk("status:%d \n",status); */
|
|
+ switch(status) {
|
|
+ case RCV_INT:
|
|
+ len = dma_device_read(dma_dev, (u8 **)&buf, NULL);
|
|
+ if ( len != deu_priv->deu_rx_len) {
|
|
+ printk(KERN_ERR "%s packet length %d is not "
|
|
+ "equal to expect %d\n",
|
|
+ __func__, len, deu_priv->deu_rx_len);
|
|
+ return -1;
|
|
+ }
|
|
+ memcpy(deu_priv->deu_rx_buf, buf, deu_priv->deu_rx_len);
|
|
+ /* Reset for next usage */
|
|
+ deu_priv->deu_rx_buf = NULL;
|
|
+ deu_priv->deu_rx_len = 0;
|
|
+ DEU_WAKEUP_EVENT(deu_priv->deu_thread_wait, DEU_EVENT,
|
|
+ deu_priv->deu_event_flags);
|
|
+ break;
|
|
+ case TX_BUF_FULL_INT:
|
|
+ /* delay for buffer to be cleared */
|
|
+ while (len <= 20000) { len++; }
|
|
+ break;
|
|
+
|
|
+ case TRANSMIT_CPT_INT:
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+#endif
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+extern u8 *g_dma_block;
|
|
+extern u8 *g_dma_block2;
|
|
+
|
|
+/** \fn u8 *deu_dma_buffer_alloc(int len, int *byte_offset, void **opt)
|
|
+ * \ingroup LQ_DMA_FUNCTIONS
|
|
+ * \brief callback function for allocating buffers for dma receive descriptors
|
|
+ * \param len not used
|
|
+ * \param byte_offset dma byte offset
|
|
+ * \param *opt not used
|
|
+ *
|
|
+*/
|
|
+u8 *deu_dma_buffer_alloc(int len, int *byte_offset, void **opt)
|
|
+{
|
|
+ u8 *swap = NULL;
|
|
+
|
|
+ /* dma-core needs at least 2 blocks of memory */
|
|
+ swap = g_dma_block;
|
|
+ g_dma_block = g_dma_block2;
|
|
+ g_dma_block2 = swap;
|
|
+
|
|
+ /* dma_cache_wback_inv((unsigned long) g_dma_block,(PAGE_SIZE >> 1)); */
|
|
+ *byte_offset = 0;
|
|
+
|
|
+ return g_dma_block;
|
|
+}
|
|
+
|
|
+/** \fn int deu_dma_buffer_free(u8 * dataptr, void *opt)
|
|
+ * \ingroup LQ_DMA_FUNCTIONS
|
|
+ * \brief callback function for freeing dma transmit descriptors
|
|
+ * \param dataptr data pointer to be freed
|
|
+ * \param opt not used
|
|
+*/
|
|
+int deu_dma_buffer_free(u8 *dataptr, void *opt)
|
|
+{
|
|
+#if 0
|
|
+ printk("Trying to free memory buffer\n");
|
|
+ if (dataptr == NULL && opt == NULL)
|
|
+ return 0;
|
|
+ else if (opt == NULL) {
|
|
+ kfree(dataptr);
|
|
+ return 1;
|
|
+ }
|
|
+ else if (dataptr == NULL) {
|
|
+ kfree(opt);
|
|
+ return 1;
|
|
+ }
|
|
+ else {
|
|
+ kfree(opt);
|
|
+ kfree(dataptr);
|
|
+ }
|
|
+#endif
|
|
+ return 0;
|
|
+}
|
|
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/deu_dma.h
|
|
@@ -0,0 +1,78 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus
|
|
+ */
|
|
+
|
|
+/**
|
|
+ \addtogroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup API
|
|
+ \brief Lantiq DEU driver module
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file deu_dma.h
|
|
+ \ingroup LQ_DEU
|
|
+ \brief DMA DEU driver header file
|
|
+*/
|
|
+
|
|
+#ifndef DEU_DMA_H
|
|
+#define DEU_DMA_H
|
|
+
|
|
+#include <linux/init.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/mm.h>
|
|
+#include <linux/crypto.h>
|
|
+#include <asm/scatterlist.h>
|
|
+#include <asm/byteorder.h>
|
|
+#include <linux/skbuff.h>
|
|
+#include <linux/netdevice.h>
|
|
+
|
|
+#include <asm/ifx/irq.h>
|
|
+#include <asm/ifx/ifx_dma_core.h>
|
|
+#ifndef CONFIG_CRYPTO_DEV_POLL_DMA
|
|
+# define CONFIG_CRYPTO_DEV_POLL_DMA
|
|
+#endif
|
|
+
|
|
+/* must match the size of memory block allocated for
|
|
+ * g_dma_block and g_dma_block2 */
|
|
+#define DEU_MAX_PACKET_SIZE (PAGE_SIZE >> 1)
|
|
+
|
|
+struct lq_deu_device {
|
|
+ struct dma_device_info *dma_device;
|
|
+ u8 *dst;
|
|
+ u8 *src;
|
|
+ int len;
|
|
+ int dst_count;
|
|
+ int src_count;
|
|
+ int recv_count;
|
|
+ int packet_size;
|
|
+ int packet_num;
|
|
+ wait_queue_t wait;
|
|
+};
|
|
+
|
|
+extern struct lq_deu_device lq_deu[1];
|
|
+
|
|
+extern int deu_dma_intr_handler(struct dma_device_info *, int);
|
|
+extern u8 *deu_dma_buffer_alloc(int, int *, void **);
|
|
+extern int deu_dma_buffer_free(u8 *, void *);
|
|
+extern void deu_dma_inactivate_poll(struct dma_device_info* dma_dev);
|
|
+extern void deu_dma_activate_poll(struct dma_device_info* dma_dev);
|
|
+extern struct dma_device_info* deu_dma_reserve(struct dma_device_info**
|
|
+ dma_device);
|
|
+extern int deu_dma_release(struct dma_device_info** dma_device);
|
|
+
|
|
+#endif /* DEU_DMA_H */
|
|
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/md5.c
|
|
@@ -0,0 +1,285 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus
|
|
+ */
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup API
|
|
+ \brief Lantiq DEU driver module
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file md5.c
|
|
+ \ingroup LQ_DEU
|
|
+ \brief MD5 encryption DEU driver file
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_MD5_FUNCTIONS LQ_MD5_FUNCTIONS
|
|
+ \ingroup LQ_DEU
|
|
+ \brief Lantiq DEU MD5 functions
|
|
+*/
|
|
+
|
|
+#include <crypto/internal/hash.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/string.h>
|
|
+#include <linux/crypto.h>
|
|
+#include <linux/types.h>
|
|
+#include <asm/byteorder.h>
|
|
+#include "deu.h"
|
|
+
|
|
+#define MD5_DIGEST_SIZE 16
|
|
+#define MD5_HMAC_BLOCK_SIZE 64
|
|
+#define MD5_BLOCK_WORDS 16
|
|
+#define MD5_HASH_WORDS 4
|
|
+
|
|
+static spinlock_t cipher_lock;
|
|
+
|
|
+struct md5_ctx {
|
|
+ u32 hash[MD5_HASH_WORDS];
|
|
+ u32 block[MD5_BLOCK_WORDS];
|
|
+ u64 byte_count;
|
|
+};
|
|
+
|
|
+/** \fn static u32 md5_endian_swap(u32 input)
|
|
+ * \ingroup LQ_MD5_FUNCTIONS
|
|
+ * \brief perform dword level endian swap
|
|
+ * \param input dword value to be swapped
|
|
+*/
|
|
+static u32 md5_endian_swap(u32 input)
|
|
+{
|
|
+ u8 *ptr = (u8 *)&input;
|
|
+
|
|
+ return ((ptr[3] << 24) | (ptr[2] << 16) | (ptr[1] << 8) | ptr[0]);
|
|
+}
|
|
+
|
|
+/** \fn static void md5_transform(u32 *hash, u32 const *in)
|
|
+ * \ingroup LQ_MD5_FUNCTIONS
|
|
+ * \brief main interface to md5 hardware
|
|
+ * \param hash current hash value
|
|
+ * \param in 64-byte block of input
|
|
+*/
|
|
+static void md5_transform(u32 *hash, u32 const *in)
|
|
+{
|
|
+ int i;
|
|
+ volatile struct deu_hash *hashs = (struct deu_hash *) HASH_START;
|
|
+ ulong flag;
|
|
+
|
|
+ CRTCL_SECT_START;
|
|
+
|
|
+ for (i = 0; i < 16; i++) {
|
|
+ hashs->MR = md5_endian_swap(in[i]);
|
|
+ };
|
|
+
|
|
+ /* wait for processing */
|
|
+ while (hashs->ctrl.BSY) {
|
|
+ /* this will not take long */
|
|
+ }
|
|
+
|
|
+ CRTCL_SECT_END;
|
|
+}
|
|
+
|
|
+/** \fn static inline void md5_transform_helper(struct md5_ctx *ctx)
|
|
+ * \ingroup LQ_MD5_FUNCTIONS
|
|
+ * \brief interfacing function for md5_transform()
|
|
+ * \param ctx crypto context
|
|
+*/
|
|
+static inline void md5_transform_helper(struct md5_ctx *ctx)
|
|
+{
|
|
+ /* le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); */
|
|
+ md5_transform(ctx->hash, ctx->block);
|
|
+}
|
|
+
|
|
+/** \fn static void md5_init(struct crypto_tfm *tfm)
|
|
+ * \ingroup LQ_MD5_FUNCTIONS
|
|
+ * \brief initialize md5 hardware
|
|
+ * \param tfm linux crypto algo transform
|
|
+*/
|
|
+static int md5_init(struct shash_desc *desc)
|
|
+{
|
|
+ struct md5_ctx *mctx = shash_desc_ctx(desc);
|
|
+ volatile struct deu_hash *hash = (struct deu_hash *) HASH_START;
|
|
+
|
|
+ hash->ctrl.SM = 1;
|
|
+ hash->ctrl.ALGO = 1; /* 1 = md5 0 = sha1 */
|
|
+ hash->ctrl.INIT = 1; /* Initialize the hash operation by writing
|
|
+ a '1' to the INIT bit. */
|
|
+
|
|
+ mctx->byte_count = 0;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/** \fn static void md5_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
|
|
+ * \ingroup LQ_MD5_FUNCTIONS
|
|
+ * \brief on-the-fly md5 computation
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param data input data
|
|
+ * \param len size of input data
|
|
+*/
|
|
+static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
|
|
+{
|
|
+ struct md5_ctx *mctx = shash_desc_ctx(desc);
|
|
+ const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
|
|
+
|
|
+ mctx->byte_count += len;
|
|
+
|
|
+ if (avail > len) {
|
|
+ memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
|
|
+ data, len);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
|
|
+ data, avail);
|
|
+
|
|
+ md5_transform_helper(mctx);
|
|
+ data += avail;
|
|
+ len -= avail;
|
|
+
|
|
+ while (len >= sizeof(mctx->block)) {
|
|
+ memcpy(mctx->block, data, sizeof(mctx->block));
|
|
+ md5_transform_helper(mctx);
|
|
+ data += sizeof(mctx->block);
|
|
+ len -= sizeof(mctx->block);
|
|
+ }
|
|
+
|
|
+ memcpy(mctx->block, data, len);
|
|
+
|
|
+ return 0;
|
|
+}
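+
+/*
+ * Worked example for the buffering above: a 100-byte update on a fresh
+ * context sees avail = 64, hashes the first 64 bytes right away and
+ * keeps the remaining 36 bytes in mctx->block for the next update/final.
+ */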
|
|
+
|
|
+/** \fn static void md5_final(struct crypto_tfm *tfm, u8 *out)
|
|
+ * \ingroup LQ_MD5_FUNCTIONS
|
|
+ * \brief compute final md5 value
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param out final md5 output value
|
|
+*/
|
|
+static int md5_final(struct shash_desc *desc, u8 *out)
|
|
+{
|
|
+ struct md5_ctx *mctx = shash_desc_ctx(desc);
|
|
+ const unsigned int offset = mctx->byte_count & 0x3f;
|
|
+ char *p = (char *)mctx->block + offset;
|
|
+ int padding = 56 - (offset + 1);
|
|
+ volatile struct deu_hash *hashs = (struct deu_hash *) HASH_START;
|
|
+ unsigned long flag;
|
|
+
|
|
+ *p++ = 0x80;
|
|
+ if (padding < 0) {
|
|
+ memset(p, 0x00, padding + sizeof (u64));
|
|
+ md5_transform_helper(mctx);
|
|
+ p = (char *)mctx->block;
|
|
+ padding = 56;
|
|
+ }
|
|
+
|
|
+ memset(p, 0, padding);
|
|
+ mctx->block[14] = md5_endian_swap(mctx->byte_count << 3);
|
|
+ mctx->block[15] = md5_endian_swap(mctx->byte_count >> 29);
|
|
+
|
|
+#if 0
|
|
+ le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
|
|
+ sizeof(u64)) / sizeof(u32));
|
|
+#endif
|
|
+
|
|
+ md5_transform(mctx->hash, mctx->block);
|
|
+
|
|
+ CRTCL_SECT_START;
|
|
+
|
|
+ *((u32 *) out + 0) = md5_endian_swap(hashs->D1R);
|
|
+ *((u32 *) out + 1) = md5_endian_swap(hashs->D2R);
|
|
+ *((u32 *) out + 2) = md5_endian_swap(hashs->D3R);
|
|
+ *((u32 *) out + 3) = md5_endian_swap(hashs->D4R);
|
|
+
|
|
+ CRTCL_SECT_END;
|
|
+
|
|
+ /* Wipe context */
|
|
+ memset(mctx, 0, sizeof(*mctx));
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int md5_export(struct shash_desc *desc, void *out)
|
|
+{
|
|
+ struct md5_ctx *sctx = shash_desc_ctx(desc);
|
|
+
|
|
+ memcpy(out, sctx, sizeof(*sctx));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int md5_import(struct shash_desc *desc, const void *in)
|
|
+{
|
|
+ struct md5_ctx *sctx = shash_desc_ctx(desc);
|
|
+
|
|
+ memcpy(sctx, in, sizeof(*sctx));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * \brief MD5 function mappings
|
|
+*/
|
|
+static struct shash_alg md5_alg = {
|
|
+ .digestsize = MD5_DIGEST_SIZE,
|
|
+ .init = md5_init,
|
|
+ .update = md5_update,
|
|
+ .final = md5_final,
|
|
+ .export = md5_export,
|
|
+ .import = md5_import,
|
|
+ .descsize = sizeof(struct md5_ctx),
|
|
+ .statesize = sizeof(struct md5_ctx),
|
|
+ .base = {
|
|
+ .cra_name = "md5",
|
|
+ .cra_driver_name = "lq_deu-md5",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
|
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
|
|
+ .cra_module = THIS_MODULE,
|
|
+ }
|
|
+};
|
|
+
|
|
+/** \fn int lq_deu_init_md5(void)
|
|
+ * \ingroup LQ_MD5_FUNCTIONS
|
|
+ * \brief initialize md5 driver
|
|
+*/
|
|
+int lq_deu_init_md5(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if ((ret = crypto_register_shash(&md5_alg)))
|
|
+ goto md5_err;
|
|
+
|
|
+ CRTCL_SECT_INIT;
|
|
+
|
|
+ printk(KERN_NOTICE "Lantiq DEU MD5 initialized%s.\n",
|
|
+ disable_deudma ? "" : " (DMA)");
|
|
+ return ret;
|
|
+
|
|
+md5_err:
|
|
+ printk(KERN_ERR "Lantiq DEU MD5 initialization failed!\n");
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/** \fn void lq_deu_fini_md5(void)
|
|
+ * \ingroup LQ_MD5_FUNCTIONS
|
|
+ * \brief unregister md5 driver
|
|
+*/
|
|
+
|
|
+void lq_deu_fini_md5(void)
|
|
+{
|
|
+ crypto_unregister_shash(&md5_alg);
|
|
+}
|
|
+
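+/*
+ * Consumer-side sketch, not part of this driver: once md5_alg is
+ * registered, the transform is reachable through the generic shash API.
+ * The function name is hypothetical; assumes <crypto/hash.h>,
+ * <linux/slab.h> and <linux/err.h>, and a kernel of this era where
+ * struct shash_desc still has a flags field.
+ */
+#if 0
+static int md5_digest_example(const u8 *data, unsigned int len,
+			      u8 out[MD5_DIGEST_SIZE])
+{
+	struct crypto_shash *tfm;
+	struct shash_desc *desc;
+	int ret;
+
+	tfm = crypto_alloc_shash("md5", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
+		       GFP_KERNEL);
+	if (!desc) {
+		crypto_free_shash(tfm);
+		return -ENOMEM;
+	}
+	desc->tfm = tfm;
+	desc->flags = 0;
+
+	ret = crypto_shash_digest(desc, data, len, out);
+
+	kfree(desc);
+	crypto_free_shash(tfm);
+	return ret;
+}
+#endif
+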
|
|
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/md5_hmac.c
|
|
@@ -0,0 +1,329 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus
|
|
+ */
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup API
|
|
+ \brief Lantiq DEU driver module
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file md5_hmac.c
|
|
+ \ingroup LQ_DEU
|
|
+ \brief MD5-HMAC encryption DEU driver file
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_MD5_HMAC_FUNCTIONS LQ_MD5_HMAC_FUNCTIONS
|
|
+ \ingroup LQ_DEU
|
|
+ \brief Lantiq md5-hmac driver functions
|
|
+*/
|
|
+
|
|
+#include <crypto/internal/hash.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/string.h>
|
|
+#include <linux/crypto.h>
|
|
+#include <linux/types.h>
|
|
+#include <asm/byteorder.h>
|
|
+#include "deu.h"
|
|
+
|
|
+#define MD5_DIGEST_SIZE 16
|
|
+#define MD5_HMAC_BLOCK_SIZE 64
|
|
+#define MD5_BLOCK_WORDS 16
|
|
+#define MD5_HASH_WORDS 4
|
|
+#define MD5_HMAC_DBN_TEMP_SIZE 1024 /* size in dword,
|
|
+ needed for dbn workaround */
|
|
+
|
|
+static spinlock_t cipher_lock;
|
|
+
|
|
+struct md5_hmac_ctx {
|
|
+ u32 hash[MD5_HASH_WORDS];
|
|
+ u32 block[MD5_BLOCK_WORDS];
|
|
+ u64 byte_count;
|
|
+ u32 dbn;
|
|
+ u32 temp[MD5_HMAC_DBN_TEMP_SIZE];
|
|
+};
|
|
+
|
|
+/** \fn static u32 md5_endian_swap(u32 input)
|
|
+ * \ingroup LQ_MD5_HMAC_FUNCTIONS
|
|
+ * \brief perform dword level endian swap
|
|
+ * \param input dword value to be swapped
|
|
+*/
|
|
+static u32 md5_endian_swap(u32 input)
|
|
+{
|
|
+ u8 *ptr = (u8 *)&input;
|
|
+
|
|
+ return ((ptr[3] << 24) | (ptr[2] << 16) | (ptr[1] << 8) | ptr[0]);
|
|
+}
|
|
+
|
|
+/** \fn static void md5_hmac_transform(struct crypto_tfm *tfm, u32 const *in)
|
|
+ * \ingroup LQ_MD5_HMAC_FUNCTIONS
|
|
+ * \brief save input block to context
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param in 64-byte block of input
|
|
+*/
|
|
+static void md5_hmac_transform(struct shash_desc *desc, u32 const *in)
|
|
+{
|
|
+ struct md5_hmac_ctx *mctx = shash_desc_ctx(desc);
|
|
+
|
|
+ memcpy(&mctx->temp[mctx->dbn<<4], in, 64); /* dbn workaround */
|
|
+ mctx->dbn += 1;
|
|
+
|
|
+ if ( (mctx->dbn<<4) > MD5_HMAC_DBN_TEMP_SIZE )
|
|
+ {
|
|
+ printk("MD5_HMAC_DBN_TEMP_SIZE exceeded\n");
|
|
+ }
|
|
+}
|
|
+
|
|
+/** \fn int md5_hmac_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
|
|
+ * \ingroup LQ_MD5_HMAC_FUNCTIONS
|
|
+ * \brief sets md5 hmac key
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param key input key
|
|
+ * \param keylen key length greater than 64 bytes IS NOT SUPPORTED
|
|
+*/
|
|
+static int md5_hmac_setkey(struct crypto_shash *tfm,
|
|
+ const u8 *key,
|
|
+ unsigned int keylen)
|
|
+{
|
|
+ volatile struct deu_hash *hash = (struct deu_hash *) HASH_START;
|
|
+ int i, j;
|
|
+ u32 *in_key = (u32 *)key;
|
|
+
|
|
+ hash->KIDX = 0x80000000; /* reset all 16 words of the key to '0' */
|
|
+ asm("sync");
|
|
+
|
|
+ j = 0;
|
|
+ for (i = 0; i < keylen; i+=4)
|
|
+ {
|
|
+ hash->KIDX = j;
|
|
+ asm("sync");
|
|
+ hash->KEY = *((u32 *) in_key + j);
|
|
+ j++;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
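+
+/*
+ * Key loading example: after the KIDX = 0x80000000 reset in
+ * md5_hmac_setkey(), a 16-byte key is written as key words 0..3 through
+ * the KIDX/KEY pair; the remaining words stay zero, which matches the
+ * usual HMAC zero-padding of keys shorter than the 64-byte block.
+ */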
|
|
+
|
|
+/** \fn void md5_hmac_init(struct crypto_tfm *tfm)
|
|
+ * \ingroup LQ_MD5_HMAC_FUNCTIONS
|
|
+ * \brief initialize md5 hmac context
|
|
+ * \param tfm linux crypto algo transform
|
|
+*/
|
|
+static int md5_hmac_init(struct shash_desc *desc)
|
|
+{
|
|
+ struct md5_hmac_ctx *mctx = shash_desc_ctx(desc);
|
|
+
|
|
+ memset(mctx, 0, sizeof(struct md5_hmac_ctx));
|
|
+ mctx->dbn = 0; /* dbn workaround */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/** \fn void md5_hmac_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
|
|
+ * \ingroup LQ_MD5_HMAC_FUNCTIONS
|
|
+ * \brief on-the-fly md5 hmac computation
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param data input data
|
|
+ * \param len size of input data
|
|
+*/
|
|
+static int md5_hmac_update(struct shash_desc *desc,
|
|
+ const u8 *data,
|
|
+ unsigned int len)
|
|
+{
|
|
+ struct md5_hmac_ctx *mctx = shash_desc_ctx(desc);
|
|
+ const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
|
|
+
|
|
+ mctx->byte_count += len;
|
|
+
|
|
+ if (avail > len) {
|
|
+ memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
|
|
+ data, len);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
|
|
+ data, avail);
|
|
+
|
|
+ md5_hmac_transform(desc, mctx->block);
|
|
+ data += avail;
|
|
+ len -= avail;
|
|
+
|
|
+ while (len >= sizeof(mctx->block)) {
|
|
+ memcpy(mctx->block, data, sizeof(mctx->block));
|
|
+ md5_hmac_transform(desc, mctx->block);
|
|
+ data += sizeof(mctx->block);
|
|
+ len -= sizeof(mctx->block);
|
|
+ }
|
|
+
|
|
+ memcpy(mctx->block, data, len);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/** \fn void md5_hmac_final(struct crypto_tfm *tfm, u8 *out)
|
|
+ * \ingroup LQ_MD5_HMAC_FUNCTIONS
|
|
+ * \brief compute final md5 hmac value
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param out final md5 hmac output value
|
|
+*/
|
|
+static int md5_hmac_final(struct shash_desc *desc, u8 *out)
|
|
+{
|
|
+ struct md5_hmac_ctx *mctx = shash_desc_ctx(desc);
|
|
+ const unsigned int offset = mctx->byte_count & 0x3f;
|
|
+ char *p = (char *)mctx->block + offset;
|
|
+ int padding = 56 - (offset + 1);
|
|
+ volatile struct deu_hash *hashs = (struct deu_hash *) HASH_START;
|
|
+ u32 flag;
|
|
+ int i = 0;
|
|
+ int dbn;
|
|
+ u32 *in = &mctx->temp[0];
|
|
+
|
|
+ *p++ = 0x80;
|
|
+ if (padding < 0) {
|
|
+ memset(p, 0x00, padding + sizeof (u64));
|
|
+ md5_hmac_transform(desc, mctx->block);
|
|
+ p = (char *)mctx->block;
|
|
+ padding = 56;
|
|
+ }
|
|
+
|
|
+ memset(p, 0, padding);
|
|
+	/* need to add the 512 bits of the IPAD operation */
|
|
+ mctx->block[14] = md5_endian_swap((mctx->byte_count + 64) << 3);
|
|
+ mctx->block[15] = 0x00000000;
|
|
+
|
|
+ md5_hmac_transform(desc, mctx->block);
|
|
+
|
|
+ CRTCL_SECT_START;
|
|
+
|
|
+ printk("dbn = %d\n", mctx->dbn);
|
|
+ hashs->DBN = mctx->dbn;
|
|
+
|
|
+ /* khs, go, init, ndc, endi, kyue, hmen, md5 */
|
|
+ *LQ_HASH_CON = 0x0703002D;
|
|
+
|
|
+ /* wait for processing */
|
|
+ while (hashs->ctrl.BSY) {
|
|
+ /* this will not take long */
|
|
+ }
|
|
+
|
|
+ for (dbn = 0; dbn < mctx->dbn; dbn++)
|
|
+ {
|
|
+ for (i = 0; i < 16; i++) {
|
|
+ hashs->MR = in[i];
|
|
+ };
|
|
+
|
|
+ hashs->ctrl.GO = 1;
|
|
+ asm("sync");
|
|
+
|
|
+ /* wait for processing */
|
|
+ while (hashs->ctrl.BSY) {
|
|
+ /* this will not take long */
|
|
+ }
|
|
+
|
|
+ in += 16;
|
|
+ }
|
|
+
|
|
+#if 1
|
|
+ /* wait for digest ready */
|
|
+ while (! hashs->ctrl.DGRY) {
|
|
+ /* this will not take long */
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ *((u32 *) out + 0) = hashs->D1R;
|
|
+ *((u32 *) out + 1) = hashs->D2R;
|
|
+ *((u32 *) out + 2) = hashs->D3R;
|
|
+ *((u32 *) out + 3) = hashs->D4R;
|
|
+ *((u32 *) out + 4) = hashs->D5R;
|
|
+
|
|
+ CRTCL_SECT_END;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int md5_hmac_export(struct shash_desc *desc, void *out)
|
|
+{
|
|
+ struct md5_hmac_ctx *sctx = shash_desc_ctx(desc);
|
|
+
|
|
+ memcpy(out, sctx, sizeof(*sctx));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int md5_hmac_import(struct shash_desc *desc, const void *in)
|
|
+{
|
|
+ struct md5_hmac_ctx *sctx = shash_desc_ctx(desc);
|
|
+
|
|
+ memcpy(sctx, in, sizeof(*sctx));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * \brief MD5_HMAC function mappings
|
|
+*/
|
|
+static struct shash_alg md5_hmac_alg = {
|
|
+ .digestsize = MD5_DIGEST_SIZE,
|
|
+ .init = md5_hmac_init,
|
|
+ .update = md5_hmac_update,
|
|
+ .final = md5_hmac_final,
|
|
+ .setkey = md5_hmac_setkey,
|
|
+ .export = md5_hmac_export,
|
|
+ .import = md5_hmac_import,
|
|
+ .descsize = sizeof(struct md5_hmac_ctx),
|
|
+ .statesize = sizeof(struct md5_hmac_ctx),
|
|
+ .base = {
|
|
+ .cra_name = "hmac(md5)",
|
|
+ .cra_driver_name = "lq_deu-md5_hmac",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
|
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
|
|
+ .cra_module = THIS_MODULE,
|
|
+ }
|
|
+};
|
|
+
|
|
+/** \fn int lq_deu_init_md5_hmac(void)
|
|
+ * \ingroup LQ_MD5_HMAC_FUNCTIONS
|
|
+ * \brief initialize md5 hmac driver
|
|
+*/
|
|
+int lq_deu_init_md5_hmac(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if ((ret = crypto_register_shash(&md5_hmac_alg)))
|
|
+ goto md5_hmac_err;
|
|
+
|
|
+ CRTCL_SECT_INIT;
|
|
+
|
|
+ printk(KERN_NOTICE "Lantiq DEU MD5_HMAC initialized%s.\n",
|
|
+ disable_deudma ? "" : " (DMA)");
|
|
+ return ret;
|
|
+
|
|
+md5_hmac_err:
|
|
+ printk(KERN_ERR "Lantiq DEU MD5_HMAC initialization failed!\n");
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/** \fn void lq_deu_fini_md5_hmac(void)
|
|
+ * \ingroup LQ_MD5_HMAC_FUNCTIONS
|
|
+ * \brief unregister md5 hmac driver
|
|
+*/
|
|
+void lq_deu_fini_md5_hmac(void)
|
|
+{
|
|
+ crypto_unregister_shash(&md5_hmac_alg);
|
|
+}
|
|
+
|
|
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/sha1.c
|
|
@@ -0,0 +1,262 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus
|
|
+ */
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup API
|
|
+ \brief Lantiq DEU driver module
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file sha1.c
|
|
+ \ingroup LQ_DEU
|
|
+ \brief SHA1 encryption DEU driver file
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_SHA1_FUNCTIONS LQ_SHA1_FUNCTIONS
|
|
+ \ingroup LQ_DEU
|
|
+ \brief Lantiq DEU sha1 functions
|
|
+*/
|
|
+
|
|
+
|
|
+#include <crypto/internal/hash.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/mm.h>
|
|
+#include <linux/crypto.h>
|
|
+#include <linux/cryptohash.h>
|
|
+#include <crypto/sha.h>
|
|
+#include <linux/types.h>
|
|
+#include <asm/scatterlist.h>
|
|
+#include <asm/byteorder.h>
|
|
+#include "deu.h"
|
|
+
|
|
+#define SHA1_DIGEST_SIZE 20
|
|
+#define SHA1_HMAC_BLOCK_SIZE 64
|
|
+
|
|
+static spinlock_t cipher_lock;
|
|
+
|
|
+/*
|
|
+ * \brief SHA1 private structure
|
|
+*/
|
|
+struct sha1_ctx {
|
|
+ u64 count;
|
|
+ u32 state[5];
|
|
+ u8 buffer[64];
|
|
+};
|
|
+
|
|
+/** \fn static void sha1_transform(u32 *state, const u32 *in)
|
|
+ * \ingroup LQ_SHA1_FUNCTIONS
|
|
+ * \brief main interface to sha1 hardware
|
|
+ * \param state current state
|
|
+ * \param in 64-byte block of input
|
|
+*/
|
|
+static void sha1_transform(u32 *state, const u32 *in)
|
|
+{
|
|
+ int i = 0;
|
|
+ volatile struct deu_hash *hashs = (struct deu_hash *) HASH_START;
|
|
+ unsigned long flag;
|
|
+
|
|
+ CRTCL_SECT_START;
|
|
+
|
|
+ for (i = 0; i < 16; i++) {
|
|
+ hashs->MR = in[i];
|
|
+ };
|
|
+
|
|
+ /* wait for processing */
|
|
+ while (hashs->ctrl.BSY) {
|
|
+ /* this will not take long */
|
|
+ }
|
|
+
|
|
+ CRTCL_SECT_END;
|
|
+}
|
|
+
|
|
+/** \fn static void sha1_init(struct crypto_tfm *tfm)
|
|
+ * \ingroup LQ_SHA1_FUNCTIONS
|
|
+ * \brief initialize sha1 hardware
|
|
+ * \param tfm linux crypto algo transform
|
|
+*/
|
|
+static int sha1_init(struct shash_desc *desc)
|
|
+{
|
|
+ struct sha1_ctx *sctx = shash_desc_ctx(desc);
|
|
+
|
|
+ SHA_HASH_INIT;
|
|
+
|
|
+ sctx->count = 0;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/** \fn static void sha1_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
|
|
+ * \ingroup LQ_SHA1_FUNCTIONS
|
|
+ * \brief on-the-fly sha1 computation
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param data input data
|
|
+ * \param len size of input data
|
|
+*/
|
|
+static int sha1_update(struct shash_desc *desc, const u8 *data, unsigned int len)
|
|
+{
|
|
+ struct sha1_ctx *sctx = shash_desc_ctx(desc);
|
|
+ unsigned int i, j;
|
|
+
|
|
+ j = (sctx->count >> 3) & 0x3f;
|
|
+ sctx->count += len << 3;
|
|
+
|
|
+ if ((j + len) > 63) {
|
|
+ memcpy(&sctx->buffer[j], data, (i = 64 - j));
|
|
+ sha1_transform(sctx->state, (const u32 *)sctx->buffer);
|
|
+ for (; i + 63 < len; i += 64) {
|
|
+ sha1_transform(sctx->state, (const u32 *)&data[i]);
|
|
+ }
|
|
+
|
|
+ j = 0;
|
|
+ } else {
|
|
+ i = 0;
|
|
+ }
|
|
+
|
|
+ memcpy(&sctx->buffer[j], &data[i], len - i);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/** \fn static void sha1_final(struct crypto_tfm *tfm, u8 *out)
|
|
+ * \ingroup LQ_SHA1_FUNCTIONS
|
|
+ * \brief compute final sha1 value
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param out final sha1 output value
|
|
+*/
|
|
+static int sha1_final(struct shash_desc *desc, u8 *out)
|
|
+{
|
|
+ struct sha1_ctx *sctx = shash_desc_ctx(desc);
|
|
+ u32 index, padlen;
|
|
+ u64 t;
|
|
+ u8 bits[8] = { 0, };
|
|
+ static const u8 padding[64] = { 0x80, };
|
|
+ volatile struct deu_hash *hashs = (struct deu_hash *) HASH_START;
|
|
+ ulong flag;
|
|
+
|
|
+ t = sctx->count;
|
|
+ bits[7] = 0xff & t;
|
|
+ t >>= 8;
|
|
+ bits[6] = 0xff & t;
|
|
+ t >>= 8;
|
|
+ bits[5] = 0xff & t;
|
|
+ t >>= 8;
|
|
+ bits[4] = 0xff & t;
|
|
+ t >>= 8;
|
|
+ bits[3] = 0xff & t;
|
|
+ t >>= 8;
|
|
+ bits[2] = 0xff & t;
|
|
+ t >>= 8;
|
|
+ bits[1] = 0xff & t;
|
|
+ t >>= 8;
|
|
+ bits[0] = 0xff & t;
|
|
+
|
|
+ /* Pad out to 56 mod 64 */
|
|
+ index = (sctx->count >> 3) & 0x3f;
|
|
+ padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
|
|
+ sha1_update(desc, padding, padlen);
|
|
+
|
|
+ /* Append length */
|
|
+ sha1_update(desc, bits, sizeof bits);
|
|
+
|
|
+ CRTCL_SECT_START;
|
|
+
|
|
+ *((u32 *) out + 0) = hashs->D1R;
|
|
+ *((u32 *) out + 1) = hashs->D2R;
|
|
+ *((u32 *) out + 2) = hashs->D3R;
|
|
+ *((u32 *) out + 3) = hashs->D4R;
|
|
+ *((u32 *) out + 4) = hashs->D5R;
|
|
+
|
|
+ CRTCL_SECT_END;
|
|
+
|
|
+ /* Wipe context*/
|
|
+ memset(sctx, 0, sizeof *sctx);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int sha1_export(struct shash_desc *desc, void *out)
|
|
+{
|
|
+ struct sha1_ctx *sctx = shash_desc_ctx(desc);
|
|
+
|
|
+ memcpy(out, sctx, sizeof(*sctx));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int sha1_import(struct shash_desc *desc, const void *in)
|
|
+{
|
|
+ struct sha1_ctx *sctx = shash_desc_ctx(desc);
|
|
+
|
|
+ memcpy(sctx, in, sizeof(*sctx));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * \brief SHA1 function mappings
|
|
+*/
|
|
+static struct shash_alg deu_sha1_alg = {
|
|
+ .digestsize = SHA1_DIGEST_SIZE,
|
|
+ .init = sha1_init,
|
|
+ .update = sha1_update,
|
|
+ .final = sha1_final,
|
|
+ .export = sha1_export,
|
|
+ .import = sha1_import,
|
|
+ .descsize = sizeof(struct sha1_ctx),
|
|
+ .statesize = sizeof(struct sha1_ctx),
|
|
+ .base = {
|
|
+ .cra_name = "sha1",
|
|
+ .cra_driver_name = "lq_deu-sha1",
|
|
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
|
+ .cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
|
|
+ .cra_module = THIS_MODULE,
|
|
+ }
|
|
+};
|
|
+
|
|
+/** \fn int lq_deu_init_sha1(void)
|
|
+ * \ingroup LQ_SHA1_FUNCTIONS
|
|
+ * \brief initialize sha1 driver
|
|
+*/
|
|
+int lq_deu_init_sha1(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if ((ret = crypto_register_shash(&deu_sha1_alg)))
|
|
+ goto sha1_err;
|
|
+
|
|
+ CRTCL_SECT_INIT;
|
|
+
|
|
+ printk(KERN_NOTICE "Lantiq DEU SHA1 initialized%s.\n",
|
|
+ disable_deudma ? "" : " (DMA)");
|
|
+ return ret;
|
|
+
|
|
+sha1_err:
|
|
+ printk(KERN_ERR "Lantiq DEU SHA1 initialization failed!\n");
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/** \fn void lq_deu_fini_sha1(void)
|
|
+ * \ingroup LQ_SHA1_FUNCTIONS
|
|
+ * \brief unregister sha1 driver
|
|
+*/
|
|
+void lq_deu_fini_sha1(void)
|
|
+{
|
|
+ crypto_unregister_shash(&deu_sha1_alg);
|
|
+}
|
|
--- /dev/null
|
|
+++ b/drivers/crypto/lantiq/sha1_hmac.c
|
|
@@ -0,0 +1,325 @@
|
|
+/*
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
+ * GNU General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License
|
|
+ * along with this program; if not, write to the Free Software
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
|
|
+ *
|
|
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
|
|
+ * Copyright (C) 2009 Mohammad Firdaus
|
|
+ */
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
|
|
+ \ingroup API
|
|
+ \brief Lantiq DEU driver module
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \file sha1_hmac.c
|
|
+ \ingroup LQ_DEU
|
|
+ \brief SHA1-HMAC DEU driver file
|
|
+*/
|
|
+
|
|
+/**
|
|
+ \defgroup LQ_SHA1_HMAC_FUNCTIONS LQ_SHA1_HMAC_FUNCTIONS
|
|
+ \ingroup LQ_DEU
|
|
+ \brief Lantiq sha1 hmac functions
|
|
+*/
|
|
+
|
|
+
|
|
+#include <crypto/internal/hash.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/mm.h>
|
|
+#include <linux/crypto.h>
|
|
+#include <linux/cryptohash.h>
|
|
+#include <linux/types.h>
|
|
+#include <asm/scatterlist.h>
|
|
+#include <asm/byteorder.h>
|
|
+#include <linux/delay.h>
|
|
+#include "deu.h"
|
|
+
|
|
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_SHA1_HMAC
|
|
+
|
|
+#define SHA1_DIGEST_SIZE 20
|
|
+#define SHA1_HMAC_BLOCK_SIZE 64
|
|
+/* size in dword, needed for dbn workaround */
|
|
+#define SHA1_HMAC_DBN_TEMP_SIZE 1024
|
|
+
|
|
+static spinlock_t cipher_lock;
|
|
+
|
|
+struct sha1_hmac_ctx {
|
|
+ u64 count;
|
|
+ u32 state[5];
|
|
+ u8 buffer[64];
|
|
+ u32 dbn;
|
|
+ u32 temp[SHA1_HMAC_DBN_TEMP_SIZE];
|
|
+};
|
|
+
|
|
+/** \fn static void sha1_hmac_transform(struct crypto_tfm *tfm, u32 const *in)
|
|
+ * \ingroup LQ_SHA1_HMAC_FUNCTIONS
|
|
+ * \brief save input block to context
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param in 64-byte block of input
|
|
+*/
|
|
+static void sha1_hmac_transform(struct shash_desc *desc, u32 const *in)
|
|
+{
|
|
+ struct sha1_hmac_ctx *sctx = shash_desc_ctx(desc);
|
|
+
|
|
+ memcpy(&sctx->temp[sctx->dbn<<4], in, 64); /* dbn workaround */
|
|
+ sctx->dbn += 1;
|
|
+
|
|
+ if ((sctx->dbn<<4) > SHA1_HMAC_DBN_TEMP_SIZE) {
|
|
+ printk("SHA1_HMAC_DBN_TEMP_SIZE exceeded\n");
|
|
+ }
|
|
+}
|
|
+
|
|
+/** \fn int sha1_hmac_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
|
|
+ * \ingroup LQ_SHA1_HMAC_FUNCTIONS
|
|
+ * \brief sets sha1 hmac key
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param key input key
|
|
+ * \param keylen key length greater than 64 bytes IS NOT SUPPORTED
|
|
+*/
|
|
+static int sha1_hmac_setkey(struct crypto_shash *tfm,
|
|
+ const u8 *key,
|
|
+ unsigned int keylen)
|
|
+{
|
|
+ volatile struct deu_hash *hash = (struct deu_hash *) HASH_START;
|
|
+ int i, j;
|
|
+ u32 *in_key = (u32 *)key;
|
|
+
|
|
+ hash->KIDX = 0x80000000; /* reset all 16 words of the key to '0' */
|
|
+ asm("sync");
|
|
+
|
|
+ j = 0;
|
|
+ for (i = 0; i < keylen; i+=4)
|
|
+ {
|
|
+ hash->KIDX = j;
|
|
+ asm("sync");
|
|
+ hash->KEY = *((u32 *) in_key + j);
|
|
+ j++;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int sha1_hmac_export(struct shash_desc *desc, void *out)
|
|
+{
|
|
+ struct sha1_hmac_ctx *sctx = shash_desc_ctx(desc);
|
|
+
|
|
+ memcpy(out, sctx, sizeof(*sctx));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int sha1_hmac_import(struct shash_desc *desc, const void *in)
|
|
+{
|
|
+ struct sha1_hmac_ctx *sctx = shash_desc_ctx(desc);
|
|
+
|
|
+ memcpy(sctx, in, sizeof(*sctx));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/** \fn void sha1_hmac_init(struct crypto_tfm *tfm)
|
|
+ * \ingroup LQ_SHA1_HMAC_FUNCTIONS
|
|
+ * \brief initialize sha1 hmac context
|
|
+ * \param tfm linux crypto algo transform
|
|
+*/
|
|
+static int sha1_hmac_init(struct shash_desc *desc)
|
|
+{
|
|
+ struct sha1_hmac_ctx *sctx = shash_desc_ctx(desc);
|
|
+
|
|
+ memset(sctx, 0, sizeof(struct sha1_hmac_ctx));
|
|
+ sctx->dbn = 0; /* dbn workaround */
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/** \fn static void sha1_hmac_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
|
|
+ * \ingroup LQ_SHA1_HMAC_FUNCTIONS
|
|
+ * \brief on-the-fly sha1 hmac computation
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param data input data
|
|
+ * \param len size of input data
|
|
+*/
|
|
+static int sha1_hmac_update(struct shash_desc *desc, const u8 *data,
|
|
+ unsigned int len)
|
|
+{
|
|
+ struct sha1_hmac_ctx *sctx = shash_desc_ctx(desc);
|
|
+ unsigned int i, j;
|
|
+
|
|
+ j = (sctx->count >> 3) & 0x3f;
|
|
+ sctx->count += len << 3;
|
|
+ /* printk("sctx->count = %d\n", (sctx->count >> 3)); */
|
|
+
|
|
+ if ((j + len) > 63) {
|
|
+ memcpy(&sctx->buffer[j], data, (i = 64 - j));
|
|
+ sha1_hmac_transform(desc, (const u32 *)sctx->buffer);
|
|
+ for (; i + 63 < len; i += 64) {
|
|
+ sha1_hmac_transform(desc, (const u32 *)&data[i]);
|
|
+ }
|
|
+
|
|
+ j = 0;
|
|
+ } else {
|
|
+ i = 0;
|
|
+ }
|
|
+
|
|
+ memcpy(&sctx->buffer[j], &data[i], len - i);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/** \fn static void sha1_hmac_final(struct crypto_tfm *tfm, u8 *out)
|
|
+ * \ingroup LQ_SHA1_HMAC_FUNCTIONS
|
|
+ * \brief compute final sha1 hmac value
|
|
+ * \param tfm linux crypto algo transform
|
|
+ * \param out final sha1 hmac output value
|
|
+*/
|
|
+static int sha1_hmac_final(struct shash_desc *desc, u8 *out)
|
|
+{
|
|
+ struct sha1_hmac_ctx *sctx = shash_desc_ctx(desc);
|
|
+ u32 index, padlen;
|
|
+ u64 t;
|
|
+ u8 bits[8] = { 0, };
|
|
+ static const u8 padding[64] = { 0x80, };
|
|
+ volatile struct deu_hash *hashs = (struct deu_hash *) HASH_START;
|
|
+ ulong flag;
|
|
+ int i = 0;
|
|
+ int dbn;
|
|
+ u32 *in = &sctx->temp[0];
|
|
+
|
|
+	t = sctx->count + 512; /* need to add the 512 bits of the IPAD operation */
|
|
+ bits[7] = 0xff & t;
|
|
+ t >>= 8;
|
|
+ bits[6] = 0xff & t;
|
|
+ t >>= 8;
|
|
+ bits[5] = 0xff & t;
|
|
+ t >>= 8;
|
|
+ bits[4] = 0xff & t;
|
|
+ t >>= 8;
|
|
+ bits[3] = 0xff & t;
|
|
+ t >>= 8;
|
|
+ bits[2] = 0xff & t;
|
|
+ t >>= 8;
|
|
+ bits[1] = 0xff & t;
|
|
+ t >>= 8;
|
|
+ bits[0] = 0xff & t;
|
|
+
|
|
+ /* Pad out to 56 mod 64 */
|
|
+ index = (sctx->count >> 3) & 0x3f;
|
|
+ padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
|
|
+ sha1_hmac_update(desc, padding, padlen);
|
|
+
|
|
+ /* Append length */
|
|
+ sha1_hmac_update(desc, bits, sizeof bits);
|
|
+
|
|
+ CRTCL_SECT_START;
|
|
+
|
|
+ hashs->DBN = sctx->dbn;
|
|
+
|
|
+ /* for vr9 change, ENDI = 1 */
|
|
+ *LQ_HASH_CON = HASH_CON_VALUE;
|
|
+
|
|
+ /* wait for processing */
|
|
+ while (hashs->ctrl.BSY) {
|
|
+ /* this will not take long */
|
|
+ }
|
|
+
|
|
+ for (dbn = 0; dbn < sctx->dbn; dbn++)
+ {
+ for (i = 0; i < 16; i++) {
+ hashs->MR = in[i];
+ }
+
+ hashs->ctrl.GO = 1;
+ asm("sync");
+
+ /* wait for processing */
+ while (hashs->ctrl.BSY) {
+ /* this will not take long */
+ }
+
+ in += 16;
+ }
+
+#if 1
+ /* wait for digest ready */
+ while (! hashs->ctrl.DGRY) {
+ /* this will not take long */
+ }
+#endif
+
+ *((u32 *) out + 0) = hashs->D1R;
+ *((u32 *) out + 1) = hashs->D2R;
+ *((u32 *) out + 2) = hashs->D3R;
+ *((u32 *) out + 3) = hashs->D4R;
+ *((u32 *) out + 4) = hashs->D5R;
+
+ CRTCL_SECT_END;
+
+ return 0;
+}
+
+/*
+ * \brief SHA1-HMAC function mappings
+*/
+static struct shash_alg sha1_hmac_alg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .init = sha1_hmac_init,
+ .update = sha1_hmac_update,
+ .final = sha1_hmac_final,
+ .export = sha1_hmac_export,
+ .import = sha1_hmac_import,
+ .setkey = sha1_hmac_setkey,
+ .descsize = sizeof(struct sha1_hmac_ctx),
+ .statesize = sizeof(struct sha1_hmac_ctx),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "lq_deu-sha1_hmac",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+/** \fn int lq_deu_init_sha1_hmac(void)
+ * \ingroup LQ_SHA1_HMAC_FUNCTIONS
+ * \brief initialize sha1 hmac driver
+*/
+int lq_deu_init_sha1_hmac(void)
+{
+ int ret;
+
+ if ((ret = crypto_register_shash(&sha1_hmac_alg)))
+ goto sha1_err;
+
+ CRTCL_SECT_INIT;
+
+ printk(KERN_NOTICE "Lantiq DEU SHA1_HMAC initialized%s.\n",
+ disable_deudma ? "" : " (DMA)");
+ return ret;
+
+sha1_err:
+ printk(KERN_ERR "Lantiq DEU SHA1_HMAC initialization failed!\n");
+ return ret;
+}
+
+/** \fn void lq_deu_fini_sha1_hmac(void)
+ * \ingroup LQ_SHA1_HMAC_FUNCTIONS
+ * \brief unregister sha1 hmac driver
+*/
+void lq_deu_fini_sha1_hmac(void)
+{
+ crypto_unregister_shash(&sha1_hmac_alg);
+}
+
+#endif
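Once lq_deu_init_sha1_hmac() has registered the shash above, kernel users reach the hardware through the generic crypto API by asking for "hmac(sha1)". A minimal consumer sketch follows (hypothetical code, not part of this patch; it assumes a kernel that provides the shash interface from <crypto/hash.h>):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int demo_hmac_sha1(const u8 *key, unsigned int keylen,
                          const u8 *data, unsigned int len, u8 *digest)
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int ret;

        /* picks the best "hmac(sha1)" provider, e.g. lq_deu-sha1_hmac */
        tfm = crypto_alloc_shash("hmac(sha1)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_shash_setkey(tfm, key, keylen);
        if (ret)
                goto out;

        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
        if (!desc) {
                ret = -ENOMEM;
                goto out;
        }
        desc->tfm = tfm;
        desc->flags = 0;

        /* one call runs init/update/final; digest needs SHA1_DIGEST_SIZE bytes */
        ret = crypto_shash_digest(desc, data, len, digest);
        kfree(desc);
out:
        crypto_free_shash(tfm);
        return ret;
}

crypto_shash_digest() drives sha1_hmac_init(), sha1_hmac_update() and sha1_hmac_final() in turn, so this single helper exercises every hardware path implemented above.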
--- /dev/null
+++ b/drivers/crypto/lantiq/deu_falcon.c
@@ -0,0 +1,163 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
+ * Copyright (C) 2009 Mohammad Firdaus
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/io.h> /* dma_cache_inv */
+#include <linux/platform_device.h>
+
+#ifdef CONFIG_SOC_LANTIQ_FALCON
+
+#include "deu.h"
+
+/**
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
+ \ingroup API
+ \brief Lantiq DEU driver module
+*/
+
+/**
+ \file deu_falcon.c
+ \brief Lantiq DEU board specific driver file for falcon
+*/
+
+/**
+ \defgroup BOARD_SPECIFIC_FUNCTIONS LQ_BOARD_SPECIFIC_FUNCTIONS
+ \ingroup LQ_DEU
+ \brief board specific functions
+*/
+
+#include <falcon/gpon_reg_base.h>
+#include <falcon/sys1_reg.h>
+#include <falcon/status_reg.h>
+#include <falcon/sysctrl.h>
+
+#define reg_r32(reg) __raw_readl(reg)
+#define reg_w32(val, reg) __raw_writel(val, reg)
+#define reg_w32_mask(clear, set, reg) reg_w32((reg_r32(reg) & ~(clear)) | (set), reg)
+
+static gpon_sys1_t * const sys1 = (gpon_sys1_t *)GPON_SYS1_BASE;
+static gpon_status_t * const status = (gpon_status_t *)GPON_STATUS_BASE;
+
+/** \fn u32 endian_swap(u32 input)
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
+ * \brief Swap data given to the function
+ * \param input Data input to be swapped
+ * \return either the swapped data or the input data depending on whether it is in DMA mode or FPI mode
+*/
+static u32 endian_swap(u32 input)
+{
+ return input;
+}
+
+/** \fn u32 input_swap(u32 input)
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
+ * \brief Not used
+ * \return input
+*/
+static u32 input_swap(u32 input)
+{
+ return input;
+}
+
+/** \fn void aes_chip_init(void)
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
+ * \brief initialize AES hardware
+*/
+static void aes_chip_init(void)
+{
+ volatile struct deu_aes *aes = (struct deu_aes *) AES_START;
+
+ aes->ctrl.SM = 1;
+ aes->ctrl.ARS = 1;
+}
+
+/** \fn void des_chip_init(void)
+ * \ingroup BOARD_SPECIFIC_FUNCTIONS
+ * \brief initialize DES hardware
+*/
+static void des_chip_init(void)
+{
+}
+
+static u32 chip_init(void)
+{
+ sys1_hw_clk_enable(CLKEN_SHA1_SET | CLKEN_AES_SET);
+ sys1_hw_activate(ACT_SHA1_SET | ACT_AES_SET);
+
+ return LQ_DEU_ID_AES | LQ_DEU_ID_HASH;
+}
+
+static int lq_crypto_probe(struct platform_device *pdev)
+{
+#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
+ lq_crypto_ops.dma_init = NULL;
+ lq_crypto_ops.dma_exit = NULL;
+ lq_crypto_ops.aes_dma_memcpy = NULL;
+ lq_crypto_ops.des_dma_memcpy = NULL;
+ lq_crypto_ops.aes_dma_malloc = NULL;
+ lq_crypto_ops.des_dma_malloc = NULL;
+ lq_crypto_ops.dma_align = NULL;
+ lq_crypto_ops.dma_free = NULL;
+#endif
+
+ lq_crypto_ops.endian_swap = endian_swap;
+ lq_crypto_ops.input_swap = input_swap;
+ lq_crypto_ops.aes_chip_init = aes_chip_init;
+ lq_crypto_ops.des_chip_init = des_chip_init;
+ lq_crypto_ops.chip_init = chip_init;
+
+ printk(KERN_INFO "lq_falcon_deu: driver loaded!\n");
+
+ lq_deu_init();
+
+ return 0;
+}
+
+static int lq_crypto_remove(struct platform_device *pdev)
+{
+ lq_deu_exit();
+
+ return 0;
+}
+
+static struct platform_driver lq_crypto = {
+ .probe = lq_crypto_probe,
+ .remove = lq_crypto_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "lq_falcon_deu"
+ }
+};
+
+static int __init lq_crypto_init(void)
+{
+ return platform_driver_register(&lq_crypto);
+}
+module_init(lq_crypto_init);
+
+static void __exit lq_crypto_exit(void)
+{
+ platform_driver_unregister(&lq_crypto);
+}
+module_exit(lq_crypto_exit);
+
+#endif
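The probe hook above only runs once a platform device named "lq_falcon_deu" has been registered, mirroring what the Danube and AR9 board files at the end of this patch do through lq_register_crypto(). A hypothetical Falcon board hook (illustration only, not part of this patch) could provide that device like this:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static int __init falcon_register_deu(void)
{
        struct platform_device *pdev;

        /* the name must match lq_crypto.driver.name ("lq_falcon_deu") */
        pdev = platform_device_register_simple("lq_falcon_deu", 0, NULL, 0);
        return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
arch_initcall(falcon_register_deu);

Matching by name is the only binding mechanism used here; no resources or platform data are passed, exactly as in the xway lq_register_crypto() helper added below.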
--- /dev/null
+++ b/drivers/crypto/lantiq/deu_falcon.h
@@ -0,0 +1,281 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
+ * Copyright (C) 2009 Mohammad Firdaus / Infineon Technologies
+ */
+
+/**
+ \defgroup LQ_DEU LQ_DEU_DRIVERS
+ \ingroup API
+ \brief DEU driver module
+*/
+
+/**
+ \defgroup LQ_DEU_DEFINITIONS LQ_DEU_DEFINITIONS
+ \ingroup LQ_DEU
+ \brief Lantiq DEU definitions
+*/
+
+/**
+ \file deu_falcon.h
+ \brief DEU driver header file
+*/
+
+
+#ifndef DEU_FALCON_H
+#define DEU_FALCON_H
+
+#define HASH_START 0xbd008100
+#define AES_START 0xbd008000
+
+#ifdef CONFIG_CRYPTO_DEV_DMA
+# include "deu_dma.h"
+# define DEU_DWORD_REORDERING(ptr, buffer, in_out, bytes) \
+ deu_dma_align(ptr, buffer, in_out, bytes)
+# define AES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes) \
+ deu_aes_dma_memcpy(outcopy, out_dma, out_arg, nbytes)
+# define DES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes) \
+ deu_des_dma_memcpy(outcopy, out_dma, out_arg, nbytes)
+# define BUFFER_IN 1
+# define BUFFER_OUT 0
+# define AES_ALGO 1
+# define DES_ALGO 0
+# define ALLOCATE_MEMORY(val, type) 1
+# define FREE_MEMORY(buff)
+extern struct lq_deu_device lq_deu[1];
+#endif /* CONFIG_CRYPTO_DEV_DMA */
+
+/* SHA CONSTANTS */
+#define HASH_CON_VALUE 0x0700002C
+
+#define INPUT_ENDIAN_SWAP(input) deu_input_swap(input)
+#define DEU_ENDIAN_SWAP(input) deu_endian_swap(input)
+#define DELAY_PERIOD 10
+#define FIND_DEU_CHIP_VERSION chip_version()
+
+#define WAIT_AES_DMA_READY() \
+ do { \
+ int i; \
+ volatile struct deu_dma *dma = \
+ (struct deu_dma *) LQ_DEU_DMA_CON; \
+ volatile struct deu_aes *aes = \
+ (volatile struct deu_aes *) AES_START; \
+ for (i = 0; i < 10; i++) \
+ udelay(DELAY_PERIOD); \
+ while (dma->ctrl.BSY) {}; \
+ while (aes->ctrl.BUS) {}; \
+ } while (0)
+
+#define WAIT_DES_DMA_READY() \
+ do { \
+ int i; \
+ volatile struct deu_dma *dma = \
+ (struct deu_dma *) LQ_DEU_DMA_CON; \
+ volatile struct deu_des *des = \
+ (struct deu_des *) DES_3DES_START; \
+ for (i = 0; i < 10; i++) \
+ udelay(DELAY_PERIOD); \
+ while (dma->ctrl.BSY) {}; \
+ while (des->ctrl.BUS) {}; \
+ } while (0)
+
+#define AES_DMA_MISC_CONFIG() \
+ do { \
+ volatile struct deu_aes *aes = \
+ (volatile struct deu_aes *) AES_START; \
+ aes->ctrl.KRE = 1; \
+ aes->ctrl.GO = 1; \
+ } while(0)
+
+#define SHA_HASH_INIT \
+ do { \
+ volatile struct deu_hash *hash = \
+ (struct deu_hash *) HASH_START; \
+ hash->ctrl.SM = 1; \
+ hash->ctrl.ALGO = 0; \
+ hash->ctrl.INIT = 1; \
+ } while(0)
+
+/* DEU Common Structures for Falcon*/
+
+struct deu_clk_ctrl {
+ u32 Res:26;
+ u32 FSOE:1;
+ u32 SBWE:1;
+ u32 EDIS:1;
+ u32 SPEN:1;
+ u32 DISS:1;
+ u32 DISR:1;
+};
+
+struct deu_des {
+ struct deu_des_ctrl { /* 10h */
+ u32 KRE:1;
+ u32 reserved1:5;
+ u32 GO:1;
+ u32 STP:1;
+ u32 Res2:6;
+ u32 NDC:1;
+ u32 ENDI:1;
+ u32 Res3:2;
+ u32 F:3;
+ u32 O:3;
+ u32 BUS:1;
+ u32 DAU:1;
+ u32 ARS:1;
+ u32 SM:1;
+ u32 E_D:1;
+ u32 M:3;
+ } ctrl;
+
+ u32 IHR; /* 14h */
+ u32 ILR; /* 18h */
+ u32 K1HR; /* 1c */
+ u32 K1LR;
+ u32 K2HR;
+ u32 K2LR;
+ u32 K3HR;
+ u32 K3LR; /* 30h */
+ u32 IVHR; /* 34h */
+ u32 IVLR; /* 38 */
+ u32 OHR; /* 3c */
+ u32 OLR; /* 40 */
+};
+
+struct deu_aes {
+ struct deu_aes_ctrl {
+ u32 KRE:1;
+ u32 reserved1:4;
+ u32 PNK:1;
+ u32 GO:1;
+ u32 STP:1;
+ u32 reserved2:6;
+ u32 NDC:1;
+ u32 ENDI:1;
+ u32 reserved3:2;
+ u32 F:3; /* fbs */
+ u32 O:3; /* om */
+ u32 BUS:1; /* bsy */
+ u32 DAU:1;
+ u32 ARS:1;
+ u32 SM:1;
+ u32 E_D:1;
+ u32 KV:1;
+ u32 K:2; /* KL */
+ } ctrl;
+
+ u32 ID3R; /* 80h */
+ u32 ID2R; /* 84h */
+ u32 ID1R; /* 88h */
+ u32 ID0R; /* 8Ch */
+ u32 K7R; /* 90h */
+ u32 K6R; /* 94h */
+ u32 K5R; /* 98h */
+ u32 K4R; /* 9Ch */
+ u32 K3R; /* A0h */
+ u32 K2R; /* A4h */
+ u32 K1R; /* A8h */
+ u32 K0R; /* ACh */
+ u32 IV3R; /* B0h */
+ u32 IV2R; /* B4h */
+ u32 IV1R; /* B8h */
+ u32 IV0R; /* BCh */
+ u32 OD3R; /* D4h */
+ u32 OD2R; /* D8h */
+ u32 OD1R; /* DCh */
+ u32 OD0R; /* E0h */
+};
+
+struct deu_arc4 {
+ struct arc4_controlr {
+ u32 KRE:1;
+ u32 KLEN:4;
+ u32 KSAE:1;
+ u32 GO:1;
+ u32 STP:1;
+ u32 reserved1:6;
+ u32 NDC:1;
+ u32 ENDI:1;
+ u32 reserved2:8;
+ u32 BUS:1; /* bsy */
+ u32 reserved3:1;
+ u32 ARS:1;
+ u32 SM:1;
+ u32 reserved4:4;
+ } ctrl;
+
+ u32 K3R; /* 104h */
+ u32 K2R; /* 108h */
+ u32 K1R; /* 10Ch */
+ u32 K0R; /* 110h */
+ u32 IDLEN; /* 114h */
+ u32 ID3R; /* 118h */
+ u32 ID2R; /* 11Ch */
+ u32 ID1R; /* 120h */
+ u32 ID0R; /* 124h */
+ u32 OD3R; /* 128h */
+ u32 OD2R; /* 12Ch */
+ u32 OD1R; /* 130h */
+ u32 OD0R; /* 134h */
+};
+
+struct deu_hash {
+ struct deu_hash_ctrl {
+ u32 reserved1:5;
+ u32 KHS:1;
+ u32 GO:1;
+ u32 INIT:1;
+ u32 reserved2:6;
+ u32 NDC:1;
+ u32 ENDI:1;
+ u32 reserved3:7;
+ u32 DGRY:1;
+ u32 BSY:1;
+ u32 reserved4:1;
+ u32 IRCL:1;
+ u32 SM:1;
+ u32 KYUE:1;
+ u32 HMEN:1;
+ u32 SSEN:1;
+ u32 ALGO:1;
+ } ctrl;
+
+ u32 MR; /* B4h */
+ u32 D1R; /* B8h */
+ u32 D2R; /* BCh */
+ u32 D3R; /* C0h */
+ u32 D4R; /* C4h */
+ u32 D5R; /* C8h */
+ u32 dummy; /* CCh */
+ u32 KIDX; /* D0h */
+ u32 KEY; /* D4h */
+ u32 DBN; /* D8h */
+};
+
+struct deu_dma {
+ struct deu_dma_ctrl {
+ u32 reserved1:22;
+ u32 BS:2;
+ u32 BSY:1;
+ u32 reserved2:1;
+ u32 ALGO:2;
+ u32 RXCLS:2;
+ u32 reserved3:1;
+ u32 EN:1;
+ } ctrl;
+};
+
+#endif /* DEU_FALCON_H */
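Every register block declared in this header is reached through a volatile pointer cast onto a fixed KSEG1 base address, the same pattern used by SHA_HASH_INIT above and by sha1_hmac_final() in sha1_hmac.c. A condensed sketch of that access pattern for the hash unit (illustration only, mirroring the driver code; not part of this patch):

#include <linux/types.h>

/* feed one 512-bit block to the SHA-1 core and wait for completion */
static void deu_hash_one_block(const u32 *block)
{
        volatile struct deu_hash *hash = (struct deu_hash *) HASH_START;
        int i;

        hash->ctrl.SM = 1;      /* switch the module on */
        hash->ctrl.ALGO = 0;    /* 0 selects SHA-1 */
        hash->ctrl.INIT = 1;    /* load the initial chaining value */

        for (i = 0; i < 16; i++)
                hash->MR = block[i];    /* 16 x 32 bit = one message block */

        hash->ctrl.GO = 1;
        while (hash->ctrl.BSY)
                ;               /* busy-wait, as the driver does */
}

The bit-field layout of struct deu_hash is what makes these single-bit writes possible; the digest is afterwards read back from D1R..D5R, as sha1_hmac_final() shows.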
--- a/arch/mips/lantiq/xway/devices.c
+++ b/arch/mips/lantiq/xway/devices.c
@@ -277,3 +277,9 @@
 break;
 }
 }
+
+void __init
+lq_register_crypto(const char *name)
+{
+ platform_device_register_simple(name, 0, 0, 0);
+}
--- a/arch/mips/lantiq/xway/devices.h
+++ b/arch/mips/lantiq/xway/devices.h
@@ -21,5 +21,6 @@
 extern void __init lq_register_wdt(void);
 extern void __init lq_register_ethernet(struct lq_eth_data *eth);
 extern void __init lq_register_asc(int port);
+extern void __init lq_register_crypto(const char *name);
 
 #endif
--- a/arch/mips/lantiq/xway/mach-easy50712.c
+++ b/arch/mips/lantiq/xway/mach-easy50712.c
@@ -72,6 +72,7 @@
 lq_register_wdt();
 lq_register_pci(&lq_pci_data);
 lq_register_ethernet(&lq_eth_data);
+ lq_register_crypto("lq_danube_deu");
 }
 
 MIPS_MACHINE(LANTIQ_MACH_EASY50712,
--- a/arch/mips/lantiq/xway/mach-easy50812.c
+++ b/arch/mips/lantiq/xway/mach-easy50812.c
@@ -71,6 +71,7 @@
 lq_register_wdt();
 lq_register_pci(&lq_pci_data);
 lq_register_ethernet(&lq_eth_data);
+ lq_register_crypto("lq_ar9_deu");
 }
 
 MIPS_MACHINE(LANTIQ_MACH_EASY50812,