From c925421b8c35357427499f3d298777535c2c6cfd Mon Sep 17 00:00:00 2001
From: Alison Wang <b18965@freescale.com>
Date: Thu, 4 Aug 2011 09:59:45 +0800
Subject: [PATCH 24/52] Add SEC 1.1 support for MCF547x and MCF548x

Add SEC 1.1 support for the MCF547x and MCF548x processors. The SEC
(talitos) driver lives in drivers/crypto/cf_talitos.c, with register
definitions in drivers/crypto/cf_talitos.h.

Signed-off-by: Alison Wang <b18965@freescale.com>
---
 arch/m68k/coldfire/m547x/mcf548x-devices.c |    2 +-
 arch/m68k/include/asm/cf_io.h              |    4 +
 crypto/testmgr.c                           |   18 +-
 drivers/crypto/Kconfig                     |   13 +
 drivers/crypto/Makefile                    |    1 +
 drivers/crypto/cf_talitos.c                | 1727 ++++++++++++++++++++++++++++
 drivers/crypto/cf_talitos.h                |  229 ++++
 7 files changed, 1989 insertions(+), 5 deletions(-)
 create mode 100644 drivers/crypto/cf_talitos.c
 create mode 100644 drivers/crypto/cf_talitos.h

--- a/arch/m68k/coldfire/m547x/mcf548x-devices.c
+++ b/arch/m68k/coldfire/m547x/mcf548x-devices.c
@@ -54,7 +54,7 @@ static struct resource coldfire_sec_reso
 };
 
 static struct platform_device coldfire_sec_device = {
-	.name                   = "fsl-sec1",
+	.name                   = "talitos",
 	.id                     = -1,
 	.num_resources          = ARRAY_SIZE(coldfire_sec_resources),
 	.resource               = coldfire_sec_resources,
--- a/arch/m68k/include/asm/cf_io.h
+++ b/arch/m68k/include/asm/cf_io.h
@@ -192,4 +192,8 @@ static inline void memcpy_toio(volatile
 #define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))
 #endif /* readb */
 
+/* access ports */
+#define setbits32(_addr, _v) out_be32((_addr), in_be32(_addr) |  (_v))
+#define clrbits32(_addr, _v) out_be32((_addr), in_be32(_addr) & ~(_v))
+
 #endif /* _IO_H */
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -212,7 +212,11 @@ static int test_hash(struct crypto_ahash
 				   tcrypt_complete, &tresult);
 
 	j = 0;
+#if defined(CONFIG_CRYPTO_DEV_CF_TALITOS)
+	for (i = 1; i < tcount; i++) {
+#else
 	for (i = 0; i < tcount; i++) {
+#endif
 		if (template[i].np)
 			continue;
 
@@ -276,7 +280,9 @@ static int test_hash(struct crypto_ahash
 			hexdump(result, crypto_ahash_digestsize(tfm));
 			ret = -EINVAL;
 			goto out;
-		}
+		} else {
+			printk(KERN_INFO "alg: hash: Test %d succeeded for %s\n",
+				j, algo);
+		}
 	}
 
 	j = 0;
@@ -344,7 +350,9 @@ static int test_hash(struct crypto_ahash
 				hexdump(result, crypto_ahash_digestsize(tfm));
 				ret = -EINVAL;
 				goto out;
-			}
+			} else {
+				printk(KERN_INFO "alg: hash: Chunking test %d "
+				       "succeeded for %s\n", j, algo);
+			}
 		}
 	}
 
@@ -788,7 +796,6 @@ static int test_skcipher(struct crypto_a
 	else
 		e = "decryption";
 
-	printk(KERN_INFO "%s testing %s %s\n", __func__, algo, e);
 	init_completion(&result.completion);
 
 	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
@@ -963,7 +970,10 @@ static int test_skcipher(struct crypto_a
 					       "%u for %s\n", j, e, k, algo);
 					hexdump(q, template[i].tap[k]);
 					goto out;
-				}
+				} else {
+					printk(KERN_INFO "alg: skcipher: Chunk "
+						"test %d passed on %s for %s\n",
+						j, e, algo);
+				}
 
 				q += template[i].tap[k];
 				for (n = 0; offset_in_page(q + n) && q[n]; n++)
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -282,6 +282,19 @@ config CRYPTO_DEV_TALITOS
 	  To compile this driver as a module, choose M here: the module
 	  will be called talitos.
 
+config CRYPTO_DEV_CF_TALITOS
+	tristate "Talitos Freescale Coldfire Security Engine (SEC)"
+	select CRYPTO_ALGAPI
+	select CRYPTO_AUTHENC
+	select HW_RANDOM
+	depends on (M547X || M548X)
+	help
+	  Say 'Y' here to use the Freescale Coldfire Security Engine (SEC)
+	  to offload cryptographic algorithm computation.
+
+	  The Freescale SEC is present on Coldfire MCF547x and MCF548x
+	  processors.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cf_talitos.
+
 config CRYPTO_DEV_IXP4XX
 	tristate "Driver for IXP4xx crypto hardware acceleration"
 	depends on ARCH_IXP4XX
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -6,6 +6,7 @@ n2_crypto-y := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
+obj-$(CONFIG_CRYPTO_DEV_CF_TALITOS) += cf_talitos.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_MCFCAU) += mcfcau.o
 obj-$(CONFIG_CRYPTO_DEV_MCFCAU_DES) += mcfcau-des.o
--- /dev/null
+++ b/drivers/crypto/cf_talitos.c
@@ -0,0 +1,1727 @@
+/*
+ * cf_talitos - Freescale Coldfire Integrated Security Engine
+ *		(SEC) device driver
+ *
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *	Author: Alison Wang <b18965@freescale.com>
+ *		based on talitos.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+
+#include <asm/m5485sim.h>
+#include "cf_talitos.h"
+
+#define TALITOS_TIMEOUT 100000
+#define TALITOS_MAX_DATA_LEN 65535
+
+#define DESC_TYPE(desc_hdr) (((desc_hdr) >> 4) & 0xf)
+#define PRIMARY_EU(desc_hdr) (((desc_hdr) >> 28) & 0xf)
+#define SECONDARY_EU(desc_hdr) (((desc_hdr) >> 16) & 0xf)
+
+#define CF_TALITOS_DEBUG 0
+#if CF_TALITOS_DEBUG
+#define dprintk(...) printk(__VA_ARGS__)
+#else
+#define dprintk(...)
+#endif
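+
+/*
+ * Illustrative helper, not referenced by the driver: a sketch showing
+ * how the accessors above pull the descriptor type and primary/secondary
+ * EU selects out of a 32-bit header, using the dprintk() defined above.
+ */
+static inline void dump_desc_hdr(const char *tag, u32 hdr)
+{
+	dprintk(KERN_DEBUG "%s: desc type %u, primary EU %u, secondary EU %u\n",
+		tag, DESC_TYPE(hdr), PRIMARY_EU(hdr), SECONDARY_EU(hdr));
+}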
+
+/* descriptor pointer entry */
+struct talitos_ptr {
+	u32 len;	/* length */
+	u32 ptr;	/* address */
+};
+
+static const struct talitos_ptr zero_entry = {
+	.len = 0,
+	.ptr = 0
+};
+
+/* descriptor */
+struct talitos_desc {
+	u32 hdr;			/* header */
+	struct talitos_ptr ptr[7];	/* ptr/len pair array */
+	u32 next_hdr;
+};
+
+/**
+ * talitos_request - descriptor submission request
+ * @desc: descriptor pointer (kernel virtual)
+ * @dma_desc: descriptor's physical bus address
+ * @callback: whom to call when descriptor processing is done
+ * @context: caller context (optional)
+ */
+struct talitos_request {
+	struct talitos_desc *desc;
+	dma_addr_t dma_desc;
+	void (*callback) (struct device *dev, struct talitos_desc *desc,
+		void *context, int error);
+	void *context;
+};
+
+/* per-channel fifo management */
+struct talitos_channel {
+	/* request fifo */
+	struct talitos_request *fifo;
+
+	/* number of requests pending in channel h/w fifo */
+	atomic_t submit_count ____cacheline_aligned;
+
+	/* request submission (head) lock */
+	spinlock_t head_lock ____cacheline_aligned;
+	/* index to next free descriptor request */
+	int head;
+
+	/* request release (tail) lock */
+	spinlock_t tail_lock ____cacheline_aligned;
+	/* index to next in-progress/done descriptor request */
+	int tail;
+};
+
+struct talitos_private {
+	struct device *dev;
+	struct platform_device *pdev;
+	void __iomem *reg;
+	int irq;
+
+	/* SEC version geometry (from the device ID register) */
+	unsigned int num_channels;
+	unsigned int chfifo_len;
+	unsigned int exec_units;
+	unsigned int desc_types;
+
+	/* SEC Compatibility info */
+	unsigned long features;
+
+	/*
+	 * length of the request fifo
+	 * fifo_len is chfifo_len rounded up to next power of 2
+	 * so we can use bitwise ops to wrap
+	 */
+	unsigned int fifo_len;
+
+	struct talitos_channel *chan;
+
+	/* next channel to be assigned next incoming descriptor */
+	atomic_t last_chan ____cacheline_aligned;
+
+	/* request callback tasklet */
+	struct tasklet_struct done_task;
+
+	/* list of registered algorithms */
+	struct list_head alg_list;
+
+	/* hwrng device */
+	struct hwrng rng;
+};
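+
+/*
+ * Worked example of the fifo_len wrap noted above: with fifo_len
+ * rounded up to 8, advancing from index 7 gives (7 + 1) & (8 - 1) = 0,
+ * so head and tail wrap around without a divide.
+ */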
+
+/* .features flag */
+#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
+#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
+#define TALITOS_FTR_SHA224_HWINIT 0x00000004
+
+/*
+ * map virtual single (contiguous) pointer to h/w descriptor pointer
+ */
+static void map_single_talitos_ptr(struct device *dev,
+				   struct talitos_ptr *talitos_ptr,
+				   unsigned short len, void *data,
+				   unsigned char extent,
+				   enum dma_data_direction dir)
+{
+	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
+
+	talitos_ptr->len = len;
+	talitos_ptr->ptr = dma_addr;
+}
+
+/*
+ * unmap bus single (contiguous) h/w descriptor pointer
+ */
+static void unmap_single_talitos_ptr(struct device *dev,
+				     struct talitos_ptr *talitos_ptr,
+				     enum dma_data_direction dir)
+{
+	dma_unmap_single(dev, talitos_ptr->ptr, talitos_ptr->len, dir);
+}
+
+static int reset_channel(struct device *dev, int ch)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	unsigned int timeout = TALITOS_TIMEOUT;
+
+	setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);
+
+	while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
+		TALITOS_CCCR_RESET) && --timeout)
+		cpu_relax();
+
+	if (timeout == 0) {
+		dev_err(dev, "failed to reset channel %d\n", ch);
+		return -EIO;
+	}
+
+	/* enable next-desc fetch, notification type, done writeback and IRQ */
+	setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_NE |
+			TALITOS_CCCR_NT | TALITOS_CCCR_CDWE |
+			TALITOS_CCCR_CDIE);
+
+	return 0;
+}
+
+static int reset_device(struct device *dev)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	unsigned int timeout = TALITOS_TIMEOUT;
+
+	setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);
+
+	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
+	       && --timeout)
+		cpu_relax();
+
+	if (timeout == 0) {
+		dev_err(dev, "failed to reset device\n");
+		return -EIO;
+	}
+
+	setbits32(priv->reg + TALITOS_DEURCR, TALITOS_DEURCR_RESET);
+	setbits32(priv->reg + TALITOS_AFEURCR, TALITOS_AFEURCR_RESET);
+	setbits32(priv->reg + TALITOS_AESURCR, TALITOS_AESURCR_RESET);
+	setbits32(priv->reg + TALITOS_MDEURCR, TALITOS_MDEURCR_RESET);
+	setbits32(priv->reg + TALITOS_RNGRCR, TALITOS_RNGRCR_SR);
+	return 0;
+}
+
+/*
+ * Reset and initialize the device
+ */
+static int init_device(struct device *dev)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	int ch, err;
+
+	/*
+	 * Master reset.
+	 * Per the errata documentation, certain SEC interrupts are not
+	 * fully cleared by a single write of the MCR:SWR bit, so set the
+	 * bit twice to completely reset the device.
+	 */
+	err = reset_device(dev);
+	if (err)
+		return err;
+
+	err = reset_device(dev);
+	if (err)
+		return err;
+
+	/* reset channels */
+	for (ch = 0; ch < priv->num_channels; ch++) {
+		err = reset_channel(dev, ch);
+		if (err)
+			return err;
+	}
+
+	/* unmask and clear channel done and error interrupts */
+	out_be32(priv->reg + TALITOS_IMR, 0);
+	out_be32(priv->reg + TALITOS_IMR_LO, 0);
+
+	out_be32(priv->reg + TALITOS_ICR,
+			TALITOS_ICR_CHERR | TALITOS_ICR_CHDONE);
+	out_be32(priv->reg + TALITOS_ICR_LO,
+			TALITOS_ICR_LO_CHERR | TALITOS_ICR_LO_CHDONE);
+
+	return 0;
+}
+
+/**
+ * talitos_submit - submits a descriptor to the device for processing
+ * @dev:	the SEC device to be used
+ * @desc:	the descriptor to be processed by the device
+ * @callback:	whom to call when processing is complete
+ * @context:	a handle for use by caller (optional)
+ *
+ * desc must contain valid dma-mapped (bus physical) address pointers.
+ * callback must check err and feedback in descriptor header
+ * for device processing status.
+ */
+static int talitos_submit(struct device *dev, struct talitos_desc *desc,
+			  void (*callback)(struct device *dev,
+					   struct talitos_desc *desc,
+					   void *context, int error),
+			  void *context)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	struct talitos_request *request;
+	unsigned long flags, ch;
+	int head;
+
+	/* ignore key parity check in triple DES */
+	if (((desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_DEU) &&
+		(desc->hdr & DESC_HDR_MODE0_DEU_3DES))
+		setbits32(priv->reg + TALITOS_DEUIMR, TALITOS_DEUIMR_KPE_MASK);
+
+	/* select done notification */
+	desc->hdr |= DESC_HDR_DONE;
+
+	/* emulate SEC's round-robin channel fifo polling scheme */
+	ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
+
+	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
+
+	head = priv->chan[ch].head;
+	request = &priv->chan[ch].fifo[head];
+
+	/* map descriptor and save caller data */
+	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
+				DMA_BIDIRECTIONAL);
+	request->callback = callback;
+	request->context = context;
+
+	/* increment fifo head */
+	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
+
+	smp_wmb();
+	request->desc = desc;
+
+	/* GO! */
+	wmb();
+	out_be32(priv->reg + TALITOS_FF(ch), request->dma_desc);
+
+	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
+
+	return -EINPROGRESS;
+}
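+
+/*
+ * Minimal callback sketch, illustration only: the driver's real
+ * callbacks are ablkcipher_done() and ahash_done() below, and this one
+ * assumes <linux/completion.h> is pulled in by the includes above.  A
+ * talitos_submit() caller checks the error code and wakes a waiter.
+ */
+static void __maybe_unused example_submit_done(struct device *dev,
+					       struct talitos_desc *desc,
+					       void *context, int error)
+{
+	struct completion *done = context;
+
+	if (error)
+		dev_err(dev, "descriptor completed with error %d\n", error);
+	complete(done);
+}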
+
+/*
+ * process what was done, notify callback of error if not
+ */
+static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	struct talitos_request *request, saved_req;
+	unsigned long flags;
+	int tail, status;
+
+	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
+
+	tail = priv->chan[ch].tail;
+	while (priv->chan[ch].fifo[tail].desc) {
+		request = &priv->chan[ch].fifo[tail];
+
+		/* descriptors with their done bits set don't get the error */
+		rmb();
+		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
+			status = 0;
+		else
+			if (!error)
+				break;
+			else
+				status = error;
+
+		dma_unmap_single(dev, request->dma_desc,
+				 sizeof(struct talitos_desc),
+				 DMA_BIDIRECTIONAL);
+
+		/* copy entries so we can call callback outside lock */
+		saved_req.desc = request->desc;
+		saved_req.callback = request->callback;
+		saved_req.context = request->context;
+
+		/* release request entry in fifo */
+		smp_wmb();
+		request->desc = NULL;
+
+		/* increment fifo tail */
+		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
+
+		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
+
+		atomic_dec(&priv->chan[ch].submit_count);
+
+		saved_req.callback(dev, saved_req.desc, saved_req.context,
+				   status);
+		/* channel may resume processing in single desc error case */
+		if (error && !reset_ch && status == error)
+			return;
+		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
+		tail = priv->chan[ch].tail;
+	}
+
+	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
+}
+
+/*
+ * process completed requests for channels that have done status
+ */
+static void talitos_done(unsigned long data)
+{
+	struct device *dev = (struct device *)data;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	int ch;
+
+	for (ch = 0; ch < priv->num_channels; ch++)
+		flush_channel(dev, ch, 0, 0);
+
+	/* At this point, all completed channels have been processed.
+	 * Unmask done interrupts for channels completed later on.
+	 */
+	out_be32(priv->reg + TALITOS_IMR, 0);
+	out_be32(priv->reg + TALITOS_IMR_LO, 0);
+
+	out_be32(priv->reg + TALITOS_ICR,
+			TALITOS_ICR_CHERR | TALITOS_ICR_CHDONE);
+	out_be32(priv->reg + TALITOS_ICR_LO,
+			TALITOS_ICR_LO_CHERR | TALITOS_ICR_LO_CHDONE);
+}
+
+/*
+ * locate current (offending) descriptor
+ */
+static struct talitos_desc *current_desc(struct device *dev, int ch)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	int tail = priv->chan[ch].tail;
+	dma_addr_t cur_desc;
+
+	cur_desc = in_be32(priv->reg + TALITOS_CDPR(ch));
+
+	while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
+		tail = (tail + 1) & (priv->fifo_len - 1);
+		if (tail == priv->chan[ch].tail) {
+			dev_err(dev, "couldn't locate current descriptor\n");
+			return NULL;
+		}
+	}
+
+	return priv->chan[ch].fifo[tail].desc;
+}
+
+/*
+ * user diagnostics; report root cause of error based on execution unit status
+ */
+static void report_eu_error(struct device *dev, int ch,
+			    struct talitos_desc *desc)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	int i;
+
+	switch (desc->hdr & DESC_HDR_SEL0_MASK) {
+	case DESC_HDR_SEL0_AFEU:
+		dev_err(dev, "AFEUISR 0x%08x\n",
+			in_be32(priv->reg + TALITOS_AFEUISR));
+		break;
+	case DESC_HDR_SEL0_DEU:
+		dev_err(dev, "DEUISR 0x%08x\n",
+			in_be32(priv->reg + TALITOS_DEUISR));
+		break;
+	case DESC_HDR_SEL0_MDEU:
+		dev_err(dev, "MDEUISR 0x%08x\n",
+			in_be32(priv->reg + TALITOS_MDEUISR));
+		break;
+	case DESC_HDR_SEL0_RNG:
+		dev_err(dev, "RNGISR 0x%08x\n",
+			in_be32(priv->reg + TALITOS_RNGISR));
+		break;
+	case DESC_HDR_SEL0_AESU:
+		dev_err(dev, "AESUISR 0x%08x\n",
+			in_be32(priv->reg + TALITOS_AESUISR));
+		break;
+	}
+
+	switch (desc->hdr & DESC_HDR_SEL1_MASK) {
+	case DESC_HDR_SEL1_MDEU:
+		dev_err(dev, "MDEUISR 0x%08x\n",
+			in_be32(priv->reg + TALITOS_MDEUISR));
+		break;
+	}
+
+	for (i = 0; i < 8; i++)
+		dev_err(dev, "DESCBUF 0x%08x\n",
+			in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8 * i));
+}
+
+/*
+ * recover from error interrupts
+ */
+static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
+{
+	struct device *dev = (struct device *)data;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	int ch, error, reset_ch = 0;
+	u32 v, v_lo;
+
+	for (ch = 0; ch < priv->num_channels; ch++) {
+		/* skip channels without errors */
+		if (!((isr >> 29) & (1 << (ch * 2))))
+			continue;
+
+		error = -EINVAL;
+
+		v = in_be32(priv->reg + TALITOS_CCPSR(ch));
+		v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));
+
+		if (v_lo & TALITOS_CCPSR_LO_TEA)
+			dev_err(dev, "master data transfer error\n");
+		if (v_lo & TALITOS_CCPSR_LO_PERR)
+			dev_err(dev, "fetch pointer not complete error\n");
+		if (v_lo & TALITOS_CCPSR_LO_DERR)
+			dev_err(dev, "illegal descriptor header error\n");
+		if (v_lo & TALITOS_CCPSR_LO_SERR)
+			dev_err(dev, "static assignment error\n");
+		if (v_lo & TALITOS_CCPSR_LO_EUERR)
+			report_eu_error(dev, ch, current_desc(dev, ch));
+
+		flush_channel(dev, ch, error, reset_ch);
+
+		if (reset_ch)
+			reset_channel(dev, ch);
+	}
+
+	/* purge request queues */
+	for (ch = 0; ch < priv->num_channels; ch++)
+		flush_channel(dev, ch, -EIO, 1);
+
+	/* reset and reinitialize the device */
+	init_device(dev);
+}
+
+static irqreturn_t talitos_interrupt(int irq, void *data)
+{
+	struct device *dev = data;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	u32 isr, isr_lo;
+
+	isr = in_be32(priv->reg + TALITOS_ISR);
+	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
+	/* Acknowledge interrupt */
+	out_be32(priv->reg + TALITOS_ICR, isr);
+	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
+
+	if (unlikely(isr & ~TALITOS_ISR_CHDONE)) {
+		talitos_error((unsigned long)data, isr, isr_lo);
+	} else if (likely(isr & TALITOS_ISR_CHDONE)) {
+		/* mask further done interrupts. */
+		setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
+		/* done_task will unmask done interrupts at exit */
+		tasklet_schedule(&priv->done_task);
+	}
+
+	return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*
+ * crypto alg
+ */
+#define TALITOS_CRA_PRIORITY		3000
+#define TALITOS_MAX_KEY_SIZE		64
+#define TALITOS_MAX_IV_LENGTH		16
+#define TALITOS_MAX_OUTPUTDATA_SIZE	64
+#define TALITOS_MAX_INPUTDATA_SIZE	64
+
+#define ARC4_MIN_KEY_SIZE		4
+#define ARC4_MAX_KEY_SIZE		16
+#define ARC4_BLOCK_SIZE			64
+#define MD5_BLOCK_SIZE			64
+
+struct talitos_ctx {
+	struct device *dev;
+	__be32 desc_hdr_template;
+	u8 key[TALITOS_MAX_KEY_SIZE];
+	u8 iv[TALITOS_MAX_IV_LENGTH];
+	unsigned int keylen;
+	unsigned int enckeylen;
+	unsigned int authkeylen;
+	unsigned int authsize;
+};
+
+#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
+#define TALITOS_MDEU_MAX_CONTEXT_SIZE		\
+	TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+
+struct talitos_ahash_req_ctx {
+	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
+	unsigned int hw_context_size;
+	u8 buf[HASH_MAX_BLOCK_SIZE];
+	u8 bufnext[HASH_MAX_BLOCK_SIZE];
+	unsigned int swinit;
+	unsigned int first;
+	unsigned int last;
+	unsigned int to_hash_later;
+	u64 nbuf;
+	struct scatterlist bufsl[2];
+	struct scatterlist *psrc;
+};
+
+/*
+ * talitos_edesc - s/w-extended descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @desc: h/w descriptor
+ *
+ * when src_nents or dst_nents is greater than 1, data is staged through
+ * the src_buf/dst_buf bounce buffers rather than a link table
+ */
+struct talitos_edesc {
+	int src_nents;
+	int dst_nents;
+	int src_is_chained;
+	int dst_is_chained;
+	struct talitos_desc desc;
+	u8 src_buf[TALITOS_MAX_INPUTDATA_SIZE];
+	u8 dst_buf[TALITOS_MAX_OUTPUTDATA_SIZE];
+};
+
+static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
+			  unsigned int nents, enum dma_data_direction dir,
+			  int chained)
+{
+	if (unlikely(chained))
+		while (sg) {
+			dma_map_sg(dev, sg, 1, dir);
+			sg = scatterwalk_sg_next(sg);
+		}
+	else
+		dma_map_sg(dev, sg, nents, dir);
+	return nents;
+}
+
+static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
+				   enum dma_data_direction dir)
+{
+	while (sg) {
+		dma_unmap_sg(dev, sg, 1, dir);
+		sg = scatterwalk_sg_next(sg);
+	}
+}
+
+static void talitos_sg_unmap(struct device *dev,
+			     struct talitos_edesc *edesc,
+			     struct scatterlist *src,
+			     struct scatterlist *dst)
+{
+	unsigned int src_nents = edesc->src_nents ? : 1;
+	unsigned int dst_nents = edesc->dst_nents ? : 1;
+
+	if (src != dst) {
+		if (edesc->src_is_chained)
+			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
+		else
+			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+
+		if (dst) {
+			if (edesc->dst_is_chained)
+				talitos_unmap_sg_chain(dev, dst,
+						       DMA_FROM_DEVICE);
+			else
+				dma_unmap_sg(dev, dst, dst_nents,
+					     DMA_FROM_DEVICE);
+		}
+	} else
+		if (edesc->src_is_chained)
+			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
+		else
+			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+}
+
+/*
+ * derive number of elements in scatterlist
+ */
+static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
+{
+	struct scatterlist *sg = sg_list;
+	int sg_nents = 0;
+
+	*chained = 0;
+	while (nbytes > 0) {
+		sg_nents++;
+		nbytes -= sg->length;
+		if (!sg_is_last(sg) && (sg + 1)->length == 0)
+			*chained = 1;
+		sg = scatterwalk_sg_next(sg);
+	}
+
+	return sg_nents;
+}
+
+/**
+ * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
+ * @sgl:		 The SG list
+ * @nents:		 Number of SG entries
+ * @buf:		 Where to copy to
+ * @buflen:		 The number of bytes to copy
+ * @skip:		 The number of bytes to skip before copying.
+ *                       Note: skip + buflen should equal SG total size.
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
+				    void *buf, size_t buflen, unsigned int skip)
+{
+	unsigned int offset = 0;
+	unsigned int boffset = 0;
+	struct sg_mapping_iter miter;
+	unsigned long flags;
+	unsigned int sg_flags = SG_MITER_ATOMIC;
+	size_t total_buffer = buflen + skip;
+
+	sg_flags |= SG_MITER_FROM_SG;
+
+	sg_miter_start(&miter, sgl, nents, sg_flags);
+
+	local_irq_save(flags);
+
+	while (sg_miter_next(&miter) && offset < total_buffer) {
+		unsigned int len;
+		unsigned int ignore;
+
+		if ((offset + miter.length) > skip) {
+			if (offset < skip) {
+				/* Copy part of this segment */
+				ignore = skip - offset;
+				len = miter.length - ignore;
+				if (boffset + len > buflen)
+					len = buflen - boffset;
+				memcpy(buf + boffset, miter.addr + ignore, len);
+			} else {
+				/* Copy all of this segment (up to buflen) */
+				len = miter.length;
+				if (boffset + len > buflen)
+					len = buflen - boffset;
+				memcpy(buf + boffset, miter.addr, len);
+			}
+			boffset += len;
+		}
+		offset += miter.length;
+	}
+
+	sg_miter_stop(&miter);
+
+	local_irq_restore(flags);
+	return boffset;
+}
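+
+/*
+ * Usage sketch for sg_copy_end_to_buffer(), illustration only and not
+ * referenced by the driver (assumes sg_init_one() is available via the
+ * scatterlist headers already included): copy the trailing three bytes
+ * of an eight-byte buffer, i.e. skip + buflen == total SG size.
+ */
+static void __maybe_unused sg_copy_end_example(void)
+{
+	u8 data[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+	u8 tail[3];
+	struct scatterlist sg;
+
+	sg_init_one(&sg, data, sizeof(data));
+	/* skip the first 5 bytes, copy bytes 5..7 into tail[] */
+	sg_copy_end_to_buffer(&sg, 1, tail, sizeof(tail), 5);
+}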
+
+/*
+ * allocate and map the extended descriptor
+ */
+static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+						 struct scatterlist *src,
+						 struct scatterlist *dst,
+						 int hash_result,
+						 unsigned int cryptlen,
+						 unsigned int authsize,
+						 int icv_stashing,
+						 u32 cryptoflags)
+{
+	struct talitos_edesc *edesc;
+	int src_nents, dst_nents, alloc_len;
+	int src_chained, dst_chained = 0;
+	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+		      GFP_ATOMIC;
+
+	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
+		dev_err(dev, "length exceeds h/w max limit\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
+	src_nents = (src_nents == 1) ? 0 : src_nents;
+
+	if (hash_result) {
+		dst_nents = 0;
+	} else {
+		if (dst == src) {
+			dst_nents = src_nents;
+		} else {
+			dst_nents = sg_count(dst, cryptlen + authsize,
+					     &dst_chained);
+			dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+		}
+	}
+
+	/*
+	 * allocate space for the base edesc only; the src_buf/dst_buf
+	 * bounce buffers used in place of link tables are already part
+	 * of the structure
+	 */
+	alloc_len = sizeof(struct talitos_edesc);
+
+	edesc = kmalloc(alloc_len, flags);
+	if (!edesc) {
+		dev_err(dev, "could not allocate edescriptor\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->src_is_chained = src_chained;
+	edesc->dst_is_chained = dst_chained;
+	return edesc;
+}
+
+static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
+
+	if (keylen > TALITOS_MAX_KEY_SIZE)
+		goto badkey;
+
+	if (keylen < alg->min_keysize || keylen > alg->max_keysize)
+		goto badkey;
+
+	memcpy(&ctx->key, key, keylen);
+	ctx->keylen = keylen;
+
+	return 0;
+
+badkey:
+	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static void common_nonsnoop_unmap(struct device *dev,
+				  struct talitos_edesc *edesc,
+				  struct ablkcipher_request *areq)
+{
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
+
+	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
+}
+
+static void ablkcipher_done(struct device *dev,
+			    struct talitos_desc *desc, void *context,
+			    int err)
+{
+	struct ablkcipher_request *areq = context;
+	struct talitos_edesc *edesc;
+
+	edesc = container_of(desc, struct talitos_edesc, desc);
+
+	if (edesc->dst_nents != 0)
+		sg_copy_from_buffer(areq->dst, edesc->dst_nents,
+			edesc->dst_buf, areq->nbytes);
+
+	common_nonsnoop_unmap(dev, edesc, areq);
+
+	kfree(edesc);
+
+	areq->base.complete(&areq->base, err);
+}
+
+static int common_nonsnoop(struct talitos_edesc *edesc,
+			   struct ablkcipher_request *areq,
+			   u8 *giv,
+			   void (*callback) (struct device *dev,
+					     struct talitos_desc *desc,
+					     void *context, int error))
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct device *dev = ctx->dev;
+	struct talitos_desc *desc = &edesc->desc;
+	unsigned int cryptlen = areq->nbytes;
+	unsigned int ivsize;
+	int sg_count, ret;
+
+	desc->next_hdr = 0;
+
+	/* first DWORD empty */
+	desc->ptr[0] = zero_entry;
+
+	/* cipher iv */
+	ivsize = crypto_ablkcipher_ivsize(cipher);
+	map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
+			       DMA_TO_DEVICE);
+
+	/* AFEU using a key */
+	if (((desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AFEU) &&
+		((desc->hdr & DESC_HDR_MODE0_MASK) ==
+		 DESC_HDR_MODE0_AFEU_USE_KEY))
+		desc->ptr[1] = zero_entry;
+
+	/* cipher key */
+	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
+			       (char *)&ctx->key, 0, DMA_TO_DEVICE);
+
+	/* AFEU using context */
+	if (((desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AFEU) &&
+		((desc->hdr & DESC_HDR_MODE0_MASK) ==
+		 DESC_HDR_MODE0_AFEU_USE_CONTEXT))
+		desc->ptr[2] = zero_entry;
+
+	/*
+	 * cipher in
+	 */
+	desc->ptr[3].len = cpu_to_be16(cryptlen);
+
+	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
+				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+							   : DMA_TO_DEVICE,
+				  edesc->src_is_chained);
+
+	if (sg_count == 1)
+		desc->ptr[3].ptr = sg_dma_address(areq->src);
+	else {
+		sg_copy_to_buffer(areq->src, sg_count, edesc->src_buf,
+				desc->ptr[3].len);
+		desc->ptr[3].ptr = (u32)edesc->src_buf;
+	}
+
+	/* cipher out */
+	desc->ptr[4].len = cpu_to_be16(cryptlen);
+
+	if (areq->src != areq->dst)
+		sg_count = talitos_map_sg(dev, areq->dst,
+					  edesc->dst_nents ? : 1,
+					  DMA_FROM_DEVICE,
+					  edesc->dst_is_chained);
+
+	if (sg_count == 1)
+		desc->ptr[4].ptr = sg_dma_address(areq->dst);
+	else
+		desc->ptr[4].ptr = (u32)edesc->dst_buf;
+
+	/* iv out */
+	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
+			       DMA_FROM_DEVICE);
+
+	/* last DWORD empty */
+	desc->ptr[6] = zero_entry;
+
+	ret = talitos_submit(dev, desc, callback, areq);
+	if (ret != -EINPROGRESS) {
+		common_nonsnoop_unmap(dev, edesc, areq);
+		kfree(edesc);
+	}
+	return ret;
+}
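+
+/*
+ * Descriptor pointer map built by common_nonsnoop() above:
+ *
+ *	ptr[0] empty    ptr[1] IV in   ptr[2] key     ptr[3] data in
+ *	ptr[4] data out ptr[5] IV out  ptr[6] empty
+ */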
+
+static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
+						    areq)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
+				   areq->nbytes, 0, 0, areq->base.flags);
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct talitos_edesc *edesc;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(areq);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* set encrypt except AFEU */
+	if ((ctx->desc_hdr_template & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AFEU)
+		edesc->desc.hdr = ctx->desc_hdr_template;
+	else
+		edesc->desc.hdr = ctx->desc_hdr_template |
+			DESC_HDR_MODE0_ENCRYP;
+
+	return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+	struct talitos_edesc *edesc;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(areq);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+
+	return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+}
+
+static void common_nonsnoop_hash_unmap(struct device *dev,
+				       struct talitos_edesc *edesc,
+				       struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+
+	/* when a hash context was mapped in, it must be unmapped */
+	if (edesc->desc.ptr[1].len)
+		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
+					 DMA_TO_DEVICE);
+
+	if (edesc->desc.ptr[2].len)
+		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
+					 DMA_TO_DEVICE);
+
+	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
+}
+
+static void ahash_done(struct device *dev,
+		       struct talitos_desc *desc, void *context,
+		       int err)
+{
+	struct ahash_request *areq = context;
+	struct talitos_edesc *edesc =
+		 container_of(desc, struct talitos_edesc, desc);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	if (!req_ctx->last && req_ctx->to_hash_later) {
+		/* Position any partial block for next update/final/finup */
+		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
+		req_ctx->nbuf = req_ctx->to_hash_later;
+	}
+	common_nonsnoop_hash_unmap(dev, edesc, areq);
+
+	kfree(edesc);
+
+	areq->base.complete(&areq->base, err);
+}
+
+static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+				struct ahash_request *areq, unsigned int length,
+				void (*callback) (struct device *dev,
+						  struct talitos_desc *desc,
+						  void *context, int error))
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct device *dev = ctx->dev;
+	struct talitos_desc *desc = &edesc->desc;
+	int sg_count, ret;
+
+	desc->next_hdr = 0;
+
+	/* first DWORD empty */
+	desc->ptr[0] = zero_entry;
+
+	/* hash context in */
+	if (!req_ctx->first || req_ctx->swinit) {
+		map_single_talitos_ptr(dev, &desc->ptr[1],
+				       req_ctx->hw_context_size,
+				       (char *)req_ctx->hw_context, 0,
+				       DMA_TO_DEVICE);
+		req_ctx->swinit = 0;
+	} else {
+		desc->ptr[1] = zero_entry;
+		/* Indicate next op is not the first. */
+		req_ctx->first = 0;
+	}
+
+	/* HMAC key */
+	if (ctx->keylen)
+		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
+				       (char *)&ctx->key, 0, DMA_TO_DEVICE);
+	else
+		desc->ptr[2] = zero_entry;
+
+	/*
+	 * data in
+	 */
+	desc->ptr[3].len = length;
+	sg_count = talitos_map_sg(dev, req_ctx->psrc,
+				  edesc->src_nents ? : 1,
+				  DMA_TO_DEVICE,
+				  edesc->src_is_chained);
+
+	if (sg_count == 1)
+		desc->ptr[3].ptr = sg_dma_address(req_ctx->psrc);
+	else {
+		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->src_buf,
+				desc->ptr[3].len);
+		desc->ptr[3].ptr = (u32)edesc->src_buf;
+	}
+
+	/* fifth DWORD empty */
+	desc->ptr[4] = zero_entry;
+
+	/* hash/HMAC out -or- hash context out */
+	if (req_ctx->last)
+		map_single_talitos_ptr(dev, &desc->ptr[5],
+				crypto_ahash_digestsize(tfm),
+				areq->result, 0, DMA_FROM_DEVICE);
+	else
+		map_single_talitos_ptr(dev, &desc->ptr[5],
+				req_ctx->hw_context_size,
+				req_ctx->hw_context, 0, DMA_FROM_DEVICE);
+
+	/* last DWORD empty */
+	desc->ptr[6] = zero_entry;
+
+	ret = talitos_submit(dev, desc, callback, areq);
+	if (ret != -EINPROGRESS) {
+		common_nonsnoop_hash_unmap(dev, edesc, areq);
+		kfree(edesc);
+	}
+	return ret;
+}
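+
+/*
+ * Descriptor pointer map built by common_nonsnoop_hash() above:
+ * ptr[0] empty, ptr[1] hash context in, ptr[2] HMAC key, ptr[3] data
+ * in, ptr[4] empty, ptr[5] digest or context out, ptr[6] empty.
+ */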
+
+static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
+					       unsigned int nbytes)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
+				   nbytes, 0, 0, areq->base.flags);
+}
+
+static int ahash_init(struct ahash_request *areq)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	/* Initialize the context */
+	req_ctx->nbuf = 0;
+	req_ctx->first = 1; /* first indicates h/w must init its context */
+	req_ctx->swinit = 0; /* assume h/w init of context */
+	req_ctx->hw_context_size =
+		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+
+	return 0;
+}
+
+/*
+ * on h/w without explicit sha224 support, we initialize h/w context
+ * manually with sha224 constants, and tell it to run sha256.
+ */
+static int ahash_init_sha224_swinit(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	ahash_init(areq);
+	req_ctx->swinit = 1; /* prevent h/w init with sha256 values */
+
+	req_ctx->hw_context[0] = SHA224_H0;
+	req_ctx->hw_context[1] = SHA224_H1;
+	req_ctx->hw_context[2] = SHA224_H2;
+	req_ctx->hw_context[3] = SHA224_H3;
+	req_ctx->hw_context[4] = SHA224_H4;
+	req_ctx->hw_context[5] = SHA224_H5;
+	req_ctx->hw_context[6] = SHA224_H6;
+	req_ctx->hw_context[7] = SHA224_H7;
+
+	/* init 64-bit count */
+	req_ctx->hw_context[8] = 0;
+	req_ctx->hw_context[9] = 0;
+
+	return 0;
+}
+
+static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct talitos_edesc *edesc;
+	unsigned int blocksize =
+			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+	unsigned int nbytes_to_hash;
+	unsigned int to_hash_later;
+	unsigned int nsg;
+	int chained;
+
+	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
+		/* Buffer up to one whole block */
+		sg_copy_to_buffer(areq->src,
+				  sg_count(areq->src, nbytes, &chained),
+				  req_ctx->buf + req_ctx->nbuf, nbytes);
+		req_ctx->nbuf += nbytes;
+		return 0;
+	}
+
+	/* At least (blocksize + 1) bytes are available to hash */
+	nbytes_to_hash = nbytes + req_ctx->nbuf;
+	to_hash_later = nbytes_to_hash & (blocksize - 1);
+
+	if (req_ctx->last)
+		to_hash_later = 0;
+	else if (to_hash_later)
+		/* There is a partial block. Hash the full block(s) now */
+		nbytes_to_hash -= to_hash_later;
+	else {
+		/* Keep one block buffered */
+		nbytes_to_hash -= blocksize;
+		to_hash_later = blocksize;
+	}
+
+	/* Chain in any previously buffered data */
+	if (req_ctx->nbuf) {
+		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
+		sg_init_table(req_ctx->bufsl, nsg);
+		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
+		if (nsg > 1)
+			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
+		req_ctx->psrc = req_ctx->bufsl;
+	} else
+		req_ctx->psrc = areq->src;
+
+	if (to_hash_later) {
+		int nents = sg_count(areq->src, nbytes, &chained);
+		sg_copy_end_to_buffer(areq->src, nents,
+				      req_ctx->bufnext,
+				      to_hash_later,
+				      nbytes - to_hash_later);
+	}
+	req_ctx->to_hash_later = to_hash_later;
+
+	/* Allocate extended descriptor */
+	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	edesc->desc.hdr = ctx->desc_hdr_template;
+
+	/* On last one, request SEC to pad; otherwise continue */
+	if (req_ctx->last)
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
+	else
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
+
+	/* request SEC to INIT hash. */
+	if (req_ctx->first && !req_ctx->swinit)
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
+
+	/* When the tfm context has a keylen, it's an HMAC.
+	 * A first or last (ie. not middle) descriptor must request HMAC.
+	 */
+	if (ctx->keylen && (req_ctx->first || req_ctx->last))
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
+
+	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
+				    ahash_done);
+}
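+
+/*
+ * Worked example of the split above (blocksize = 64): with nbuf = 10
+ * buffered bytes and a 150-byte non-final update, nbytes_to_hash = 160
+ * and to_hash_later = 160 & 63 = 32, so 128 bytes are hashed now and 32
+ * are carried in bufnext for the next request.  Had the total been an
+ * exact multiple of the block size, one full block would be held back
+ * instead so a later final() still has data to pad.
+ */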
+
+static int ahash_update(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	req_ctx->last = 0;
+
+	return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_final(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	req_ctx->last = 1;
+
+	return ahash_process_req(areq, 0);
+}
+
+static int ahash_finup(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	req_ctx->last = 1;
+
+	return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_digest(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+
+	ahash->init(areq);
+	req_ctx->last = 1;
+
+	return ahash_process_req(areq, areq->nbytes);
+}
+
+struct talitos_alg_template {
+	u32 type;
+	union {
+		struct crypto_alg crypto;
+		struct ahash_alg hash;
+	} alg;
+	__be32 desc_hdr_template;
+};
+
+static struct talitos_alg_template driver_algs[] = {
+	/* ABLKCIPHER algorithms. */
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "ecb(arc4)",
+			.cra_driver_name = "ecb-arc4-talitos",
+			.cra_blocksize = ARC4_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_ablkcipher = {
+				.setkey = ablkcipher_setkey,
+				.encrypt = ablkcipher_encrypt,
+				.decrypt = ablkcipher_decrypt,
+				.geniv = "eseqiv",
+				.min_keysize = ARC4_MIN_KEY_SIZE,
+				.max_keysize = ARC4_MAX_KEY_SIZE,
+				.ivsize = ARC4_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_AFEU |
+				     DESC_HDR_SEL0_AFEU |
+				     DESC_HDR_MODE0_AFEU_USE_KEY,
+	},
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "cbc(aes)",
+			.cra_driver_name = "cbc-aes-talitos",
+			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_ablkcipher = {
+				.setkey = ablkcipher_setkey,
+				.encrypt = ablkcipher_encrypt,
+				.decrypt = ablkcipher_decrypt,
+				.geniv = "eseqiv",
+				.min_keysize = AES_MIN_KEY_SIZE,
+				.max_keysize = AES_MAX_KEY_SIZE,
+				.ivsize = AES_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_AESU |
+				     DESC_HDR_MODE0_AESU_CBC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "cbc(des)",
+			.cra_driver_name = "cbc-des-talitos",
+			.cra_blocksize = DES_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_ablkcipher = {
+				.setkey = ablkcipher_setkey,
+				.encrypt = ablkcipher_encrypt,
+				.decrypt = ablkcipher_decrypt,
+				.geniv = "eseqiv",
+				.min_keysize = DES_KEY_SIZE,
+				.max_keysize = DES_KEY_SIZE,
+				.ivsize = DES_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+					DESC_HDR_SEL0_DEU |
+					DESC_HDR_MODE0_DEU_CBC,
+	},
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
+			.cra_name = "cbc(des3_ede)",
+			.cra_driver_name = "cbc-3des-talitos",
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_ASYNC,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_ablkcipher = {
+				.setkey = ablkcipher_setkey,
+				.encrypt = ablkcipher_encrypt,
+				.decrypt = ablkcipher_decrypt,
+				.geniv = "eseqiv",
+				.min_keysize = DES3_EDE_KEY_SIZE,
+				.max_keysize = DES3_EDE_KEY_SIZE,
+				.ivsize = DES3_EDE_BLOCK_SIZE,
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+					DESC_HDR_SEL0_DEU |
+					DESC_HDR_MODE0_DEU_CBC |
+					DESC_HDR_MODE0_DEU_3DES,
+	},
+	/* AHASH algorithms. */
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.halg.digestsize = MD5_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "md5",
+				.cra_driver_name = "md5-talitos",
+				.cra_blocksize = MD5_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEU |
+				     DESC_HDR_MODE0_MDEU_MD5,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.halg.digestsize = SHA1_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha1",
+				.cra_driver_name = "sha1-talitos",
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEU |
+				     DESC_HDR_MODE0_MDEU_SHA1,
+	},
+};
+
+struct talitos_crypto_alg {
+	struct list_head entry;
+	struct device *dev;
+	struct talitos_alg_template algt;
+};
+
+static int talitos_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct talitos_crypto_alg *talitos_alg;
+	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+		talitos_alg = container_of(__crypto_ahash_alg(alg),
+					   struct talitos_crypto_alg,
+					   algt.alg.hash);
+	else
+		talitos_alg = container_of(alg, struct talitos_crypto_alg,
+					   algt.alg.crypto);
+
+	/* update context with ptr to dev */
+	ctx->dev = talitos_alg->dev;
+
+	/* copy descriptor header template value */
+	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
+
+	return 0;
+}
+
+static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
+{
+	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	talitos_cra_init(tfm);
+
+	ctx->keylen = 0;
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct talitos_ahash_req_ctx));
+
+	return 0;
+}
+
+/*
+ * given the alg's descriptor header template, determine whether the
+ * descriptor type and primary/secondary execution units required match
+ * the hw capabilities read from the device ID register.
+ */
+static int hw_supports(struct device *dev, __be32 desc_hdr_template)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	int ret;
+
+	ret = (DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
+	      (PRIMARY_EU(desc_hdr_template) & priv->exec_units);
+
+	if (SECONDARY_EU(desc_hdr_template))
+		ret = ret && (SECONDARY_EU(desc_hdr_template)
+				& priv->exec_units);
+
+	return ret;
+}
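+
+/*
+ * Worked example against the SEC 1.1 masks in cf_talitos.h: the
+ * cbc(aes) template decodes to type 1 and primary EU 6 with no
+ * secondary EU, so (1 & 0xf) and (6 & 0x7) are both non-zero and the
+ * algorithm is registered on this hardware.
+ */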
+
+static int talitos_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	struct talitos_crypto_alg *t_alg, *n;
+	int i;
+
+	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
+		switch (t_alg->algt.type) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		case CRYPTO_ALG_TYPE_AEAD:
+			crypto_unregister_alg(&t_alg->algt.alg.crypto);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			crypto_unregister_ahash(&t_alg->algt.alg.hash);
+			break;
+		}
+		list_del(&t_alg->entry);
+		kfree(t_alg);
+	}
+
+	for (i = 0; i < priv->num_channels; i++)
+		kfree(priv->chan[i].fifo);
+
+	kfree(priv->chan);
+
+	if (priv->irq != 0)
+		free_irq(priv->irq, dev);
+
+	tasklet_kill(&priv->done_task);
+
+	iounmap(priv->reg);
+
+	dev_set_drvdata(dev, NULL);
+
+	kfree(priv);
+
+	return 0;
+}
+
+static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+						    struct talitos_alg_template
+							*template)
+{
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	struct talitos_crypto_alg *t_alg;
+	struct crypto_alg *alg;
+
+	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
+	if (!t_alg)
+		return ERR_PTR(-ENOMEM);
+
+	t_alg->algt = *template;
+
+	switch (t_alg->algt.type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		alg = &t_alg->algt.alg.crypto;
+		alg->cra_init = talitos_cra_init;
+		break;
+	case CRYPTO_ALG_TYPE_AHASH:
+		alg = &t_alg->algt.alg.hash.halg.base;
+		alg->cra_init = talitos_cra_init_ahash;
+		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
+		    !strcmp(alg->cra_name, "sha224")) {
+			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
+			t_alg->algt.desc_hdr_template =
+					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+					DESC_HDR_SEL0_MDEU |
+					DESC_HDR_MODE0_MDEU_SHA256;
+		}
+		break;
+	default:
+		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
+		return ERR_PTR(-EINVAL);
+	}
+
+	alg->cra_module = THIS_MODULE;
+	alg->cra_priority = TALITOS_CRA_PRIORITY;
+	alg->cra_alignmask = 0;
+	alg->cra_ctxsize = sizeof(struct talitos_ctx);
+
+	t_alg->dev = dev;
+
+	return t_alg;
+}
+
+static int __devinit talitos_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct talitos_private *priv;
+	int prop;
+	struct resource *r;
+	int i, err;
+
+	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, priv);
+
+	priv->pdev = pdev;
+
+	tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
+
+	INIT_LIST_HEAD(&priv->alg_list);
+
+	priv->irq = 64 + ISC_SEC;
+	/* get the irq line */
+	err = request_irq(priv->irq, talitos_interrupt, IRQF_DISABLED,
+			dev_driver_string(dev), dev);
+	if (err) {
+		dev_err(dev, "failed to request irq %d\n", priv->irq);
+		priv->irq = 0;	/* tell talitos_remove() not to free it */
+		goto err_out;
+	}
+	MCF_ICR(ISC_SEC) = ILP_SEC;
+
+	/* get a pointer to the register memory */
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->reg = ioremap(r->start, resource_size(r));
+	if (!priv->reg) {
+		dev_err(dev, "failed to ioremap\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	/* get SEC version capabilities from the device ID register */
+	prop = in_be32(priv->reg + TALITOS_ID);
+	if (prop & TALITOS_ID_SEC_1_1) {
+		priv->num_channels = TALITOS_NCHANNELS_SEC_1_1;
+		priv->chfifo_len = TALITOS_CHFIFOLEN_SEC_1_1;
+		priv->exec_units = TALITOS_HAS_EUS_SEC_1_1;
+		priv->desc_types = TALITOS_HAS_DESCTYPES_SEC_1_1;
+	} else {
+		dev_err(dev, "failed to id device\n");
+		goto err_out;
+	}
+
+	priv->chan = kzalloc(sizeof(struct talitos_channel) *
+			     priv->num_channels, GFP_KERNEL);
+	if (!priv->chan) {
+		dev_err(dev, "failed to allocate channel management space\n");
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	for (i = 0; i < priv->num_channels; i++) {
+		spin_lock_init(&priv->chan[i].head_lock);
+		spin_lock_init(&priv->chan[i].tail_lock);
+	}
+
+	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
+
+	for (i = 0; i < priv->num_channels; i++) {
+		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
+					     priv->fifo_len, GFP_KERNEL);
+		if (!priv->chan[i].fifo) {
+			dev_err(dev, "failed to allocate request fifo %d\n", i);
+			err = -ENOMEM;
+			goto err_out;
+		}
+	}
+
+	for (i = 0; i < priv->num_channels; i++)
+		atomic_set(&priv->chan[i].submit_count,
+			-(priv->chfifo_len - 1));
+
+	dma_set_mask(dev, DMA_BIT_MASK(36));
+
+	/* reset and initialize the h/w */
+	err = init_device(dev);
+	if (err) {
+		dev_err(dev, "failed to initialize device\n");
+		goto err_out;
+	}
+
+	/* register crypto algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
+			struct talitos_crypto_alg *t_alg;
+			char *name = NULL;
+
+			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
+			if (IS_ERR(t_alg)) {
+				err = PTR_ERR(t_alg);
+				goto err_out;
+			}
+
+			switch (t_alg->algt.type) {
+			case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			case CRYPTO_ALG_TYPE_AEAD:
+				err = crypto_register_alg(
+						&t_alg->algt.alg.crypto);
+				name = t_alg->algt.alg.crypto.cra_driver_name;
+				break;
+			case CRYPTO_ALG_TYPE_AHASH:
+				err = crypto_register_ahash(
+						&t_alg->algt.alg.hash);
+				name =
+				t_alg->algt.alg.hash.halg.base.cra_driver_name;
+				break;
+			}
+			if (err) {
+				dev_err(dev, "%s alg registration failed\n",
+					name);
+				kfree(t_alg);
+			} else {
+				list_add_tail(&t_alg->entry, &priv->alg_list);
+				dev_info(dev, "%s\n", name);
+			}
+		}
+	}
+
+	return 0;
+
+err_out:
+	talitos_remove(pdev);
+
+	return err;
+}
+
+static struct platform_driver talitos_driver = {
+	.driver = {
+		.name = "talitos",
+		.owner = THIS_MODULE,
+	},
+	.probe = talitos_probe,
+	.remove = talitos_remove,
+};
+
+static int __init talitos_init(void)
+{
+	return platform_driver_register(&talitos_driver);
+}
+module_init(talitos_init);
+
+static void __exit talitos_exit(void)
+{
+	platform_driver_unregister(&talitos_driver);
+}
+module_exit(talitos_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
+MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
--- /dev/null
+++ b/drivers/crypto/cf_talitos.h
@@ -0,0 +1,229 @@
+/*
+ * Freescale Coldfire SEC (talitos) device dependent data structures
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* device ID register values */
+#define TALITOS_ID_SEC_1_1	(0x09000000)  /* MCF547x and MCF548x */
+
+/*
+ * the following num_channels, channel-fifo-depth, exec-unit-mask, and
+ * descriptor-types-mask values are kept for forward compatibility with
+ * OpenFirmware flat device trees
+ */
+
+/*
+ *  num_channels : the number of channels available in each SEC version.
+ */
+
+/* n.b. this driver requires these values be a power of 2 */
+#define TALITOS_NCHANNELS_SEC_1_1       2
+
+/*
+ *  channel-fifo-depth : The number of descriptor
+ *  pointers a channel fetch fifo can hold.
+ */
+#define TALITOS_CHFIFOLEN_SEC_1_1       1
+
+/* the corresponding masks for each SEC version */
+#define TALITOS_HAS_EUS_SEC_1_1		0x7
+
+/* the corresponding masks for each SEC version */
+#define TALITOS_HAS_DESCTYPES_SEC_1_1   0xf
+
+/*
+ * a TALITOS_xxx_LO address points to the low data bits (32-63) of the register
+ */
+/* global register offset addresses */
+/* EU assignment controller register (unused by this driver) */
+#define TALITOS_EUACR           0x1000
+#define TALITOS_EUACR_LO        0x1004
+
+#define TALITOS_IMR		0x1008		/* interrupt mask register */
+#define TALITOS_IMR_ALL		0xf8000000	/* enable all interrupts mask */
+#define TALITOS_IMR_ERR		0xa8000000	/* mask error interrupts */
+#define TALITOS_IMR_DONE	0x50000000      /* mask done interrupts */
+#define TALITOS_IMR_LO		0x100C		/* interrupt mask register */
+/* mask all channel interrupts mask */
+#define TALITOS_IMR_LO_ALL	0x03333340
+#define TALITOS_IMR_LO_ERR	0x02222240	/* mask error interrupts */
+#define TALITOS_IMR_LO_DONE	0x01111100      /* mask done interrupts */
+
+#define TALITOS_ISR		0x1010		/* interrupt status register */
+#define TALITOS_ISR_CHERR	0xa8000000	/* errors mask */
+#define TALITOS_ISR_CHDONE	0x50000000	/* channel(s) done mask */
+#define TALITOS_ISR_LO		0x1014		/* interrupt status register */
+
+#define TALITOS_ICR		0x1018		/* interrupt clear register */
+#define TALITOS_ICR_CHERR	0xa8000000	/* errors enable */
+#define TALITOS_ICR_CHDONE	0x50000000	/* channel(s) done enable */
+#define TALITOS_ICR_LO		0x101C		/* interrupt clear register */
+#define TALITOS_ICR_LO_CHERR	0x02222240	/* errors enable */
+#define TALITOS_ICR_LO_CHDONE	0x01111100	/* channel(s) done enable */
+
+#define TALITOS_ID              0x1020
+
+/* EU assignment status register (unused by this driver) */
+#define TALITOS_EUASR           0x1028
+#define TALITOS_EUASR_LO        0x102C
+
+#define TALITOS_MCR             0x1030          /* master control register */
+#define TALITOS_MCR_SWR         0x01000000
+
+#define TALITOS_MEAR		0x1038
+
+/* channel register address stride */
+#define TALITOS_CH_STRIDE	0x1000
+
+/* channel register offset addresses and bits */
+#define TALITOS_CCCR(ch)	((ch) * TALITOS_CH_STRIDE + 0x200c)
+#define TALITOS_CCCR_RESET   0x1     /* Channel Reset bit */
+#define TALITOS_CCCR_CDWE    0x10    /* Channel done writeback enable bit */
+#define TALITOS_CCCR_NE      0x8     /* Fetch Next Descriptor Enable bit */
+#define TALITOS_CCCR_NT      0x4     /* Notification type bit */
+#define TALITOS_CCCR_CDIE    0x2     /* Channel Done Interrupt Enable bit */
+
+/* Crypto-Channel Pointer Status Reg */
+#define TALITOS_CCPSR(ch)	((ch) * TALITOS_CH_STRIDE + 0x2010)
+#define TALITOS_CCPSR_LO(ch)	((ch) * TALITOS_CH_STRIDE + 0x2014)
+#define TALITOS_CCPSR_LO_TEA	0x2000	/* Transfer error acknowledge */
+#define TALITOS_CCPSR_LO_PERR	0x1000	/* Pointer not complete error */
+#define TALITOS_CCPSR_LO_DERR	0x400	/* Descriptor error */
+#define TALITOS_CCPSR_LO_SERR	0x200	/* Static assignment error */
+#define TALITOS_CCPSR_LO_EUERR	0x100	/* EU error */
+
+/* channel fetch fifo register */
+#define TALITOS_FF(ch)		((ch) * TALITOS_CH_STRIDE + 0x204c)
+
+/* Crypto-Channel Current Descriptor Pointer Reg */
+#define TALITOS_CDPR(ch)	((ch) * TALITOS_CH_STRIDE + 0x2044)
+
+/* Descriptor Buffer (debug) 0x2080-0x20BF */
+#define TALITOS_DESCBUF(ch)	((ch) * TALITOS_CH_STRIDE + 0x2080)
+
+/* execution unit register offset addresses and bits */
+#define TALITOS_DEURCR          0xa018  /* DEU reset control register */
+#define TALITOS_DEURCR_RESET    0x01000000  /* DEU reset bit */
+#define TALITOS_DEUSR		0xa028	/* DEU status register */
+#define TALITOS_DEUSR_RESET     0x01000000  /* DEU Reset status bit */
+#define TALITOS_DEUISR		0xa030	/* DEU interrupt status register */
+#define TALITOS_DEUIMR		0xa038	/* DEU interrupt mask register */
+#define TALITOS_DEUIMR_MASK     0xf63f0000  /* DEU interrupt control mask */
+#define TALITOS_DEUIMR_KPE_MASK	0x00200000  /* DEU interrupt KPE mask */
+
+#define TALITOS_AESURCR         0x12018  /* AESU reset control register */
+#define TALITOS_AESURCR_RESET   0x01000000  /* AESU reset bit */
+#define TALITOS_AESUSR          0x12028  /* AESU status register */
+#define TALITOS_AESUSR_RESET    0x01000000  /* AESU Reset status bit */
+#define TALITOS_AESUISR         0x12030  /* AESU interrupt status register */
+#define TALITOS_AESUIMR         0x12038  /* AESU interrupt mask register */
+#define TALITOS_AESUIMR_MASK    0xf61f0000  /* AESU interrupt control mask */
+
+#define TALITOS_MDEURCR         0xc018  /* MDEU reset control register */
+#define TALITOS_MDEURCR_RESET   0x01000000  /* MDEU reset bit */
+#define TALITOS_MDEUSR          0xc028  /* MDEU status register */
+#define TALITOS_MDEUSR_RESET    0x01000000  /* MDEU Reset status bit */
+#define TALITOS_MDEUISR         0xc030  /* MDEU interrupt status register */
+#define TALITOS_MDEUIMR         0xc038  /* MDEU interrupt mask register */
+#define TALITOS_MDEUIMR_MASK    0xc41f0000  /* MDEU interrupt control mask */
+
+#define TALITOS_AFEURCR         0x8018  /* AFEU reset control register */
+#define TALITOS_AFEURCR_RESET   0x01000000  /* AFEU reset bit */
+#define TALITOS_AFEUSR          0x8028  /* AFEU status register */
+#define TALITOS_AFEUSR_RESET    0x01000000  /* AFEU Reset status bit */
+#define TALITOS_AFEUISR         0x8030  /* AFEU interrupt status register */
+#define TALITOS_AFEUIMR         0x8038  /* AFEU interrupt mask register */
+#define TALITOS_AFEUIMR_MASK    0xf61f0000  /* AFEU interrupt control mask */
+
+#define TALITOS_RNGRCR          0xe018  /* RNG Reset control register */
+#define TALITOS_RNGRCR_SR       0x01000000      /* RNG RNGRCR:Software Reset */
+#define TALITOS_RNGSR		0xe028	/* RNG status register */
+#define TALITOS_RNGSR_RD	0x01000000	/* RNG Reset done */
+#define TALITOS_RNGISR		0xe030	/* RNG Interrupt status register */
+#define TALITOS_RNGIMR          0xe038  /* RNG interrupt mask register */
+#define TALITOS_RNGIMR_MASK     0xc2100000  /* RNG interrupt control mask */
+
+#define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256       0x28
+#define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512         0x48
+
+/* RC4 (AFEU) */
+#define ARC4_SEC_MIN_KEY_SIZE   5
+#define ARC4_SEC_MAX_KEY_SIZE   16
+#define ARC4_SEC_CONTEXT_LEN    259
+#define SEC_ALG_AFEU_KEY              0x10200050
+#define SEC_ALG_AFEU_CONTEXT          0x10700050
+
+/* talitos descriptor header (hdr) bits */
+
+/* primary execution unit select */
+#define	DESC_HDR_SEL0_MASK	0xf0000000
+#define	DESC_HDR_SEL0_AFEU	0x10000000
+#define	DESC_HDR_SEL0_DEU	0x20000000
+#define	DESC_HDR_SEL0_MDEU	0x30000000
+#define	DESC_HDR_SEL0_RNG	0x40000000
+#define	DESC_HDR_SEL0_AESU	0x60000000
+
+/* primary execution unit mode (MODE0) and derivatives */
+#define	DESC_HDR_MODE0_MASK		0x0ff00000
+#define	DESC_HDR_MODE0_ENCRYP		0x00100000
+#define	DESC_HDR_MODE0_AFEU_USE_KEY	0x00200000
+#define	DESC_HDR_MODE0_AFEU_USE_CONTEXT	0x00700000
+#define	DESC_HDR_MODE0_AESU_CBC		0x00200000
+#define	DESC_HDR_MODE0_AESU_ENC		0x00100000
+#define	DESC_HDR_MODE0_DEU_CBC		0x00400000
+#define	DESC_HDR_MODE0_DEU_3DES		0x00200000
+#define	DESC_HDR_MODE0_DEU_ENC		0x00100000
+#define	DESC_HDR_MODE0_MDEU_CONT	0x08000000
+#define	DESC_HDR_MODE0_MDEU_INIT	0x01000000	/* init starting regs */
+#define	DESC_HDR_MODE0_MDEU_HMAC	0x00800000
+#define	DESC_HDR_MODE0_MDEU_PAD		0x00400000	/* PD */
+#define	DESC_HDR_MODE0_MDEU_MD5		0x00200000
+#define	DESC_HDR_MODE0_MDEU_SHA256	0x00100000
+#define	DESC_HDR_MODE0_MDEU_SHA1	0x00000000	/* SHA-160 */
+#define	DESC_HDR_MODE0_MDEU_MD5_HMAC	\
+		(DESC_HDR_MODE0_MDEU_MD5 | DESC_HDR_MODE0_MDEU_HMAC)
+#define	DESC_HDR_MODE0_MDEU_SHA256_HMAC	\
+		(DESC_HDR_MODE0_MDEU_SHA256 | DESC_HDR_MODE0_MDEU_HMAC)
+#define	DESC_HDR_MODE0_MDEU_SHA1_HMAC	\
+		(DESC_HDR_MODE0_MDEU_SHA1 | DESC_HDR_MODE0_MDEU_HMAC)
+
+/* secondary execution unit select (SEL1) */
+/* it's MDEU or nothing */
+#define	DESC_HDR_SEL1_MASK	0x000f0000
+#define	DESC_HDR_SEL1_MDEU	0x00030000
+
+/* secondary execution unit mode (MODE1) and derivatives */
+#define	DESC_HDR_MODE1_MDEU_INIT	0x00001000	/* init starting regs */
+#define	DESC_HDR_MODE1_MDEU_HMAC	0x00000800
+#define	DESC_HDR_MODE1_MDEU_PAD		0x00000400	/* PD */
+#define	DESC_HDR_MODE1_MDEU_MD5		0x00000200
+#define	DESC_HDR_MODE1_MDEU_SHA256	0x00000100
+#define	DESC_HDR_MODE1_MDEU_SHA1	0x00000000	/* SHA-160 */
+#define	DESC_HDR_MODE1_MDEU_MD5_HMAC	\
+	(DESC_HDR_MODE1_MDEU_MD5 | DESC_HDR_MODE1_MDEU_HMAC)
+#define	DESC_HDR_MODE1_MDEU_SHA256_HMAC	\
+	(DESC_HDR_MODE1_MDEU_SHA256 | DESC_HDR_MODE1_MDEU_HMAC)
+#define	DESC_HDR_MODE1_MDEU_SHA1_HMAC	\
+	(DESC_HDR_MODE1_MDEU_SHA1 | DESC_HDR_MODE1_MDEU_HMAC)
+
+/* direction of overall data flow (DIR) */
+#define	DESC_HDR_DIR_OUTBOUND	0x00000000
+#define	DESC_HDR_DIR_INBOUND	0x00000002
+
+/* done notification (DN) */
+#define	DESC_HDR_DONE		0x00000001
+
+/* descriptor types */
+#define DESC_HDR_TYPE_AESU_CTR_NONSNOOP         (0 << 4)
+#define DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU	(1 << 4)
+#define DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU	(2 << 4)
+#define DESC_HDR_TYPE_NONHMAC_SNOOP_NO_AFEU	(3 << 4)
+#define DESC_HDR_TYPE_COMMON_NONSNOOP_AFEU	(5 << 4)
+
+#define TALITOS_HDR_DONE_BITS	0xff000000
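+
+/*
+ * Example header composition (mirrors the cbc(aes) template in
+ * cf_talitos.c):
+ *
+ *	DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU	0x00000010
+ *	| DESC_HDR_SEL0_AESU			0x60000000
+ *	| DESC_HDR_MODE0_AESU_CBC		0x00200000
+ *	= 0x60200010
+ *
+ * talitos_submit() then ORs in DESC_HDR_DONE before handing the
+ * descriptor to the channel fetch fifo.
+ */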