Mirror of git://projects.qi-hardware.com/openwrt-xburst.git (synced 2025-04-21 12:27:27 +03:00)

ocf-linux: version bump to 20110720

Fixes a problem with TFM allocation in cryptosoft.c.


Signed-off-by: Philip Prindeville <philipp@redfish-solutions.com>

Hauke:
 * remove the standalone ubsec_ssb package and take it from ocf-linux instead
 * use the patches from the ocf-linux package
 * refresh all patches
 * re-add some build fixes for OpenWrt
 * re-add the CRYPTO_MANAGER dependency


git-svn-id: svn://svn.openwrt.org/openwrt/trunk@27753 3c298f89-4303-0410-b956-a3cf2f4a3e73
Commit: 32dec7075a
Parent: c3cc5459ec
Author: hauke
Date: 2011-07-24 14:17:58 +00:00
63 changed files with 1264 additions and 2768 deletions


@@ -0,0 +1,12 @@
# for SGlinux builds
-include $(ROOTDIR)/modules/.config

obj-$(CONFIG_OCF_UBSEC_SSB) += ubsec_ssb.o

# default the object directory when not set by kbuild
obj ?= .
EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/

# pull in old-style build rules when TOPDIR is defined
ifdef TOPDIR
-include $(TOPDIR)/Rules.make
endif


@@ -0,0 +1,527 @@
/* $OpenBSD: queue.h,v 1.32 2007/04/30 18:42:34 pedro Exp $ */
/* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
*/
#ifndef _BSD_SYS_QUEUE_H_
#define _BSD_SYS_QUEUE_H_
/*
* This file defines five types of data structures: singly-linked lists,
* lists, simple queues, tail queues, and circular queues.
*
*
* A singly-linked list is headed by a single forward pointer. The elements
* are singly linked for minimum space and pointer manipulation overhead at
* the expense of O(n) removal for arbitrary elements. New elements can be
* added to the list after an existing element or at the head of the list.
* Elements being removed from the head of the list should use the explicit
* macro for this purpose for optimum efficiency. A singly-linked list may
* only be traversed in the forward direction. Singly-linked lists are ideal
* for applications with large datasets and few or no removals or for
* implementing a LIFO queue.
*
* A list is headed by a single forward pointer (or an array of forward
* pointers for a hash table header). The elements are doubly linked
* so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction.
*
* A simple queue is headed by a pair of pointers, one the head of the
* list and the other to the tail of the list. The elements are singly
* linked to save space, so elements can only be removed from the
* head of the list. New elements can be added to the list before or after
* an existing element, at the head of the list, or at the end of the
* list. A simple queue may only be traversed in the forward direction.
*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or
* after an existing element, at the head of the list, or at the end of
* the list. A tail queue may be traversed in either direction.
*
* A circle queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or after
* an existing element, at the head of the list, or at the end of the list.
* A circle queue may be traversed in either direction, but has a more
* complex end of list detection.
*
* For details on the use of these macros, see the queue(3) manual page.
*/
#if defined(QUEUE_MACRO_DEBUG) || (defined(_KERNEL) && defined(DIAGNOSTIC))
#define _Q_INVALIDATE(a) (a) = ((void *)-1)
#else
#define _Q_INVALIDATE(a)
#endif
/*
* Singly-linked List definitions.
*/
#define BSD_SLIST_HEAD(name, type) \
struct name { \
struct type *slh_first; /* first element */ \
}
#define BSD_SLIST_HEAD_INITIALIZER(head) \
{ NULL }
#define BSD_SLIST_ENTRY(type) \
struct { \
struct type *sle_next; /* next element */ \
}
/*
* Singly-linked List access methods.
*/
#define BSD_SLIST_FIRST(head) ((head)->slh_first)
#define BSD_SLIST_END(head) NULL
#define BSD_SLIST_EMPTY(head) (BSD_SLIST_FIRST(head) == BSD_SLIST_END(head))
#define BSD_SLIST_NEXT(elm, field) ((elm)->field.sle_next)
#define BSD_SLIST_FOREACH(var, head, field) \
for((var) = BSD_SLIST_FIRST(head); \
(var) != BSD_SLIST_END(head); \
(var) = BSD_SLIST_NEXT(var, field))
#define BSD_SLIST_FOREACH_PREVPTR(var, varp, head, field) \
for ((varp) = &BSD_SLIST_FIRST((head)); \
((var) = *(varp)) != BSD_SLIST_END(head); \
(varp) = &BSD_SLIST_NEXT((var), field))
/*
* Singly-linked List functions.
*/
#define BSD_SLIST_INIT(head) { \
BSD_SLIST_FIRST(head) = BSD_SLIST_END(head); \
}
#define BSD_SLIST_INSERT_AFTER(slistelm, elm, field) do { \
(elm)->field.sle_next = (slistelm)->field.sle_next; \
(slistelm)->field.sle_next = (elm); \
} while (0)
#define BSD_SLIST_INSERT_HEAD(head, elm, field) do { \
(elm)->field.sle_next = (head)->slh_first; \
(head)->slh_first = (elm); \
} while (0)
#define BSD_SLIST_REMOVE_NEXT(head, elm, field) do { \
(elm)->field.sle_next = (elm)->field.sle_next->field.sle_next; \
} while (0)
#define BSD_SLIST_REMOVE_HEAD(head, field) do { \
(head)->slh_first = (head)->slh_first->field.sle_next; \
} while (0)
#define BSD_SLIST_REMOVE(head, elm, type, field) do { \
if ((head)->slh_first == (elm)) { \
BSD_SLIST_REMOVE_HEAD((head), field); \
} else { \
struct type *curelm = (head)->slh_first; \
\
while (curelm->field.sle_next != (elm)) \
curelm = curelm->field.sle_next; \
curelm->field.sle_next = \
curelm->field.sle_next->field.sle_next; \
_Q_INVALIDATE((elm)->field.sle_next); \
} \
} while (0)
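/*
 * Illustrative sketch (editorial, not part of the original header): a
 * hypothetical "struct entry" used as a LIFO stack, the pattern the
 * file comment above recommends for singly-linked lists.
 *
 *	struct entry {
 *		int value;
 *		BSD_SLIST_ENTRY(entry) link;
 *	} *e1, *e;
 *	BSD_SLIST_HEAD(entryhead, entry) head;
 *
 *	BSD_SLIST_INIT(&head);
 *	BSD_SLIST_INSERT_HEAD(&head, e1, link);
 *	while (!BSD_SLIST_EMPTY(&head)) {
 *		e = BSD_SLIST_FIRST(&head);
 *		BSD_SLIST_REMOVE_HEAD(&head, link);
 *	}
 */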
/*
* List definitions.
*/
#define BSD_LIST_HEAD(name, type) \
struct name { \
struct type *lh_first; /* first element */ \
}
#define BSD_LIST_HEAD_INITIALIZER(head) \
{ NULL }
#define BSD_LIST_ENTRY(type) \
struct { \
struct type *le_next; /* next element */ \
struct type **le_prev; /* address of previous next element */ \
}
/*
* List access methods
*/
#define BSD_LIST_FIRST(head) ((head)->lh_first)
#define BSD_LIST_END(head) NULL
#define BSD_LIST_EMPTY(head) (BSD_LIST_FIRST(head) == BSD_LIST_END(head))
#define BSD_LIST_NEXT(elm, field) ((elm)->field.le_next)
#define BSD_LIST_FOREACH(var, head, field) \
for((var) = BSD_LIST_FIRST(head); \
(var) != BSD_LIST_END(head); \
(var) = BSD_LIST_NEXT(var, field))
/*
* List functions.
*/
#define BSD_LIST_INIT(head) do { \
BSD_LIST_FIRST(head) = BSD_LIST_END(head); \
} while (0)
#define BSD_LIST_INSERT_AFTER(listelm, elm, field) do { \
if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
(listelm)->field.le_next->field.le_prev = \
&(elm)->field.le_next; \
(listelm)->field.le_next = (elm); \
(elm)->field.le_prev = &(listelm)->field.le_next; \
} while (0)
#define BSD_LIST_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
(elm)->field.le_next = (listelm); \
*(listelm)->field.le_prev = (elm); \
(listelm)->field.le_prev = &(elm)->field.le_next; \
} while (0)
#define BSD_LIST_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.le_next = (head)->lh_first) != NULL) \
(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
(head)->lh_first = (elm); \
(elm)->field.le_prev = &(head)->lh_first; \
} while (0)
#define BSD_LIST_REMOVE(elm, field) do { \
if ((elm)->field.le_next != NULL) \
(elm)->field.le_next->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = (elm)->field.le_next; \
_Q_INVALIDATE((elm)->field.le_prev); \
_Q_INVALIDATE((elm)->field.le_next); \
} while (0)
#define BSD_LIST_REPLACE(elm, elm2, field) do { \
if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
(elm2)->field.le_next->field.le_prev = \
&(elm2)->field.le_next; \
(elm2)->field.le_prev = (elm)->field.le_prev; \
*(elm2)->field.le_prev = (elm2); \
_Q_INVALIDATE((elm)->field.le_prev); \
_Q_INVALIDATE((elm)->field.le_next); \
} while (0)
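/*
 * Illustrative sketch (editorial, not part of the original header): the
 * hypothetical nodes "a", "b" and "c" show why le_prev holds the address
 * of the previous next-pointer -- BSD_LIST_REMOVE() needs neither the
 * head nor a traversal.
 *
 *	struct node {
 *		int key;
 *		BSD_LIST_ENTRY(node) link;
 *	} *a, *b, *c;
 *	BSD_LIST_HEAD(nodehead, node) head;
 *
 *	BSD_LIST_INIT(&head);
 *	BSD_LIST_INSERT_HEAD(&head, a, link);	list: a
 *	BSD_LIST_INSERT_AFTER(a, b, link);	list: a, b
 *	BSD_LIST_INSERT_BEFORE(b, c, link);	list: a, c, b
 *	BSD_LIST_REMOVE(c, link);		O(1) unlink
 */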
/*
* Simple queue definitions.
*/
#define BSD_SIMPLEQ_HEAD(name, type) \
struct name { \
struct type *sqh_first; /* first element */ \
struct type **sqh_last; /* addr of last next element */ \
}
#define BSD_SIMPLEQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).sqh_first }
#define BSD_SIMPLEQ_ENTRY(type) \
struct { \
struct type *sqe_next; /* next element */ \
}
/*
* Simple queue access methods.
*/
#define BSD_SIMPLEQ_FIRST(head) ((head)->sqh_first)
#define BSD_SIMPLEQ_END(head) NULL
#define BSD_SIMPLEQ_EMPTY(head) (BSD_SIMPLEQ_FIRST(head) == BSD_SIMPLEQ_END(head))
#define BSD_SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
#define BSD_SIMPLEQ_FOREACH(var, head, field) \
for((var) = BSD_SIMPLEQ_FIRST(head); \
(var) != BSD_SIMPLEQ_END(head); \
(var) = BSD_SIMPLEQ_NEXT(var, field))
/*
* Simple queue functions.
*/
#define BSD_SIMPLEQ_INIT(head) do { \
(head)->sqh_first = NULL; \
(head)->sqh_last = &(head)->sqh_first; \
} while (0)
#define BSD_SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
(head)->sqh_last = &(elm)->field.sqe_next; \
(head)->sqh_first = (elm); \
} while (0)
#define BSD_SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.sqe_next = NULL; \
*(head)->sqh_last = (elm); \
(head)->sqh_last = &(elm)->field.sqe_next; \
} while (0)
#define BSD_SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
(head)->sqh_last = &(elm)->field.sqe_next; \
(listelm)->field.sqe_next = (elm); \
} while (0)
#define BSD_SIMPLEQ_REMOVE_HEAD(head, field) do { \
if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
(head)->sqh_last = &(head)->sqh_first; \
} while (0)
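/*
 * Illustrative sketch (editorial, not part of the original header): a
 * hypothetical work queue used as a FIFO -- append at the tail, consume
 * from the head, the only end elements may be removed from.
 *
 *	struct job {
 *		void (*fn)(void *);
 *		BSD_SIMPLEQ_ENTRY(job) link;
 *	} *j;
 *	BSD_SIMPLEQ_HEAD(jobq, job) q = BSD_SIMPLEQ_HEAD_INITIALIZER(q);
 *
 *	BSD_SIMPLEQ_INSERT_TAIL(&q, j, link);
 *	while (!BSD_SIMPLEQ_EMPTY(&q)) {
 *		struct job *cur = BSD_SIMPLEQ_FIRST(&q);
 *		BSD_SIMPLEQ_REMOVE_HEAD(&q, link);
 *		cur->fn(cur);
 *	}
 */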
/*
* Tail queue definitions.
*/
#define BSD_TAILQ_HEAD(name, type) \
struct name { \
struct type *tqh_first; /* first element */ \
struct type **tqh_last; /* addr of last next element */ \
}
#define BSD_TAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).tqh_first }
#define BSD_TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
struct type **tqe_prev; /* address of previous next element */ \
}
/*
* tail queue access methods
*/
#define BSD_TAILQ_FIRST(head) ((head)->tqh_first)
#define BSD_TAILQ_END(head) NULL
#define BSD_TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define BSD_TAILQ_LAST(head, headname) \
(*(((struct headname *)((head)->tqh_last))->tqh_last))
/* XXX */
#define BSD_TAILQ_PREV(elm, headname, field) \
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
#define BSD_TAILQ_EMPTY(head) \
(BSD_TAILQ_FIRST(head) == BSD_TAILQ_END(head))
#define BSD_TAILQ_FOREACH(var, head, field) \
for((var) = BSD_TAILQ_FIRST(head); \
(var) != BSD_TAILQ_END(head); \
(var) = BSD_TAILQ_NEXT(var, field))
#define BSD_TAILQ_FOREACH_REVERSE(var, head, headname, field) \
for((var) = BSD_TAILQ_LAST(head, headname); \
(var) != BSD_TAILQ_END(head); \
(var) = BSD_TAILQ_PREV(var, headname, field))
/*
* Tail queue functions.
*/
#define BSD_TAILQ_INIT(head) do { \
(head)->tqh_first = NULL; \
(head)->tqh_last = &(head)->tqh_first; \
} while (0)
#define BSD_TAILQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
(head)->tqh_first->field.tqe_prev = \
&(elm)->field.tqe_next; \
else \
(head)->tqh_last = &(elm)->field.tqe_next; \
(head)->tqh_first = (elm); \
(elm)->field.tqe_prev = &(head)->tqh_first; \
} while (0)
#define BSD_TAILQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.tqe_next = NULL; \
(elm)->field.tqe_prev = (head)->tqh_last; \
*(head)->tqh_last = (elm); \
(head)->tqh_last = &(elm)->field.tqe_next; \
} while (0)
#define BSD_TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
(elm)->field.tqe_next->field.tqe_prev = \
&(elm)->field.tqe_next; \
else \
(head)->tqh_last = &(elm)->field.tqe_next; \
(listelm)->field.tqe_next = (elm); \
(elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
} while (0)
#define BSD_TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
(elm)->field.tqe_next = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (0)
#define BSD_TAILQ_REMOVE(head, elm, field) do { \
if (((elm)->field.tqe_next) != NULL) \
(elm)->field.tqe_next->field.tqe_prev = \
(elm)->field.tqe_prev; \
else \
(head)->tqh_last = (elm)->field.tqe_prev; \
*(elm)->field.tqe_prev = (elm)->field.tqe_next; \
_Q_INVALIDATE((elm)->field.tqe_prev); \
_Q_INVALIDATE((elm)->field.tqe_next); \
} while (0)
#define BSD_TAILQ_REPLACE(head, elm, elm2, field) do { \
if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \
(elm2)->field.tqe_next->field.tqe_prev = \
&(elm2)->field.tqe_next; \
else \
(head)->tqh_last = &(elm2)->field.tqe_next; \
(elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
*(elm2)->field.tqe_prev = (elm2); \
_Q_INVALIDATE((elm)->field.tqe_prev); \
_Q_INVALIDATE((elm)->field.tqe_next); \
} while (0)
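/*
 * Illustrative sketch (editorial, not part of the original header): a
 * hypothetical packet queue; tail queues pay one extra pointer per
 * element for O(1) insertion at either end and reverse traversal.
 *
 *	struct pkt {
 *		size_t len;
 *		BSD_TAILQ_ENTRY(pkt) link;
 *	} *p;
 *	BSD_TAILQ_HEAD(pktq, pkt) q = BSD_TAILQ_HEAD_INITIALIZER(q);
 *	size_t total = 0;
 *
 *	BSD_TAILQ_INSERT_TAIL(&q, p, link);
 *	BSD_TAILQ_FOREACH_REVERSE(p, &q, pktq, link)
 *		total += p->len;
 */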
/*
* Circular queue definitions.
*/
#define BSD_CIRCLEQ_HEAD(name, type) \
struct name { \
struct type *cqh_first; /* first element */ \
struct type *cqh_last; /* last element */ \
}
#define BSD_CIRCLEQ_HEAD_INITIALIZER(head) \
{ BSD_CIRCLEQ_END(&head), BSD_CIRCLEQ_END(&head) }
#define BSD_CIRCLEQ_ENTRY(type) \
struct { \
struct type *cqe_next; /* next element */ \
struct type *cqe_prev; /* previous element */ \
}
/*
* Circular queue access methods
*/
#define BSD_CIRCLEQ_FIRST(head) ((head)->cqh_first)
#define BSD_CIRCLEQ_LAST(head) ((head)->cqh_last)
#define BSD_CIRCLEQ_END(head) ((void *)(head))
#define BSD_CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
#define BSD_CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
#define BSD_CIRCLEQ_EMPTY(head) \
(BSD_CIRCLEQ_FIRST(head) == BSD_CIRCLEQ_END(head))
#define BSD_CIRCLEQ_FOREACH(var, head, field) \
for((var) = BSD_CIRCLEQ_FIRST(head); \
(var) != BSD_CIRCLEQ_END(head); \
(var) = BSD_CIRCLEQ_NEXT(var, field))
#define BSD_CIRCLEQ_FOREACH_REVERSE(var, head, field) \
for((var) = BSD_CIRCLEQ_LAST(head); \
(var) != BSD_CIRCLEQ_END(head); \
(var) = BSD_CIRCLEQ_PREV(var, field))
/*
* Circular queue functions.
*/
#define BSD_CIRCLEQ_INIT(head) do { \
(head)->cqh_first = BSD_CIRCLEQ_END(head); \
(head)->cqh_last = BSD_CIRCLEQ_END(head); \
} while (0)
#define BSD_CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
(elm)->field.cqe_next = (listelm)->field.cqe_next; \
(elm)->field.cqe_prev = (listelm); \
if ((listelm)->field.cqe_next == BSD_CIRCLEQ_END(head)) \
(head)->cqh_last = (elm); \
else \
(listelm)->field.cqe_next->field.cqe_prev = (elm); \
(listelm)->field.cqe_next = (elm); \
} while (0)
#define BSD_CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
(elm)->field.cqe_next = (listelm); \
(elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
if ((listelm)->field.cqe_prev == BSD_CIRCLEQ_END(head)) \
(head)->cqh_first = (elm); \
else \
(listelm)->field.cqe_prev->field.cqe_next = (elm); \
(listelm)->field.cqe_prev = (elm); \
} while (0)
#define BSD_CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
(elm)->field.cqe_next = (head)->cqh_first; \
(elm)->field.cqe_prev = BSD_CIRCLEQ_END(head); \
if ((head)->cqh_last == BSD_CIRCLEQ_END(head)) \
(head)->cqh_last = (elm); \
else \
(head)->cqh_first->field.cqe_prev = (elm); \
(head)->cqh_first = (elm); \
} while (0)
#define BSD_CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.cqe_next = BSD_CIRCLEQ_END(head); \
(elm)->field.cqe_prev = (head)->cqh_last; \
if ((head)->cqh_first == BSD_CIRCLEQ_END(head)) \
(head)->cqh_first = (elm); \
else \
(head)->cqh_last->field.cqe_next = (elm); \
(head)->cqh_last = (elm); \
} while (0)
#define BSD_CIRCLEQ_REMOVE(head, elm, field) do { \
if ((elm)->field.cqe_next == BSD_CIRCLEQ_END(head)) \
(head)->cqh_last = (elm)->field.cqe_prev; \
else \
(elm)->field.cqe_next->field.cqe_prev = \
(elm)->field.cqe_prev; \
if ((elm)->field.cqe_prev == BSD_CIRCLEQ_END(head)) \
(head)->cqh_first = (elm)->field.cqe_next; \
else \
(elm)->field.cqe_prev->field.cqe_next = \
(elm)->field.cqe_next; \
_Q_INVALIDATE((elm)->field.cqe_prev); \
_Q_INVALIDATE((elm)->field.cqe_next); \
} while (0)
#define BSD_CIRCLEQ_REPLACE(head, elm, elm2, field) do { \
if (((elm2)->field.cqe_next = (elm)->field.cqe_next) == \
BSD_CIRCLEQ_END(head)) \
(head)->cqh_last = (elm2); \
else \
(elm2)->field.cqe_next->field.cqe_prev = (elm2); \
if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) == \
BSD_CIRCLEQ_END(head)) \
(head)->cqh_first = (elm2); \
else \
(elm2)->field.cqe_prev->field.cqe_next = (elm2); \
_Q_INVALIDATE((elm)->field.cqe_prev); \
_Q_INVALIDATE((elm)->field.cqe_next); \
} while (0)
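/*
 * Illustrative sketch (editorial, not part of the original header): the
 * "more complex end of list detection" noted above means comparing
 * against BSD_CIRCLEQ_END(head) -- the head itself -- instead of NULL;
 * BSD_CIRCLEQ_FOREACH() does this internally.
 *
 *	struct item {
 *		int v;
 *		BSD_CIRCLEQ_ENTRY(item) link;
 *	} *it;
 *	BSD_CIRCLEQ_HEAD(itemq, item) q;
 *
 *	BSD_CIRCLEQ_INIT(&q);
 *	BSD_CIRCLEQ_INSERT_TAIL(&q, it, link);
 *	BSD_CIRCLEQ_FOREACH(it, &q, link)
 *		it->v++;
 */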
#endif /* !_BSD_SYS_QUEUE_H_ */

[File diff suppressed because it is too large]


@@ -0,0 +1,233 @@
/*
* Copyright (c) 2008 Daniel Mueller (daniel@danm.de)
* Copyright (c) 2000 Theo de Raadt
* Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Effort sponsored in part by the Defense Advanced Research Projects
* Agency (DARPA) and Air Force Research Laboratory, Air Force
* Materiel Command, USAF, under agreement number F30602-01-2-0537.
*
*/
/*
* Register definitions for 5601 BlueSteel Networks Ubiquitous Broadband
* Security "uBSec" chip. Definitions from revision 2.8 of the product
* datasheet.
*/
#define BS_BAR 0x10 /* DMA base address register */
#define BS_TRDY_TIMEOUT 0x40 /* TRDY timeout */
#define BS_RETRY_TIMEOUT 0x41 /* DMA retry timeout */
#define UBS_PCI_RTY_SHIFT 8
#define UBS_PCI_RTY_MASK 0xff
#define UBS_PCI_RTY(misc) \
(((misc) >> UBS_PCI_RTY_SHIFT) & UBS_PCI_RTY_MASK)
#define UBS_PCI_TOUT_SHIFT 0
#define UBS_PCI_TOUT_MASK 0xff
#define UBS_PCI_TOUT(misc) \
(((misc) >> UBS_PCI_TOUT_SHIFT) & UBS_PCI_TOUT_MASK)
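/*
 * Example (editorial): for a configuration word misc = 0x0000ff40 the
 * extractors above yield UBS_PCI_RTY(misc) == 0xff (retry count from
 * bits 15:8) and UBS_PCI_TOUT(misc) == 0x40 (TRDY timeout from bits 7:0).
 */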
/*
* DMA Control & Status Registers (offset from BS_BAR)
*/
#define BS_MCR1 0x20 /* DMA Master Command Record 1 */
#define BS_CTRL 0x24 /* DMA Control */
#define BS_STAT 0x28 /* DMA Status */
#define BS_ERR 0x2c /* DMA Error Address */
#define BS_DEV_ID 0x34 /* IPSec Device ID */
/* BS_CTRL - DMA Control */
#define BS_CTRL_RESET 0x80000000 /* hardware reset, 5805/5820 */
#define BS_CTRL_MCR2INT 0x40000000 /* enable intr MCR for MCR2 */
#define BS_CTRL_MCR1INT 0x20000000 /* enable intr MCR for MCR1 */
#define BS_CTRL_OFM 0x10000000 /* Output fragment mode */
#define BS_CTRL_BE32 0x08000000 /* big-endian, 32bit bytes */
#define BS_CTRL_BE64 0x04000000 /* big-endian, 64bit bytes */
#define BS_CTRL_DMAERR 0x02000000 /* enable intr DMA error */
#define BS_CTRL_RNG_M 0x01800000 /* RNG mode */
#define BS_CTRL_RNG_1 0x00000000 /* 1bit rn/one slow clock */
#define BS_CTRL_RNG_4 0x00800000 /* 1bit rn/four slow clocks */
#define BS_CTRL_RNG_8 0x01000000 /* 1bit rn/eight slow clocks */
#define BS_CTRL_RNG_16 0x01800000 /* 1bit rn/16 slow clocks */
#define BS_CTRL_SWNORM 0x00400000 /* 582[01], sw normalization */
#define BS_CTRL_FRAG_M 0x0000ffff /* output fragment size mask */
#define BS_CTRL_LITTLE_ENDIAN (BS_CTRL_BE32 | BS_CTRL_BE64)
/* BS_STAT - DMA Status */
#define BS_STAT_MCR1_BUSY 0x80000000 /* MCR1 is busy */
#define BS_STAT_MCR1_FULL 0x40000000 /* MCR1 is full */
#define BS_STAT_MCR1_DONE 0x20000000 /* MCR1 is done */
#define BS_STAT_DMAERR 0x10000000 /* DMA error */
#define BS_STAT_MCR2_FULL 0x08000000 /* MCR2 is full */
#define BS_STAT_MCR2_DONE 0x04000000 /* MCR2 is done */
#define BS_STAT_MCR1_ALLEMPTY 0x02000000 /* 5821, MCR1 is empty */
#define BS_STAT_MCR2_ALLEMPTY 0x01000000 /* 5821, MCR2 is empty */
/* BS_ERR - DMA Error Address */
#define BS_ERR_ADDR 0xfffffffc /* error address mask */
#define BS_ERR_READ 0x00000002 /* fault was on read */
struct ubsec_pktctx {
u_int32_t pc_deskey[6]; /* 3DES key */
u_int32_t pc_hminner[5]; /* hmac inner state */
u_int32_t pc_hmouter[5]; /* hmac outer state */
u_int32_t pc_iv[2]; /* [3]DES iv */
u_int16_t pc_flags; /* flags, below */
u_int16_t pc_offset; /* crypto offset */
} __attribute__ ((packed));
#define UBS_PKTCTX_ENC_3DES 0x8000 /* use 3des */
#define UBS_PKTCTX_ENC_AES 0x8000 /* use aes */
#define UBS_PKTCTX_ENC_NONE 0x0000 /* no encryption */
#define UBS_PKTCTX_INBOUND 0x4000 /* inbound packet */
#define UBS_PKTCTX_AUTH 0x3000 /* authentication mask */
#define UBS_PKTCTX_AUTH_NONE 0x0000 /* no authentication */
#define UBS_PKTCTX_AUTH_MD5 0x1000 /* use hmac-md5 */
#define UBS_PKTCTX_AUTH_SHA1 0x2000 /* use hmac-sha1 */
#define UBS_PKTCTX_AES128 0x0 /* AES 128bit keys */
#define UBS_PKTCTX_AES192 0x100 /* AES 192bit keys */
#define UBS_PKTCTX_AES256 0x200 /* AES 256bit keys */
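/*
 * Example (editorial): pc_flags combines one cipher selector, the
 * direction bit and an authentication selector, e.g. an inbound 3DES +
 * HMAC-SHA1 context (any endianness conversion the hardware expects is
 * omitted here):
 *
 *	pc_flags = UBS_PKTCTX_ENC_3DES | UBS_PKTCTX_INBOUND |
 *	    UBS_PKTCTX_AUTH_SHA1;
 */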
struct ubsec_pktctx_des {
volatile u_int16_t pc_len; /* length of ctx struct */
volatile u_int16_t pc_type; /* context type */
volatile u_int16_t pc_flags; /* flags, same as above */
volatile u_int16_t pc_offset; /* crypto/auth offset */
volatile u_int32_t pc_deskey[6]; /* 3DES key */
volatile u_int32_t pc_iv[2]; /* [3]DES iv */
volatile u_int32_t pc_hminner[5]; /* hmac inner state */
volatile u_int32_t pc_hmouter[5]; /* hmac outer state */
} __attribute__ ((packed));
struct ubsec_pktctx_aes128 {
volatile u_int16_t pc_len; /* length of ctx struct */
volatile u_int16_t pc_type; /* context type */
volatile u_int16_t pc_flags; /* flags, same as above */
volatile u_int16_t pc_offset; /* crypto/auth offset */
volatile u_int32_t pc_aeskey[4]; /* AES 128bit key */
volatile u_int32_t pc_iv[4]; /* AES iv */
volatile u_int32_t pc_hminner[5]; /* hmac inner state */
volatile u_int32_t pc_hmouter[5]; /* hmac outer state */
} __attribute__ ((packed));
struct ubsec_pktctx_aes192 {
volatile u_int16_t pc_len; /* length of ctx struct */
volatile u_int16_t pc_type; /* context type */
volatile u_int16_t pc_flags; /* flags, same as above */
volatile u_int16_t pc_offset; /* crypto/auth offset */
volatile u_int32_t pc_aeskey[6]; /* AES 192bit key */
volatile u_int32_t pc_iv[4]; /* AES iv */
volatile u_int32_t pc_hminner[5]; /* hmac inner state */
volatile u_int32_t pc_hmouter[5]; /* hmac outer state */
} __attribute__ ((packed));
struct ubsec_pktctx_aes256 {
volatile u_int16_t pc_len; /* length of ctx struct */
volatile u_int16_t pc_type; /* context type */
volatile u_int16_t pc_flags; /* flags, same as above */
volatile u_int16_t pc_offset; /* crypto/auth offset */
volatile u_int32_t pc_aeskey[8]; /* AES 256bit key */
volatile u_int32_t pc_iv[4]; /* AES iv */
volatile u_int32_t pc_hminner[5]; /* hmac inner state */
volatile u_int32_t pc_hmouter[5]; /* hmac outer state */
} __attribute__ ((packed));
#define UBS_PKTCTX_TYPE_IPSEC_DES 0x0000
#define UBS_PKTCTX_TYPE_IPSEC_AES 0x0040
struct ubsec_pktbuf {
volatile u_int32_t pb_addr; /* address of buffer start */
volatile u_int32_t pb_next; /* pointer to next pktbuf */
volatile u_int32_t pb_len; /* packet length */
} __attribute__ ((packed));
#define UBS_PKTBUF_LEN 0x0000ffff /* length mask */
struct ubsec_mcr {
volatile u_int16_t mcr_pkts; /* #pkts in this mcr */
volatile u_int16_t mcr_flags; /* mcr flags (below) */
volatile u_int32_t mcr_cmdctxp; /* command ctx pointer */
struct ubsec_pktbuf mcr_ipktbuf; /* input chain header */
volatile u_int16_t mcr_reserved;
volatile u_int16_t mcr_pktlen;
struct ubsec_pktbuf mcr_opktbuf; /* output chain header */
} __attribute__ ((packed));
struct ubsec_mcr_add {
volatile u_int32_t mcr_cmdctxp; /* command ctx pointer */
struct ubsec_pktbuf mcr_ipktbuf; /* input chain header */
volatile u_int16_t mcr_reserved;
volatile u_int16_t mcr_pktlen;
struct ubsec_pktbuf mcr_opktbuf; /* output chain header */
} __attribute__ ((packed));
#define UBS_MCR_DONE 0x0001 /* mcr has been processed */
#define UBS_MCR_ERROR 0x0002 /* error in processing */
#define UBS_MCR_ERRORCODE 0xff00 /* error type */
struct ubsec_ctx_keyop {
volatile u_int16_t ctx_len; /* command length */
volatile u_int16_t ctx_op; /* operation code */
volatile u_int8_t ctx_pad[60]; /* padding */
} __attribute__ ((packed));
#define UBS_CTXOP_DHPKGEN 0x01 /* dh public key generation */
#define UBS_CTXOP_DHSSGEN 0x02 /* dh shared secret gen. */
#define UBS_CTXOP_RSAPUB 0x03 /* rsa public key op */
#define UBS_CTXOP_RSAPRIV 0x04 /* rsa private key op */
#define UBS_CTXOP_DSASIGN 0x05 /* dsa signing op */
#define UBS_CTXOP_DSAVRFY 0x06 /* dsa verification */
#define UBS_CTXOP_RNGBYPASS 0x41 /* rng direct test mode */
#define UBS_CTXOP_RNGSHA1 0x42 /* rng sha1 test mode */
#define UBS_CTXOP_MODADD 0x43 /* modular addition */
#define UBS_CTXOP_MODSUB 0x44 /* modular subtraction */
#define UBS_CTXOP_MODMUL 0x45 /* modular multiplication */
#define UBS_CTXOP_MODRED 0x46 /* modular reduction */
#define UBS_CTXOP_MODEXP 0x47 /* modular exponentiation */
#define UBS_CTXOP_MODINV 0x48 /* modular inverse */
struct ubsec_ctx_rngbypass {
volatile u_int16_t rbp_len; /* command length, 64 */
volatile u_int16_t rbp_op; /* rng bypass, 0x41 */
volatile u_int8_t rbp_pad[60]; /* padding */
} __attribute__ ((packed));
/* modexp: C = (M ^ E) mod N */
struct ubsec_ctx_modexp {
volatile u_int16_t me_len; /* command length */
volatile u_int16_t me_op; /* modexp, 0x47 */
volatile u_int16_t me_E_len; /* E (bits) */
volatile u_int16_t me_N_len; /* N (bits) */
u_int8_t me_N[2048/8]; /* N */
} __attribute__ ((packed));
struct ubsec_ctx_rsapriv {
volatile u_int16_t rpr_len; /* command length */
volatile u_int16_t rpr_op; /* rsaprivate, 0x04 */
volatile u_int16_t rpr_q_len; /* q (bits) */
volatile u_int16_t rpr_p_len; /* p (bits) */
u_int8_t rpr_buf[5 * 1024 / 8]; /* parameters: */
/* p, q, dp, dq, pinv */
} __attribute__ ((packed));


@@ -0,0 +1,228 @@
/*
* Copyright (c) 2008 Daniel Mueller (daniel@danm.de)
* Copyright (c) 2000 Theo de Raadt
* Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Effort sponsored in part by the Defense Advanced Research Projects
* Agency (DARPA) and Air Force Research Laboratory, Air Force
* Materiel Command, USAF, under agreement number F30602-01-2-0537.
*
*/
/* Maximum queue length */
#ifndef UBS_MAX_NQUEUE
#define UBS_MAX_NQUEUE 60
#endif
#define UBS_MAX_SCATTER 64 /* Maximum scatter/gather depth */
#ifndef UBS_MAX_AGGR
#define UBS_MAX_AGGR 5 /* Maximum aggregation count */
#endif
#define UBSEC_CARD(sid) (((sid) & 0xf0000000) >> 28)
#define UBSEC_SESSION(sid) ( (sid) & 0x0fffffff)
#define UBSEC_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff))
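/*
 * Example (editorial): the session id packs a 4-bit card index above a
 * 28-bit session number, so the macros round-trip:
 *
 *	u_int32_t sid = UBSEC_SID(2, 7);	sid == 0x20000007
 *	UBSEC_CARD(sid) == 2 and UBSEC_SESSION(sid) == 7
 */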
#define UBS_DEF_RTY 0xff /* PCI Retry Timeout */
#define UBS_DEF_TOUT 0xff /* PCI TRDY Timeout */
#define UBS_DEF_CACHELINE 0x01 /* Cache Line setting */
#define DEFAULT_HMAC_LEN 12
struct ubsec_dma_alloc {
dma_addr_t dma_paddr;
void *dma_vaddr;
/*
bus_dmamap_t dma_map;
bus_dma_segment_t dma_seg;
*/
size_t dma_size;
/*
int dma_nseg;
*/
};
struct ubsec_q2 {
BSD_SIMPLEQ_ENTRY(ubsec_q2) q_next;
struct ubsec_dma_alloc q_mcr;
struct ubsec_dma_alloc q_ctx;
u_int q_type;
};
struct ubsec_q2_rng {
struct ubsec_q2 rng_q;
struct ubsec_dma_alloc rng_buf;
int rng_used;
};
/* C = (M ^ E) mod N */
#define UBS_MODEXP_PAR_M 0
#define UBS_MODEXP_PAR_E 1
#define UBS_MODEXP_PAR_N 2
struct ubsec_q2_modexp {
struct ubsec_q2 me_q;
struct cryptkop * me_krp;
struct ubsec_dma_alloc me_M;
struct ubsec_dma_alloc me_E;
struct ubsec_dma_alloc me_C;
struct ubsec_dma_alloc me_epb;
int me_modbits;
int me_shiftbits;
int me_normbits;
};
#define UBS_RSAPRIV_PAR_P 0
#define UBS_RSAPRIV_PAR_Q 1
#define UBS_RSAPRIV_PAR_DP 2
#define UBS_RSAPRIV_PAR_DQ 3
#define UBS_RSAPRIV_PAR_PINV 4
#define UBS_RSAPRIV_PAR_MSGIN 5
#define UBS_RSAPRIV_PAR_MSGOUT 6
struct ubsec_q2_rsapriv {
struct ubsec_q2 rpr_q;
struct cryptkop * rpr_krp;
struct ubsec_dma_alloc rpr_msgin;
struct ubsec_dma_alloc rpr_msgout;
};
#define UBSEC_RNG_BUFSIZ 16 /* measured in 32bit words */
struct ubsec_dmachunk {
struct ubsec_mcr d_mcr;
struct ubsec_mcr_add d_mcradd[UBS_MAX_AGGR-1];
struct ubsec_pktbuf d_sbuf[UBS_MAX_SCATTER-1];
struct ubsec_pktbuf d_dbuf[UBS_MAX_SCATTER-1];
u_int32_t d_macbuf[5];
union {
struct ubsec_pktctx_aes256 ctxaes256;
struct ubsec_pktctx_aes192 ctxaes192;
struct ubsec_pktctx_des ctxdes;
struct ubsec_pktctx_aes128 ctxaes128;
struct ubsec_pktctx ctx;
} d_ctx;
};
struct ubsec_dma {
BSD_SIMPLEQ_ENTRY(ubsec_dma) d_next;
struct ubsec_dmachunk *d_dma;
struct ubsec_dma_alloc d_alloc;
};
#define UBS_FLAGS_KEY 0x01 /* has key accelerator */
#define UBS_FLAGS_LONGCTX 0x02 /* uses long ipsec ctx */
#define UBS_FLAGS_BIGKEY 0x04 /* 2048bit keys */
#define UBS_FLAGS_HWNORM 0x08 /* hardware normalization */
#define UBS_FLAGS_RNG 0x10 /* hardware rng */
#define UBS_FLAGS_AES 0x20 /* hardware AES support */
struct ubsec_q {
BSD_SIMPLEQ_ENTRY(ubsec_q) q_next;
int q_nstacked_mcrs;
struct ubsec_q *q_stacked_mcr[UBS_MAX_AGGR-1];
struct cryptop *q_crp;
struct ubsec_dma *q_dma;
//struct mbuf *q_src_m, *q_dst_m;
struct sk_buff *q_src_m, *q_dst_m;
struct uio *q_src_io, *q_dst_io;
/*
bus_dmamap_t q_src_map;
bus_dmamap_t q_dst_map;
*/
/* DMA addresses for In-/Out packages */
int q_src_len;
int q_dst_len;
struct ubsec_dma_alloc q_src_map[UBS_MAX_SCATTER];
struct ubsec_dma_alloc q_dst_map[UBS_MAX_SCATTER];
int q_has_dst;
int q_sesn;
int q_flags;
};
struct ubsec_softc {
softc_device_decl sc_dev;
struct ssb_device *sdev; /* device backpointer */
struct device *sc_dv; /* generic device */
void *sc_ih; /* interrupt handler cookie */
int sc_flags; /* device specific flags */
u_int32_t sc_statmask; /* interrupt status mask */
int32_t sc_cid; /* crypto tag */
BSD_SIMPLEQ_HEAD(,ubsec_q) sc_queue; /* packet queue, mcr1 */
int sc_nqueue; /* count enqueued, mcr1 */
BSD_SIMPLEQ_HEAD(,ubsec_q) sc_qchip; /* on chip, mcr1 */
BSD_SIMPLEQ_HEAD(,ubsec_q) sc_freequeue; /* list of free queue elements */
BSD_SIMPLEQ_HEAD(,ubsec_q2) sc_queue2; /* packet queue, mcr2 */
int sc_nqueue2; /* count enqueued, mcr2 */
BSD_SIMPLEQ_HEAD(,ubsec_q2) sc_qchip2; /* on chip, mcr2 */
int sc_nsessions; /* # of sessions */
struct ubsec_session *sc_sessions; /* sessions */
int sc_rnghz; /* rng poll time */
struct ubsec_q2_rng sc_rng;
struct ubsec_dma sc_dmaa[UBS_MAX_NQUEUE];
struct ubsec_q *sc_queuea[UBS_MAX_NQUEUE];
BSD_SIMPLEQ_HEAD(,ubsec_q2) sc_q2free; /* free list */
spinlock_t sc_ringmtx; /* PE ring lock */
};
#define UBSEC_QFLAGS_COPYOUTIV 0x1
struct ubsec_session {
u_int32_t ses_used;
u_int32_t ses_key[8]; /* 3DES/AES key */
u_int32_t ses_hminner[5]; /* hmac inner state */
u_int32_t ses_hmouter[5]; /* hmac outer state */
u_int32_t ses_iv[4]; /* [3]DES/AES iv */
u_int32_t ses_keysize; /* AES key size */
u_int32_t ses_mlen; /* hmac/hash length */
};
struct ubsec_stats {
u_int64_t hst_ibytes;
u_int64_t hst_obytes;
u_int32_t hst_ipackets;
u_int32_t hst_opackets;
u_int32_t hst_invalid;
u_int32_t hst_nomem;
u_int32_t hst_queuefull;
u_int32_t hst_dmaerr;
u_int32_t hst_mcrerr;
u_int32_t hst_nodmafree;
};
struct ubsec_generic_ctx {
u_int32_t pc_key[8]; /* [3]DES/AES key */
u_int32_t pc_hminner[5]; /* hmac inner state */
u_int32_t pc_hmouter[5]; /* hmac outer state */
u_int32_t pc_iv[4]; /* [3]DES/AES iv */
u_int16_t pc_flags; /* flags, below */
u_int16_t pc_offset; /* crypto offset */
u_int16_t pc_type; /* Cryptographic operation */
};