From a9faf34ba120d9d39ff0c7656ee3de12a110e22a Mon Sep 17 00:00:00 2001
From: Kurt Mahan <kmahan@freescale.com>
Date: Wed, 31 Oct 2007 16:57:05 -0600
Subject: [PATCH] Core Coldfire/MCF5445x arch lib changes.
LTIBName: mcfv4e-arch-lib-mods
Signed-off-by: Kurt Mahan <kmahan@freescale.com>
---
 arch/m68k/lib/checksum.c  |  124 +++++++++++++++++++++++
 arch/m68k/lib/muldi3.c    |   10 ++
 arch/m68k/lib/semaphore.S |   25 +++++
 arch/m68k/lib/string.c    |   64 ++++++++++++
 arch/m68k/lib/uaccess.c   |  242 +++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 465 insertions(+), 0 deletions(-)
--- a/arch/m68k/lib/checksum.c
+++ b/arch/m68k/lib/checksum.c
@@ -39,8 +39,131 @@
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
+#ifdef CONFIG_COLDFIRE
+
+static inline unsigned short from32to16(unsigned long x)
+{
+	/* add up 16-bit and 16-bit for 16+c bit */
+	x = (x & 0xffff) + (x >> 16);
+	/* add up carry.. */
+	x = (x & 0xffff) + (x >> 16);
+	return x;
+}
+
+static unsigned long do_csum(const unsigned char *buff, int len)
+{
+	int odd, count;
+	unsigned long result = 0;
+
+	if (len <= 0)
+		goto out;
+	odd = 1 & (unsigned long) buff;
+	if (odd) {
+		result = *buff;
+		len--;
+		buff++;
+	}
+	count = len >> 1;	/* nr of 16-bit words.. */
+	if (count) {
+		if (2 & (unsigned long) buff) {
+			result += *(unsigned short *) buff;
+			count--;
+			len -= 2;
+			buff += 2;
+		}
+		count >>= 1;	/* nr of 32-bit words.. */
+		if (count) {
+			unsigned long carry = 0;
+			do {
+				unsigned long w = *(unsigned long *) buff;
+				count--;
+				buff += 4;
+				result += carry;
+				result += w;
+				carry = (w > result);
+			} while (count);
+			result += carry;
+			result = (result & 0xffff) + (result >> 16);
+		}
+		if (len & 2) {
+			result += *(unsigned short *) buff;
+			buff += 2;
+		}
+	}
+	if (len & 1)
+		result += (*buff << 8);
+	result = from32to16(result);
+	if (odd)
+		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+out:
+	return result;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	return ~do_csum(iph, ihl*4);
+}
+EXPORT_SYMBOL(ip_fast_csum);
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
+	unsigned int result = do_csum(buff, len);
+
+	/* add in old sum, and carry.. */
+	result += sum;
+	if (sum > result)
+		result += 1;
+	return result;
+}
+EXPORT_SYMBOL(csum_partial);
+
+/*
+ * copy from fs while checksumming, otherwise like csum_partial
+ */
+
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+			__wsum sum, int *csum_err)
+{
+	if (csum_err) *csum_err = 0;
+	memcpy(dst, (__force const void *)src, len);
+	return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+
+/*
+ * copy from ds while checksumming, otherwise like csum_partial
+ */
+
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+{
+	memcpy(dst, src, len);
+	return csum_partial(dst, len, sum);
+}
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+
+#else /* !CONFIG_COLDFIRE */
+
+unsigned int
+csum_partial(const unsigned char *buff, int len, unsigned int sum)
+{
	unsigned long tmp1, tmp2;
	/*
	 * Experiments with ethernet and slip connections show that buff
@@ -423,3 +546,4 @@ csum_partial_copy_nocheck(const void *sr
return(sum);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
+#endif /* CONFIG_COLDFIRE */
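
[The ColdFire path above replaces the 68020 inline-asm checksum with the generic byte/word/long summation. Its core trick is ones-complement (end-around-carry) arithmetic: whenever the 32-bit accumulator overflows 16 bits, the carry is folded back into the low word, which is exactly what from32to16() does. A minimal userspace sketch of that fold, plain C99 with no kernel headers; fold32() is an illustrative name, not part of the patch:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* End-around-carry fold, as in from32to16() above: add the
	 * carries out of bit 15 back into the low word, twice, so the
	 * result is a proper ones-complement 16-bit sum. */
	static uint16_t fold32(uint32_t x)
	{
		x = (x & 0xffff) + (x >> 16);
		x = (x & 0xffff) + (x >> 16);
		return (uint16_t)x;
	}

	int main(void)
	{
		/* 0xffff + 0x0001 overflows 16 bits; in ones-complement
		 * arithmetic the carry wraps around, giving 0x0001, so
		 * the expected total with 0x1234 added is 0x1235. */
		uint32_t sum = 0xffffu + 0x0001u + 0x1234u;

		assert(fold32(sum) == 0x1235);
		printf("folded sum = 0x%04x\n", fold32(sum));
		return 0;
	}

Because 0xffff behaves as zero in ones-complement arithmetic, adding 0x0001 to it wraps back to 0x0001, which the two folding steps reproduce.]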
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -21,12 +21,22 @@ Boston, MA 02111-1307, USA. */
#define BITS_PER_UNIT 8
+#ifdef CONFIG_COLDFIRE
+#define umul_ppmm(w1, w0, u, v)				\
+	do {						\
+		unsigned long long x;			\
+		x = (unsigned long long)(u) * (v);	\
+		w0 = (unsigned long)(x & 0xffffffffULL);\
+		w1 = (unsigned long)(x >> 32);		\
+	} while (0)
+#else /* !CONFIG_COLDFIRE */
#define umul_ppmm(w1, w0, u, v) \
__asm__ ("mulu%.l %3,%1:%0" \
: "=d" ((USItype)(w0)), \
"=d" ((USItype)(w1)) \
: "%0" ((USItype)(u)), \
"dmi" ((USItype)(v)))
+#endif /* CONFIG_COLDFIRE */
#define __umulsidi3(u, v) \
({DIunion __w; \
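
[The ColdFire V4e has no mulu.l form that produces a 64-bit result in a register pair, so the patch computes the full product in C via unsigned long long and splits it. The order of operations when extracting the high word matters: shift first, then truncate; the unfixed line cast the masked value to 32 bits before shifting, which always yields zero. A standalone sketch, userspace C99 with a hypothetical copy of the macro and a known-product check:

	#include <assert.h>
	#include <stdio.h>

	/* C substitute for the mulu.l-based umul_ppmm(): form the
	 * 64-bit product, then split it into high and low halves. */
	#define umul_ppmm(w1, w0, u, v)				\
		do {						\
			unsigned long long __x;			\
			__x = (unsigned long long)(u) * (v);	\
			(w0) = (unsigned int)__x;		\
			(w1) = (unsigned int)(__x >> 32);	\
		} while (0)

	int main(void)
	{
		unsigned int hi, lo;

		/* (2^32 - 1)^2 = 0xfffffffe00000001 */
		umul_ppmm(hi, lo, 0xffffffffu, 0xffffffffu);
		assert(hi == 0xfffffffeu);
		assert(lo == 0x00000001u);
		printf("hi=0x%08x lo=0x%08x\n", hi, lo);
		return 0;
	}
]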
--- a/arch/m68k/lib/semaphore.S
+++ b/arch/m68k/lib/semaphore.S
@@ -16,11 +16,24 @@
* there is contention on the semaphore.
*/
ENTRY(__down_failed)
+#ifndef CONFIG_COLDFIRE
	moveml %a0/%d0/%d1,-(%sp)
+#else
+	movel %a0,-(%sp)
+	movel %d0,-(%sp)
+	movel %d1,-(%sp)
+#endif
	movel %a1,-(%sp)
	jbsr __down
	movel (%sp)+,%a1
+#ifndef CONFIG_COLDFIRE
	moveml (%sp)+,%a0/%d0/%d1
+#else
+	movel (%sp)+,%d1
+	movel (%sp)+,%d0
+	movel (%sp)+,%a0
+#endif
+
	rts
ENTRY(__down_failed_interruptible)
@@ -44,10 +57,22 @@ ENTRY(__down_failed_trylock)
rts
ENTRY(__up_wakeup)
+#ifndef CONFIG_COLDFIRE
	moveml %a0/%d0/%d1,-(%sp)
+#else
+	movel %a0,-(%sp)
+	movel %d0,-(%sp)
+	movel %d1,-(%sp)
+#endif
	movel %a1,-(%sp)
	jbsr __up
	movel (%sp)+,%a1
+#ifndef CONFIG_COLDFIRE
	moveml (%sp)+,%a0/%d0/%d1
+#else
+	movel (%sp)+,%d1
+	movel (%sp)+,%d0
+	movel (%sp)+,%a0
+#endif
	rts
--- a/arch/m68k/lib/string.c
+++ b/arch/m68k/lib/string.c
@@ -15,6 +15,7 @@ char *strcpy(char *dest, const char *src
}
EXPORT_SYMBOL(strcpy);
+#ifndef CONFIG_COLDFIRE
void *memset(void *s, int c, size_t count)
{
void *xs = s;
@@ -143,6 +144,69 @@ void *memcpy(void *to, const void *from,
}
EXPORT_SYMBOL(memcpy);
+#else /* CONFIG_COLDFIRE */
+
+void *memset(void *s, int c, size_t count)
+{
+	void *xs = s;	/* keep the start address to return */
+	unsigned char *p = s;
+
+	while (count--)
+		*p++ = (unsigned char)c;
+
+	return xs;
+}
+EXPORT_SYMBOL(memset);
+
+void *memcpy(void *to, const void *from, size_t n)
+{
+	void *xto = to;
+	size_t temp;
+
+	if (!n)
+		return xto;
+	if ((long) to & 1) {
+		char *cto = to;
+		const char *cfrom = from;
+		*cto++ = *cfrom++;
+		to = cto;
+		from = cfrom;
+		n--;
+	}
+	if (n > 2 && (long) to & 2) {
+		short *sto = to;
+		const short *sfrom = from;
+		*sto++ = *sfrom++;
+		to = sto;
+		from = sfrom;
+		n -= 2;
+	}
+	temp = n >> 2;
+	if (temp) {
+		long *lto = to;
+		const long *lfrom = from;
+		for (; temp; temp--)
+			*lto++ = *lfrom++;
+		to = lto;
+		from = lfrom;
+	}
+	if (n & 2) {
+		short *sto = to;
+		const short *sfrom = from;
+		*sto++ = *sfrom++;
+		to = sto;
+		from = sfrom;
+	}
+	if (n & 1) {
+		char *cto = to;
+		const char *cfrom = from;
+		*cto = *cfrom;
+	}
+	return xto;
+}
+EXPORT_SYMBOL(memcpy);
+#endif /* CONFIG_COLDFIRE */
+
void *memmove(void *dest, const void *src, size_t n)
{
	void *xdest = dest;
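
[The ColdFire memcpy() above is the classic alignment cascade: one byte to reach an even address, one 16-bit word to reach 32-bit alignment, a long-word bulk loop, then 2- and 1-byte tails selected by the low bits of n. A userspace sketch under explicit assumptions: cf_memcpy() is an illustrative name, uint16_t/uint32_t stand in for the kernel's short/long (16/32 bits on m68k) so the sketch also behaves correctly on 64-bit hosts, and the host is assumed to tolerate the misaligned source accesses that the V4e core handles in hardware:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	/* Userspace model of the ColdFire memcpy() alignment cascade. */
	static void *cf_memcpy(void *to, const void *from, size_t n)
	{
		void *xto = to;
		size_t temp;

		if (!n)
			return xto;
		if ((uintptr_t)to & 1) {	/* byte: reach an even address */
			char *cto = to;
			const char *cfrom = from;
			*cto++ = *cfrom++;
			to = cto;
			from = cfrom;
			n--;
		}
		if (n > 2 && ((uintptr_t)to & 2)) {	/* word: reach 32-bit alignment */
			uint16_t *sto = to;
			const uint16_t *sfrom = from;
			*sto++ = *sfrom++;
			to = sto;
			from = sfrom;
			n -= 2;
		}
		temp = n >> 2;			/* bulk 32-bit copies */
		if (temp) {
			uint32_t *lto = to;
			const uint32_t *lfrom = from;
			for (; temp; temp--)
				*lto++ = *lfrom++;
			to = lto;
			from = lfrom;
		}
		if (n & 2) {			/* 2-byte tail */
			uint16_t *sto = to;
			const uint16_t *sfrom = from;
			*sto++ = *sfrom++;
			to = sto;
			from = sfrom;
		}
		if (n & 1) {			/* 1-byte tail */
			char *cto = to;
			const char *cfrom = from;
			*cto = *cfrom;
		}
		return xto;
	}

	int main(void)
	{
		char src[64], dst[72];

		for (size_t i = 0; i < sizeof(src); i++)
			src[i] = (char)(i + 1);

		/* exercise every destination alignment and several lengths */
		for (size_t off = 0; off < 4; off++)
			for (size_t len = 0; len <= 40; len++) {
				memset(dst, 0, sizeof(dst));
				assert(cf_memcpy(dst + off, src, len) == dst + off);
				assert(memcmp(dst + off, src, len) == 0);
			}
		return 0;
	}
]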
--- a/arch/m68k/lib/uaccess.c
+++ b/arch/m68k/lib/uaccess.c
@@ -5,6 +5,7 @@
*/
#include <linux/module.h>
+#ifndef CONFIG_COLDFIRE
#include <asm/uaccess.h>
unsigned long __generic_copy_from_user(void *to, const void __user *from,
@@ -220,3 +221,244 @@ unsigned long __clear_user(void __user *
	return res;
}
EXPORT_SYMBOL(__clear_user);
+
+#else /* CONFIG_COLDFIRE */
+
+#include <asm/cf_uaccess.h>
+
+unsigned long __generic_copy_from_user(void *to, const void *from,
+		unsigned long n)
+{
+	unsigned long tmp;
+	__asm__ __volatile__
+		("	tstl %2\n"
+		 "	jeq 2f\n"
+		 "1:	movel (%1)+,%3\n"
+		 "	movel %3,(%0)+\n"
+		 "	subql #1,%2\n"
+		 "	jne 1b\n"
+		 "2:	movel %4,%2\n"
+		 "	bclr #1,%2\n"
+		 "	jeq 4f\n"
+		 "3:	movew (%1)+,%3\n"
+		 "	movew %3,(%0)+\n"
+		 "4:	bclr #0,%2\n"
+		 "	jeq 6f\n"
+		 "5:	moveb (%1)+,%3\n"
+		 "	moveb %3,(%0)+\n"
+		 "6:\n"
+		 ".section .fixup,\"ax\"\n"
+		 "	.even\n"
+		 "7:	movel %2,%%d0\n"
+		 "71:	clrl (%0)+\n"
+		 "	subql #1,%%d0\n"
+		 "	jne 71b\n"
+		 "	lsll #2,%2\n"
+		 "	addl %4,%2\n"
+		 "	btst #1,%4\n"
+		 "	jne 81f\n"
+		 "	btst #0,%4\n"
+		 "	jne 91f\n"
+		 "	jra 6b\n"
+		 "8:	addql #2,%2\n"
+		 "81:	clrw (%0)+\n"
+		 "	btst #0,%4\n"
+		 "	jne 91f\n"
+		 "	jra 6b\n"
+		 "9:	addql #1,%2\n"
+		 "91:	clrb (%0)+\n"
+		 "	jra 6b\n"
+		 ".previous\n"
+		 ".section __ex_table,\"a\"\n"
+		 "	.align 4\n"
+		 "	.long 1b,7b\n"
+		 "	.long 3b,8b\n"
+		 "	.long 5b,9b\n"
+		 ".previous"
+		 : "=a"(to), "=a"(from), "=d"(n), "=&d"(tmp)
+		 : "d"(n & 3), "0"(to), "1"(from), "2"(n/4)
+		 : "d0", "memory");
+	return n;
+}
+EXPORT_SYMBOL(__generic_copy_from_user);
+
+
+unsigned long __generic_copy_to_user(void *to, const void *from,
+		unsigned long n)
+{
+	unsigned long tmp;
+	__asm__ __volatile__
+		("	tstl %2\n"
+		 "	jeq 3f\n"
+		 "1:	movel (%1)+,%3\n"
+		 "22:	movel %3,(%0)+\n"
+		 "2:	subql #1,%2\n"
+		 "	jne 1b\n"
+		 "3:	movel %4,%2\n"
+		 "	bclr #1,%2\n"
+		 "	jeq 4f\n"
+		 "	movew (%1)+,%3\n"
+		 "24:	movew %3,(%0)+\n"
+		 "4:	bclr #0,%2\n"
+		 "	jeq 5f\n"
+		 "	moveb (%1)+,%3\n"
+		 "25:	moveb %3,(%0)+\n"
+		 "5:\n"
+		 ".section .fixup,\"ax\"\n"
+		 "	.even\n"
+		 "60:	addql #1,%2\n"
+		 "6:	lsll #2,%2\n"
+		 "	addl %4,%2\n"
+		 "	jra 5b\n"
+		 "7:	addql #2,%2\n"
+		 "	jra 5b\n"
+		 "8:	addql #1,%2\n"
+		 "	jra 5b\n"
+		 ".previous\n"
+		 ".section __ex_table,\"a\"\n"
+		 "	.align 4\n"
+		 "	.long 1b,60b\n"
+		 "	.long 22b,6b\n"
+		 "	.long 2b,6b\n"
+		 "	.long 24b,7b\n"
+		 "	.long 3b,60b\n"
+		 "	.long 4b,7b\n"
+		 "	.long 25b,8b\n"
+		 "	.long 5b,8b\n"
+		 ".previous"
+		 : "=a"(to), "=a"(from), "=d"(n), "=&d"(tmp)
+		 : "r"(n & 3), "0"(to), "1"(from), "2"(n / 4)
+		 : "memory");
+	return n;
+}
+EXPORT_SYMBOL(__generic_copy_to_user);
+
+/*
+ * Copy a null terminated string from userspace.
+ */
+
+long strncpy_from_user(char *dst, const char *src, long count)
+{
+	long res = -EFAULT;
+	if (!(access_ok(VERIFY_READ, src, 1)))	/* --tym-- */
+		return res;
+	if (count == 0) return count;
+	__asm__ __volatile__
+		("1:	moveb (%2)+,%%d0\n"
+		 "12:	moveb %%d0,(%1)+\n"
+		 "	jeq 2f\n"
+		 "	subql #1,%3\n"
+		 "	jne 1b\n"
+		 "2:	subl %3,%0\n"
+		 "3:\n"
+		 ".section .fixup,\"ax\"\n"
+		 "	.even\n"
+		 "4:	movel %4,%0\n"
+		 "	jra 3b\n"
+		 ".previous\n"
+		 ".section __ex_table,\"a\"\n"
+		 "	.align 4\n"
+		 "	.long 1b,4b\n"
+		 "	.long 12b,4b\n"
+		 ".previous"
+		 : "=d"(res), "=a"(dst), "=a"(src), "=d"(count)
+		 : "i"(-EFAULT), "0"(count), "1"(dst), "2"(src), "3"(count)
+		 : "d0", "memory");
+	return res;
+}
+EXPORT_SYMBOL(strncpy_from_user);
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 on exception, a value greater than N if too long
+ */
+long strnlen_user(const char *src, long n)
+{
+	long res = -EFAULT;
+	if (!(access_ok(VERIFY_READ, src, 1)))	/* --tym-- */
+		return res;
+
+	res = -(long)src;
+	__asm__ __volatile__
+		("1:\n"
+		 "	tstl %2\n"
+		 "	jeq 3f\n"
+		 "2:	moveb (%1)+,%%d0\n"
+		 "22:\n"
+		 "	subql #1,%2\n"
+		 "	tstb %%d0\n"
+		 "	jne 1b\n"
+		 "	jra 4f\n"
+		 "3:\n"
+		 "	addql #1,%0\n"
+		 "4:\n"
+		 "	addl %1,%0\n"
+		 "5:\n"
+		 ".section .fixup,\"ax\"\n"
+		 "	.even\n"
+		 "6:	moveq %3,%0\n"
+		 "	jra 5b\n"
+		 ".previous\n"
+		 ".section __ex_table,\"a\"\n"
+		 "	.align 4\n"
+		 "	.long 2b,6b\n"
+		 "	.long 22b,6b\n"
+		 ".previous"
+		 : "=d"(res), "=a"(src), "=d"(n)
+		 : "i"(0), "0"(res), "1"(src), "2"(n)
+		 : "d0");
+	return res;
+}
+EXPORT_SYMBOL(strnlen_user);
+
+
+/*
+ * Zero Userspace
+ */
+
+unsigned long __clear_user(void *to, unsigned long n)
+{
+	__asm__ __volatile__
+		("	tstl %1\n"
+		 "	jeq 3f\n"
+		 "1:	movel %3,(%0)+\n"
+		 "2:	subql #1,%1\n"
+		 "	jne 1b\n"
+		 "3:	movel %2,%1\n"
+		 "	bclr #1,%1\n"
+		 "	jeq 4f\n"
+		 "24:	movew %3,(%0)+\n"
+		 "4:	bclr #0,%1\n"
+		 "	jeq 5f\n"
+		 "25:	moveb %3,(%0)+\n"
+		 "5:\n"
+		 ".section .fixup,\"ax\"\n"
+		 "	.even\n"
+		 "61:	addql #1,%1\n"
+		 "6:	lsll #2,%1\n"
+		 "	addl %2,%1\n"
+		 "	jra 5b\n"
+		 "7:	addql #2,%1\n"
+		 "	jra 5b\n"
+		 "8:	addql #1,%1\n"
+		 "	jra 5b\n"
+		 ".previous\n"
+		 ".section __ex_table,\"a\"\n"
+		 "	.align 4\n"
+		 "	.long 1b,61b\n"
+		 "	.long 2b,6b\n"
+		 "	.long 3b,61b\n"
+		 "	.long 24b,7b\n"
+		 "	.long 4b,7b\n"
+		 "	.long 25b,8b\n"
+		 "	.long 5b,8b\n"
+		 ".previous"
+		 : "=a"(to), "=d"(n)
+		 : "r"(n & 3), "d"(0), "0"(to), "1"(n/4));
+	return n;
+}
+EXPORT_SYMBOL(__clear_user);
+
+#endif /* CONFIG_COLDFIRE */
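
[The uaccess routines above all share one decomposition: n/4 long-word moves, then a 2-byte and a 1-byte tail chosen by bit 1 and bit 0 of n (the bclr instructions), with the exception table mapping each faulting move to fixup code that reports how many bytes were left. A plain-C model of the non-faulting path of __clear_user(); cf_clear() is an illustrative name, and like the real function it returns the number of bytes not cleared, 0 on success:

	#include <assert.h>
	#include <string.h>

	/* Userspace model of __clear_user()'s happy path: n/4 long
	 * stores, then the 2-byte and 1-byte tails picked from n. */
	static unsigned long cf_clear(unsigned char *to, unsigned long n)
	{
		unsigned long longs = n / 4;

		while (longs--) {
			memset(to, 0, 4);	/* "movel %3,(%0)+" loop */
			to += 4;
		}
		if (n & 2) {			/* "movew %3,(%0)+" */
			to[0] = 0;
			to[1] = 0;
			to += 2;
		}
		if (n & 1)			/* "moveb %3,(%0)+" */
			*to = 0;
		return 0;			/* nothing left unwritten */
	}

	int main(void)
	{
		unsigned char buf[32];

		memset(buf, 0xaa, sizeof(buf));
		assert(cf_clear(buf, 7) == 0);	/* one long + word + byte */
		for (int i = 0; i < 7; i++)
			assert(buf[i] == 0);
		assert(buf[7] == 0xaa);		/* past the range: untouched */
		return 0;
	}
]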
+