mirror of git://projects.qi-hardware.com/openwrt-xburst.git
synced 2024-11-09 10:04:04 +02:00
343c185b7d
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@16547 3c298f89-4303-0410-b956-a3cf2f4a3e73
From 70b2bd01829b38a1a79caeda05d436b2e5fecf82 Mon Sep 17 00:00:00 2001
From: Kurt Mahan <kmahan@freescale.com>
Date: Wed, 31 Oct 2007 17:00:18 -0600
Subject: [PATCH] Core Coldfire/MCF5445x specific code.

LTIBName: mcfv4e-coldfire-code
Signed-off-by: Kurt Mahan <kmahan@freescale.com>
---
arch/m68k/coldfire/Makefile | 11 +
arch/m68k/coldfire/cache.c | 215 +++++++++
arch/m68k/coldfire/config.c | 420 ++++++++++++++++++
arch/m68k/coldfire/entry.S | 701 ++++++++++++++++++++++++++++++
arch/m68k/coldfire/head.S | 474 ++++++++++++++++++++
arch/m68k/coldfire/ints.c | 384 ++++++++++++++++
arch/m68k/coldfire/iomap.c | 54 +++
arch/m68k/coldfire/mcf5445x-pci.c | 427 ++++++++++++++++++
arch/m68k/coldfire/muldi3.S | 64 +++
arch/m68k/coldfire/pci.c | 245 +++++++++++
arch/m68k/coldfire/signal.c | 868 +++++++++++++++++++++++++++++++++++++
arch/m68k/coldfire/traps.c | 454 +++++++++++++++++++
arch/m68k/coldfire/vmlinux-cf.lds | 92 ++++
13 files changed, 4409 insertions(+), 0 deletions(-)
create mode 100644 arch/m68k/coldfire/Makefile
create mode 100644 arch/m68k/coldfire/cache.c
create mode 100644 arch/m68k/coldfire/config.c
create mode 100644 arch/m68k/coldfire/entry.S
create mode 100644 arch/m68k/coldfire/head.S
create mode 100644 arch/m68k/coldfire/ints.c
create mode 100644 arch/m68k/coldfire/iomap.c
create mode 100644 arch/m68k/coldfire/mcf5445x-pci.c
create mode 100644 arch/m68k/coldfire/muldi3.S
create mode 100644 arch/m68k/coldfire/pci.c
create mode 100644 arch/m68k/coldfire/signal.c
create mode 100644 arch/m68k/coldfire/traps.c
create mode 100644 arch/m68k/coldfire/vmlinux-cf.lds

--- /dev/null
+++ b/arch/m68k/coldfire/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Linux arch/m68k/coldfire source directory
+#
+
+obj-y:= entry.o config.o cache.o signal.o muldi3.o traps.o ints.o
+
+ifneq ($(strip $(CONFIG_USB) $(CONFIG_USB_GADGET_MCF5445X)),)
+ obj-y += usb.o usb/
+endif
+
+obj-$(CONFIG_PCI) += pci.o mcf5445x-pci.o iomap.o
--- /dev/null
+++ b/arch/m68k/coldfire/cache.c
@@ -0,0 +1,215 @@
+/*
+ * linux/arch/m68k/coldifre/cache.c
+ *
+ * Matt Waddel Matt.Waddel@freescale.com
+ * Copyright Freescale Semiconductor, Inc. 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <asm/cfcache.h>
+#include <asm/coldfire.h>
+#include <asm/system.h>
+
+#define _DCACHE_SIZE (2*16384)
+#define _ICACHE_SIZE (2*16384)
+
+#define _SET_SHIFT 4
+
+/*
+ * Masks for cache sizes. Programming note: because the set size is a
+ * power of two, the mask is also the last address in the set.
+ */
+
+#define _DCACHE_SET_MASK ((_DCACHE_SIZE/64-1)<<_SET_SHIFT)
+#define _ICACHE_SET_MASK ((_ICACHE_SIZE/64-1)<<_SET_SHIFT)
+#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
+#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
+
+/************************************************************
+ * Routine to cleanly flush the cache, pushing all lines and
+ * invalidating them.
+ *
+ * The is the flash-resident version, used after copying the .text
+ * segment from flash to ram.
+ *************************************************************/
+void FLASHDcacheFlushInvalidate(void)
+ __attribute__ ((section (".text_loader")));
+
+void FLASHDcacheFlushInvalidate()
+{
+ unsigned long set;
+ unsigned long start_set;
+ unsigned long end_set;
+
+ start_set = 0;
+ end_set = (unsigned long)LAST_DCACHE_ADDR;
+
+ for (set = start_set; set < end_set; set += (0x10 - 3))
+ asm volatile("cpushl %%dc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%dc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%dc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%dc,(%0)" : : "a" (set));
+}
+
+/************************************************************
+ * Routine to cleanly flush the cache, pushing all lines and
+ * invalidating them.
+ *
+ *************************************************************/
+void DcacheFlushInvalidate()
+{
+ unsigned long set;
+ unsigned long start_set;
+ unsigned long end_set;
+
+ start_set = 0;
+ end_set = (unsigned long)LAST_DCACHE_ADDR;
+
+ for (set = start_set; set < end_set; set += (0x10 - 3))
+ asm volatile("cpushl %%dc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%dc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%dc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%dc,(%0)" : : "a" (set));
+}
+
+
+
+/******************************************************************************
+ * Routine to cleanly flush the a block of cache, pushing all relevant lines
+ * and invalidating them.
+ *
+ ******************************************************************************/
+void DcacheFlushInvalidateCacheBlock(void *start, unsigned long size)
+{
+ unsigned long set;
+ unsigned long start_set;
+ unsigned long end_set;
+
+ /* if size is bigger than the cache can store
+ * set the size to the maximum amount
+ */
+
+ if (size > LAST_DCACHE_ADDR)
+ size = LAST_DCACHE_ADDR;
+
+ start_set = ((unsigned long)start) & _DCACHE_SET_MASK;
+ end_set = ((unsigned long)(start+size-1)) & _DCACHE_SET_MASK;
+
+ if (start_set > end_set) {
+ /* from the begining to the lowest address */
+ for (set = 0; set <= end_set; set += (0x10 - 3))
+ asm volatile("cpushl %%dc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%dc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%dc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%dc,(%0)" : : "a" (set));
+
+ /* next loop will finish the cache ie pass the hole */
+ end_set = LAST_DCACHE_ADDR;
+ }
+ for (set = start_set; set <= end_set; set += (0x10 - 3))
+ asm volatile("cpushl %%dc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%dc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%dc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%dc,(%0)" : : "a" (set));
+}
+
+
+void IcacheInvalidateCacheBlock(void *start, unsigned long size)
+{
+ unsigned long set;
+ unsigned long start_set;
+ unsigned long end_set;
+
+ /* if size is bigger than the cache can store
+ * set the size to the maximum ammount
+ */
+
+ if (size > LAST_ICACHE_ADDR)
+ size = LAST_ICACHE_ADDR;
+
+ start_set = ((unsigned long)start) & _ICACHE_SET_MASK;
+ end_set = ((unsigned long)(start+size-1)) & _ICACHE_SET_MASK;
+
+ if (start_set > end_set) {
+ /* from the begining to the lowest address */
+ for (set = 0; set <= end_set; set += (0x10 - 3))
+ asm volatile("cpushl %%ic,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%ic,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%ic,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%ic,(%0)" : : "a" (set));
+
+ /* next loop will finish the cache ie pass the hole */
+ end_set = LAST_ICACHE_ADDR;
+ }
+ for (set = start_set; set <= end_set; set += (0x10 - 3))
+ asm volatile("cpushl %%ic,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%ic,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%ic,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%ic,(%0)" : : "a" (set));
+}
+
+
+/********************************************************************
+ * Disable the data cache completely
+ ********************************************************************/
+void DcacheDisable(void)
+{
+ int newValue;
+ unsigned long flags;
+
+ local_save_flags(flags);
+ local_irq_disable();
+
+ DcacheFlushInvalidate(); /* begin by flushing the cache */
+ newValue = CACHE_DISABLE_MODE; /* disable it */
+ cacr_set(newValue);
+ local_irq_restore(flags);
+}
+
+/********************************************************************
+ * Unconditionally enable the data cache
+ ********************************************************************/
+void DcacheEnable(void)
+{
+ cacr_set(CACHE_INITIAL_MODE);
+}
+
+
+unsigned long shadow_cacr;
+
+void cacr_set(unsigned long x)
+{
+ shadow_cacr = x;
+
+ __asm__ __volatile__ ("movec %0, %%cacr"
+ : /* no outputs */
+ : "r" (shadow_cacr));
+}
+
+unsigned long cacr_get(void)
+{
+ return shadow_cacr;
+}
--- /dev/null
+++ b/arch/m68k/coldfire/config.c
@@ -0,0 +1,420 @@
+/*
+ * linux/arch/m68k/coldifre/config.c
+ *
+ * Matt Waddel Matt.Waddel@freescale.com
+ * Copyright Freescale Semiconductor, Inc. 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <asm/bootinfo.h>
+#include <asm/machdep.h>
+#include <asm/coldfire.h>
+#include <asm/cfcache.h>
+#include <asm/bootinfo.h>
+#include <asm/io.h>
+#include <asm/cfmmu.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/movs.h>
+#include <asm/movs.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/mcf5445x_intc.h>
+#include <asm/mcf5445x_sdramc.h>
+#include <asm/mcf5445x_fbcs.h>
+#include <asm/mcf5445x_dtim.h>
+
+/* JKM -- testing */
+#include <linux/pfn.h>
+/* JKM */
+
+extern int get_irq_list(struct seq_file *p, void *v);
+extern char _text, _end;
+extern char _etext, _edata, __init_begin, __init_end;
+extern struct console mcfrs_console;
+extern char m68k_command_line[CL_SIZE];
+extern unsigned long availmem;
+
+static int irq_enable[NR_IRQS];
+unsigned long num_pages;
+
+void coldfire_sort_memrec(void)
+{
+ int i, j;
+
+ /* Sort the m68k_memory records by address */
+ for (i = 0; i < m68k_num_memory; ++i) {
+ for (j = i + 1; j < m68k_num_memory; ++j) {
+ if (m68k_memory[i].addr > m68k_memory[j].addr) {
+ struct mem_info tmp;
+ tmp = m68k_memory[i];
+ m68k_memory[i] = m68k_memory[j];
+ m68k_memory[j] = tmp;
+ }
+ }
+ }
+ /* Trim off discontiguous bits */
+ for (i = 1; i < m68k_num_memory; ++i) {
+ if ((m68k_memory[i-1].addr + m68k_memory[i-1].size) !=
+ m68k_memory[i].addr) {
+ printk(KERN_DEBUG "m68k_parse_bootinfo: addr gap between \
+ 0x%lx & 0x%lx\n",
+ m68k_memory[i-1].addr+m68k_memory[i-1].size,
+ m68k_memory[i].addr);
+ m68k_num_memory = i;
+ break;
+ }
+ }
+}
+
+int __init uboot_commandline(char *bootargs)
+{
+ int len = 0, cmd_line_len;
+ static struct uboot_record uboot_info;
+
+ extern unsigned long uboot_info_stk;
+
+ /* Add 0x80000000 to get post-remapped kernel memory location */
+ uboot_info.bd_info = (*(u32 *)(uboot_info_stk)) + 0x80000000;
+ uboot_info.initrd_start = (*(u32 *)(uboot_info_stk+4)) + 0x80000000;
+ uboot_info.initrd_end = (*(u32 *)(uboot_info_stk+8)) + 0x80000000;
+ uboot_info.cmd_line_start = (*(u32 *)(uboot_info_stk+12)) + 0x80000000;
+ uboot_info.cmd_line_stop = (*(u32 *)(uboot_info_stk+16)) + 0x80000000;
+
+ cmd_line_len = uboot_info.cmd_line_stop - uboot_info.cmd_line_start;
+ if ((cmd_line_len > 0) && (cmd_line_len < CL_SIZE-1))
+ len = (int)strncpy(bootargs, (char *)uboot_info.cmd_line_start,\
+ cmd_line_len);
+
+ return len;
+}
+
+/*
+ * This routine does things not done in the bootloader.
+ */
+#define DEFAULT_COMMAND_LINE "root=/dev/mtdblock1 rw rootfstype=jffs2 ip=none mtdparts=physmap-flash.0:5M(kernel)ro,-(jffs2)"
+asmlinkage void __init cf_early_init(void)
+{
+ struct bi_record *record = (struct bi_record *) &_end;
+
+ extern char _end;
+
+ SET_VBR((void *)MCF_RAMBAR1);
+
+ /* Mask all interrupts */
+ MCF_INTC0_IMRL = 0xFFFFFFFF;
+ MCF_INTC0_IMRH = 0xFFFFFFFF;
+ MCF_INTC1_IMRL = 0xFFFFFFFF;
+ MCF_INTC1_IMRH = 0xFFFFFFFF;
+
+#if defined(CONFIG_NOR_FLASH_BASE)
+ MCF_FBCS_CSAR(1) = CONFIG_NOR_FLASH_BASE;
+#else
+ MCF_FBCS_CSAR(1) = 0x00000000;
+#endif
+
+#if CONFIG_SDRAM_SIZE > (256*1024*1024)
+ /* Init optional SDRAM chip select */
+ MCF_SDRAMC_SDCS(1) = (256*1024*1024) | 0x1B;
+#endif
+
+ m68k_machtype = MACH_CFMMU;
+ m68k_fputype = FPU_CFV4E;
+ m68k_mmutype = MMU_CFV4E;
+ m68k_cputype = CPU_CFV4E;
+
+ m68k_num_memory = 0;
+ m68k_memory[m68k_num_memory].addr = CONFIG_SDRAM_BASE;
+ m68k_memory[m68k_num_memory++].size = CONFIG_SDRAM_SIZE;
+
+ if (!uboot_commandline(m68k_command_line)) {
+#if defined(CONFIG_BOOTPARAM)
+ strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE-1);
+#else
+ strcpy(m68k_command_line, DEFAULT_COMMAND_LINE);
+#endif
+ }
+
+
+#if defined(CONFIG_BLK_DEV_INITRD)
+ /* add initrd image */
+ record = (struct bi_record *) ((void *)record + record->size);
+ record->tag = BI_RAMDISK;
+ record->size = sizeof(record->tag) + sizeof(record->size)
+ + sizeof(record->data[0]) + sizeof(record->data[1]);
+#endif
+
+ /* Mark end of tags. */
+ record = (struct bi_record *) ((void *) record + record->size);
+ record->tag = 0;
+ record->data[0] = 0;
+ record->data[1] = 0;
+ record->size = sizeof(record->tag) + sizeof(record->size)
+ + sizeof(record->data[0]) + sizeof(record->data[1]);
+
+ /* Invalidate caches via CACR */
+ cacr_set(CACHE_DISABLE_MODE);
+
+ /* Turn on caches via CACR, enable EUSP */
+ cacr_set(CACHE_INITIAL_MODE);
+}
+
+void settimericr(unsigned int timer, unsigned int level)
+{
+ volatile unsigned char *icrp;
+ unsigned int icr;
+ unsigned char irq;
+
+ if (timer <= 2) {
+ switch (timer) {
+ case 2: irq = 33; icr = MCFSIM_ICR_TIMER2; break;
+ default: irq = 32; icr = MCFSIM_ICR_TIMER1; break;
+ }
+
+ icrp = (volatile unsigned char *) (icr);
+ *icrp = level;
+ coldfire_enable_irq0(irq);
+ }
+}
+
+/* Assembler routines */
+asmlinkage void buserr(void);
+asmlinkage void trap(void);
+asmlinkage void system_call(void);
+asmlinkage void inthandler(void);
+
+void __init coldfire_trap_init(void)
+{
+ int i = 0;
+ e_vector *vectors;
+
+ vectors = (e_vector *)MCF_RAMBAR1;
+ /*
+ * There is a common trap handler and common interrupt
+ * handler that handle almost every vector. We treat
+ * the system call and bus error special, they get their
+ * own first level handlers.
+ */
+ for (i = 3; (i <= 23); i++)
+ vectors[i] = trap;
+ for (i = 33; (i <= 63); i++)
+ vectors[i] = trap;
+ for (i = 24; (i <= 31); i++)
+ vectors[i] = inthandler;
+ for (i = 64; (i < 255); i++)
+ vectors[i] = inthandler;
+
+ vectors[255] = 0;
+ vectors[2] = buserr;
+ vectors[32] = system_call;
+}
+
+void coldfire_tick(void)
+{
+ /* Reset the ColdFire timer */
+ __raw_writeb(MCF_DTIM_DTER_CAP | MCF_DTIM_DTER_REF, MCF_DTIM0_DTER);
+}
+
+void __init coldfire_sched_init(irq_handler_t handler)
+{
+ unsigned int mcf_timerlevel = 5;
+ unsigned int mcf_timervector = 64+32;
+
+ __raw_writew(MCF_DTIM_DTMR_RST_RST, MCF_DTIM0_DTMR);
+ __raw_writel(((MCF_BUSCLK / 16) / HZ), MCF_DTIM0_DTRR);
+ __raw_writew(MCF_DTIM_DTMR_ORRI | MCF_DTIM_DTMR_CLK_DIV16 |
+ MCF_DTIM_DTMR_FRR | MCF_DTIM_DTMR_RST_EN, \
+ MCF_DTIM0_DTMR);
+
+ request_irq(mcf_timervector, handler, SA_INTERRUPT, \
+ "timer", (void *)MCF_DTIM0_DTMR);
+
+ settimericr(1, mcf_timerlevel);
+}
+
+int timerirqpending(int timer)
+{
+ unsigned int imr = 0;
+
+ switch (timer) {
+ case 1: imr = 0x1; break;
+ case 2: imr = 0x2; break;
+ default: break;
+ }
+
+ return (getiprh() & imr);
+}
+
+unsigned long coldfire_gettimeoffset(void)
+{
+ volatile unsigned long trr, tcn, offset;
+
+ tcn = __raw_readw(MCF_DTIM0_DTCN);
+ trr = __raw_readl(MCF_DTIM0_DTRR);
+ offset = (tcn * (1000000 / HZ)) / trr;
+
+ /* Check if we just wrapped the counters and maybe missed a tick */
+ if ((offset < (1000000 / HZ / 2)) && timerirqpending(1))
+ offset += 1000000 / HZ;
+ return offset;
+}
+
+void coldfire_reboot(void)
+{
+ /* disable interrupts and do a software reset */
+ asm("movew #0x2700, %%sr\n\t"
+ "moveb #0x80, %%d0\n\t"
+ "moveb %%d0, 0xfc0a0000\n\t"
+ : : : "%d0");
+}
+
+/* int coldfire_hwclk(int i, struct rtc_time *t)
+{
+ printk ("Real time clock needs porting.\n");
+ return 0;
+}*/
+
+static void coldfire_get_model(char *model)
+{
+ sprintf(model, "Version 4 ColdFire");
+}
+
+void coldfire_enable_irq(unsigned int vec)
+{
+ unsigned long flags;
+
+ vec -= 64;
+
+ if (((int)vec < 0) || (vec > 63)) {
+ printk(KERN_WARNING "enable_irq %d failed\n", vec);
+ return;
+ }
+
+ local_irq_save(flags);
+ irq_enable[vec]++;
+ if (vec < 32)
+ MCF_INTC0_IMRL &= ~(1 << vec);
+ else
+ MCF_INTC0_IMRH &= ~(1 << (vec - 32));
+ local_irq_restore(flags);
+}
+
+void coldfire_disable_irq(unsigned int vec)
+{
+ unsigned long flags;
+
+ vec -= 64;
+
+ if (((int)vec < 0) || (vec > 63)) {
+ printk(KERN_WARNING "disable_irq %d failed\n", vec);
+ return;
+ }
+
+ local_irq_save(flags);
+ if (--irq_enable[vec] == 0) {
+ if (vec < 32)
+ MCF_INTC0_IMRL |= (1 << vec);
+ else
+ MCF_INTC0_IMRH |= (1 << (vec - 32));
+
+ }
+ local_irq_restore(flags);
+}
+
+static void __init
+coldfire_bootmem_alloc(unsigned long memory_start, unsigned long memory_end)
+{
+ unsigned long base_pfn;
+
+ /* compute total pages in system */
+ num_pages = PAGE_ALIGN(memory_end - PAGE_OFFSET) >> PAGE_SHIFT;
+
+ /* align start/end to page boundries */
+ memory_start = PAGE_ALIGN(memory_start);
+ memory_end = memory_end & PAGE_MASK;
+
+ /* page numbers */
+ base_pfn = __pa(PAGE_OFFSET) >> PAGE_SHIFT;
+ min_low_pfn = __pa(memory_start) >> PAGE_SHIFT;
+ max_low_pfn = __pa(memory_end) >> PAGE_SHIFT;
+
+ high_memory = (void *)memory_end;
+ availmem = memory_start;
+
+ /* setup bootmem data */
+ m68k_setup_node(0);
+ availmem += init_bootmem_node(NODE_DATA(0), min_low_pfn,
+ base_pfn, max_low_pfn);
+ availmem = PAGE_ALIGN(availmem);
+ free_bootmem(__pa(availmem), memory_end - (availmem));
+}
+
+void __init config_coldfire(void)
+{
+ unsigned long endmem, startmem;
+ int i;
+
+ /*
+ * Calculate endmem from m68k_memory, assume all are contiguous
+ */
+ startmem = ((((int) &_end) + (PAGE_SIZE - 1)) & PAGE_MASK);
+ endmem = PAGE_OFFSET;
+ for (i = 0; i < m68k_num_memory; ++i)
+ endmem += m68k_memory[i].size;
+
+ printk(KERN_INFO "starting up linux startmem 0x%lx, endmem 0x%lx, \
+ size %luMB\n", startmem, endmem, (endmem - startmem) >> 20);
+
+ memset(irq_enable, 0, sizeof(irq_enable));
+
+ /*
+ * Setup coldfire mach-specific handlers
+ */
+ mach_max_dma_address = 0xffffffff;
+ mach_sched_init = coldfire_sched_init;
+ mach_tick = coldfire_tick;
+ mach_gettimeoffset = coldfire_gettimeoffset;
+ mach_reset = coldfire_reboot;
+/* mach_hwclk = coldfire_hwclk; to be done */
+ mach_get_model = coldfire_get_model;
+
+ coldfire_bootmem_alloc(startmem, endmem);
+
+ /*
+ * initrd setup
+ */
+/* #ifdef CONFIG_BLK_DEV_INITRD
+ if (m68k_ramdisk.size) {
+ reserve_bootmem (__pa(m68k_ramdisk.addr), m68k_ramdisk.size);
+ initrd_start = (unsigned long) m68k_ramdisk.addr;
+ initrd_end = initrd_start + m68k_ramdisk.size;
+ printk (KERN_DEBUG "initrd: %08lx - %08lx\n", initrd_start,
+ initrd_end);
+ }
+#endif */
+
+#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_FRAMEBUFFER_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+
+#if defined(CONFIG_SERIAL_COLDFIRE)
+ /*
+ * This causes trouble when it is re-registered later.
+ * Currently this is fixed by conditionally commenting
+ * out the register_console in mcf_serial.c
+ */
+ register_console(&mcfrs_console);
+#endif
+}
--- /dev/null
|
|
+++ b/arch/m68k/coldfire/entry.S
|
|
@@ -0,0 +1,701 @@
|
|
+/*
|
|
+ * arch/m68k/coldfire/entry.S
|
|
+ *
|
|
+ * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
|
|
+ * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
|
|
+ * Kenneth Albanowski <kjahds@kjahds.com>,
|
|
+ * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
|
|
+ * Copyright (C) 2004-2006 Macq Electronique SA. (www.macqel.com)
|
|
+ * Matt Waddel Matt.Waddel@freescale.com
|
|
+ * Kurt Mahan kmahan@freescale.com
|
|
+ * Copyright Freescale Semiconductor, Inc. 2007
|
|
+ *
|
|
+ * Based on:
|
|
+ *
|
|
+ * arch/m68knommu/platform/5307/entry.S &
|
|
+ * arch/m68k/kernel/entry.S
|
|
+ *
|
|
+ * Copyright (C) 1991, 1992 Linus Torvalds
|
|
+ *
|
|
+ * This file is subject to the terms and conditions of the GNU General Public
|
|
+ * License. See the file README.legal in the main directory of this archive
|
|
+ * for more details.
|
|
+ *
|
|
+ * Linux/m68k support by Hamish Macdonald
|
|
+ *
|
|
+ * ColdFire support by Greg Ungerer (gerg@snapgear.com)
|
|
+ * 5307 fixes by David W. Miller
|
|
+ * linux 2.4 support David McCullough <davidm@snapgear.com>
|
|
+ * Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be>
|
|
+ * Ported to mmu Coldfire by Matt Waddel
|
|
+ */
|
|
+
|
|
+#include <linux/sys.h>
|
|
+#include <linux/linkage.h>
|
|
+#include <asm/cf_entry.h>
|
|
+#include <asm/errno.h>
|
|
+#include <asm/setup.h>
|
|
+#include <asm/segment.h>
|
|
+#include <asm/traps.h>
|
|
+#include <asm/unistd.h>
|
|
+
|
|
+/*
|
|
+ * TASK_INFO:
|
|
+ *
|
|
+ * - TINFO_PREEMPT (struct thread_info / preempt_count)
|
|
+ * Used to keep track of preemptability
|
|
+ * - TINFO_FLAGS (struct thread_info / flags - include/asm-m68k/thread_info.h)
|
|
+ * Various bit flags that are checked for scheduling/tracing
|
|
+ * Bits 0-7 are checked every exception exit
|
|
+ * 8-15 are checked every syscall exit
|
|
+ *
|
|
+ * TIF_SIGPENDING 6
|
|
+ * TIF_NEED_RESCHED 7
|
|
+ * TIF_DELAYED_TRACE 14
|
|
+ * TIF_SYSCALL_TRACE 15
|
|
+ * TIF_MEMDIE 16 (never checked here)
|
|
+ */
|
|
+
|
|
+.bss
|
|
+
|
|
+sw_ksp:
|
|
+.long 0
|
|
+
|
|
+sw_usp:
|
|
+.long 0
|
|
+
|
|
+.text
|
|
+
|
|
+.globl system_call
|
|
+.globl buserr
|
|
+.globl trap
|
|
+.globl resume
|
|
+.globl ret_from_exception
|
|
+.globl ret_from_signal
|
|
+.globl sys_call_table
|
|
+.globl ret_from_interrupt
|
|
+.globl inthandler
|
|
+
|
|
+ENTRY(buserr)
|
|
+ SAVE_ALL_INT
|
|
+ GET_CURRENT(%d0)
|
|
+ movel %sp,%sp@- /* stack frame pointer argument */
|
|
+ jsr buserr_c
|
|
+ addql #4,%sp
|
|
+ jra .Lret_from_exception
|
|
+
|
|
+ENTRY(trap)
|
|
+ SAVE_ALL_INT
|
|
+ GET_CURRENT(%d0)
|
|
+ movel %sp,%sp@- /* stack frame pointer argument */
|
|
+ jsr trap_c
|
|
+ addql #4,%sp
|
|
+ jra .Lret_from_exception
|
|
+
|
|
+ /* After a fork we jump here directly from resume,
|
|
+ %d1 contains the previous task schedule_tail */
|
|
+ENTRY(ret_from_fork)
|
|
+ movel %d1,%sp@-
|
|
+ jsr schedule_tail
|
|
+ addql #4,%sp
|
|
+ jra .Lret_from_exception
|
|
+
|
|
+do_trace_entry:
|
|
+ movel #-ENOSYS,%d1 /* needed for strace */
|
|
+ movel %d1,%sp@(PT_D0)
|
|
+ subql #4,%sp
|
|
+ SAVE_SWITCH_STACK
|
|
+ jbsr syscall_trace
|
|
+ RESTORE_SWITCH_STACK
|
|
+ addql #4,%sp
|
|
+ movel %sp@(PT_ORIG_D0),%d0
|
|
+ cmpl #NR_syscalls,%d0
|
|
+ jcs syscall
|
|
+badsys:
|
|
+ movel #-ENOSYS,%d1
|
|
+ movel %d1,%sp@(PT_D0)
|
|
+ jra ret_from_exception
|
|
+
|
|
+do_trace_exit:
|
|
+ subql #4,%sp
|
|
+ SAVE_SWITCH_STACK
|
|
+ jbsr syscall_trace
|
|
+ RESTORE_SWITCH_STACK
|
|
+ addql #4,%sp
|
|
+ jra .Lret_from_exception
|
|
+
|
|
+ENTRY(ret_from_signal)
|
|
+ RESTORE_SWITCH_STACK
|
|
+ addql #4,%sp
|
|
+ jra .Lret_from_exception
|
|
+
|
|
+ENTRY(system_call)
|
|
+ SAVE_ALL_SYS
|
|
+
|
|
+ GET_CURRENT(%d1)
|
|
+ /* save top of frame */
|
|
+ movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
|
|
+
|
|
+ /* syscall trace */
|
|
+ tstb %curptr@(TASK_INFO+TINFO_FLAGS+2)
|
|
+ jmi do_trace_entry /* SYSCALL_TRACE is set */
|
|
+ cmpl #NR_syscalls,%d0
|
|
+ jcc badsys
|
|
+syscall:
|
|
+ movel #sys_call_table,%a0
|
|
+ asll #2,%d0
|
|
+ addl %d0,%a0
|
|
+ movel %a0@,%a0
|
|
+ jsr %a0@
|
|
+ movel %d0,%sp@(PT_D0) /* save the return value */
|
|
+ret_from_syscall:
|
|
+ movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
|
|
+ jne syscall_exit_work /* flags set so process */
|
|
+1: RESTORE_ALL
|
|
+
|
|
+syscall_exit_work:
|
|
+ btst #5,%sp@(PT_SR) /* check if returning to kernel */
|
|
+ bnes 1b /* if so, skip resched, signals */
|
|
+
|
|
+ btstl #15,%d0 /* check if SYSCALL_TRACE */
|
|
+ jne do_trace_exit
|
|
+ btstl #14,%d0 /* check if DELAYED_TRACE */
|
|
+ jne do_delayed_trace
|
|
+ btstl #6,%d0 /* check if SIGPENDING */
|
|
+ jne do_signal_return
|
|
+ pea resume_userspace
|
|
+ jra schedule
|
|
+
|
|
+ENTRY(ret_from_exception)
|
|
+.Lret_from_exception:
|
|
+ btst #5,%sp@(PT_SR) /* check if returning to kernel */
|
|
+ bnes 1f /* if so, skip resched, signals */
|
|
+ movel %d0,%sp@- /* Only allow interrupts when we are */
|
|
+ move %sr,%d0 /* last one on the kernel stack, */
|
|
+ andl #ALLOWINT,%d0 /* otherwise stack overflow can occur */
|
|
+ move %d0,%sr /* during heavy interrupt load. */
|
|
+ movel %sp@+,%d0
|
|
+
|
|
+resume_userspace:
|
|
+ moveb %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
|
|
+ jne exit_work /* SIGPENDING and/or NEED_RESCHED set */
|
|
+1: RESTORE_ALL
|
|
+
|
|
+exit_work:
|
|
+ /* save top of frame */
|
|
+ movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
|
|
+ btstl #6,%d0 /* check for SIGPENDING in flags */
|
|
+ jne do_signal_return
|
|
+ pea resume_userspace
|
|
+ jra schedule
|
|
+
|
|
+do_signal_return:
|
|
+ subql #4,%sp /* dummy return address */
|
|
+ SAVE_SWITCH_STACK
|
|
+ pea %sp@(SWITCH_STACK_SIZE)
|
|
+ clrl %sp@-
|
|
+ bsrl do_signal
|
|
+ addql #8,%sp
|
|
+ RESTORE_SWITCH_STACK
|
|
+ addql #4,%sp
|
|
+ jbra resume_userspace
|
|
+
|
|
+do_delayed_trace:
|
|
+ bclr #7,%sp@(PT_SR) /* clear trace bit in SR */
|
|
+ pea 1 /* send SIGTRAP */
|
|
+ movel %curptr,%sp@-
|
|
+ pea LSIGTRAP
|
|
+ jbsr send_sig
|
|
+ addql #8,%sp
|
|
+ addql #4,%sp
|
|
+ jbra resume_userspace
|
|
+
|
|
+/*
|
|
+ * This is the interrupt handler (for all hardware interrupt
|
|
+ * sources). It figures out the vector number and calls the appropriate
|
|
+ * interrupt service routine directly.
|
|
+ */
|
|
+ENTRY(inthandler)
|
|
+ SAVE_ALL_INT
|
|
+ GET_CURRENT(%d0)
|
|
+ addql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
|
|
+ /* put exception # in d0 */
|
|
+ movel %sp@(PT_VECTOR),%d0
|
|
+ swap %d0 /* extract bits 25:18 */
|
|
+ lsrl #2,%d0
|
|
+ andl #0x0ff,%d0
|
|
+
|
|
+ movel %sp,%sp@-
|
|
+ movel %d0,%sp@- /* put vector # on stack */
|
|
+auto_irqhandler_fixup = . + 2
|
|
+ jbsr process_int /* process the IRQ */
|
|
+ addql #8,%sp /* pop parameters off stack */
|
|
+
|
|
+ENTRY(ret_from_interrupt)
|
|
+ret_from_interrupt:
|
|
+
|
|
+ subql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
|
|
+ jeq ret_from_last_interrupt
|
|
+2: RESTORE_ALL
|
|
+
|
|
+ ALIGN
|
|
+ret_from_last_interrupt:
|
|
+ moveb %sp@(PT_SR),%d0
|
|
+ andl #(~ALLOWINT>>8)&0xff,%d0
|
|
+ jne 2b
|
|
+
|
|
+ /* check if we need to do software interrupts */
|
|
+ tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
|
|
+ jeq .Lret_from_exception
|
|
+ pea ret_from_exception
|
|
+ jra do_softirq
|
|
+
|
|
+ENTRY(user_inthandler)
|
|
+ SAVE_ALL_INT
|
|
+ GET_CURRENT(%d0)
|
|
+ addql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
|
|
+ /* put exception # in d0 */
|
|
+ movel %sp@(PT_VECTOR),%d0
|
|
+user_irqvec_fixup = . + 2
|
|
+ swap %d0 /* extract bits 25:18 */
|
|
+ lsrl #2,%d0
|
|
+ andl #0x0ff,%d0
|
|
+
|
|
+ movel %sp,%sp@-
|
|
+ movel %d0,%sp@- /* put vector # on stack */
|
|
+user_irqhandler_fixup = . + 2
|
|
+ jbsr process_int /* process the IRQ */
|
|
+ addql #8,%sp /* pop parameters off stack */
|
|
+
|
|
+ subql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
|
|
+ jeq ret_from_last_interrupt
|
|
+ RESTORE_ALL
|
|
+
|
|
+/* Handler for uninitialized and spurious interrupts */
|
|
+
|
|
+ENTRY(bad_inthandler)
|
|
+ SAVE_ALL_INT
|
|
+ GET_CURRENT(%d0)
|
|
+ addql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
|
|
+
|
|
+ movel %sp,%sp@-
|
|
+ jsr handle_badint
|
|
+ addql #4,%sp
|
|
+
|
|
+ subql #1,%curptr@(TASK_INFO+TINFO_PREEMPT)
|
|
+ jeq ret_from_last_interrupt
|
|
+ RESTORE_ALL
|
|
+
|
|
+ENTRY(sys_fork)
|
|
+ SAVE_SWITCH_STACK
|
|
+ pea %sp@(SWITCH_STACK_SIZE)
|
|
+ jbsr m68k_fork
|
|
+ addql #4,%sp
|
|
+ RESTORE_SWITCH_STACK
|
|
+ rts
|
|
+
|
|
+ENTRY(sys_clone)
|
|
+ SAVE_SWITCH_STACK
|
|
+ pea %sp@(SWITCH_STACK_SIZE)
|
|
+ jbsr m68k_clone
|
|
+ addql #4,%sp
|
|
+ RESTORE_SWITCH_STACK
|
|
+ rts
|
|
+
|
|
+ENTRY(sys_vfork)
|
|
+ SAVE_SWITCH_STACK
|
|
+ pea %sp@(SWITCH_STACK_SIZE)
|
|
+ jbsr m68k_vfork
|
|
+ addql #4,%sp
|
|
+ RESTORE_SWITCH_STACK
|
|
+ rts
|
|
+
|
|
+ENTRY(sys_sigsuspend)
|
|
+ SAVE_SWITCH_STACK
|
|
+ pea %sp@(SWITCH_STACK_SIZE)
|
|
+ jbsr do_sigsuspend
|
|
+ addql #4,%sp
|
|
+ RESTORE_SWITCH_STACK
|
|
+ rts
|
|
+
|
|
+ENTRY(sys_rt_sigsuspend)
|
|
+ SAVE_SWITCH_STACK
|
|
+ pea %sp@(SWITCH_STACK_SIZE)
|
|
+ jbsr do_rt_sigsuspend
|
|
+ addql #4,%sp
|
|
+ RESTORE_SWITCH_STACK
|
|
+ rts
|
|
+
|
|
+ENTRY(sys_sigreturn)
|
|
+ SAVE_SWITCH_STACK
|
|
+ jbsr do_sigreturn
|
|
+ RESTORE_SWITCH_STACK
|
|
+ rts
|
|
+
|
|
+ENTRY(sys_rt_sigreturn)
|
|
+ SAVE_SWITCH_STACK
|
|
+ jbsr do_rt_sigreturn
|
|
+ RESTORE_SWITCH_STACK
|
|
+ rts
|
|
+
|
|
+resume:
|
|
+ /*
|
|
+ * Beware - when entering resume, prev (the current task) is
|
|
+ * in a0, next (the new task) is in a1,so don't change these
|
|
+ * registers until their contents are no longer needed.
|
|
+ */
|
|
+
|
|
+ /* save sr */
|
|
+ movew %sr,%d0
|
|
+ movew %d0,%a0@(TASK_THREAD+THREAD_SR)
|
|
+
|
|
+ /* save usp */
|
|
+ /* Save USP via %a1 (which is saved/restored from %d0) */
|
|
+ movel %a1,%d0
|
|
+ movel %usp,%a1
|
|
+ movel %a1,%a0@(TASK_THREAD+THREAD_USP)
|
|
+ movel %d0,%a1
|
|
+
|
|
+ /* save non-scratch registers on stack */
|
|
+ SAVE_SWITCH_STACK
|
|
+
|
|
+ /* save current kernel stack pointer */
|
|
+ movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
|
|
+
|
|
+ /* Return previous task in %d1 */
|
|
+ movel %curptr,%d1
|
|
+
|
|
+ /* switch to new task (a1 contains new task) */
|
|
+ movel %a1,%curptr
|
|
+
|
|
+ /* restore the kernel stack pointer */
|
|
+ movel %a1@(TASK_THREAD+THREAD_KSP),%sp
|
|
+
|
|
+ /* restore non-scratch registers */
|
|
+ RESTORE_SWITCH_STACK
|
|
+
|
|
+ /* restore user stack pointer */
|
|
+ movel %a1@(TASK_THREAD+THREAD_USP),%a0
|
|
+ movel %a0,%usp
|
|
+
|
|
+ /* restore status register */
|
|
+ movew %a1@(TASK_THREAD+THREAD_SR),%d0
|
|
+ movew %d0,%sr
|
|
+
|
|
+ rts
|
|
+
|
|
+.data
|
|
+ALIGN
|
|
+sys_call_table:
|
|
+ .long sys_ni_syscall /* 0 - old "setup()" system call*/
|
|
+ .long sys_exit
|
|
+ .long sys_fork
|
|
+ .long sys_read
|
|
+ .long sys_write
|
|
+ .long sys_open /* 5 */
|
|
+ .long sys_close
|
|
+ .long sys_waitpid
|
|
+ .long sys_creat
|
|
+ .long sys_link
|
|
+ .long sys_unlink /* 10 */
|
|
+ .long sys_execve
|
|
+ .long sys_chdir
|
|
+ .long sys_time
|
|
+ .long sys_mknod
|
|
+ .long sys_chmod /* 15 */
|
|
+ .long sys_chown16
|
|
+ .long sys_ni_syscall /* old break syscall holder */
|
|
+ .long sys_stat
|
|
+ .long sys_lseek
|
|
+ .long sys_getpid /* 20 */
|
|
+ .long sys_mount
|
|
+ .long sys_oldumount
|
|
+ .long sys_setuid16
|
|
+ .long sys_getuid16
|
|
+ .long sys_stime /* 25 */
|
|
+ .long sys_ptrace
|
|
+ .long sys_alarm
|
|
+ .long sys_fstat
|
|
+ .long sys_pause
|
|
+ .long sys_utime /* 30 */
|
|
+ .long sys_ni_syscall /* old stty syscall holder */
|
|
+ .long sys_ni_syscall /* old gtty syscall holder */
|
|
+ .long sys_access
|
|
+ .long sys_nice
|
|
+ .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
|
|
+ .long sys_sync
|
|
+ .long sys_kill
|
|
+ .long sys_rename
|
|
+ .long sys_mkdir
|
|
+ .long sys_rmdir /* 40 */
|
|
+ .long sys_dup
|
|
+ .long sys_pipe
|
|
+ .long sys_times
|
|
+ .long sys_ni_syscall /* old prof syscall holder */
|
|
+ .long sys_brk /* 45 */
|
|
+ .long sys_setgid16
|
|
+ .long sys_getgid16
|
|
+ .long sys_signal
|
|
+ .long sys_geteuid16
|
|
+ .long sys_getegid16 /* 50 */
|
|
+ .long sys_acct
|
|
+ .long sys_umount /* recycled never used phys() */
|
|
+ .long sys_ni_syscall /* old lock syscall holder */
|
|
+ .long sys_ioctl
|
|
+ .long sys_fcntl /* 55 */
|
|
+ .long sys_ni_syscall /* old mpx syscall holder */
|
|
+ .long sys_setpgid
|
|
+ .long sys_ni_syscall /* old ulimit syscall holder */
|
|
+ .long sys_ni_syscall
|
|
+ .long sys_umask /* 60 */
|
|
+ .long sys_chroot
|
|
+ .long sys_ustat
|
|
+ .long sys_dup2
|
|
+ .long sys_getppid
|
|
+ .long sys_getpgrp /* 65 */
|
|
+ .long sys_setsid
|
|
+ .long sys_sigaction
|
|
+ .long sys_sgetmask
|
|
+ .long sys_ssetmask
|
|
+ .long sys_setreuid16 /* 70 */
|
|
+ .long sys_setregid16
|
|
+ .long sys_sigsuspend
|
|
+ .long sys_sigpending
|
|
+ .long sys_sethostname
|
|
+ .long sys_setrlimit /* 75 */
|
|
+ .long sys_old_getrlimit
|
|
+ .long sys_getrusage
|
|
+ .long sys_gettimeofday
|
|
+ .long sys_settimeofday
|
|
+ .long sys_getgroups16 /* 80 */
|
|
+ .long sys_setgroups16
|
|
+ .long old_select
|
|
+ .long sys_symlink
|
|
+ .long sys_lstat
|
|
+ .long sys_readlink /* 85 */
|
|
+ .long sys_uselib
|
|
+ .long sys_swapon
|
|
+ .long sys_reboot
|
|
+ .long old_readdir
|
|
+ .long old_mmap /* 90 */
|
|
+ .long sys_munmap
|
|
+ .long sys_truncate
|
|
+ .long sys_ftruncate
|
|
+ .long sys_fchmod
|
|
+ .long sys_fchown16 /* 95 */
|
|
+ .long sys_getpriority
|
|
+ .long sys_setpriority
|
|
+ .long sys_ni_syscall /* old profil syscall holder */
|
|
+ .long sys_statfs
|
|
+ .long sys_fstatfs /* 100 */
|
|
+ .long sys_ni_syscall /* ioperm for i386 */
|
|
+ .long sys_socketcall
|
|
+ .long sys_syslog
|
|
+ .long sys_setitimer
|
|
+ .long sys_getitimer /* 105 */
|
|
+ .long sys_newstat
|
|
+ .long sys_newlstat
|
|
+ .long sys_newfstat
|
|
+ .long sys_ni_syscall
|
|
+ .long sys_ni_syscall /* 110 */ /* iopl for i386 */
|
|
+ .long sys_vhangup
|
|
+ .long sys_ni_syscall /* obsolete idle() syscall */
|
|
+ .long sys_ni_syscall /* vm86old for i386 */
|
|
+ .long sys_wait4
|
|
+ .long sys_swapoff /* 115 */
|
|
+ .long sys_sysinfo
|
|
+ .long sys_ipc
|
|
+ .long sys_fsync
|
|
+ .long sys_sigreturn
|
|
+ .long sys_clone /* 120 */
|
|
+ .long sys_setdomainname
|
|
+ .long sys_newuname
|
|
+ .long sys_cacheflush /* modify_ldt for i386 */
|
|
+ .long sys_adjtimex
|
|
+ .long sys_mprotect /* 125 */
|
|
+ .long sys_sigprocmask
|
|
+ .long sys_ni_syscall /* old "create_module" */
|
|
+ .long sys_init_module
|
|
+ .long sys_delete_module
|
|
+ .long sys_ni_syscall /* 130 - old "get_kernel_syms" */
|
|
+ .long sys_quotactl
|
|
+ .long sys_getpgid
|
|
+ .long sys_fchdir
|
|
+ .long sys_bdflush
|
|
+ .long sys_sysfs /* 135 */
|
|
+ .long sys_personality
|
|
+ .long sys_ni_syscall /* for afs_syscall */
|
|
+ .long sys_setfsuid16
|
|
+ .long sys_setfsgid16
|
|
+ .long sys_llseek /* 140 */
|
|
+ .long sys_getdents
|
|
+ .long sys_select
|
|
+ .long sys_flock
|
|
+ .long sys_msync
|
|
+ .long sys_readv /* 145 */
|
|
+ .long sys_writev
|
|
+ .long sys_getsid
|
|
+ .long sys_fdatasync
|
|
+ .long sys_sysctl
|
|
+ .long sys_mlock /* 150 */
|
|
+ .long sys_munlock
|
|
+ .long sys_mlockall
|
|
+ .long sys_munlockall
|
|
+ .long sys_sched_setparam
|
|
+ .long sys_sched_getparam /* 155 */
|
|
+ .long sys_sched_setscheduler
|
|
+ .long sys_sched_getscheduler
|
|
+ .long sys_sched_yield
|
|
+ .long sys_sched_get_priority_max
|
|
+ .long sys_sched_get_priority_min /* 160 */
|
|
+ .long sys_sched_rr_get_interval
|
|
+ .long sys_nanosleep
|
|
+ .long sys_mremap
|
|
+ .long sys_setresuid16
|
|
+ .long sys_getresuid16 /* 165 */
|
|
+ .long sys_getpagesize
|
|
+ .long sys_ni_syscall /* old sys_query_module */
|
|
+ .long sys_poll
|
|
+ .long sys_nfsservctl
|
|
+ .long sys_setresgid16 /* 170 */
|
|
+ .long sys_getresgid16
|
|
+ .long sys_prctl
|
|
+ .long sys_rt_sigreturn
|
|
+ .long sys_rt_sigaction
|
|
+ .long sys_rt_sigprocmask /* 175 */
|
|
+ .long sys_rt_sigpending
|
|
+ .long sys_rt_sigtimedwait
|
|
+ .long sys_rt_sigqueueinfo
|
|
+ .long sys_rt_sigsuspend
|
|
+ .long sys_pread64 /* 180 */
|
|
+ .long sys_pwrite64
|
|
+ .long sys_lchown16;
|
|
+ .long sys_getcwd
|
|
+ .long sys_capget
|
|
+ .long sys_capset /* 185 */
|
|
+ .long sys_sigaltstack
|
|
+ .long sys_sendfile
|
|
+ .long sys_ni_syscall /* streams1 */
|
|
+ .long sys_ni_syscall /* streams2 */
|
|
+ .long sys_vfork /* 190 */
|
|
+ .long sys_getrlimit
|
|
+ .long sys_mmap2
|
|
+ .long sys_truncate64
|
|
+ .long sys_ftruncate64
|
|
+ .long sys_stat64 /* 195 */
|
|
+ .long sys_lstat64
|
|
+ .long sys_fstat64
|
|
+ .long sys_chown
|
|
+ .long sys_getuid
|
|
+ .long sys_getgid /* 200 */
|
|
+ .long sys_geteuid
|
|
+ .long sys_getegid
|
|
+ .long sys_setreuid
|
|
+ .long sys_setregid
|
|
+ .long sys_getgroups /* 205 */
|
|
+ .long sys_setgroups
|
|
+ .long sys_fchown
|
|
+ .long sys_setresuid
|
|
+ .long sys_getresuid
|
|
+ .long sys_setresgid /* 210 */
|
|
+ .long sys_getresgid
|
|
+ .long sys_lchown
|
|
+ .long sys_setuid
|
|
+ .long sys_setgid
|
|
+ .long sys_setfsuid /* 215 */
|
|
+ .long sys_setfsgid
|
|
+ .long sys_pivot_root
|
|
+ .long sys_ni_syscall
|
|
+ .long sys_ni_syscall
|
|
+ .long sys_getdents64 /* 220 */
|
|
+ .long sys_gettid
|
|
+ .long sys_tkill
|
|
+ .long sys_setxattr
|
|
+ .long sys_lsetxattr
|
|
+ .long sys_fsetxattr /* 225 */
|
|
+ .long sys_getxattr
|
|
+ .long sys_lgetxattr
|
|
+ .long sys_fgetxattr
|
|
+ .long sys_listxattr
|
|
+ .long sys_llistxattr /* 230 */
|
|
+ .long sys_flistxattr
|
|
+ .long sys_removexattr
|
|
+ .long sys_lremovexattr
|
|
+ .long sys_fremovexattr
|
|
+ .long sys_futex /* 235 */
|
|
+ .long sys_sendfile64
|
|
+ .long sys_mincore
|
|
+ .long sys_madvise
|
|
+ .long sys_fcntl64
|
|
+ .long sys_readahead /* 240 */
|
|
+ .long sys_io_setup
|
|
+ .long sys_io_destroy
|
|
+ .long sys_io_getevents
|
|
+ .long sys_io_submit
|
|
+ .long sys_io_cancel /* 245 */
|
|
+ .long sys_fadvise64
|
|
+ .long sys_exit_group
|
|
+ .long sys_lookup_dcookie
|
|
+ .long sys_epoll_create
|
|
+ .long sys_epoll_ctl /* 250 */
|
|
+ .long sys_epoll_wait
|
|
+ .long sys_remap_file_pages
|
|
+ .long sys_set_tid_address
|
|
+ .long sys_timer_create
|
|
+ .long sys_timer_settime /* 255 */
|
|
+ .long sys_timer_gettime
|
|
+ .long sys_timer_getoverrun
|
|
+ .long sys_timer_delete
|
|
+ .long sys_clock_settime
|
|
+ .long sys_clock_gettime /* 260 */
|
|
+ .long sys_clock_getres
|
|
+ .long sys_clock_nanosleep
|
|
+ .long sys_statfs64
|
|
+ .long sys_fstatfs64
|
|
+ .long sys_tgkill /* 265 */
|
|
+ .long sys_utimes
|
|
+ .long sys_fadvise64_64
|
|
+ .long sys_mbind
|
|
+ .long sys_get_mempolicy
|
|
+ .long sys_set_mempolicy /* 270 */
|
|
+ .long sys_mq_open
|
|
+ .long sys_mq_unlink
|
|
+ .long sys_mq_timedsend
|
|
+ .long sys_mq_timedreceive
|
|
+ .long sys_mq_notify /* 275 */
|
|
+ .long sys_mq_getsetattr
|
|
+ .long sys_waitid
|
|
+ .long sys_ni_syscall /* for sys_vserver */
|
|
+ .long sys_add_key
|
|
+ .long sys_request_key /* 280 */
|
|
+ .long sys_keyctl
|
|
+ .long sys_ioprio_set
|
|
+ .long sys_ioprio_get
|
|
+ .long sys_inotify_init
|
|
+ .long sys_inotify_add_watch /* 285 */
|
|
+ .long sys_inotify_rm_watch
|
|
+ .long sys_migrate_pages
|
|
+ .long sys_openat
|
|
+ .long sys_mkdirat
|
|
+ .long sys_mknodat /* 290 */
|
|
+ .long sys_fchownat
|
|
+ .long sys_futimesat
|
|
+ .long sys_fstatat64
|
|
+ .long sys_unlinkat
|
|
+ .long sys_renameat /* 295 */
|
|
+ .long sys_linkat
|
|
+ .long sys_symlinkat
|
|
+ .long sys_readlinkat
|
|
+ .long sys_fchmodat
|
|
+ .long sys_faccessat /* 300 */
|
|
+ .long sys_ni_syscall /* Reserved for pselect6 */
|
|
+ .long sys_ni_syscall /* Reserved for ppoll */
|
|
+ .long sys_unshare
|
|
+ .long sys_set_robust_list
|
|
+ .long sys_get_robust_list /* 305 */
|
|
+ .long sys_splice
|
|
+ .long sys_sync_file_range
|
|
+ .long sys_tee
|
|
+ .long sys_vmsplice
|
|
+ .long sys_move_pages /* 310 */
|
|
+
|
|
--- /dev/null
|
|
+++ b/arch/m68k/coldfire/head.S
|
|
@@ -0,0 +1,474 @@
|
|
+/*
|
|
+ * head.S is the MMU enabled ColdFire specific initial boot code
|
|
+ *
|
|
+ * Ported to ColdFire by
|
|
+ * Matt Waddel Matt.Waddel@freescale.com
|
|
+ * Kurt Mahan kmahan@freescale.com
|
|
+ * Copyright Freescale Semiconductor, Inc. 2007
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify
|
|
+ * it under the terms of the GNU General Public License as published by
|
|
+ * the Free Software Foundation; either version 2 of the License, or
|
|
+ * (at your option) any later version.
|
|
+ *
|
|
+ * Parts of this code came from arch/m68k/kernel/head.S
|
|
+ */
|
|
+#include <linux/linkage.h>
|
|
+#include <linux/init.h>
|
|
+#include <asm/bootinfo.h>
|
|
+#include <asm/setup.h>
|
|
+#include <asm/entry.h>
|
|
+#include <asm/pgtable.h>
|
|
+#include <asm/page.h>
|
|
+#include <asm/coldfire.h>
|
|
+#include <asm/mcfuart.h>
|
|
+#include <asm/cfcache.h>
|
|
+
|
|
+#define DEBUG
|
|
+
|
|
+.globl kernel_pg_dir
|
|
+.globl availmem
|
|
+.globl set_context
|
|
+.globl set_fpga
|
|
+
|
|
+#ifdef DEBUG
|
|
+/* When debugging use readable names for labels */
|
|
+#ifdef __STDC__
|
|
+#define L(name) .head.S.##name
|
|
+#else
|
|
+#define L(name) .head.S./**/name
|
|
+#endif
|
|
+#else
|
|
+#ifdef __STDC__
|
|
+#define L(name) .L##name
|
|
+#else
|
|
+#define L(name) .L/**/name
|
|
+#endif
|
|
+#endif
|
|
+
|
|
+/* The __INITDATA stuff is a no-op when ftrace or kgdb are turned on */
|
|
+#ifndef __INITDATA
|
|
+#define __INITDATA .data
|
|
+#define __FINIT .previous
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * Setup ACR mappings to provide the following memory map:
|
|
+ * Data
|
|
+ * 0xA0000000 -> 0xAFFFFFFF [0] NO CACHE / PRECISE / SUPER ONLY
|
|
+ * 0xFC000000 -> 0xFCFFFFFF [1] NO CACHE / PRECISE / SUPER ONLY
|
|
+ * Code
|
|
+ * None currently (mapped via TLBs)
|
|
+ */
|
|
+
|
|
+#define ACR0_DEFAULT #0xA00FA048 /* ACR0 default value */
|
|
+#define ACR1_DEFAULT #0xFC00A040 /* ACR1 default value */
|
|
+#define ACR2_DEFAULT #0x00000000 /* ACR2 default value */
|
|
+#define ACR3_DEFAULT #0x00000000 /* ACR3 default value */
|
|
+
|
|
+/* ACR mapping for FPGA (maps 0) */
|
|
+#define ACR0_FPGA #0x000FA048 /* ACR0 enable FPGA */
|
|
+
|
|
+/* Several macros to make the writing of subroutines easier:
|
|
+ * - func_start marks the beginning of the routine which setups the frame
|
|
+ * register and saves the registers, it also defines another macro
|
|
+ * to automatically restore the registers again.
|
|
+ * - func_return marks the end of the routine and simply calls the prepared
|
|
+ * macro to restore registers and jump back to the caller.
|
|
+ * - func_define generates another macro to automatically put arguments
|
|
+ * onto the stack call the subroutine and cleanup the stack again.
|
|
+ */
|
|
+
|
|
+.macro load_symbol_address symbol,register
|
|
+ movel #\symbol,\register
|
|
+.endm
|
|
+
|
|
+.macro func_start name,saveregs,savesize,stack=0
|
|
+L(\name):
|
|
+ linkw %a6,#-\stack
|
|
+ subal #(\savesize),%sp
|
|
+ moveml \saveregs,%sp@
|
|
+.set stackstart,-\stack
|
|
+
|
|
+.macro func_return_\name
|
|
+ moveml %sp@,\saveregs
|
|
+ addal #(\savesize),%sp
|
|
+ unlk %a6
|
|
+ rts
|
|
+.endm
|
|
+.endm
|
|
+
|
|
+.macro func_return name
|
|
+ func_return_\name
|
|
+.endm
|
|
+
|
|
+.macro func_call name
|
|
+ jbsr L(\name)
|
|
+.endm
|
|
+
|
|
+.macro move_stack nr,arg1,arg2,arg3,arg4
|
|
+.if \nr
|
|
+ move_stack "(\nr-1)",\arg2,\arg3,\arg4
|
|
+ movel \arg1,%sp@-
|
|
+.endif
|
|
+.endm
|
|
+
|
|
+.macro func_define name,nr=0
|
|
+.macro \name arg1,arg2,arg3,arg4
|
|
+ move_stack \nr,\arg1,\arg2,\arg3,\arg4
|
|
+ func_call \name
|
|
+.if \nr
|
|
+ lea %sp@(\nr*4),%sp
|
|
+.endif
|
|
+.endm
|
|
+.endm
|
|
+
|
|
+func_define serial_putc,1
|
|
+
|
|
+.macro putc ch
|
|
+ pea \ch
|
|
+ func_call serial_putc
|
|
+ addql #4,%sp
|
|
+.endm
|
|
+
|
|
+.macro dputc ch
|
|
+#ifdef DEBUG
|
|
+ putc \ch
|
|
+#endif
|
|
+.endm
|
|
+
|
|
+func_define putn,1
|
|
+
|
|
+.macro dputn nr
|
|
+#ifdef DEBUG
|
|
+ putn \nr
|
|
+#endif
|
|
+.endm
|
|
+
|
|
+/*
|
|
+ mmu_map - creates a new TLB entry
|
|
+
|
|
+ virt_addr Must be on proper boundary
|
|
+ phys_addr Must be on proper boundary
|
|
+ itlb MMUOR_ITLB if instruction TLB or 0
|
|
+ asid address space ID
|
|
+ shared_global MMUTR_SG if shared between different ASIDs or 0
|
|
+ size_code MMUDR_SZ1M 1 MB
|
|
+ MMUDR_SZ4K 4 KB
|
|
+ MMUDR_SZ8K 8 KB
|
|
+ MMUDR_SZ16M 16 MB
|
|
+ cache_mode MMUDR_INC instruction non-cacheable
|
|
+ MMUDR_IC instruction cacheable
|
|
+ MMUDR_DWT data writethrough
|
|
+ MMUDR_DCB data copyback
|
|
+ MMUDR_DNCP data non-cacheable, precise
|
|
+ MMUDR_DNCIP data non-cacheable, imprecise
|
|
+ super_prot MMUDR_SP if user mode generates exception or 0
|
|
+ readable MMUDR_R if permits read access (data TLB) or 0
|
|
+ writable MMUDR_W if permits write access (data TLB) or 0
|
|
+ executable MMUDR_X if permits execute access (instruction TLB) or 0
|
|
+ locked MMUDR_LK prevents TLB entry from being replaced or 0
|
|
+ temp_data_reg a data register to use for temporary values
|
|
+*/
|
|
+.macro mmu_map virt_addr,phys_addr,itlb,asid,shared_global,size_code,cache_mode,super_prot,readable,writable,executable,locked,temp_data_reg
|
|
+ /* Set up search of TLB. */
|
|
+ movel #(\virt_addr+1), \temp_data_reg
|
|
+ movel \temp_data_reg, MMUAR
|
|
+ /* Search. */
|
|
+ movel #(MMUOR_STLB + MMUOR_ADR +\itlb), \temp_data_reg
|
|
+ movew \temp_data_reg, (MMUOR)
|
|
+ /* Set up tag value. */
|
|
+ movel #(\virt_addr + \asid + \shared_global + MMUTR_V), \temp_data_reg
|
|
+ movel \temp_data_reg, MMUTR
|
|
+ /* Set up data value. */
|
|
+ movel #(\phys_addr + \size_code + \cache_mode + \super_prot + \readable + \writable + \executable + \locked), \temp_data_reg
|
|
+ movel \temp_data_reg, MMUDR
|
|
+ /* Save it. */
|
|
+ movel #(MMUOR_ACC + MMUOR_UAA + \itlb), \temp_data_reg
|
|
+ movew \temp_data_reg, (MMUOR)
|
|
+.endm /* mmu_map */
|
|
+
|
|
+.macro mmu_unmap virt_addr,itlb,temp_data_reg
|
|
+ /* Set up search of TLB. */
|
|
+ movel #(\virt_addr+1), \temp_data_reg
|
|
+ movel \temp_data_reg, MMUAR
|
|
+ /* Search. */
|
|
+ movel #(MMUOR_STLB + MMUOR_ADR +\itlb), \temp_data_reg
|
|
+ movew \temp_data_reg, (MMUOR)
|
|
+ /* Test for hit. */
|
|
+ movel MMUSR,\temp_data_reg
|
|
+ btst #MMUSR_HITN,\temp_data_reg
|
|
+ beq 1f
|
|
+ /* Read the TLB. */
|
|
+ movel #(MMUOR_RW + MMUOR_ACC +\itlb), \temp_data_reg
|
|
+ movew \temp_data_reg, (MMUOR)
|
|
+ movel MMUSR,\temp_data_reg
|
|
+ /* Set up tag value. */
|
|
+ movel #0, \temp_data_reg
|
|
+ movel \temp_data_reg, MMUTR
|
|
+ /* Set up data value. */
|
|
+ movel #0, \temp_data_reg
|
|
+ movel \temp_data_reg, MMUDR
|
|
+ /* Save it. */
|
|
+ movel #(MMUOR_ACC + MMUOR_UAA + \itlb), \temp_data_reg
|
|
+ movew \temp_data_reg, (MMUOR)
|
|
+1:
|
|
+.endm /* mmu_unmap */
|
|
+
|
|
+/* .text */
|
|
+.section ".text.head","ax"
|
|
+ENTRY(_stext)
|
|
+/* Version numbers of the bootinfo interface -- if we later pass info
|
|
+ * from boot ROM we might want to put something real here.
|
|
+ *
|
|
+ * The area from _stext to _start will later be used as kernel pointer table
|
|
+ */
|
|
+ bras 1f /* Jump over bootinfo version numbers */
|
|
+
|
|
+ .long BOOTINFOV_MAGIC
|
|
+ .long 0
|
|
+1: jmp __start-0x80000000
|
|
+
|
|
+.equ kernel_pg_dir,_stext
|
|
+.equ .,_stext+0x1000
|
|
+
|
|
+ENTRY(_start)
|
|
+ jra __start
|
|
+__INIT
|
|
+ENTRY(__start)
|
|
+
|
|
+/* Save the location of u-boot info - cmd line, bd_info, etc. */
|
|
+ movel %a7,%a4 /* Don't use %a4 before cf_early_init */
|
|
+ addl #0x80000004,%a4 /* 0x80000004= 1 stack push + high mem offset */
|
|
+
|
|
+/* Setup initial stack pointer */
|
|
+ movel #0x40001000,%sp
|
|
+
|
|
+/* Clear usp */
|
|
+ subl %a0,%a0
|
|
+ movel %a0,%usp
|
|
+
|
|
+ movel #(MCF_RAMBAR1 + 0x221), %d0
|
|
+ movec %d0, %rambar1
|
|
+ movew #0x2700,%sr
|
|
+
|
|
+ movel #(MMU_BASE+1),%d0
|
|
+ movecl %d0,%mmubar
|
|
+ movel #MMUOR_CA,%a0 /* Clear tlb entries */
|
|
+ movew %a0,(MMUOR)
|
|
+ movel #(MMUOR_CA + MMUOR_ITLB),%a0 /* Use ITLB for searches */
|
|
+ movew %a0,(MMUOR)
|
|
+ movel #0,%a0 /* Clear Addr Space User ID */
|
|
+ movecl %a0,%asid
|
|
+
|
|
+/* setup ACRs */
|
|
+ movel ACR0_DEFAULT, %d0 /* ACR0 (DATA) setup */
|
|
+ movec %d0, %acr0
|
|
+ movel ACR1_DEFAULT, %d0 /* ACR1 (DATA) setup */
|
|
+ movec %d0, %acr1
|
|
+ movel ACR2_DEFAULT, %d0 /* ACR2 (CODE) setup */
|
|
+ movec %d0, %acr2
|
|
+ movel ACR3_DEFAULT, %d0 /* ACR3 (CODE) setup */
|
|
+ movec %d0, %acr3
|
|
+
|
|
+ /* If you change the memory size to another value make a matching
|
|
+ change in paging_init(cf-mmu.c) to zones_size[]. */
|
|
+
|
|
+ /* Map 256MB as code */
|
|
+ mmu_map (PAGE_OFFSET+0*0x1000000), (PHYS_OFFSET+0*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+1*0x1000000), (PHYS_OFFSET+1*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+2*0x1000000), (PHYS_OFFSET+2*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+3*0x1000000), (PHYS_OFFSET+3*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+4*0x1000000), (PHYS_OFFSET+4*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+5*0x1000000), (PHYS_OFFSET+5*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+6*0x1000000), (PHYS_OFFSET+6*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+7*0x1000000), (PHYS_OFFSET+7*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+8*0x1000000), (PHYS_OFFSET+8*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+9*0x1000000), (PHYS_OFFSET+9*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+10*0x1000000), (PHYS_OFFSET+10*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+11*0x1000000), (PHYS_OFFSET+11*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+12*0x1000000), (PHYS_OFFSET+12*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+13*0x1000000), (PHYS_OFFSET+13*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+14*0x1000000), (PHYS_OFFSET+14*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+15*0x1000000), (PHYS_OFFSET+15*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
|
|
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
|
|
+
|
|
+ /* Map 256MB as data also */
|
|
+ mmu_map (PAGE_OFFSET+0*0x1000000), (PHYS_OFFSET+0*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+1*0x1000000), (PHYS_OFFSET+1*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+2*0x1000000), (PHYS_OFFSET+2*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+3*0x1000000), (PHYS_OFFSET+3*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+4*0x1000000), (PHYS_OFFSET+4*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+5*0x1000000), (PHYS_OFFSET+5*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+6*0x1000000), (PHYS_OFFSET+6*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+7*0x1000000), (PHYS_OFFSET+7*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+8*0x1000000), (PHYS_OFFSET+8*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+9*0x1000000), (PHYS_OFFSET+9*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+10*0x1000000), (PHYS_OFFSET+10*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+11*0x1000000), (PHYS_OFFSET+11*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+12*0x1000000), (PHYS_OFFSET+12*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+13*0x1000000), (PHYS_OFFSET+13*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+14*0x1000000), (PHYS_OFFSET+14*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+ mmu_map (PAGE_OFFSET+15*0x1000000), (PHYS_OFFSET+15*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, MMUDR_LK, %d0
|
|
+
|
|
+ /* Do unity mapping to enable the MMU. Map first 16 MB in place as
|
|
+ code (delete TLBs after MMU is enabled and we are executing in high
|
|
+ memory). */
|
|
+ mmu_map (PHYS_OFFSET+0*0x1000000), (PHYS_OFFSET+0*0x1000000), \
|
|
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_INC, MMUDR_SP, 0, \
|
|
+ 0, MMUDR_X, 0, %d0
|
|
+ /* Map first 16 MB as data too. */
|
|
+ mmu_map (PHYS_OFFSET+0*0x1000000), (PHYS_OFFSET+0*0x1000000), 0, 0, \
|
|
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
|
|
+ 0, 0, %d0
|
|
+
|
|
+ /* Turn on MMU */
|
|
+ movel #(MMUCR_EN),%a0
|
|
+ movel %a0,MMUCR
|
|
+ nop /* This synchs the pipeline after a write to MMUCR */
|
|
+
|
|
+ movel #__running_high,%a0 /* Get around PC-relative addressing. */
|
|
+ jmp %a0@
|
|
+
|
|
+ENTRY(__running_high)
|
|
+ load_symbol_address _stext,%sp
|
|
+ movel L(memory_start),%a0
|
|
+ movel %a0,availmem
|
|
+ load_symbol_address L(phys_kernel_start),%a0
|
|
+ load_symbol_address _stext,%a1
|
|
+ subl #_stext,%a1
|
|
+ addl #PAGE_OFFSET,%a1
|
|
+ movel %a1,%a0@
|
|
+
|
|
+ /* Unmap first 16 MB, code and data. */
|
|
+ mmu_unmap (PHYS_OFFSET+0*0x1000000), MMUOR_ITLB, %d0
|
|
+ mmu_unmap (PHYS_OFFSET+0*0x1000000), 0, %d0
|
|
+
|
|
+/* Setup initial stack pointer */
|
|
+ lea init_task,%a2
|
|
+ lea init_thread_union+THREAD_SIZE,%sp
|
|
+ subl %a6,%a6 /* clear a6 for gdb */
|
|
+
|
|
+#ifdef CONFIG_MCF_USER_HALT
|
|
+/* Setup debug control reg to allow halts from user space */
|
|
+ lea wdbg_uhe,%a0
|
|
+ wdebug (%a0)
|
|
+#endif
|
|
+
|
|
+ movel %a4,uboot_info_stk /* save uboot info to variable */
|
|
+ jsr cf_early_init
|
|
+ jmp start_kernel
|
|
+
|
|
+.section ".text.head","ax"
|
|
+set_context:
|
|
+func_start set_context,%d0,(1*4)
|
|
+ movel 12(%sp),%d0
|
|
+ movec %d0,%asid
|
|
+func_return set_context
|
|
+
|
|
+/*
|
|
+ * set_fpga(addr,val)
|
|
+ *
|
|
+ * Map in 0x00000000 -> 0x0fffffff and then do the write.
|
|
+ */
|
|
+set_fpga:
|
|
+ movew %sr,%d1
|
|
+ movew #0x2700,%sr
|
|
+ movel ACR0_FPGA, %d0
|
|
+ movec %d0, %acr0
|
|
+ nop
|
|
+ moveal 4(%sp),%a0
|
|
+ movel 8(%sp),%a0@
|
|
+ movel ACR0_DEFAULT, %d0
|
|
+ movec %d0, %acr0
|
|
+ nop
|
|
+ movew %d1,%sr
|
|
+ rts
|
|
+
|
|
+ .data
|
|
+ .align 4
|
|
+
|
|
+availmem:
|
|
+ .long 0
|
|
+L(phys_kernel_start):
|
|
+ .long PAGE_OFFSET
|
|
+L(kernel_end):
|
|
+ .long 0
|
|
+L(memory_start):
|
|
+ .long PAGE_OFFSET_RAW
|
|
+
|
|
+#ifdef CONFIG_MCF_USER_HALT
|
|
+/*
|
|
+ * Enable User Halt Enable in the debug control register.
|
|
+ */
|
|
+wdbg_uhe:
|
|
+ .word 0x2c80 /* DR0 */
|
|
+ .word 0x00b0 /* 31:16 */
|
|
+ .word 0x0400 /* 15:0 -- enable UHE */
|
|
+ .word 0x0000 /* unused */
|
|
+#endif
|
|
+
|
|
+
|
|
--- /dev/null
|
|
+++ b/arch/m68k/coldfire/ints.c
|
|
@@ -0,0 +1,384 @@
|
|
+/*
|
|
+ * linux/arch/m68k/coldfire/ints.c -- General interrupt handling code
|
|
+ *
|
|
+ * Copyright (C) 1999-2002 Greg Ungerer (gerg@snapgear.com)
|
|
+ * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
|
|
+ * Kenneth Albanowski <kjahds@kjahds.com>,
|
|
+ * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
|
|
+ * Matt Waddel Matt.Waddel@freescale.com
|
|
+ * Copyright Freescale Semiconductor, Inc. 2007
|
|
+ * Kurt Mahan kmahan@freescale.com
|
|
+ *
|
|
+ * Based on:
|
|
+ * linux/arch/m68k/kernel/ints.c &
|
|
+ * linux/arch/m68knommu/5307/ints.c
|
|
+ *
|
|
+ * This file is subject to the terms and conditions of the GNU General Public
|
|
+ * License. See the file COPYING in the main directory of this archive
|
|
+ * for more details.
|
|
+ */
|
|
+
|
|
+#include <linux/module.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/sched.h>
|
|
+#include <linux/kernel_stat.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/seq_file.h>
|
|
+#include <linux/interrupt.h>
|
|
+
|
|
+#include <asm/system.h>
|
|
+#include <asm/irq.h>
|
|
+#include <asm/traps.h>
|
|
+#include <asm/page.h>
|
|
+#include <asm/machdep.h>
|
|
+#include <asm/irq_regs.h>
|
|
+
|
|
+#include <asm/mcfsim.h>
|
|
+
|
|
+/*
|
|
+ * IRQ Handler lists.
|
|
+ */
|
|
+static struct irq_node *irq_list[SYS_IRQS];
|
|
+static struct irq_controller *irq_controller[SYS_IRQS];
|
|
+static int irq_depth[SYS_IRQS];
|
|
+
|
|
+/*
|
|
+ * IRQ Controller
|
|
+ */
|
|
+#ifdef CONFIG_M54455
|
|
+void m5445x_irq_enable(unsigned int irq);
|
|
+void m5445x_irq_disable(unsigned int irq);
|
|
+static struct irq_controller m5445x_irq_controller = {
|
|
+ .name = "M5445X",
|
|
+ .lock = SPIN_LOCK_UNLOCKED,
|
|
+ .enable = m5445x_irq_enable,
|
|
+ .disable = m5445x_irq_disable,
|
|
+};
|
|
+#endif
|
|
+
|
|
+#define POOL_SIZE SYS_IRQS
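+/*
+ * Handler nodes are taken from a fixed static pool; a pool entry is
+ * considered free while its handler pointer is NULL (see get_irq_node()
+ * and free_irq()).
+ */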
|
|
+static struct irq_node pool[POOL_SIZE];
|
|
+static struct irq_node *get_irq_node(void);
|
|
+
|
|
+/* The number of spurious interrupts */
|
|
+unsigned int num_spurious;
|
|
+asmlinkage void handle_badint(struct pt_regs *regs);
|
|
+
|
|
+/*
|
|
+ * void init_IRQ(void)
|
|
+ *
|
|
+ * This function should be called during kernel startup to initialize
|
|
+ * the IRQ handling routines.
|
|
+ */
|
|
+void __init init_IRQ(void)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+#ifdef CONFIG_M54455
|
|
+ for (i = 0; i < SYS_IRQS; i++)
|
|
+ irq_controller[i] = &m5445x_irq_controller;
|
|
+#endif
|
|
+}
|
|
+
|
|
+/*
|
|
+ * process_int(unsigned long vec, struct pt_regs *fp)
|
|
+ *
|
|
+ * Process an interrupt. Called from entry.S.
|
|
+ */
|
|
+asmlinkage void process_int(unsigned long vec, struct pt_regs *fp)
|
|
+{
|
|
+ struct pt_regs *old_regs;
|
|
+ struct irq_node *node;
|
|
+ old_regs = set_irq_regs(fp);
|
|
+ kstat_cpu(0).irqs[vec]++;
|
|
+
|
|
+ node = irq_list[vec];
|
|
+ if (!node)
|
|
+ handle_badint(fp);
|
|
+ else {
|
|
+ do {
|
|
+ node->handler(vec, node->dev_id);
|
|
+ node = node->next;
|
|
+ } while (node);
|
|
+ }
|
|
+
|
|
+ set_irq_regs(old_regs);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * show_interrupts( struct seq_file *p, void *v)
|
|
+ *
|
|
+ * Called to show all the current interrupt information.
|
|
+ */
|
|
+int show_interrupts(struct seq_file *p, void *v)
|
|
+{
|
|
+ struct irq_controller *contr;
|
|
+ struct irq_node *node;
|
|
+ int i = *(loff_t *) v;
|
|
+
|
|
+ if ((i < NR_IRQS) && (irq_list[i])) {
|
|
+ contr = irq_controller[i];
|
|
+ node = irq_list[i];
|
|
+ seq_printf(p, "%-8s %3u: %10u %s", contr->name, i,
|
|
+ kstat_cpu(0).irqs[i], node->devname);
|
|
+ while ((node = node->next))
|
|
+ seq_printf(p, ", %s", node->devname);
|
|
+
|
|
+ seq_printf(p, "\n");
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * get_irq_node(void)
|
|
+ *
|
|
+ * Get an irq node from the pool.
|
|
+ */
|
|
+struct irq_node *get_irq_node(void)
|
|
+{
|
|
+ struct irq_node *p = pool;
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < POOL_SIZE; i++, p++) {
|
|
+ if (!p->handler) {
|
|
+ memset(p, 0, sizeof(struct irq_node));
|
|
+ return p;
|
|
+ }
|
|
+ }
|
|
+ printk(KERN_INFO "%s(%s:%d): No more irq nodes, I suggest you \
|
|
+ "increase POOL_SIZE\n", __FUNCTION__, __FILE__, __LINE__);
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+void init_irq_proc(void)
|
|
+{
|
|
+ /* Insert /proc/irq driver here */
|
|
+}
|
|
+
|
|
+int setup_irq(unsigned int irq, struct irq_node *node)
|
|
+{
|
|
+ struct irq_controller *contr;
|
|
+ struct irq_node **prev;
|
|
+ unsigned long flags;
|
|
+
|
|
+ if (irq >= NR_IRQS || !irq_controller[irq]) {
|
|
+ printk("%s: Incorrect IRQ %d from %s\n",
|
|
+ __FUNCTION__, irq, node->devname);
|
|
+ return -ENXIO;
|
|
+ }
|
|
+
|
|
+ contr = irq_controller[irq];
|
|
+ spin_lock_irqsave(&contr->lock, flags);
|
|
+
|
|
+ prev = irq_list + irq;
|
|
+ if (*prev) {
|
|
+ /* Can't share interrupts unless both agree to */
|
|
+ if (!((*prev)->flags & node->flags & IRQF_SHARED)) {
|
|
+ spin_unlock_irqrestore(&contr->lock, flags);
|
|
+ return -EBUSY;
|
|
+ }
|
|
+ while (*prev)
|
|
+ prev = &(*prev)->next;
|
|
+ }
|
|
+
|
|
+ if (!irq_list[irq]) {
|
|
+ if (contr->startup)
|
|
+ contr->startup(irq);
|
|
+ else
|
|
+ contr->enable(irq);
|
|
+ }
|
|
+ node->next = NULL;
|
|
+ *prev = node;
|
|
+
|
|
+ spin_unlock_irqrestore(&contr->lock, flags);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int request_irq(unsigned int irq,
|
|
+ irq_handler_t handler,
|
|
+ unsigned long flags, const char *devname, void *dev_id)
|
|
+{
|
|
+ struct irq_node *node = get_irq_node();
|
|
+ int res;
|
|
+
|
|
+ if (!node)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ node->handler = handler;
|
|
+ node->flags = flags;
|
|
+ node->dev_id = dev_id;
|
|
+ node->devname = devname;
|
|
+
|
|
+ res = setup_irq(irq, node);
|
|
+ if (res)
|
|
+ node->handler = NULL;
|
|
+
|
|
+ return res;
|
|
+}
|
|
+EXPORT_SYMBOL(request_irq);
|
|
+
|
|
+void free_irq(unsigned int irq, void *dev_id)
|
|
+{
|
|
+ struct irq_controller *contr;
|
|
+ struct irq_node **p, *node;
|
|
+ unsigned long flags;
|
|
+
|
|
+ if (irq >= NR_IRQS || !irq_controller[irq]) {
|
|
+ printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ contr = irq_controller[irq];
|
|
+ spin_lock_irqsave(&contr->lock, flags);
|
|
+
|
|
+ p = irq_list + irq;
|
|
+ while ((node = *p)) {
|
|
+ if (node->dev_id == dev_id)
|
|
+ break;
|
|
+ p = &node->next;
|
|
+ }
|
|
+
|
|
+ if (node) {
|
|
+ *p = node->next;
|
|
+ node->handler = NULL;
|
|
+ } else
|
|
+ printk(KERN_DEBUG "%s: Removing probably wrong IRQ %d\n",
|
|
+ __FUNCTION__, irq);
|
|
+
|
|
+ if (!irq_list[irq]) {
|
|
+ if (contr->shutdown)
|
|
+ contr->shutdown(irq);
|
|
+ else
|
|
+ contr->disable(irq);
|
|
+ }
|
|
+
|
|
+ spin_unlock_irqrestore(&contr->lock, flags);
|
|
+}
|
|
+EXPORT_SYMBOL(free_irq);
|
|
+
|
|
+void enable_irq(unsigned int irq)
|
|
+{
|
|
+ struct irq_controller *contr;
|
|
+ unsigned long flags;
|
|
+
|
|
+ if (irq >= NR_IRQS || !irq_controller[irq]) {
|
|
+ printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ contr = irq_controller[irq];
|
|
+ spin_lock_irqsave(&contr->lock, flags);
|
|
+ if (irq_depth[irq]) {
|
|
+ if (!--irq_depth[irq]) {
|
|
+ if (contr->enable)
|
|
+ contr->enable(irq);
|
|
+ }
|
|
+ } else
|
|
+ WARN_ON(1);
|
|
+ spin_unlock_irqrestore(&contr->lock, flags);
|
|
+}
|
|
+EXPORT_SYMBOL(enable_irq);
|
|
+
|
|
+void disable_irq(unsigned int irq)
|
|
+{
|
|
+ struct irq_controller *contr;
|
|
+ unsigned long flags;
|
|
+
|
|
+ if (irq >= NR_IRQS || !irq_controller[irq]) {
|
|
+ printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ contr = irq_controller[irq];
|
|
+ spin_lock_irqsave(&contr->lock, flags);
|
|
+ if (!irq_depth[irq]++) {
|
|
+ if (contr->disable)
|
|
+ contr->disable(irq);
|
|
+ }
|
|
+ spin_unlock_irqrestore(&contr->lock, flags);
|
|
+}
|
|
+EXPORT_SYMBOL(disable_irq);
|
|
+
|
|
+unsigned long probe_irq_on(void)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL(probe_irq_on);
|
|
+
|
|
+int probe_irq_off(unsigned long irqs)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL(probe_irq_off);
|
|
+
|
|
+asmlinkage void handle_badint(struct pt_regs *regs)
|
|
+{
|
|
+ kstat_cpu(0).irqs[0]++;
|
|
+ num_spurious++;
|
|
+ printk(KERN_DEBUG "unexpected interrupt from %u\n", regs->vector);
|
|
+}
|
|
+EXPORT_SYMBOL(handle_badint);
|
|
+
|
|
+#ifdef CONFIG_M54455
|
|
+/*
|
|
+ * M5445X Implementation
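+ *
+ * Interrupt sources start at vector 64: vectors 64-127 are serviced by
+ * interrupt controller 0 and vectors 128-191 by interrupt controller 1.
+ * The EPORT edge-port pins arrive as sources 1-7 of controller 0.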
|
|
+ */
|
|
+void m5445x_irq_enable(unsigned int irq)
|
|
+{
|
|
+ /* enable the interrupt hardware */
|
|
+ if (irq < 64)
|
|
+ return;
|
|
+
|
|
+ /* adjust past non-hardware ints */
|
|
+ irq -= 64;
|
|
+
|
|
+ /* check for eport */
|
|
+ if ((irq > 0) && (irq < 8)) {
|
|
+ /* enable eport */
|
|
+ MCF_EPORT_EPPAR &= ~(3 << (irq*2)); /* level */
|
|
+ MCF_EPORT_EPDDR &= ~(1 << irq); /* input */
|
|
+ MCF_EPORT_EPIER |= 1 << irq; /* irq enabled */
|
|
+ }
|
|
+
|
|
+ if (irq < 64) {
|
|
+ /* controller 0 */
|
|
+ MCF_INTC0_ICR(irq) = 0x02;
|
|
+ MCF_INTC0_CIMR = irq;
|
|
+ } else {
|
|
+ /* controller 1 */
|
|
+ irq -= 64;
|
|
+ MCF_INTC1_ICR(irq) = 0x02;
|
|
+ MCF_INTC1_CIMR = irq;
|
|
+ }
|
|
+}
|
|
+
|
|
+void m5445x_irq_disable(unsigned int irq)
|
|
+{
|
|
+ /* disable the interrupt hardware */
|
|
+ if (irq < 64)
|
|
+ return;
|
|
+
|
|
+ /* adjust past non-hardware ints */
|
|
+ irq -= 64;
|
|
+
|
|
+ /* check for eport */
|
|
+ if ((irq > 0) && (irq < 8)) {
|
|
+ /* disable eport */
|
|
+ MCF_EPORT_EPIER &= ~(1 << irq);
|
|
+ }
|
|
+
|
|
+ if (irq < 64) {
|
|
+ /* controller 0 */
|
|
+ MCF_INTC0_ICR(irq) = 0x00;
|
|
+ MCF_INTC0_SIMR = irq;
|
|
+ } else {
|
|
+ /* controller 1 */
|
|
+ irq -= 64;
|
|
+ MCF_INTC1_ICR(irq) = 0x00;
|
|
+ MCF_INTC1_SIMR = irq;
|
|
+ }
|
|
+}
|
|
+#endif
|
|
--- /dev/null
|
|
+++ b/arch/m68k/coldfire/iomap.c
|
|
@@ -0,0 +1,54 @@
|
|
+/*
|
|
+ * arch/m68k/coldfire/iomap.c
|
|
+ *
|
|
+ * Generic coldfire iomap interface
|
|
+ *
|
|
+ * Based on the sh64 iomap.c by Paul Mundt.
|
|
+ *
|
|
+ * This file is subject to the terms and conditions of the GNU General Public
|
|
+ * License. See the file "COPYING" in the main directory of this archive
|
|
+ * for more details.
|
|
+ */
|
|
+#include <linux/pci.h>
|
|
+#include <asm/io.h>
|
|
+
|
|
+void __iomem *__attribute__ ((weak))
|
|
+ioport_map(unsigned long port, unsigned int len)
|
|
+{
|
|
+ return (void __iomem *)port;
|
|
+}
|
|
+EXPORT_SYMBOL(ioport_map);
|
|
+
|
|
+void ioport_unmap(void __iomem *addr)
|
|
+{
|
|
+ /* Nothing .. */
|
|
+}
|
|
+EXPORT_SYMBOL(ioport_unmap);
|
|
+
|
|
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
|
|
+{
|
|
+ unsigned long start = pci_resource_start(dev, bar);
|
|
+ unsigned long len = pci_resource_len(dev, bar);
|
|
+ unsigned long flags = pci_resource_flags(dev, bar);
|
|
+printk(KERN_INFO "PCI_IOMAP: BAR=%d START=0x%lx LEN=0x%lx FLAGS=0x%lx\n",
|
|
+ bar, start, len, flags);
|
|
+
|
|
+ if (!len)
|
|
+ return NULL;
|
|
+ if (max && len > max)
|
|
+ len = max;
|
|
+ if (flags & IORESOURCE_IO)
|
|
+ return ioport_map(start, len);
|
|
+ if (flags & IORESOURCE_MEM)
|
|
+ return (void __iomem *)start;
|
|
+
|
|
+ /* What? */
|
|
+ return NULL;
|
|
+}
|
|
+EXPORT_SYMBOL(pci_iomap);
|
|
+
|
|
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
|
|
+{
|
|
+ /* Nothing .. */
|
|
+}
|
|
+EXPORT_SYMBOL(pci_iounmap);
|
|
--- /dev/null
|
|
+++ b/arch/m68k/coldfire/mcf5445x-pci.c
|
|
@@ -0,0 +1,427 @@
|
|
+/*
|
|
+ * arch/m68k/coldfire/mcf5445x-pci.c
|
|
+ *
|
|
+ * Coldfire M5445x specific PCI implementation.
|
|
+ *
|
|
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
|
|
+ * Kurt Mahan <kmahan@freescale.com>
|
|
+ */
|
|
+
|
|
+#include <linux/delay.h>
|
|
+#include <linux/pci.h>
|
|
+
|
|
+#include <asm/mcfsim.h>
|
|
+#include <asm/pci.h>
|
|
+#include <asm/irq.h>
|
|
+
|
|
+/*
|
|
+ * Layout MCF5445x to PCI memory mappings:
|
|
+ *
|
|
+ * WIN MCF5445x PCI TYPE
|
|
+ * --- -------- --- ----
|
|
+ * [0] 0xA0000000 -> 0xA7FFFFFF 0xA0000000 -> 0xA7FFFFFF MEM
|
|
+ * [1] 0xA8000000 -> 0xABFFFFFF 0xA8000000 -> 0xABFFFFFF MEM
|
|
+ * [2] 0xAC000000 -> 0xAFFFFFFF 0xAC000000 -> 0xAFFFFFFF IO
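+ *
+ * The same windows are programmed into the PCIIW[0-2]BTAR initiator
+ * window registers in init_mcf5445x_pci() below.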
|
|
+ */
|
|
+
|
|
+#define MCF5445X_PCI_MEM_BASE 0xA0000000
|
|
+#define MCF5445X_PCI_MEM_SIZE 0x0C000000
|
|
+
|
|
+#define MCF5445X_PCI_CONFIG_BASE 0xAC000000
|
|
+#define MCF5445X_PCI_CONFIG_SIZE 0x04000000
|
|
+
|
|
+#define MCF5445X_PCI_IO_BASE 0xAC000000
|
|
+#define MCF5445X_PCI_IO_SIZE 0x04000000
|
|
+
|
|
+/* PCI Bus memory resource block */
|
|
+struct resource pci_iomem_resource = {
|
|
+ .name = "PCI memory space",
|
|
+ .start = MCF5445X_PCI_MEM_BASE,
|
|
+ .flags = IORESOURCE_MEM,
|
|
+ .end = MCF5445X_PCI_MEM_BASE + MCF5445X_PCI_MEM_SIZE - 1
|
|
+};
|
|
+
|
|
+/* PCI Bus ioport resource block */
|
|
+struct resource pci_ioport_resource = {
|
|
+ .name = "PCI I/O space",
|
|
+ .start = MCF5445X_PCI_IO_BASE,
|
|
+ .flags = IORESOURCE_IO,
|
|
+ .end = MCF5445X_PCI_IO_BASE + MCF5445X_PCI_IO_SIZE - 1
|
|
+};
|
|
+
|
|
+/*
|
|
+ * The M54455EVB multiplexes all the PCI interrupts via
|
|
+ * the FPGA and routes them to a single interrupt. The
|
|
+ * PCI spec requires all PCI interrupt routines be smart
|
|
+ * enough to sort out their own interrupts.
|
|
+ * The interrupt source from the FPGA is configured
|
|
+ * to EPORT 3.
|
|
+ */
|
|
+#define MCF5445X_PCI_IRQ 0x43
|
|
+
|
|
+#define PCI_SLOTS 4
|
|
+
|
|
+/*
|
|
+ * FPGA Info
|
|
+ */
|
|
+#define FPGA_PCI_IRQ_ENABLE (u32 *)0x09000000
|
|
+#define FPGA_PCI_IRQ_STATUS (u32 *)0x09000004
|
|
+#define FPGA_PCI_IRQ_ROUTE (u32 *)0x0900000c
|
|
+#define FPGA_SEVEN_LED (u32 *)0x09000014
|
|
+
|
|
+extern void set_fpga(u32 *addr, u32 val);
|
|
+
|
|
+#ifdef DEBUG
|
|
+void mcf5445x_pci_dumpregs(void);
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * void mcf5445x_conf_device(struct pci_dev *dev)
|
|
+ *
|
|
+ * Machine dependent configuration of the given device.
|
|
+ *
|
|
+ * Parameters:
|
|
+ *
|
|
+ * dev - the pci device.
|
|
+ */
|
|
+void __init
|
|
+mcf5445x_conf_device(struct pci_dev *dev)
|
|
+{
|
|
+ set_fpga(FPGA_PCI_IRQ_ENABLE, 0x0f);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * int mcf5445x_pci_config_read(unsigned int seg, unsigned int bus,
|
|
+ * unsigned int devfn, int reg,
|
|
+ * int len, u32 *value)
|
|
+ *
|
|
+ * Read from PCI configuration space.
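+ *
+ * The target bus/device/function/register is written to PCICAR with the
+ * enable bit set, the data is then transferred through the configuration
+ * window at MCF5445X_PCI_CONFIG_BASE, and the enable bit is cleared again.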
|
|
+ *
|
|
+ */
|
|
+int mcf5445x_pci_config_read(unsigned int seg, unsigned int bus,
|
|
+ unsigned int devfn, int reg, int len, u32 *value)
|
|
+{
|
|
+ u32 addr = MCF_PCI_PCICAR_BUSNUM(bus) |
|
|
+ MCF_PCI_PCICAR_DEVNUM(PCI_SLOT(devfn)) |
|
|
+ MCF_PCI_PCICAR_FUNCNUM(PCI_FUNC(devfn)) |
|
|
+ MCF_PCI_PCICAR_DWORD(reg) |
|
|
+ MCF_PCI_PCICAR_E;
|
|
+
|
|
+ if ((bus > 255) || (devfn > 255) || (reg > 255)) {
|
|
+ *value = -1;
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /* setup for config mode */
|
|
+ MCF_PCI_PCICAR = addr;
|
|
+ __asm__ __volatile__("nop");
|
|
+
|
|
+ switch (len) {
|
|
+ case 1:
|
|
+ *value = *(volatile u8 *)(MCF5445X_PCI_CONFIG_BASE+(reg&3));
|
|
+ break;
|
|
+ case 2:
|
|
+ *value = le16_to_cpu(*(volatile u16 *)
|
|
+ (MCF5445X_PCI_CONFIG_BASE + (reg&2)));
|
|
+ break;
|
|
+ case 4:
|
|
+ *value = le32_to_cpu(*(volatile u32 *)
|
|
+ (MCF5445X_PCI_CONFIG_BASE));
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /* clear config mode */
|
|
+ MCF_PCI_PCICAR = ~MCF_PCI_PCICAR_E;
|
|
+ __asm__ __volatile__("nop");
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * int mcf5445x_pci_config_write(unsigned int seg, unsigned int bus,
|
|
+ * unsigned int devfn, int reg,
|
|
+ * int len, u32 value)
|
|
+ *
|
|
+ * Write to PCI configuration space
|
|
+ */
|
|
+int mcf5445x_pci_config_write(unsigned int seg, unsigned int bus,
|
|
+ unsigned int devfn, int reg, int len, u32 value)
|
|
+{
|
|
+ u32 addr = MCF_PCI_PCICAR_BUSNUM(bus) |
|
|
+ MCF_PCI_PCICAR_DEVNUM(PCI_SLOT(devfn)) |
|
|
+ MCF_PCI_PCICAR_FUNCNUM(PCI_FUNC(devfn)) |
|
|
+ MCF_PCI_PCICAR_DWORD(reg) |
|
|
+ MCF_PCI_PCICAR_E;
|
|
+
|
|
+ if ((bus > 255) || (devfn > 255) || (reg > 255))
|
|
+ return -EINVAL;
|
|
+
|
|
+ /* setup for config mode */
|
|
+ MCF_PCI_PCICAR = addr;
|
|
+ __asm__ __volatile__("nop");
|
|
+
|
|
+ switch (len) {
|
|
+ case 1:
|
|
+ *(volatile u8 *)(MCF5445X_PCI_CONFIG_BASE+(reg&3)) = (u8)value;
|
|
+ break;
|
|
+ case 2:
|
|
+ *(volatile u16 *)(MCF5445X_PCI_CONFIG_BASE+(reg&2)) =
|
|
+ cpu_to_le16((u16)value);
|
|
+ break;
|
|
+ case 4:
|
|
+ *(volatile u32 *)(MCF5445X_PCI_CONFIG_BASE) =
|
|
+ cpu_to_le32(value);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /* clear config mode */
|
|
+ MCF_PCI_PCICAR = ~MCF_PCI_PCICAR_E;
|
|
+ __asm__ __volatile__("nop");
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* hardware operations */
|
|
+static struct pci_raw_ops mcf5445x_pci_ops = {
|
|
+ .read = mcf5445x_pci_config_read,
|
|
+ .write = mcf5445x_pci_config_write,
|
|
+};
|
|
+
|
|
+/*
|
|
+ * irqreturn_t mcf5445x_pci_interrupt( int irq, void *dev)
|
|
+ *
|
|
+ * PCI controller interrupt handler.
|
|
+ */
|
|
+static irqreturn_t
|
|
+mcf5445x_pci_interrupt(int irq, void *dev)
|
|
+{
|
|
+ u32 status = MCF_PCI_PCIGSCR;
|
|
+#ifdef DEBUG
|
|
+ printk(KERN_INFO "PCI: Controller irq status=0x%08x\n", status);
|
|
+#endif
|
|
+ /* clear */
|
|
+ MCF_PCI_PCIGSCR = status;
|
|
+
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * irqreturn_t mcf5445x_pci_arb_interrupt( int irq, void *dev)
|
|
+ *
|
|
+ * PCI Arbiter interrupt handler.
|
|
+ */
|
|
+static irqreturn_t
|
|
+mcf5445x_pci_arb_interrupt(int irq, void *dev)
|
|
+{
|
|
+ u32 status = MCF_PCIARB_PASR;
|
|
+#ifdef DEBUG
|
|
+ printk(KERN_INFO "PCI: Arbiter irq status=0x%08x\n", status);
|
|
+#endif
|
|
+ /* clear */
|
|
+ MCF_PCIARB_PASR = status;
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * int init_mcf5445x_pci(void)
|
|
+ *
|
|
+ * Machine specific initialisation:
|
|
+ *
|
|
+ * - Initialise the PCI controller, arbiter and address windows
|
|
+ * - Register the bus resources and config space access ops
|
|
+ *
|
|
+ * Result: 0 on success.
|
|
+ */
|
|
+int __init
|
|
+init_mcf5445x_pci(void)
|
|
+{
|
|
+ /*
|
|
+ * Initialize the PCI core
|
|
+ */
|
|
+
|
|
+ /* arbitration controller */
|
|
+ MCF_PCIARB_PACR = MCF_PCIARB_PACR_INTMPRI |
|
|
+ MCF_PCIARB_PACR_EXTMPRI(0x0f) |
|
|
+ MCF_PCIARB_PACR_INTMINTEN |
|
|
+ MCF_PCIARB_PACR_EXTMINTEN(0x0f);
|
|
+
|
|
+ /* pci pin assignment regs */
|
|
+ MCF_GPIO_PAR_PCI = MCF_GPIO_PAR_PCI_GNT0 |
|
|
+ MCF_GPIO_PAR_PCI_GNT1 |
|
|
+ MCF_GPIO_PAR_PCI_GNT2 |
|
|
+ MCF_GPIO_PAR_PCI_GNT3_GNT3 |
|
|
+ MCF_GPIO_PAR_PCI_REQ0 |
|
|
+ MCF_GPIO_PAR_PCI_REQ1 |
|
|
+ MCF_GPIO_PAR_PCI_REQ2 |
|
|
+ MCF_GPIO_PAR_PCI_REQ3_REQ3;
|
|
+
|
|
+ /* target control reg */
|
|
+ MCF_PCI_PCITCR = MCF_PCI_PCITCR_P |
|
|
+ MCF_PCI_PCITCR_WCT(8);
|
|
+
|
|
+ /* PCI MEM address */
|
|
+ MCF_PCI_PCIIW0BTAR = 0xA007A000;
|
|
+
|
|
+ /* PCI MEM address */
|
|
+ MCF_PCI_PCIIW1BTAR = 0xA803A800;
|
|
+
|
|
+ /* PCI IO address */
|
|
+ MCF_PCI_PCIIW2BTAR = 0xAC03AC00;
|
|
+
|
|
+ /* window control */
|
|
+ MCF_PCI_PCIIWCR = MCF_PCI_PCIIWCR_WINCTRL0_ENABLE |
|
|
+ MCF_PCI_PCIIWCR_WINCTRL0_MEMREAD |
|
|
+ MCF_PCI_PCIIWCR_WINCTRL1_ENABLE |
|
|
+ MCF_PCI_PCIIWCR_WINCTRL1_MEMREAD |
|
|
+ MCF_PCI_PCIIWCR_WINCTRL2_ENABLE |
|
|
+ MCF_PCI_PCIIWCR_WINCTRL2_IO;
|
|
+
|
|
+ /* initiator control reg */
|
|
+ MCF_PCI_PCIICR = 0x00ff;
|
|
+
|
|
+ /* type 0 - command */
|
|
+ MCF_PCI_PCISCR = MCF_PCI_PCISCR_MW | /* mem write/inval */
|
|
+ MCF_PCI_PCISCR_B | /* bus master enable */
|
|
+ MCF_PCI_PCISCR_M; /* mem access enable */
|
|
+
|
|
+ /* type 0 - config reg */
|
|
+ MCF_PCI_PCICR1 = MCF_PCI_PCICR1_CACHELINESIZE(8) |
|
|
+ MCF_PCI_PCICR1_LATTIMER(0xff);
|
|
+
|
|
+ /* type 0 - config 2 reg */
|
|
+ MCF_PCI_PCICR2 = 0;
|
|
+
|
|
+ /* target control reg */
|
|
+ MCF_PCI_PCITCR2 = MCF_PCI_PCITCR2_B0E |
|
|
+ MCF_PCI_PCITCR2_B4E;
|
|
+
|
|
+ /* translate addresses from PCI[0] to CF[SDRAM] */
|
|
+ MCF_PCI_PCITBATR0 = MCF_RAMBAR1 | MCF_PCI_PCITBATR0_EN;
|
|
+ MCF_PCI_PCITBATR4 = MCF_RAMBAR1 | MCF_PCI_PCITBATR4_EN;
|
|
+
|
|
+ /* setup controller interrupt handlers */
|
|
+ if (request_irq(55+128, mcf5445x_pci_interrupt, IRQF_SHARED,
|
|
+ "PCI Controller", NULL))
|
|
+ printk(KERN_ERR "PCI: Unable to register controller irq\n");
|
|
+
|
|
+ if (request_irq (56+128, mcf5445x_pci_arb_interrupt, IRQF_SHARED, "PCI Arbiter", NULL))
|
|
+ printk(KERN_ERR "PCI: Unable to register arbiter irq\n");
|
|
+
|
|
+ /* global control - clear reset bit */
|
|
+ MCF_PCI_PCIGSCR = MCF_PCI_PCIGSCR_SEE |
|
|
+ MCF_PCI_PCIGSCR_PEE;
|
|
+
|
|
+ /* let everything settle */
|
|
+ udelay(1000);
|
|
+
|
|
+ /* allocate bus ioport resource */
|
|
+ if (request_resource(&ioport_resource, &pci_ioport_resource) < 0)
|
|
+ printk(KERN_ERR "PCI: Unable to alloc ioport resource\n");
|
|
+
|
|
+ /* allocate bus iomem resource */
|
|
+ if (request_resource(&iomem_resource, &pci_iomem_resource) < 0)
|
|
+ printk(KERN_ERR "PCI: Unable to alloc iomem resource\n");
|
|
+
|
|
+ /* setup FPGA to route PCI to IRQ3(67), SW7 to IRQ7, SW6 to IRQ4 */
|
|
+ set_fpga(FPGA_PCI_IRQ_ENABLE, 0x00000000);
|
|
+ set_fpga(FPGA_PCI_IRQ_ROUTE, 0x00000039);
|
|
+ set_fpga(FPGA_SEVEN_LED, 0x000000FF);
|
|
+
|
|
+ raw_pci_ops = &mcf5445x_pci_ops;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * DEBUGGING
|
|
+ */
|
|
+
|
|
+#ifdef DEBUG
|
|
+struct regdump {
|
|
+ u32 addr;
|
|
+ char regname[16];
|
|
+};
|
|
+
|
|
+struct regdump type0regs[] = {
|
|
+ { 0xfc0a8000, "PCIIDR" },
|
|
+ { 0xfc0a8004, "PCISCR" },
|
|
+ { 0xfc0a8008, "PCICCRIR" },
|
|
+ { 0xfc0a800c, "PCICR1" },
|
|
+ { 0xfc0a8010, "PCIBAR0" },
|
|
+ { 0xfc0a8014, "PCIBAR1" },
|
|
+ { 0xfc0a8018, "PCIBAR2" },
|
|
+ { 0xfc0a801c, "PCIBAR3" },
|
|
+ { 0xfc0a8020, "PCIBAR4" },
|
|
+ { 0xfc0a8024, "PCIBAR5" },
|
|
+ { 0xfc0a8028, "PCICCPR" },
|
|
+ { 0xfc0a802c, "PCISID" },
|
|
+ { 0xfc0a8030, "PCIERBAR" },
|
|
+ { 0xfc0a8034, "PCICPR" },
|
|
+ { 0xfc0a803c, "PCICR2" },
|
|
+ { 0, "" }
|
|
+};
|
|
+
|
|
+struct regdump genregs[] = {
|
|
+ { 0xfc0a8060, "PCIGSCR" },
|
|
+ { 0xfc0a8064, "PCITBATR0" },
|
|
+ { 0xfc0a8068, "PCITBATR1" },
|
|
+ { 0xfc0a806c, "PCITCR1" },
|
|
+ { 0xfc0a8070, "PCIIW0BTAR" },
|
|
+ { 0xfc0a8074, "PCIIW1BTAR" },
|
|
+ { 0xfc0a8078, "PCIIW2BTAR" },
|
|
+ { 0xfc0a8080, "PCIIWCR" },
|
|
+ { 0xfc0a8084, "PCIICR" },
|
|
+ { 0xfc0a8088, "PCIISR" },
|
|
+ { 0xfc0a808c, "PCITCR2" },
|
|
+ { 0xfc0a8090, "PCITBATR0" },
|
|
+ { 0xfc0a8094, "PCITBATR1" },
|
|
+ { 0xfc0a8098, "PCITBATR2" },
|
|
+ { 0xfc0a809c, "PCITBATR3" },
|
|
+ { 0xfc0a80a0, "PCITBATR4" },
|
|
+ { 0xfc0a80a4, "PCITBATR5" },
|
|
+ { 0xfc0a80a8, "PCIINTR" },
|
|
+ { 0xfc0a80f8, "PCICAR" },
|
|
+ { 0, "" }
|
|
+};
|
|
+
|
|
+struct regdump arbregs[] = {
|
|
+ { 0xfc0ac000, "PACR" },
|
|
+ { 0xfc0ac004, "PASR" }, /* documentation error */
|
|
+ { 0, "" }
|
|
+};
|
|
+
|
|
+/*
|
|
+ * void mcf5445x_pci_dumpregs()
|
|
+ *
|
|
+ * Dump out all the PCI registers
|
|
+ */
|
|
+void
|
|
+mcf5445x_pci_dumpregs(void)
|
|
+{
|
|
+ struct regdump *reg;
|
|
+
|
|
+ printk(KERN_INFO "*** MCF5445x PCI TARGET 0 REGISTERS ***\n");
|
|
+
|
|
+ reg = type0regs;
|
|
+ while (reg->addr) {
|
|
+ printk(KERN_INFO "0x%08x 0x%08x %s\n", reg->addr,
|
|
+ *((u32 *)reg->addr), reg->regname);
|
|
+ reg++;
|
|
+ }
|
|
+
|
|
+ printk(KERN_INFO "\n*** MCF5445x PCI GENERAL REGISTERS ***\n");
|
|
+ reg = genregs;
|
|
+ while (reg->addr) {
|
|
+ printk(KERN_INFO "0x%08x 0x%08x %s\n", reg->addr,
|
|
+ *((u32 *)reg->addr), reg->regname);
|
|
+ reg++;
|
|
+ }
|
|
+ printk(KERN_INFO "\n*** MCF5445x PCI ARBITER REGISTERS ***\n");
|
|
+ reg = arbregs;
|
|
+ while (reg->addr) {
|
|
+ printk(KERN_INFO "0x%08x 0x%08x %s\n", reg->addr,
|
|
+ *((u32 *)reg->addr), reg->regname);
|
|
+ reg++;
|
|
+ }
|
|
+}
|
|
+#endif /* DEBUG */
|
|
--- /dev/null
|
|
+++ b/arch/m68k/coldfire/muldi3.S
|
|
@@ -0,0 +1,64 @@
|
|
+/*
|
|
+ * Coldfire muldi3 assembly version
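+ *
+ * Computes the 64 x 64 -> 64 bit product of the two 64-bit operands
+ * passed on the stack; the result is returned in %d0/%d1 with the high
+ * longword in %d0.  The product is assembled from 32-bit mulsl partial
+ * products, since mulsl only produces the low 32 bits of a multiply.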
|
|
+ */
|
|
+
|
|
+#include <linux/linkage.h>
|
|
+.globl __muldi3
|
|
+
|
|
+ENTRY(__muldi3)
|
|
+ linkw %fp,#0
|
|
+ lea %sp@(-32),%sp
|
|
+ moveml %d2-%d7/%a2-%a3,%sp@
|
|
+ moveal %fp@(8), %a2
|
|
+ moveal %fp@(12), %a3
|
|
+ moveal %fp@(16), %a0
|
|
+ moveal %fp@(20),%a1
|
|
+ movel %a3,%d2
|
|
+ andil #65535,%d2
|
|
+ movel %a3,%d3
|
|
+ clrw %d3
|
|
+ swap %d3
|
|
+ movel %a1,%d0
|
|
+ andil #65535,%d0
|
|
+ movel %a1,%d1
|
|
+ clrw %d1
|
|
+ swap %d1
|
|
+ movel %d2,%d7
|
|
+ mulsl %d0,%d7
|
|
+ movel %d2,%d4
|
|
+ mulsl %d1,%d4
|
|
+ movel %d3,%d2
|
|
+ mulsl %d0,%d2
|
|
+ mulsl %d1,%d3
|
|
+ movel %d7,%d0
|
|
+ clrw %d0
|
|
+ swap %d0
|
|
+ addl %d0,%d4
|
|
+ addl %d2,%d4
|
|
+ cmpl %d4,%d2
|
|
+ blss 1f
|
|
+ addil #65536,%d3
|
|
+1:
|
|
+ movel %d4,%d0
|
|
+ clrw %d0
|
|
+ swap %d0
|
|
+ movel %d3,%d5
|
|
+ addl %d0,%d5
|
|
+ movew %d4,%d6
|
|
+ swap %d6
|
|
+ movew %d7,%d6
|
|
+ movel %d5,%d0
|
|
+ movel %d6,%d1
|
|
+ movel %a3,%d2
|
|
+ movel %a0,%d3
|
|
+ mulsl %d3,%d2
|
|
+ movel %a2,%d3
|
|
+ movel %a1,%d4
|
|
+ mulsl %d4,%d3
|
|
+ addl %d3,%d2
|
|
+ movel %d2,%d0
|
|
+ addl %d5,%d0
|
|
+ moveml %sp@, %d2-%d7/%a2-%a3
|
|
+ lea %sp@(32),%sp
|
|
+ unlk %fp
|
|
+ rts
|
|
--- /dev/null
|
|
+++ b/arch/m68k/coldfire/pci.c
|
|
@@ -0,0 +1,245 @@
|
|
+/*
|
|
+ * linux/arch/m68k/coldfire/pci.c
|
|
+ *
|
|
+ * PCI initialization for Coldfire architectures.
|
|
+ *
|
|
+ * Currently Supported:
|
|
+ * M5445x
|
|
+ *
|
|
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
|
|
+ * Kurt Mahan <kmahan@freescale.com>
|
|
+ */
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/init.h>
|
|
+#include <linux/pci.h>
|
|
+
|
|
+#include <asm/mcfsim.h>
|
|
+#include <asm/pci.h>
|
|
+
|
|
+/* pci ops for reading/writing config */
|
|
+struct pci_raw_ops *raw_pci_ops;
|
|
+
|
|
+/* pci debug flag */
|
|
+static int debug_pci;
|
|
+
|
|
+#ifdef CONFIG_M54455
|
|
+extern int init_mcf5445x_pci(void);
|
|
+extern void mcf5445x_conf_device(struct pci_dev *dev);
|
|
+extern void mcf5445x_pci_dumpregs(void);
|
|
+
|
|
+extern struct resource pci_ioport_resource;
|
|
+extern struct resource pci_iomem_resource;
|
|
+#endif
|
|
+
|
|
+static int
|
|
+pci_read(struct pci_bus *bus, unsigned int devfn, int where,
|
|
+ int size, u32 *value)
|
|
+{
|
|
+ return raw_pci_ops->read(0, bus->number, devfn, where, size, value);
|
|
+}
|
|
+
|
|
+static int
|
|
+pci_write(struct pci_bus *bus, unsigned int devfn, int where,
|
|
+ int size, u32 value)
|
|
+{
|
|
+ return raw_pci_ops->write(0, bus->number, devfn, where, size, value);
|
|
+}
|
|
+
|
|
+struct pci_ops pci_root_ops = {
|
|
+ .read = pci_read,
|
|
+ .write = pci_write,
|
|
+};
|
|
+
|
|
+/*
|
|
+ * pcibios_setup(char *)
|
|
+ *
|
|
+ * Initialize the pcibios based on cmd line params.
|
|
+ */
|
|
+char * __init
|
|
+pcibios_setup(char *str)
|
|
+{
|
|
+ if (!strcmp(str, "debug")) {
|
|
+ debug_pci = 1;
|
|
+ return NULL;
|
|
+ }
|
|
+ return str;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * We need to avoid collisions with `mirrored' VGA ports
|
|
+ * and other strange ISA hardware, so we always want the
|
|
+ * addresses to be allocated in the 0x000-0x0ff region
|
|
+ * modulo 0x400.
|
|
+ *
|
|
+ * Why? Because some silly external IO cards only decode
|
|
+ * the low 10 bits of the IO address. The 0x00-0xff region
|
|
+ * is reserved for motherboard devices that decode all 16
|
|
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
|
|
+ * but we want to try to avoid allocating at 0x2900-0x2bff
|
|
+ * which might be mirrored at 0x0100-0x03ff.
|
|
+ */
|
|
+void
|
|
+pcibios_align_resource(void *data, struct resource *res, resource_size_t size,
|
|
+ resource_size_t align)
|
|
+{
|
|
+ struct pci_dev *dev = data;
|
|
+
|
|
+ if (res->flags & IORESOURCE_IO) {
|
|
+ resource_size_t start = res->start;
|
|
+
|
|
+ if (size > 0x100)
|
|
+ printk(KERN_ERR "PCI: I/O Region %s/%d too large"
|
|
+ " (%ld bytes)\n", pci_name(dev),
|
|
+ dev->resource - res, (long int)size);
|
|
+
|
|
+ if (start & 0x300) {
|
|
+ start = (start + 0x3ff) & ~0x3ff;
|
|
+ res->start = start;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Swizzle the device pin each time we cross a bridge
|
|
+ * and return the slot number.
|
|
+ */
|
|
+static u8 __devinit
|
|
+pcibios_swizzle(struct pci_dev *dev, u8 *pin)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Map a slot/pin to an IRQ.
|
|
+ */
|
|
+static int
|
|
+pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
|
|
+{
|
|
+ return 0x43;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * pcibios_update_irq(struct pci_dev *dev, int irq)
|
|
+ *
|
|
+ * Update a PCI interrupt.
|
|
+ */
|
|
+void __init
|
|
+pcibios_update_irq(struct pci_dev *dev, int irq)
|
|
+{
|
|
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * pcibios_enable_device(struct pci_dev *dev, int mask)
|
|
+ *
|
|
+ * Enable a device on the PCI bus.
|
|
+ */
|
|
+int
|
|
+pcibios_enable_device(struct pci_dev *dev, int mask)
|
|
+{
|
|
+ u16 cmd, old_cmd;
|
|
+ int idx;
|
|
+ struct resource *r;
|
|
+
|
|
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
|
|
+ old_cmd = cmd;
|
|
+ for (idx = 0; idx < 6; idx++) {
|
|
+ r = &dev->resource[idx];
|
|
+ if (!r->start && r->end) {
|
|
+ printk(KERN_ERR "PCI: Device %s not available because "
|
|
+ "of resource collisions\n", pci_name(dev));
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (r->flags & IORESOURCE_IO)
|
|
+ cmd |= PCI_COMMAND_IO;
|
|
+ if (r->flags & IORESOURCE_MEM)
|
|
+ cmd |= PCI_COMMAND_MEMORY;
|
|
+ }
|
|
+ if (cmd != old_cmd) {
|
|
+ printk("PCI: Enabling device %s (%04x -> %04x)\n",
|
|
+ pci_name(dev), old_cmd, cmd);
|
|
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
|
|
+#ifdef CONFIG_M54455
|
|
+ mcf5445x_conf_device(dev);
|
|
+#endif
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * pcibios_fixup_bus(struct pci_bus *bus)
|
|
+ */
|
|
+void __init
|
|
+pcibios_fixup_bus(struct pci_bus *bus)
|
|
+{
|
|
+ struct pci_dev *dev = bus->self;
|
|
+
|
|
+ if (!dev) {
|
|
+ /* Root bus. */
|
|
+#ifdef CONFIG_M54455
|
|
+ bus->resource[0] = &pci_ioport_resource;
|
|
+ bus->resource[1] = &pci_iomem_resource;
|
|
+#endif
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * pcibios_init(void)
|
|
+ *
|
|
+ * Allocate/initialize low level pci bus/devices.
|
|
+ */
|
|
+static int __init
|
|
+pcibios_init(void)
|
|
+{
|
|
+ struct pci_bus *bus;
|
|
+
|
|
+ if (!raw_pci_ops) {
|
|
+ printk(KERN_WARNING "PCIBIOS: FATAL: NO PCI Hardware found\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /* allocate and scan the (only) bus */
|
|
+ bus = pci_scan_bus_parented(NULL, 0, &pci_root_ops, NULL);
|
|
+
|
|
+ /* setup everything */
|
|
+ if (bus) {
|
|
+ /* compute the bridge window sizes */
|
|
+ pci_bus_size_bridges(bus);
|
|
+
|
|
+ /* (re)assign device resources */
|
|
+ pci_bus_assign_resources(bus);
|
|
+
|
|
+ /* add the bus to the system */
|
|
+ pci_bus_add_devices(bus);
|
|
+
|
|
+ /* fixup irqs */
|
|
+ pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * pci_init(void)
|
|
+ *
|
|
+ * Initialize the PCI Hardware.
|
|
+ */
|
|
+static int __init
|
|
+pci_init(void)
|
|
+{
|
|
+#if defined(CONFIG_M54455)
|
|
+ init_mcf5445x_pci();
|
|
+#endif
|
|
+ if (!raw_pci_ops)
|
|
+ printk(KERN_ERR "PCI: FATAL: NO PCI Detected\n");
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* low level hardware (first) */
|
|
+arch_initcall(pci_init);
|
|
+
|
|
+/* basic bios init (second) */
|
|
+subsys_initcall(pcibios_init);
|
|
--- /dev/null
|
|
+++ b/arch/m68k/coldfire/signal.c
|
|
@@ -0,0 +1,868 @@
|
|
+/*
|
|
+ * linux/arch/m68k/kernel/signal.c
|
|
+ *
|
|
+ * Copyright (C) 1991, 1992 Linus Torvalds
|
|
+ *
|
|
+ * This file is subject to the terms and conditions of the GNU General Public
|
|
+ * License. See the file COPYING in the main directory of this archive
|
|
+ * for more details.
|
|
+ */
|
|
+
|
|
+/*
|
|
+ * Derived from m68k/kernel/signal.c and the original authors are credited
|
|
+ * there.
|
|
+ *
|
|
+ * Coldfire support by:
|
|
+ * Matt Waddel Matt.Waddel@freescale.com
|
|
+ * Copyright Freescale Semiconductor, Inc 2007
|
|
+ */
|
|
+
|
|
+#include <linux/sched.h>
|
|
+#include <linux/mm.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/signal.h>
|
|
+#include <linux/syscalls.h>
|
|
+#include <linux/errno.h>
|
|
+#include <linux/wait.h>
|
|
+#include <linux/ptrace.h>
|
|
+#include <linux/unistd.h>
|
|
+#include <linux/stddef.h>
|
|
+#include <linux/highuid.h>
|
|
+#include <linux/personality.h>
|
|
+#include <linux/tty.h>
|
|
+#include <linux/binfmts.h>
|
|
+
|
|
+#include <asm/setup.h>
|
|
+#include <asm/cf_uaccess.h>
|
|
+#include <asm/cf_pgtable.h>
|
|
+#include <asm/traps.h>
|
|
+#include <asm/ucontext.h>
|
|
+
|
|
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
|
|
+
|
|
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
|
|
+
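+/*
+ * Extra bytes carried by each hardware exception frame, indexed by the
+ * frame format field; -1 marks formats that should never be seen here.
+ */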
|
|
+const int frame_extra_sizes[16] = {
|
|
+ [1] = -1,
|
|
+ [2] = sizeof(((struct frame *)0)->un.fmt2),
|
|
+ [3] = sizeof(((struct frame *)0)->un.fmt3),
|
|
+ [4] = 0,
|
|
+ [5] = -1,
|
|
+ [6] = -1,
|
|
+ [7] = sizeof(((struct frame *)0)->un.fmt7),
|
|
+ [8] = -1,
|
|
+ [9] = sizeof(((struct frame *)0)->un.fmt9),
|
|
+ [10] = sizeof(((struct frame *)0)->un.fmta),
|
|
+ [11] = sizeof(((struct frame *)0)->un.fmtb),
|
|
+ [12] = -1,
|
|
+ [13] = -1,
|
|
+ [14] = -1,
|
|
+ [15] = -1,
|
|
+};
|
|
+
|
|
+/*
|
|
+ * Atomically swap in the new signal mask, and wait for a signal.
|
|
+ */
|
|
+asmlinkage int do_sigsuspend(struct pt_regs *regs)
|
|
+{
|
|
+ old_sigset_t mask = regs->d3;
|
|
+ sigset_t saveset;
|
|
+
|
|
+ mask &= _BLOCKABLE;
|
|
+ spin_lock_irq(¤t->sighand->siglock);
|
|
+ saveset = current->blocked;
|
|
+ siginitset(¤t->blocked, mask);
|
|
+ recalc_sigpending();
|
|
+ spin_unlock_irq(¤t->sighand->siglock);
|
|
+
|
|
+ regs->d0 = -EINTR;
|
|
+ while (1) {
|
|
+ current->state = TASK_INTERRUPTIBLE;
|
|
+ schedule();
|
|
+ if (do_signal(&saveset, regs))
|
|
+ return -EINTR;
|
|
+ }
|
|
+}
|
|
+
|
|
+asmlinkage int
|
|
+do_rt_sigsuspend(struct pt_regs *regs)
|
|
+{
|
|
+ sigset_t __user *unewset = (sigset_t __user *)regs->d1;
|
|
+ size_t sigsetsize = (size_t)regs->d2;
|
|
+ sigset_t saveset, newset;
|
|
+
|
|
+ /* XXX: Don't preclude handling different sized sigset_t's. */
|
|
+ if (sigsetsize != sizeof(sigset_t))
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
|
|
+ return -EFAULT;
|
|
+ sigdelsetmask(&newset, ~_BLOCKABLE);
|
|
+
|
|
+ spin_lock_irq(¤t->sighand->siglock);
|
|
+ saveset = current->blocked;
|
|
+ current->blocked = newset;
|
|
+ recalc_sigpending();
|
|
+ spin_unlock_irq(¤t->sighand->siglock);
|
|
+
|
|
+ regs->d0 = -EINTR;
|
|
+ while (1) {
|
|
+ current->state = TASK_INTERRUPTIBLE;
|
|
+ schedule();
|
|
+ if (do_signal(&saveset, regs))
|
|
+ return -EINTR;
|
|
+ }
|
|
+}
|
|
+
|
|
+asmlinkage int
|
|
+sys_sigaction(int sig, const struct old_sigaction __user *act,
|
|
+ struct old_sigaction __user *oact)
|
|
+{
|
|
+ struct k_sigaction new_ka, old_ka;
|
|
+ int ret;
|
|
+
|
|
+ if (act) {
|
|
+ old_sigset_t mask;
|
|
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
|
|
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
|
|
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
|
|
+ return -EFAULT;
|
|
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
|
|
+ __get_user(mask, &act->sa_mask);
|
|
+ siginitset(&new_ka.sa.sa_mask, mask);
|
|
+ }
|
|
+
|
|
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
|
|
+
|
|
+ if (!ret && oact) {
|
|
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
|
|
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
|
|
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
|
|
+ return -EFAULT;
|
|
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
|
|
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+asmlinkage int
|
|
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
|
|
+{
|
|
+ return do_sigaltstack(uss, uoss, rdusp());
|
|
+}
|
|
+
|
|
+
|
|
+/*
|
|
+ * Do a signal return; undo the signal stack.
|
|
+ *
|
|
+ * Keep the return code on the stack quadword aligned!
|
|
+ * That makes the cache flush below easier.
|
|
+ */
|
|
+
|
|
+struct sigframe
|
|
+{
|
|
+ char __user *pretcode;
|
|
+ int sig;
|
|
+ int code;
|
|
+ struct sigcontext __user *psc;
|
|
+ char retcode[16];
|
|
+ unsigned long extramask[_NSIG_WORDS-1];
|
|
+ struct sigcontext sc;
|
|
+};
|
|
+
|
|
+struct rt_sigframe
|
|
+{
|
|
+ char __user *pretcode;
|
|
+ int sig;
|
|
+ struct siginfo __user *pinfo;
|
|
+ void __user *puc;
|
|
+ char retcode[16];
|
|
+ struct siginfo info;
|
|
+ struct ucontext uc;
|
|
+};
|
|
+
|
|
+#define FPCONTEXT_SIZE 216
|
|
+#define uc_fpstate uc_filler[0]
|
|
+#define uc_formatvec uc_filler[FPCONTEXT_SIZE/4]
|
|
+#define uc_extra uc_filler[FPCONTEXT_SIZE/4+1]
|
|
+
|
|
+#ifdef CONFIG_FPU
|
|
+static unsigned char fpu_version; /* version num of fpu, set by setup_frame */
|
|
+
|
|
+static inline int restore_fpu_state(struct sigcontext *sc)
|
|
+{
|
|
+ int err = 1;
|
|
+
|
|
+ if (FPU_IS_EMU) {
|
|
+ /* restore registers */
|
|
+ memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
|
|
+ memcpy(current->thread.fp, sc->sc_fpregs, 24);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
|
|
+ /* Verify the frame format. */
|
|
+ if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
|
|
+ goto out;
|
|
+ if (CPU_IS_020_OR_030) {
|
|
+ if (m68k_fputype & FPU_68881 &&
|
|
+ !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
|
|
+ goto out;
|
|
+ if (m68k_fputype & FPU_68882 &&
|
|
+ !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
|
|
+ goto out;
|
|
+ } else if (CPU_IS_040) {
|
|
+ if (!(sc->sc_fpstate[1] == 0x00 ||
|
|
+ sc->sc_fpstate[1] == 0x28 ||
|
|
+ sc->sc_fpstate[1] == 0x60))
|
|
+ goto out;
|
|
+ } else if (CPU_IS_060) {
|
|
+ if (!(sc->sc_fpstate[3] == 0x00 ||
|
|
+ sc->sc_fpstate[3] == 0x60 ||
|
|
+ sc->sc_fpstate[3] == 0xe0))
|
|
+ goto out;
|
|
+ } else
|
|
+ goto out;
|
|
+
|
|
+ }
|
|
+ err = 0;
|
|
+
|
|
+out:
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static inline int rt_restore_fpu_state(struct ucontext __user *uc)
|
|
+{
|
|
+ unsigned char fpstate[FPCONTEXT_SIZE];
|
|
+ int context_size = CPU_IS_060 ? 8 : 0;
|
|
+ fpregset_t fpregs;
|
|
+ int err = 1;
|
|
+
|
|
+ if (FPU_IS_EMU) {
|
|
+ /* restore fpu control register */
|
|
+ if (__copy_from_user(current->thread.fpcntl,
|
|
+ uc->uc_mcontext.fpregs.f_fpcntl, 12))
|
|
+ goto out;
|
|
+ /* restore all other fpu register */
|
|
+ if (__copy_from_user(current->thread.fp,
|
|
+ uc->uc_mcontext.fpregs.f_fpregs, 96))
|
|
+ goto out;
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
|
|
+ goto out;
|
|
+ if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
|
|
+ if (!CPU_IS_060)
|
|
+ context_size = fpstate[1];
|
|
+ /* Verify the frame format. */
|
|
+ if (!CPU_IS_060 && (fpstate[0] != fpu_version))
|
|
+ goto out;
|
|
+ if (CPU_IS_020_OR_030) {
|
|
+ if (m68k_fputype & FPU_68881 &&
|
|
+ !(context_size == 0x18 || context_size == 0xb4))
|
|
+ goto out;
|
|
+ if (m68k_fputype & FPU_68882 &&
|
|
+ !(context_size == 0x38 || context_size == 0xd4))
|
|
+ goto out;
|
|
+ } else if (CPU_IS_040) {
|
|
+ if (!(context_size == 0x00 ||
|
|
+ context_size == 0x28 ||
|
|
+ context_size == 0x60))
|
|
+ goto out;
|
|
+ } else if (CPU_IS_060) {
|
|
+ if (!(fpstate[3] == 0x00 ||
|
|
+ fpstate[3] == 0x60 ||
|
|
+ fpstate[3] == 0xe0))
|
|
+ goto out;
|
|
+ } else
|
|
+ goto out;
|
|
+ if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
|
|
+ sizeof(fpregs)))
|
|
+ goto out;
|
|
+ }
|
|
+ if (context_size &&
|
|
+ __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
|
|
+ context_size))
|
|
+ goto out;
|
|
+ err = 0;
|
|
+
|
|
+out:
|
|
+ return err;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static inline int
|
|
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc,
|
|
+ void __user *fp, int *pd0)
|
|
+{
|
|
+ int fsize, formatvec;
|
|
+ struct sigcontext context;
|
|
+ int err = 0;
|
|
+
|
|
+ /* get previous context */
|
|
+ if (copy_from_user(&context, usc, sizeof(context)))
|
|
+ goto badframe;
|
|
+
|
|
+ /* restore passed registers */
|
|
+ regs->d1 = context.sc_d1;
|
|
+ regs->a0 = context.sc_a0;
|
|
+ regs->a1 = context.sc_a1;
|
|
+ regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
|
|
+ regs->pc = context.sc_pc;
|
|
+ regs->orig_d0 = -1; /* disable syscall checks */
|
|
+ wrusp(context.sc_usp);
|
|
+ formatvec = context.sc_formatvec;
|
|
+ regs->format = formatvec >> 12;
|
|
+ regs->vector = formatvec & 0xfff;
|
|
+
|
|
+#ifdef CONFIG_FPU
|
|
+ err = restore_fpu_state(&context);
|
|
+#endif
|
|
+
|
|
+ fsize = frame_extra_sizes[regs->format];
|
|
+ if (fsize < 0) {
|
|
+ /*
|
|
+ * user process trying to return with weird frame format
|
|
+ */
|
|
+#ifdef DEBUG
|
|
+ printk(KERN_DEBUG "user process returning with weird \
|
|
+ "frame format\n");
|
|
+#endif
|
|
+ goto badframe;
|
|
+ }
|
|
+
|
|
+ /* OK. Make room on the supervisor stack for the extra junk,
|
|
+ * if necessary.
|
|
+ */
|
|
+
|
|
+ {
|
|
+ struct switch_stack *sw = (struct switch_stack *)regs - 1;
|
|
+ regs->d0 = context.sc_d0;
|
|
+#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
|
|
+ __asm__ __volatile__
|
|
+ (" movel %0,%/sp\n\t"
|
|
+ " bra ret_from_signal\n"
|
|
+ "4:\n"
|
|
+ ".section __ex_table,\"a\"\n"
|
|
+ " .align 4\n"
|
|
+ " .long 2b,4b\n"
|
|
+ ".previous"
|
|
+ : /* no outputs, it doesn't ever return */
|
|
+ : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
|
|
+ "n" (frame_offset), "a" (fp)
|
|
+ : "a0");
|
|
+#undef frame_offset
|
|
+ /*
|
|
+ * If we ever get here an exception occurred while
|
|
+ * building the above stack-frame.
|
|
+ */
|
|
+ goto badframe;
|
|
+ }
|
|
+
|
|
+ *pd0 = context.sc_d0;
|
|
+ return err;
|
|
+
|
|
+badframe:
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+static inline int
|
|
+rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
|
|
+ struct ucontext __user *uc, int *pd0)
|
|
+{
|
|
+ int fsize, temp;
|
|
+ greg_t __user *gregs = uc->uc_mcontext.gregs;
|
|
+ unsigned long usp;
|
|
+ int err;
|
|
+
|
|
+ err = __get_user(temp, &uc->uc_mcontext.version);
|
|
+ if (temp != MCONTEXT_VERSION)
|
|
+ goto badframe;
|
|
+ /* restore passed registers */
|
|
+ err |= __get_user(regs->d0, &gregs[0]);
|
|
+ err |= __get_user(regs->d1, &gregs[1]);
|
|
+ err |= __get_user(regs->d2, &gregs[2]);
|
|
+ err |= __get_user(regs->d3, &gregs[3]);
|
|
+ err |= __get_user(regs->d4, &gregs[4]);
|
|
+ err |= __get_user(regs->d5, &gregs[5]);
|
|
+ err |= __get_user(sw->d6, &gregs[6]);
|
|
+ err |= __get_user(sw->d7, &gregs[7]);
|
|
+ err |= __get_user(regs->a0, &gregs[8]);
|
|
+ err |= __get_user(regs->a1, &gregs[9]);
|
|
+ err |= __get_user(regs->a2, &gregs[10]);
|
|
+ err |= __get_user(sw->a3, &gregs[11]);
|
|
+ err |= __get_user(sw->a4, &gregs[12]);
|
|
+ err |= __get_user(sw->a5, &gregs[13]);
|
|
+ err |= __get_user(sw->a6, &gregs[14]);
|
|
+ err |= __get_user(usp, &gregs[15]);
|
|
+ wrusp(usp);
|
|
+ err |= __get_user(regs->pc, &gregs[16]);
|
|
+ err |= __get_user(temp, &gregs[17]);
|
|
+ regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
|
|
+ regs->orig_d0 = -1; /* disable syscall checks */
|
|
+ err |= __get_user(temp, &uc->uc_formatvec);
|
|
+ regs->format = temp >> 12;
|
|
+ regs->vector = temp & 0xfff;
|
|
+
|
|
+#ifdef CONFIG_FPU
|
|
+ err |= rt_restore_fpu_state(uc);
|
|
+#endif
|
|
+
|
|
+ if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
|
|
+ goto badframe;
|
|
+
|
|
+ fsize = frame_extra_sizes[regs->format];
|
|
+ if (fsize < 0) {
|
|
+ /*
|
|
+ * user process trying to return with weird frame format
|
|
+ */
|
|
+#ifdef DEBUG
|
|
+ printk(KERN_DEBUG "user process returning with weird \
|
|
+ "frame format\n");
|
|
+#endif
|
|
+ goto badframe;
|
|
+ }
|
|
+
|
|
+ /* OK. Make room on the supervisor stack for the extra junk,
|
|
+ * if necessary.
|
|
+ */
|
|
+
|
|
+ {
|
|
+#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
|
|
+ __asm__ __volatile__
|
|
+ (" movel %0,%/sp\n\t"
|
|
+ " bra ret_from_signal\n"
|
|
+ "4:\n"
|
|
+ ".section __ex_table,\"a\"\n"
|
|
+ " .align 4\n"
|
|
+ " .long 2b,4b\n"
|
|
+ ".previous"
|
|
+ : /* no outputs, it doesn't ever return */
|
|
+ : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
|
|
+ "n" (frame_offset), "a" (&uc->uc_extra)
|
|
+ : "a0");
|
|
+#undef frame_offset
|
|
+ /*
|
|
+ * If we ever get here an exception occurred while
|
|
+ * building the above stack-frame.
|
|
+ */
|
|
+ goto badframe;
|
|
+ }
|
|
+
|
|
+ *pd0 = regs->d0;
|
|
+ return err;
|
|
+
|
|
+badframe:
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+asmlinkage int do_sigreturn(unsigned long __unused)
|
|
+{
|
|
+ struct switch_stack *sw = (struct switch_stack *) &__unused;
|
|
+ struct pt_regs *regs = (struct pt_regs *) (sw + 1);
|
|
+ unsigned long usp = rdusp();
|
|
+ struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
|
|
+ sigset_t set;
|
|
+ int d0;
|
|
+
|
|
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
|
|
+ goto badframe;
|
|
+ if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
|
|
+ (_NSIG_WORDS > 1 &&
|
|
+ __copy_from_user(&set.sig[1], &frame->extramask,
|
|
+ sizeof(frame->extramask))))
|
|
+ goto badframe;
|
|
+
|
|
+ sigdelsetmask(&set, ~_BLOCKABLE);
|
|
+ spin_lock_irq(¤t->sighand->siglock);
|
|
+ current->blocked = set;
|
|
+ recalc_sigpending();
|
|
+ spin_unlock_irq(¤t->sighand->siglock);
|
|
+
|
|
+ if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
|
|
+ goto badframe;
|
|
+ return d0;
|
|
+
|
|
+badframe:
|
|
+ force_sig(SIGSEGV, current);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+asmlinkage int do_rt_sigreturn(unsigned long __unused)
|
|
+{
|
|
+ struct switch_stack *sw = (struct switch_stack *) &__unused;
|
|
+ struct pt_regs *regs = (struct pt_regs *) (sw + 1);
|
|
+ unsigned long usp = rdusp();
|
|
+ struct rt_sigframe __user *frame =
|
|
+ (struct rt_sigframe __user *)(usp - 4);
|
|
+ sigset_t set;
|
|
+ int d0;
|
|
+
|
|
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
|
|
+ goto badframe;
|
|
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
|
|
+ goto badframe;
|
|
+
|
|
+ sigdelsetmask(&set, ~_BLOCKABLE);
|
|
+ spin_lock_irq(¤t->sighand->siglock);
|
|
+ current->blocked = set;
|
|
+ recalc_sigpending();
|
|
+ spin_unlock_irq(¤t->sighand->siglock);
|
|
+
|
|
+ if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
|
|
+ goto badframe;
|
|
+ return d0;
|
|
+
|
|
+badframe:
|
|
+ force_sig(SIGSEGV, current);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_FPU
|
|
+/*
|
|
+ * Set up a signal frame.
|
|
+ */
|
|
+
|
|
+static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
|
|
+{
|
|
+ if (FPU_IS_EMU) {
|
|
+ /* save registers */
|
|
+ memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
|
|
+ memcpy(sc->sc_fpregs, current->thread.fp, 24);
|
|
+ return;
|
|
+ }
|
|
+}
|
|
+
|
|
+static inline int rt_save_fpu_state(struct ucontext __user *uc,
|
|
+ struct pt_regs *regs)
|
|
+{
|
|
+ int err = 0;
|
|
+
|
|
+ if (FPU_IS_EMU) {
|
|
+ /* save fpu control register */
|
|
+ err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
|
|
+ current->thread.fpcntl, 12);
|
|
+ /* save all other fpu register */
|
|
+ err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
|
|
+ current->thread.fp, 96);
|
|
+ return err;
|
|
+ }
|
|
+
|
|
+ return err;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
|
|
+ unsigned long mask)
|
|
+{
|
|
+ sc->sc_mask = mask;
|
|
+ sc->sc_usp = rdusp();
|
|
+ sc->sc_d0 = regs->d0;
|
|
+ sc->sc_d1 = regs->d1;
|
|
+ sc->sc_a0 = regs->a0;
|
|
+ sc->sc_a1 = regs->a1;
|
|
+ sc->sc_sr = regs->sr;
|
|
+ sc->sc_pc = regs->pc;
|
|
+ sc->sc_formatvec = regs->format << 12 | regs->vector;
|
|
+#ifdef CONFIG_FPU
|
|
+ save_fpu_state(sc, regs);
|
|
+#endif
|
|
+}
|
|
+
|
|
+static inline int rt_setup_ucontext(struct ucontext __user *uc,
|
|
+ struct pt_regs *regs)
|
|
+{
|
|
+ struct switch_stack *sw = (struct switch_stack *)regs - 1;
|
|
+ greg_t __user *gregs = uc->uc_mcontext.gregs;
|
|
+ int err = 0;
|
|
+
|
|
+ err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
|
|
+ err |= __put_user(regs->d0, &gregs[0]);
|
|
+ err |= __put_user(regs->d1, &gregs[1]);
|
|
+ err |= __put_user(regs->d2, &gregs[2]);
|
|
+ err |= __put_user(regs->d3, &gregs[3]);
|
|
+ err |= __put_user(regs->d4, &gregs[4]);
|
|
+ err |= __put_user(regs->d5, &gregs[5]);
|
|
+ err |= __put_user(sw->d6, &gregs[6]);
|
|
+ err |= __put_user(sw->d7, &gregs[7]);
|
|
+ err |= __put_user(regs->a0, &gregs[8]);
|
|
+ err |= __put_user(regs->a1, &gregs[9]);
|
|
+ err |= __put_user(regs->a2, &gregs[10]);
|
|
+ err |= __put_user(sw->a3, &gregs[11]);
|
|
+ err |= __put_user(sw->a4, &gregs[12]);
|
|
+ err |= __put_user(sw->a5, &gregs[13]);
|
|
+ err |= __put_user(sw->a6, &gregs[14]);
|
|
+ err |= __put_user(rdusp(), &gregs[15]);
|
|
+ err |= __put_user(regs->pc, &gregs[16]);
|
|
+ err |= __put_user(regs->sr, &gregs[17]);
|
|
+ err |= __put_user((regs->format << 12) | regs->vector,
|
|
+ &uc->uc_formatvec);
|
|
+#ifdef CONFIG_FPU
|
|
+ err |= rt_save_fpu_state(uc, regs);
|
|
+#endif
|
|
+ return err;
|
|
+}
|
|
+
|
|
+extern void IcacheInvalidateCacheBlock(void *, unsigned long);
|
|
+static inline void push_cache(unsigned long vaddr)
|
|
+{
|
|
+ IcacheInvalidateCacheBlock((void *)vaddr, 8);
|
|
+}
|
|
+
|
|
+static inline void __user *
|
|
+get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
|
|
+{
|
|
+ unsigned long usp;
|
|
+
|
|
+ /* Default to using normal stack. */
|
|
+ usp = rdusp();
|
|
+
|
|
+ /* This is the X/Open sanctioned signal stack switching. */
|
|
+ if (ka->sa.sa_flags & SA_ONSTACK) {
|
|
+ if (!sas_ss_flags(usp))
|
|
+ usp = current->sas_ss_sp + current->sas_ss_size;
|
|
+ }
|
|
+ return (void __user *)((usp - frame_size) & -8UL);
|
|
+}
|
|
+
|
|
+static void setup_frame(int sig, struct k_sigaction *ka,
|
|
+ sigset_t *set, struct pt_regs *regs)
|
|
+{
|
|
+ struct sigframe __user *frame;
|
|
+ int fsize = frame_extra_sizes[regs->format];
|
|
+ struct sigcontext context;
|
|
+ int err = 0;
|
|
+
|
|
+ if (fsize < 0) {
|
|
+#ifdef DEBUG
|
|
+ printk(KERN_DEBUG "setup_frame: Unknown frame format %#x\n",
|
|
+ regs->format);
|
|
+#endif
|
|
+ goto give_sigsegv;
|
|
+ }
|
|
+
|
|
+ frame = get_sigframe(ka, regs, sizeof(*frame));
|
|
+
|
|
+ err |= __put_user((current_thread_info()->exec_domain
|
|
+ && current_thread_info()->exec_domain->signal_invmap
|
|
+ && sig < 32
|
|
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
|
|
+ : sig),
|
|
+ &frame->sig);
|
|
+
|
|
+ err |= __put_user(regs->vector, &frame->code);
|
|
+ err |= __put_user(&frame->sc, &frame->psc);
|
|
+
|
|
+ if (_NSIG_WORDS > 1)
|
|
+ err |= copy_to_user(frame->extramask, &set->sig[1],
|
|
+ sizeof(frame->extramask));
|
|
+
|
|
+ setup_sigcontext(&context, regs, set->sig[0]);
|
|
+ err |= copy_to_user(&frame->sc, &context, sizeof(context));
|
|
+
|
|
+ /* Set up to return from userspace. */
|
|
+ err |= __put_user(frame->retcode, &frame->pretcode);
|
|
+ /* moveq #,d0; trap #0 */
|
|
+ err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
|
|
+ (long __user *)(frame->retcode));
|
|
+
|
|
+ if (err)
|
|
+ goto give_sigsegv;
|
|
+
|
|
+ push_cache((unsigned long) &frame->retcode);
|
|
+
|
|
+ /* Set up registers for signal handler */
|
|
+ wrusp((unsigned long) frame);
|
|
+ regs->pc = (unsigned long) ka->sa.sa_handler;
|
|
+
|
|
+adjust_stack:
|
|
+ /* Prepare to skip over the extra stuff in the exception frame. */
|
|
+ if (regs->stkadj) {
|
|
+ struct pt_regs *tregs =
|
|
+ (struct pt_regs *)((ulong)regs + regs->stkadj);
|
|
+#ifdef DEBUG
|
|
+ printk(KERN_DEBUG "Performing stackadjust=%04x\n",
+ regs->stkadj);
+#endif
+ /* This must be copied with decreasing addresses to
+ handle overlaps. */
+ tregs->vector = 0;
+ tregs->format = 0;
+ tregs->pc = regs->pc;
+ tregs->sr = regs->sr;
+ }
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+ goto adjust_stack;
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ int fsize = frame_extra_sizes[regs->format];
+ int err = 0;
+
+ if (fsize < 0) {
+#ifdef DEBUG
+ printk(KERN_DEBUG "setup_frame: Unknown frame format %#x\n",
+ regs->format);
+#endif
+ goto give_sigsegv;
+ }
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ if (fsize) {
+ err |= copy_to_user(&frame->uc.uc_extra, regs + 1, fsize);
+ regs->stkadj = fsize;
+ }
+
+ err |= __put_user((current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig),
+ &frame->sig);
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+ err |= copy_siginfo_to_user(&frame->info, info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(NULL, &frame->uc.uc_link);
+ err |= __put_user((void __user *)current->sas_ss_sp,
+ &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(rdusp()),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= rt_setup_ucontext(&frame->uc, regs);
+ err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. */
+ err |= __put_user(frame->retcode, &frame->pretcode);
+
+ /* moveq #,d0; andi.l #,D0; trap #0 */
+ err |= __put_user(0x70AD0280, (long *)(frame->retcode + 0));
+ err |= __put_user(0x000000ff, (long *)(frame->retcode + 4));
+ err |= __put_user(0x4e400000, (long *)(frame->retcode + 8));
+
+ if (err)
+ goto give_sigsegv;
+
+ push_cache((unsigned long) &frame->retcode);
+
+ /* Set up registers for signal handler */
+ wrusp((unsigned long) frame);
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+
+adjust_stack:
+ /* Prepare to skip over the extra stuff in the exception frame. */
+ if (regs->stkadj) {
+ struct pt_regs *tregs =
+ (struct pt_regs *)((ulong)regs + regs->stkadj);
+#ifdef DEBUG
+ printk(KERN_DEBUG "Performing stackadjust=%04x\n",
+ regs->stkadj);
+#endif
+ /* This must be copied with decreasing addresses to
+ handle overlaps. */
+ tregs->vector = 0;
+ tregs->format = 0;
+ tregs->pc = regs->pc;
+ tregs->sr = regs->sr;
+ }
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+ goto adjust_stack;
+}
+
+static inline void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
+{
+ switch (regs->d0) {
+ case -ERESTARTNOHAND:
+ if (!has_handler)
+ goto do_restart;
+ regs->d0 = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
+ regs->d0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+do_restart:
+ regs->d0 = regs->orig_d0;
+ regs->pc -= 2;
+ break;
+ }
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *oldset, struct pt_regs *regs)
+{
+ /* are we from a system call? */
+ if (regs->orig_d0 >= 0)
+ /* If so, check system call restarting.. */
+ handle_restart(regs, ka, 1);
+
+ /* set up the stack frame */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ setup_frame(sig, ka, oldset, regs);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked, sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
+{
+ siginfo_t info;
+ struct k_sigaction ka;
+ int signr;
+
+ current->thread.esp0 = (unsigned long) regs;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &ka, &info, oldset, regs);
+ return 1;
+ }
+
+ /* Did we come from a system call? */
+ if (regs->orig_d0 >= 0)
+ /* Restart the system call - no handlers present */
+ handle_restart(regs, NULL, 0);
+
+ return 0;
+}
--- /dev/null
+++ b/arch/m68k/coldfire/traps.c
@@ -0,0 +1,454 @@
+/*
+ * linux/arch/m68knommu/kernel/traps.c
+ *
+ * Copyright (C) 1993, 1994 by Hamish Macdonald
+ *
+ * 68040 fixes by Michael Rausch
+ * 68040 fixes by Martin Apel
+ * 68060 fixes by Roman Hodek
+ * 68060 fixes by Jesper Skov
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Sets up all exception vectors
+ */
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/a.out.h>
+#include <linux/user.h>
+#include <linux/string.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/kallsyms.h>
+
+#include <asm/setup.h>
+#include <asm/fpu.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/traps.h>
+#include <asm/pgtable.h>
+#include <asm/machdep.h>
+#include <asm/siginfo.h>
+
+static char const * const vec_names[] = {
+ "RESET SP", "RESET PC", "BUS ERROR", "ADDRESS ERROR",
+ "ILLEGAL INSTRUCTION", "ZERO DIVIDE", "CHK", "TRAPcc",
+ "PRIVILEGE VIOLATION", "TRACE", "LINE 1010", "LINE 1111",
+ "UNASSIGNED RESERVED 12", "COPROCESSOR PROTOCOL VIOLATION",
+ "FORMAT ERROR", "UNINITIALIZED INTERRUPT",
+ "UNASSIGNED RESERVED 16", "UNASSIGNED RESERVED 17",
+ "UNASSIGNED RESERVED 18", "UNASSIGNED RESERVED 19",
+ "UNASSIGNED RESERVED 20", "UNASSIGNED RESERVED 21",
+ "UNASSIGNED RESERVED 22", "UNASSIGNED RESERVED 23",
+ "SPURIOUS INTERRUPT", "LEVEL 1 INT", "LEVEL 2 INT", "LEVEL 3 INT",
+ "LEVEL 4 INT", "LEVEL 5 INT", "LEVEL 6 INT", "LEVEL 7 INT",
+ "SYSCALL", "TRAP #1", "TRAP #2", "TRAP #3",
+ "TRAP #4", "TRAP #5", "TRAP #6", "TRAP #7",
+ "TRAP #8", "TRAP #9", "TRAP #10", "TRAP #11",
+ "TRAP #12", "TRAP #13", "TRAP #14", "TRAP #15",
+ "FPCP BSUN", "FPCP INEXACT", "FPCP DIV BY 0", "FPCP UNDERFLOW",
+ "FPCP OPERAND ERROR", "FPCP OVERFLOW", "FPCP SNAN",
+ "FPCP UNSUPPORTED OPERATION",
+ "MMU CONFIGURATION ERROR"
+};
+
+asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code);
+asmlinkage void trap_c(struct frame *fp);
+extern void __init coldfire_trap_init(void);
+
+void __init trap_init(void)
+{
+ coldfire_trap_init();
+}
+
+/* The following table converts the FS encoding of a ColdFire
+ exception stack frame into the error_code value needed by
+ do_fault. */
+
+static const unsigned char fs_err_code[] = {
+ 0, /* 0000 */
+ 0, /* 0001 */
+ 0, /* 0010 */
+ 0, /* 0011 */
+ 1, /* 0100 */
+ 0, /* 0101 */
+ 0, /* 0110 */
+ 0, /* 0111 */
+ 2, /* 1000 */
+ 3, /* 1001 */
+ 2, /* 1010 */
+ 0, /* 1011 */
+ 1, /* 1100 */
+ 1, /* 1101 */
+ 0, /* 1110 */
+ 0 /* 1111 */
+};
+
+#ifdef DEBUG
+static const char *fs_err_msg[16] = {
+ "Normal",
+ "Reserved",
+ "Interrupt during debug service routine",
+ "Reserved",
+ "X Protection",
+ "TLB X miss (opword)",
+ "TLB X miss (ext. word)",
+ "IFP in emulator mode",
+ "W Protection",
+ "Write error",
+ "TLB W miss",
+ "Reserved",
+ "R Protection",
+ "R/RMW Protection",
+ "TLB R miss",
+ "OEP in emulator mode",
+};
+#endif
+
+static inline void access_errorCF(struct frame *fp)
+{
+ unsigned long int mmusr, complainingAddress;
+ unsigned int err_code, fs;
+ int need_page_fault;
+
+ mmusr = fp->ptregs.mmusr;
+ complainingAddress = fp->ptregs.mmuar;
+#ifdef DEBUG
+ printk(KERN_DEBUG "pc %#lx, mmusr %#lx, complainingAddress %#lx\n", \
+ fp->ptregs.pc, mmusr, complainingAddress);
+#endif
+
+ /*
+ * error_code:
+ * bit 0 == 0 means no page found, 1 means protection fault
+ * bit 1 == 0 means read, 1 means write
+ */
+
+ fs = (fp->ptregs.fs2 << 2) | fp->ptregs.fs1;
+ switch (fs) {
+ case 5: /* 0101 TLB opword X miss */
+ need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0);
+ complainingAddress = fp->ptregs.pc;
+ break;
+ case 6: /* 0110 TLB extension word X miss */
+ need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1);
+ complainingAddress = fp->ptregs.pc + sizeof(long);
+ break;
+ case 10: /* 1010 TLB W miss */
+ need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0);
+ break;
+ case 14: /* 1110 TLB R miss */
+ need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0);
+ break;
+ default:
+ /* 0000 Normal */
+ /* 0001 Reserved */
+ /* 0010 Interrupt during debug service routine */
+ /* 0011 Reserved */
+ /* 0100 X Protection */
+ /* 0111 IFP in emulator mode */
+ /* 1000 W Protection*/
+ /* 1001 Write error*/
+ /* 1011 Reserved*/
+ /* 1100 R Protection*/
+ /* 1101 R Protection*/
+ /* 1111 OEP in emulator mode*/
+ need_page_fault = 1;
+ break;
+ }
+
+ if (need_page_fault) {
+ err_code = fs_err_code[fs];
+ if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */
+ err_code |= 2; /* bit1 - write, bit0 - protection */
+ do_page_fault(&fp->ptregs, complainingAddress, err_code);
+ }
+}
+
+void die_if_kernel(char *str, struct pt_regs *fp, int nr)
+{
+ if (!(fp->sr & PS_S))
+ return;
+
+ console_verbose();
+ printk(KERN_EMERG "%s: %08x\n", str, nr);
+ printk(KERN_EMERG "PC: [<%08lx>]", fp->pc);
+ print_symbol(" %s", fp->pc);
+ printk(KERN_EMERG "\nSR: %04x SP: %p a2: %08lx\n",
+ fp->sr, fp, fp->a2);
+ printk(KERN_EMERG "d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
+ fp->d0, fp->d1, fp->d2, fp->d3);
+ printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
+ fp->d4, fp->d5, fp->a0, fp->a1);
+
+ printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n",
+ current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
+ show_stack(NULL, (unsigned long *)fp);
+ do_exit(SIGSEGV);
+}
+
+asmlinkage void buserr_c(struct frame *fp)
+{
+ unsigned int fs;
+
+ /* Only set esp0 if coming from user mode */
+ if (user_mode(&fp->ptregs))
+ current->thread.esp0 = (unsigned long) fp;
+
+ fs = (fp->ptregs.fs2 << 2) | fp->ptregs.fs1;
+#if defined(DEBUG)
+ printk(KERN_DEBUG "*** Bus Error *** (%x)%s\n", fs,
+ fs_err_msg[fs & 0xf]);
+#endif
+ switch (fs) {
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xa:
+ case 0xd:
+ case 0xe:
+ case 0xf:
+ access_errorCF(fp);
+ break;
+ default:
+ die_if_kernel("bad frame format", &fp->ptregs, 0);
+#if defined(DEBUG)
+ printk(KERN_DEBUG "Unknown SIGSEGV - 4\n");
+#endif
+ force_sig(SIGSEGV, current);
+ }
+}
+
+
+int kstack_depth_to_print = 48;
+
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+ unsigned long *endstack, addr, symaddr;
+ extern char _start, _etext;
+ int i;
+
+ if (!stack) {
+ if (task)
+ stack = (unsigned long *)task->thread.ksp;
+ else
+ stack = (unsigned long *)&stack;
+ }
+
+ addr = (unsigned long) stack;
+ endstack = (unsigned long *) PAGE_ALIGN(addr);
+
+ printk(KERN_EMERG "Stack from %08lx:", (unsigned long)stack);
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (stack + 1 > endstack)
+ break;
+ if (i % 8 == 0)
+ printk("\n" KERN_EMERG " ");
+ symaddr = *stack;
+ printk(KERN_EMERG " %08lx", *stack++);
+ if ((symaddr >= 0xc0000000) && (symaddr < 0xc1000000))
+ print_symbol("(%s)", symaddr);
+ }
+ printk("\n");
+
+ printk(KERN_EMERG "Call Trace:");
+ i = 0;
+ while (stack + 1 <= endstack) {
+ addr = *stack++;
+ /*
+ * If the address is either in the text segment of the
+ * kernel, or in the region which contains vmalloc'ed
+ * memory, it *may* be the address of a calling
+ * routine; if so, print it so that someone tracing
+ * down the cause of the crash will be able to figure
+ * out the call path that was taken.
+ */
+ if (((addr >= (unsigned long) &_start) &&
+ (addr <= (unsigned long) &_etext))) {
+ if (i % 4 == 0)
+ printk("\n" KERN_EMERG " ");
+ printk(KERN_EMERG " [<%08lx>]", addr);
+ i++;
+ }
+ }
+ printk("\n");
+}
+
+void bad_super_trap(struct frame *fp)
+{
+ console_verbose();
+ if (fp->ptregs.vector < 4*sizeof(vec_names)/sizeof(vec_names[0]))
+ printk(KERN_WARNING "*** %s *** FORMAT=%X\n",
+ vec_names[(fp->ptregs.vector) >> 2],
+ fp->ptregs.format);
+ else
+ printk(KERN_WARNING "*** Exception %d *** FORMAT=%X\n",
+ (fp->ptregs.vector) >> 2,
+ fp->ptregs.format);
+ printk(KERN_WARNING "Current process id is %d\n", current->pid);
+ die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
+}
+
+asmlinkage void trap_c(struct frame *fp)
+{
+ int sig;
+ siginfo_t info;
+
+ if (fp->ptregs.sr & PS_S) {
+ if ((fp->ptregs.vector >> 2) == VEC_TRACE) {
+ /* traced a trapping instruction */
+ current->ptrace |= PT_DTRACE;
+ } else
+ bad_super_trap(fp);
+ return;
+ }
+
+ /* send the appropriate signal to the user program */
+ switch ((fp->ptregs.vector) >> 2) {
+ case VEC_ADDRERR:
+ info.si_code = BUS_ADRALN;
+ sig = SIGBUS;
+ break;
+ case VEC_ILLEGAL:
+ case VEC_LINE10:
+ case VEC_LINE11:
+ info.si_code = ILL_ILLOPC;
+ sig = SIGILL;
+ break;
+ case VEC_PRIV:
+ info.si_code = ILL_PRVOPC;
+ sig = SIGILL;
+ break;
+ case VEC_COPROC:
+ info.si_code = ILL_COPROC;
+ sig = SIGILL;
+ break;
+ case VEC_TRAP1: /* gdbserver breakpoint */
+ fp->ptregs.pc -= 2;
+ info.si_code = TRAP_TRACE;
+ sig = SIGTRAP;
+ break;
+ case VEC_TRAP2:
+ case VEC_TRAP3:
+ case VEC_TRAP4:
+ case VEC_TRAP5:
+ case VEC_TRAP6:
+ case VEC_TRAP7:
+ case VEC_TRAP8:
+ case VEC_TRAP9:
+ case VEC_TRAP10:
+ case VEC_TRAP11:
+ case VEC_TRAP12:
+ case VEC_TRAP13:
+ case VEC_TRAP14:
+ info.si_code = ILL_ILLTRP;
+ sig = SIGILL;
+ break;
+ case VEC_FPBRUC:
+ case VEC_FPOE:
+ case VEC_FPNAN:
+ info.si_code = FPE_FLTINV;
+ sig = SIGFPE;
+ break;
+ case VEC_FPIR:
+ info.si_code = FPE_FLTRES;
+ sig = SIGFPE;
+ break;
+ case VEC_FPDIVZ:
+ info.si_code = FPE_FLTDIV;
+ sig = SIGFPE;
+ break;
+ case VEC_FPUNDER:
+ info.si_code = FPE_FLTUND;
+ sig = SIGFPE;
+ break;
+ case VEC_FPOVER:
+ info.si_code = FPE_FLTOVF;
+ sig = SIGFPE;
+ break;
+ case VEC_ZERODIV:
+ info.si_code = FPE_INTDIV;
+ sig = SIGFPE;
+ break;
+ case VEC_CHK:
+ case VEC_TRAP:
+ info.si_code = FPE_INTOVF;
+ sig = SIGFPE;
+ break;
+ case VEC_TRACE: /* ptrace single step */
+ info.si_code = TRAP_TRACE;
+ sig = SIGTRAP;
+ break;
+ case VEC_TRAP15: /* breakpoint */
+ info.si_code = TRAP_BRKPT;
+ sig = SIGTRAP;
+ break;
+ default:
+ info.si_code = ILL_ILLOPC;
+ sig = SIGILL;
+ break;
+ }
+ info.si_signo = sig;
+ info.si_errno = 0;
+ switch (fp->ptregs.format) {
+ default:
+ info.si_addr = (void *) fp->ptregs.pc;
+ break;
+ case 2:
+ info.si_addr = (void *) fp->un.fmt2.iaddr;
+ break;
+ case 7:
+ info.si_addr = (void *) fp->un.fmt7.effaddr;
+ break;
+ case 9:
+ info.si_addr = (void *) fp->un.fmt9.iaddr;
+ break;
+ case 10:
+ info.si_addr = (void *) fp->un.fmta.daddr;
+ break;
+ case 11:
+ info.si_addr = (void *) fp->un.fmtb.daddr;
+ break;
+ }
+ force_sig_info(sig, &info, current);
+}
+
+asmlinkage void set_esp0(unsigned long ssp)
+{
+ current->thread.esp0 = ssp;
+}
+
+/*
+ * The architecture-independent backtrace generator
+ */
+void dump_stack(void)
+{
+ unsigned long stack;
+
+ show_stack(current, &stack);
+}
+EXPORT_SYMBOL(dump_stack);
+
+#ifdef CONFIG_M68KFPU_EMU
+asmlinkage void fpemu_signal(int signal, int code, void *addr)
+{
+ siginfo_t info;
+
+ info.si_signo = signal;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = addr;
+ force_sig_info(signal, &info, current);
+}
+#endif
--- /dev/null
+++ b/arch/m68k/coldfire/vmlinux-cf.lds
@@ -0,0 +1,92 @@
+/* ld script to make m68k Coldfire Linux kernel */
+
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf32-m68k", "elf32-m68k", "elf32-m68k")
+OUTPUT_ARCH(m68k)
+ENTRY(_start)
+jiffies = jiffies_64 + 4;
+SECTIONS
+{
+ . = 0xC0020000;
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text.head)
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ } :text = 0x4e75
+
+ _etext = .; /* End of text section */
+
+ . = ALIGN(16);
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) }
+ __stop___ex_table = .;
+
+ RODATA
+
+ .data : { /* Data */
+ DATA_DATA
+ CONSTRUCTORS
+ }
+
+ .bss : { *(.bss) } /* BSS */
+
+ . = ALIGN(16);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) } :data
+
+ _edata = .; /* End of data section */
+
+ . = ALIGN(8192); /* Initrd */
+ __init_begin = .;
+ .init.text : {
+ _sinittext = .;
+ *(.init.text)
+ _einittext = .;
+ }
+ .init.data : { *(.init.data) }
+ . = ALIGN(16);
+ __setup_start = .;
+ .init.setup : { *(.init.setup) }
+ __setup_end = .;
+ __initcall_start = .;
+ .initcall.init : {
+ INITCALLS
+ }
+ __initcall_end = .;
+ __con_initcall_start = .;
+ .con_initcall.init : { *(.con_initcall.init) }
+ __con_initcall_end = .;
+ SECURITY_INIT
+#ifdef CONFIG_BLK_DEV_INITRD
+ . = ALIGN(8192);
+ __initramfs_start = .;
+ .init.ramfs : { *(.init.ramfs) }
+ __initramfs_end = .;
+#endif
+ . = ALIGN(8192);
+ __init_end = .;
+
+ .data.init_task : { *(.data.init_task) } /* The initial task and kernel stack */
+
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.exit.text)
+ *(.exit.data)
+ *(.exitcall.exit)
+ }
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}