
[ixp4xx]: remove unneeded patches

git-svn-id: svn://svn.openwrt.org/openwrt/trunk@23774 3c298f89-4303-0410-b956-a3cf2f4a3e73
kaloz 2010-11-02 12:17:05 +00:00
parent 9875342576
commit 4e228a9f1f
3 changed files with 0 additions and 199 deletions


@@ -1,154 +0,0 @@
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -435,7 +435,6 @@ config ARCH_IXP4XX
select CPU_XSCALE
select GENERIC_GPIO
select GENERIC_CLOCKEVENTS
- select DMABOUNCE if PCI
help
Support for Intel's IXP4XX (XScale) family of processors.
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -199,6 +199,43 @@ config IXP4XX_INDIRECT_PCI
need to use the indirect method instead. If you don't know
what you need, leave this option unselected.
+config IXP4XX_LEGACY_DMABOUNCE
+ bool "Legacy PCI DMA bounce support"
+ depends on PCI
+ default n
+ select DMABOUNCE
+ help
+ The IXP4xx is limited to a 64MB window for PCI DMA, which
+ requires that PCI accesses >= 64MB are bounced via buffers
+ below 64MB.
+
+ The kernel has traditionally handled this issue by using ARM
+ specific DMA bounce support code for all accesses >= 64MB.
+ That code causes problems of its own, so it is desirable to
+ disable it.
+
+ Enabling this option makes IXP4xx continue to use the problematic
+ ARM DMA bounce code. Disabling this option makes IXP4xx use the
+ kernel's generic bounce code.
+
+ Say 'N'.
+
+config IXP4XX_ZONE_DMA
+ bool "Support > 64MB RAM"
+ depends on !IXP4XX_LEGACY_DMABOUNCE
+ default y
+ select ZONE_DMA
+ help
+ The IXP4xx is limited to a 64MB window for PCI DMA, which
+ requires that PCI accesses above 64MB are bounced via buffers
+ below 64MB.
+
+ Disabling this option allows you to omit the support code for
+ DMA-able memory allocations and DMA bouncing, but the kernel
+ will then not work properly if more than 64MB of RAM is present.
+
+ Say 'Y' unless your platform is limited to <= 64MB of RAM.
+
config IXP4XX_QMGR
tristate "IXP4xx Queue Manager support"
help
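The help text above describes the 64MB PCI DMA window. The arithmetic behind it, as a minimal stand-alone C sketch (SZ_64M and the sample addresses are defined here purely for illustration, not taken from the patch):

#include <stdio.h>

#define SZ_64M (64UL << 20)

/* The end of the transfer, dma_addr + size, is exclusive, so '>' is
 * the exact test for crossing the PCI-reachable 64MB window. */
static int needs_bounce(unsigned long dma_addr, unsigned long size)
{
	return (dma_addr + size) > SZ_64M;
}

int main(void)
{
	printf("%d\n", needs_bounce(SZ_64M - 4096, 4096)); /* 0: ends exactly at 64MB */
	printf("%d\n", needs_bounce(SZ_64M - 4096, 8192)); /* 1: spills past the window */
	return 0;
}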
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -321,27 +321,33 @@ static int abort_handler(unsigned long a
*/
static int ixp4xx_pci_platform_notify(struct device *dev)
{
- if(dev->bus == &pci_bus_type) {
- *dev->dma_mask = SZ_64M - 1;
+ if (dev->bus == &pci_bus_type) {
+ *dev->dma_mask = SZ_64M - 1;
dev->coherent_dma_mask = SZ_64M - 1;
+#ifdef CONFIG_DMABOUNCE
dmabounce_register_dev(dev, 2048, 4096);
+#endif
}
return 0;
}
static int ixp4xx_pci_platform_notify_remove(struct device *dev)
{
- if(dev->bus == &pci_bus_type) {
+#ifdef CONFIG_DMABOUNCE
+ if (dev->bus == &pci_bus_type)
dmabounce_unregister_dev(dev);
- }
+#endif
return 0;
}
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+#ifdef CONFIG_DMABOUNCE
+int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
- return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
+ return (dev->bus == &pci_bus_type ) && ((dma_addr + size) > SZ_64M);
}
+#endif
+#ifdef CONFIG_ZONE_DMA
/*
* Only first 64MB of memory can be accessed via PCI.
* We use GFP_DMA to allocate safe buffers to do map/unmap.
@@ -364,6 +370,7 @@ void __init ixp4xx_adjust_zones(unsigned
zhole_size[1] = zhole_size[0];
zhole_size[0] = 0;
}
+#endif
void __init ixp4xx_pci_preinit(void)
{
--- a/arch/arm/mach-ixp4xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp4xx/include/mach/memory.h
@@ -16,10 +16,12 @@
#if !defined(__ASSEMBLY__) && defined(CONFIG_PCI)
+#ifdef CONFIG_ZONE_DMA
void ixp4xx_adjust_zones(unsigned long *size, unsigned long *holes);
#define arch_adjust_zones(size, holes) \
ixp4xx_adjust_zones(size, holes)
+#endif
#define ISA_DMA_THRESHOLD (SZ_64M - 1)
#define MAX_DMA_ADDRESS (PAGE_OFFSET + SZ_64M)
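The arch_adjust_zones() hook above is what splits RAM so that GFP_DMA allocations land below the PCI window. A rough sketch of the split ixp4xx_adjust_zones() performs, reconstructed from the context lines in the common-pci.c hunk (sizes are in pages; PAGE_SHIFT and SZ_64M are given demo values here, and the real kernel function may differ in detail):

#define PAGE_SHIFT 12
#define SZ_64M (64UL << 20)
#define SZ_64M_PAGES (SZ_64M >> PAGE_SHIFT)

static void adjust_zones_sketch(unsigned long *zone_size,
				unsigned long *zhole_size)
{
	/* All RAM already fits in the 64MB window: nothing to split. */
	if (zone_size[0] <= SZ_64M_PAGES)
		return;

	/* Pages past 64MB move to ZONE_NORMAL; ZONE_DMA keeps only the
	 * PCI-reachable first 64MB, and the hole bookkeeping follows. */
	zone_size[1] = zone_size[0] - SZ_64M_PAGES;
	zone_size[0] = SZ_64M_PAGES;
	zhole_size[1] = zhole_size[0];
	zhole_size[0] = 0;
}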
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -30,6 +30,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
+#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
@@ -248,7 +249,8 @@ static inline dma_addr_t map_single(stru
needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
}
- if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
+#ifdef CONFIG_DMABOUNCE
+ if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
struct safe_buffer *buf;
buf = alloc_safe_buffer(device_info, ptr, size, dir);
@@ -282,6 +284,7 @@ static inline dma_addr_t map_single(stru
return dma_addr;
}
+#endif
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
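From a driver's point of view, the effect of ixp4xx_pci_platform_notify() above is that both DMA masks are capped at SZ_64M - 1, so coherent allocations are forced below the window. A hypothetical PCI driver fragment (the function name and buffer length are invented for illustration; dma_alloc_coherent() is the standard kernel API):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void *example_alloc_ring(struct pci_dev *pdev, size_t len,
				dma_addr_t *handle)
{
	/* coherent_dma_mask was capped at SZ_64M - 1 by the platform
	 * notifier, so this returns memory from the first 64MB and the
	 * device can reach it without any bounce buffer. */
	return dma_alloc_coherent(&pdev->dev, len, handle, GFP_KERNEL);
}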


@@ -1,12 +0,0 @@
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -381,7 +381,8 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
*/
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
- WARN_ON(irqs_disabled());
+ if (irqs_disabled()) /* don't want stack dumps for these! */
+ printk("WARNING: at %s:%d %s()\n", __FILE__, __LINE__, __FUNCTION__);
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
return;
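The hunk above only softens the diagnostic; the rule it polices is unchanged: dma_free_coherent() must not be called with interrupts disabled. A hypothetical fragment showing the safe calling pattern (all names here are invented for illustration):

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

static void example_teardown(struct device *dev, void *cpu_addr,
			     dma_addr_t handle, spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* ... unlink the buffer from driver state ... */
	spin_unlock_irqrestore(lock, flags);

	/* Free only after IRQs are enabled again; doing this inside the
	 * locked region would trigger the printk added above. */
	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, handle);
}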


@@ -1,33 +0,0 @@
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -323,12 +323,13 @@ static void __init setup_processor(void)
void cpu_init(void)
{
unsigned int cpu = smp_processor_id();
- struct stack *stk = &stacks[cpu];
+ struct stack *stk;
if (cpu >= NR_CPUS) {
printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
BUG();
}
+ stk = &stacks[cpu];
/*
* Define the placement constraint for the inline asm directive below.
@@ -387,13 +388,14 @@ static struct machine_desc * __init setu
static int __init arm_add_memory(unsigned long start, unsigned long size)
{
- struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+ struct membank *bank;
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_CRIT "NR_BANKS too low, "
"ignoring memory at %#lx\n", start);
return -EINVAL;
}
+ bank = &meminfo.bank[meminfo.nr_banks];
/*
* Ensure that start/size are aligned to a page boundary.
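Both hunks above make the same change: the pointer into the array is computed only after the index has been checked, since even forming the address of an out-of-range element is undefined behavior in C. The pattern, distilled into a stand-alone sketch (the struct and array are invented stand-ins for stacks[] and meminfo.bank[]):

#include <stddef.h>

#define MAX_BANKS 8

struct bank { unsigned long start, size; };
static struct bank banks[MAX_BANKS];
static unsigned int nr_banks;

static struct bank *next_bank(void)
{
	/* Validate the index first... */
	if (nr_banks >= MAX_BANKS)
		return NULL;
	/* ...then take &banks[nr_banks]; doing it before the check, as
	 * the old code did, forms an out-of-bounds address when the
	 * array is already full. */
	return &banks[nr_banks++];
}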