Mirror of git://projects.qi-hardware.com/openwrt-xburst.git (synced 2024-12-25 02:48:37 +02:00)
improve the skb padding performance change to avoid unnecessary reallocations in the routing code
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@15761 3c298f89-4303-0410-b956-a3cf2f4a3e73
parent 582aef75bb
commit 7858dcd34a
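The change splits one constant into two: NET_SKB_PAD keeps documenting the headroom callers may rely on (16 or 32 bytes, depending on the kernel version), while the new NET_SKB_PAD_ALLOC makes fresh allocations reserve 64 bytes up front. Paths such as __skb_cow() still only grow an skb when the caller's requirement is unmet, so buffers with less headroom are no longer forced through pskb_expand_head() merely because the default pad grew. A minimal user-space sketch of the idea follows; struct buf and its helpers are simplified stand-ins for the sk_buff machinery, not the kernel API:

#include <stdlib.h>
#include <string.h>

/* Simplified stand-ins for the constants in the patches below. */
#define NET_SKB_PAD        16   /* headroom callers may rely on */
#define NET_SKB_PAD_ALLOC  64   /* headroom reserved on fresh allocations */

/* Toy buffer: head is the start of the allocation, data the payload. */
struct buf {
    unsigned char *head;
    unsigned char *data;
    size_t len;
};

static size_t buf_headroom(const struct buf *b)
{
    return (size_t)(b->data - b->head);
}

/* Mirrors __dev_alloc_skb(): reserve the larger pad at allocation time
 * so later header insertion usually finds enough room for free. */
static struct buf *buf_alloc(size_t length)
{
    struct buf *b = malloc(sizeof(*b));

    if (!b)
        return NULL;
    b->head = malloc(length + NET_SKB_PAD_ALLOC);
    if (!b->head) {
        free(b);
        return NULL;
    }
    b->data = b->head + NET_SKB_PAD_ALLOC;  /* like skb_reserve() */
    b->len = 0;
    return b;
}

/* Mirrors the __skb_cow() fast path: reallocate only when the caller's
 * requirement is not already satisfied, rounding the growth up to the
 * allocation pad (like ALIGN(delta, NET_SKB_PAD_ALLOC) in the patch). */
static int buf_cow(struct buf *b, size_t headroom)
{
    size_t room = buf_headroom(b);
    size_t delta, grown;
    unsigned char *nhead;

    if (room >= headroom)
        return 0;   /* fast path: no reallocation needed */

    delta = headroom - room;
    delta = (delta + NET_SKB_PAD_ALLOC - 1) & ~(size_t)(NET_SKB_PAD_ALLOC - 1);
    grown = room + delta;
    nhead = malloc(grown + b->len);
    if (!nhead)
        return -1;
    memcpy(nhead + grown, b->data, b->len);  /* like pskb_expand_head() */
    free(b->head);
    b->head = nhead;
    b->data = nhead + grown;
    return 0;
}

int main(void)
{
    struct buf *b = buf_alloc(256);

    /* 64 bytes are already reserved, so requiring the documented
     * 16-byte minimum never reallocates. */
    return b ? buf_cow(b, NET_SKB_PAD) : 1;
}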
@@ -1,16 +1,56 @@
 --- a/include/linux/skbuff.h
 +++ b/include/linux/skbuff.h
-@@ -1270,9 +1270,12 @@
+@@ -1256,11 +1256,18 @@ static inline int skb_network_offset(con
  *
  * Various parts of the networking layer expect at least 16 bytes of
  * headroom, you should not reduce this.
 + *
 + * This has been changed to 64 to acommodate for routing between ethernet
-+ * and wireless
++ * and wireless, but only for new allocations
  */
  #ifndef NET_SKB_PAD
--#define NET_SKB_PAD 16
-+#define NET_SKB_PAD 64
+ #define NET_SKB_PAD 16
  #endif
 
++#ifndef NET_SKB_PAD_ALLOC
++#define NET_SKB_PAD_ALLOC 64
++#endif
++
  extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+
+ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+@@ -1350,9 +1357,9 @@ static inline void __skb_queue_purge(str
+ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+                                               gfp_t gfp_mask)
+ {
+-        struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
++        struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD_ALLOC, gfp_mask);
+         if (likely(skb))
+-                skb_reserve(skb, NET_SKB_PAD);
++                skb_reserve(skb, NET_SKB_PAD_ALLOC);
+         return skb;
+ }
+
+@@ -1425,7 +1432,7 @@ static inline int __skb_cow(struct sk_bu
+         delta = headroom - skb_headroom(skb);
+
+         if (delta || cloned)
+-                return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
++                return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD_ALLOC), 0,
+                                         GFP_ATOMIC);
+         return 0;
+ }
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -243,9 +243,9 @@ struct sk_buff *__netdev_alloc_skb(struc
+         int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+         struct sk_buff *skb;
+
+-        skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
++        skb = __alloc_skb(length + NET_SKB_PAD_ALLOC, gfp_mask, 0, node);
+         if (likely(skb)) {
+-                skb_reserve(skb, NET_SKB_PAD);
++                skb_reserve(skb, NET_SKB_PAD_ALLOC);
+                 skb->dev = dev;
+         }
+         return skb;
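A subtlety in the __skb_cow() hunk above: when an expansion is unavoidable, the shortfall is now rounded up with ALIGN(delta, NET_SKB_PAD_ALLOC) rather than ALIGN(delta, NET_SKB_PAD), so the skb over-provisions in 64-byte chunks and a second expansion becomes less likely. A quick sketch of the arithmetic, with illustrative values:

#include <stdio.h>

/* Same rounding the kernel's ALIGN() macro performs for power-of-two a. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned int have = 16;           /* headroom of an old-style skb */
    unsigned int need = 36;           /* illustrative requirement */
    unsigned int delta = need - have; /* 20 bytes short */

    /* Old rounding grows by 32 and may have to expand again later;
     * new rounding grows by 64 in one shot. */
    printf("grow by %u (pad 16) vs %u (pad 64)\n",
           ALIGN_UP(delta, 16u), ALIGN_UP(delta, 64u));
    return 0;
}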
@@ -1,16 +1,56 @@
 --- a/include/linux/skbuff.h
 +++ b/include/linux/skbuff.h
-@@ -1306,9 +1306,12 @@
+@@ -1306,11 +1306,18 @@ static inline int skb_network_offset(con
  *
  * Various parts of the networking layer expect at least 16 bytes of
  * headroom, you should not reduce this.
 + *
 + * This has been changed to 64 to acommodate for routing between ethernet
-+ * and wireless
++ * and wireless, but only for new allocations
  */
  #ifndef NET_SKB_PAD
--#define NET_SKB_PAD 16
-+#define NET_SKB_PAD 64
+ #define NET_SKB_PAD 16
  #endif
 
++#ifndef NET_SKB_PAD_ALLOC
++#define NET_SKB_PAD_ALLOC 64
++#endif
++
  extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+
+ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+@@ -1400,9 +1407,9 @@ static inline void __skb_queue_purge(str
+ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+                                               gfp_t gfp_mask)
+ {
+-        struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
++        struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD_ALLOC, gfp_mask);
+         if (likely(skb))
+-                skb_reserve(skb, NET_SKB_PAD);
++                skb_reserve(skb, NET_SKB_PAD_ALLOC);
+         return skb;
+ }
+
+@@ -1475,7 +1482,7 @@ static inline int __skb_cow(struct sk_bu
+         delta = headroom - skb_headroom(skb);
+
+         if (delta || cloned)
+-                return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
++                return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD_ALLOC), 0,
+                                         GFP_ATOMIC);
+         return 0;
+ }
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -320,9 +320,9 @@ struct sk_buff *__netdev_alloc_skb(struc
+         int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+         struct sk_buff *skb;
+
+-        skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
++        skb = __alloc_skb(length + NET_SKB_PAD_ALLOC, gfp_mask, 0, node);
+         if (likely(skb)) {
+-                skb_reserve(skb, NET_SKB_PAD);
++                skb_reserve(skb, NET_SKB_PAD_ALLOC);
+                 skb->dev = dev;
+         }
+         return skb;
@@ -1,16 +1,56 @@
 --- a/include/linux/skbuff.h
 +++ b/include/linux/skbuff.h
-@@ -1369,9 +1369,12 @@ static inline int skb_network_offset(con
+@@ -1369,11 +1369,18 @@ static inline int skb_network_offset(con
  *
  * Various parts of the networking layer expect at least 32 bytes of
  * headroom, you should not reduce this.
 + *
 + * This has been changed to 64 to acommodate for routing between ethernet
-+ * and wireless
++ * and wireless, but only for new allocations
  */
  #ifndef NET_SKB_PAD
--#define NET_SKB_PAD 32
-+#define NET_SKB_PAD 64
+ #define NET_SKB_PAD 32
  #endif
 
++#ifndef NET_SKB_PAD_ALLOC
++#define NET_SKB_PAD_ALLOC 64
++#endif
++
  extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+
+ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+@@ -1463,9 +1470,9 @@ static inline void __skb_queue_purge(str
+ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+                                               gfp_t gfp_mask)
+ {
+-        struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
++        struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD_ALLOC, gfp_mask);
+         if (likely(skb))
+-                skb_reserve(skb, NET_SKB_PAD);
++                skb_reserve(skb, NET_SKB_PAD_ALLOC);
+         return skb;
+ }
+
+@@ -1538,7 +1545,7 @@ static inline int __skb_cow(struct sk_bu
+         delta = headroom - skb_headroom(skb);
+
+         if (delta || cloned)
+-                return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
++                return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD_ALLOC), 0,
+                                         GFP_ATOMIC);
+         return 0;
+ }
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -327,9 +327,9 @@ struct sk_buff *__netdev_alloc_skb(struc
+         int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+         struct sk_buff *skb;
+
+-        skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
++        skb = __alloc_skb(length + NET_SKB_PAD_ALLOC, gfp_mask, 0, node);
+         if (likely(skb)) {
+-                skb_reserve(skb, NET_SKB_PAD);
++                skb_reserve(skb, NET_SKB_PAD_ALLOC);
+                 skb->dev = dev;
+         }
+         return skb;
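The motivation in the patch comment ("routing between ethernet and wireless") is plain headroom arithmetic: eth_type_trans() pulls the 14-byte ethernet header on receive, leaving NET_SKB_PAD + 14 bytes in front of the payload, while an 802.11 data header plus QoS and crypto fields typically needs more than the 30 bytes the old 16-byte pad leaves, so nearly every forwarded frame paid for pskb_expand_head(). A sketch of that comparison; the 34-byte wireless header size is an illustrative assumption, not a value from the patch:

#include <stdio.h>

#define ETH_HLEN  14    /* ethernet header stripped on receive */
#define WLAN_HLEN 34    /* illustrative: 802.11 QoS header + crypto IV */

int main(void)
{
    unsigned int pads[] = { 16, 64 };   /* old and new allocation pad */

    for (int i = 0; i < 2; i++) {
        /* after eth_type_trans(), headroom = pad + ETH_HLEN */
        unsigned int room = pads[i] + ETH_HLEN;

        printf("pad %2u: headroom %2u -> %s\n", pads[i], room,
               room >= WLAN_HLEN ? "802.11 push fits"
                                 : "pskb_expand_head() needed");
    }
    return 0;
}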