patch-2.1.91 linux/net/core/skbuff.c
- Lines: 357
- Date: Thu Mar 26 14:59:02 1998
- Orig file: v2.1.90/linux/net/core/skbuff.c
- Orig date: Tue Mar 10 10:03:36 1998
diff -u --recursive --new-file v2.1.90/linux/net/core/skbuff.c linux/net/core/skbuff.c
@@ -16,6 +16,7 @@
* only put in the headers
* Ray VanTassle : Fixed --skb->lock in free
* Alan Cox : skb_copy copy arp field
+ * Andi Kleen : slabified it.
*
* NOTE:
* The __skb_ routines should be called with interrupts
@@ -45,6 +46,8 @@
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/init.h>
#include <net/ip.h>
#include <net/protocol.h>
@@ -57,6 +60,11 @@
#include <asm/system.h>
/*
+ * Skb list spinlock
+ */
+spinlock_t skb_queue_lock = SPIN_LOCK_UNLOCKED;
+
+/*
* Resource tracking variables
*/
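
[Editor's note] The new global skb_queue_lock serializes every sk_buff list operation; the unlocked __skb_* primitives in <linux/skbuff.h> are wrapped by locked variants that take it. A minimal sketch of that wrapping, assuming the 2.1-era spinlock API (illustrative, not part of this patch):

    void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
    {
        unsigned long flags;

        /* Disable local interrupts and take the global list lock,
         * then defer to the unlocked primitive. */
        spin_lock_irqsave(&skb_queue_lock, flags);
        __skb_queue_head(list, newsk);
        spin_unlock_irqrestore(&skb_queue_lock, flags);
    }
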
@@ -66,6 +74,8 @@
extern atomic_t ip_frag_mem;
+static kmem_cache_t *skbuff_head_cache;
+
/*
* Strings we don't want inline's duplicating
*/
@@ -87,138 +97,119 @@
#endif
}
-/*
- * Free an sk_buff. Release anything attached to the buffer.
- */
-
-void __kfree_skb(struct sk_buff *skb)
-{
- if (skb->list)
- printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
- "on a list (from %p).\n", __builtin_return_address(0));
-
- dst_release(skb->dst);
- if(skb->destructor)
- skb->destructor(skb);
- kfree_skbmem(skb);
-}
-
-/*
- * Allocate a new skbuff. We do this ourselves so we can fill in a few 'private'
- * fields and also do memory statistics to find all the [BEEP] leaks.
- *
- * Note: For now we put the header after the data to get better cache
- * usage. Once we have a good cache aware kmalloc this will cease
- * to be a good idea.
+/* Allocate a new skbuff. We do this ourselves so we can fill in a few
+ * 'private' fields and also do memory statistics to find all the
+ * [BEEP] leaks.
+ *
*/
struct sk_buff *alloc_skb(unsigned int size,int gfp_mask)
{
struct sk_buff *skb;
- unsigned char *bptr;
- int len;
+ u8 *data;
if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
static int count = 0;
if (++count < 5) {
printk(KERN_ERR "alloc_skb called nonatomically "
"from interrupt %p\n", __builtin_return_address(0));
- gfp_mask &= ~__GFP_WAIT;
}
+ gfp_mask &= ~__GFP_WAIT;
}
- /*
- * FIXME: We could do with an architecture dependent
- * 'alignment mask'.
- */
-
- /* Allow for alignments. Make a multiple of 16 bytes */
- size = (size + 15) & ~15;
- len = size;
-
- /* And stick the control itself on the end */
- size += sizeof(struct sk_buff);
-
- /*
- * Allocate some space
- */
-
- bptr = kmalloc(size,gfp_mask);
- if (bptr == NULL) {
- atomic_inc(&net_fails);
- return NULL;
- }
-
- /*
- * Now we play a little game with the caches. Linux kmalloc is
- * a bit cache dumb, in fact its just about maximally non
- * optimal for typical kernel buffers. We actually run faster
- * by doing the following. Which is to deliberately put the
- * skb at the _end_ not the start of the memory block.
+ /* Get the HEAD */
+ skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+ if (skb == NULL)
+ goto nohead;
+
+ /* Get the DATA. Size must match skb_add_mtu(). */
+ size = ((size + 15) & ~15);
+ data = kmalloc(size + sizeof(atomic_t), gfp_mask);
+ if (data == NULL)
+ goto nodata;
+
+ /* Note that this counter is useless now - you can just look in the
+ * skbuff_head entry in /proc/slabinfo. We keep it only for emergency
+ * cases.
*/
atomic_inc(&net_allocs);
-
- skb = (struct sk_buff *)(bptr + size) - 1;
- atomic_set(&skb->count, 1); /* only one reference to this */
- skb->data_skb = skb; /* and we're our own data skb */
+ skb->truesize = size;
+
+ atomic_inc(&net_skbcount);
+
+ /* Load the data pointers. */
+ skb->head = data;
+ skb->data = data;
+ skb->tail = data;
+ skb->end = data + size;
+
+ /* Set up other state */
+ skb->len = 0;
+ skb->is_clone = 0;
+ skb->cloned = 0;
+
+ atomic_set(&skb->users, 1);
+ atomic_set(skb_datarefp(skb), 1);
+ return skb;
+
+nodata:
+ kmem_cache_free(skbuff_head_cache, skb);
+nohead:
+ atomic_inc(&net_fails);
+ return NULL;
+}
+
+
+/*
+ * Slab constructor for a skb head.
+ */
+static inline void skb_headerinit(void *p, kmem_cache_t *cache,
+ unsigned long flags)
+{
+ struct sk_buff *skb = p;
+ skb->destructor = NULL;
skb->pkt_type = PACKET_HOST; /* Default type */
skb->pkt_bridged = 0; /* Not bridged */
skb->prev = skb->next = NULL;
skb->list = NULL;
skb->sk = NULL;
- skb->truesize=size;
skb->stamp.tv_sec=0; /* No idea about time */
skb->ip_summed = 0;
skb->security = 0; /* By default packets are insecure */
skb->dst = NULL;
- skb->destructor = NULL;
memset(skb->cb, 0, sizeof(skb->cb));
skb->priority = 0;
- atomic_inc(&net_skbcount);
- atomic_set(&skb->users, 1);
-
- /* Load the data pointers. */
- skb->head = bptr;
- skb->data = bptr;
- skb->tail = bptr;
- skb->end = bptr + len;
- skb->len = 0;
- skb->inclone = 0;
- return skb;
}
/*
- * Free an skbuff by memory
+ * Free an skbuff by memory without cleaning the state.
*/
-
-extern inline void __kfree_skbmem(struct sk_buff *skb)
+void kfree_skbmem(struct sk_buff *skb)
{
- /* don't do anything if somebody still uses us */
- if (atomic_dec_and_test(&skb->count)) {
+ if (!skb->cloned || atomic_dec_and_test(skb_datarefp(skb)))
kfree(skb->head);
- atomic_dec(&net_skbcount);
- }
+
+ kmem_cache_free(skbuff_head_cache, skb);
+ atomic_dec(&net_skbcount);
}
-void kfree_skbmem(struct sk_buff *skb)
+/*
+ * Free an sk_buff. Release anything attached to the buffer. Clean the state.
+ */
+
+void __kfree_skb(struct sk_buff *skb)
{
- void * addr = skb->head;
+ if (skb->list)
+ printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
+ "on a list (from %p).\n", __builtin_return_address(0));
- /* don't do anything if somebody still uses us */
- if (atomic_dec_and_test(&skb->count)) {
- int free_head = (skb->inclone != SKB_CLONE_INLINE);
-
- /* free the skb that contains the actual data if we've clone()'d */
- if (skb->data_skb != skb) {
- addr = skb;
- __kfree_skbmem(skb->data_skb);
- }
- if (free_head)
- kfree(addr);
- atomic_dec(&net_skbcount);
- }
+ dst_release(skb->dst);
+ if(skb->destructor)
+ skb->destructor(skb);
+ skb_headerinit(skb, NULL, 0); /* clean state */
+ kfree_skbmem(skb);
}
/*
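
[Editor's note] For readers without the matching header change: the reference count that alloc_skb() allocates with kmalloc(size + sizeof(atomic_t)) lives directly past the data area, and skb_datarefp() (defined in <linux/skbuff.h>, not shown in this diff) resolves to it. A plausible reconstruction, assuming that layout:

    /* Hypothetical sketch - the real helper lives in <linux/skbuff.h>.
     * skb->end points just past the usable data, so the shared data
     * reference count sits immediately after it. */
    static inline atomic_t *skb_datarefp(struct sk_buff *skb)
    {
        return (atomic_t *)(skb->end);
    }
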
@@ -228,32 +219,24 @@
struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
{
struct sk_buff *n;
- int inbuff = 0;
- if (!skb->inclone && skb_tailroom(skb) >= sizeof(struct sk_buff)) {
- n = ((struct sk_buff *) skb->end) - 1;
- skb->end -= sizeof(struct sk_buff);
- skb->inclone = SKB_CLONE_ORIG;
- inbuff = SKB_CLONE_INLINE;
- } else {
- n = kmalloc(sizeof(*n), gfp_mask);
- if (!n)
- return NULL;
- }
+ n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+ if (!n)
+ return NULL;
+
memcpy(n, skb, sizeof(*n));
- atomic_set(&n->count, 1);
- skb = skb->data_skb;
- atomic_inc(&skb->count);
+ atomic_inc(skb_datarefp(skb));
+ skb->cloned = 1;
+
atomic_inc(&net_allocs);
atomic_inc(&net_skbcount);
dst_clone(n->dst);
- n->data_skb = skb;
+ n->cloned = 1;
n->next = n->prev = NULL;
n->list = NULL;
n->sk = NULL;
- n->tries = 0;
+ n->is_clone = 1;
atomic_set(&n->users, 1);
- n->inclone = inbuff;
n->destructor = NULL;
return n;
}
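
[Editor's note] A clone is thus a second sk_buff head sharing the original data buffer; the buffer itself is freed only when the last head holding a data reference goes through kfree_skbmem(). An assumed caller-side example (not from the patch):

    struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

    if (clone != NULL)
        kfree_skb(clone);   /* drops one data reference */
    kfree_skb(skb);         /* last reference; skb->head is kfree()d */
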
@@ -287,6 +270,7 @@
skb_put(n,skb->len);
/* Copy the bytes */
memcpy(n->head,skb->head,skb->end-skb->head);
+ n->csum = skb->csum;
n->list=NULL;
n->sk=NULL;
n->when=skb->when;
@@ -302,7 +286,7 @@
n->ack_seq=skb->ack_seq;
memcpy(n->cb, skb->cb, sizeof(skb->cb));
n->used=skb->used;
- n->tries=0;
+ n->is_clone=0;
atomic_set(&n->users, 1);
n->pkt_type=skb->pkt_type;
n->stamp=skb->stamp;
@@ -321,7 +305,7 @@
* Allocate the copy buffer
*/
- n=alloc_skb(skb->truesize+newheadroom-headroom-sizeof(struct sk_buff), GFP_ATOMIC);
+ n=alloc_skb(skb->truesize+newheadroom-headroom, GFP_ATOMIC);
if(n==NULL)
return NULL;
@@ -352,7 +336,7 @@
n->end_seq=skb->end_seq;
n->ack_seq=skb->ack_seq;
n->used=skb->used;
- n->tries=0;
+ n->is_clone=0;
atomic_set(&n->users, 1);
n->pkt_type=skb->pkt_type;
n->stamp=skb->stamp;
@@ -360,4 +344,28 @@
n->security=skb->security;
return n;
+}
+
+#if 0
+/*
+ * Tune the memory allocator for a new MTU size.
+ */
+void skb_add_mtu(int mtu)
+{
+ /* Must match allocation in alloc_skb */
+ mtu = ((mtu + 15) & ~15) + sizeof(atomic_t);
+
+ kmem_add_cache_size(mtu);
+}
+#endif
+
+__initfunc(void skb_init(void))
+{
+ skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
+ sizeof(struct sk_buff),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ skb_headerinit, NULL);
+ if (!skbuff_head_cache)
+ panic("cannot create skbuff cache");
}
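
[Editor's note] One subtlety: slab constructors run when the cache populates a fresh page of objects, not on every kmem_cache_alloc(), so objects must be returned to the cache already in constructed state. That is why __kfree_skb() above re-runs skb_headerinit(skb, NULL, 0) before kfree_skbmem(). Sketched lifecycle (illustrative only; skb_headerinit() is internal to this file):

    struct sk_buff *skb;

    skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
    /* head arrives pre-initialized by the constructor */

    skb_headerinit(skb, NULL, 0);            /* restore constructed state */
    kmem_cache_free(skbuff_head_cache, skb); /* back to the cache */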