patch-2.1.80 linux/net/core/skbuff.c
- Lines: 67
- Date: Thu Jan 15 21:12:27 1998
- Orig file: v2.1.79/linux/net/core/skbuff.c
- Orig date: Mon Jan 12 22:09:24 1998
diff -u --recursive --new-file v2.1.79/linux/net/core/skbuff.c linux/net/core/skbuff.c
@@ -113,18 +113,18 @@
* to be a good idea.
*/
-struct sk_buff *alloc_skb(unsigned int size,int priority)
+struct sk_buff *alloc_skb(unsigned int size,int gfp_mask)
{
struct sk_buff *skb;
unsigned char *bptr;
int len;
- if (in_interrupt() && priority!=GFP_ATOMIC) {
+ if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
static int count = 0;
if (++count < 5) {
printk(KERN_ERR "alloc_skb called nonatomically "
"from interrupt %p\n", __builtin_return_address(0));
- priority = GFP_ATOMIC;
+ gfp_mask &= ~__GFP_WAIT;
}
}
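The hunk above converts alloc_skb()'s second argument from the old all-or-nothing priority value to a gfp bit mask: instead of forcing GFP_ATOMIC, the interrupt-context check now just clears the __GFP_WAIT bit, which preserves any other flag bits the caller set. A minimal standalone sketch of that bit logic (the GFP_* values here are illustrative, not the kernel's real definitions):

#include <stdio.h>

#define __GFP_WAIT  0x01            /* caller may sleep (illustrative value) */
#define GFP_ATOMIC  0x00            /* no __GFP_WAIT bit set */
#define GFP_KERNEL  (__GFP_WAIT)

static int in_interrupt_sim = 1;    /* pretend we are in interrupt context */

int main(void)
{
	int gfp_mask = GFP_KERNEL;

	/* Same test as the patch: a mask that allows sleeping is illegal
	 * in interrupt context, so strip the wait bit instead of failing. */
	if (in_interrupt_sim && (gfp_mask & __GFP_WAIT))
		gfp_mask &= ~__GFP_WAIT;

	printf("sanitized mask: %#x\n", gfp_mask);  /* __GFP_WAIT cleared: prints 0 */
	return 0;
}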
@@ -144,7 +144,7 @@
* Allocate some space
*/
- bptr = kmalloc(size,priority);
+ bptr = kmalloc(size,gfp_mask);
if (bptr == NULL) {
atomic_inc(&net_fails);
return NULL;
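This hunk simply threads the mask through to kmalloc(), but it also shows the failure-accounting idiom: a failed allocation is counted (atomic_inc(&net_fails)) before NULL is returned, so every caller sees the same cheap NULL check. A runnable stand-in, with net_fails_sim and alloc_or_count() as illustrative names rather than kernel symbols:

#include <stdio.h>
#include <stdlib.h>

static unsigned long net_fails_sim;     /* stand-in for the kernel's net_fails */

void *alloc_or_count(size_t size)
{
	void *p = malloc(size);
	if (p == NULL) {
		net_fails_sim++;         /* record the failure, then return NULL */
		return NULL;
	}
	return p;
}

int main(void)
{
	void *buf = alloc_or_count(64);
	printf("failures so far: %lu\n", net_fails_sim);
	free(buf);
	return 0;
}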
@@ -226,7 +226,7 @@
* Duplicate an sk_buff. The new one is not owned by a socket.
*/
-struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
+struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
{
struct sk_buff *n;
int inbuff = 0;
@@ -237,7 +237,7 @@
skb->inclone = SKB_CLONE_ORIG;
inbuff = SKB_CLONE_INLINE;
} else {
- n = kmalloc(sizeof(*n), priority);
+ n = kmalloc(sizeof(*n), gfp_mask);
if (!n)
return NULL;
}
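skb_clone() shares the packet data and duplicates only the sk_buff header; the hunk shows the 2.1.x inline-clone path, which reuses spare room inside the original buffer (marking SKB_CLONE_ORIG / SKB_CLONE_INLINE) and falls back to kmalloc() only when that room is already taken. A rough standalone illustration of that placement choice, using an invented struct hdr rather than the real struct sk_buff:

#include <stdlib.h>

struct hdr { int inclone; int refs; }; /* illustrative header, not sk_buff */

struct hdr *clone_hdr(struct hdr *orig, struct hdr *spare_room)
{
	struct hdr *n;

	if (spare_room != NULL) {
		n = spare_room;          /* free room inside the original buffer */
	} else {
		n = malloc(sizeof(*n));  /* separate header, as kmalloc() above */
		if (n == NULL)
			return NULL;
	}
	*n = *orig;                      /* copy the header, share the data */
	return n;
}

int main(void)
{
	struct hdr orig = { 0, 1 };
	struct hdr *n = clone_hdr(&orig, NULL);  /* forces the malloc path */
	free(n);
	return 0;
}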
@@ -263,7 +263,7 @@
* This is slower, and copies the whole data area
*/
-struct sk_buff *skb_copy(struct sk_buff *skb, int priority)
+struct sk_buff *skb_copy(struct sk_buff *skb, int gfp_mask)
{
struct sk_buff *n;
unsigned long offset;
@@ -272,7 +272,7 @@
* Allocate the copy buffer
*/
- n=alloc_skb(skb->end - skb->head, priority);
+ n=alloc_skb(skb->end - skb->head, gfp_mask);
if(n==NULL)
return NULL;
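skb_copy(), by contrast, duplicates the data area as well. Sizing the new buffer as skb->end - skb->head covers the whole allocated region, so the copy inherits the original's headroom and tailroom rather than just the bytes currently in use. A plain-C sketch of that sizing, with copy_buffer() as a hypothetical stand-in:

#include <stdlib.h>
#include <string.h>

unsigned char *copy_buffer(const unsigned char *head, const unsigned char *end)
{
	size_t len = (size_t)(end - head);   /* full buffer, not just live data */
	unsigned char *copy = malloc(len);
	if (copy != NULL)
		memcpy(copy, head, len);
	return copy;
}

int main(void)
{
	unsigned char buf[128] = "payload";  /* headroom + data + tailroom */
	unsigned char *dup = copy_buffer(buf, buf + sizeof(buf));
	free(dup);
	return 0;
}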