| author | Eric Dumazet <eric.dumazet@gmail.com> | 2009-10-13 05:34:20 +0000 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2009-10-13 11:48:18 -0700 |
| commit | 89d71a66c40d629e3b1285def543ab1425558cd5 (patch) | |
| tree | 45159e85418170fe36e4e023d9617693625d1740 | /drivers/net/dl2k.c |
| parent | bff1c09640b3006bca711e18ef08a5fb955ad9b5 (diff) | |
net: Use netdev_alloc_skb_ip_align()
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
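For context, the helper this patch switches to had been added to include/linux/skbuff.h shortly before this commit; its definition at the time was roughly the following sketch (shown for reference only, it is not part of this diff):

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/*
 * Reference sketch of the helper adopted by this patch, approximating its
 * definition in include/linux/skbuff.h around v2.6.32 (not part of this diff).
 *
 * NET_IP_ALIGN is 2 on most architectures: reserving 2 bytes ahead of the
 * 14-byte Ethernet header lands the IP header on a 16-byte-aligned offset,
 * which is exactly what the open-coded skb_reserve(skb, 2) calls removed
 * below were doing by hand.
 */
static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
							 unsigned int length)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}
```

Because NET_IP_ALIGN is a compile-time constant (x86, for example, defines it as 0, since unaligned loads are cheap there), both the extra allocation padding and the skb_reserve() call compile away where they are not needed, a distinction the hard-coded skb_reserve(skb, 2) in each driver could not make.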
Diffstat (limited to 'drivers/net/dl2k.c')
| -rw-r--r-- | drivers/net/dl2k.c | 18 |

1 file changed, 7 insertions(+), 11 deletions(-)
```diff
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 7fa7a907f134..ce8fef184f2c 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -505,7 +505,8 @@ rio_timer (unsigned long data)
 		entry = np->old_rx % RX_RING_SIZE;
 		/* Dropped packets don't need to re-allocate */
 		if (np->rx_skbuff[entry] == NULL) {
-			skb = netdev_alloc_skb (dev, np->rx_buf_sz);
+			skb = netdev_alloc_skb_ip_align(dev,
+							np->rx_buf_sz);
 			if (skb == NULL) {
 				np->rx_ring[entry].fraginfo = 0;
 				printk (KERN_INFO
@@ -514,8 +515,6 @@ rio_timer (unsigned long data)
 				break;
 			}
 			np->rx_skbuff[entry] = skb;
-			/* 16 byte align the IP header */
-			skb_reserve (skb, 2);
 			np->rx_ring[entry].fraginfo =
 			    cpu_to_le64 (pci_map_single
 					 (np->pdev, skb->data, np->rx_buf_sz,
@@ -576,7 +575,9 @@ alloc_list (struct net_device *dev)
 	/* Allocate the rx buffers */
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		/* Allocated fixed size of skbuff */
-		struct sk_buff *skb = netdev_alloc_skb (dev, np->rx_buf_sz);
+		struct sk_buff *skb;
+
+		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
 		np->rx_skbuff[i] = skb;
 		if (skb == NULL) {
 			printk (KERN_ERR
@@ -584,7 +585,6 @@ alloc_list (struct net_device *dev)
 				dev->name);
 			break;
 		}
-		skb_reserve (skb, 2);	/* 16 byte align the IP header. */
 		/* Rubicon now supports 40 bits of addressing space. */
 		np->rx_ring[i].fraginfo =
 		    cpu_to_le64 ( pci_map_single (
@@ -871,13 +871,11 @@ receive_packet (struct net_device *dev)
 						  PCI_DMA_FROMDEVICE);
 				skb_put (skb = np->rx_skbuff[entry], pkt_len);
 				np->rx_skbuff[entry] = NULL;
-			} else if ((skb = netdev_alloc_skb(dev, pkt_len + 2))) {
+			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
 				pci_dma_sync_single_for_cpu(np->pdev,
 							    desc_to_dma(desc),
 							    np->rx_buf_sz,
 							    PCI_DMA_FROMDEVICE);
-				/* 16 byte align the IP header */
-				skb_reserve (skb, 2);
 				skb_copy_to_linear_data (skb,
 							 np->rx_skbuff[entry]->data,
 							 pkt_len);
@@ -907,7 +905,7 @@ receive_packet (struct net_device *dev)
 		struct sk_buff *skb;
 		/* Dropped packets don't need to re-allocate */
 		if (np->rx_skbuff[entry] == NULL) {
-			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
+			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
 			if (skb == NULL) {
 				np->rx_ring[entry].fraginfo = 0;
 				printk (KERN_INFO
@@ -917,8 +915,6 @@ receive_packet (struct net_device *dev)
 				break;
 			}
 			np->rx_skbuff[entry] = skb;
-			/* 16 byte align the IP header */
-			skb_reserve (skb, 2);
 			np->rx_ring[entry].fraginfo =
 			    cpu_to_le64 (pci_map_single
 					 (np->pdev, skb->data, np->rx_buf_sz,
```