author    Arnaldo Carvalho de Melo <acme@redhat.com>    2007-03-27 18:55:52 -0300
committer David S. Miller <davem@sunset.davemloft.net>  2007-04-25 22:28:23 -0700
commit    d626f62b11e00c16e81e4308ab93d3f13551812a (patch)
tree      fac4af6ced853755e12fc709d55f0c2bec51265d /net/core/skbuff.c
parent    2a123b86e2b242a4a6db990d2851d45e192f88e5 (diff)
[SK_BUFF]: Introduce skb_copy_from_linear_data{_offset}
To clearly state the intent of copying from linear sk_buffs; _offset is an overly long variant, but interesting for the sake of saving some bytes.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
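For reference, the two helpers this conversion relies on are thin inline wrappers around memcpy() over the skb's linear (header) data. A minimal sketch, with the signatures inferred from the call sites converted below and assuming the definitions live in include/linux/skbuff.h alongside the other linear-data accessors:

/*
 * Copy len bytes out of the skb's linear data area, starting at
 * skb->data (plus offset for the _offset variant).  These only cover
 * the linear part; non-linear data still needs skb_copy_bits().
 */
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						     const int offset,
						     void *to,
						     const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}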
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--  net/core/skbuff.c | 17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f16c72204cf6..17c6bb5927b6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -576,7 +576,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
/* Set the tail pointer and length */
skb_put(n, skb_headlen(skb));
/* Copy the bytes */
- memcpy(n->data, skb->data, n->len);
+ skb_copy_from_linear_data(skb, n->data, n->len);
n->csum = skb->csum;
n->ip_summed = skb->ip_summed;
@@ -1043,7 +1043,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
if ((copy = start - offset) > 0) {
if (copy > len)
copy = len;
- memcpy(to, skb->data + offset, copy);
+ skb_copy_from_linear_data_offset(skb, offset, to, copy);
if ((len -= copy) == 0)
return 0;
offset += copy;
@@ -1362,7 +1362,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
BUG_ON(csstart > skb_headlen(skb));
- memcpy(to, skb->data, csstart);
+ skb_copy_from_linear_data(skb, to, csstart);
csum = 0;
if (csstart != skb->len)
@@ -1536,8 +1536,8 @@ static inline void skb_split_inside_header(struct sk_buff *skb,
{
int i;
- memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);
-
+ skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
+ pos - len);
/* And move data appendix as is. */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -1927,8 +1927,8 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
skb_set_network_header(nskb, skb->mac_len);
nskb->transport_header = (nskb->network_header +
skb_network_header_len(skb));
- memcpy(skb_put(nskb, doffset), skb->data, doffset);
-
+ skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
+ doffset);
if (!sg) {
nskb->csum = skb_copy_and_csum_bits(skb, offset,
skb_put(nskb, len),
@@ -1941,7 +1941,8 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
nskb->ip_summed = CHECKSUM_PARTIAL;
nskb->csum = skb->csum;
- memcpy(skb_put(nskb, hsize), skb->data + offset, hsize);
+ skb_copy_from_linear_data_offset(skb, offset,
+ skb_put(nskb, hsize), hsize);
while (pos < offset + len) {
BUG_ON(i >= nfrags);