From 67efb746c492c25fac4d77aa16b808a8aa26089d Mon Sep 17 00:00:00 2001 From: Samuel Thibault Date: Sun, 19 Feb 2023 22:24:32 +0100 Subject: pfinet: Align packets The Ethernet header is 14 bytes long, and thus leads to IP header misalignment. This uses skb_reserve to introduce 2 bytes of padding to realign IP headers. --- pfinet/ethernet.c | 3 ++- pfinet/linux-src/include/linux/skbuff.h | 24 ++++++++++++++++++++++++ pfinet/tunnel.c | 3 ++- 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/pfinet/ethernet.c b/pfinet/ethernet.c index c317820c..82c318b2 100644 --- a/pfinet/ethernet.c +++ b/pfinet/ethernet.c @@ -153,7 +153,8 @@ ethernet_demuxer (mach_msg_header_t *inp, + msg->packet_type.msgt_number - sizeof (struct packet_header); pthread_mutex_lock (&net_bh_lock); - skb = alloc_skb (datalen, GFP_ATOMIC); + skb = alloc_skb (NET_IP_ALIGN + datalen, GFP_ATOMIC); + skb_reserve(skb, NET_IP_ALIGN); skb_put (skb, datalen); skb->dev = dev; diff --git a/pfinet/linux-src/include/linux/skbuff.h b/pfinet/linux-src/include/linux/skbuff.h index 00f9ab2a..46eb995e 100644 --- a/pfinet/linux-src/include/linux/skbuff.h +++ b/pfinet/linux-src/include/linux/skbuff.h @@ -498,6 +498,30 @@ static __inline__ int skb_tailroom(struct sk_buff *skb) return skb->end-skb->tail; } +/* + * CPUs often take a performance hit when accessing unaligned memory + * locations. The actual performance hit varies, it can be small if the + * hardware handles it or large if we have to take an exception and fix it + * in software. + * + * Since an ethernet header is 14 bytes network drivers often end up with + * the IP header at an unaligned offset. The IP header can be aligned by + * shifting the start of the packet by 2 bytes. Drivers should do this + * with: + * + * skb_reserve(skb, NET_IP_ALIGN); + * + * The downside to this alignment of the IP header is that the DMA is now + * unaligned.
On some architectures the cost of an unaligned DMA is high + * and this cost outweighs the gains made by aligning the IP header. + * + * Since this trade off varies between architectures, we allow NET_IP_ALIGN + * to be overridden. + */ +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + static __inline__ void skb_reserve(struct sk_buff *skb, unsigned int len) { skb->data+=len; diff --git a/pfinet/tunnel.c b/pfinet/tunnel.c index e11ab670..b519ebd1 100644 --- a/pfinet/tunnel.c +++ b/pfinet/tunnel.c @@ -391,7 +391,8 @@ trivfs_S_io_write (struct trivfs_protid *cred, pthread_mutex_lock (&tdev->lock); pthread_mutex_lock (&net_bh_lock); - skb = alloc_skb (datalen, GFP_ATOMIC); + skb = alloc_skb (NET_IP_ALIGN + datalen, GFP_ATOMIC); + skb_reserve(skb, NET_IP_ALIGN); skb->len = datalen; skb->dev = &tdev->dev; -- cgit v1.2.3