path: root/include/net/vxlan.h
author		Jiri Benc <jbenc@redhat.com>	2016-03-21 19:39:18 +0300
committer	David S. Miller <davem@davemloft.net>	2016-03-21 20:30:02 +0300
commit		5692d7ea4183b8dd5a49d73d6a4436aa22929b7b (patch)
tree		6bb984ea0f48325d72a53b7de1e8698aa73eb7e5 /include/net/vxlan.h
parent		ed49e650371008b0e00c8004cc2ca93055740f78 (diff)
download	linux-5692d7ea4183b8dd5a49d73d6a4436aa22929b7b.tar.xz
vxlan: fix sparse warnings
Sparse reports false positives for the header manipulation inlines. Annotate them correctly.

Tested with sparse on both a little endian and a big endian machine.

Fixes: 54bfd872bf16d ("vxlan: keep flags and vni in network byte order")
Reported-by: kbuild test robot <fengguang.wu@intel.com>
Signed-off-by: Jiri Benc <jbenc@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
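For illustration only, the standalone sketch below (not code from this patch; the __CHECKER__ stubs are assumed here to mirror the kernel's sparse annotations, and the vni_shift_* names are made up) shows the kind of false positive sparse emits for a byte-order-preserving shift on a __be32, and how casting through __force marks the reinterpretation as intentional:

#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int u32;
typedef u32 __bitwise __be32;

/* Unannotated: sparse warns that the restricted __be32 degrades to an
 * integer, even though the shift deliberately operates on the raw bits. */
static inline __be32 vni_shift_unannotated(__be32 vni_field)
{
	return vni_field >> 8;
}

/* Annotated: the __force casts tell sparse the type reinterpretation is
 * intentional, so no warning is emitted; a regular compiler generates the
 * same code for both variants since the attributes expand to nothing. */
static inline __be32 vni_shift_annotated(__be32 vni_field)
{
	return (__force __be32)((__force u32)vni_field >> 8);
}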
Diffstat (limited to 'include/net/vxlan.h')
-rw-r--r--	include/net/vxlan.h	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index a763c96ecde4..73ed2e951c02 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -271,36 +271,36 @@ static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb)
 static inline __be32 vxlan_vni(__be32 vni_field)
 {
 #if defined(__BIG_ENDIAN)
-	return vni_field >> 8;
+	return (__force __be32)((__force u32)vni_field >> 8);
 #else
-	return (vni_field & VXLAN_VNI_MASK) << 8;
+	return (__force __be32)((__force u32)(vni_field & VXLAN_VNI_MASK) << 8);
 #endif
 }
 
 static inline __be32 vxlan_vni_field(__be32 vni)
 {
 #if defined(__BIG_ENDIAN)
-	return vni << 8;
+	return (__force __be32)((__force u32)vni << 8);
 #else
-	return vni >> 8;
+	return (__force __be32)((__force u32)vni >> 8);
 #endif
 }
 
 static inline __be32 vxlan_tun_id_to_vni(__be64 tun_id)
 {
 #if defined(__BIG_ENDIAN)
-	return tun_id;
+	return (__force __be32)tun_id;
 #else
-	return tun_id >> 32;
+	return (__force __be32)((__force u64)tun_id >> 32);
 #endif
 }
 
 static inline __be64 vxlan_vni_to_tun_id(__be32 vni)
 {
 #if defined(__BIG_ENDIAN)
-	return (__be64)vni;
+	return (__force __be64)vni;
 #else
-	return (__be64)vni << 32;
+	return (__force __be64)((u64)(__force u32)vni << 32);
 #endif
 }
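For context, a caller of the annotated helpers might look like the hypothetical sketch below (not part of this patch); vxlan_hdr(), vxlan_vni() and the vx_vni field come from include/net/vxlan.h, while vni_matches() is invented for illustration:

#include <linux/skbuff.h>
#include <net/vxlan.h>

/* Hypothetical helper: extract the VNI from a received VXLAN header and
 * compare it against an expected VNI. Both values stay in network byte
 * order (__be32), so no byte swap is needed on either endianness and
 * sparse sees consistent types throughout. */
static bool vni_matches(struct sk_buff *skb, __be32 expected_vni)
{
	return vxlan_vni(vxlan_hdr(skb)->vx_vni) == expected_vni;
}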