Diffstat (limited to 'drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c'):
 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c | 142 ++++++++++++++++---
 1 file changed, 134 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 0ac2ff828320..cecf72be5102 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
*
* RMNET Data MAP protocol
*/
@@ -8,6 +8,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
+#include <linux/bitfield.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
@@ -250,12 +251,62 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
}
#endif
+static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
+ struct rmnet_port *port,
+ struct net_device *orig_dev)
+{
+ struct rmnet_priv *priv = netdev_priv(orig_dev);
+ struct rmnet_map_v5_csum_header *ul_header;
+
+ ul_header = skb_push(skb, sizeof(*ul_header));
+ memset(ul_header, 0, sizeof(*ul_header));
+ ul_header->header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ void *iph = ip_hdr(skb);
+ __sum16 *check;
+ void *trans;
+ u8 proto;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ u16 ip_len = ((struct iphdr *)iph)->ihl * 4;
+
+ proto = ((struct iphdr *)iph)->protocol;
+ trans = iph + ip_len;
+ } else if (IS_ENABLED(CONFIG_IPV6) &&
+ skb->protocol == htons(ETH_P_IPV6)) {
+ u16 ip_len = sizeof(struct ipv6hdr);
+
+ proto = ((struct ipv6hdr *)iph)->nexthdr;
+ trans = iph + ip_len;
+ } else {
+ priv->stats.csum_err_invalid_ip_version++;
+ goto sw_csum;
+ }
+
+ check = rmnet_map_get_csum_field(proto, trans);
+ if (check) {
+ skb->ip_summed = CHECKSUM_NONE;
+ /* Ask for checksum offloading */
+ ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
+ priv->stats.csum_hw++;
+ return;
+ }
+ }
+
+sw_csum:
+ priv->stats.csum_sw++;
+}
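
The header_info encoding above uses the byte-sized field helpers from <linux/bitfield.h>, which is why the patch adds that include. A minimal userspace sketch of the packing u8_encode_bits() performs, assuming MAPV5_HDRINFO_HDR_TYPE_FMASK is GENMASK(7, 1) and the csum-offload header type is 0x2 (both defined in rmnet_map.h, not in this diff):

#include <stdint.h>
#include <stdio.h>

/* Assumed values mirroring rmnet_map.h; not authoritative */
#define MAPV5_HDRINFO_HDR_TYPE_FMASK		0xfe	/* GENMASK(7, 1) */
#define RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD	0x2

/* Userspace stand-in for the kernel's u8_encode_bits() */
static uint8_t u8_encode_bits(uint8_t val, uint8_t mask)
{
	return (uint8_t)((val << __builtin_ctz(mask)) & mask);
}

int main(void)
{
	uint8_t header_info =
		u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
			       MAPV5_HDRINFO_HDR_TYPE_FMASK);

	printf("header_info = 0x%02x\n", header_info);	/* prints 0x04 */
	return 0;
}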
+
/* Adds MAP header to front of skb->data
* Padding is calculated and set appropriately in MAP header. Mux ID is
* initialized to 0.
*/
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
- int hdrlen, int pad)
+ int hdrlen,
+ struct rmnet_port *port,
+ int pad)
{
struct rmnet_map_header *map_header;
u32 padding, map_datalen;
@@ -266,6 +317,10 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
skb_push(skb, sizeof(struct rmnet_map_header));
memset(map_header, 0, sizeof(struct rmnet_map_header));
+ /* Set next_hdr bit for csum offload packets */
+ if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
+ map_header->flags |= MAP_NEXT_HEADER_FLAG;
+
if (pad == RMNET_MAP_NO_PAD_BYTES) {
map_header->pkt_len = htons(map_datalen);
return map_header;
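
For reference, the 4-byte MAP header being built here is defined in rmnet_map.h; a sketch of its layout, with flag bit positions as assumed from the MAP_CMD_FLAG / MAP_NEXT_HEADER_FLAG usage in this patch:

struct rmnet_map_header {
	u8     flags;		/* MAP_CMD_FLAG (bit 7),
				 * MAP_NEXT_HEADER_FLAG (bit 6),
				 * MAP_PAD_LEN_MASK (bits 5:0)
				 */
	u8     mux_id;
	__be16 pkt_len;		/* length of packet, including pad */
} __aligned(1);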
@@ -300,8 +355,11 @@ done:
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port)
{
+ struct rmnet_map_v5_csum_header *next_hdr = NULL;
struct rmnet_map_header *maph;
+ void *data = skb->data;
struct sk_buff *skbn;
+ u8 nexthdr_type;
u32 packet_len;
if (skb->len == 0)
@@ -310,8 +368,18 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
maph = (struct rmnet_map_header *)skb->data;
packet_len = ntohs(maph->pkt_len) + sizeof(*maph);
- if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+ if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+ } else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
+ if (!(maph->flags & MAP_CMD_FLAG)) {
+ packet_len += sizeof(*next_hdr);
+ if (maph->flags & MAP_NEXT_HEADER_FLAG)
+ next_hdr = data + sizeof(*maph);
+ else
+ /* MAPv5 data pkt without csum hdr is invalid */
+ return NULL;
+ }
+ }
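
Since pkt_len in the MAP header covers only payload plus pad, the total length of one MAPv5 data packet inside the aggregate adds both the MAP header and the checksum header. A worked example, assuming both structures are 4 bytes:

/* ntohs(maph->pkt_len) == 1400  (payload + pad)
 *
 *   packet_len = 1400
 *              + sizeof(struct rmnet_map_header)          -> 4 bytes
 *              + sizeof(struct rmnet_map_v5_csum_header)  -> 4 bytes
 *              = 1408
 */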
if (((int)skb->len - (int)packet_len) < 0)
return NULL;
@@ -320,6 +388,13 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
if (!maph->pkt_len)
return NULL;
+ if (next_hdr) {
+ nexthdr_type = u8_get_bits(next_hdr->header_info,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+ if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
+ return NULL;
+ }
+
skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
if (!skbn)
return NULL;
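
The caller drains an aggregate frame by calling rmnet_map_deaggregate() until it returns NULL; a sketch of that loop (handler names follow the existing rmnet ingress path in rmnet_handlers.c and are shown only for orientation):

static void rmnet_map_ingress_handler(struct sk_buff *skb,
				      struct rmnet_port *port)
{
	struct sk_buff *skbn;

	/* Each iteration copies out one inner packet; NULL means the
	 * aggregate is exhausted or a malformed packet was found.
	 */
	while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
		__rmnet_map_ingress_handler(skbn, port);

	consume_skb(skb);
}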
@@ -372,11 +447,8 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
return 0;
}
-/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
- * packets that are supported for UL checksum offload.
- */
-void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
- struct net_device *orig_dev)
+static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_ul_csum_header *ul_header;
@@ -395,10 +467,12 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_IP)) {
rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+ priv->stats.csum_hw++;
return;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+ priv->stats.csum_hw++;
return;
#else
priv->stats.csum_err_invalid_ip_version++;
@@ -414,3 +488,55 @@ sw_csum:
priv->stats.csum_sw++;
}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct rmnet_port *port,
+ struct net_device *orig_dev,
+ int csum_type)
+{
+ switch (csum_type) {
+ case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
+ rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
+ break;
+ case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
+ rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
+ break;
+ default:
+ break;
+ }
+}
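
A sketch of how an egress call site might select csum_type and order the two headers; the surrounding variable names are assumptions, only the two function signatures come from this patch:

	int csum_type = 0;

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
	else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;

	if (csum_type)
		rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
						 csum_type);

	/* The MAP header is pushed after the optional v5 csum header,
	 * so it ends up outermost on the wire.
	 */
	map_header = rmnet_map_add_map_header(skb, additional_header_len,
					      port, 0);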
+
+/* Process a MAPv5 packet header */
+int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
+ u16 len)
+{
+ struct rmnet_priv *priv = netdev_priv(skb->dev);
+ struct rmnet_map_v5_csum_header *next_hdr;
+ u8 nexthdr_type;
+
+ next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data +
+ sizeof(struct rmnet_map_header));
+
+ nexthdr_type = u8_get_bits(next_hdr->header_info,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+
+ if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
+ return -EINVAL;
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+ priv->stats.csum_sw++;
+ } else if (next_hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG) {
+ priv->stats.csum_ok++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ priv->stats.csum_valid_unset++;
+ }
+
+ /* Pull csum v5 header */
+ skb_pull(skb, sizeof(*next_hdr));
+
+ return 0;
+}
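
On ingress, this function slots in beside the existing MAPv4 trailer path. A sketch of the dispatch an ingress handler might perform after this change (the flag tests and labels here are assumptions based on this patch, not code from it):

	/* Validate and strip checksum-offload metadata for one packet */
	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
		if (rmnet_map_process_next_hdr_packet(skb, len))
			goto free_skb;	/* unsupported next-header type */
	} else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}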