/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2018 6WIND S.A.
* Copyright 2018 Mellanox Technologies, Ltd
*/
#include <assert.h>
#include <errno.h>
#include <libmnl/libmnl.h>
#include <linux/gen_stats.h>
#include <linux/if_ether.h>
#include <linux/netlink.h>
#include <linux/pkt_cls.h>
#include <linux/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/tc_act/tc_gact.h>
#include <linux/tc_act/tc_mirred.h>
#include <netinet/in.h>
#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_autoconf.h"
#ifdef HAVE_TC_ACT_VLAN
#include <linux/tc_act/tc_vlan.h>
#else /* HAVE_TC_ACT_VLAN */
#define TCA_VLAN_ACT_POP 1
#define TCA_VLAN_ACT_PUSH 2
#define TCA_VLAN_ACT_MODIFY 3
#define TCA_VLAN_PARMS 2
#define TCA_VLAN_PUSH_VLAN_ID 3
#define TCA_VLAN_PUSH_VLAN_PROTOCOL 4
#define TCA_VLAN_PAD 5
#define TCA_VLAN_PUSH_VLAN_PRIORITY 6
struct tc_vlan {
tc_gen;
int v_action;
};
#endif /* HAVE_TC_ACT_VLAN */
#ifdef HAVE_TC_ACT_PEDIT
#include <linux/tc_act/tc_pedit.h>
#else /* HAVE_TC_ACT_PEDIT */
enum {
TCA_PEDIT_UNSPEC,
TCA_PEDIT_TM,
TCA_PEDIT_PARMS,
TCA_PEDIT_PAD,
TCA_PEDIT_PARMS_EX,
TCA_PEDIT_KEYS_EX,
TCA_PEDIT_KEY_EX,
__TCA_PEDIT_MAX
};
enum {
TCA_PEDIT_KEY_EX_HTYPE = 1,
TCA_PEDIT_KEY_EX_CMD = 2,
__TCA_PEDIT_KEY_EX_MAX
};
enum pedit_header_type {
TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = 0,
TCA_PEDIT_KEY_EX_HDR_TYPE_ETH = 1,
TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 = 2,
TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 = 3,
TCA_PEDIT_KEY_EX_HDR_TYPE_TCP = 4,
TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5,
__PEDIT_HDR_TYPE_MAX,
};
enum pedit_cmd {
TCA_PEDIT_KEY_EX_CMD_SET = 0,
TCA_PEDIT_KEY_EX_CMD_ADD = 1,
__PEDIT_CMD_MAX,
};
struct tc_pedit_key {
__u32 mask; /* AND */
__u32 val; /* XOR */
__u32 off; /* offset */
__u32 at;
__u32 offmask;
__u32 shift;
};
__extension__
struct tc_pedit_sel {
tc_gen;
unsigned char nkeys;
unsigned char flags;
struct tc_pedit_key keys[0];
};
#endif /* HAVE_TC_ACT_PEDIT */
#ifdef HAVE_TC_ACT_TUNNEL_KEY
#include <linux/tc_act/tc_tunnel_key.h>
#ifndef HAVE_TCA_TUNNEL_KEY_ENC_DST_PORT
#define TCA_TUNNEL_KEY_ENC_DST_PORT 9
#endif
#ifndef HAVE_TCA_TUNNEL_KEY_NO_CSUM
#define TCA_TUNNEL_KEY_NO_CSUM 10
#endif
#ifndef HAVE_TCA_TUNNEL_KEY_ENC_TOS
#define TCA_TUNNEL_KEY_ENC_TOS 12
#endif
#ifndef HAVE_TCA_TUNNEL_KEY_ENC_TTL
#define TCA_TUNNEL_KEY_ENC_TTL 13
#endif
#else /* HAVE_TC_ACT_TUNNEL_KEY */
#define TCA_ACT_TUNNEL_KEY 17
#define TCA_TUNNEL_KEY_ACT_SET 1
#define TCA_TUNNEL_KEY_ACT_RELEASE 2
#define TCA_TUNNEL_KEY_PARMS 2
#define TCA_TUNNEL_KEY_ENC_IPV4_SRC 3
#define TCA_TUNNEL_KEY_ENC_IPV4_DST 4
#define TCA_TUNNEL_KEY_ENC_IPV6_SRC 5
#define TCA_TUNNEL_KEY_ENC_IPV6_DST 6
#define TCA_TUNNEL_KEY_ENC_KEY_ID 7
#define TCA_TUNNEL_KEY_ENC_DST_PORT 9
#define TCA_TUNNEL_KEY_NO_CSUM 10
#define TCA_TUNNEL_KEY_ENC_TOS 12
#define TCA_TUNNEL_KEY_ENC_TTL 13
struct tc_tunnel_key {
tc_gen;
int t_action;
};
#endif /* HAVE_TC_ACT_TUNNEL_KEY */
/* Normally found in linux/netlink.h. */
#ifndef NETLINK_CAP_ACK
#define NETLINK_CAP_ACK 10
#endif
/* Normally found in linux/pkt_sched.h. */
#ifndef TC_H_MIN_INGRESS
#define TC_H_MIN_INGRESS 0xfff2u
#endif
/* Normally found in linux/pkt_cls.h. */
#ifndef TCA_CLS_FLAGS_SKIP_SW
#define TCA_CLS_FLAGS_SKIP_SW (1 << 1)
#endif
#ifndef TCA_CLS_FLAGS_IN_HW
#define TCA_CLS_FLAGS_IN_HW (1 << 2)
#endif
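/* Fallback numeric values for TC attributes missing from older kernel headers. */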
#ifndef HAVE_TCA_CHAIN
#define TCA_CHAIN 11
#endif
#ifndef HAVE_TCA_FLOWER_ACT
#define TCA_FLOWER_ACT 3
#endif
#ifndef HAVE_TCA_FLOWER_FLAGS
#define TCA_FLOWER_FLAGS 22
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ETH_TYPE
#define TCA_FLOWER_KEY_ETH_TYPE 8
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST
#define TCA_FLOWER_KEY_ETH_DST 4
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST_MASK
#define TCA_FLOWER_KEY_ETH_DST_MASK 5
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC
#define TCA_FLOWER_KEY_ETH_SRC 6
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK
#define TCA_FLOWER_KEY_ETH_SRC_MASK 7
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IP_PROTO
#define TCA_FLOWER_KEY_IP_PROTO 9
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC
#define TCA_FLOWER_KEY_IPV4_SRC 10
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK
#define TCA_FLOWER_KEY_IPV4_SRC_MASK 11
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST
#define TCA_FLOWER_KEY_IPV4_DST 12
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK
#define TCA_FLOWER_KEY_IPV4_DST_MASK 13
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC
#define TCA_FLOWER_KEY_IPV6_SRC 14
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK
#define TCA_FLOWER_KEY_IPV6_SRC_MASK 15
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST
#define TCA_FLOWER_KEY_IPV6_DST 16
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK
#define TCA_FLOWER_KEY_IPV6_DST_MASK 17
#endif
#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC
#define TCA_FLOWER_KEY_TCP_SRC 18
#endif
#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK
#define TCA_FLOWER_KEY_TCP_SRC_MASK 35
#endif
#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST
#define TCA_FLOWER_KEY_TCP_DST 19
#endif
#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST_MASK
#define TCA_FLOWER_KEY_TCP_DST_MASK 36
#endif
#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC
#define TCA_FLOWER_KEY_UDP_SRC 20
#endif
#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK
#define TCA_FLOWER_KEY_UDP_SRC_MASK 37
#endif
#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST
#define TCA_FLOWER_KEY_UDP_DST 21
#endif
#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST_MASK
#define TCA_FLOWER_KEY_UDP_DST_MASK 38
#endif
#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ID
#define TCA_FLOWER_KEY_VLAN_ID 23
#endif
#ifndef HAVE_TCA_FLOWER_KEY_VLAN_PRIO
#define TCA_FLOWER_KEY_VLAN_PRIO 24
#endif
#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE
#define TCA_FLOWER_KEY_VLAN_ETH_TYPE 25
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_KEY_ID
#define TCA_FLOWER_KEY_ENC_KEY_ID 26
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC
#define TCA_FLOWER_KEY_ENC_IPV4_SRC 27
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK
#define TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK 28
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST
#define TCA_FLOWER_KEY_ENC_IPV4_DST 29
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST_MASK
#define TCA_FLOWER_KEY_ENC_IPV4_DST_MASK 30
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC
#define TCA_FLOWER_KEY_ENC_IPV6_SRC 31
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK
#define TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK 32
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST
#define TCA_FLOWER_KEY_ENC_IPV6_DST 33
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST_MASK
#define TCA_FLOWER_KEY_ENC_IPV6_DST_MASK 34
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT
#define TCA_FLOWER_KEY_ENC_UDP_SRC_PORT 43
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK
#define TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK 44
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT
#define TCA_FLOWER_KEY_ENC_UDP_DST_PORT 45
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK
#define TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK 46
#endif
#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS
#define TCA_FLOWER_KEY_TCP_FLAGS 71
#endif
#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK
#define TCA_FLOWER_KEY_TCP_FLAGS_MASK 72
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IP_TOS
#define TCA_FLOWER_KEY_IP_TOS 73
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IP_TOS_MASK
#define TCA_FLOWER_KEY_IP_TOS_MASK 74
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IP_TTL
#define TCA_FLOWER_KEY_IP_TTL 75
#endif
#ifndef HAVE_TCA_FLOWER_KEY_IP_TTL_MASK
#define TCA_FLOWER_KEY_IP_TTL_MASK 76
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IP_TOS
#define TCA_FLOWER_KEY_ENC_IP_TOS 80
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IP_TOS_MASK
#define TCA_FLOWER_KEY_ENC_IP_TOS_MASK 81
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IP_TTL
#define TCA_FLOWER_KEY_ENC_IP_TTL 82
#endif
#ifndef HAVE_TCA_FLOWER_KEY_ENC_IP_TTL_MASK
#define TCA_FLOWER_KEY_ENC_IP_TTL_MASK 83
#endif
#ifndef HAVE_TC_ACT_GOTO_CHAIN
#define TC_ACT_GOTO_CHAIN 0x20000000
#endif
#ifndef IPV6_ADDR_LEN
#define IPV6_ADDR_LEN 16
#endif
#ifndef IPV4_ADDR_LEN
#define IPV4_ADDR_LEN 4
#endif
#ifndef TP_PORT_LEN
#define TP_PORT_LEN 2 /* Transport Port (UDP/TCP) Length */
#endif
#ifndef TTL_LEN
#define TTL_LEN 1
#endif
#ifndef TCA_ACT_MAX_PRIO
#define TCA_ACT_MAX_PRIO 32
#endif
/** Parameters of VXLAN devices created by driver. */
#define MLX5_VXLAN_DEFAULT_VNI 1
#define MLX5_VXLAN_DEVICE_PFX "vmlx_"
/**
* Timeout in milliseconds to wait for the VXLAN UDP offloaded port
* registration to complete within the mlx5 driver.
*/
#define MLX5_VXLAN_WAIT_PORT_REG_MS 250
/** Tunnel action type, used for @p type in header structure. */
enum flow_tcf_tunact_type {
FLOW_TCF_TUNACT_VXLAN_DECAP,
FLOW_TCF_TUNACT_VXLAN_ENCAP,
};
/** Flags used for @p mask in tunnel action encap descriptors. */
#define FLOW_TCF_ENCAP_ETH_SRC (1u << 0)
#define FLOW_TCF_ENCAP_ETH_DST (1u << 1)
#define FLOW_TCF_ENCAP_IPV4_SRC (1u << 2)
#define FLOW_TCF_ENCAP_IPV4_DST (1u << 3)
#define FLOW_TCF_ENCAP_IPV6_SRC (1u << 4)
#define FLOW_TCF_ENCAP_IPV6_DST (1u << 5)
#define FLOW_TCF_ENCAP_UDP_SRC (1u << 6)
#define FLOW_TCF_ENCAP_UDP_DST (1u << 7)
#define FLOW_TCF_ENCAP_VXLAN_VNI (1u << 8)
#define FLOW_TCF_ENCAP_IP_TTL (1u << 9)
#define FLOW_TCF_ENCAP_IP_TOS (1u << 10)
/**
* Structure for holding netlink context.
* The message buffer size is MNL_SOCKET_BUFFER_SIZE (8KB), which ensures
* that netlink messages will never be truncated.
*/
struct mlx5_flow_tcf_context {
struct mnl_socket *nl; /* NETLINK_ROUTE libmnl socket. */
uint32_t seq; /* Message sequence number. */
uint32_t buf_size; /* Message buffer size. */
uint8_t *buf; /* Message buffer. */
};
/**
* Neigh rule structure. The neigh rule is applied via Netlink to the
* outer tunnel iface in order to provide the destination MAC address
* for the VXLAN encapsulation. The neigh rule is implicitly related
* to the Flow itself and can be shared by multiple Flows.
*/
struct tcf_neigh_rule {
LIST_ENTRY(tcf_neigh_rule) next;
uint32_t refcnt;
struct ether_addr eth;
uint16_t mask;
union {
struct {
rte_be32_t dst;
} ipv4;
struct {
uint8_t dst[IPV6_ADDR_LEN];
} ipv6;
};
};
/**
* Local rule structure. The local rule is applied via Netlink to the
* outer tunnel iface in order to provide local and peer IP addresses
* of the VXLAN tunnel for encapsulation. The local rule is implicitly
* related to the Flow itself and can be shared by multiple Flows.
*/
struct tcf_local_rule {
LIST_ENTRY(tcf_local_rule) next;
uint32_t refcnt;
uint16_t mask;
union {
struct {
rte_be32_t dst;
rte_be32_t src;
} ipv4;
struct {
uint8_t dst[IPV6_ADDR_LEN];
uint8_t src[IPV6_ADDR_LEN];
} ipv6;
};
};
/** Outer interface VXLAN encapsulation rules container. */
struct tcf_irule {
LIST_ENTRY(tcf_irule) next;
LIST_HEAD(, tcf_neigh_rule) neigh;
LIST_HEAD(, tcf_local_rule) local;
uint32_t refcnt;
unsigned int ifouter; /**< Own interface index. */
};
/** VXLAN virtual netdev. */
struct tcf_vtep {
LIST_ENTRY(tcf_vtep) next;
uint32_t refcnt;
unsigned int ifindex; /**< Own interface index. */
uint16_t port;
uint32_t created:1; /**< Actually created by PMD. */
uint32_t waitreg:1; /**< Wait for VXLAN UDP port registration. */
};
/** Tunnel descriptor header, common for all tunnel types. */
struct flow_tcf_tunnel_hdr {
uint32_t type; /**< Tunnel action type. */
struct tcf_vtep *vtep; /**< Virtual tunnel endpoint device. */
unsigned int ifindex_org; /**< Original dst/src interface */
unsigned int *ifindex_ptr; /**< Interface ptr in message. */
};
struct flow_tcf_vxlan_decap {
struct flow_tcf_tunnel_hdr hdr;
uint16_t udp_port;
};
struct flow_tcf_vxlan_encap {
struct flow_tcf_tunnel_hdr hdr;
struct tcf_irule *iface;
uint32_t mask;
uint8_t ip_tos;
uint8_t ip_ttl_hop;
struct {
struct ether_addr dst;
struct ether_addr src;
} eth;
union {
struct {
rte_be32_t dst;
rte_be32_t src;
} ipv4;
struct {
uint8_t dst[IPV6_ADDR_LEN];
uint8_t src[IPV6_ADDR_LEN];
} ipv6;
};
struct {
rte_be16_t src;
rte_be16_t dst;
} udp;
struct {
uint8_t vni[3];
} vxlan;
};
/** Structure used when extracting the values of flow counters
* from a netlink message.
*/
struct flow_tcf_stats_basic {
bool valid;
struct gnet_stats_basic counters;
};
/** Empty masks for known item types. */
static const union {
struct rte_flow_item_port_id port_id;
struct rte_flow_item_eth eth;
struct rte_flow_item_vlan vlan;
struct rte_flow_item_ipv4 ipv4;
struct rte_flow_item_ipv6 ipv6;
struct rte_flow_item_tcp tcp;
struct rte_flow_item_udp udp;
struct rte_flow_item_vxlan vxlan;
} flow_tcf_mask_empty = {
{0},
};
/** Supported masks for known item types. */
static const struct {
struct rte_flow_item_port_id port_id;
struct rte_flow_item_eth eth;
struct rte_flow_item_vlan vlan;
struct rte_flow_item_ipv4 ipv4;
struct rte_flow_item_ipv6 ipv6;
struct rte_flow_item_tcp tcp;
struct rte_flow_item_udp udp;
struct rte_flow_item_vxlan vxlan;
} flow_tcf_mask_supported = {
.port_id = {
.id = 0xffffffff,
},
.eth = {
.type = RTE_BE16(0xffff),
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
},
.vlan = {
/* PCP and VID only, no DEI. */
.tci = RTE_BE16(0xefff),
.inner_type = RTE_BE16(0xffff),
},
.ipv4.hdr = {
.next_proto_id = 0xff,
.time_to_live = 0xff,
.type_of_service = 0xff,
.src_addr = RTE_BE32(0xffffffff),
.dst_addr = RTE_BE32(0xffffffff),
},
.ipv6.hdr = {
.proto = 0xff,
.vtc_flow = RTE_BE32(0xfful << IPV6_HDR_TC_SHIFT),
.hop_limits = 0xff,
.src_addr =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
.dst_addr =
"\xff\xff\xff\xff\xff\xff\xff\xff"
"\xff\xff\xff\xff\xff\xff\xff\xff",
},
.tcp.hdr = {
.src_port = RTE_BE16(0xffff),
.dst_port = RTE_BE16(0xffff),
.tcp_flags = 0xff,
},
.udp.hdr = {
.src_port = RTE_BE16(0xffff),
.dst_port = RTE_BE16(0xffff),
},
.vxlan = {
.vni = "\xff\xff\xff",
},
};
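/*
 * Helpers to compute MNL-aligned netlink attribute sizes. They are used to
 * pre-calculate the message buffer space needed for the translated flower
 * rules and actions before the netlink messages are actually built.
 */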
#define SZ_NLATTR_HDR MNL_ALIGN(sizeof(struct nlattr))
#define SZ_NLATTR_NEST SZ_NLATTR_HDR
#define SZ_NLATTR_DATA_OF(len) MNL_ALIGN(SZ_NLATTR_HDR + (len))
#define SZ_NLATTR_TYPE_OF(typ) SZ_NLATTR_DATA_OF(sizeof(typ))
#define SZ_NLATTR_STRZ_OF(str) SZ_NLATTR_DATA_OF(strlen(str) + 1)
#define PTOI_TABLE_SZ_MAX(dev) (mlx5_dev_to_port_id((dev)->device, NULL, 0) + 2)
/** DPDK port to network interface index (ifindex) conversion. */
struct flow_tcf_ptoi {
uint16_t port_id; /**< DPDK port ID. */
unsigned int ifindex; /**< Network interface index. */
};
/* Due to a limitation on driver/FW. */
#define MLX5_TCF_GROUP_ID_MAX 3
/*
* Due to a limitation on driver/FW, priority ranges from 1 to 16 in the kernel.
* Priority in the rte_flow attribute starts from 0 and is incremented by 1 in
* translation. This is subject to change: the maximum priority may be
* determined by trial-and-error, as in the Verbs driver, once the restriction
* is lifted or the range is extended.
*/
#define MLX5_TCF_GROUP_PRIORITY_MAX 15
#define MLX5_TCF_FATE_ACTIONS \
(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
MLX5_FLOW_ACTION_JUMP)
#define MLX5_TCF_VLAN_ACTIONS \
(MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN | \
MLX5_FLOW_ACTION_OF_SET_VLAN_VID | MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
#define MLX5_TCF_VXLAN_ACTIONS \
(MLX5_FLOW_ACTION_VXLAN_ENCAP | MLX5_FLOW_ACTION_VXLAN_DECAP)
#define MLX5_TCF_PEDIT_ACTIONS \
(MLX5_FLOW_ACTION_SET_IPV4_SRC | MLX5_FLOW_ACTION_SET_IPV4_DST | \
MLX5_FLOW_ACTION_SET_IPV6_SRC | MLX5_FLOW_ACTION_SET_IPV6_DST | \
MLX5_FLOW_ACTION_SET_TP_SRC | MLX5_FLOW_ACTION_SET_TP_DST | \
MLX5_FLOW_ACTION_SET_TTL | MLX5_FLOW_ACTION_DEC_TTL | \
MLX5_FLOW_ACTION_SET_MAC_SRC | MLX5_FLOW_ACTION_SET_MAC_DST)
#define MLX5_TCF_CONFIG_ACTIONS \
(MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_JUMP | \
MLX5_FLOW_ACTION_OF_PUSH_VLAN | MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \
MLX5_FLOW_ACTION_OF_SET_VLAN_PCP | \
(MLX5_TCF_PEDIT_ACTIONS & ~MLX5_FLOW_ACTION_DEC_TTL))
#define MAX_PEDIT_KEYS 128
#define SZ_PEDIT_KEY_VAL 4
#define NUM_OF_PEDIT_KEYS(sz) \
(((sz) / SZ_PEDIT_KEY_VAL) + (((sz) % SZ_PEDIT_KEY_VAL) ? 1 : 0))
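/* E.g. a 6-byte MAC address needs NUM_OF_PEDIT_KEYS(6) == 2 four-byte keys. */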
struct pedit_key_ex {
enum pedit_header_type htype;
enum pedit_cmd cmd;
};
struct pedit_parser {
struct tc_pedit_sel sel;
struct tc_pedit_key keys[MAX_PEDIT_KEYS];
struct pedit_key_ex keys_ex[MAX_PEDIT_KEYS];
};
/**
* Create space for using the implicitly created TC flow counter.
*
* @return
* A pointer to the counter data structure, NULL otherwise and
* rte_errno is set.
*/
static struct mlx5_flow_counter *
flow_tcf_counter_new(void)
{
struct mlx5_flow_counter *cnt;
/*
* E-Switch counters cannot be shared and their IDs are unknown,
* so all counters are currently returned with ID 0. Switching to
* unique numbers may be better in the future.
*/
struct mlx5_flow_counter tmpl = {
.ref_cnt = 1,
};
cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
if (!cnt) {
rte_errno = ENOMEM;
return NULL;
}
*cnt = tmpl;
/* Implicit counter, do not add to list. */
return cnt;
}
/**
* Set pedit key of MAC address
*
* @param[in] actions
* pointer to action specification
* @param[in,out] p_parser
* pointer to pedit_parser
*/
static void
flow_tcf_pedit_key_set_mac(const struct rte_flow_action *actions,
struct pedit_parser *p_parser)
{
int idx = p_parser->sel.nkeys;
uint32_t off = actions->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
offsetof(struct ether_hdr, s_addr) :
offsetof(struct ether_hdr, d_addr);
const struct rte_flow_action_set_mac *conf =
(const struct rte_flow_action_set_mac *)actions->conf;
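/*
 * The 6-byte MAC address spans two 4-byte pedit keys: the first key
 * rewrites bytes 0-3, the second rewrites bytes 4-5 and preserves the
 * following two bytes through the AND mask.
 */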
p_parser->keys[idx].off = off;
p_parser->keys[idx].mask = ~UINT32_MAX;
p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH;
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
memcpy(&p_parser->keys[idx].val,
conf->mac_addr, SZ_PEDIT_KEY_VAL);
idx++;
p_parser->keys[idx].off = off + SZ_PEDIT_KEY_VAL;
p_parser->keys[idx].mask = 0xFFFF0000;
p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH;
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
memcpy(&p_parser->keys[idx].val,
conf->mac_addr + SZ_PEDIT_KEY_VAL,
ETHER_ADDR_LEN - SZ_PEDIT_KEY_VAL);
p_parser->sel.nkeys = (++idx);
}
/**
* Set pedit key of decrease/set ttl
*
* @param[in] actions
* pointer to action specification
* @param[in,out] p_parser
* pointer to pedit_parser
* @param[in] item_flags
* flags of all items presented
*/
static void
flow_tcf_pedit_key_set_dec_ttl(const struct rte_flow_action *actions,
struct pedit_parser *p_parser,
uint64_t item_flags)
{
int idx = p_parser->sel.nkeys;
p_parser->keys[idx].mask = 0xFFFFFF00;
if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) {
p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4;
p_parser->keys[idx].off =
offsetof(struct ipv4_hdr, time_to_live);
}
if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6) {
p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6;
p_parser->keys[idx].off =
offsetof(struct ipv6_hdr, hop_limits);
}
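/*
 * DEC_TTL is expressed as a pedit ADD of 0xFF (-1 modulo 256) on the
 * TTL/hop-limit byte, while SET_TTL writes the requested value directly.
 */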
if (actions->type == RTE_FLOW_ACTION_TYPE_DEC_TTL) {
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_ADD;
p_parser->keys[idx].val = 0x000000FF;
} else {
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
p_parser->keys[idx].val =
(__u32)((const struct rte_flow_action_set_ttl *)
actions->conf)->ttl_value;
}
p_parser->sel.nkeys = (++idx);
}
/**
* Set pedit key of transport (TCP/UDP) port value
*
* @param[in] actions
* pointer to action specification
* @param[in,out] p_parser
* pointer to pedit_parser
* @param[in] item_flags
* flags of all items presented
*/
static void
flow_tcf_pedit_key_set_tp_port(const struct rte_flow_action *actions,
struct pedit_parser *p_parser,
uint64_t item_flags)
{
int idx = p_parser->sel.nkeys;
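/* Select the pedit header type from the outer L4 protocol found in the pattern. */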
if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_UDP;
if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_TCP;
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
/* The offset of the src/dst port is the same for TCP and UDP. */
p_parser->keys[idx].off =
actions->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
offsetof(struct tcp_hdr, src_port) :
offsetof(struct tcp_hdr, dst_port);
p_parser->keys[idx].mask = 0xFFFF0000;
p_parser->keys[idx].val =
(__u32)((const struct rte_flow_action_set_tp *)
actions->conf)->port;
p_parser->sel.nkeys = (++idx);
}
/**
* Set pedit key of ipv6 address
*
* @param[in] actions
* pointer to action specification
* @param[in,out] p_parser
* pointer to pedit_parser
*/
static void
flow_tcf_pedit_key_set_ipv6_addr(const struct rte_flow_action *actions,
struct pedit_parser *p_parser)
{
int idx = p_parser->sel.nkeys;
int keys = NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);
int off_base =
actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
offsetof(struct ipv6_hdr, src_addr) :
offsetof(struct ipv6_hdr, dst_addr);
const struct rte_flow_action_set_ipv6 *conf =
(const struct rte_flow_action_set_ipv6 *)actions->conf;
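/* The 16-byte IPv6 address is split into four consecutive 4-byte pedit keys. */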
for (int i = 0; i < keys; i++, idx++) {
p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6;
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
p_parser->keys[idx].off = off_base + i * SZ_PEDIT_KEY_VAL;
p_parser->keys[idx].mask = ~UINT32_MAX;
memcpy(&p_parser->keys[idx].val,
conf->ipv6_addr + i * SZ_PEDIT_KEY_VAL,
SZ_PEDIT_KEY_VAL);
}
p_parser->sel.nkeys += keys;
}
/**
* Set pedit key of ipv4 address
*
* @param[in] actions
* pointer to action specification
* @param[in,out] p_parser
* pointer to pedit_parser
*/
static void
flow_tcf_pedit_key_set_ipv4_addr(const struct rte_flow_action *actions,
struct pedit_parser *p_parser)
{
int idx = p_parser->sel.nkeys;
p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4;
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
p_parser->keys[idx].off =
actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
offsetof(struct ipv4_hdr, src_addr) :
offsetof(struct ipv4_hdr, dst_addr);
p_parser->keys[idx].mask = ~UINT32_MAX;
p_parser->keys[idx].val =
((const struct rte_flow_action_set_ipv4 *)
actions->conf)->ipv4_addr;
p_parser->sel.nkeys = (++idx);
}
/**
* Create the pedit netlink attributes in the netlink message
* using the pre-allocated message buffer.
*
* @param[in,out] nl
* pointer to pre-allocated netlink message buffer
* @param[in,out] actions
* pointer to pointer of actions specification.
* @param[in] item_flags
* flags of all items presented
*/
static void
flow_tcf_create_pedit_mnl_msg(struct nlmsghdr *nl,
const struct rte_flow_action **actions,
uint64_t item_flags)
{
struct pedit_parser p_parser;
struct nlattr *na_act_options;
struct nlattr *na_pedit_keys;
memset(&p_parser, 0, sizeof(p_parser));
mnl_attr_put_strz(nl, TCA_ACT_KIND, "pedit");
na_act_options = mnl_attr_nest_start(nl, TCA_ACT_OPTIONS);
/* All modify-header actions should be put into a single tc-pedit action. */
for (; (*actions)->type != RTE_FLOW_ACTION_TYPE_END; (*actions)++) {
switch ((*actions)->type) {
case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
flow_tcf_pedit_key_set_ipv4_addr(*actions, &p_parser);
break;
case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
flow_tcf_pedit_key_set_ipv6_addr(*actions, &p_parser);
break;
case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
flow_tcf_pedit_key_set_tp_port(*actions,
&p_parser, item_flags);
break;
case RTE_FLOW_ACTION_TYPE_SET_TTL:
case RTE_FLOW_ACTION_TYPE_DEC_TTL:
flow_tcf_pedit_key_set_dec_ttl(*actions,
&p_parser, item_flags);
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
flow_tcf_pedit_key_set_mac(*actions, &p_parser);
break;
default:
goto pedit_mnl_msg_done;
}
}
pedit_mnl_msg_done:
p_parser.sel.action = TC_ACT_PIPE;
mnl_attr_put(nl, TCA_PEDIT_PARMS_EX,
sizeof(p_parser.sel) +
p_parser.sel.nkeys * sizeof(struct tc_pedit_key),
&p_parser);
na_pedit_keys =
mnl_attr_nest_start(nl, TCA_PEDIT_KEYS_EX | NLA_F_NESTED);
for (int i = 0; i < p_parser.sel.nkeys; i++) {
struct nlattr *na_pedit_key =
mnl_attr_nest_start(nl,
TCA_PEDIT_KEY_EX | NLA_F_NESTED);
mnl_attr_put_u16(nl, TCA_PEDIT_KEY_EX_HTYPE,
p_parser.keys_ex[i].htype);
mnl_attr_put_u16(nl, TCA_PEDIT_KEY_EX_CMD,
p_parser.keys_ex[i].cmd);
mnl_attr_nest_end(nl, na_pedit_key);
}
mnl_attr_nest_end(nl, na_pedit_keys);
mnl_attr_nest_end(nl, na_act_options);
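/*
 * Step back one action so the caller's loop increment lands on the first
 * action not consumed by this pedit sequence.
 */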
(*actions)--;
}
/**
* Calculate the maximum memory size of one TC-pedit action.
* One TC-pedit action can contain a set of keys, each defining
* a rewrite element (rte_flow action).
*
* @param[in,out] actions
* actions specification.
* @param[in,out] action_flags
* actions flags
* @return
* Max memory size of one TC-pedit action
*/
static int
flow_tcf_get_pedit_actions_size(const struct rte_flow_action **actions,
uint64_t *action_flags)
{
int pedit_size = 0;
int keys = 0;
uint64_t flags = 0;
pedit_size += SZ_NLATTR_NEST + /* na_act_index. */
SZ_NLATTR_STRZ_OF("pedit") +
SZ_NLATTR_NEST; /* TCA_ACT_OPTIONS. */
for (; (*actions)->type != RTE_FLOW_ACTION_TYPE_END; (*actions)++) {
switch ((*actions)->type) {
case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
keys += NUM_OF_PEDIT_KEYS(IPV4_ADDR_LEN);
flags |= MLX5_FLOW_ACTION_SET_IPV4_SRC;
break;
case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
keys += NUM_OF_PEDIT_KEYS(IPV4_ADDR_LEN);
flags |= MLX5_FLOW_ACTION_SET_IPV4_DST;
break;
case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
keys += NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);
flags |= MLX5_FLOW_ACTION_SET_IPV6_SRC;
break;
case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
keys += NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);
flags |= MLX5_FLOW_ACTION_SET_IPV6_DST;
break;
case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
/* TCP is the same as UDP here. */
keys += NUM_OF_PEDIT_KEYS(TP_PORT_LEN);
flags |= MLX5_FLOW_ACTION_SET_TP_SRC;
break;
case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
/* TCP is the same as UDP here. */
keys += NUM_OF_PEDIT_KEYS(TP_PORT_LEN);
flags |= MLX5_FLOW_ACTION_SET_TP_DST;
break;
case RTE_FLOW_ACTION_TYPE_SET_TTL:
keys += NUM_OF_PEDIT_KEYS(TTL_LEN);
flags |= MLX5_FLOW_ACTION_SET_TTL;
break;
case RTE_FLOW_ACTION_TYPE_DEC_TTL:
keys += NUM_OF_PEDIT_KEYS(TTL_LEN);
flags |= MLX5_FLOW_ACTION_DEC_TTL;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
keys += NUM_OF_PEDIT_KEYS(ETHER_ADDR_LEN);
flags |= MLX5_FLOW_ACTION_SET_MAC_SRC;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
keys += NUM_OF_PEDIT_KEYS(ETHER_ADDR_LEN);
flags |= MLX5_FLOW_ACTION_SET_MAC_DST;
break;
default:
goto get_pedit_action_size_done;
}
}
get_pedit_action_size_done:
/* TCA_PEDIT_PARMS_EX */
pedit_size +=
SZ_NLATTR_DATA_OF(sizeof(struct tc_pedit_sel) +
keys * sizeof(struct tc_pedit_key));
pedit_size += SZ_NLATTR_NEST; /* TCA_PEDIT_KEYS_EX */
pedit_size += keys *
/* TCA_PEDIT_KEY_EX + HTYPE + CMD */
(SZ_NLATTR_NEST + SZ_NLATTR_DATA_OF(2) +
SZ_NLATTR_DATA_OF(2));
(*action_flags) |= flags;
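/* Step back so the caller's loop increment lands on the first action not counted here. */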
(*actions)--;
return pedit_size;
}
/**
* Retrieve mask for pattern item.
*
* This function does basic sanity checks on a pattern item in order to
* return the most appropriate mask for it.
*
* @param[in] item
* Item specification.
* @param[in] mask_default
* Default mask for pattern item as specified by the flow API.
* @param[in] mask_supported
* Mask fields supported by the implementation.
* @param[in] mask_empty
* Empty mask to return when there is no specification.
* @param[out] error
* Perform verbose error reporting if not NULL.
*
* @return
* Either @p item->mask or one of the mask parameters on success, NULL
* otherwise and rte_errno is set.
*/
static const void *
flow_tcf_item_mask(const struct rte_flow_item *item, const void *mask_default,
const void *mask_supported, const void *mask_empty,
size_t mask_size, struct rte_flow_error *error)
{
const uint8_t *mask;
size_t i;
/* item->last and item->mask cannot exist without item->spec. */
if (!item->spec && (item->mask || item->last)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"\"mask\" or \"last\" field provided without"
" a corresponding \"spec\"");
return NULL;
}
/* No spec, no mask, no problem. */
if (!item->spec)
return mask_empty;
mask = item->mask ? item->mask : mask_default;
assert(mask);
/*
* Single-pass check to make sure that:
* - Mask is supported, no bits are set outside mask_supported.
* - Both item->spec and item->last are included in mask.
*/
for (i = 0; i != mask_size; ++i) {
if (!mask[i])
continue;
if ((mask[i] | ((const uint8_t *)mask_supported)[i]) !=
((const uint8_t *)mask_supported)[i]) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"unsupported field found"
" in \"mask\"");
return NULL;
}
if (item->last &&
(((const uint8_t *)item->spec)[i] & mask[i]) !=
(((const uint8_t *)item->last)[i] & mask[i])) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_LAST,
item->last,
"range between \"spec\" and \"last\""
" not comprised in \"mask\"");
return NULL;
}
}
return mask;
}
/**
* Build a conversion table between port ID and ifindex.
*
* @param[in] dev
* Pointer to Ethernet device.
* @param[out] ptoi
* Pointer to ptoi table.
* @param[in] len
* Size of ptoi table provided.
*
* @return
* Size of ptoi table filled.
*/
static unsigned int
flow_tcf_build_ptoi_table(struct rte_eth_dev *dev, struct flow_tcf_ptoi *ptoi,
unsigned int len)
{
unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);
uint16_t port_id[n + 1];
unsigned int i;
unsigned int own = 0;
/* At least one port is needed when no switch domain is present. */
if (!n) {
n = 1;
port_id[0] = dev->data->port_id;
} else {
n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
}
if (n > len)
return 0;
for (i = 0; i != n; ++i) {
struct rte_eth_dev_info dev_info;
rte_eth_dev_info_get(port_id[i], &dev_info);
if (port_id[i] == dev->data->port_id)
own = i;
ptoi[i].port_id = port_id[i];
ptoi[i].ifindex = dev_info.if_index;
}
/* Ensure first entry of ptoi[] is the current device. */
if (own) {
ptoi[n] = ptoi[0];
ptoi[0] = ptoi[own];
ptoi[own] = ptoi[n];
}
/* An entry with zero ifindex terminates ptoi[]. */
ptoi[n].port_id = 0;
ptoi[n].ifindex = 0;
return n;
}
/**
* Verify the @p attr will be correctly understood by the E-switch.
*
* @param[in] attr
* Pointer to flow attributes
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_tcf_validate_attributes(const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
/*
* Supported attributes: groups, some priorities and ingress only.
* Group is supported only if the kernel supports chains. Transfer is
* ignored here as it is the caller's problem.
*/
if (attr->group > MLX5_TCF_GROUP_ID_MAX)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
"group ID larger than "
RTE_STR(MLX5_TCF_GROUP_ID_MAX)
" isn't supported");
else if (attr->priority > MLX5_TCF_GROUP_PRIORITY_MAX)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
attr,
"priority more than "
RTE_STR(MLX5_TCF_GROUP_PRIORITY_MAX)
" is not supported");
if (!attr->ingress)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
attr, "only ingress is supported");
if (attr->egress)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
attr, "egress is not supported");
return 0;
}
/**
* Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_ETH item for E-Switch.
* The routine checks the L2 fields to be used in encapsulation header.
*
* @param[in] item
* Pointer to the item structure.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
**/
static int
flow_tcf_validate_vxlan_encap_eth(const struct rte_flow_item *item,
struct rte_flow_error *error)
{
const struct rte_flow_item_eth *spec = item->spec;
const struct rte_flow_item_eth *mask = item->mask;
if (!spec) {
/*
* Specification for L2 addresses can be empty
* because these are optional and not required
* directly by the tc rule. The kernel tries to
* resolve them on its own.
*/
return 0;
}
if (!mask) {
/* If mask is not specified use the default one. */
mask = &rte_flow_item_eth_mask;
}
if (memcmp(&mask->dst,
&flow_tcf_mask_empty.eth.dst,
sizeof(flow_tcf_mask_empty.eth.dst))) {
if (memcmp(&mask->dst,
&rte_flow_item_eth_mask.dst,
sizeof(rte_flow_item_eth_mask.dst)))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"eth.dst\" field");
}
if (memcmp(&mask->src,
&flow_tcf_mask_empty.eth.src,
sizeof(flow_tcf_mask_empty.eth.src))) {
if (memcmp(&mask->src,
&rte_flow_item_eth_mask.src,
sizeof(rte_flow_item_eth_mask.src)))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"eth.src\" field");
}
if (mask->type != RTE_BE16(0x0000)) {
if (mask->type != RTE_BE16(0xffff))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"eth.type\" field");
DRV_LOG(WARNING,
"outer ethernet type field"
" cannot be forced for vxlan"
" encapsulation, parameter ignored");
}
return 0;
}
/**
* Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_IPV4 item for E-Switch.
* The routine checks the IPv4 fields to be used in encapsulation header.
*
* @param[in] item
* Pointer to the item structure.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
**/
static int
flow_tcf_validate_vxlan_encap_ipv4(const struct rte_flow_item *item,
struct rte_flow_error *error)
{
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
if (!spec) {
/*
* Specification for IP addresses cannot be empty
* because it is required by tunnel_key parameter.
*/
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"NULL outer ipv4 address"
" specification for vxlan"
" encapsulation");
}
if (!mask)
mask = &rte_flow_item_ipv4_mask;
if (mask->hdr.dst_addr != RTE_BE32(0x00000000)) {
if (mask->hdr.dst_addr != RTE_BE32(0xffffffff))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"ipv4.hdr.dst_addr\" field"
" for vxlan encapsulation");
/* More IPv4 address validations can be put here. */
} else {
/*
* Kernel uses the destination IP address to determine
* the routing path and obtain the MAC destination
* address, so IP destination address must be
* specified in the tc rule.
*/
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"outer ipv4 destination address"
" must be specified for"
" vxlan encapsulation");
}
if (mask->hdr.src_addr != RTE_BE32(0x00000000)) {
if (mask->hdr.src_addr != RTE_BE32(0xffffffff))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"ipv4.hdr.src_addr\" field"
" for vxlan encapsulation");
/* More IPv4 address validations can be put here. */
} else {
/*
* Kernel uses the source IP address to select the
* interface for egress encapsulated traffic, so
* it must be specified in the tc rule.
*/
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"outer ipv4 source address"
" must be specified for"
" vxlan encapsulation");
}
if (mask->hdr.type_of_service &&
mask->hdr.type_of_service != 0xff)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"ipv4.hdr.type_of_service\" field"
" for vxlan encapsulation");
if (mask->hdr.time_to_live &&
mask->hdr.time_to_live != 0xff)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"ipv4.hdr.time_to_live\" field"
" for vxlan encapsulation");
return 0;
}
/**
* Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_IPV6 item for E-Switch.
* The routine checks the IPv6 fields to be used in encapsulation header.
*
* @param[in] item
* Pointer to the item structure.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
**/
static int
flow_tcf_validate_vxlan_encap_ipv6(const struct rte_flow_item *item,
struct rte_flow_error *error)
{
const struct rte_flow_item_ipv6 *spec = item->spec;
const struct rte_flow_item_ipv6 *mask = item->mask;
uint8_t msk6;
if (!spec) {
/*
* Specification for IP addresses cannot be empty
* because it is required by tunnel_key parameter.
*/
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"NULL outer ipv6 address"
" specification for"
" vxlan encapsulation");
}
if (!mask)
mask = &rte_flow_item_ipv6_mask;
if (memcmp(&mask->hdr.dst_addr,
&flow_tcf_mask_empty.ipv6.hdr.dst_addr,
IPV6_ADDR_LEN)) {
if (memcmp(&mask->hdr.dst_addr,
&rte_flow_item_ipv6_mask.hdr.dst_addr,
IPV6_ADDR_LEN))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"ipv6.hdr.dst_addr\" field"
" for vxlan encapsulation");
/* More IPv6 address validations can be put here. */
} else {
/*
* Kernel uses the destination IP address to determine
* the routing path and obtain the MAC destination
* address (neigh or gateway), so the IP destination address
* must be specified within the tc rule.
*/
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"outer ipv6 destination address"
" must be specified for"
" vxlan encapsulation");
}
if (memcmp(&mask->hdr.src_addr,
&flow_tcf_mask_empty.ipv6.hdr.src_addr,
IPV6_ADDR_LEN)) {
if (memcmp(&mask->hdr.src_addr,
&rte_flow_item_ipv6_mask.hdr.src_addr,
IPV6_ADDR_LEN))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"ipv6.hdr.src_addr\" field"
" for vxlan encapsulation");
/* More L3 address validation can be put here. */
} else {
/*
* Kernel uses the source IP address to select the
* interface for egress encapsulated traffic, so
* it must be specified in the tc rule.
*/
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"outer L3 source address"
" must be specified for"
" vxlan encapsulation");
}
msk6 = (rte_be_to_cpu_32(mask->hdr.vtc_flow) >>
IPV6_HDR_TC_SHIFT) & 0xff;
if (msk6 && msk6 != 0xff)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"ipv6.hdr.vtc_flow.tos\" field"
" for vxlan encapsulation");
if (mask->hdr.hop_limits && mask->hdr.hop_limits != 0xff)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"ipv6.hdr.hop_limits\" field"
" for vxlan encapsulation");
return 0;
}
/**
* Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_UDP item for E-Switch.
* The routine checks the UDP fields to be used in encapsulation header.
*
* @param[in] item
* Pointer to the item structure.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
**/
static int
flow_tcf_validate_vxlan_encap_udp(const struct rte_flow_item *item,
struct rte_flow_error *error)
{
const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
if (!spec) {
/*
* Specification for UDP ports cannot be empty
* because it is required by tunnel_key parameter.
*/
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"NULL UDP port specification "
" for vxlan encapsulation");
}
if (!mask)
mask = &rte_flow_item_udp_mask;
if (mask->hdr.dst_port != RTE_BE16(0x0000)) {
if (mask->hdr.dst_port != RTE_BE16(0xffff))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"udp.hdr.dst_port\" field"
" for vxlan encapsulation");
if (!spec->hdr.dst_port)
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"outer UDP remote port cannot be"
" 0 for vxlan encapsulation");
} else {
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"outer UDP remote port"
" must be specified for"
" vxlan encapsulation");
}
if (mask->hdr.src_port != RTE_BE16(0x0000)) {
if (mask->hdr.src_port != RTE_BE16(0xffff))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"udp.hdr.src_port\" field"
" for vxlan encapsulation");
DRV_LOG(WARNING,
"outer UDP source port cannot be"
" forced for vxlan encapsulation,"
" parameter ignored");
}
return 0;
}
/**
* Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_VXLAN item for E-Switch.
* The routine checks the VNI fields to be used in the encapsulation header.
*
* @param[in] item
* Pointer to the item structure.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
**/
static int
flow_tcf_validate_vxlan_encap_vni(const struct rte_flow_item *item,
struct rte_flow_error *error)
{
const struct rte_flow_item_vxlan *spec = item->spec;
const struct rte_flow_item_vxlan *mask = item->mask;
if (!spec) {
/* Outer VNI is required by tunnel_key parameter. */
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"NULL VNI specification"
" for vxlan encapsulation");
}
if (!mask)
mask = &rte_flow_item_vxlan_mask;
if (!mask->vni[0] && !mask->vni[1] && !mask->vni[2])
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"outer VNI must be specified "
"for vxlan encapsulation");
if (mask->vni[0] != 0xff ||
mask->vni[1] != 0xff ||
mask->vni[2] != 0xff)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"vxlan.vni\" field");
if (!spec->vni[0] && !spec->vni[1] && !spec->vni[2])
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"vxlan vni cannot be 0");
return 0;
}
/**
* Validate VXLAN_ENCAP action item list for E-Switch.
* The routine checks items to be used in encapsulation header.
*
* @param[in] action
* Pointer to the VXLAN_ENCAP action structure.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
**/
static int
flow_tcf_validate_vxlan_encap(const struct rte_flow_action *action,
struct rte_flow_error *error)
{
const struct rte_flow_item *items;
int ret;
uint32_t item_flags = 0;
if (!action->conf)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"Missing vxlan tunnel"
" action configuration");
items = ((const struct rte_flow_action_vxlan_encap *)
action->conf)->definition;
if (!items)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"Missing vxlan tunnel"
" encapsulation parameters");
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
case RTE_FLOW_ITEM_TYPE_ETH:
ret = mlx5_flow_validate_item_eth(items, item_flags,
error);
if (ret < 0)
return ret;
ret = flow_tcf_validate_vxlan_encap_eth(items, error);
if (ret < 0)
return ret;
item_flags |= MLX5_FLOW_LAYER_OUTER_L2;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
ret = mlx5_flow_validate_item_ipv4
(items, item_flags,
&flow_tcf_mask_supported.ipv4, error);
if (ret < 0)
return ret;
ret = flow_tcf_validate_vxlan_encap_ipv4(items, error);
if (ret < 0)
return ret;
item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6
(items, item_flags,
&flow_tcf_mask_supported.ipv6, error);
if (ret < 0)
return ret;
ret = flow_tcf_validate_vxlan_encap_ipv6(items, error);
if (ret < 0)
return ret;
item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
break;
case RTE_FLOW_ITEM_TYPE_UDP:
ret = mlx5_flow_validate_item_udp(items, item_flags,
0xFF, error);
if (ret < 0)
return ret;
ret = flow_tcf_validate_vxlan_encap_udp(items, error);
if (ret < 0)
return ret;
item_flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
ret = mlx5_flow_validate_item_vxlan(items,
item_flags, error);
if (ret < 0)
return ret;
ret = flow_tcf_validate_vxlan_encap_vni(items, error);
if (ret < 0)
return ret;
item_flags |= MLX5_FLOW_LAYER_VXLAN;
break;
default:
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, items,
"vxlan encap item not supported");
}
}
if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"no outer IP layer found"
" for vxlan encapsulation");
if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"no outer UDP layer found"
" for vxlan encapsulation");
if (!(item_flags & MLX5_FLOW_LAYER_VXLAN))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"no VXLAN VNI found"
" for vxlan encapsulation");
return 0;
}
/**
* Validate outer RTE_FLOW_ITEM_TYPE_UDP item if tunnel item
* RTE_FLOW_ITEM_TYPE_VXLAN is present in item list.
*
* @param[in] udp
* Outer UDP layer item (if any, NULL otherwise).
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
**/
static int
flow_tcf_validate_vxlan_decap_udp(const struct rte_flow_item *udp,
struct rte_flow_error *error)
{
const struct rte_flow_item_udp *spec = udp->spec;
const struct rte_flow_item_udp *mask = udp->mask;
if (!spec)
/*
* Specification for UDP ports cannot be empty
* because it is required as decap parameter.
*/
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, udp,
"NULL UDP port specification"
" for VXLAN decapsulation");
if (!mask)
mask = &rte_flow_item_udp_mask;
if (mask->hdr.dst_port != RTE_BE16(0x0000)) {
if (mask->hdr.dst_port != RTE_BE16(0xffff))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"udp.hdr.dst_port\" field");
if (!spec->hdr.dst_port)
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, udp,
"zero decap local UDP port");
} else {
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, udp,
"outer UDP destination port must be "
"specified for vxlan decapsulation");
}
if (mask->hdr.src_port != RTE_BE16(0x0000)) {
if (mask->hdr.src_port != RTE_BE16(0xffff))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
"no support for partial mask on"
" \"udp.hdr.src_port\" field");
DRV_LOG(WARNING,
"outer UDP local port cannot be "
"forced for VXLAN encapsulation, "
"parameter ignored");
}
return 0;
}
/**
* Validate flow for E-Switch.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* Pointer to the list of items.
* @param[in] actions
* Pointer to the list of actions.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_tcf_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
union {
const struct rte_flow_item_port_id *port_id;
const struct rte_flow_item_eth *eth;
const struct rte_flow_item_vlan *vlan;
const struct rte_flow_item_ipv4 *ipv4;
const struct rte_flow_item_ipv6 *ipv6;
const struct rte_flow_item_tcp *tcp;
const struct rte_flow_item_udp *udp;
const struct rte_flow_item_vxlan *vxlan;
} spec, mask;
union {
const struct rte_flow_action_port_id *port_id;
const struct rte_flow_action_jump *jump;
const struct rte_flow_action_of_push_vlan *of_push_vlan;
const struct rte_flow_action_of_set_vlan_vid *
of_set_vlan_vid;
const struct rte_flow_action_of_set_vlan_pcp *
of_set_vlan_pcp;
const struct rte_flow_action_vxlan_encap *vxlan_encap;
const struct rte_flow_action_set_ipv4 *set_ipv4;
const struct rte_flow_action_set_ipv6 *set_ipv6;
} conf;
const struct rte_flow_item *outer_udp = NULL;
rte_be16_t inner_etype = RTE_BE16(ETH_P_ALL);
rte_be16_t outer_etype = RTE_BE16(ETH_P_ALL);
rte_be16_t vlan_etype = RTE_BE16(ETH_P_ALL);
uint64_t item_flags = 0;
uint64_t action_flags = 0;
uint8_t next_protocol = 0xff;
unsigned int tcm_ifindex = 0;
uint8_t pedit_validated = 0;
struct flow_tcf_ptoi ptoi[PTOI_TABLE_SZ_MAX(dev)];
struct rte_eth_dev *port_id_dev = NULL;
bool in_port_id_set = false;
int ret;
claim_nonzero(flow_tcf_build_ptoi_table(dev, ptoi,
PTOI_TABLE_SZ_MAX(dev)));
ret = flow_tcf_validate_attributes(attr, error);
if (ret < 0)
return ret;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
unsigned int i;
uint64_t current_action_flag = 0;
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
current_action_flag = MLX5_FLOW_ACTION_PORT_ID;
if (!actions->conf)
break;
conf.port_id = actions->conf;
if (conf.port_id->original)
i = 0;
else
for (i = 0; ptoi[i].ifindex; ++i)
if (ptoi[i].port_id == conf.port_id->id)
break;
if (!ptoi[i].ifindex)
return rte_flow_error_set
(error, ENODEV,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
conf.port_id,
"missing data to convert port ID to"
" ifindex");
port_id_dev = &rte_eth_devices[conf.port_id->id];
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
current_action_flag = MLX5_FLOW_ACTION_JUMP;
if (!actions->conf)
break;
conf.jump = actions->conf;
if (attr->group >= conf.jump->group)
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"can jump only to a group forward");
break;
case RTE_FLOW_ACTION_TYPE_DROP:
current_action_flag = MLX5_FLOW_ACTION_DROP;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
break;
case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
current_action_flag = MLX5_FLOW_ACTION_OF_POP_VLAN;
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: {
rte_be16_t ethertype;
current_action_flag = MLX5_FLOW_ACTION_OF_PUSH_VLAN;
if (!actions->conf)
break;
conf.of_push_vlan = actions->conf;
ethertype = conf.of_push_vlan->ethertype;
if (ethertype != RTE_BE16(ETH_P_8021Q) &&
ethertype != RTE_BE16(ETH_P_8021AD))
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
"vlan push TPID must be "
"802.1Q or 802.1AD");
break;
}
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
"vlan modify is not supported,"
" set action must follow push action");
current_action_flag = MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
"vlan modify is not supported,"
" set action must follow push action");
current_action_flag = MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
current_action_flag = MLX5_FLOW_ACTION_VXLAN_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
ret = flow_tcf_validate_vxlan_encap(actions, error);
if (ret < 0)
return ret;
current_action_flag = MLX5_FLOW_ACTION_VXLAN_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
current_action_flag = MLX5_FLOW_ACTION_SET_IPV4_SRC;
break;
case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
current_action_flag = MLX5_FLOW_ACTION_SET_IPV4_DST;
break;
case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
current_action_flag = MLX5_FLOW_ACTION_SET_IPV6_SRC;
break;
case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
current_action_flag = MLX5_FLOW_ACTION_SET_IPV6_DST;
break;
case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
current_action_flag = MLX5_FLOW_ACTION_SET_TP_SRC;
break;
case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
current_action_flag = MLX5_FLOW_ACTION_SET_TP_DST;
break;
case RTE_FLOW_ACTION_TYPE_SET_TTL:
current_action_flag = MLX5_FLOW_ACTION_SET_TTL;
break;
case RTE_FLOW_ACTION_TYPE_DEC_TTL:
current_action_flag = MLX5_FLOW_ACTION_DEC_TTL;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
current_action_flag = MLX5_FLOW_ACTION_SET_MAC_SRC;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
current_action_flag = MLX5_FLOW_ACTION_SET_MAC_DST;
break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"action not supported");
}
if (current_action_flag & MLX5_TCF_CONFIG_ACTIONS) {
if (!actions->conf)
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
actions,
"action configuration not set");
}
if ((current_action_flag & MLX5_TCF_PEDIT_ACTIONS) &&
pedit_validated)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"set actions should be "
"listed successively");
if ((current_action_flag & ~MLX5_TCF_PEDIT_ACTIONS) &&
(action_flags & MLX5_TCF_PEDIT_ACTIONS))
pedit_validated = 1;
if ((current_action_flag & MLX5_TCF_FATE_ACTIONS) &&
(action_flags & MLX5_TCF_FATE_ACTIONS))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"can't have multiple fate"
" actions");
if ((current_action_flag & MLX5_TCF_VXLAN_ACTIONS) &&
(action_flags & MLX5_TCF_VXLAN_ACTIONS))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"can't have multiple vxlan"
" actions");
if ((current_action_flag & MLX5_TCF_VXLAN_ACTIONS) &&
(action_flags & MLX5_TCF_VLAN_ACTIONS))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"can't have vxlan and vlan"
" actions in the same rule");
action_flags |= current_action_flag;
}
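/*
 * The outer_etype/vlan_etype/inner_etype variables track the Ethernet type
 * implied by the items parsed so far, so that conflicting eth/VLAN/IPv4/IPv6
 * combinations within a single pattern can be rejected.
 */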
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
unsigned int i;
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, items,
"inner tunnel port id"
" item is not supported");
mask.port_id = flow_tcf_item_mask
(items, &rte_flow_item_port_id_mask,
&flow_tcf_mask_supported.port_id,
&flow_tcf_mask_empty.port_id,
sizeof(flow_tcf_mask_supported.port_id),
error);
if (!mask.port_id)
return -rte_errno;
if (mask.port_id == &flow_tcf_mask_empty.port_id) {
in_port_id_set = 1;
break;
}
spec.port_id = items->spec;
if (mask.port_id->id && mask.port_id->id != 0xffffffff)
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK,
mask.port_id,
"no support for partial mask on"
" \"id\" field");
if (!mask.port_id->id)
i = 0;
else
for (i = 0; ptoi[i].ifindex; ++i)
if (ptoi[i].port_id == spec.port_id->id)
break;
if (!ptoi[i].ifindex)
return rte_flow_error_set
(error, ENODEV,
RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
spec.port_id,
"missing data to convert port ID to"
" ifindex");
if (in_port_id_set && ptoi[i].ifindex != tcm_ifindex)
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
spec.port_id,
"cannot match traffic for"
" several port IDs through"
" a single flow rule");
tcm_ifindex = ptoi[i].ifindex;
in_port_id_set = 1;
break;
case RTE_FLOW_ITEM_TYPE_ETH:
ret = mlx5_flow_validate_item_eth(items, item_flags,
error);
if (ret < 0)
return ret;
item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
MLX5_FLOW_LAYER_INNER_L2 :
MLX5_FLOW_LAYER_OUTER_L2;
/* TODO:
* Redundant check due to different supported mask.
* Same for the rest of items.
*/
mask.eth = flow_tcf_item_mask
(items, &rte_flow_item_eth_mask,
&flow_tcf_mask_supported.eth,
&flow_tcf_mask_empty.eth,
sizeof(flow_tcf_mask_supported.eth),
error);
if (!mask.eth)
return -rte_errno;
if (mask.eth->type && mask.eth->type !=
RTE_BE16(0xffff))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK,
mask.eth,
"no support for partial mask on"
" \"type\" field");
assert(items->spec);
spec.eth = items->spec;
if (mask.eth->type &&
(item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
inner_etype != RTE_BE16(ETH_P_ALL) &&
inner_etype != spec.eth->type)
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
items,
"inner eth_type conflict");
if (mask.eth->type &&
!(item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
outer_etype != RTE_BE16(ETH_P_ALL) &&
outer_etype != spec.eth->type)
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
items,
"outer eth_type conflict");
if (mask.eth->type) {
if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
inner_etype = spec.eth->type;
else
outer_etype = spec.eth->type;
}
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, items,
"inner tunnel VLAN"
" is not supported");
ret = mlx5_flow_validate_item_vlan(items, item_flags,
error);
if (ret < 0)
return ret;
item_flags |= MLX5_FLOW_LAYER_OUTER_VLAN;
mask.vlan = flow_tcf_item_mask
(items, &rte_flow_item_vlan_mask,
&flow_tcf_mask_supported.vlan,
&flow_tcf_mask_empty.vlan,
sizeof(flow_tcf_mask_supported.vlan),
error);
if (!mask.vlan)
return -rte_errno;
if ((mask.vlan->tci & RTE_BE16(0xe000) &&
(mask.vlan->tci & RTE_BE16(0xe000)) !=
RTE_BE16(0xe000)) ||
(mask.vlan->tci & RTE_BE16(0x0fff) &&
(mask.vlan->tci & RTE_BE16(0x0fff)) !=
RTE_BE16(0x0fff)) ||
(mask.vlan->inner_type &&
mask.vlan->inner_type != RTE_BE16(0xffff)))
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK,
mask.vlan,
"no support for partial masks on"
" \"tci\" (PCP and VID parts) and"
" \"inner_type\" fields");
if (outer_etype != RTE_BE16(ETH_P_ALL) &&
outer_etype != RTE_BE16(ETH_P_8021Q))
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
items,
"outer eth_type conflict,"
" must be 802.1Q");
outer_etype = RTE_BE16(ETH_P_8021Q);
assert(items->spec);
spec.vlan = items->spec;
if (mask.vlan->inner_type &&
vlan_etype != RTE_BE16(ETH_P_ALL) &&
vlan_etype != spec.vlan->inner_type)
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
items,
"vlan eth_type conflict");
if (mask.vlan->inner_type)
vlan_etype = spec.vlan->inner_type;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
ret = mlx5_flow_validate_item_ipv4
(items, item_flags,
&flow_tcf_mask_supported.ipv4, error);
if (ret < 0)
return ret;
item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
mask.ipv4 = flow_tcf_item_mask
(items, &rte_flow_item_ipv4_mask,
&flow_tcf_mask_supported.ipv4,
&flow_tcf_mask_empty.ipv4,
sizeof(flow_tcf_mask_supported.ipv4),
error);
if (!mask.ipv4)
return -rte_errno;
if (mask.ipv4->hdr.next_proto_id &&
mask.ipv4->hdr.next_proto_id != 0xff)
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK,
mask.ipv4,
"no support for partial mask on"
" \"hdr.next_proto_id\" field");
else if (mask.ipv4->hdr.next_proto_id)
next_protocol =
((const struct rte_flow_item_ipv4 *)
(items->spec))->hdr.next_proto_id;
if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
if (inner_etype != RTE_BE16(ETH_P_ALL) &&
inner_etype != RTE_BE16(ETH_P_IP))
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
items,
"inner eth_type conflict,"
" IPv4 is required");
inner_etype = RTE_BE16(ETH_P_IP);
} else if (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN) {
if (vlan_etype != RTE_BE16(ETH_P_ALL) &&
vlan_etype != RTE_BE16(ETH_P_IP))
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
items,
"vlan eth_type conflict,"
" IPv4 is required");
vlan_etype = RTE_BE16(ETH_P_IP);
} else {
if (outer_etype != RTE_BE16(ETH_P_ALL) &&
outer_etype != RTE_BE16(ETH_P_IP))
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
items,
"eth_type conflict,"
" IPv4 is required");
outer_etype = RTE_BE16(ETH_P_IP);
}
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6
(items, item_flags,
&flow_tcf_mask_supported.ipv6, error);
if (ret < 0)
return ret;
item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
mask.ipv6 = flow_tcf_item_mask
(items, &rte_flow_item_ipv6_mask,
&flow_tcf_mask_supported.ipv6,
&flow_tcf_mask_empty.ipv6,
sizeof(flow_tcf_mask_supported.ipv6),
error);
if (!mask.ipv6)
return -rte_errno;
if (mask.ipv6->hdr.proto &&
mask.ipv6->hdr.proto != 0xff)
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK,
mask.ipv6,
"no support for partial mask on"
" \"hdr.proto\" field");
else if (mask.ipv6->hdr.proto)
next_protocol =
((const struct rte_flow_item_ipv6 *)
(items->spec))->hdr.proto;
if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
if (inner_etype != RTE_BE16(ETH_P_ALL) &&
inner_etype != RTE_BE16(ETH_P_IPV6))
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
items,
"inner eth_type conflict,"
" IPv6 is required");
inner_etype = RTE_BE16(ETH_P_IPV6);
} else if (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN) {
if (vlan_etype != RTE_BE16(ETH_P_ALL) &&
vlan_etype != RTE_BE16(ETH_P_IPV6))
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
items,
"vlan eth_type conflict,"
" IPv6 is required");
vlan_etype = RTE_BE16(ETH_P_IPV6);
} else {
if (outer_etype != RTE_BE16(ETH_P_ALL) &&
outer_etype != RTE_BE16(ETH_P_IPV6))
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
items,
"eth_type conflict,"
" IPv6 is required");
outer_etype = RTE_BE16(ETH_P_IPV6);
}
break;
case RTE_FLOW_ITEM_TYPE_UDP:
ret = mlx5_flow_validate_item_udp(items, item_flags,
next_protocol, error);
if (ret < 0)
return ret;
item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
MLX5_FLOW_LAYER_INNER_L4_UDP :
MLX5_FLOW_LAYER_OUTER_L4_UDP;
mask.udp = flow_tcf_item_mask
(items, &rte_flow_item_udp_mask,
&flow_tcf_mask_supported.udp,
&flow_tcf_mask_empty.udp,
sizeof(flow_tcf_mask_supported.udp),
error);
if (!mask.udp)
return -rte_errno;
/*
* Save the presumed outer UDP item for an extra check
* in case a tunnel item is found later in the list.
*/
if (!(item_flags & MLX5_FLOW_LAYER_TUNNEL))
outer_udp = items;
break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp
(items, item_flags,
next_protocol,
&flow_tcf_mask_supported.tcp,
error);
if (ret < 0)
return ret;
item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
MLX5_FLOW_LAYER_INNER_L4_TCP :
MLX5_FLOW_LAYER_OUTER_L4_TCP;
mask.tcp = flow_tcf_item_mask
(items, &rte_flow_item_tcp_mask,
&flow_tcf_mask_supported.tcp,
&flow_tcf_mask_empty.tcp,
sizeof(flow_tcf_mask_supported.tcp),
error);
if (!mask.tcp)
return -rte_errno;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
if (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, items,
"vxlan tunnel over vlan"
" is not supported");
ret = mlx5_flow_validate_item_vxlan(items,
item_flags, error);
if (ret < 0)
return ret;
item_flags |= MLX5_FLOW_LAYER_VXLAN;
mask.vxlan = flow_tcf_item_mask
(items, &rte_flow_item_vxlan_mask,
&flow_tcf_mask_supported.vxlan,
&flow_tcf_mask_empty.vxlan,
sizeof(flow_tcf_mask_supported.vxlan), error);
if (!mask.vxlan)
return -rte_errno;
if (mask.vxlan->vni[0] != 0xff ||
mask.vxlan->vni[1] != 0xff ||
mask.vxlan->vni[2] != 0xff)
return rte_flow_error_set
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK,
mask.vxlan,
"no support for partial or "
"empty mask on \"vxlan.vni\" field");
/*
* The VNI item implies a VXLAN tunnel. At least the outer
* destination UDP port must be specified without wildcards
* so that the kernel can select the virtual VXLAN device by
* port. An outer IPv4 or IPv6 item must also be specified
* (wildcards or even a zero mask are allowed) to let the
* driver know the tunnel IP version and process UDP traffic
* correctly.
*/
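/*
* Illustrative sketch (example values, not taken from real
* validation data): a minimal pattern satisfying the requirements
* above is
*
*   ETH / IPV4 / UDP (dst port fully masked, e.g. 4789) /
*   VXLAN (vni fully masked) / END
*
* i.e. the outer UDP destination port is specified without
* wildcards and an outer IP item is present, while the VNI mask
* covers all 24 bits.
*/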
if (!(item_flags &
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"no outer IP pattern found"
" for vxlan tunnel");
if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"no outer UDP pattern found"
" for vxlan tunnel");
/*
* All items preceding the tunnel item become outer
* ones and we should do extra validation for them
* due to tc limitations for tunnel outer parameters.
* Currently only the outer UDP item requires an extra check;
* use the saved pointer instead of rescanning the item list.
*/
assert(outer_udp);
ret = flow_tcf_validate_vxlan_decap_udp
(outer_udp, error);
if (ret < 0)
return ret;
/* Reset L4 protocol for inner parameters. */
next_protocol = 0xff;
break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
items, "item not supported");
}
}
if ((action_flags & MLX5_TCF_PEDIT_ACTIONS) &&
(action_flags & MLX5_FLOW_ACTION_DROP))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"set action is not compatible with "
"drop action");
if ((action_flags & MLX5_TCF_PEDIT_ACTIONS) &&
!(action_flags & MLX5_FLOW_ACTION_PORT_ID))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"set action must be followed by "
"port_id action");
if (action_flags &
(MLX5_FLOW_ACTION_SET_IPV4_SRC | MLX5_FLOW_ACTION_SET_IPV4_DST)) {
if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"no ipv4 item found in"
" pattern");
}
if (action_flags &
(MLX5_FLOW_ACTION_SET_IPV6_SRC | MLX5_FLOW_ACTION_SET_IPV6_DST)) {
if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"no ipv6 item found in"
" pattern");
}
if (action_flags &
(MLX5_FLOW_ACTION_SET_TP_SRC | MLX5_FLOW_ACTION_SET_TP_DST)) {
if (!(item_flags &
(MLX5_FLOW_LAYER_OUTER_L4_UDP |
MLX5_FLOW_LAYER_OUTER_L4_TCP)))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"no TCP/UDP item found in"
" pattern");
}
/*
* FW syndrome (0xA9C090):
* set_flow_table_entry: push vlan action fte in fdb can ONLY be
* forward to the uplink.
*/
if ((action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
(action_flags & MLX5_FLOW_ACTION_PORT_ID) &&
((struct mlx5_priv *)port_id_dev->data->dev_private)->representor)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
"vlan push can only be applied"
" when forwarding to uplink port");
/*
* FW syndrome (0x294609):
* set_flow_table_entry: modify/pop/push actions in fdb flow table
* are supported only while forwarding to vport.
*/
if ((action_flags & MLX5_TCF_VLAN_ACTIONS) &&
!(action_flags & MLX5_FLOW_ACTION_PORT_ID))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
"vlan actions are supported"
" only with port_id action");
if ((action_flags & MLX5_TCF_VXLAN_ACTIONS) &&
!(action_flags & MLX5_FLOW_ACTION_PORT_ID))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"vxlan actions are supported"
" only with port_id action");
if (!(action_flags & MLX5_TCF_FATE_ACTIONS))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
"no fate action is found");
if (action_flags &
(MLX5_FLOW_ACTION_SET_TTL | MLX5_FLOW_ACTION_DEC_TTL)) {
if (!(item_flags &
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"no IP found in pattern");
}
if (action_flags &
(MLX5_FLOW_ACTION_SET_MAC_SRC | MLX5_FLOW_ACTION_SET_MAC_DST)) {
if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
"no ethernet found in"
" pattern");
}
if ((action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP) &&
!(item_flags & MLX5_FLOW_LAYER_VXLAN))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"no VNI pattern found"
" for vxlan decap action");
if ((action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP) &&
(item_flags & MLX5_FLOW_LAYER_TUNNEL))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"vxlan encap not supported"
" for tunneled traffic");
return 0;
}
/**
* Calculate maximum size of memory for flow items of Linux TC flower.
*
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* Pointer to the list of items.
* @param[out] action_flags
* Pointer to the detected actions.
*
* @return
* Maximum size of memory for items.
*/
static int
flow_tcf_get_items_size(const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
uint64_t *action_flags)
{
int size = 0;
size += SZ_NLATTR_STRZ_OF("flower") +
SZ_NLATTR_TYPE_OF(uint16_t) + /* Outer ether type. */
SZ_NLATTR_NEST + /* TCA_OPTIONS. */
SZ_NLATTR_TYPE_OF(uint32_t); /* TCA_CLS_FLAGS_SKIP_SW. */
if (attr->group > 0)
size += SZ_NLATTR_TYPE_OF(uint32_t); /* TCA_CHAIN. */
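/*
* Rough worked example (for illustration only, assumed pattern):
* for "eth / ipv4 / udp / end" the loop below adds approximately
*
*   SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) * 4    dst/src MAC and masks
*   + SZ_NLATTR_TYPE_OF(uint8_t)             IP proto
*   + SZ_NLATTR_TYPE_OF(uint32_t) * 4        dst/src IP and masks
*   + SZ_NLATTR_TYPE_OF(uint8_t)             IP proto (UDP case)
*   + SZ_NLATTR_TYPE_OF(uint16_t) * 4        dst/src ports and masks
*
* on top of the fixed "flower"/TCA_OPTIONS overhead counted above.
* The result is an upper bound; unused attributes simply leave
* slack in the allocated Netlink buffer.
*/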
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
break;
case RTE_FLOW_ITEM_TYPE_ETH:
size += SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) * 4;
/* dst/src MAC addr and mask. */
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
size += SZ_NLATTR_TYPE_OF(uint16_t) +
/* VLAN Ether type. */
SZ_NLATTR_TYPE_OF(uint8_t) + /* VLAN prio. */
SZ_NLATTR_TYPE_OF(uint16_t); /* VLAN ID. */
break;
case RTE_FLOW_ITEM_TYPE_IPV4: {
const struct rte_flow_item_ipv4 *ipv4 = items->mask;
size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
SZ_NLATTR_TYPE_OF(uint32_t) * 4;
/* dst/src IP addr and mask. */
if (ipv4 && ipv4->hdr.time_to_live)
size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
if (ipv4 && ipv4->hdr.type_of_service)
size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
break;
}
case RTE_FLOW_ITEM_TYPE_IPV6: {
const struct rte_flow_item_ipv6 *ipv6 = items->mask;
size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN) * 4;
/* dst/src IP addr and mask. */
if (ipv6 && ipv6->hdr.hop_limits)
size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
if (ipv6 && (rte_be_to_cpu_32(ipv6->hdr.vtc_flow) &
(0xfful << IPV6_HDR_TC_SHIFT)))
size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
break;
}
case RTE_FLOW_ITEM_TYPE_UDP:
size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
SZ_NLATTR_TYPE_OF(uint16_t) * 4;
/* dst/src port and mask. */
break;
case RTE_FLOW_ITEM_TYPE_TCP:
size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
SZ_NLATTR_TYPE_OF(uint16_t) * 4;
/* dst/src port and mask. */
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
size += SZ_NLATTR_TYPE_OF(uint32_t);
/*
* There might be no VXLAN decap action in the action
* list, nonetheless a VXLAN tunnel flow requires the
* decap structure to be correctly applied to the VXLAN
* device, so set the flag to create the structure. The
* translation routine will not put the decap action in
* the Netlink message if there is no actual action in
* the list.
*/
*action_flags |= MLX5_FLOW_ACTION_VXLAN_DECAP;
break;
default:
DRV_LOG(WARNING,
"unsupported item %p type %d,"
" items must be validated before flow creation",
(const void *)items, items->type);
break;
}
}
return size;
}
/**
* Calculate the size of memory needed to store the VXLAN
* encapsulation related items in the Netlink message buffer.
* The item list is specified by the RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
* action and should be validated beforehand.
*
* @param[in] action
* RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP action object.
* List of pattern items to scan data from.
*
* @return
* The size of the part of the Netlink message buffer needed to
* store the VXLAN encapsulation item attributes.
*/
static int
flow_tcf_vxlan_encap_size(const struct rte_flow_action *action)
{
const struct rte_flow_item *items;
int size = 0;
assert(action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP);
assert(action->conf);
items = ((const struct rte_flow_action_vxlan_encap *)
action->conf)->definition;
assert(items);
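/*
* For illustration only (assumed encap definition): a typical
* "eth / ipv4 / udp / vxlan / end" definition list is counted
* below roughly as
*
*   0                                        eth (no attribute needed)
*   + SZ_NLATTR_DATA_OF(IPV4_ADDR_LEN) * 2   src/dst addresses
*   + SZ_NLATTR_TYPE_OF(uint16_t) * 2        dst port (+ src port if set)
*   + SZ_NLATTR_TYPE_OF(uint32_t)            VNI/key id
*
* plus optional TTL/TOS attributes when the corresponding mask
* fields are non-zero.
*/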
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
case RTE_FLOW_ITEM_TYPE_ETH:
/* This item does not require message buffer. */
break;
case RTE_FLOW_ITEM_TYPE_IPV4: {
const struct rte_flow_item_ipv4 *ipv4 = items->mask;
size += SZ_NLATTR_DATA_OF(IPV4_ADDR_LEN) * 2;
if (ipv4 && ipv4->hdr.time_to_live)
size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
if (ipv4 && ipv4->hdr.type_of_service)
size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
break;
}
case RTE_FLOW_ITEM_TYPE_IPV6: {
const struct rte_flow_item_ipv6 *ipv6 = items->mask;
size += SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN) * 2;
if (ipv6 && ipv6->hdr.hop_limits)
size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
if (ipv6 && (rte_be_to_cpu_32(ipv6->hdr.vtc_flow) &
(0xfful << IPV6_HDR_TC_SHIFT)))
size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
break;
}
case RTE_FLOW_ITEM_TYPE_UDP: {
const struct rte_flow_item_udp *udp = items->mask;
size += SZ_NLATTR_TYPE_OF(uint16_t);
if (!udp || udp->hdr.src_port != RTE_BE16(0x0000))
size += SZ_NLATTR_TYPE_OF(uint16_t);
break;
}
case RTE_FLOW_ITEM_TYPE_VXLAN:
size += SZ_NLATTR_TYPE_OF(uint32_t);
break;
default:
assert(false);
DRV_LOG(WARNING,
"unsupported item %p type %d,"
" items must be validated"
" before flow creation",
(const void *)items, items->type);
return 0;
}
}
return size;
}
/**
* Calculate maximum size of memory for flow actions of Linux TC flower and
* extract specified actions.
*
* @param[in] actions
* Pointer to the list of actions.
* @param[out] action_flags
* Pointer to the detected actions.
*
* @return
* Maximum size of memory for actions.
*/
static int
flow_tcf_get_actions_and_size(const struct rte_flow_action actions[],
uint64_t *action_flags)
{
int size = 0;
uint64_t flags = *action_flags;
size += SZ_NLATTR_NEST; /* TCA_FLOWER_ACT. */
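/*
* Example (illustrative): a single RTE_FLOW_ACTION_TYPE_PORT_ID
* action is accounted below as
*
*   SZ_NLATTR_NEST                           na_act_index
*   + SZ_NLATTR_STRZ_OF("mirred")            action kind name
*   + SZ_NLATTR_NEST                         TCA_ACT_OPTIONS
*   + SZ_NLATTR_TYPE_OF(struct tc_mirred)    action parameters
*
* added on top of the TCA_FLOWER_ACT nest counted just above.
*/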
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
size += SZ_NLATTR_NEST + /* na_act_index. */
SZ_NLATTR_STRZ_OF("mirred") +
SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
SZ_NLATTR_TYPE_OF(struct tc_mirred);
flags |= MLX5_FLOW_ACTION_PORT_ID;
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
size += SZ_NLATTR_NEST + /* na_act_index. */
SZ_NLATTR_STRZ_OF("gact") +
SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
SZ_NLATTR_TYPE_OF(struct tc_gact);
flags |= MLX5_FLOW_ACTION_JUMP;
break;
case RTE_FLOW_ACTION_TYPE_DROP:
size += SZ_NLATTR_NEST + /* na_act_index. */
SZ_NLATTR_STRZ_OF("gact") +
SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
SZ_NLATTR_TYPE_OF(struct tc_gact);
flags |= MLX5_FLOW_ACTION_DROP;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
break;
case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
goto action_of_vlan;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
goto action_of_vlan;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
goto action_of_vlan;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
goto action_of_vlan;
action_of_vlan:
size += SZ_NLATTR_NEST + /* na_act_index. */
SZ_NLATTR_STRZ_OF("vlan") +
SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
SZ_NLATTR_TYPE_OF(struct tc_vlan) +
SZ_NLATTR_TYPE_OF(uint16_t) +
/* VLAN protocol. */
SZ_NLATTR_TYPE_OF(uint16_t) + /* VLAN ID. */
SZ_NLATTR_TYPE_OF(uint8_t); /* VLAN prio. */
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
size += SZ_NLATTR_NEST + /* na_act_index. */
SZ_NLATTR_STRZ_OF("tunnel_key") +
SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
SZ_NLATTR_TYPE_OF(uint8_t);
size += SZ_NLATTR_TYPE_OF(struct tc_tunnel_key);
size += flow_tcf_vxlan_encap_size(actions) +
RTE_ALIGN_CEIL /* preceding encap params. */
(sizeof(struct flow_tcf_vxlan_encap),
MNL_ALIGNTO);
flags |= MLX5_FLOW_ACTION_VXLAN_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
size += SZ_NLATTR_NEST + /* na_act_index. */
SZ_NLATTR_STRZ_OF("tunnel_key") +
SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
SZ_NLATTR_TYPE_OF(uint8_t);
size += SZ_NLATTR_TYPE_OF(struct tc_tunnel_key);
size += RTE_ALIGN_CEIL /* preceding decap params. */
(sizeof(struct flow_tcf_vxlan_decap),
MNL_ALIGNTO);
flags |= MLX5_FLOW_ACTION_VXLAN_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
case RTE_FLOW_ACTION_TYPE_SET_TTL:
case RTE_FLOW_ACTION_TYPE_DEC_TTL:
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
size += flow_tcf_get_pedit_actions_size(&actions,
&flags);
break;
default:
DRV_LOG(WARNING,
"unsupported action %p type %d,"
" items must be validated before flow creation",
(const void *)actions, actions->type);
break;
}
}
*action_flags = flags;
return size;
}
/**
* Prepare a flow object for Linux TC flower. It calculates the maximum size of
* memory required, allocates the memory, initializes the Netlink message
* headers and sets a unique TC message handle.
*
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* Pointer to the list of items.
* @param[in] actions
* Pointer to the list of actions.
* @param[out] error
* Pointer to the error structure.
*
* @return
* Pointer to mlx5_flow object on success,
* otherwise NULL and rte_errno is set.
*/
static struct mlx5_flow *
flow_tcf_prepare(const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
size_t size = RTE_ALIGN_CEIL
(sizeof(struct mlx5_flow),
alignof(struct flow_tcf_tunnel_hdr)) +
MNL_ALIGN(sizeof(struct nlmsghdr)) +
MNL_ALIGN(sizeof(struct tcmsg));
struct mlx5_flow *dev_flow;
uint64_t action_flags = 0;
struct nlmsghdr *nlh;
struct tcmsg *tcm;
uint8_t *sp, *tun = NULL;
size += flow_tcf_get_items_size(attr, items, &action_flags);
size += flow_tcf_get_actions_and_size(actions, &action_flags);
dev_flow = rte_zmalloc(__func__, size, MNL_ALIGNTO);
if (!dev_flow) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"not enough memory to create E-Switch flow");
return NULL;
}
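/*
* Resulting buffer layout (sketch, alignment padding omitted):
*
*   dev_flow -> +---------------------------------+
*               | struct mlx5_flow                |
*               +---------------------------------+ <- flow_tcf_tunnel_hdr
*               | VXLAN encap/decap parameters    |    alignment (tunnel
*               | (present for tunnel actions)    |    actions only)
*               +---------------------------------+ <- MNL_ALIGNTO
*               | struct nlmsghdr                 |
*               | struct tcmsg                    |
*               | Netlink attributes (flower)     |
*               +---------------------------------+
*/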
sp = (uint8_t *)(dev_flow + 1);
if (action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP) {
sp = RTE_PTR_ALIGN
(sp, alignof(struct flow_tcf_tunnel_hdr));
tun = sp;
sp += RTE_ALIGN_CEIL
(sizeof(struct flow_tcf_vxlan_encap),
MNL_ALIGNTO);
#ifndef NDEBUG
size -= RTE_ALIGN_CEIL
(sizeof(struct flow_tcf_vxlan_encap),
MNL_ALIGNTO);
#endif
} else if (action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP) {
sp = RTE_PTR_ALIGN
(sp, alignof(struct flow_tcf_tunnel_hdr));
tun = sp;
sp += RTE_ALIGN_CEIL
(sizeof(struct flow_tcf_vxlan_decap),
MNL_ALIGNTO);
#ifndef NDEBUG
size -= RTE_ALIGN_CEIL
(sizeof(struct flow_tcf_vxlan_decap),
MNL_ALIGNTO);
#endif
} else {
sp = RTE_PTR_ALIGN(sp, MNL_ALIGNTO);
}
nlh = mnl_nlmsg_put_header(sp);
tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
*dev_flow = (struct mlx5_flow){
.tcf = (struct mlx5_flow_tcf){
#ifndef NDEBUG
.nlsize = size - RTE_ALIGN_CEIL
(sizeof(struct mlx5_flow),
alignof(struct flow_tcf_tunnel_hdr)),
#endif
.tunnel = (struct flow_tcf_tunnel_hdr *)tun,
.nlh = nlh,
.tcm = tcm,
},
};
if (action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP)
dev_flow->tcf.tunnel->type = FLOW_TCF_TUNACT_VXLAN_DECAP;
else if (action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP)
dev_flow->tcf.tunnel->type = FLOW_TCF_TUNACT_VXLAN_ENCAP;
return dev_flow;
}
/**
* Make adjustments for supporting count actions.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in] dev_flow
* Pointer to mlx5_flow.
* @param[out] error
* Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_tcf_translate_action_count(struct rte_eth_dev *dev __rte_unused,
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
struct rte_flow *flow = dev_flow->flow;
if (!flow->counter) {
flow->counter = flow_tcf_counter_new();
if (!flow->counter)
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"cannot get counter"
" context.");
}
return 0;
}
/**
* Convert VXLAN VNI to 32-bit integer.
*
* @param[in] vni
* VXLAN VNI in 24-bit wire format.
*
* @return
* VXLAN VNI as a 32-bit integer value in network endianness.
*/
static inline rte_be32_t
vxlan_vni_as_be32(const uint8_t vni[3])
{
union {
uint8_t vni[4];
rte_be32_t dword;
} ret = {
.vni = { 0, vni[0], vni[1], vni[2] },
};
return ret.dword;
}
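/*
* Usage note (illustrative example values): for vni = {0x12, 0x34, 0x56}
* the union above yields the bytes 00 12 34 56, i.e. the 24-bit VNI
* left-padded to 32 bits in network byte order, so the result can be
* written directly into a 32-bit Netlink attribute carrying the tunnel
* key id.
*/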
/**
* Helper function to process RTE_FLOW_ITEM_TYPE_ETH entry in configuration
* of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the MAC address fields
* in the encapsulation parameters structure. The item must be prevalidated;
* no validation checks are performed by this function.
*
* @param[in] spec
* RTE_FLOW_ITEM_TYPE_ETH entry specification.
* @param[in] mask
* RTE_FLOW_ITEM_TYPE_ETH entry mask.
* @param[out] encap
* Structure to fill the gathered MAC address data.
*/
static void
flow_tcf_parse_vxlan_encap_eth(const struct rte_flow_item_eth *spec,
const struct rte_flow_item_eth *mask,
struct flow_tcf_vxlan_encap *encap)
{
/* Item must be validated before. No redundant checks. */
assert(spec);
if (!mask || !memcmp(&mask->dst,
&rte_flow_item_eth_mask.dst,
sizeof(rte_flow_item_eth_mask.dst))) {
/*
* Ethernet addresses are not supported by
* tc as tunnel_key parameters. The destination
* address needed to form the encap packet
* header is retrieved by the kernel from
* implicit sources (ARP table, etc.);
* address masks are not supported at all.
*/
encap->eth.dst = spec->dst;
encap->mask |= FLOW_TCF_ENCAP_ETH_DST;
}
if (!mask || !memcmp(&mask->src,
&rte_flow_item_eth_mask.src,
sizeof(rte_flow_item_eth_mask.src))) {
/*
* Ethernet addresses are not supported by
* tc as tunnel_key parameters. Source ethernet
* address is ignored anyway.
*/
encap->eth.src = spec->src;
encap->mask |= FLOW_TCF_ENCAP_ETH_SRC;
}
}
/**
* Helper function to process RTE_FLOW_ITEM_TYPE_IPV4 entry in configuration
* of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the IPV4 address fields
* in the encapsulation parameters structure. The item must be prevalidated;
* no validation checks are performed by this function.
*
* @param[in] spec
* RTE_FLOW_ITEM_TYPE_IPV4 entry specification.
* @param[in] mask
* RTE_FLOW_ITEM_TYPE_IPV4 entry mask.
* @param[out] encap
* Structure to fill the gathered IPV4 address data.
*/
static void
flow_tcf_parse_vxlan_encap_ipv4(const struct rte_flow_item_ipv4 *spec,
const struct rte_flow_item_ipv4 *mask,
struct flow_tcf_vxlan_encap *encap)
{
/* Item must be validated before. No redundant checks. */
assert(spec);
encap->ipv4.dst = spec->hdr.dst_addr;
encap->ipv4.src = spec->hdr.src_addr;
encap->mask |= FLOW_TCF_ENCAP_IPV4_SRC |
FLOW_TCF_ENCAP_IPV4_DST;
if (mask && mask->hdr.type_of_service) {
encap->mask |= FLOW_TCF_ENCAP_IP_TOS;
encap->ip_tos = spec->hdr.type_of_service;
}
if (mask && mask->hdr.time_to_live) {
encap->mask |= FLOW_TCF_ENCAP_IP_TTL;
encap->ip_ttl_hop = spec->hdr.time_to_live;
}
}
/**
* Helper function to process RTE_FLOW_ITEM_TYPE_IPV6 entry in configuration
* of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the IPV6 address fields
* in the encapsulation parameters structure. The item must be prevalidated;
* no validation checks are performed by this function.
*
* @param[in] spec
* RTE_FLOW_ITEM_TYPE_IPV6 entry specification.
* @param[in] mask
* RTE_FLOW_ITEM_TYPE_IPV6 entry mask.
* @param[out] encap
* Structure to fill the gathered IPV6 address data.
*/
static void
flow_tcf_parse_vxlan_encap_ipv6(const struct rte_flow_item_ipv6 *spec,
const struct rte_flow_item_ipv6 *mask,
struct flow_tcf_vxlan_encap *encap)
{
/* Item must be validated before. No redundant checks. */
assert(spec);
memcpy(encap->ipv6.dst, spec->hdr.dst_addr, IPV6_ADDR_LEN);
memcpy(encap->ipv6.src, spec->hdr.src_addr, IPV6_ADDR_LEN);
encap->mask |= FLOW_TCF_ENCAP_IPV6_SRC |
FLOW_TCF_ENCAP_IPV6_DST;
if (mask) {
if ((rte_be_to_cpu_32(mask->hdr.vtc_flow) >>
IPV6_HDR_TC_SHIFT) & 0xff) {
encap->mask |= FLOW_TCF_ENCAP_IP_TOS;
encap->ip_tos = (rte_be_to_cpu_32
(spec->hdr.vtc_flow) >>
IPV6_HDR_TC_SHIFT) & 0xff;
}
if (mask->hdr.hop_limits) {
encap->mask |= FLOW_TCF_ENCAP_IP_TTL;
encap->ip_ttl_hop = spec->hdr.hop_limits;
}
}
}
/**
* Helper function to process RTE_FLOW_ITEM_TYPE_UDP entry in configuration
* of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the UDP port fields
* in the encapsulation parameters structure. The item must be prevalidated;
* no validation checks are performed by this function.
*
* @param[in] spec
* RTE_FLOW_ITEM_TYPE_UDP entry specification.
* @param[in] mask
* RTE_FLOW_ITEM_TYPE_UDP entry mask.
* @param[out] encap
* Structure to fill the gathered UDP port data.
*/
static void
flow_tcf_parse_vxlan_encap_udp(const struct rte_flow_item_udp *spec,
const struct rte_flow_item_udp *mask,
struct flow_tcf_vxlan_encap *encap)
{
assert(spec);
encap->udp.dst = spec->hdr.dst_port;
encap->mask |= FLOW_TCF_ENCAP_UDP_DST;
if (!mask || mask->hdr.src_port != RTE_BE16(0x0000)) {
encap->udp.src = spec->hdr.src_port;
encap->mask |= FLOW_TCF_ENCAP_UDP_SRC;
}
}
/**
* Helper function to process RTE_FLOW_ITEM_TYPE_VXLAN entry in configuration
* of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the VNI fields
* in the encapsulation parameters structure. The item must be prevalidated;
* no validation checks are performed by this function.
*
* @param[in] spec
* RTE_FLOW_ITEM_TYPE_VXLAN entry specification.
* @param[out] encap
* Structure to fill the gathered VNI address data.
*/
static void
flow_tcf_parse_vxlan_encap_vni(const struct rte_flow_item_vxlan *spec,
struct flow_tcf_vxlan_encap *encap)
{
/* Item must be validated beforehand. No redundant checks. */
assert(spec);
memcpy(encap->vxlan.vni, spec->vni, sizeof(encap->vxlan.vni));
encap->mask |= FLOW_TCF_ENCAP_VXLAN_VNI;
}
/**
* Populate consolidated encapsulation object from list of pattern items.
*
* Helper function to process configuration of action such as
* RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. The item list should be
* validated beforehand since there is no way to return a meaningful error.
*
* @param[in] action
* RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP action object.
* List of pattern items to gather data from.
* @param[out] encap
* Structure to fill with the gathered data.
*/
static void
flow_tcf_vxlan_encap_parse(const struct rte_flow_action *action,
struct flow_tcf_vxlan_encap *encap)
{
union {
const struct rte_flow_item_eth *eth;
const struct rte_flow_item_ipv4 *ipv4;
const struct rte_flow_item_ipv6 *ipv6;
const struct rte_flow_item_udp *udp;
const struct rte_flow_item_vxlan *vxlan;
} spec, mask;
const struct rte_flow_item *items;
assert(action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP);
assert(action->conf);
items = ((const struct rte_flow_action_vxlan_encap *)
action->conf)->definition;
assert(items);
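/*
* Typical input (illustrative, example values): the definition list
* of a RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP action such as
*
*   ETH / IPV4 (src/dst) / UDP (dst port 4789) / VXLAN (vni 100) / END
*
* is folded by the loop below into a single flow_tcf_vxlan_encap
* object with the corresponding FLOW_TCF_ENCAP_* bits set in
* encap->mask.
*/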
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
case RTE_FLOW_ITEM_TYPE_ETH:
mask.eth = items->mask;
spec.eth = items->spec;
flow_tcf_parse_vxlan_encap_eth(spec.eth, mask.eth,
encap);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
spec.ipv4 = items->spec;
mask.ipv4 = items->mask;
flow_tcf_parse_vxlan_encap_ipv4(spec.ipv4, mask.ipv4,
encap);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
spec.ipv6 = items->spec;
mask.ipv6 = items->mask;
flow_tcf_parse_vxlan_encap_ipv6(spec.ipv6, mask.ipv6,
encap);
break;
case RTE_FLOW_ITEM_TYPE_UDP:
mask.udp = items->mask;
spec.udp = items->spec;
flow_tcf_parse_vxlan_encap_udp(spec.udp, mask.udp,
encap);
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
spec.vxlan = items->spec;
flow_tcf_parse_vxlan_encap_vni(spec.vxlan, encap);
break;
default:
assert(false);
DRV_LOG(WARNING,
"unsupported item %p type %d,"
" items must be validated"
" before flow creation",
(const void *)items, items->type);
encap->mask = 0;
return;
}
}
}
/**
* Translate flow for Linux TC flower and construct Netlink message.
*
* @param[in] dev
* Pointer to the Ethernet device structure.
* @param[in, out] dev_flow
* Pointer to the sub flow.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* Pointer to the list of items.
* @param[in] actions
* Pointer to the list of actions.
* @param[out] error
* Pointer to the error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_tcf_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
union {
const struct rte_flow_item_port_id *port_id;
const struct rte_flow_item_eth *eth;
const struct rte_flow_item_vlan *vlan;
const struct rte_flow_item_ipv4 *ipv4;
const struct rte_flow_item_ipv6 *ipv6;
const struct rte_flow_item_tcp *tcp;
const struct rte_flow_item_udp *udp;
const struct rte_flow_item_vxlan *vxlan;
} spec, mask;
union {
const struct rte_flow_action_port_id *port_id;
const struct rte_flow_action_jump *jump;
const struct rte_flow_action_of_push_vlan *of_push_vlan;
const struct rte_flow_action_of_set_vlan_vid *
of_set_vlan_vid;
const struct rte_flow_action_of_set_vlan_pcp *
of_set_vlan_pcp;
} conf;
union {
struct flow_tcf_tunnel_hdr *hdr;
struct flow_tcf_vxlan_decap *vxlan;
} decap = {
.hdr = NULL,
};
union {
struct flow_tcf_tunnel_hdr *hdr;
struct flow_tcf_vxlan_encap *vxlan;
} encap = {
.hdr = NULL,
};
struct flow_tcf_ptoi ptoi[PTOI_TABLE_SZ_MAX(dev)];
struct nlmsghdr *nlh = dev_flow->tcf.nlh;
struct tcmsg *tcm = dev_flow->tcf.tcm;
uint32_t na_act_index_cur;
rte_be16_t inner_etype = RTE_BE16(ETH_P_ALL);
rte_be16_t outer_etype = RTE_BE16(ETH_P_ALL);