/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2017 Intel Corporation
*/
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"
/*
* High threshold controlling when to start sending XOFF frames. Must be at
* least 8 bytes less than receive packet buffer size. This value is in units
* of 1024 bytes.
*/
#define IXGBE_FC_HI 0x80
/*
* Low threshold controlling when to start sending XON frames. This value is
* in units of 1024 bytes.
*/
#define IXGBE_FC_LO 0x40
/* Default minimum inter-interrupt interval for EITR configuration */
#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT 0x79E
/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
/* Default value of max Rx queue number */
#define IXGBE_MAX_RX_QUEUE_NUM 128
#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */
#define IXGBE_MMW_SIZE_DEFAULT 0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14
#define IXGBE_MAX_RING_DESC 4096 /* replicate define from rxtx */
/*
* Default values for RX/TX configuration
*/
#define IXGBE_DEFAULT_RX_FREE_THRESH 32
#define IXGBE_DEFAULT_RX_PTHRESH 8
#define IXGBE_DEFAULT_RX_HTHRESH 8
#define IXGBE_DEFAULT_RX_WTHRESH 0
#define IXGBE_DEFAULT_TX_FREE_THRESH 32
#define IXGBE_DEFAULT_TX_PTHRESH 32
#define IXGBE_DEFAULT_TX_HTHRESH 0
#define IXGBE_DEFAULT_TX_WTHRESH 0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH CHAR_BIT
#define IXGBE_8_BIT_MASK UINT8_MAX
#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
#define IXGBE_HKEY_MAX_INDEX 10
/* Additional timesync values. */
#define NSEC_PER_SEC 1000000000L
#define IXGBE_INCVAL_10GB 0x66666666
#define IXGBE_INCVAL_1GB 0x40000000
#define IXGBE_INCVAL_100 0x50000000
#define IXGBE_INCVAL_SHIFT_10GB 28
#define IXGBE_INCVAL_SHIFT_1GB 24
#define IXGBE_INCVAL_SHIFT_100 21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24
#define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL
#define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000
#define DEFAULT_ETAG_ETYPE 0x893f
#define IXGBE_ETAG_ETYPE 0x00005084
#define IXGBE_ETAG_ETYPE_MASK 0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID 0x80000000
#define IXGBE_RAH_ADTYPE 0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK 0x00003fff
#define IXGBE_VMVIR_TAGA_MASK 0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG 0x00000004
#define IXGBE_VTEICR_MASK 0x07
#define IXGBE_EXVET_VET_EXT_SHIFT 16
#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000
static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static int ixgbe_dev_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
uint64_t *values, unsigned int n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
unsigned int size);
static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
const uint64_t *ids,
unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
uint16_t queue_id,
uint8_t stat_idx,
uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
size_t fw_size);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
enum rte_vlan_type vlan_type,
uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
int on);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
struct rte_intr_handle *handle);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
struct rte_pci_driver *drv);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
uint16_t queue, int on);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
uint8_t rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);
static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *filter);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter *filter);
static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
struct ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
struct rte_eth_dcb_info *dcb_info);
static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom);
static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
struct rte_dev_reg_info *regs);
static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
struct timespec *timestamp,
uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
const struct timespec *timestamp);
static void ixgbevf_dev_interrupt_handler(void *param);
static int ixgbe_dev_l2_tunnel_eth_type_conf
(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
static int ixgbe_dev_l2_tunnel_offload_set
(struct rte_eth_dev *dev,
struct rte_eth_l2_tunnel_conf *l2_tunnel,
uint32_t mask,
uint8_t en);
static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
/*
 * Define VF stats macros for registers that are not "cleared on read"
*/
#define UPDATE_VF_STAT(reg, last, cur) \
{ \
uint32_t latest = IXGBE_READ_REG(hw, reg); \
cur += (latest - last) & UINT_MAX; \
last = latest; \
}
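/*
 * Same idea for 36-bit counters split across two registers: the low 32 bits
 * come from 'lsb' and the upper bits from 'msb'. Adding 2^36 before masking
 * to 36 bits keeps the delta correct even if the counter wrapped between
 * readings.
 */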
#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur) \
{ \
u64 new_lsb = IXGBE_READ_REG(hw, lsb); \
u64 new_msb = IXGBE_READ_REG(hw, msb); \
u64 latest = ((new_msb << 32) | new_lsb); \
cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
last = latest; \
}
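/*
 * Helpers for the per-queue hardware VLAN-strip bitmap: set, clear or query
 * the bit corresponding to queue 'q' in hwstrip structure 'h'.
 */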
#define IXGBE_SET_HWSTRIP(h, q) do {\
uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] |= 1 << bit;\
} while (0)
#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] &= ~(1 << bit);\
} while (0)
#define IXGBE_GET_HWSTRIP(h, q, r) do {\
uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)
int ixgbe_logtype_init;
int ixgbe_logtype_driver;
/*
* The set of PCI devices this driver supports
*/
static const struct rte_pci_id pci_id_ixgbe_map[] = {
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
{ .vendor_id = 0, /* sentinel */ },
};
/*
* The set of PCI devices this driver supports (for 82599 VF)
*/
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
{ .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
.nb_max = IXGBE_MAX_RING_DESC,
.nb_min = IXGBE_MIN_RING_DESC,
.nb_align = IXGBE_RXD_ALIGN,
};
static const struct rte_eth_desc_lim tx_desc_lim = {
.nb_max = IXGBE_MAX_RING_DESC,
.nb_min = IXGBE_MIN_RING_DESC,
.nb_align = IXGBE_TXD_ALIGN,
.nb_seg_max = IXGBE_TX_MAX_SEG,
.nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};
static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.dev_configure = ixgbe_dev_configure,
.dev_start = ixgbe_dev_start,
.dev_stop = ixgbe_dev_stop,
.dev_set_link_up = ixgbe_dev_set_link_up,
.dev_set_link_down = ixgbe_dev_set_link_down,
.dev_close = ixgbe_dev_close,
.dev_reset = ixgbe_dev_reset,
.promiscuous_enable = ixgbe_dev_promiscuous_enable,
.promiscuous_disable = ixgbe_dev_promiscuous_disable,
.allmulticast_enable = ixgbe_dev_allmulticast_enable,
.allmulticast_disable = ixgbe_dev_allmulticast_disable,
.link_update = ixgbe_dev_link_update,
.stats_get = ixgbe_dev_stats_get,
.xstats_get = ixgbe_dev_xstats_get,
.xstats_get_by_id = ixgbe_dev_xstats_get_by_id,
.stats_reset = ixgbe_dev_stats_reset,
.xstats_reset = ixgbe_dev_xstats_reset,
.xstats_get_names = ixgbe_dev_xstats_get_names,
.xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
.fw_version_get = ixgbe_fw_version_get,
.dev_infos_get = ixgbe_dev_info_get,
.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
.mtu_set = ixgbe_dev_mtu_set,
.vlan_filter_set = ixgbe_vlan_filter_set,
.vlan_tpid_set = ixgbe_vlan_tpid_set,
.vlan_offload_set = ixgbe_vlan_offload_set,
.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
.rx_queue_start = ixgbe_dev_rx_queue_start,
.rx_queue_stop = ixgbe_dev_rx_queue_stop,
.tx_queue_start = ixgbe_dev_tx_queue_start,
.tx_queue_stop = ixgbe_dev_tx_queue_stop,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
.rx_queue_release = ixgbe_dev_rx_queue_release,
.rx_queue_count = ixgbe_dev_rx_queue_count,
.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
.rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
.tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.dev_led_on = ixgbe_dev_led_on,
.dev_led_off = ixgbe_dev_led_off,
.flow_ctrl_get = ixgbe_flow_ctrl_get,
.flow_ctrl_set = ixgbe_flow_ctrl_set,
.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
.mac_addr_add = ixgbe_add_rar,
.mac_addr_remove = ixgbe_remove_rar,
.mac_addr_set = ixgbe_set_default_mac_addr,
.uc_hash_table_set = ixgbe_uc_hash_table_set,
.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
.mirror_rule_set = ixgbe_mirror_rule_set,
.mirror_rule_reset = ixgbe_mirror_rule_reset,
.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
.reta_update = ixgbe_dev_rss_reta_update,
.reta_query = ixgbe_dev_rss_reta_query,
.rss_hash_update = ixgbe_dev_rss_hash_update,
.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
.filter_ctrl = ixgbe_dev_filter_ctrl,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
.rxq_info_get = ixgbe_rxq_info_get,
.txq_info_get = ixgbe_txq_info_get,
.timesync_enable = ixgbe_timesync_enable,
.timesync_disable = ixgbe_timesync_disable,
.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
.timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
.get_reg = ixgbe_get_regs,
.get_eeprom_length = ixgbe_get_eeprom_length,
.get_eeprom = ixgbe_get_eeprom,
.set_eeprom = ixgbe_set_eeprom,
.get_dcb_info = ixgbe_dev_get_dcb_info,
.timesync_adjust_time = ixgbe_timesync_adjust_time,
.timesync_read_time = ixgbe_timesync_read_time,
.timesync_write_time = ixgbe_timesync_write_time,
.l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
.l2_tunnel_offload_set = ixgbe_dev_l2_tunnel_offload_set,
.udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del,
.tm_ops_get = ixgbe_tm_ops_get,
};
/*
 * dev_ops for the virtual function; only the bare necessities for basic VF
 * operation are implemented
*/
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.dev_configure = ixgbevf_dev_configure,
.dev_start = ixgbevf_dev_start,
.dev_stop = ixgbevf_dev_stop,
.link_update = ixgbevf_dev_link_update,
.stats_get = ixgbevf_dev_stats_get,
.xstats_get = ixgbevf_dev_xstats_get,
.stats_reset = ixgbevf_dev_stats_reset,
.xstats_reset = ixgbevf_dev_stats_reset,
.xstats_get_names = ixgbevf_dev_xstats_get_names,
.dev_close = ixgbevf_dev_close,
.dev_reset = ixgbevf_dev_reset,
.allmulticast_enable = ixgbevf_dev_allmulticast_enable,
.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
.dev_infos_get = ixgbevf_dev_info_get,
.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
.mtu_set = ixgbevf_dev_set_mtu,
.vlan_filter_set = ixgbevf_vlan_filter_set,
.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
.vlan_offload_set = ixgbevf_vlan_offload_set,
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_release = ixgbe_dev_rx_queue_release,
.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
.rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
.tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
.mac_addr_add = ixgbevf_add_mac_addr,
.mac_addr_remove = ixgbevf_remove_mac_addr,
.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
.rxq_info_get = ixgbe_rxq_info_get,
.txq_info_get = ixgbe_txq_info_get,
.mac_addr_set = ixgbevf_set_default_mac_addr,
.get_reg = ixgbevf_get_regs,
.reta_update = ixgbe_dev_rss_reta_update,
.reta_query = ixgbe_dev_rss_reta_query,
.rss_hash_update = ixgbe_dev_rss_hash_update,
.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
};
/* Store statistics names and their offsets in the stats structure */
struct rte_ixgbe_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
unsigned offset;
};
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
{"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
{"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
{"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
{"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
{"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
{"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
{"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
{"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
{"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
{"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
{"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
{"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
{"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
{"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
{"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
prc1023)},
{"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
prc1522)},
{"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
{"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
{"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
{"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
{"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
{"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
{"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
{"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
{"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
{"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
{"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
{"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
{"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
{"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
{"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
{"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
{"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
ptc1023)},
{"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
ptc1522)},
{"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
{"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
{"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
{"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
{"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
fdirustat_add)},
{"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
fdirustat_remove)},
{"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
fdirfstat_fadd)},
{"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
fdirfstat_fremove)},
{"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
fdirmatch)},
{"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
fdirmiss)},
{"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
{"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
fclast)},
{"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
{"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
{"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
{"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
{"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
fcoe_noddp)},
{"rx_fcoe_no_direct_data_placement_ext_buff",
offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},
{"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
lxontxc)},
{"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
lxonrxc)},
{"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
lxofftxc)},
{"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
lxoffrxc)},
{"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};
#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
sizeof(rte_ixgbe_stats_strings[0]))
/* MACsec statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
out_pkts_untagged)},
{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
out_pkts_encrypted)},
{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
out_pkts_protected)},
{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
out_octets_encrypted)},
{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
out_octets_protected)},
{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
in_pkts_untagged)},
{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
in_pkts_badtag)},
{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
in_pkts_nosci)},
{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
in_pkts_unknownsci)},
{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
in_octets_decrypted)},
{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
in_octets_validated)},
{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
in_pkts_unchecked)},
{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
in_pkts_delayed)},
{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
in_pkts_late)},
{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
in_pkts_ok)},
{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
in_pkts_invalid)},
{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
in_pkts_notvalid)},
{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
in_pkts_unusedsa)},
{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
in_pkts_notusingsa)},
};
#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
sizeof(rte_ixgbe_macsec_strings[0]))
/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
{"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
{"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};
#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
sizeof(rte_ixgbe_rxq_strings[0]))
#define IXGBE_NB_RXQ_PRIO_VALUES 8
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
{"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
pxon2offc)},
};
#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
sizeof(rte_ixgbe_txq_strings[0]))
#define IXGBE_NB_TXQ_PRIO_VALUES 8
static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};
#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
sizeof(rte_ixgbevf_stats_strings[0]))
/**
* Atomically reads the link status information from global
* structure rte_eth_dev.
*
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer in which the link status is saved.
*
* @return
* - On success, zero.
* - On failure, negative value.
*/
static inline int
rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
struct rte_eth_link *link)
{
struct rte_eth_link *dst = link;
struct rte_eth_link *src = &(dev->data->dev_link);
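	/*
	 * The link struct is copied as a single 64-bit word; the cmpset uses
	 * the value just read from *dst as the expected value, so it only
	 * fails (returning -1 below) if the link status was changed
	 * concurrently between the read and the compare-and-set.
	 */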
if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
*(uint64_t *)src) == 0)
return -1;
return 0;
}
/**
* Atomically writes the link status information into global
* structure rte_eth_dev.
*
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to be written.
*
* @return
* - On success, zero.
* - On failure, negative value.
*/
static inline int
rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
struct rte_eth_link *link)
{
struct rte_eth_link *dst = &(dev->data->dev_link);
struct rte_eth_link *src = link;
if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
*(uint64_t *)src) == 0)
return -1;
return 0;
}
/*
* This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
*/
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
switch (hw->phy.type) {
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
case ixgbe_phy_sfp_passive_tyco:
case ixgbe_phy_sfp_passive_unknown:
return 1;
default:
return 0;
}
}
static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
uint32_t ctrl_ext;
int32_t status;
status = ixgbe_reset_hw(hw);
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
/* Set PF Reset Done bit so PF/VF Mail Ops can work */
ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
IXGBE_WRITE_FLUSH(hw);
if (status == IXGBE_ERR_SFP_NOT_PRESENT)
status = IXGBE_SUCCESS;
return status;
}
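/*
 * Re-enable the interrupt causes recorded in intr->mask by writing the mask
 * to the EIMS (Extended Interrupt Mask Set) register.
 */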
static inline void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
IXGBE_WRITE_FLUSH(hw);
}
/*
* This function is based on ixgbe_disable_intr() in base/ixgbe.h.
*/
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
PMD_INIT_FUNC_TRACE();
if (hw->mac.type == ixgbe_mac_82598EB) {
IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
} else {
IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
}
IXGBE_WRITE_FLUSH(hw);
}
/*
* This function resets queue statistics mapping registers.
* From Niantic datasheet, Initialization of Statistics section:
* "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
*/
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
uint32_t i;
for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
}
}
static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
uint16_t queue_id,
uint8_t stat_idx,
uint8_t is_rx)
{
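/* Each 32-bit RQSMR/TQSM register packs four 8-bit queue-to-stat-index
 * mapping fields; only the low 4 bits of each field carry the stat index.
 */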
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct ixgbe_stat_mapping_registers *stat_mappings =
IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
uint32_t qsmr_mask = 0;
uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
uint32_t q_map;
uint8_t n, offset;
if ((hw->mac.type != ixgbe_mac_82599EB) &&
(hw->mac.type != ixgbe_mac_X540) &&
(hw->mac.type != ixgbe_mac_X550) &&
(hw->mac.type != ixgbe_mac_X550EM_x) &&
(hw->mac.type != ixgbe_mac_X550EM_a))
return -ENOSYS;
PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
(int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
queue_id, stat_idx);
n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
return -EIO;
}
offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
/* Now clear any previous stat_idx set */
clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
if (!is_rx)
stat_mappings->tqsm[n] &= ~clearing_mask;
else
stat_mappings->rqsmr[n] &= ~clearing_mask;
q_map = (uint32_t)stat_idx;
q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
if (!is_rx)
stat_mappings->tqsm[n] |= qsmr_mask;
else
stat_mappings->rqsmr[n] |= qsmr_mask;
PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
(int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
queue_id, stat_idx);
PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
/* Now write the mapping in the appropriate register */
if (is_rx) {
PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
stat_mappings->rqsmr[n], n);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
} else {
PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
stat_mappings->tqsm[n], n);
IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
}
return 0;
}
static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
struct ixgbe_stat_mapping_registers *stat_mappings =
IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int i;
/* write whatever was in stat mapping table to the NIC */
for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
/* rx */
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
/* tx */
IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
}
}
static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
uint8_t i;
struct ixgbe_dcb_tc_config *tc;
uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
dcb_config->num_tcs.pg_tcs = dcb_max_tc;
dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
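	/*
	 * Spread bandwidth evenly over the traffic classes: each TC gets
	 * 100/8 = 12 percent, and odd-numbered TCs get one extra percent so
	 * that the shares sum to 100.
	 */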
for (i = 0; i < dcb_max_tc; i++) {
tc = &dcb_config->tc_config[i];
tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
(uint8_t)(100/dcb_max_tc + (i & 1));
tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
(uint8_t)(100/dcb_max_tc + (i & 1));
tc->pfc = ixgbe_dcb_pfc_disabled;
}
/* Initialize default user to priority mapping, UPx->TC0 */
tc = &dcb_config->tc_config[0];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
}
dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
dcb_config->pfc_mode_enable = false;
dcb_config->vt_mode = true;
dcb_config->round_robin_enable = false;
/* support all DCB capabilities in 82599 */
dcb_config->support.capabilities = 0xFF;
	/* We only support 4 TCs for X540 and X550 */
if (hw->mac.type == ixgbe_mac_X540 ||
hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a) {
dcb_config->num_tcs.pg_tcs = 4;
dcb_config->num_tcs.pfc_tcs = 4;
}
}
/*
* Ensure that all locks are released before first NVM or PHY access
*/
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
uint16_t mask;
/*
	 * The PHY lock should not fail at this early stage. If it does, it is
	 * because the application previously exited without releasing it, so
	 * force the release of the faulty lock. The common lock is released
	 * automatically by the swfw_sync function.
*/
mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
}
ixgbe_release_swfw_semaphore(hw, mask);
/*
	 * These locks are trickier since they are common to all ports; but
	 * swfw_sync retries for long enough (1 s) to be almost sure that, if
	 * the lock cannot be taken, it is due to an improper lock of the
	 * semaphore.
*/
mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
PMD_DRV_LOG(DEBUG, "SWFW common locks released");
}
ixgbe_release_swfw_semaphore(hw, mask);
}
/*
* This function is based on code in ixgbe_attach() in base/ixgbe.c.
* It returns 0 on success.
*/
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
struct ixgbe_dcb_config *dcb_config =
IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
struct ixgbe_bw_conf *bw_conf =
IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
uint32_t ctrl_ext;
uint16_t csum;
int diag, i;
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &ixgbe_eth_dev_ops;
eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
/*
* For secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
* RX and TX function.
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
struct ixgbe_tx_queue *txq;
		/* The Tx queue function in the primary process was set by the
		 * last queue initialized; note that the Tx queues may not have
		 * been initialized by the primary process yet.
		 */
if (eth_dev->data->tx_queues) {
txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
ixgbe_set_tx_function(eth_dev, txq);
} else {
/* Use default TX function if we get here */
PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
"Using default TX function.");
}
ixgbe_set_rx_function(eth_dev);
return 0;
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->allow_unsupported_sfp = 1;
/* Initialize the shared code (base driver) */
#ifdef RTE_LIBRTE_IXGBE_BYPASS
diag = ixgbe_bypass_init_shared_code(hw);
#else
diag = ixgbe_init_shared_code(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */
if (diag != IXGBE_SUCCESS) {
PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
return -EIO;
}
/* pick up the PCI bus settings for reporting later */
ixgbe_get_bus_info(hw);
/* Unlock any pending hardware semaphore */
ixgbe_swfw_lock_reset(hw);
#ifdef RTE_LIBRTE_SECURITY
	/* Initialize security_ctx only for the primary process */
if (ixgbe_ipsec_ctx_create(eth_dev))
return -ENOMEM;
#endif
	/* Initialize DCB configuration */
memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
ixgbe_dcb_init(hw, dcb_config);
	/* Set default hardware flow control settings */
hw->fc.requested_mode = ixgbe_fc_full;
hw->fc.current_mode = ixgbe_fc_full;
hw->fc.pause_time = IXGBE_FC_PAUSE;
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
hw->fc.low_water[i] = IXGBE_FC_LO;
hw->fc.high_water[i] = IXGBE_FC_HI;
}
hw->fc.send_xon = 1;
/* Make sure we have a good EEPROM before we read from it */
diag = ixgbe_validate_eeprom_checksum(hw, &csum);
if (diag != IXGBE_SUCCESS) {
PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
return -EIO;
}
#ifdef RTE_LIBRTE_IXGBE_BYPASS
diag = ixgbe_bypass_init_hw(hw);
#else
diag = ixgbe_init_hw(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */
/*
* Devices with copper phys will fail to initialise if ixgbe_init_hw()
* is called too soon after the kernel driver unbinding/binding occurs.
* The failure occurs in ixgbe_identify_phy_generic() for all devices,
	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
* also called. See ixgbe_identify_phy_82599(). The reason for the
	 * failure is not known, and only occurs when virtualis