// bindgen-flags: --rust-target 1.0 --with-derive-hash --with-derive-partialeq --with-derive-eq --rustified-enum ".*"
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef unsigned long long uint64_t;
/**
* Simple flags are used for rte_eth_conf.rxmode.mq_mode.
*/
#define ETH_MQ_RX_RSS_FLAG 0x1
#define ETH_MQ_RX_DCB_FLAG 0x2
#define ETH_MQ_RX_VMDQ_FLAG 0x4
/* Definitions used for VMDQ and DCB functionality */
#define ETH_VMDQ_MAX_VLAN_FILTERS 64 /**< Maximum nb. of VMDQ vlan filters. */
#define ETH_DCB_NUM_USER_PRIORITIES 8 /**< Maximum nb. of DCB priorities. */
#define ETH_VMDQ_DCB_NUM_QUEUES 128 /**< Maximum nb. of VMDQ DCB queues. */
#define ETH_DCB_NUM_QUEUES 128 /**< Maximum nb. of DCB queues. */
/**
* A set of values to identify what method is to be used to route
* packets to multiple queues.
*/
enum rte_eth_rx_mq_mode {
/** None of DCB, RSS or VMDQ mode */
ETH_MQ_RX_NONE = 0,
/** For RX side, only RSS is on */
ETH_MQ_RX_RSS = ETH_MQ_RX_RSS_FLAG,
/** For RX side, only DCB is on. */
ETH_MQ_RX_DCB = ETH_MQ_RX_DCB_FLAG,
/** Both DCB and RSS are enabled */
ETH_MQ_RX_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG,
/** Only VMDQ, no RSS nor DCB */
ETH_MQ_RX_VMDQ_ONLY = ETH_MQ_RX_VMDQ_FLAG,
/** RSS mode with VMDQ */
ETH_MQ_RX_VMDQ_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG,
/** Use VMDQ+DCB to route traffic to queues */
ETH_MQ_RX_VMDQ_DCB = ETH_MQ_RX_VMDQ_FLAG | ETH_MQ_RX_DCB_FLAG,
/** Enable RSS, DCB and VMDQ together */
ETH_MQ_RX_VMDQ_DCB_RSS = ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_DCB_FLAG |
ETH_MQ_RX_VMDQ_FLAG,
};
/**
* A structure used to configure the RX features of an Ethernet port.
*/
struct rte_eth_rxmode {
/** The multi-queue packet distribution mode to be used, e.g. RSS. */
enum rte_eth_rx_mq_mode mq_mode;
uint32_t max_rx_pkt_len; /**< Only used if jumbo_frame enabled. */
uint16_t split_hdr_size; /**< hdr buf size (header_split enabled).*/
__extension__
uint16_t header_split : 1, /**< Header Split enable. */
hw_ip_checksum : 1, /**< IP/UDP/TCP checksum offload enable. */
hw_vlan_filter : 1, /**< VLAN filter enable. */
hw_vlan_strip : 1, /**< VLAN strip enable. */
hw_vlan_extend : 1, /**< Extended VLAN enable. */
jumbo_frame : 1, /**< Jumbo Frame Receipt enable. */
hw_strip_crc : 1, /**< Enable CRC stripping by hardware. */
enable_scatter : 1, /**< Enable scatter packets rx handler */
enable_lro : 1; /**< Enable LRO */
};
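/*
* Illustrative sketch only: one way an application might fill in this
* structure, enabling RSS distribution together with IP checksum offload and
* CRC stripping. The example_rxmode identifier and all values are
* placeholders, not taken from any real driver.
*/
static const struct rte_eth_rxmode example_rxmode = {
	.mq_mode        = ETH_MQ_RX_RSS,
	.max_rx_pkt_len = 1518, /* only used when jumbo_frame is set */
	.split_hdr_size = 0,
	.hw_ip_checksum = 1,    /* enable IP/UDP/TCP checksum offload */
	.hw_strip_crc   = 1,    /* let hardware strip the CRC */
};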
/**
* A set of values to identify what method is to be used to transmit
* packets using multi-TCs.
*/
enum rte_eth_tx_mq_mode {
ETH_MQ_TX_NONE = 0, /**< It is in neither DCB nor VT mode. */
ETH_MQ_TX_DCB, /**< For TX side, only DCB is on. */
ETH_MQ_TX_VMDQ_DCB, /**< For TX side, both DCB and VT are on. */
ETH_MQ_TX_VMDQ_ONLY, /**< Only VT on, no DCB */
};
/**
* A structure used to configure the TX features of an Ethernet port.
*/
struct rte_eth_txmode {
enum rte_eth_tx_mq_mode mq_mode; /**< TX multi-queues mode. */
/* For i40e specifically */
uint16_t pvid;
__extension__
uint8_t hw_vlan_reject_tagged : 1,
/**< If set, reject sending out tagged pkts */
hw_vlan_reject_untagged : 1,
/**< If set, reject sending out untagged pkts */
hw_vlan_insert_pvid : 1;
/**< If set, enable port based VLAN insertion */
};
/**
* A structure used to configure the Receive Side Scaling (RSS) feature
* of an Ethernet port.
* If not NULL, the *rss_key* pointer of the *rss_conf* structure points
* to an array holding the RSS key to use for hashing specific header
* fields of received packets. The length of this array should be indicated
* by *rss_key_len* below. Otherwise, a default random hash key is used by
* the device driver.
*
* The *rss_key_len* field of the *rss_conf* structure indicates the length
* in bytes of the array pointed to by *rss_key*. For compatibility, this
* length is only checked by i40e; other drivers assume a 40-byte key as before.
*
* The *rss_hf* field of the *rss_conf* structure indicates the different
* types of IPv4/IPv6 packets to which the RSS hashing must be applied.
* Supplying an *rss_hf* equal to zero disables the RSS feature.
*/
struct rte_eth_rss_conf {
uint8_t *rss_key; /**< If not NULL, 40-byte hash key. */
uint8_t rss_key_len; /**< hash key length in bytes. */
uint64_t rss_hf; /**< Hash functions to apply - see below. */
};
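/*
* Illustrative sketch only: an RSS configuration that keeps the driver's
* default 40-byte key (rss_key == NULL) and requests hashing on a
* caller-chosen set of packet types. The ETH_RSS_* flags that normally
* populate rss_hf are not part of this header, so a placeholder bitmask is
* used; a value of zero would disable RSS entirely.
*/
static const struct rte_eth_rss_conf example_rss_conf = {
	.rss_key     = 0,    /* NULL: let the driver use a default random key */
	.rss_key_len = 0,
	.rss_hf      = 0x3f, /* placeholder for an ETH_RSS_* bitmask */
};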
/**
* This enum indicates the possible number of traffic classes
* in DCB configurations.
*/
enum rte_eth_nb_tcs {
ETH_4_TCS = 4, /**< 4 TCs with DCB. */
ETH_8_TCS = 8 /**< 8 TCs with DCB. */
};
/**
* This enum indicates the possible number of queue pools
* in VMDQ configurations.
*/
enum rte_eth_nb_pools {
ETH_8_POOLS = 8, /**< 8 VMDq pools. */
ETH_16_POOLS = 16, /**< 16 VMDq pools. */
ETH_32_POOLS = 32, /**< 32 VMDq pools. */
ETH_64_POOLS = 64 /**< 64 VMDq pools. */
};
/**
* A structure used to configure the VMDQ+DCB feature
* of an Ethernet port.
*
* Using this feature, packets are routed to a pool of queues, based
* on the vlan id in the vlan tag, and then to a specific queue within
* that pool, using the user priority vlan tag field.
*
* A default pool may be used, if desired, to route all traffic which
* does not match the vlan filter rules.
*/
struct rte_eth_vmdq_dcb_conf {
enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools */
uint8_t enable_default_pool; /**< If non-zero, use a default pool */
uint8_t default_pool; /**< The default pool, if applicable */
uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
struct {
uint16_t vlan_id; /**< The vlan id of the received frame */
uint64_t pools; /**< Bitmask of pools for packet rx */
} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
/**< Selects a queue in a pool */
};
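/*
* Illustrative sketch only: a VMDQ+DCB setup with 16 pools in which frames
* tagged with VLAN 100 are steered to pool 0 and VLAN 200 to pool 1, while
* unmatched traffic falls back to a default pool. The VLAN IDs, pool bitmasks
* and the 8-entry priority-to-TC map are placeholder values.
*/
static const struct rte_eth_vmdq_dcb_conf example_vmdq_dcb_conf = {
	.nb_queue_pools      = ETH_16_POOLS,
	.enable_default_pool = 1,
	.default_pool        = 0,
	.nb_pool_maps        = 2,
	.pool_map = {
		{ .vlan_id = 100, .pools = 1ULL << 0 },
		{ .vlan_id = 200, .pools = 1ULL << 1 },
	},
	.dcb_tc = { 0, 0, 1, 1, 2, 2, 3, 3 }, /* map 8 user priorities to 4 TCs */
};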
/* This structure may be extended in future. */
struct rte_eth_dcb_rx_conf {
enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
/** Traffic class each UP mapped to. */
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
struct rte_eth_vmdq_dcb_tx_conf {
enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
/** Traffic class each UP mapped to. */
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
struct rte_eth_dcb_tx_conf {
enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
/** Traffic class each UP mapped to. */
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
};
struct rte_eth_vmdq_tx_conf {
enum rte_eth_nb_pools nb_queue_pools; /**< VMDq mode, 64 pools. */
};
struct rte_eth_vmdq_rx_conf {
enum rte_eth_nb_pools nb_queue_pools; /**< VMDq only mode, 8 or 64 pools */
uint8_t enable_default_pool; /**< If non-zero, use a default pool */
uint8_t default_pool; /**< The default pool, if applicable */
uint8_t enable_loop_back; /**< Enable VT loop back */
uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
uint32_t rx_mode; /**< Flags from ETH_VMDQ_ACCEPT_* */
struct {
uint16_t vlan_id; /**< The vlan id of the received frame */
uint64_t pools; /**< Bitmask of pools for packet rx */
} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
};
/**
* Flow Director setting modes: none, signature or perfect.
*/
enum rte_fdir_mode {
RTE_FDIR_MODE_NONE = 0, /**< Disable FDIR support. */
RTE_FDIR_MODE_SIGNATURE, /**< Enable FDIR signature filter mode. */
RTE_FDIR_MODE_PERFECT, /**< Enable FDIR perfect filter mode. */
RTE_FDIR_MODE_PERFECT_MAC_VLAN, /**< Enable FDIR filter mode - MAC VLAN. */
RTE_FDIR_MODE_PERFECT_TUNNEL, /**< Enable FDIR filter mode - tunnel. */
};
/**
* Memory space that can be configured to store Flow Director filters
* in the board memory.
*/
enum rte_fdir_pballoc_type {
RTE_FDIR_PBALLOC_64K = 0, /**< 64k. */
RTE_FDIR_PBALLOC_128K, /**< 128k. */
RTE_FDIR_PBALLOC_256K, /**< 256k. */
};
/**
* Select report mode of FDIR hash information in RX descriptors.
*/
enum rte_fdir_status_mode {
RTE_FDIR_NO_REPORT_STATUS = 0, /**< Never report FDIR hash. */
RTE_FDIR_REPORT_STATUS, /**< Only report FDIR hash for matching pkts. */
RTE_FDIR_REPORT_STATUS_ALWAYS, /**< Always report FDIR hash. */
};
/**
* A structure used to define the input for IPV4 flow
*/
struct rte_eth_ipv4_flow {
uint32_t src_ip; /**< IPv4 source address in big endian. */
uint32_t dst_ip; /**< IPv4 destination address in big endian. */
uint8_t tos; /**< Type of service to match. */
uint8_t ttl; /**< Time to live to match. */
uint8_t proto; /**< Protocol, next header in big endian. */
};
/**
* A structure used to define the input for IPV6 flow
*/
struct rte_eth_ipv6_flow {
uint32_t src_ip[4]; /**< IPv6 source address in big endian. */
uint32_t dst_ip[4]; /**< IPv6 destination address in big endian. */
uint8_t tc; /**< Traffic class to match. */
uint8_t proto; /**< Protocol, next header to match. */
uint8_t hop_limits; /**< Hop limits to match. */
};
/**
* A structure used to configure FDIR masks that are used by the device
* to match the various fields of RX packet headers.
*/
struct rte_eth_fdir_masks {
uint16_t vlan_tci_mask; /**< Bit mask for vlan_tci in big endian */
/** Bit mask for ipv4 flow in big endian. */
struct rte_eth_ipv4_flow ipv4_mask;
/** Bit mask for ipv6 flow in big endian. */
struct rte_eth_ipv6_flow ipv6_mask;
/** Bit mask for L4 source port in big endian. */
uint16_t src_port_mask;
/** Bit mask for L4 destination port in big endian. */
uint16_t dst_port_mask;
/** 6-bit mask covering the 6 bytes of the MAC address; bit 0 matches the
first byte on the wire. */
uint8_t mac_addr_byte_mask;
/** Bit mask for tunnel ID in big endian. */
uint32_t tunnel_id_mask;
uint8_t tunnel_type_mask; /**< 1 - Match tunnel type,
0 - Ignore tunnel type. */
};
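/*
* Illustrative sketch only: a mask set that matches the full VLAN TCI, the
* complete IPv4 source/destination addresses and the L4 ports, while ignoring
* the remaining fields. All-ones masks are endian-neutral; partial masks
* would have to be supplied in big endian as the field comments state.
*/
static const struct rte_eth_fdir_masks example_fdir_masks = {
	.vlan_tci_mask = 0xFFFF,
	.ipv4_mask = {
		.src_ip = 0xFFFFFFFF,
		.dst_ip = 0xFFFFFFFF,
	},
	.src_port_mask = 0xFFFF,
	.dst_port_mask = 0xFFFF,
};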
/**
* Payload type
*/
enum rte_eth_payload_type {
RTE_ETH_PAYLOAD_UNKNOWN = 0,
RTE_ETH_RAW_PAYLOAD,
RTE_ETH_L2_PAYLOAD,
RTE_ETH_L3_PAYLOAD,
RTE_ETH_L4_PAYLOAD,
RTE_ETH_PAYLOAD_MAX = 8,
};
#define RTE_ETH_FDIR_MAX_FLEXLEN 16 /**< Max length of flexbytes. */
#define RTE_ETH_INSET_SIZE_MAX 128 /**< Max length of input set. */
/**
* A structure used to select bytes extracted from the protocol layers into
* the flexible payload used for filtering.
*/
struct rte_eth_flex_payload_cfg {
enum rte_eth_payload_type type; /**< Payload type */
uint16_t src_offset[RTE_ETH_FDIR_MAX_FLEXLEN];
/**< Offsets in bytes from the beginning of the packet's payload:
src_offset[i] is the offset of flexbyte i in the original packet
payload. This value should be less than flex_payload_limit in
struct rte_eth_fdir_info. */
};
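/*
* Illustrative sketch only: a flexible-payload selection that extracts the
* first four bytes of the L4 payload as flexbytes 0..3. The offsets are
* placeholders and, as noted above, must stay below the flex_payload_limit
* reported in struct rte_eth_fdir_info.
*/
static const struct rte_eth_flex_payload_cfg example_flex_payload = {
	.type       = RTE_ETH_L4_PAYLOAD,
	.src_offset = { 0, 1, 2, 3 },
};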
/**
* A structure used to define FDIR masks for flexible payload
* for each flow type
*/
struct rte_eth_fdir_flex_mask {
uint16_t flow_type;
uint8_t mask[RTE_ETH_FDIR_MAX_FLEXLEN];
/**< Mask for the whole flexible payload */
};
/*
* A packet can be identified by hardware as one of several flow types.
* Different NICs may support different sets of flow types.
* The hardware identifies a packet with the deepest protocol layer it can
* recognize, and with exactly one flow type. For example, a packet identified
* as 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP' will not be reported as any other flow
* type, even though it is also an IPv4 packet.
* Note that the flow types are used to define RSS offload types in
* rte_ethdev.h.
*/
#define RTE_ETH_FLOW_UNKNOWN 0
#define RTE_ETH_FLOW_RAW 1
#define RTE_ETH_FLOW_IPV4 2
#define RTE_ETH_FLOW_FRAG_IPV4 3
#define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
#define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
#define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
#define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
#define RTE_ETH_FLOW_IPV6 8
#define RTE_ETH_FLOW_FRAG_IPV6 9
#define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
#define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
#define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
#define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
#define RTE_ETH_FLOW_L2_PAYLOAD 14
#define RTE_ETH_FLOW_IPV6_EX 15
#define RTE_ETH_FLOW_IPV6_TCP_EX 16
#define RTE_ETH_FLOW_IPV6_UDP_EX 17
#define RTE_ETH_FLOW_PORT 18
/**< Consider device port number as a flow differentiator */
#define RTE_ETH_FLOW_VXLAN 19 /**< VXLAN protocol based flow */
#define RTE_ETH_FLOW_GENEVE 20 /**< GENEVE protocol based flow */
#define RTE_ETH_FLOW_NVGRE 21 /**< NVGRE protocol based flow */
#define RTE_ETH_FLOW_MAX 22
/**
* A structure used to define all flexible payload related settings,
* including the flex payload and the flex masks.
*/
struct rte_eth_fdir_flex_conf {
uint16_t nb_payloads; /**< The number of following payload cfg */
uint16_t nb_flexmasks; /**< The number of following mask */
struct rte_eth_flex_payload_cfg flex_set[RTE_ETH_PAYLOAD_MAX];
/**< Flex payload configuration for each payload type */
struct rte_eth_fdir_flex_mask flex_mask[RTE_ETH_FLOW_MAX];
/**< Flex mask configuration for each flow type */
};
/**
* A structure used to configure the Flow Director (FDIR) feature
* of an Ethernet port.
*
* If mode is RTE_FDIR_MODE_NONE, the pballoc value is ignored.
*/
struct rte_fdir_conf {
enum rte_fdir_mode mode; /**< Flow Director mode. */
enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
enum rte_fdir_status_mode status; /**< How to report FDIR hash. */
/** RX queue of packets matching a "drop" filter in perfect mode. */
uint8_t drop_queue;
struct rte_eth_fdir_masks mask;
struct rte_eth_fdir_flex_conf flex_conf;
/**< Flex payload configuration. */
};
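/*
* Illustrative sketch only: a Flow Director configuration enabling
* perfect-filter mode with 64K of on-board filter memory, hash reporting for
* matching packets only, and RX queue 127 as the destination for packets
* hitting a "drop" filter. All values are placeholders.
*/
static const struct rte_fdir_conf example_fdir_conf = {
	.mode       = RTE_FDIR_MODE_PERFECT,
	.pballoc    = RTE_FDIR_PBALLOC_64K,
	.status     = RTE_FDIR_REPORT_STATUS,
	.drop_queue = 127,
};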
/**
* A structure used to enable/disable specific device interrupts.
*/
struct rte_intr_conf {
/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
uint16_t lsc;
/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
uint16_t rxq;
};
/**
* A structure used to configure an Ethernet port.
* Depending upon the RX multi-queue mode, extra advanced
* configuration settings may be needed.
*/
struct rte_eth_conf {
uint32_t link_speeds; /**< bitmap of ETH_LINK_SPEED_XXX of speeds to be
used. ETH_LINK_SPEED_FIXED disables link
autonegotiation, and a unique speed shall be
set. Otherwise, the bitmap defines the set of
speeds to be advertised. If the special value
ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
supported are advertised. */
struct rte_eth_rxmode rxmode; /**< Port RX configuration. */
struct rte_eth_txmode txmode; /**< Port TX configuration. */
uint32_t lpbk_mode; /**< Loopback operation mode. By default the value
is 0, meaning the loopback mode is disabled.
Read the datasheet of the given Ethernet controller
for details. The possible values of this field
are defined in the implementation of each driver. */
struct {
struct rte_eth_rss_conf rss_conf; /**< Port RSS configuration */
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
/**< Port vmdq+dcb configuration. */
struct rte_eth_dcb_rx_conf dcb_rx_conf;
/**< Port dcb RX configuration. */
struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
/**< Port vmdq RX configuration. */
} rx_adv_conf; /**< Port RX filtering configuration. */
union {
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
/**< Port vmdq+dcb TX configuration. */
struct rte_eth_dcb_tx_conf dcb_tx_conf;
/**< Port dcb TX configuration. */
struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
/**< Port vmdq TX configuration. */
} tx_adv_conf; /**< Port TX DCB configuration (union). */
/** Currently, Priority Flow Control (PFC) is supported; if DCB with PFC
is needed, this field must be set to ETH_DCB_PFC_SUPPORT. */
uint32_t dcb_capability_en;
struct rte_fdir_conf fdir_conf; /**< FDIR configuration. */
struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */
};
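/*
* Illustrative sketch only: a minimal port configuration combining the pieces
* above -- RSS-based RX distribution, plain TX mode, autonegotiated link speed
* (link_speeds == 0) and device interrupts left disabled. The rss_hf value is
* a placeholder for an ETH_RSS_* bitmask defined elsewhere; a real application
* would tune every field for its device.
*/
static const struct rte_eth_conf example_eth_conf = {
	.link_speeds = 0, /* ETH_LINK_SPEED_AUTONEG: advertise all supported speeds */
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_RSS,
		.max_rx_pkt_len = 1518,
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = 0,    /* default driver key */
			.rss_hf  = 0x3f, /* placeholder ETH_RSS_* bitmask */
		},
	},
	.intr_conf = {
		.lsc = 0,
		.rxq = 0,
	},
};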