DPDK logo

Elixir Cross Referencer

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#ifndef __ECORE_VF_PF_IF_H__
#define __ECORE_VF_PF_IF_H__

/* @@@ TBD MichalK this should be HSI? */
/* Number of entries in the RSS indirection table (see rss_ind_table[]) */
#define T_ETH_INDIRECTION_TABLE_SIZE 128
/* RSS hash key size in 32-bit words (40 bytes); @@@ TBD this should be HSI? */
#define T_ETH_RSS_KEY_SIZE 10

/***********************************************
 *
 * Common definitions for all HVs
 *
 **/
/* Resource quantities a VF asks for in the ACQUIRE request; the PF replies
 * with the amounts actually granted in struct pf_vf_resc.
 */
struct vf_pf_resc_request {
	u8  num_rxqs;
	u8  num_txqs;
	u8  num_sbs;		/* interrupt status blocks (see struct hw_sb_info) */
	u8  num_mac_filters;
	u8  num_vlan_filters;
	u8  num_mc_filters; /* No limit  so superfluous */
	u16 padding;
};

/* Per-status-block info handed to the VF during acquisition */
struct hw_sb_info {
	u16 hw_sb_id;    /* aka absolute igu id, used to ack the sb */
	u8 sb_qid;      /* used to update DHC for sb */
	u8 padding[5];
};

/***********************************************
 *
 * HW VF-PF channel definitions
 *
 * A.K.A VF-PF mailbox
 *
 **/
/* Size in bytes of the whole request/response mailbox buffer (see
 * struct tlv_buffer_size, which pads the tlv unions to this size).
 */
#define TLV_BUFFER_SIZE		1024

/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
	u16 type;	/* one of the CHANNEL_TLV_* values */
	u16 length;	/* TLV length in bytes; presumably includes this header
			 * -- confirm against the channel implementation
			 */
};

/* header of first vf->pf tlv carries the offset used to calculate response
 * buffer address
 */
struct vfpf_first_tlv {
	struct channel_tlv tl;
	u32 padding;
	u64 reply_address;	/* where the VF expects the PF to place the reply */
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
	u8 status;	/* outcome of the request; status codes defined elsewhere */
	u8 padding[3];
};

/* response tlv used for most tlvs; carries nothing beyond the status header */
struct pfvf_def_resp_tlv {
	struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
	struct channel_tlv tl;	/* type is CHANNEL_TLV_LIST_END */
	u8 padding[4];
};

/* Acquire - first message a VF sends; advertises its identity/capabilities
 * and requests resources (resc_request). The PF answers with
 * struct pfvf_acquire_resp_tlv.
 */
struct vfpf_acquire_tlv {
	struct vfpf_first_tlv first_tlv;

	struct vf_pf_vfdev_info {
#ifndef LINUX_REMOVE
	/* First bit was used on 8.7.x and 8.8.x versions, which had different
	 * FWs used but with the same fastpath HSI. As this was prior to the
	 * fastpath versioning, wanted to have ability to override fw matching
	 * and allow them to interact.
	 */
#endif
/* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI	(1 << 0)
#define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
		u64 capabilities;	/* VFPF_ACQUIRE_CAP_* flags */
		u8 fw_major;		/* VF driver's FW version */
		u8 fw_minor;
		u8 fw_revision;
		u8 fw_engineering;
		u32 driver_version;
		u16 opaque_fid; /* ME register value */
		u8 os_type; /* VFPF_ACQUIRE_OS_* value */
		u8 eth_fp_hsi_major;	/* VF's fastpath HSI version */
		u8 eth_fp_hsi_minor;
		u8 padding[3];
	} vfdev_info;

	struct vf_pf_resc_request resc_request;

	u64 bulletin_addr;	/* address of the VF's bulletin board buffer */
	u32 bulletin_size;	/* size of that buffer in bytes */
	u32 padding;
};

/* receive side scaling tlv (extended vport-update tlv) */
struct vfpf_vport_update_rss_tlv {
	struct channel_tlv	tl;

	/* which of the fields below carry a new value to apply */
	u8 update_rss_flags;
	#define VFPF_UPDATE_RSS_CONFIG_FLAG	  (1 << 0)
	#define VFPF_UPDATE_RSS_CAPS_FLAG	  (1 << 1)
	#define VFPF_UPDATE_RSS_IND_TABLE_FLAG	  (1 << 2)
	#define VFPF_UPDATE_RSS_KEY_FLAG	  (1 << 3)

	u8 rss_enable;
	u8 rss_caps;
	u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
	u32 rss_key[T_ETH_RSS_KEY_SIZE];
};

/* Address/length pair describing one statistics region */
struct pfvf_storm_stats {
	u32 address;
	u32 len;
};

/* Locations of the per-storm (M/P/T/U) statistics regions the PF
 * communicates to the VF during acquisition.
 */
struct pfvf_stats_info {
	struct pfvf_storm_stats mstats;
	struct pfvf_storm_stats pstats;
	struct pfvf_storm_stats tstats;
	struct pfvf_storm_stats ustats;
};

/* acquire response tlv - carries the allocated resources */
struct pfvf_acquire_resp_tlv {
	struct pfvf_tlv hdr;

	/* PF/device-wide information for the VF */
	struct pf_vf_pfdev_info {
		u32 chip_num;
		u32 mfw_ver;	/* MFW version */

		/* PF's FW version */
		u16 fw_major;
		u16 fw_minor;
		u16 fw_rev;
		u16 fw_eng;

		u64 capabilities;	/* PFVF_ACQUIRE_CAP_* flags */
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	(1 << 0)
#define PFVF_ACQUIRE_CAP_100G			(1 << 1) /* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the sanity
 * mechanism [version-based] and allow a VF that can't be supported to pass
 * the acquisition phase.
 * To overcome this, PFs now indicate that they're past that point and the new
 * VFs would fail probe on the older PFs that fail to do so.
 */
#ifndef LINUX_REMOVE
/* Said bug was in quest/serpens; Can't be certain no official release included
 * the bug since the fix arrived very late in the programs.
 */
#endif
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	(1 << 2)

		u16 db_size;	/* doorbell size -- presumably bytes; confirm */
		u8  indices_per_sb;
		u8 os_type;

		/* These should match the PF's ecore_dev values */
		u16 chip_rev;
		u8 dev_type;

		u8 padding;

		struct pfvf_stats_info stats_info;

		u8 port_mac[ETH_ALEN];	/* physical port MAC address */

		/* It's possible PF had to configure an older fastpath HSI
		 * [in case VF is newer than PF]. This is communicated back
		 * to the VF. It can also be used in case of error due to
		 * non-matching versions to shed light in VF about failure.
		 */
		u8 major_fp_hsi;
		u8 minor_fp_hsi;
	} pfdev_info;

	/* Resources actually granted to this VF */
	struct pf_vf_resc {
		/* in case of status NO_RESOURCE in message hdr, pf will fill
		 * this struct with suggested amount of resources for next
		 * acquire request
		 */
		#define PFVF_MAX_QUEUES_PER_VF         16
		#define PFVF_MAX_SBS_PER_VF            16
		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
		u8      hw_qid[PFVF_MAX_QUEUES_PER_VF];
		u8      cid[PFVF_MAX_QUEUES_PER_VF];

		u8      num_rxqs;
		u8      num_txqs;
		u8      num_sbs;
		u8      num_mac_filters;
		u8      num_vlan_filters;
		u8      num_mc_filters;
		u8      padding[2];
	} resc;

	u32 bulletin_size;	/* bulletin size the PF will actually use */
	u32 padding;
};

/* Response for START_RXQ/START_TXQ requests */
struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;
	u32 offset; /* offset to consumer/producer of queue */
	u8 padding[4];
};

/* Setup Queue - request the PF to start an Rx queue on the VF's behalf */
struct vfpf_start_rxq_tlv {
	struct vfpf_first_tlv	first_tlv;

	/* physical addresses */
	u64		rxq_addr;		/* Rx BD ring */
	u64		deprecated_sge_addr;	/* unused; kept for layout compat */
	u64		cqe_pbl_addr;		/* completion queue PBL */

	u16			cqe_pbl_size;
	u16			hw_sb;		/* status block to associate */
	u16			rx_qid;
	u16			hc_rate; /* desired interrupts per sec. */

	u16			bd_max_bytes;
	u16			stat_id;
	u8			sb_index;	/* index within the status block */
	u8			padding[3];

};

/* Request the PF to start a Tx queue on the VF's behalf */
struct vfpf_start_txq_tlv {
	struct vfpf_first_tlv	first_tlv;

	/* physical addresses */
	u64		pbl_addr;	/* Tx PBL */
	u16			pbl_size;
	u16			stat_id;
	u16			tx_qid;
	u16			hw_sb;	/* status block to associate */

	u32			flags; /* VFPF_QUEUE_FLG_X flags */
	u16			hc_rate; /* desired interrupts per sec. */
	u8			sb_index;	/* index within the status block */
	u8			padding[3];
};

/* Stop RX Queue - stops num_rxqs queues starting at rx_qid */
struct vfpf_stop_rxqs_tlv {
	struct vfpf_first_tlv	first_tlv;

	u16			rx_qid;		/* first queue to stop */
	u8			num_rxqs;
	u8			cqe_completion;	/* request CQE-based completion */
	u8			padding[4];
};

/* Stop TX Queues - stops num_txqs queues starting at tx_qid */
struct vfpf_stop_txqs_tlv {
	struct vfpf_first_tlv	first_tlv;

	u16			tx_qid;		/* first queue to stop */
	u8			num_txqs;
	u8			padding[5];
};

/* Update num_rxqs Rx queues starting at rx_qid, per the flags below */
struct vfpf_update_rxq_tlv {
	struct vfpf_first_tlv	first_tlv;

	/* unused; kept for layout compatibility */
	u64		deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];

	u16			rx_qid;		/* first queue to update */
	u8			num_rxqs;
	u8			flags;
	#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG	(1 << 0)
	#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG		(1 << 1)
	#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG	(1 << 2)

	u8			padding[4];
};

/* Set Queue Filters - one MAC/VLAN filter entry; the flags say which of
 * mac/vlan_tag are valid and whether to set or clear the filter.
 */
struct vfpf_q_mac_vlan_filter {
	u32 flags;
	#define VFPF_Q_FILTER_DEST_MAC_VALID    0x01
	#define VFPF_Q_FILTER_VLAN_TAG_VALID    0x02
	#define VFPF_Q_FILTER_SET_MAC		0x100   /* set/clear */

	u8  mac[ETH_ALEN];
	u16 vlan_tag;

	u8	padding[4];
};

/* Start a vport */
struct vfpf_vport_start_tlv {
	struct vfpf_first_tlv	first_tlv;

	u64		sb_addr[PFVF_MAX_SBS_PER_VF];	/* status block addresses */

	u32			tpa_mode;
	u16			dep1;	/* deprecated? -- name suggests so; confirm */
	u16			mtu;

	u8			vport_id;
	u8			inner_vlan_removal;

	u8			only_untagged;
	u8			max_buffers_per_cqe;

	u8			padding[4];
};

/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
/* Enable/disable vport Rx/Tx; update_* indicate which active_* apply */
struct vfpf_vport_update_activate_tlv {
	struct channel_tlv	tl;
	u8			update_rx;
	u8			update_tx;
	u8			active_rx;
	u8			active_tx;
};

/* Enable/disable tx-switching on the vport */
struct vfpf_vport_update_tx_switch_tlv {
	struct channel_tlv	tl;
	u8			tx_switching;
	u8			padding[3];
};

/* Enable/disable inner-vlan stripping on the vport */
struct vfpf_vport_update_vlan_strip_tlv {
	struct channel_tlv	tl;
	u8			remove_vlan;
	u8			padding[3];
};

/* Multicast configuration; bins[8] is a 512-bit bitmap -- presumably
 * approximate-match multicast hash bins; confirm against PF handler.
 */
struct vfpf_vport_update_mcast_bin_tlv {
	struct channel_tlv	tl;
	u8			padding[4];

	u64		bins[8];
};

/* Rx/Tx accept-mode filters; update_* indicate which *_accept_filter apply */
struct vfpf_vport_update_accept_param_tlv {
	struct channel_tlv tl;
	u8	update_rx_mode;
	u8	update_tx_mode;
	u8	rx_accept_filter;
	u8	tx_accept_filter;
};

/* Enable/disable acceptance of any-vlan traffic on the vport */
struct vfpf_vport_update_accept_any_vlan_tlv {
	struct channel_tlv tl;
	u8 update_accept_any_vlan_flg;	/* set if accept_any_vlan applies */
	u8 accept_any_vlan;

	u8 padding[2];
};

/* TPA (aggregation) parameters; the *_FLAG defines select which
 * fields/features are being updated.
 */
struct vfpf_vport_update_sge_tpa_tlv {
	struct channel_tlv	tl;

	u16			sge_tpa_flags;
	#define VFPF_TPA_IPV4_EN_FLAG	     (1 << 0)
	#define VFPF_TPA_IPV6_EN_FLAG        (1 << 1)
	#define VFPF_TPA_PKT_SPLIT_FLAG      (1 << 2)
	#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
	#define VFPF_TPA_GRO_CONSIST_FLAG    (1 << 4)

	u8			update_sge_tpa_flags;
	#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	   (1 << 0)
	#define VFPF_UPDATE_TPA_EN_FLAG    (1 << 1)
	#define VFPF_UPDATE_TPA_PARAM_FLAG (1 << 2)

	u8			max_buffers_per_cqe;

	u16			deprecated_sge_buff_size;	/* unused; layout compat */
	u16			tpa_max_size;
	u16			tpa_min_size_to_start;
	u16			tpa_min_size_to_cont;

	u8			tpa_max_aggs_num;
	u8			padding[7];

};

/* Primary tlv as a header for various extended tlvs for
 * various functionalities in vport update ramrod.
 */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};

/* Unicast filter add/remove request; opcode/type encodings are
 * defined elsewhere in the channel implementation.
 */
struct vfpf_ucast_filter_tlv {
	struct vfpf_first_tlv	first_tlv;

	u8			opcode;
	u8			type;

	u8			mac[ETH_ALEN];

	u16			vlan;
	u16			padding[3];
};

/* Pads the tlv unions below to the full mailbox buffer size */
struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};

/* All vf->pf request messages; tlv_buf_size forces the union to
 * TLV_BUFFER_SIZE bytes.
 */
union vfpf_tlvs {
	struct vfpf_first_tlv			first_tlv;
	struct vfpf_acquire_tlv			acquire;
	struct vfpf_start_rxq_tlv		start_rxq;
	struct vfpf_start_txq_tlv		start_txq;
	struct vfpf_stop_rxqs_tlv		stop_rxqs;
	struct vfpf_stop_txqs_tlv		stop_txqs;
	struct vfpf_update_rxq_tlv		update_rxq;
	struct vfpf_vport_start_tlv		start_vport;
	struct vfpf_vport_update_tlv		vport_update;
	struct vfpf_ucast_filter_tlv		ucast_filter;
	struct tlv_buffer_size			tlv_buf_size;
};

/* All pf->vf response messages; tlv_buf_size forces the union to
 * TLV_BUFFER_SIZE bytes.
 */
union pfvf_tlvs {
	struct pfvf_def_resp_tlv		default_resp;
	struct pfvf_acquire_resp_tlv		acquire_resp;
	struct tlv_buffer_size			tlv_buf_size;
	struct pfvf_start_queue_resp_tlv	queue_start;
};

/* This is a structure which is allocated in the VF, which the PF may update
 * when it deems it necessary to do so. The bulletin board is sampled
 * periodically by the VF. A copy per VF is maintained in the PF (to prevent
 * loss of data upon multiple updates (or the need for read modify write)).
 */
/* Bit indices into ecore_bulletin_content.valid_bitmap */
enum ecore_bulletin_bit {
	/* Alert the VF that a forced MAC was set by the PF */
	MAC_ADDR_FORCED = 0,

	/* The VF should not access the vfpf channel */
	VFPF_CHANNEL_INVALID = 1,

	/* Alert the VF that a forced VLAN was set by the PF */
	VLAN_ADDR_FORCED = 2,

	/* Indicate that `default_only_untagged' contains actual data */
	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,

	/* Alert the VF that suggested mac was sent by the PF.
	 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set
	 */
	VFPF_BULLETIN_MAC_ADDR = 5
};

/* Contents of the PF->VF bulletin board (see comment above
 * enum ecore_bulletin_bit). Layout is shared between PF and VF.
 */
struct ecore_bulletin_content {
	/* crc of structure to ensure is not in mid-update */
	u32 crc;

	u32 version;	/* incremented by the PF on each update */

	/* bitmap indicating which fields hold valid values;
	 * bit indices are enum ecore_bulletin_bit
	 */
	u64 valid_bitmap;

	/* used for MAC_ADDR or MAC_ADDR_FORCED */
	u8 mac[ETH_ALEN];

	/* If valid, 1 => only untagged Rx if no vlan is configured */
	u8 default_only_untagged;
	u8 padding;

	/* The following is a 'copy' of ecore_mcp_link_state,
	 * ecore_mcp_link_params and ecore_mcp_link_capabilities. Since it's
	 * possible the structs will increase further along the road we cannot
	 * have it here; Instead we need to have all of its fields.
	 */
	u8 req_autoneg;
	u8 req_autoneg_pause;
	u8 req_forced_rx;
	u8 req_forced_tx;
	u8 padding2[4];

	u32 req_adv_speed;
	u32 req_forced_speed;
	u32 req_loopback;
	u32 padding3;

	u8 link_up;
	u8 full_duplex;
	u8 autoneg;
	u8 autoneg_complete;
	u8 parallel_detection;
	u8 pfc_enabled;
	u8 partner_tx_flow_ctrl_en;
	u8 partner_rx_flow_ctrl_en;
	u8 partner_adv_pause;
	u8 sfp_tx_fault;
	u8 padding4[6];

	u32 speed;
	u32 partner_adv_speed;

	u32 capability_speed;

	/* Forced vlan */
	u16 pvid;
	u16 padding5;
};

/* VF-side handle for its bulletin board buffer */
struct ecore_bulletin {
	dma_addr_t phys;	/* DMA address of the buffer */
	struct ecore_bulletin_content *p_virt;	/* CPU-accessible mapping */
	u32 size;
};

/* TLV type values for channel_tlv.type. Values are assigned sequentially
 * and are part of the VF-PF wire contract -- do not reorder.
 */
enum {
/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/

	CHANNEL_TLV_NONE, /* ends tlv sequence */
	CHANNEL_TLV_ACQUIRE,
	CHANNEL_TLV_VPORT_START,
	CHANNEL_TLV_VPORT_UPDATE,
	CHANNEL_TLV_VPORT_TEARDOWN,
	CHANNEL_TLV_START_RXQ,
	CHANNEL_TLV_START_TXQ,
	CHANNEL_TLV_STOP_RXQS,
	CHANNEL_TLV_STOP_TXQS,
	CHANNEL_TLV_UPDATE_RXQ,
	CHANNEL_TLV_INT_CLEANUP,
	CHANNEL_TLV_CLOSE,
	CHANNEL_TLV_RELEASE,
	CHANNEL_TLV_LIST_END,
	CHANNEL_TLV_UCAST_FILTER,
	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
	CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
	CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
	CHANNEL_TLV_VPORT_UPDATE_MCAST,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
	CHANNEL_TLV_MAX,

	/* Required for iterating over vport-update tlvs.
	 * Will break in case non-sequential vport-update tlvs.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,

/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
};
/* Human-readable names for the TLV types above; defined in the channel code */
extern const char *ecore_channel_tlvs_string[];

#endif /* __ECORE_VF_PF_IF_H__ */