/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cpuflags.h>
#include <rte_malloc.h>

#include "otx_zip.h"

static const struct rte_compressdev_capabilities
				octtx_zip_pmd_capabilities[] = {
	{	.algo = RTE_COMP_ALGO_DEFLATE,
		/* Deflate */
		.comp_feature_flags =	RTE_COMP_FF_HUFFMAN_FIXED |
					RTE_COMP_FF_HUFFMAN_DYNAMIC,
		/* Non-shareable priv_xform and stateless only */
		.window_size = {
				.min = 1,
				.max = 14,
				.increment = 1
				/* size supported 2^1 to 2^14 */
		},
	},
	RTE_COMP_END_OF_CAPABILITIES_LIST()
};

/*
 * Reset the stream to its default state for the next stateless operation
 */
static inline void
reset_stream(struct zip_stream *z_stream)
{
	union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);

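	/* Begin-of-file is set and end-of-file cleared so the next
	 * stateless op starts a fresh stream.
	 */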
	inst->s.bf = 1;
	inst->s.ef = 0;
}

int
zip_process_op(struct rte_comp_op *op,
		struct zipvf_qp *qp,
		struct zip_stream *zstrm)
{
	union zip_inst_s *inst = zstrm->inst;
	volatile union zip_zres_s *zresult = NULL;

	if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
			(op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
			(op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		ZIP_PMD_ERR("Segmented packets and out-of-range offsets are not supported\n");
		return 0;
	}

	zipvf_prepare_cmd_stateless(op, zstrm);

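	/* The engine posts its completion code into the result buffer;
	 * clear it before submission so the polling loop below can detect
	 * completion.
	 */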
	zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
	zresult->s.compcode = 0;

#ifdef ZIP_DBG
	zip_dump_instruction(inst);
#endif

	/* Submit zip command */
	zipvf_push_command(qp, (void *)inst);

	/* Busy-poll in sync mode until the hardware writes a non-zero
	 * completion code into the result buffer.
	 */
	do {
	} while (!zresult->s.compcode);

	if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
		op->status = RTE_COMP_OP_STATUS_SUCCESS;
	} else {
		/* Fatal error: nothing can be done except report it */
		ZIP_PMD_ERR("operation failed with error code:%d\n",
			zresult->s.compcode);
		if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
			op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
		else
			op->status = RTE_COMP_OP_STATUS_ERROR;
	}

	ZIP_PMD_INFO("Bytes written: %d\n", zresult->s.totalbyteswritten);

	/* Update op stats */
	switch (op->status) {
	case RTE_COMP_OP_STATUS_SUCCESS:
		op->consumed = zresult->s.totalbytesread;
	/* Fall-through */
	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
		op->produced = zresult->s.totalbyteswritten;
		break;
	default:
		ZIP_PMD_ERR("stats not updated for status:%d\n",
				op->status);
		break;
	}
	/* zstream is reset irrespective of result */
	reset_stream(zstrm);

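	/* Mark the result buffer as not-done so the next op's poll does not
	 * see a stale completion code.
	 */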
	zresult->s.compcode = ZIP_COMP_E_NOTDONE;
	return 0;
}

/** Parse xform parameters and set up a stream */
static int
zip_set_stream_parameters(struct rte_compressdev *dev,
			const struct rte_comp_xform *xform,
			struct zip_stream *z_stream)
{
	int ret;
	union zip_inst_s *inst;
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	void *res;

	/* Allocate resources required by a stream */
	ret = rte_mempool_get_bulk(vf->zip_mp,
			z_stream->bufs, MAX_BUFS_PER_STREAM);
	if (ret < 0)
		return -1;

	/* get one command buffer from pool and set up */
	inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
	res = z_stream->bufs[RES_BUF];

	memset(inst->u, 0, sizeof(inst->u));

	/* Set begin-of-file only for the first op of the stream */
	inst->s.bf = 1;

	if (xform->type == RTE_COMP_COMPRESS) {
		inst->s.op = ZIP_OP_E_COMP;

		switch (xform->compress.deflate.huffman) {
		case RTE_COMP_HUFFMAN_DEFAULT:
			inst->s.cc = ZIP_CC_DEFAULT;
			break;
		case RTE_COMP_HUFFMAN_FIXED:
			inst->s.cc = ZIP_CC_FIXED_HUFF;
			break;
		case RTE_COMP_HUFFMAN_DYNAMIC:
			inst->s.cc = ZIP_CC_DYN_HUFF;
			break;
		default:
			ret = -1;
			goto err;
		}

		switch (xform->compress.level) {
		case RTE_COMP_LEVEL_MIN:
			inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
			break;
		case RTE_COMP_LEVEL_MAX:
			inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
			break;
		case RTE_COMP_LEVEL_NONE:
			ZIP_PMD_ERR("Compression level not supported");
			ret = -1;
			goto err;
		default:
			/* For any level between min and max, choose the
			 * PMD default (medium).
			 */
			inst->s.ss = ZIP_COMP_E_LEVEL_MED;
			break;
		}
	} else if (xform->type == RTE_COMP_DECOMPRESS) {
		inst->s.op = ZIP_OP_E_DECOMP;
		/* from HRM,
		 * For DEFLATE decompression, [CC] must be 0x0.
		 * For decompression, [SS] must be 0x0
		 */
		inst->s.cc = 0;
		/* Speed bit should not be set for decompression */
		inst->s.ss = 0;
		/* A decompression context is needed only for STATEFUL
		 * operations; only STATELESS is supported, so skip
		 * setting the ctx pointer.
		 */

	} else {
		ZIP_PMD_ERR("xform type not supported\n");
		ret = -1;
		goto err;
	}

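	/* Point the instruction at the per-stream result buffer so the
	 * hardware can post completion status and byte counts there.
	 */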
	inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
	inst->s.res_ptr_ctl.s.length = 0;

	z_stream->inst = inst;
	z_stream->func = zip_process_op;

	return 0;

err:
	rte_mempool_put_bulk(vf->zip_mp,
			     (void *)&(z_stream->bufs[0]),
			     MAX_BUFS_PER_STREAM);

	return ret;
}

/** Configure device */
static int
zip_pmd_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	int nb_streams;
	char res_pool[RTE_MEMZONE_NAMESIZE];
	struct zip_vf *vf;
	struct rte_mempool *zip_buf_mp;

	if (!config || !dev)
		return -EIO;

	vf = (struct zip_vf *)(dev->data->dev_private);

	/* Create a pool with the maximum number of resources
	 * required by all streams.
	 */

	/* use common pool for non-shareable priv_xform and stream */
	nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;

	snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
		 dev->data->dev_id);

	/* TBD: should we use the per-core object cache for stream resources? */
	zip_buf_mp = rte_mempool_create(
			res_pool,
			nb_streams * MAX_BUFS_PER_STREAM,
			ZIP_BUF_SIZE,
			0,
			0,
			NULL,
			NULL,
			NULL,
			NULL,
			SOCKET_ID_ANY,
			0);

	if (zip_buf_mp == NULL) {
		ZIP_PMD_ERR(
			"Failed to create buf mempool octtx_zip_res_pool%u",
			dev->data->dev_id);
		return -1;
	}

	vf->zip_mp = zip_buf_mp;

	return 0;
}

/** Start device */
static int
zip_pmd_start(__rte_unused struct rte_compressdev *dev)
{
	return 0;
}

/** Stop device */
static void
zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
{

}

/** Close device */
static int
zip_pmd_close(struct rte_compressdev *dev)
{
	if (dev == NULL)
		return -1;

	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	rte_mempool_free(vf->zip_mp);

	return 0;
}

/** Get device statistics */
static void
zip_pmd_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
zip_pmd_stats_reset(struct rte_compressdev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

/** Get device info */
static void
zip_pmd_info_get(struct rte_compressdev *dev,
		struct rte_compressdev_info *dev_info)
{
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_name = dev->device->driver->name;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = octtx_zip_pmd_capabilities;
		dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
	}
}

/** Release queue pair */
static int
zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp != NULL) {
		zipvf_q_term(qp);

		if (qp->processed_pkts)
			rte_ring_free(qp->processed_pkts);

		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Create a ring to place processed packets on */
static struct rte_ring *
zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			ZIP_PMD_INFO("Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}

		ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

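	/* RING_F_EXACT_SZ guarantees a usable capacity of exactly ring_size
	 * entries even when ring_size is not a power of two.
	 */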
	return rte_ring_create(qp->name, ring_size, socket_id,
						RING_F_EXACT_SZ);
}

/** Setup a queue pair */
static int
zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct zipvf_qp *qp = NULL;
	struct zip_vf *vf;
	char *name;
	int ret;

	if (!dev)
		return -1;

	vf = (struct zip_vf *) (dev->data->dev_private);

	/* Reuse the queue pair if it has already been set up. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		ZIP_PMD_INFO("Using existing queue pair %d", qp_id);
		return 0;
	}

	name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
	if (name == NULL)
		return (-ENOMEM);
	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
		 "zip_pmd_%u_qp_%u",
		 dev->data->dev_id, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket(name, sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL) {
		rte_free(name);
		return (-ENOMEM);
	}

	qp->name = name;

	/* Create completion queue up to max_inflight_ops */
	qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
						max_inflight_ops, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->id = qp_id;
	qp->vf = vf;

	ret = zipvf_q_init(qp);
	if (ret < 0)
		goto qp_setup_cleanup;

	dev->data->queue_pairs[qp_id] = qp;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	return 0;

qp_setup_cleanup:
	/* rte_ring_free() and rte_free() both tolerate NULL */
	rte_ring_free(qp->processed_pkts);
	rte_free(qp->name);
	rte_free(qp);
	return -1;
}

static int
zip_pmd_stream_create(struct rte_compressdev *dev,
		const struct rte_comp_xform *xform, void **stream)
{
	int ret;
	struct zip_stream *strm = NULL;

	strm = rte_malloc(NULL,
			sizeof(struct zip_stream), 0);

	if (strm == NULL)
		return (-ENOMEM);

	ret = zip_set_stream_parameters(dev, xform, strm);
	if (ret < 0) {
		ZIP_PMD_ERR("failed configure xform parameters");
		rte_free(strm);
		return ret;
	}
	*stream = strm;
	return 0;
}

static int
zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
{
	struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
	struct zip_stream *z_stream;

	if (stream == NULL)
		return 0;

	z_stream = (struct zip_stream *)stream;

	/* Free resources back to pool */
	rte_mempool_put_bulk(vf->zip_mp,
				(void *)&(z_stream->bufs[0]),
				MAX_BUFS_PER_STREAM);

	/* Zero out the whole structure */
	memset(stream, 0, sizeof(struct zip_stream));
	rte_free(stream);

	return 0;
}


static uint16_t
zip_pmd_enqueue_burst_sync(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct zipvf_qp *qp = queue_pair;
	struct rte_comp_op *op;
	struct zip_stream *zstrm;
	int i, ret = 0;
	uint16_t enqd = 0;

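	/* Each op is processed synchronously: zstrm->func() polls until the
	 * hardware completes, then the finished op is placed on the
	 * completion ring for a later dequeue_burst.
	 */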
	for (i = 0; i < nb_ops; i++) {
		op = ops[i];

		if (op->op_type == RTE_COMP_OP_STATEFUL) {
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		} else {
			/* process stateless ops */
			zstrm = (struct zip_stream *)op->private_xform;
			if (unlikely(zstrm == NULL))
				op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			else
				ret = zstrm->func(op, qp, zstrm);
		}

		/* Whatever the outcome, place the op on the completion
		 * queue along with its status.
		 */
		if (!ret)
			ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);

		if (unlikely(ret < 0)) {
			/* Increment error count if the op failed to enqueue */
			qp->qp_stats.enqueue_err_count++;
		} else {
			qp->qp_stats.enqueued_count++;
			enqd++;
		}
	}
	return enqd;
}

static uint16_t
zip_pmd_dequeue_burst_sync(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct zipvf_qp *qp = queue_pair;

	unsigned int nb_dequeued = 0;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

static struct rte_compressdev_ops octtx_zip_pmd_ops = {
		.dev_configure		= zip_pmd_config,
		.dev_start		= zip_pmd_start,
		.dev_stop		= zip_pmd_stop,
		.dev_close		= zip_pmd_close,

		.stats_get		= zip_pmd_stats_get,
		.stats_reset		= zip_pmd_stats_reset,

		.dev_infos_get		= zip_pmd_info_get,

		.queue_pair_setup	= zip_pmd_qp_setup,
		.queue_pair_release	= zip_pmd_qp_release,

		.private_xform_create	= zip_pmd_stream_create,
		.private_xform_free	= zip_pmd_stream_free,
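		/* Stateless only: the stateful stream API is not implemented */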
		.stream_create		= NULL,
		.stream_free		= NULL
};

static int
zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	int ret = 0;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct rte_compressdev_pmd_init_params init_params = {
		"",
		rte_socket_id(),
	};

	ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
			(unsigned int)pci_dev->id.vendor_id,
			(unsigned int)pci_dev->id.device_id);

	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			    sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_create(compressdev_name,
		&pci_dev->device, sizeof(struct zip_vf), &init_params);
	if (compressdev == NULL) {
		ZIP_PMD_ERR("driver %s: create failed", init_params.name);
		return -ENODEV;
	}

	/*
	 * Initialize the VF hardware only in the primary process.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/*  create vf dev with given pmd dev id */
		ret = zipvf_create(compressdev);
		if (ret < 0) {
			ZIP_PMD_ERR("Device creation failed");
			rte_compressdev_pmd_destroy(compressdev);
			return ret;
		}
	}

	compressdev->dev_ops = &octtx_zip_pmd_ops;
	/* Register enqueue/dequeue burst functions for the data path */
	compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
	compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	return ret;
}

static int
zip_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_compressdev *compressdev;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];

	if (pci_dev == NULL) {
		ZIP_PMD_ERR("Invalid PCI device\n");
		return -EINVAL;
	}
	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
	if (compressdev == NULL)
		return -ENODEV;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (zipvf_destroy(compressdev) < 0)
			return -ENODEV;
	}
	return rte_compressdev_pmd_destroy(compressdev);
}

static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX_ZIPVF),
	},
	{
		.device_id = 0
	},
};

/**
 * Structure that represents a PCI driver
 */
static struct rte_pci_driver octtx_zip_pmd = {
	.id_table    = pci_id_octtx_zipvf_table,
	.drv_flags   = RTE_PCI_DRV_NEED_MAPPING,
	.probe       = zip_pci_probe,
	.remove      = zip_pci_remove,
};

RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
RTE_LOG_REGISTER(octtx_zip_logtype_driver, pmd.compress.octeontx, INFO);