[dpdk-dev] [PATCH v2 11/11] net/octeontx_ep: Transmit data path function added

Ferruh Yigit <ferruh.yigit at intel.com>
Tue Jan 26 16:35:36 CET 2021


On 1/18/2021 9:36 AM, Nalla Pradeep wrote:
> 1. Packet transmit functions for both otx and otx2 are added.
> 2. Flushing the transmit (command) queue when pending commands exceed
>     the maximum allowed value (currently 16).
> 3. Scatter-gather support when the packet spans multiple buffers.
> 
> Signed-off-by: Nalla Pradeep <pnalla at marvell.com>
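
Regarding (2): the flush hunk is not in the quoted context, so to make sure
we are reading it the same way, here is a minimal sketch of what I understand
the threshold logic to be (the names 'OTX_EP_FLUSH_THRESHOLD', 'instr_pending'
and 'doorbell_reg' are placeholders of mine, not necessarily the driver's own
identifiers):

	#define OTX_EP_FLUSH_THRESHOLD 16	/* "maximum allowed" from the log */

	static inline void
	otx_ep_maybe_flush_iq(struct otx_ep_instr_queue *iq)
	{
		if (iq->instr_pending < OTX_EP_FLUSH_THRESHOLD)
			return;

		/* Order the queued command writes before ringing the doorbell. */
		rte_wmb();
		rte_write64(iq->instr_pending, iq->doorbell_reg);
		iq->instr_pending = 0;
	}

Keeping the threshold a compile-time constant keeps the hot path down to a
single compare.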

<...>

> +uint16_t
> +otx_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
> +{
> +	struct otx_ep_instr_64B iqcmd;
> +	struct otx_ep_instr_queue *iq;
> +	struct otx_ep_device *otx_ep;
> +	struct rte_mbuf *m;
> +
> +	uint32_t iqreq_type, sgbuf_sz;
> +	int dbell, index, count = 0;
> +	unsigned int pkt_len, i;
> +	int gather, gsz;
> +	void *iqreq_buf;
> +	uint64_t dptr;
> +
> +	iq = (struct otx_ep_instr_queue *)tx_queue;
> +	otx_ep = iq->otx_ep_dev;
> +
> +	/* if (!otx_ep->started || !otx_ep->linkup) {
> +	 *	goto xmit_fail;
> +	 * }
> +	 */

Please drop the commented-out code.

> +
> +	iqcmd.ih.u64 = 0;
> +	iqcmd.pki_ih3.u64 = 0;
> +	iqcmd.irh.u64 = 0;
> +
> +	/* ih invars */
> +	iqcmd.ih.s.fsz = OTX_EP_FSZ;
> +	iqcmd.ih.s.pkind = otx_ep->pkind; /* The SDK decided PKIND value */
> +
> +	/* pki ih3 invars */
> +	iqcmd.pki_ih3.s.w = 1;
> +	iqcmd.pki_ih3.s.utt = 1;
> +	iqcmd.pki_ih3.s.tagtype = ORDERED_TAG;
> +	/* sl will be sizeof(pki_ih3) */
> +	iqcmd.pki_ih3.s.sl = OTX_EP_FSZ + OTX_CUST_DATA_LEN;
> +
> +	/* irh invars */
> +	iqcmd.irh.s.opcode = OTX_EP_NW_PKT_OP;
> +
> +	for (i = 0; i < nb_pkts; i++) {
> +		m = pkts[i];
> +		if (m->nb_segs == 1) {
> +			/* dptr */
> +			dptr = rte_mbuf_data_iova(m);
> +			pkt_len = rte_pktmbuf_data_len(m);
> +			iqreq_buf = m;
> +			iqreq_type = OTX_EP_REQTYPE_NORESP_NET;
> +			gather = 0;
> +			gsz = 0;
> +		} else {
> +			struct otx_ep_buf_free_info *finfo;
> +			int j, frags, num_sg;
> +
> +			if (!(otx_ep->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
> +				goto xmit_fail;
> +
> +			finfo = (struct otx_ep_buf_free_info *)rte_malloc(NULL,
> +							sizeof(*finfo), 0);
> +			if (finfo == NULL) {
> +				otx_ep_err("free buffer alloc failed\n");
> +				goto xmit_fail;
> +			}
> +			num_sg = (m->nb_segs + 3) / 4;
> +			sgbuf_sz = sizeof(struct otx_ep_sg_entry) * num_sg;
> +			finfo->g.sg =
> +				rte_zmalloc(NULL, sgbuf_sz, OTX_EP_SG_ALIGN);
> +			if (finfo->g.sg == NULL) {
> +				rte_free(finfo);
> +				otx_ep_err("sg entry alloc failed\n");
> +				goto xmit_fail;
> +			}
> +			gather = 1;
> +			gsz = m->nb_segs;
> +			finfo->g.num_sg = num_sg;
> +			finfo->g.sg[0].ptr[0] = rte_mbuf_data_iova(m);
> +			set_sg_size(&finfo->g.sg[0], m->data_len, 0);
> +			pkt_len = m->data_len;
> +			finfo->mbuf = m;
> +
> +			frags = m->nb_segs - 1;
> +			j = 1;
> +			m = m->next;
> +			while (frags--) {
> +				finfo->g.sg[(j >> 2)].ptr[(j & 3)] =
> +						rte_mbuf_data_iova(m);
> +				set_sg_size(&finfo->g.sg[(j >> 2)],
> +						m->data_len, (j & 3));
> +				pkt_len += m->data_len;
> +				j++;
> +				m = m->next;
> +			}
> +			dptr = rte_mem_virt2iova(finfo->g.sg);
> +			iqreq_buf = finfo;
> +			iqreq_type = OTX_EP_REQTYPE_NORESP_GATHER;
> +			if (pkt_len > OTX_EP_MAX_PKT_SZ) {
> +				rte_free(finfo->g.sg);
> +				rte_free(finfo);
> +				otx_ep_err("failed\n");
> +				goto xmit_fail;
> +			}
> +		}
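
For anyone else going through the gather path: the 'j >> 2' / 'j & 3'
indexing implies that each otx_ep_sg_entry packs four buffer pointers plus
their lengths, i.e. mbuf segment j lands in slot (j & 3) of entry (j >> 2).
My reading of the layout, with illustrative field names (please check
against the actual header):

	/* Sketch only -- field names are my own, not copied from the driver. */
	struct otx_ep_sg_entry {
		union {
			uint16_t size[4];	/* length of each of the four segments */
			uint64_t size64;
		} u;
		uint64_t ptr[4];		/* IOVA of each segment's data */
	};

so 'num_sg = (nb_segs + 3) / 4' above is simply rounding up to whole entries.
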
> +		/* ih vars */
> +		iqcmd.ih.s.tlen = pkt_len + iqcmd.ih.s.fsz;
> +		iqcmd.ih.s.gather = gather;
> +		iqcmd.ih.s.gsz = gsz;
> +		/* PKI_IH3 vars */
> +		/* irh vars */
> +		/* irh.rlenssz = ; */

Ditto.

> +
> +		iqcmd.dptr = dptr;
> +		/* Swap FSZ(front data) here, to avoid swapping on
> +		 * OCTEON TX side rprt is not used so not swapping
> +		 */
> +		/* otx_ep_swap_8B_data(&iqcmd.rptr, 1); */

Ditto.

<...>

> +};
> +#define OTX_EP_64B_INSTR_SIZE	(sizeof(otx_ep_instr_64B))
> +

Is this macro used at all? If 'otx_ep_instr_64B' is only a struct tag (the
code above declares 'struct otx_ep_instr_64B iqcmd'), then
'sizeof(otx_ep_instr_64B)' without the 'struct' keyword would not even
compile at a use site. Please either use it or drop it.

