<div dir="ltr">It looks like this did not get processed correctly by patchwork, or there is an issue with the patch files. <div><br></div><div><a href="https://patchwork.dpdk.org/project/dpdk/list/?series=&submitter=&state=&q=gdtc&archive=&delegate=">https://patchwork.dpdk.org/project/dpdk/list/?series=&submitter=&state=&q=gdtc&archive=&delegate=</a><br></div><div><br></div><div>Can you resubmit? It was not picked up by 3/4 of the CI labs.</div></div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Mon, Oct 14, 2024 at 4:16 AM Yong Zhang <<a href="mailto:zhang.yong25@zte.com.cn">zhang.yong25@zte.com.cn</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">Add rawdev dequeue operation for gdtc devices.<br>
<br>Signed-off-by: Yong Zhang <<a href="mailto:zhang.yong25@zte.com.cn" target="_blank">zhang.yong25@zte.com.cn</a>>
<br>---<br> drivers/raw/gdtc/gdtc_rawdev.c | 113 +++++++++++++++++++++++++++++++++<br> 1 file changed, 113 insertions(+)<br>
<br>diff --git a/drivers/raw/gdtc/gdtc_rawdev.c b/drivers/raw/gdtc/gdtc_rawdev.c<br>index 03f7cc1a8e..8e9543f402 100644<br>--- a/drivers/raw/gdtc/gdtc_rawdev.c<br>+++ b/drivers/raw/gdtc/gdtc_rawdev.c<br>@@ -88,6 +88,8 @@<br> #define LOW32_MASK 0xffffffff<br> #define LOW16_MASK 0xffff<br>
<br>+#define ZXDH_GDMA_TC_CNT_MAX 0x10000<br>+<br> #define IDX_TO_ADDR(addr, idx, t) \<br> ((t)((uintptr_t)(addr) + (idx) * sizeof(struct zxdh_gdma_buff_desc)))<br>
<br>@@ -526,6 +528,116 @@ zxdh_gdma_rawdev_enqueue_bufs(struct rte_rawdev *dev,<br>
<br> return count;<br> }<br>+<br>+static inline void<br>+zxdh_gdma_used_idx_update(struct zxdh_gdma_queue *queue, uint16_t cnt, uint8_t data_bd_err)<br>+{<br>+ uint16_t idx = 0;<br>+<br>+ if (queue->sw_ring.used_idx + cnt < queue->queue_size)<br>+ queue->sw_ring.used_idx += cnt;<br>+ else<br>+ queue->sw_ring.used_idx = queue->sw_ring.used_idx + cnt - queue->queue_size;<br>+<br>+ if (data_bd_err == 1) {<br>+ /* Update job status, the last job status is error */<br>+ if (queue->sw_ring.used_idx == 0)<br>+ idx = queue->queue_size - 1;<br>+ else<br>+ idx = queue->sw_ring.used_idx - 1;<br>+<br>+ queue->sw_ring.job[idx]->status = 1;<br>+ }<br>+}<br>+<br>+static int<br>+zxdh_gdma_rawdev_dequeue_bufs(struct rte_rawdev *dev,<br>+ __rte_unused struct rte_rawdev_buf **buffers,<br>+ uint32_t count,<br>+ rte_rawdev_obj_t context)<br>+{<br>+ struct zxdh_gdma_queue *queue = NULL;<br>+ struct zxdh_gdma_enqdeq *e_context = NULL;<br>+ uint16_t queue_id = 0;<br>+ uint32_t val = 0;<br>+ uint16_t tc_cnt = 0;<br>+ uint16_t diff_cnt = 0;<br>+ uint16_t i = 0;<br>+ uint16_t bd_idx = 0;<br>+ uint64_t next_bd_addr = 0;<br>+ uint8_t data_bd_err = 0;<br>+<br>+ if ((dev == NULL) || (context == NULL))<br>+ return -EINVAL;<br>+<br>+ e_context = (struct zxdh_gdma_enqdeq *)context;<br>+ queue_id = e_context->vq_id;<br>+ queue = zxdh_gdma_get_queue(dev, queue_id);<br>+ if ((queue == NULL) || (queue->enable == 0))<br>+ return -EINVAL;<br>+<br>+ if (queue->sw_ring.pend_cnt == 0)<br>+ goto deq_job;<br>+<br>+ /* Get data transmit count */<br>+ val = zxdh_gdma_read_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET);<br>+ tc_cnt = val & LOW16_MASK;<br>+ if (tc_cnt >= queue->tc_cnt)<br>+ diff_cnt = tc_cnt - queue->tc_cnt;<br>+ else<br>+ diff_cnt = tc_cnt + ZXDH_GDMA_TC_CNT_MAX - queue->tc_cnt;<br>+<br>+ queue->tc_cnt = tc_cnt;<br>+<br>+ /* Data transmit error, channel stopped */<br>+ if ((val & ZXDH_GDMA_ERR_STATUS) != 0) {<br>+ next_bd_addr = zxdh_gdma_read_reg(dev, queue_id, ZXDH_GDMA_LLI_L_OFFSET);<br>+ 
next_bd_addr |= ((uint64_t)zxdh_gdma_read_reg(dev, queue_id,<br>+ ZXDH_GDMA_LLI_H_OFFSET) << 32);<br>+ next_bd_addr = next_bd_addr << 6;<br>+ bd_idx = (next_bd_addr - queue->ring.ring_mem) / sizeof(struct zxdh_gdma_buff_desc);<br>+ if ((val & ZXDH_GDMA_SRC_DATA_ERR) || (val & ZXDH_GDMA_DST_ADDR_ERR)) {<br>+ diff_cnt++;<br>+ data_bd_err = 1;<br>+ }<br>+ ZXDH_PMD_LOG(INFO, "queue%d is err(0x%x) next_bd_idx:%u ll_addr:0x%"PRIx64" def user:0x%x",<br>+ queue_id, val, bd_idx, next_bd_addr, queue->user);<br>+<br>+ ZXDH_PMD_LOG(INFO, "Clean up error status");<br>+ val = ZXDH_GDMA_ERR_STATUS | ZXDH_GDMA_ERR_INTR_ENABLE;<br>+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET, val);<br>+<br>+ ZXDH_PMD_LOG(INFO, "Restart channel");<br>+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_XFERSIZE_OFFSET, 0);<br>+ zxdh_gdma_control_cal(&val, 0);<br>+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);<br>+ }<br>+<br>+ if (diff_cnt != 0) {<br>+ zxdh_gdma_used_idx_update(queue, diff_cnt, data_bd_err);<br>+ queue->sw_ring.deq_cnt += diff_cnt;<br>+ queue->sw_ring.pend_cnt -= diff_cnt;<br>+ }<br>+<br>+deq_job:<br>+ if (queue->sw_ring.deq_cnt == 0)<br>+ return 0;<br>+ else if (queue->sw_ring.deq_cnt < count)<br>+ count = queue->sw_ring.deq_cnt;<br>+<br>+ queue->sw_ring.deq_cnt -= count;<br>+<br>+ for (i = 0; i < count; i++) {<br>+ e_context->job[i] = queue->sw_ring.job[queue->sw_ring.deq_idx];<br>+ queue->sw_ring.job[queue->sw_ring.deq_idx] = NULL;<br>+ if (++queue->sw_ring.deq_idx >= queue->queue_size)<br>+ queue->sw_ring.deq_idx -= queue->queue_size;<br>+ }<br>+ queue->sw_ring.free_cnt += count;<br>+<br>+ return count;<br>+}<br>+<br> static const struct rte_rawdev_ops zxdh_gdma_rawdev_ops = {<br> .dev_info_get = zxdh_gdma_rawdev_info_get,<br> .dev_configure = zxdh_gdma_rawdev_configure,<br>@@ -540,6 +652,7 @@ static const struct rte_rawdev_ops zxdh_gdma_rawdev_ops = {<br> .attr_get = zxdh_gdma_rawdev_get_attr,<br>
<br> .enqueue_bufs = zxdh_gdma_rawdev_enqueue_bufs,<br>+ .dequeue_bufs = zxdh_gdma_rawdev_dequeue_bufs,<br> };<br>
<br> static int<br>--
<br>2.43.0<br></blockquote></div>