[dpdk-dev] [PATCH v2 20/44] ip_pipeline: add link object
Jasvinder Singh
jasvinder.singh at intel.com
Mon Mar 12 18:25:51 CET 2018
Add link object implementation to the application.
Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu at intel.com>
Signed-off-by: Jasvinder Singh <jasvinder.singh at intel.com>
Signed-off-by: Fan Zhang <roy.fan.zhang at intel.com>
---
examples/ip_pipeline/Makefile | 1 +
examples/ip_pipeline/cli.c | 122 ++++++++++++++++++
examples/ip_pipeline/link.c | 268 +++++++++++++++++++++++++++++++++++++++
examples/ip_pipeline/link.h | 63 +++++++++
examples/ip_pipeline/main.c | 8 ++
examples/ip_pipeline/meson.build | 1 +
6 files changed, 463 insertions(+)
create mode 100644 examples/ip_pipeline/link.c
create mode 100644 examples/ip_pipeline/link.h
diff --git a/examples/ip_pipeline/Makefile b/examples/ip_pipeline/Makefile
index fca28c5..3dab932 100644
--- a/examples/ip_pipeline/Makefile
+++ b/examples/ip_pipeline/Makefile
@@ -7,6 +7,7 @@ APP = ip_pipeline
# all source are stored in SRCS-y
SRCS-y := cli.c
SRCS-y += conn.c
+SRCS-y += link.c
SRCS-y += main.c
SRCS-y += mempool.c
SRCS-y += parser.c
diff --git a/examples/ip_pipeline/cli.c b/examples/ip_pipeline/cli.c
index 6fe3725..221b716 100644
--- a/examples/ip_pipeline/cli.c
+++ b/examples/ip_pipeline/cli.c
@@ -10,6 +10,7 @@
#include <rte_common.h>
#include "cli.h"
+#include "link.h"
#include "mempool.h"
#include "parser.h"
@@ -110,6 +111,122 @@ cmd_mempool(char **tokens,
}
}
+/**
+ * link <link_name>
+ * dev <device_name> | port <port_id>
+ * rxq <n_queues> <queue_size> <mempool_name>
+ * txq <n_queues> <queue_size>
+ * promiscuous on | off
+ * [rss <qid_0> ... <qid_n>]
+*/
+static void
+cmd_link(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct link_params p;
+ struct link_params_rss rss;
+ struct link *link;
+ char *name;
+
+ if ((n_tokens < 13) || (n_tokens > 14 + LINK_RXQ_RSS_MAX)) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "dev") == 0)
+ p.dev_name = tokens[3];
+ else if (strcmp(tokens[2], "port") == 0) {
+ p.dev_name = NULL;
+
+ if (parser_read_uint16(&p.port_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "dev or port");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rxq") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rxq");
+ return;
+ }
+
+ if (parser_read_uint32(&p.rx.n_queues, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_queues");
+ return;
+ }
+ if (parser_read_uint32(&p.rx.queue_size, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "queue_size");
+ return;
+}
+
+ p.rx.mempool_name = tokens[7];
+
+ if (strcmp(tokens[8], "txq") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "txq");
+ return;
+ }
+
+ if (parser_read_uint32(&p.tx.n_queues, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_queues");
+ return;
+ }
+
+ if (parser_read_uint32(&p.tx.queue_size, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "queue_size");
+ return;
+ }
+
+ if (strcmp(tokens[11], "promiscuous") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "promiscuous");
+ return;
+ }
+
+ if (strcmp(tokens[12], "on") == 0)
+ p.promiscuous = 1;
+ else if (strcmp(tokens[12], "off") == 0)
+ p.promiscuous = 0;
+ else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "on or off");
+ return;
+ }
+
+ /* RSS */
+ p.rx.rss = NULL;
+ if (n_tokens > 13) {
+ uint32_t queue_id, i;
+
+ if (strcmp(tokens[13], "rss") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rss");
+ return;
+ }
+
+ p.rx.rss = &rss;
+
+ rss.n_queues = 0;
+ for (i = 14; i < n_tokens; i++) {
+ if (parser_read_uint32(&queue_id, tokens[i]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "queue_id");
+ return;
+ }
+
+ rss.queue_id[rss.n_queues] = queue_id;
+ rss.n_queues++;
+ }
+ }
+
+ link = link_create(name, &p);
+ if (link == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
void
cli_process(char *in, char *out, size_t out_size)
{
@@ -134,6 +251,11 @@ cli_process(char *in, char *out, size_t out_size)
return;
}
+ if (strcmp(tokens[0], "link") == 0) {
+ cmd_link(tokens, n_tokens, out, out_size);
+ return;
+ }
+
snprintf(out, out_size, MSG_CMD_UNKNOWN, tokens[0]);
}
diff --git a/examples/ip_pipeline/link.c b/examples/ip_pipeline/link.c
new file mode 100644
index 0000000..26ff41b
--- /dev/null
+++ b/examples/ip_pipeline/link.c
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_ethdev.h>
+
+#include "link.h"
+#include "mempool.h"
+
+static struct link_list link_list;
+
+int
+link_init(void)
+{
+ TAILQ_INIT(&link_list);
+
+ return 0;
+}
+
+struct link *
+link_find(const char *name)
+{
+ struct link *link;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(link, &link_list, node)
+ if (strcmp(link->name, name) == 0)
+ return link;
+
+ return NULL;
+}
+
/* Conservative baseline device configuration applied to every link:
 * single-queue RX (link_create() switches mq_mode/rss_hf to RSS when
 * the caller asks for it), all HW offloads off except CRC stripping,
 * default TX mode, loopback disabled. */
static struct rte_eth_conf port_conf_default = {
	.link_speeds = 0,
	.rxmode = {
		.mq_mode = ETH_MQ_RX_NONE,

		.header_split = 0, /* Header split */
		.hw_ip_checksum = 0, /* IP checksum offload */
		.hw_vlan_filter = 0, /* VLAN filtering */
		.hw_vlan_strip = 0, /* VLAN strip */
		.hw_vlan_extend = 0, /* Extended VLAN */
		.jumbo_frame = 0, /* Jumbo frame support */
		.hw_strip_crc = 1, /* CRC strip by HW */
		.enable_scatter = 0, /* Scattered packets RX handler */

		.max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
		.split_hdr_size = 0, /* Header split buffer size */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL, /* Use the driver's default key */
			.rss_key_len = 40,
			.rss_hf = 0, /* Overwritten by link_create() for RSS */
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.lpbk_mode = 0,
};
+
+#define RETA_CONF_SIZE (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+
+static int
+rss_setup(uint16_t port_id,
+ uint16_t reta_size,
+ struct link_params_rss *rss)
+{
+ struct rte_eth_rss_reta_entry64 reta_conf[RETA_CONF_SIZE];
+ uint32_t i;
+ int status;
+
+ /* RETA setting */
+ memset(reta_conf, 0, sizeof(reta_conf));
+
+ for (i = 0; i < reta_size; i++)
+ reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+
+ for (i = 0; i < reta_size; i++) {
+ uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
+ uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+ uint32_t rss_qs_pos = i % rss->n_queues;
+
+ reta_conf[reta_id].reta[reta_pos] =
+ (uint16_t) rss->queue_id[rss_qs_pos];
+ }
+
+ /* RETA update */
+ status = rte_eth_dev_rss_reta_update(port_id,
+ reta_conf,
+ reta_size);
+
+ return status;
+}
+
+struct link *
+link_create(const char *name, struct link_params *params)
+{
+ struct rte_eth_dev_info port_info;
+ struct rte_eth_conf port_conf;
+ struct link *link;
+ struct link_params_rss *rss;
+ struct mempool *mempool;
+ uint32_t cpu_id, i;
+ int status;
+ uint16_t port_id;
+
+ /* Check input params */
+ if ((name == NULL) ||
+ link_find(name) ||
+ (params == NULL) ||
+ (params->rx.n_queues == 0) ||
+ (params->rx.queue_size == 0) ||
+ (params->tx.n_queues == 0) ||
+ (params->tx.queue_size == 0))
+ return NULL;
+
+ port_id = params->port_id;
+ if (params->dev_name) {
+ status = rte_eth_dev_get_port_by_name(params->dev_name,
+ &port_id);
+
+ if (status)
+ return NULL;
+ } else
+ if (!rte_eth_dev_is_valid_port(port_id))
+ return NULL;
+
+ rte_eth_dev_info_get(port_id, &port_info);
+
+ mempool = mempool_find(params->rx.mempool_name);
+ if (mempool == NULL)
+ return NULL;
+
+ rss = params->rx.rss;
+ if (rss) {
+ if ((port_info.reta_size == 0) ||
+ (port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+ return NULL;
+
+ if ((rss->n_queues == 0) ||
+ (rss->n_queues >= LINK_RXQ_RSS_MAX))
+ return NULL;
+
+ for (i = 0; i < rss->n_queues; i++)
+ if (rss->queue_id[i] >= port_info.max_rx_queues)
+ return NULL;
+ }
+
+ /**
+ * Resource create
+ */
+ /* Port */
+ memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
+ if (rss) {
+ port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+ port_conf.rx_adv_conf.rss_conf.rss_hf =
+ ETH_RSS_IPV4 | ETH_RSS_IPV6;
+ }
+
+ cpu_id = (uint32_t) rte_eth_dev_socket_id(port_id);
+ if (cpu_id == (uint32_t) SOCKET_ID_ANY)
+ cpu_id = 0;
+
+ status = rte_eth_dev_configure(
+ port_id,
+ params->rx.n_queues,
+ params->tx.n_queues,
+ &port_conf);
+
+ if (status < 0)
+ return NULL;
+
+ if (params->promiscuous)
+ rte_eth_promiscuous_enable(port_id);
+
+ /* Port RX */
+ for (i = 0; i < params->rx.n_queues; i++) {
+ status = rte_eth_rx_queue_setup(
+ port_id,
+ i,
+ params->rx.queue_size,
+ cpu_id,
+ NULL,
+ mempool->m);
+
+ if (status < 0)
+ return NULL;
+ }
+
+ /* Port TX */
+ for (i = 0; i < params->tx.n_queues; i++) {
+ status = rte_eth_tx_queue_setup(
+ port_id,
+ i,
+ params->tx.queue_size,
+ cpu_id,
+ NULL);
+
+ if (status < 0)
+ return NULL;
+ }
+
+ /* Port start */
+ status = rte_eth_dev_start(port_id);
+ if (status < 0)
+ return NULL;
+
+ if (rss) {
+ status = rss_setup(port_id, port_info.reta_size, rss);
+
+ if (status) {
+ rte_eth_dev_stop(port_id);
+ return NULL;
+ }
+ }
+
+ /* Port link up */
+ status = rte_eth_dev_set_link_up(port_id);
+ if ((status < 0) && (status != -ENOTSUP)) {
+ rte_eth_dev_stop(port_id);
+ return NULL;
+ }
+
+ /* Node allocation */
+ link = calloc(1, sizeof(struct link));
+ if (link == NULL) {
+ rte_eth_dev_stop(port_id);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strncpy(link->name, name, sizeof(link->name));
+ link->port_id = port_id;
+ link->n_rxq = params->rx.n_queues;
+ link->n_txq = params->tx.n_queues;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&link_list, link, node);
+
+ return link;
+}
+
+int
+link_is_up(const char *name)
+{
+ struct rte_eth_link link_params;
+ struct link *link;
+
+ /* Check input params */
+ if (name == NULL)
+ return 0;
+
+ link = link_find(name);
+ if (link == NULL)
+ return 0;
+
+ /* Resource */
+ rte_eth_link_get(link->port_id, &link_params);
+
+ return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+}
diff --git a/examples/ip_pipeline/link.h b/examples/ip_pipeline/link.h
new file mode 100644
index 0000000..37d3dc4
--- /dev/null
+++ b/examples/ip_pipeline/link.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _INCLUDE_LINK_H_
+#define _INCLUDE_LINK_H_
+
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include "common.h"
+
/* Maximum number of RX queues that can participate in RSS for one link. */
#ifndef LINK_RXQ_RSS_MAX
#define LINK_RXQ_RSS_MAX 16
#endif

/* Run-time state of one Ethernet link (port), kept in the global
 * link list after a successful link_create(). */
struct link {
	TAILQ_ENTRY(link) node; /* Membership in the global link list. */
	char name[NAME_SIZE];   /* Unique link name (lookup key). */
	uint16_t port_id;       /* DPDK ethdev port id. */
	uint32_t n_rxq;         /* Number of configured RX queues. */
	uint32_t n_txq;         /* Number of configured TX queues. */
};

TAILQ_HEAD(link_list, link);

/* Initialize the link module; call once at startup. Returns 0. */
int
link_init(void);

/* Find a link by name; returns NULL when not found or name is NULL. */
struct link *
link_find(const char *name);

/* RSS configuration: list of RX queue ids the RETA is spread over. */
struct link_params_rss {
	uint32_t queue_id[LINK_RXQ_RSS_MAX];
	uint32_t n_queues; /* Number of valid entries in queue_id[]. */
};

/* Parameters for link_create(). */
struct link_params {
	const char *dev_name;
	uint16_t port_id; /**< Valid only when *dev_name* is NULL. */

	struct {
		uint32_t n_queues;
		uint32_t queue_size;
		const char *mempool_name; /* Mempool backing the RX queues. */
		struct link_params_rss *rss; /* NULL disables RSS. */
	} rx;

	struct {
		uint32_t n_queues;
		uint32_t queue_size;
	} tx;

	int promiscuous; /* Non-zero enables promiscuous mode. */
};

/* Create, configure and start a link; returns NULL on failure. */
struct link *
link_create(const char *name, struct link_params *params);

/* Returns 1 when the named link exists and is up, 0 otherwise. */
int
link_is_up(const char *name);

#endif /* _INCLUDE_LINK_H_ */
diff --git a/examples/ip_pipeline/main.c b/examples/ip_pipeline/main.c
index b53f623..edfb523 100644
--- a/examples/ip_pipeline/main.c
+++ b/examples/ip_pipeline/main.c
@@ -12,6 +12,7 @@
#include "cli.h"
#include "conn.h"
+#include "link.h"
#include "mempool.h"
static const char usage[] =
@@ -167,6 +168,13 @@ main(int argc, char **argv)
return status;
}
+ /* Link */
+ status = link_init();
+ if (status) {
+ printf("Error: Link initialization failed (%d)\n", status);
+ return status;
+ }
+
/* Script */
if (app.script_name)
cli_script_process(app.script_name,
diff --git a/examples/ip_pipeline/meson.build b/examples/ip_pipeline/meson.build
index f8a450f..a2f9bb6 100644
--- a/examples/ip_pipeline/meson.build
+++ b/examples/ip_pipeline/meson.build
@@ -10,6 +10,7 @@ deps += ['pipeline', 'bus_pci']
sources = files(
'cli.c',
'conn.c',
+ 'link.c',
'main.c',
'mempool.c',
'parser.c',
--
2.9.3
More information about the dev
mailing list