[EXT] [PATCH v5] app/eventdev: add crypto producer mode
Akhil Goyal
gakhil at marvell.com
Thu Feb 17 07:56:00 CET 2022
Hi Shijith,
A few comments inline.
With those fixed,
Acked-by: Akhil Goyal <gakhil at marvell.com>
> +static int
> +perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
> +{
> + struct evt_options *opt = t->opt;
> + uint32_t cap;
> + int ret;
> +
> + ret = rte_event_crypto_adapter_caps_get(p->dev_id, p->ca.cdev_id, &cap);
> + if (ret) {
> + evt_err("Failed to get crypto adapter capabilities");
> + return ret;
> + }
> +
> + if (((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
> + !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
> + ((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) &&
> + !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
> + evt_err("crypto adapter %s mode unsupported\n",
> + opt->crypto_adptr_mode ? "OP_FORWARD" : "OP_NEW");
> + return -EINVAL;
> + } else if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)) {
> + evt_err("Storing crypto session not supported");
> + return -EINVAL;
> + }
The above errors should be -ENOTSUP. If the PMD is not capable, the test should be skipped rather than failed.
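Something like the below (an untested sketch, same logic with only the
return values changed) is what I mean:

    if (((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
         !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
        ((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) &&
         !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
            evt_err("crypto adapter %s mode unsupported\n",
                    opt->crypto_adptr_mode ? "OP_FORWARD" : "OP_NEW");
            return -ENOTSUP;
    } else if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)) {
            evt_err("Storing crypto session not supported");
            return -ENOTSUP;
    }

The caller can then map -ENOTSUP to "skip" instead of "fail".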
> +
> + if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
> + struct rte_event response_info;
> +
> + response_info.event = 0;
> + response_info.op = opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW ?
> + RTE_EVENT_OP_NEW : RTE_EVENT_OP_FORWARD;
> + response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
> + response_info.event_type = RTE_EVENT_TYPE_CRYPTODEV;
> + response_info.queue_id = p->queue_id;
> + ret = rte_event_crypto_adapter_queue_pair_add(
> + TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
> + &response_info);
> + } else {
> + ret = rte_event_crypto_adapter_queue_pair_add(
> + TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, NULL);
> + }
> +
> + return ret;
> +}
> +
> +static struct rte_cryptodev_sym_session *
> +cryptodev_sym_sess_create(struct prod_data *p, struct test_perf *t)
> +{
> + struct rte_crypto_sym_xform cipher_xform;
> + struct rte_cryptodev_sym_session *sess;
> +
> + cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
> + cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
> + cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
> + cipher_xform.next = NULL;
> +
> + sess = rte_cryptodev_sym_session_create(t->ca_sess_pool);
> + if (sess == NULL) {
> + evt_err("Failed to create sym session");
> + return NULL;
> + }
> +
> + if (rte_cryptodev_sym_session_init(p->ca.cdev_id, sess, &cipher_xform,
> + t->ca_sess_priv_pool)) {
> + evt_err("Failed to init session");
> + return NULL;
> + }
> +
> + return sess;
> +}
> +
> int
> perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
> uint8_t stride, uint8_t nb_queues,
> @@ -598,6 +796,80 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
> ret = perf_event_timer_adapter_setup(t);
> if (ret)
> return ret;
> + } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
> + struct rte_event_port_conf conf = *port_conf;
> + uint8_t cdev_id = 0;
> + uint16_t qp_id = 0;
> +
> + ret = rte_event_crypto_adapter_create(TEST_PERF_CA_ID,
> + opt->dev_id, &conf, 0);
> + if (ret) {
> + evt_err("Failed to create crypto adapter");
> + return ret;
> + }
> +
> + prod = 0;
> + for (; port < perf_nb_event_ports(opt); port++) {
> + struct rte_cryptodev_sym_session *crypto_sess;
> + union rte_event_crypto_metadata m_data;
> + struct prod_data *p = &t->prod[port];
> + uint32_t flow_id;
> +
> + if (qp_id == rte_cryptodev_queue_pair_count(cdev_id)) {
> + cdev_id++;
> + qp_id = 0;
> + }
> +
> + p->dev_id = opt->dev_id;
> + p->port_id = port;
> + p->queue_id = prod * stride;
> + p->ca.cdev_id = cdev_id;
> + p->ca.cdev_qp_id = qp_id;
> + p->ca.crypto_sess = rte_zmalloc_socket(
> + NULL, sizeof(crypto_sess) * t->nb_flows,
> + RTE_CACHE_LINE_SIZE, opt->socket_id);
> + p->t = t;
> +
> + m_data.request_info.cdev_id = p->ca.cdev_id;
> + m_data.request_info.queue_pair_id = p->ca.cdev_qp_id;
> + m_data.response_info.op = opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW ?
> + RTE_EVENT_OP_NEW : RTE_EVENT_OP_FORWARD;
> + m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
> + m_data.response_info.event_type =
> + RTE_EVENT_TYPE_CRYPTODEV;
> + m_data.response_info.queue_id = p->queue_id;
> +
> + for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
> + crypto_sess = cryptodev_sym_sess_create(p, t);
> + if (crypto_sess == NULL)
> + return -ENOMEM;
> +
> + m_data.response_info.flow_id = flow_id;
> + rte_cryptodev_sym_session_set_user_data(
> + crypto_sess, &m_data, sizeof(m_data));
> + p->ca.crypto_sess[flow_id] = crypto_sess;
> + }
> +
> + conf.event_port_cfg |=
> + RTE_EVENT_PORT_CFG_HINT_PRODUCER |
> + RTE_EVENT_PORT_CFG_HINT_CONSUMER;
> +
> + ret = rte_event_port_setup(opt->dev_id, port, &conf);
> + if (ret) {
> + evt_err("failed to setup port %d", port);
> + return ret;
> + }
> +
> + ret = perf_event_crypto_adapter_setup(t, p);
> + if (ret)
> + return ret;
> +
> + qp_id++;
> + prod++;
p->ca.crypto_sess is leaked in the failure cases here.
Also check that it is freed when the sessions are destroyed.
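A rough (untested) sketch of what I mean for the failure path; the
sessions created for earlier flows and the crypto_sess array itself
both need to be released before returning:

    if (crypto_sess == NULL) {
            /* Roll back the sessions created so far. */
            while (flow_id-- > 0) {
                    rte_cryptodev_sym_session_clear(
                            cdev_id, p->ca.crypto_sess[flow_id]);
                    rte_cryptodev_sym_session_free(
                            p->ca.crypto_sess[flow_id]);
            }
            rte_free(p->ca.crypto_sess);
            return -ENOMEM;
    }

And wherever the sessions are destroyed at test teardown, the same
clear/free pair plus an rte_free() of p->ca.crypto_sess is needed,
otherwise the array leaks on the success path too.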
> + }
> } else {
> prod = 0;
> for ( ; port < perf_nb_event_ports(opt); port++) {
> @@ -659,7 +931,8 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
> }
>
> if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
> - opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
> + opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
> + opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
> /* Validate producer lcores */
> if (evt_lcores_has_overlap(opt->plcores,
> rte_get_main_lcore())) {
> @@ -767,8 +1040,7 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
> },
> };
>
> - if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
> - opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
> + if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR)
> return 0;
>
> if (!rte_eth_dev_count_avail()) {
> @@ -841,6 +1113,147 @@ void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
> }
> }
>
> +int
> +perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt)
> +{
> + uint8_t cdev_count, cdev_id, nb_plcores, nb_qps;
> + struct test_perf *t = evt_test_priv(test);
> + unsigned int max_session_size;
> + uint32_t nb_sessions;
> + int ret;
> +
> + if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
> + return 0;
> +
> + cdev_count = rte_cryptodev_count();
> + if (cdev_count == 0) {
> + evt_err("No crypto devices available\n");
> + return -ENODEV;
> + }
> +
> + t->ca_op_pool = rte_crypto_op_pool_create(
> + "crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC, opt-
> >pool_sz,
> + 128, 0, rte_socket_id());
> + if (t->ca_op_pool == NULL) {
> + evt_err("Failed to create crypto op pool");
> + return -ENOMEM;
> + }
> +
> + nb_sessions = evt_nr_active_lcores(opt->plcores) * t->nb_flows;
> + t->ca_sess_pool = rte_cryptodev_sym_session_pool_create(
> + "ca_sess_pool", nb_sessions, 0, 0,
> + sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
> + if (t->ca_sess_pool == NULL) {
> + evt_err("Failed to create sym session pool");
> + ret = -ENOMEM;
> + goto err;
> + }
> +
> + max_session_size = 0;
> + for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
> + unsigned int session_size;
> +
> + session_size =
> + rte_cryptodev_sym_get_private_session_size(cdev_id);
> + if (session_size > max_session_size)
> + max_session_size = session_size;
> + }
> +
> + max_session_size += sizeof(union rte_event_crypto_metadata);
> + t->ca_sess_priv_pool = rte_mempool_create(
> + "ca_sess_priv_pool", nb_sessions, max_session_size, 0, 0,
> NULL,
> + NULL, NULL, NULL, SOCKET_ID_ANY, 0);
> + if (t->ca_sess_priv_pool == NULL) {
> + evt_err("failed to create sym session private pool");
> + ret = -ENOMEM;
> + goto err;
> + }
> +
> + /*
> + * Calculate number of needed queue pairs, based on the amount of
> + * available number of logical cores and crypto devices. For instance,
> + * if there are 4 cores and 2 crypto devices, 2 queue pairs will be set
> + * up per device.
> + */
> + nb_plcores = evt_nr_active_lcores(opt->plcores);
> + nb_qps = (nb_plcores % cdev_count) ? (nb_plcores / cdev_count) + 1 :
> + nb_plcores / cdev_count;
> + for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
> + struct rte_cryptodev_qp_conf qp_conf;
> + struct rte_cryptodev_config conf;
> + struct rte_cryptodev_info info;
> + int qp_id;
> +
> + rte_cryptodev_info_get(cdev_id, &info);
> + if (nb_qps > info.max_nb_queue_pairs) {
> + evt_err("Not enough queue pairs per cryptodev (%u)",
> + nb_qps);
> + ret = -EINVAL;
> + goto err;
> + }
> +
> + conf.nb_queue_pairs = nb_qps;
> + conf.socket_id = SOCKET_ID_ANY;
> + conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY;
> +
> + ret = rte_cryptodev_configure(cdev_id, &conf);
> + if (ret) {
> + evt_err("Failed to configure cryptodev (%u)", cdev_id);
> + goto err;
> + }
> +
> + qp_conf.nb_descriptors = NB_CRYPTODEV_DESCRIPTORS;
> + qp_conf.mp_session = t->ca_sess_pool;
> + qp_conf.mp_session_private = t->ca_sess_priv_pool;
> +
> + for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++) {
> + ret = rte_cryptodev_queue_pair_setup(
> + cdev_id, qp_id, &qp_conf,
> + rte_cryptodev_socket_id(cdev_id));
> + if (ret) {
> + evt_err("Failed to setup queue pairs on
> cryptodev %u\n",
> + cdev_id);
> + goto err;
The cryptodev also needs to be cleaned up on this error path.
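i.e. something along these lines (untested) in the error path, so that
any cryptodev configured before the failure is released as well:

    err:
            for (cdev_id = 0; cdev_id < cdev_count; cdev_id++)
                    rte_cryptodev_close(cdev_id);

            rte_mempool_free(t->ca_op_pool);
            rte_mempool_free(t->ca_sess_pool);
            rte_mempool_free(t->ca_sess_priv_pool);

            return ret;

rte_cryptodev_close() on a device that was never configured should just
return an error that can be ignored, but please double check.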
> + }
> + }
> + }
> +
> + return 0;
> +err:
> + rte_mempool_free(t->ca_op_pool);
> + rte_mempool_free(t->ca_sess_pool);
> + rte_mempool_free(t->ca_sess_priv_pool);
> +
> + return ret;
> +}
> +