[dpdk-dev] [PATCH v3 1/3] kasumi: add new KASUMI PMD

Chen, Zhaoyan zhaoyan.chen at intel.com
Thu Jun 23 09:37:33 CEST 2016


Tested-by: Chen, Zhaoyan <Zhaoyan.chen at intel.com>

* Commit: 3901ed99c2f82d3e979bb1bea001d61898241829
* Patch Apply: Success
* Compilation: Success
* Kernel/OS: 3.11.10-301.fc20.x86_64
* GCC: 4.8.3 20140911

* Case 1
./app/test -c f -n 4
cryptodev_sw_kasumi_autotest

KASUMI unit test execution is successful.

Thanks,
Joey

> -----Original Message-----
> From: dev [mailto:dev-bounces at dpdk.org] On Behalf Of Pablo de Lara
> Sent: Monday, June 20, 2016 10:40 PM
> To: dev at dpdk.org
> Cc: Doherty, Declan <declan.doherty at intel.com>; Jain, Deepak K
> <deepak.k.jain at intel.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch at intel.com>
> Subject: [dpdk-dev] [PATCH v3 1/3] kasumi: add new KASUMI PMD
> 
> Added new SW PMD which makes use of the libsso_kasumi SW library,
> which provides wireless algorithms KASUMI F8 and F9
> in software.
> 
> This PMD supports cipher-only, hash-only and chained operations
> ("cipher then hash" and "hash then cipher") of the following
> algorithms:
> - RTE_CRYPTO_SYM_CIPHER_KASUMI_F8
> - RTE_CRYPTO_SYM_AUTH_KASUMI_F9
> 
> Signed-off-by: Pablo de Lara <pablo.de.lara.guarch at intel.com>
> Acked-by: Jain, Deepak K <deepak.k.jain at intel.com>
> ---
>  MAINTAINERS                                      |   5 +
>  config/common_base                               |   6 +
>  config/defconfig_i686-native-linuxapp-gcc        |   5 +
>  config/defconfig_i686-native-linuxapp-icc        |   5 +
>  doc/guides/cryptodevs/index.rst                  |   3 +-
>  doc/guides/cryptodevs/kasumi.rst                 | 101 ++++
>  doc/guides/cryptodevs/overview.rst               |  79 +--
>  doc/guides/rel_notes/release_16_07.rst           |   5 +
>  drivers/crypto/Makefile                          |   1 +
>  drivers/crypto/kasumi/Makefile                   |  64 +++
>  drivers/crypto/kasumi/rte_kasumi_pmd.c           | 658 +++++++++++++++++++++++
>  drivers/crypto/kasumi/rte_kasumi_pmd_ops.c       | 344 ++++++++++++
>  drivers/crypto/kasumi/rte_kasumi_pmd_private.h   | 106 ++++
>  drivers/crypto/kasumi/rte_pmd_kasumi_version.map |   3 +
>  examples/l2fwd-crypto/main.c                     |  10 +-
>  lib/librte_cryptodev/rte_crypto_sym.h            |   6 +-
>  lib/librte_cryptodev/rte_cryptodev.h             |   3 +
>  mk/rte.app.mk                                    |   2 +
>  scripts/test-build.sh                            |   4 +
>  19 files changed, 1366 insertions(+), 44 deletions(-)
>  create mode 100644 doc/guides/cryptodevs/kasumi.rst
>  create mode 100644 drivers/crypto/kasumi/Makefile
>  create mode 100644 drivers/crypto/kasumi/rte_kasumi_pmd.c
>  create mode 100644 drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
>  create mode 100644 drivers/crypto/kasumi/rte_kasumi_pmd_private.h
>  create mode 100644 drivers/crypto/kasumi/rte_pmd_kasumi_version.map
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 3e6b70c..2e0270f 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -396,6 +396,11 @@ M: Pablo de Lara <pablo.de.lara.guarch at intel.com>
>  F: drivers/crypto/snow3g/
>  F: doc/guides/cryptodevs/snow3g.rst
> 
> +KASUMI PMD
> +M: Pablo de Lara <pablo.de.lara.guarch at intel.com>
> +F: drivers/crypto/kasumi/
> +F: doc/guides/cryptodevs/kasumi.rst
> +
>  Null Crypto PMD
>  M: Declan Doherty <declan.doherty at intel.com>
>  F: drivers/crypto/null/
> diff --git a/config/common_base b/config/common_base
> index b9ba405..fcf91c6 100644
> --- a/config/common_base
> +++ b/config/common_base
> @@ -370,6 +370,12 @@ CONFIG_RTE_LIBRTE_PMD_SNOW3G=n
>  CONFIG_RTE_LIBRTE_PMD_SNOW3G_DEBUG=n
> 
>  #
> +# Compile PMD for KASUMI device
> +#
> +CONFIG_RTE_LIBRTE_PMD_KASUMI=n
> +CONFIG_RTE_LIBRTE_PMD_KASUMI_DEBUG=n
> +
> +#
>  # Compile PMD for NULL Crypto device
>  #
>  CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
> diff --git a/config/defconfig_i686-native-linuxapp-gcc
> b/config/defconfig_i686-native-linuxapp-gcc
> index c32859f..ba07259 100644
> --- a/config/defconfig_i686-native-linuxapp-gcc
> +++ b/config/defconfig_i686-native-linuxapp-gcc
> @@ -60,3 +60,8 @@ CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
>  # AES-NI GCM PMD is not supported on 32-bit
>  #
>  CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
> +
> +#
> +# KASUMI PMD is not supported on 32-bit
> +#
> +CONFIG_RTE_LIBRTE_PMD_KASUMI=n
> diff --git a/config/defconfig_i686-native-linuxapp-icc
> b/config/defconfig_i686-native-linuxapp-icc
> index cde9d96..850e536 100644
> --- a/config/defconfig_i686-native-linuxapp-icc
> +++ b/config/defconfig_i686-native-linuxapp-icc
> @@ -60,3 +60,8 @@ CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
>  # AES-NI GCM PMD is not supported on 32-bit
>  #
>  CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
> +
> +#
> +# KASUMI PMD is not supported on 32-bit
> +#
> +CONFIG_RTE_LIBRTE_PMD_KASUMI=n
> diff --git a/doc/guides/cryptodevs/index.rst
> b/doc/guides/cryptodevs/index.rst
> index a3f11f3..9616de1 100644
> --- a/doc/guides/cryptodevs/index.rst
> +++ b/doc/guides/cryptodevs/index.rst
> @@ -38,6 +38,7 @@ Crypto Device Drivers
>      overview
>      aesni_mb
>      aesni_gcm
> +    kasumi
>      null
>      snow3g
> -    qat
> \ No newline at end of file
> +    qat
> diff --git a/doc/guides/cryptodevs/kasumi.rst
> b/doc/guides/cryptodevs/kasumi.rst
> new file mode 100644
> index 0000000..d6b3a97
> --- /dev/null
> +++ b/doc/guides/cryptodevs/kasumi.rst
> @@ -0,0 +1,101 @@
> +..  BSD LICENSE
> +        Copyright(c) 2016 Intel Corporation. All rights reserved.
> +
> +    Redistribution and use in source and binary forms, with or without
> +    modification, are permitted provided that the following conditions
> +    are met:
> +
> +    * Redistributions of source code must retain the above copyright
> +    notice, this list of conditions and the following disclaimer.
> +    * Redistributions in binary form must reproduce the above copyright
> +    notice, this list of conditions and the following disclaimer in
> +    the documentation and/or other materials provided with the
> +    distribution.
> +    * Neither the name of Intel Corporation nor the names of its
> +    contributors may be used to endorse or promote products derived
> +    from this software without specific prior written permission.
> +
> +    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> CONTRIBUTORS
> +    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
> NOT
> +    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
> FITNESS FOR
> +    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT
> +    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL,
> +    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
> NOT
> +    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
> OF USE,
> +    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
> AND ON ANY
> +    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> +    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
> THE USE
> +    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> DAMAGE.
> +
> +KASUMI Crypto Poll Mode Driver
> +===============================
> +
> +The KASUMI PMD (**librte_pmd_kasumi**) provides poll mode crypto driver
> +support for utilizing the Intel libsso library, which implements the F8 and F9
> +functions for the KASUMI UEA1 cipher and UIA1 hash algorithms.
> +
> +Features
> +--------
> +
> +KASUMI PMD has support for:
> +
> +Cipher algorithm:
> +
> +* RTE_CRYPTO_SYM_CIPHER_KASUMI_F8
> +
> +Authentication algorithm:
> +
> +* RTE_CRYPTO_SYM_AUTH_KASUMI_F9
> +
> +Limitations
> +-----------
> +
> +* Chained mbufs are not supported.
> +* KASUMI (F9) is supported only if the hash offset field is byte-aligned.
> +
> +Installation
> +------------
> +
> +To build DPDK with the KASUMI_PMD, the user is required to download
> +the export-controlled ``libsso_kasumi`` library, by requesting it from
> +`<https://networkbuilders.intel.com/network-technologies/dpdk>`_.
> +Once approval has been granted, the user needs to log in at
> +`<https://networkbuilders.intel.com/dpdklogin>`_
> +and click on the "Kasumi Bit Stream crypto library" link to download the library.
> +After downloading the library, the user needs to unpack and compile it
> +on their system before building DPDK::
> +
> +   make kasumi
> +
> +Initialization
> +--------------
> +
> +In order to enable this virtual crypto PMD, the user must:
> +
> +* Export the environmental variable LIBSSO_KASUMI_PATH with the path
> where
> +  the library was extracted (kasumi folder).
> +
> +* Build the LIBSSO library (explained in Installation section).
> +
> +* Set CONFIG_RTE_LIBRTE_PMD_KASUMI=y in config/common_base.
> +
> +To use the PMD in an application, the user must:
> +
> +* Call rte_eal_vdev_init("cryptodev_kasumi_pmd") within the application.
> +
> +* Use --vdev="cryptodev_kasumi_pmd" in the EAL options, which will call
> +  rte_eal_vdev_init() internally.
> +
> +The following parameters (all optional) can be provided in the previous two
> +calls:
> +
> +* socket_id: Specify the socket where the memory for the device is going to
> +  be allocated (by default, socket_id will be the socket where the core that
> +  is creating the PMD is running on).
> +
> +* max_nb_queue_pairs: Specify the maximum number of queue pairs in
> +  the device (8 by default).
> +
> +* max_nb_sessions: Specify the maximum number of sessions that can be
> +  created (2048 by default).
> +
> +Example:
> +
> +.. code-block:: console
> +
> +    ./l2fwd-crypto -c 40 -n 4 --vdev="cryptodev_kasumi_pmd,socket_id=1,max_nb_sessions=128"
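(Not part of the patch, just an illustration for readers: the same vdev can be
created programmatically instead of via the --vdev option. A minimal sketch,
assuming the 16.07 EAL/cryptodev API; the helper name and its arguments are
hypothetical.)

#include <rte_dev.h>
#include <rte_cryptodev.h>

/* Hypothetical helper, illustration only: create the KASUMI vdev from
 * application code, equivalent to the --vdev EAL option shown above.
 * The usual rte_cryptodev_configure()/queue pair setup still has to
 * follow before sessions can be created. */
static int
create_kasumi_vdev(void)
{
	uint8_t nb_devs = rte_cryptodev_count();

	if (rte_eal_vdev_init("cryptodev_kasumi_pmd",
			"socket_id=0,max_nb_sessions=128") < 0)
		return -1;

	/* One extra cryptodev should now be available. */
	return (rte_cryptodev_count() == nb_devs + 1) ? 0 : -1;
}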
> diff --git a/doc/guides/cryptodevs/overview.rst
> b/doc/guides/cryptodevs/overview.rst
> index 5861440..d612f71 100644
> --- a/doc/guides/cryptodevs/overview.rst
> +++ b/doc/guides/cryptodevs/overview.rst
> @@ -33,62 +33,63 @@ Crypto Device Supported Functionality Matrices
>  Supported Feature Flags
> 
>  .. csv-table::
> -   :header: "Feature Flags", "qat", "null", "aesni_mb", "aesni_gcm",
> "snow3g"
> +   :header: "Feature Flags", "qat", "null", "aesni_mb", "aesni_gcm",
> "snow3g", "kasumi"
>     :stub-columns: 1
> 
> -   "RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO",x,x,x,x,x
> -   "RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO",,,,,
> -   "RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING",x,x,x,x,x
> -   "RTE_CRYPTODEV_FF_CPU_SSE",,,x,x,x
> -   "RTE_CRYPTODEV_FF_CPU_AVX",,,x,x,x
> -   "RTE_CRYPTODEV_FF_CPU_AVX2",,,x,x,
> -   "RTE_CRYPTODEV_FF_CPU_AESNI",,,x,x,
> -   "RTE_CRYPTODEV_FF_HW_ACCELERATED",x,,,,
> +   "RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO",x,x,x,x,x,x
> +   "RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO",,,,,,
> +   "RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING",x,x,x,x,x,x
> +   "RTE_CRYPTODEV_FF_CPU_SSE",,,x,x,x,x
> +   "RTE_CRYPTODEV_FF_CPU_AVX",,,x,x,x,x
> +   "RTE_CRYPTODEV_FF_CPU_AVX2",,,x,x,,
> +   "RTE_CRYPTODEV_FF_CPU_AESNI",,,x,x,,
> +   "RTE_CRYPTODEV_FF_HW_ACCELERATED",x,,,,,
> 
>  Supported Cipher Algorithms
> 
>  .. csv-table::
> -   :header: "Cipher Algorithms", "qat", "null", "aesni_mb", "aesni_gcm",
> "snow3g"
> +   :header: "Cipher Algorithms", "qat", "null", "aesni_mb", "aesni_gcm",
> "snow3g", "kasumi"
>     :stub-columns: 1
> 
> -   "NULL",,x,,,
> -   "AES_CBC_128",x,,x,,
> -   "AES_CBC_192",x,,x,,
> -   "AES_CBC_256",x,,x,,
> -   "AES_CTR_128",x,,x,,
> -   "AES_CTR_192",x,,x,,
> -   "AES_CTR_256",x,,x,,
> -   "SNOW3G_UEA2",x,,,,x
> +   "NULL",,x,,,,
> +   "AES_CBC_128",x,,x,,,
> +   "AES_CBC_192",x,,x,,,
> +   "AES_CBC_256",x,,x,,,
> +   "AES_CTR_128",x,,x,,,
> +   "AES_CTR_192",x,,x,,,
> +   "AES_CTR_256",x,,x,,,
> +   "SNOW3G_UEA2",x,,,,x,
> +   "KASUMI_F8",,,,,,x
> 
>  Supported Authentication Algorithms
> 
>  .. csv-table::
> -   :header: "Cipher Algorithms", "qat", "null", "aesni_mb", "aesni_gcm",
> "snow3g"
> +   :header: "Cipher Algorithms", "qat", "null", "aesni_mb", "aesni_gcm",
> "snow3g", "kasumi"
>     :stub-columns: 1
> 
> -   "NONE",,x,,,
> -   "MD5",,,,,
> -   "MD5_HMAC",,,x,,
> -   "SHA1",,,,,
> -   "SHA1_HMAC",x,,x,,
> -   "SHA224",,,,,
> -   "SHA224_HMAC",,,x,,
> -   "SHA256",,,,,
> -   "SHA256_HMAC",x,,x,,
> -   "SHA384",,,,,
> -   "SHA384_HMAC",,,x,,
> -   "SHA512",,,,,
> -   "SHA512_HMAC",x,,x,,
> -   "AES_XCBC",x,,x,,
> -   "SNOW3G_UIA2",x,,,,x
> -
> +   "NONE",,x,,,,
> +   "MD5",,,,,,
> +   "MD5_HMAC",,,x,,,
> +   "SHA1",,,,,,
> +   "SHA1_HMAC",x,,x,,,
> +   "SHA224",,,,,,
> +   "SHA224_HMAC",,,x,,,
> +   "SHA256",,,,,,
> +   "SHA256_HMAC",x,,x,,,
> +   "SHA384",,,,,,
> +   "SHA384_HMAC",,,x,,,
> +   "SHA512",,,,,,
> +   "SHA512_HMAC",x,,x,,,
> +   "AES_XCBC",x,,x,,,
> +   "SNOW3G_UIA2",x,,,,x,
> +   "KASUMI_F9",,,,,,x
> 
>  Supported AEAD Algorithms
> 
>  .. csv-table::
> -   :header: "AEAD Algorithms", "qat", "null", "aesni_mb", "aesni_gcm",
> "snow3g"
> +   :header: "AEAD Algorithms", "qat", "null", "aesni_mb", "aesni_gcm",
> "snow3g", "kasumi"
>     :stub-columns: 1
> 
> -   "AES_GCM_128",x,,x,,
> -   "AES_GCM_192",x,,,,
> -   "AES_GCM_256",x,,,,
> +   "AES_GCM_128",x,,x,,,
> +   "AES_GCM_192",x,,,,,
> +   "AES_GCM_256",x,,,,,
> diff --git a/doc/guides/rel_notes/release_16_07.rst
> b/doc/guides/rel_notes/release_16_07.rst
> index 131723c..eac476a 100644
> --- a/doc/guides/rel_notes/release_16_07.rst
> +++ b/doc/guides/rel_notes/release_16_07.rst
> @@ -70,6 +70,11 @@ New Features
>    * Enable RSS per network interface through the configuration file.
>    * Streamline the CLI code.
> 
> +* **Added KASUMI SW PMD.**
> +
> +  A new Crypto PMD has been added, which provides KASUMI F8 (UEA1)
> +  ciphering and KASUMI F9 (UIA1) hashing.
> +
> 
>  Resolved Issues
>  ---------------
> diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
> index b420538..dc4ef7f 100644
> --- a/drivers/crypto/Makefile
> +++ b/drivers/crypto/Makefile
> @@ -35,6 +35,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) +=
> aesni_gcm
>  DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
>  DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
>  DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
> +DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
>  DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
> 
>  include $(RTE_SDK)/mk/rte.subdir.mk
> diff --git a/drivers/crypto/kasumi/Makefile
> b/drivers/crypto/kasumi/Makefile
> new file mode 100644
> index 0000000..490ddd8
> --- /dev/null
> +++ b/drivers/crypto/kasumi/Makefile
> @@ -0,0 +1,64 @@
> +#   BSD LICENSE
> +#
> +#   Copyright(c) 2016 Intel Corporation. All rights reserved.
> +#
> +#   Redistribution and use in source and binary forms, with or without
> +#   modification, are permitted provided that the following conditions
> +#   are met:
> +#
> +#     * Redistributions of source code must retain the above copyright
> +#       notice, this list of conditions and the following disclaimer.
> +#     * Redistributions in binary form must reproduce the above copyright
> +#       notice, this list of conditions and the following disclaimer in
> +#       the documentation and/or other materials provided with the
> +#       distribution.
> +#     * Neither the name of Intel Corporation nor the names of its
> +#       contributors may be used to endorse or promote products derived
> +#       from this software without specific prior written permission.
> +#
> +#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> CONTRIBUTORS
> +#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
> NOT
> +#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
> FITNESS FOR
> +#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT
> +#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL,
> +#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
> NOT
> +#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
> OF USE,
> +#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
> AND ON ANY
> +#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
> TORT
> +#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
> THE USE
> +#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> DAMAGE.
> +
> +include $(RTE_SDK)/mk/rte.vars.mk
> +
> +ifeq ($(LIBSSO_KASUMI_PATH),)
> +$(error "Please define LIBSSO_KASUMI_PATH environment variable")
> +endif
> +
> +# library name
> +LIB = librte_pmd_kasumi.a
> +
> +# build flags
> +CFLAGS += -O3
> +CFLAGS += $(WERROR_FLAGS)
> +
> +# library version
> +LIBABIVER := 1
> +
> +# versioning export map
> +EXPORT_MAP := rte_pmd_kasumi_version.map
> +
> +# external library include paths
> +CFLAGS += -I$(LIBSSO_KASUMI_PATH)
> +CFLAGS += -I$(LIBSSO_KASUMI_PATH)/include
> +CFLAGS += -I$(LIBSSO_KASUMI_PATH)/build
> +
> +# library source files
> +SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd.c
> +SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd_ops.c
> +
> +# library dependencies
> +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_eal
> +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_mbuf
> +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_cryptodev
> +
> +include $(RTE_SDK)/mk/rte.lib.mk
> diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c
> b/drivers/crypto/kasumi/rte_kasumi_pmd.c
> new file mode 100644
> index 0000000..0bf415d
> --- /dev/null
> +++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
> @@ -0,0 +1,658 @@
> +/*-
> + *   BSD LICENSE
> + *
> + *   Copyright(c) 2016 Intel Corporation. All rights reserved.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of Intel Corporation nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
> NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
> FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
> NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
> OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
> AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
> TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
> THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> DAMAGE.
> + */
> +
> +#include <rte_common.h>
> +#include <rte_config.h>
> +#include <rte_hexdump.h>
> +#include <rte_cryptodev.h>
> +#include <rte_cryptodev_pmd.h>
> +#include <rte_dev.h>
> +#include <rte_malloc.h>
> +#include <rte_cpuflags.h>
> +#include <rte_kvargs.h>
> +
> +#include "rte_kasumi_pmd_private.h"
> +
> +#define KASUMI_KEY_LENGTH 16
> +#define KASUMI_IV_LENGTH 8
> +#define KASUMI_DIGEST_LENGTH 4
> +#define KASUMI_MAX_BURST 4
> +#define BYTE_LEN 8
> +
> +/**
> + * Global static parameter used to create a unique name for each KASUMI
> + * crypto device.
> + */
> +static unsigned unique_name_id;
> +
> +static inline int
> +create_unique_device_name(char *name, size_t size)
> +{
> +	int ret;
> +
> +	if (name == NULL)
> +		return -EINVAL;
> +
> +	ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_KASUMI_PMD,
> +			unique_name_id++);
> +	if (ret < 0)
> +		return ret;
> +	return 0;
> +}
> +
> +/** Get xform chain order. */
> +static enum kasumi_operation
> +kasumi_get_mode(const struct rte_crypto_sym_xform *xform)
> +{
> +	if (xform == NULL)
> +		return KASUMI_OP_NOT_SUPPORTED;
> +
> +	if (xform->next)
> +		if (xform->next->next != NULL)
> +			return KASUMI_OP_NOT_SUPPORTED;
> +
> +	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> +		if (xform->next == NULL)
> +			return KASUMI_OP_ONLY_AUTH;
> +		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
> +			return KASUMI_OP_AUTH_CIPHER;
> +		else
> +			return KASUMI_OP_NOT_SUPPORTED;
> +	}
> +
> +	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
> +		if (xform->next == NULL)
> +			return KASUMI_OP_ONLY_CIPHER;
> +		else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
> +			return KASUMI_OP_CIPHER_AUTH;
> +		else
> +			return KASUMI_OP_NOT_SUPPORTED;
> +	}
> +
> +	return KASUMI_OP_NOT_SUPPORTED;
> +}
> +
> +
> +/** Parse crypto xform chain and set private session parameters. */
> +int
> +kasumi_set_session_parameters(struct kasumi_session *sess,
> +		const struct rte_crypto_sym_xform *xform)
> +{
> +	const struct rte_crypto_sym_xform *auth_xform = NULL;
> +	const struct rte_crypto_sym_xform *cipher_xform = NULL;
> +	int mode;
> +
> +	/* Select Crypto operation - hash then cipher / cipher then hash */
> +	mode = kasumi_get_mode(xform);
> +
> +	switch (mode) {
> +	case KASUMI_OP_CIPHER_AUTH:
> +		auth_xform = xform->next;
> +		/* Fall-through */
> +	case KASUMI_OP_ONLY_CIPHER:
> +		cipher_xform = xform;
> +		break;
> +	case KASUMI_OP_AUTH_CIPHER:
> +		cipher_xform = xform->next;
> +		/* Fall-through */
> +	case KASUMI_OP_ONLY_AUTH:
> +		auth_xform = xform;
> +	}
> +
> +	if (mode == KASUMI_OP_NOT_SUPPORTED) {
> +		KASUMI_LOG_ERR("Unsupported operation chain order parameter");
> +		return -EINVAL;
> +	}
> +
> +	if (cipher_xform) {
> +		/* Only KASUMI F8 supported */
> +		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8)
> +			return -EINVAL;
> +		/* Initialize key */
> +		sso_kasumi_init_f8_key_sched(xform->cipher.key.data,
> +				&sess->pKeySched_cipher);
> +	}
> +
> +	if (auth_xform) {
> +		/* Only KASUMI F9 supported */
> +		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9)
> +			return -EINVAL;
> +		sess->auth_op = auth_xform->auth.op;
> +		/* Initialize key */
> +		sso_kasumi_init_f9_key_sched(xform->auth.key.data,
> +				&sess->pKeySched_hash);
> +	}
> +
> +
> +	sess->op = mode;
> +
> +	return 0;
> +}
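(Illustration only, not part of the patch: the xform chain an application would
pass to reach the KASUMI_OP_CIPHER_AUTH case above. A sketch assuming the
16.07 rte_crypto_sym_xform layout; cipher_key and auth_key are hypothetical
16-byte buffers, and the 8-byte AAD length mirrors the KASUMI_IV_LENGTH check
in the hash path further down.)

static uint8_t cipher_key[16], auth_key[16];	/* hypothetical key material */

/* "Cipher then hash" chain, classified by kasumi_get_mode() as
 * KASUMI_OP_CIPHER_AUTH. */
static struct rte_crypto_sym_xform auth_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.next = NULL,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_KASUMI_F9,
		.key = { .data = auth_key, .length = 16 },
		.digest_length = 4,		/* KASUMI_DIGEST_LENGTH */
		.add_auth_data_length = 8,	/* IV is carried in the AAD */
	},
};

static struct rte_crypto_sym_xform cipher_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.next = &auth_xform,			/* cipher first, then hash */
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
		.key = { .data = cipher_key, .length = 16 },
	},
};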
> +
> +/** Get KASUMI session. */
> +static struct kasumi_session *
> +kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
> +{
> +	struct kasumi_session *sess;
> +
> +	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
> +		if (unlikely(op->sym->session->dev_type !=
> +				RTE_CRYPTODEV_KASUMI_PMD))
> +			return NULL;
> +
> +		sess = (struct kasumi_session *)op->sym->session->_private;
> +	} else  {
> +		struct rte_cryptodev_session *c_sess = NULL;
> +
> +		if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
> +			return NULL;
> +
> +		sess = (struct kasumi_session *)c_sess->_private;
> +
> +		if (unlikely(kasumi_set_session_parameters(sess,
> +				op->sym->xform) != 0))
> +			return NULL;
> +	}
> +
> +	return sess;
> +}
> +
> +/** Encrypt/decrypt mbufs with same cipher key. */
> +static uint8_t
> +process_kasumi_cipher_op(struct rte_crypto_op **ops,
> +		struct kasumi_session *session,
> +		uint8_t num_ops)
> +{
> +	unsigned i;
> +	uint8_t processed_ops = 0;
> +	uint8_t *src[num_ops], *dst[num_ops];
> +	uint64_t IV[num_ops];
> +	uint32_t num_bytes[num_ops];
> +
> +	for (i = 0; i < num_ops; i++) {
> +		/* Sanity checks. */
> +		if (ops[i]->sym->cipher.iv.length != KASUMI_IV_LENGTH) {
> +			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> +			KASUMI_LOG_ERR("iv");
> +			break;
> +		}
> +
> +		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
> +				(ops[i]->sym->cipher.data.offset >> 3);
> +		dst[i] = ops[i]->sym->m_dst ?
> +			rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
> +				(ops[i]->sym->cipher.data.offset >> 3) :
> +			rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
> +				(ops[i]->sym->cipher.data.offset >> 3);
> +		IV[i] = *((uint64_t *)(ops[i]->sym->cipher.iv.data));
> +		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
> +
> +		processed_ops++;
> +	}
> +
> +	if (processed_ops != 0)
> +		sso_kasumi_f8_n_buffer(&session->pKeySched_cipher, IV,
> +			src, dst, num_bytes, processed_ops);
> +
> +	return processed_ops;
> +}
> +
> +/** Encrypt/decrypt mbuf (bit level function). */
> +static uint8_t
> +process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
> +		struct kasumi_session *session)
> +{
> +	uint8_t *src, *dst;
> +	uint64_t IV;
> +	uint32_t length_in_bits, offset_in_bits;
> +
> +	/* Sanity checks. */
> +	if (unlikely(op->sym->cipher.iv.length != KASUMI_IV_LENGTH)) {
> +		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> +		KASUMI_LOG_ERR("iv");
> +		return 0;
> +	}
> +
> +	offset_in_bits = op->sym->cipher.data.offset;
> +	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
> +	dst = op->sym->m_dst ?
> +		rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *) :
> +		rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
> +	IV = *((uint64_t *)(op->sym->cipher.iv.data));
> +	length_in_bits = op->sym->cipher.data.length;
> +
> +	sso_kasumi_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
> +			src, dst, length_in_bits, offset_in_bits);
> +
> +	return 1;
> +}
> +
> +/** Generate/verify hash from mbufs with same hash key. */
> +static int
> +process_kasumi_hash_op(struct rte_crypto_op **ops,
> +		struct kasumi_session *session,
> +		uint8_t num_ops)
> +{
> +	unsigned i;
> +	uint8_t processed_ops = 0;
> +	uint8_t *src, *dst;
> +	uint32_t length_in_bits;
> +	uint32_t num_bytes;
> +	uint32_t shift_bits;
> +	uint64_t IV;
> +	uint8_t direction;
> +
> +	for (i = 0; i < num_ops; i++) {
> +		if (unlikely(ops[i]->sym->auth.aad.length != KASUMI_IV_LENGTH)) {
> +			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> +			KASUMI_LOG_ERR("aad");
> +			break;
> +		}
> +
> +		if (unlikely(ops[i]->sym->auth.digest.length != KASUMI_DIGEST_LENGTH)) {
> +			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> +			KASUMI_LOG_ERR("digest");
> +			break;
> +		}
> +
> +		/* Data must be byte aligned */
> +		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
> +			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
> +			KASUMI_LOG_ERR("offset");
> +			break;
> +		}
> +
> +		length_in_bits = ops[i]->sym->auth.data.length;
> +
> +		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
> +				(ops[i]->sym->auth.data.offset >> 3);
> +		/* IV from AAD */
> +		IV = *((uint64_t *)(ops[i]->sym->auth.aad.data));
> +		/* Direction from next bit after end of message */
> +		num_bytes = (length_in_bits >> 3) + 1;
> +		shift_bits = (BYTE_LEN - 1 - length_in_bits) % BYTE_LEN;
> +		direction = (src[num_bytes - 1] >> shift_bits) & 0x01;
> +
> +		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
> +			dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
> +					ops[i]->sym->auth.digest.length);
> +
> +			sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
> +					IV, src,
> +					length_in_bits,	dst, direction);
> +			/* Verify digest. */
> +			if (memcmp(dst, ops[i]->sym->auth.digest.data,
> +					ops[i]->sym->auth.digest.length) != 0)
> +				ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
> +
> +			/* Trim area used for digest from mbuf. */
> +			rte_pktmbuf_trim(ops[i]->sym->m_src,
> +					ops[i]->sym->auth.digest.length);
> +		} else  {
> +			dst = ops[i]->sym->auth.digest.data;
> +
> +			sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
> +					IV, src,
> +					length_in_bits, dst, direction);
> +		}
> +		processed_ops++;
> +	}
> +
> +	return processed_ops;
> +}
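(Illustration only: a worked instance of the direction-bit extraction above,
for a hypothetical 88-bit, i.e. 11-byte, message.)

/*
 * length_in_bits = 88:
 *   num_bytes  = (88 >> 3) + 1 = 12
 *   shift_bits = (BYTE_LEN - 1 - 88) % BYTE_LEN = 7
 *                (the subtraction wraps in unsigned arithmetic, and the
 *                 result modulo 8 is still 7)
 *   direction  = (src[11] >> 7) & 0x01
 *
 * i.e. the direction bit is the most significant bit of the byte that
 * immediately follows the message, as the F9 algorithm expects.
 */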
> +
> +/** Process a batch of crypto ops which shares the same session. */
> +static int
> +process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
> +		struct kasumi_qp *qp, uint8_t num_ops,
> +		uint16_t *accumulated_enqueued_ops)
> +{
> +	unsigned i;
> +	unsigned enqueued_ops, processed_ops;
> +
> +	switch (session->op) {
> +	case KASUMI_OP_ONLY_CIPHER:
> +		processed_ops = process_kasumi_cipher_op(ops,
> +				session, num_ops);
> +		break;
> +	case KASUMI_OP_ONLY_AUTH:
> +		processed_ops = process_kasumi_hash_op(ops, session,
> +				num_ops);
> +		break;
> +	case KASUMI_OP_CIPHER_AUTH:
> +		processed_ops = process_kasumi_cipher_op(ops, session,
> +				num_ops);
> +		process_kasumi_hash_op(ops, session, processed_ops);
> +		break;
> +	case KASUMI_OP_AUTH_CIPHER:
> +		processed_ops = process_kasumi_hash_op(ops, session,
> +				num_ops);
> +		process_kasumi_cipher_op(ops, session, processed_ops);
> +		break;
> +	default:
> +		/* Operation not supported. */
> +		processed_ops = 0;
> +	}
> +
> +	for (i = 0; i < num_ops; i++) {
> +		/*
> +		 * If there was no error/authentication failure,
> +		 * change status to successful.
> +		 */
> +		if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
> +			ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
> +		/* Free session if a session-less crypto op. */
> +		if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
> +			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
> +			ops[i]->sym->session = NULL;
> +		}
> +	}
> +
> +	enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
> +				(void **)ops, processed_ops);
> +	qp->qp_stats.enqueued_count += enqueued_ops;
> +	*accumulated_enqueued_ops += enqueued_ops;
> +
> +	return enqueued_ops;
> +}
> +
> +/** Process a crypto op with length/offset in bits. */
> +static int
> +process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
> +		struct kasumi_qp *qp, uint16_t *accumulated_enqueued_ops)
> +{
> +	unsigned enqueued_op, processed_op;
> +
> +	switch (session->op) {
> +	case KASUMI_OP_ONLY_CIPHER:
> +		processed_op = process_kasumi_cipher_op_bit(op,
> +				session);
> +		break;
> +	case KASUMI_OP_ONLY_AUTH:
> +		processed_op = process_kasumi_hash_op(&op, session, 1);
> +		break;
> +	case KASUMI_OP_CIPHER_AUTH:
> +		processed_op = process_kasumi_cipher_op_bit(op, session);
> +		if (processed_op == 1)
> +			process_kasumi_hash_op(&op, session, 1);
> +		break;
> +	case KASUMI_OP_AUTH_CIPHER:
> +		processed_op = process_kasumi_hash_op(&op, session, 1);
> +		if (processed_op == 1)
> +			process_kasumi_cipher_op_bit(op, session);
> +		break;
> +	default:
> +		/* Operation not supported. */
> +		processed_op = 0;
> +	}
> +
> +	/*
> +	 * If there was no error/authentication failure,
> +	 * change status to successful.
> +	 */
> +	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
> +		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
> +
> +	/* Free session if a session-less crypto op. */
> +	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
> +		rte_mempool_put(qp->sess_mp, op->sym->session);
> +		op->sym->session = NULL;
> +	}
> +
> +	enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op,
> +				processed_op);
> +	qp->qp_stats.enqueued_count += enqueued_op;
> +	*accumulated_enqueued_ops += enqueued_op;
> +
> +	return enqueued_op;
> +}
> +
> +static uint16_t
> +kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
> +		uint16_t nb_ops)
> +{
> +	struct rte_crypto_op *c_ops[nb_ops];
> +	struct rte_crypto_op *curr_c_op;
> +
> +	struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
> +	struct kasumi_qp *qp = queue_pair;
> +	unsigned i;
> +	uint8_t burst_size = 0;
> +	uint16_t enqueued_ops = 0;
> +	uint8_t processed_ops;
> +
> +	for (i = 0; i < nb_ops; i++) {
> +		curr_c_op = ops[i];
> +
> +		/* Set status as enqueued (not processed yet) by default. */
> +		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
> +
> +		curr_sess = kasumi_get_session(qp, curr_c_op);
> +		if (unlikely(curr_sess == NULL ||
> +				curr_sess->op == KASUMI_OP_NOT_SUPPORTED)) {
> +			curr_c_op->status =
> +					RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
> +			break;
> +		}
> +
> +		/* If length/offset is at bit-level, process this buffer alone. */
> +		if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
> +				|| ((ops[i]->sym->cipher.data.offset
> +					% BYTE_LEN) != 0)) {
> +			/* Process the ops of the previous session. */
> +			if (prev_sess != NULL) {
> +				processed_ops = process_ops(c_ops, prev_sess,
> +						qp, burst_size, &enqueued_ops);
> +				if (processed_ops < burst_size) {
> +					burst_size = 0;
> +					break;
> +				}
> +
> +				burst_size = 0;
> +				prev_sess = NULL;
> +			}
> +
> +			processed_ops = process_op_bit(curr_c_op, curr_sess,
> +						qp, &enqueued_ops);
> +			if (processed_ops != 1)
> +				break;
> +
> +			continue;
> +		}
> +
> +		/* Batch ops that share the same session. */
> +		if (prev_sess == NULL) {
> +			prev_sess = curr_sess;
> +			c_ops[burst_size++] = curr_c_op;
> +		} else if (curr_sess == prev_sess) {
> +			c_ops[burst_size++] = curr_c_op;
> +			/*
> +			 * When there are enough ops to process in a batch,
> +			 * process them, and start a new batch.
> +			 */
> +			if (burst_size == KASUMI_MAX_BURST) {
> +				processed_ops = process_ops(c_ops, prev_sess,
> +						qp, burst_size, &enqueued_ops);
> +				if (processed_ops < burst_size) {
> +					burst_size = 0;
> +					break;
> +				}
> +
> +				burst_size = 0;
> +				prev_sess = NULL;
> +			}
> +		} else {
> +			/*
> +			 * Different session, process the ops
> +			 * of the previous session.
> +			 */
> +			processed_ops = process_ops(c_ops, prev_sess,
> +					qp, burst_size, &enqueued_ops);
> +			if (processed_ops < burst_size) {
> +				burst_size = 0;
> +				break;
> +			}
> +
> +			burst_size = 0;
> +			prev_sess = curr_sess;
> +
> +			c_ops[burst_size++] = curr_c_op;
> +		}
> +	}
> +
> +	if (burst_size != 0) {
> +		/* Process the crypto ops of the last session. */
> +		processed_ops = process_ops(c_ops, prev_sess,
> +				qp, burst_size, &enqueued_ops);
> +	}
> +
> +	qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
> +	return enqueued_ops;
> +}
> +
> +static uint16_t
> +kasumi_pmd_dequeue_burst(void *queue_pair,
> +		struct rte_crypto_op **c_ops, uint16_t nb_ops)
> +{
> +	struct kasumi_qp *qp = queue_pair;
> +
> +	unsigned nb_dequeued;
> +
> +	nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
> +			(void **)c_ops, nb_ops);
> +	qp->qp_stats.dequeued_count += nb_dequeued;
> +
> +	return nb_dequeued;
> +}
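(Not part of the patch: for context, a sketch of the application-side burst
calls that end up in the two functions above, assuming the 16.07 cryptodev
API; dev_id, qp_id, ops and nb_ops are hypothetical and assumed to be set up
already.)

#include <rte_cryptodev.h>

/* Illustration only: submit a burst of ops and poll them back.  This PMD
 * processes ops synchronously on enqueue and places them on the
 * processed_ops ring, so they can be dequeued immediately. */
static void
run_kasumi_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t sent, done = 0;

	sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);

	while (done < sent)
		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
				&ops[done], sent - done);
}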
> +
> +static int cryptodev_kasumi_uninit(const char *name);
> +
> +static int
> +cryptodev_kasumi_create(const char *name,
> +		struct rte_crypto_vdev_init_params *init_params)
> +{
> +	struct rte_cryptodev *dev;
> +	char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
> +	struct kasumi_private *internals;
> +	uint64_t cpu_flags = 0;
> +
> +	/* Check CPU for supported vector instruction set */
> +	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
> +		cpu_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
> +	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
> +		cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
> +	else {
> +		KASUMI_LOG_ERR("Vector instructions are not supported by CPU");
> +		return -EFAULT;
> +	}
> +
> +	/* Create a unique device name. */
> +	if (create_unique_device_name(crypto_dev_name,
> +			RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
> +		KASUMI_LOG_ERR("failed to create unique cryptodev name");
> +		return -EINVAL;
> +	}
> +
> +	dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
> +			sizeof(struct kasumi_private), init_params->socket_id);
> +	if (dev == NULL) {
> +		KASUMI_LOG_ERR("failed to create cryptodev vdev");
> +		goto init_error;
> +	}
> +
> +	dev->dev_type = RTE_CRYPTODEV_KASUMI_PMD;
> +	dev->dev_ops = rte_kasumi_pmd_ops;
> +
> +	/* Register RX/TX burst functions for data path. */
> +	dev->dequeue_burst = kasumi_pmd_dequeue_burst;
> +	dev->enqueue_burst = kasumi_pmd_enqueue_burst;
> +
> +	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
> +			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
> +			cpu_flags;
> +
> +	internals = dev->data->dev_private;
> +
> +	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
> +	internals->max_nb_sessions = init_params->max_nb_sessions;
> +
> +	return 0;
> +init_error:
> +	KASUMI_LOG_ERR("driver %s: cryptodev_kasumi_create failed", name);
> +
> +	cryptodev_kasumi_uninit(crypto_dev_name);
> +	return -EFAULT;
> +}
> +
> +static int
> +cryptodev_kasumi_init(const char *name,
> +		const char *input_args)
> +{
> +	struct rte_crypto_vdev_init_params init_params = {
> +		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
> +		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
> +		rte_socket_id()
> +	};
> +
> +	rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
> +
> +	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
> +			init_params.socket_id);
> +	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
> +			init_params.max_nb_queue_pairs);
> +	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
> +			init_params.max_nb_sessions);
> +
> +	return cryptodev_kasumi_create(name, &init_params);
> +}
> +
> +static int
> +cryptodev_kasumi_uninit(const char *name)
> +{
> +	if (name == NULL)
> +		return -EINVAL;
> +
> +	RTE_LOG(INFO, PMD, "Closing KASUMI crypto device %s"
> +			" on numa socket %u\n",
> +			name, rte_socket_id());
> +
> +	return 0;
> +}
> +
> +static struct rte_driver cryptodev_kasumi_pmd_drv = {
> +	.name = CRYPTODEV_NAME_KASUMI_PMD,
> +	.type = PMD_VDEV,
> +	.init = cryptodev_kasumi_init,
> +	.uninit = cryptodev_kasumi_uninit
> +};
> +
> +PMD_REGISTER_DRIVER(cryptodev_kasumi_pmd_drv);
> diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
> b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
> new file mode 100644
> index 0000000..da5854e
> --- /dev/null
> +++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
> @@ -0,0 +1,344 @@
> +/*-
> + *   BSD LICENSE
> + *
> + *   Copyright(c) 2016 Intel Corporation. All rights reserved.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of Intel Corporation nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
> NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
> FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
> NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
> OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
> AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
> TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
> THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> DAMAGE.
> + */
> +
> +#include <string.h>
> +
> +#include <rte_common.h>
> +#include <rte_malloc.h>
> +#include <rte_cryptodev_pmd.h>
> +
> +#include "rte_kasumi_pmd_private.h"
> +
> +static const struct rte_cryptodev_capabilities kasumi_pmd_capabilities[] = {
> +	{	/* KASUMI (F9) */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_KASUMI_F9,
> +				.block_size = 8,
> +				.key_size = {
> +					.min = 16,
> +					.max = 16,
> +					.increment = 0
> +				},
> +				.digest_size = {
> +					.min = 4,
> +					.max = 4,
> +					.increment = 0
> +				},
> +				.aad_size = {
> +					.min = 9,
> +					.max = 9,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	{	/* KASUMI (F8) */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
> +			{.cipher = {
> +				.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
> +				.block_size = 8,
> +				.key_size = {
> +					.min = 16,
> +					.max = 16,
> +					.increment = 0
> +				},
> +				.iv_size = {
> +					.min = 8,
> +					.max = 8,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
> +};
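(Not part of the patch: an illustrative sketch of how an application can
discover the capabilities declared above, assuming the 16.07
rte_cryptodev_info_get() API; the function name is hypothetical.)

#include <stdio.h>
#include <rte_cryptodev.h>

/* Illustration only: walk the capability array exposed by the PMD. */
static void
print_kasumi_caps(uint8_t dev_id)
{
	struct rte_cryptodev_info info;
	const struct rte_cryptodev_capabilities *cap;

	rte_cryptodev_info_get(dev_id, &info);

	for (cap = info.capabilities;
			cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
		if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
				cap->sym.cipher.algo ==
					RTE_CRYPTO_CIPHER_KASUMI_F8)
			printf("KASUMI F8: key %u bytes, IV %u bytes\n",
					(unsigned)cap->sym.cipher.key_size.min,
					(unsigned)cap->sym.cipher.iv_size.min);
	}
}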
> +
> +/** Configure device */
> +static int
> +kasumi_pmd_config(__rte_unused struct rte_cryptodev *dev)
> +{
> +	return 0;
> +}
> +
> +/** Start device */
> +static int
> +kasumi_pmd_start(__rte_unused struct rte_cryptodev *dev)
> +{
> +	return 0;
> +}
> +
> +/** Stop device */
> +static void
> +kasumi_pmd_stop(__rte_unused struct rte_cryptodev *dev)
> +{
> +}
> +
> +/** Close device */
> +static int
> +kasumi_pmd_close(__rte_unused struct rte_cryptodev *dev)
> +{
> +	return 0;
> +}
> +
> +
> +/** Get device statistics */
> +static void
> +kasumi_pmd_stats_get(struct rte_cryptodev *dev,
> +		struct rte_cryptodev_stats *stats)
> +{
> +	int qp_id;
> +
> +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> +		struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
> +
> +		stats->enqueued_count += qp->qp_stats.enqueued_count;
> +		stats->dequeued_count += qp->qp_stats.dequeued_count;
> +
> +		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
> +		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
> +	}
> +}
> +
> +/** Reset device statistics */
> +static void
> +kasumi_pmd_stats_reset(struct rte_cryptodev *dev)
> +{
> +	int qp_id;
> +
> +	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> +		struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
> +
> +		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
> +	}
> +}
> +
> +
> +/** Get device info */
> +static void
> +kasumi_pmd_info_get(struct rte_cryptodev *dev,
> +		struct rte_cryptodev_info *dev_info)
> +{
> +	struct kasumi_private *internals = dev->data->dev_private;
> +
> +	if (dev_info != NULL) {
> +		dev_info->dev_type = dev->dev_type;
> +		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
> +		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
> +		dev_info->feature_flags = dev->feature_flags;
> +		dev_info->capabilities = kasumi_pmd_capabilities;
> +	}
> +}
> +
> +/** Release queue pair */
> +static int
> +kasumi_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
> +{
> +	struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
> +
> +	if (qp != NULL) {
> +		rte_ring_free(qp->processed_ops);
> +		rte_free(qp);
> +		dev->data->queue_pairs[qp_id] = NULL;
> +	}
> +	return 0;
> +}
> +
> +/** set a unique name for the queue pair based on its name, dev_id and qp_id */
> +static int
> +kasumi_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
> +		struct kasumi_qp *qp)
> +{
> +	unsigned n = snprintf(qp->name, sizeof(qp->name),
> +			"kasumi_pmd_%u_qp_%u",
> +			dev->data->dev_id, qp->id);
> +
> +	if (n > sizeof(qp->name))
> +		return -1;
> +
> +	return 0;
> +}
> +
> +/** Create a ring to place processed ops on */
> +static struct rte_ring *
> +kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp,
> +		unsigned ring_size, int socket_id)
> +{
> +	struct rte_ring *r;
> +
> +	r = rte_ring_lookup(qp->name);
> +	if (r) {
> +		if (r->prod.size == ring_size) {
> +			KASUMI_LOG_INFO("Reusing existing ring %s"
> +					" for processed packets",
> +					 qp->name);
> +			return r;
> +		}
> +
> +		KASUMI_LOG_ERR("Unable to reuse existing ring %s"
> +				" for processed packets",
> +				 qp->name);
> +		return NULL;
> +	}
> +
> +	return rte_ring_create(qp->name, ring_size, socket_id,
> +			RING_F_SP_ENQ | RING_F_SC_DEQ);
> +}
> +
> +/** Setup a queue pair */
> +static int
> +kasumi_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
> +		const struct rte_cryptodev_qp_conf *qp_conf,
> +		 int socket_id)
> +{
> +	struct kasumi_qp *qp = NULL;
> +
> +	/* Free memory prior to re-allocation if needed. */
> +	if (dev->data->queue_pairs[qp_id] != NULL)
> +		kasumi_pmd_qp_release(dev, qp_id);
> +
> +	/* Allocate the queue pair data structure. */
> +	qp = rte_zmalloc_socket("KASUMI PMD Queue Pair", sizeof(*qp),
> +					RTE_CACHE_LINE_SIZE, socket_id);
> +	if (qp == NULL)
> +		return (-ENOMEM);
> +
> +	qp->id = qp_id;
> +	dev->data->queue_pairs[qp_id] = qp;
> +
> +	if (kasumi_pmd_qp_set_unique_name(dev, qp))
> +		goto qp_setup_cleanup;
> +
> +	qp->processed_ops = kasumi_pmd_qp_create_processed_ops_ring(qp,
> +			qp_conf->nb_descriptors, socket_id);
> +	if (qp->processed_ops == NULL)
> +		goto qp_setup_cleanup;
> +
> +	qp->sess_mp = dev->data->session_pool;
> +
> +	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
> +
> +	return 0;
> +
> +qp_setup_cleanup:
> +	rte_free(qp);
> +
> +	return -1;
> +}
> +
> +/** Start queue pair */
> +static int
> +kasumi_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
> +		__rte_unused uint16_t queue_pair_id)
> +{
> +	return -ENOTSUP;
> +}
> +
> +/** Stop queue pair */
> +static int
> +kasumi_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
> +		__rte_unused uint16_t queue_pair_id)
> +{
> +	return -ENOTSUP;
> +}
> +
> +/** Return the number of allocated queue pairs */
> +static uint32_t
> +kasumi_pmd_qp_count(struct rte_cryptodev *dev)
> +{
> +	return dev->data->nb_queue_pairs;
> +}
> +
> +/** Returns the size of the KASUMI session structure */
> +static unsigned
> +kasumi_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
> +{
> +	return sizeof(struct kasumi_session);
> +}
> +
> +/** Configure a KASUMI session from a crypto xform chain */
> +static void *
> +kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
> +		struct rte_crypto_sym_xform *xform,	void *sess)
> +{
> +	if (unlikely(sess == NULL)) {
> +		KASUMI_LOG_ERR("invalid session struct");
> +		return NULL;
> +	}
> +
> +	if (kasumi_set_session_parameters(sess, xform) != 0) {
> +		KASUMI_LOG_ERR("failed configure session parameters");
> +		return NULL;
> +	}
> +
> +	return sess;
> +}
> +
> +/** Clear the memory of session so it doesn't leave key material behind */
> +static void
> +kasumi_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
> +{
> +	/*
> +	 * Currently just resetting the whole data structure; need to investigate
> +	 * whether a more selective reset of the key would be more performant.
> +	 */
> +	if (sess)
> +		memset(sess, 0, sizeof(struct kasumi_session));
> +}
> +
> +struct rte_cryptodev_ops kasumi_pmd_ops = {
> +		.dev_configure      = kasumi_pmd_config,
> +		.dev_start          = kasumi_pmd_start,
> +		.dev_stop           = kasumi_pmd_stop,
> +		.dev_close          = kasumi_pmd_close,
> +
> +		.stats_get          = kasumi_pmd_stats_get,
> +		.stats_reset        = kasumi_pmd_stats_reset,
> +
> +		.dev_infos_get      = kasumi_pmd_info_get,
> +
> +		.queue_pair_setup   = kasumi_pmd_qp_setup,
> +		.queue_pair_release = kasumi_pmd_qp_release,
> +		.queue_pair_start   = kasumi_pmd_qp_start,
> +		.queue_pair_stop    = kasumi_pmd_qp_stop,
> +		.queue_pair_count   = kasumi_pmd_qp_count,
> +
> +		.session_get_size   = kasumi_pmd_session_get_size,
> +		.session_configure  = kasumi_pmd_session_configure,
> +		.session_clear      = kasumi_pmd_session_clear
> +};
> +
> +struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops;
> diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
> b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
> new file mode 100644
> index 0000000..04e1c43
> --- /dev/null
> +++ b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
> @@ -0,0 +1,106 @@
> +/*-
> + *   BSD LICENSE
> + *
> + *   Copyright(c) 2016 Intel Corporation. All rights reserved.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of Intel Corporation nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
> NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
> FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
> NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
> OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
> AND ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
> TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
> THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> DAMAGE.
> + */
> +
> +#ifndef _RTE_KASUMI_PMD_PRIVATE_H_
> +#define _RTE_KASUMI_PMD_PRIVATE_H_
> +
> +#include <sso_kasumi.h>
> +
> +#define KASUMI_LOG_ERR(fmt, args...) \
> +	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",  \
> +			CRYPTODEV_NAME_KASUMI_PMD, \
> +			__func__, __LINE__, ## args)
> +
> +#ifdef RTE_LIBRTE_KASUMI_DEBUG
> +#define KASUMI_LOG_INFO(fmt, args...) \
> +	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
> +			CRYPTODEV_NAME_KASUMI_PMD, \
> +			__func__, __LINE__, ## args)
> +
> +#define KASUMI_LOG_DBG(fmt, args...) \
> +	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
> +			CRYPTODEV_NAME_KASUMI_PMD, \
> +			__func__, __LINE__, ## args)
> +#else
> +#define KASUMI_LOG_INFO(fmt, args...)
> +#define KASUMI_LOG_DBG(fmt, args...)
> +#endif
> +
> +/** private data structure for each virtual KASUMI device */
> +struct kasumi_private {
> +	unsigned max_nb_queue_pairs;
> +	/**< Max number of queue pairs supported by device */
> +	unsigned max_nb_sessions;
> +	/**< Max number of sessions supported by device */
> +};
> +
> +/** KASUMI buffer queue pair */
> +struct kasumi_qp {
> +	uint16_t id;
> +	/**< Queue Pair Identifier */
> +	char name[RTE_CRYPTODEV_NAME_LEN];
> +	/**< Unique Queue Pair Name */
> +	struct rte_ring *processed_ops;
> +	/**< Ring for placing processed ops */
> +	struct rte_mempool *sess_mp;
> +	/**< Session Mempool */
> +	struct rte_cryptodev_stats qp_stats;
> +	/**< Queue pair statistics */
> +} __rte_cache_aligned;
> +
> +enum kasumi_operation {
> +	KASUMI_OP_ONLY_CIPHER,
> +	KASUMI_OP_ONLY_AUTH,
> +	KASUMI_OP_CIPHER_AUTH,
> +	KASUMI_OP_AUTH_CIPHER,
> +	KASUMI_OP_NOT_SUPPORTED
> +};
> +
> +/** KASUMI private session structure */
> +struct kasumi_session {
> +	/* Keys have to be 16-byte aligned */
> +	sso_kasumi_key_sched_t pKeySched_cipher;
> +	sso_kasumi_key_sched_t pKeySched_hash;
> +	enum kasumi_operation op;
> +	enum rte_crypto_auth_operation auth_op;
> +} __rte_cache_aligned;
> +
> +
> +int
> +kasumi_set_session_parameters(struct kasumi_session *sess,
> +		const struct rte_crypto_sym_xform *xform);
> +
> +
> +/** device specific operations function pointer structure */
> +struct rte_cryptodev_ops *rte_kasumi_pmd_ops;
> +
> +#endif /* _RTE_KASUMI_PMD_PRIVATE_H_ */
> diff --git a/drivers/crypto/kasumi/rte_pmd_kasumi_version.map
> b/drivers/crypto/kasumi/rte_pmd_kasumi_version.map
> new file mode 100644
> index 0000000..8ffeca9
> --- /dev/null
> +++ b/drivers/crypto/kasumi/rte_pmd_kasumi_version.map
> @@ -0,0 +1,3 @@
> +DPDK_16.07 {
> +	local: *;
> +};
> diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
> index e539559..8dc616d 100644
> --- a/examples/l2fwd-crypto/main.c
> +++ b/examples/l2fwd-crypto/main.c
> @@ -349,6 +349,7 @@ fill_supported_algorithm_tables(void)
>  	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384_HMAC], "SHA384_HMAC");
>  	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512_HMAC], "SHA512_HMAC");
>  	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SNOW3G_UIA2], "SNOW3G_UIA2");
> +	strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_KASUMI_F9], "KASUMI_F9");
> 
>  	for (i = 0; i < RTE_CRYPTO_CIPHER_LIST_END; i++)
>  		strcpy(supported_cipher_algo[i], "NOT_SUPPORTED");
> @@ -358,6 +359,7 @@ fill_supported_algorithm_tables(void)
>  	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_GCM], "AES_GCM");
>  	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_NULL], "NULL");
>  	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_SNOW3G_UEA2], "SNOW3G_UEA2");
> +	strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_KASUMI_F8], "KASUMI_F8");
>  }
> 
> 
> @@ -466,8 +468,9 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
>  				rte_pktmbuf_pkt_len(m) - cparams->digest_length);
>  		op->sym->auth.digest.length = cparams->digest_length;
> 
> -		/* For SNOW3G algorithms, offset/length must be in bits */
> -		if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
> +		/* For SNOW3G/KASUMI algorithms, offset/length must be in bits */
> +		if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
> +				cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9) {
>  			op->sym->auth.data.offset = ipdata_offset << 3;
>  			op->sym->auth.data.length = data_len << 3;
>  		} else {
> @@ -488,7 +491,8 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
>  		op->sym->cipher.iv.length = cparams->iv.length;
> 
>  		/* For SNOW3G algorithms, offset/length must be in bits */
> -		if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2) {
> +		if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
> +				cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8) {
>  			op->sym->cipher.data.offset = ipdata_offset << 3;
>  			if (cparams->do_hash && cparams->hash_verify)
>  				/* Do not cipher the hash tag */
> diff --git a/lib/librte_cryptodev/rte_crypto_sym.h
> b/lib/librte_cryptodev/rte_crypto_sym.h
> index 4ae9b9e..d9bd821 100644
> --- a/lib/librte_cryptodev/rte_crypto_sym.h
> +++ b/lib/librte_cryptodev/rte_crypto_sym.h
> @@ -388,7 +388,8 @@ struct rte_crypto_sym_op {
>  			  * this location.
>  			  *
>  			  * @note
> -			  * For Snow3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
> +			  * For Snow3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2
> +			  * and KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8,
>  			  * this field should be in bits.
>  			  */
> 
> @@ -413,6 +414,7 @@ struct rte_crypto_sym_op {
>  			  *
>  			  * @note
>  			  * For Snow3G @ RTE_CRYPTO_AUTH_SNOW3G_UEA2
> +			  * and KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8,
>  			  * this field should be in bits.
>  			  */
>  		} data; /**< Data offsets and length for ciphering */
> @@ -485,6 +487,7 @@ struct rte_crypto_sym_op {
>  			  *
>  			  * @note
>  			  * For Snow3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2
> +			  * and KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
>  			  * this field should be in bits.
>  			  */
> 
> @@ -504,6 +507,7 @@ struct rte_crypto_sym_op {
>  			  *
>  			  * @note
>  			  * For Snow3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2
> +			  * and KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9,
>  			  * this field should be in bits.
>  			  */
>  		} data; /**< Data offsets and length for authentication */
> diff --git a/lib/librte_cryptodev/rte_cryptodev.h
> b/lib/librte_cryptodev/rte_cryptodev.h
> index d47f1e8..27cf8ef 100644
> --- a/lib/librte_cryptodev/rte_cryptodev.h
> +++ b/lib/librte_cryptodev/rte_cryptodev.h
> @@ -59,12 +59,15 @@ extern "C" {
>  /**< Intel QAT Symmetric Crypto PMD device name */
>  #define CRYPTODEV_NAME_SNOW3G_PMD	("cryptodev_snow3g_pmd")
>  /**< SNOW 3G PMD device name */
> +#define CRYPTODEV_NAME_KASUMI_PMD	("cryptodev_kasumi_pmd")
> +/**< KASUMI PMD device name */
> 
>  /** Crypto device type */
>  enum rte_cryptodev_type {
>  	RTE_CRYPTODEV_NULL_PMD = 1,	/**< Null crypto PMD */
>  	RTE_CRYPTODEV_AESNI_GCM_PMD,	/**< AES-NI GCM PMD */
>  	RTE_CRYPTODEV_AESNI_MB_PMD,	/**< AES-NI multi buffer PMD */
> +	RTE_CRYPTODEV_KASUMI_PMD,	/**< KASUMI PMD */
>  	RTE_CRYPTODEV_QAT_SYM_PMD,	/**< QAT PMD Symmetric Crypto */
>  	RTE_CRYPTODEV_SNOW3G_PMD,	/**< SNOW 3G PMD */
>  };
> diff --git a/mk/rte.app.mk b/mk/rte.app.mk
> index e9969fc..21bed09 100644
> --- a/mk/rte.app.mk
> +++ b/mk/rte.app.mk
> @@ -134,6 +134,8 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += -lrte_pmd_null_crypto
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT)        += -lrte_pmd_qat -lcrypto
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G)     += -lrte_pmd_snow3g
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G)     += -L$(LIBSSO_PATH)/build -lsso
> +_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI)     += -lrte_pmd_kasumi
> +_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI)     += -L$(LIBSSO_KASUMI_PATH)/build -lsso_kasumi
>  endif # CONFIG_RTE_LIBRTE_CRYPTODEV
> 
>  endif # !CONFIG_RTE_BUILD_SHARED_LIBS
> diff --git a/scripts/test-build.sh b/scripts/test-build.sh
> index 9a11f94..0cfbdbc 100755
> --- a/scripts/test-build.sh
> +++ b/scripts/test-build.sh
> @@ -46,6 +46,7 @@ default_path=$PATH
>  # - DPDK_MAKE_JOBS (int)
>  # - DPDK_NOTIFY (notify-send)
>  # - LIBSSO_PATH
> +# - LIBSSO_KASUMI_PATH
>  . $(dirname $(readlink -e $0))/load-devel-config.sh
> 
>  print_usage () {
> @@ -122,6 +123,7 @@ reset_env ()
>  	unset DPDK_DEP_ZLIB
>  	unset AESNI_MULTI_BUFFER_LIB_PATH
>  	unset LIBSSO_PATH
> +	unset LIBSSO_KASUMI_PATH
>  	unset PQOS_INSTALL_PATH
>  }
> 
> @@ -168,6 +170,8 @@ config () # <directory> <target> <options>
>  		sed -ri      's,(PMD_AESNI_GCM=)n,\1y,' $1/.config
>  		test -z "$LIBSSO_PATH" || \
>  		sed -ri         's,(PMD_SNOW3G=)n,\1y,' $1/.config
> +		test -z "$LIBSSO_KASUMI_PATH" || \
> +		sed -ri         's,(PMD_KASUMI=)n,\1y,' $1/.config
>  		test "$DPDK_DEP_SSL" != y || \
>  		sed -ri            's,(PMD_QAT=)n,\1y,' $1/.config
>  		sed -ri        's,(KNI_VHOST.*=)n,\1y,' $1/.config
> --
> 2.5.0


