[dpdk-dev] [PATCH] KNI: use a memzone pool for KNI alloc/release

Marc Sune marc.sune at bisdn.de
Thu Oct 9 09:05:25 CEST 2014


Hi Helin,

On 09/10/14 08:01, Zhang, Helin wrote:
> Hi Marc
>
> Thanks for the idea on KNI! I have comments/questions as follows. Please correct me if I am wrong!
>
>> -----Original Message-----
>> From: dev [mailto:dev-bounces at dpdk.org] On Behalf Of Marc Sune
>> Sent: Monday, September 29, 2014 6:16 PM
>> To: dev at dpdk.org
>> Subject: [dpdk-dev] [PATCH] KNI: use a memzone pool for KNI alloc/release
>>
>> This patch implements the KNI memzone pool in order to:
>>
>> * prevent memzone exhaustion when allocating/deallocating KNI
>>    interfaces.
> What do you mean by "exhaustion"? Actually the memzones can be reused, though they cannot be resized.

The exhaustion problem is due to the fact that the current 
implementation embeds the interface name in the memzone names. This 
means that if a DPDK application creates multiple interfaces, destroys 
them, and then creates some more (with different names), it quickly 
runs out of memzones, because the ones already allocated cannot be 
reused and persist until the end of the application's lifetime.

This is precisely our problem, and the reason to create this memzone 
pool: once the ifaces are destroyed (rte_kni_release()), those 
memzones can be reused.
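
To illustrate the failure mode, here is a minimal sketch (not part of 
the patch; the interface names and the 2048-byte context size are 
hypothetical). With per-name memzones, every new name reserves fresh 
memzones that can never be returned, so this loop eventually fails:

#include <stdio.h>
#include <rte_memory.h>
#include <rte_memzone.h>

/* Sketch only: mimics the old "KNI_INFO_<iface>" naming scheme. Each
 * new interface name reserves a fresh memzone that is never freed, so
 * the loop fails even if every KNI interface was released in between. */
static void
show_memzone_exhaustion(void)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	unsigned i;

	for (i = 0; ; i++) {
		/* a new name each iteration: vEth0, vEth1, ... */
		snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_vEth%u", i);
		if (rte_memzone_reserve(mz_name, 2048 /* ctx size */,
					SOCKET_ID_ANY, 0) == NULL)
			break; /* reserve failed: memzone table exhausted */
	}
	printf("ran out of memzones after %u interface names\n", i);
}
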
>> * be able to allocate KNI interfaces with the same name as
>>    previously deallocated ones.
> I think the current implementation can already allocate a KNI interface with the same name after it has been deallocated, as each memzone reservation first tries to find an existing memzone with the same name. If the name has already been used, that means it has already been allocated; we can find it and reuse it, though the size cannot be changed.
> Have you encountered the problem you are trying to solve? Have you tried the current implementation of KNI for the case you are trying to fix?
> If I am not wrong, we may not need these changes, as the current implementation already supports the scenarios you are trying to support. The only limitation is that the sizes of the memzones cannot be changed, but there seems to be no way around that.

You are right. This comment should be deleted; I forgot to do it. We 
found the root cause of our problem only after finishing this patch.

I can rework the comment and resend.
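
For reference, with this patch applied the intended call flow is 
roughly the following (a sketch; error handling trimmed, and 
kni_example, its parameters and the value 4 are just placeholders):

#include <rte_kni.h>
#include <rte_mempool.h>

/* Hypothetical usage sketch of the new pool-based API. */
static void
kni_example(struct rte_mempool *pktmbuf_pool,
	    const struct rte_kni_conf *conf, struct rte_kni_ops *ops)
{
	struct rte_kni *kni;

	/* Pre-allocate all memzones for up to 4 concurrent KNI ifaces.
	 * Must be called once, on the master lcore, before any call
	 * to rte_kni_alloc(). */
	rte_kni_init(4);

	kni = rte_kni_alloc(pktmbuf_pool, conf, ops);
	if (kni == NULL)
		return;

	/* ... use the interface ... */

	/* Returns the slot to the pool, so an iface with the same or a
	 * different name can be allocated later without leaking
	 * memzones. */
	rte_kni_release(kni);
}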

Marc

>
>> It adds a new API call, rte_kni_init(max_kni_ifaces), which shall be called
>> before any call to rte_kni_alloc() if KNI is used.
>>
>> Signed-off-by: Marc Sune <marc.sune at bisdn.de>
>> ---
>>   lib/librte_kni/rte_kni.c |  302 ++++++++++++++++++++++++++++++++++++++--------
>>   lib/librte_kni/rte_kni.h |   18 +++
>>   2 files changed, 269 insertions(+), 51 deletions(-)
>>
>> diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
>> index 76feef4..df55789 100644
>> --- a/lib/librte_kni/rte_kni.c
>> +++ b/lib/librte_kni/rte_kni.c
>> @@ -40,6 +40,7 @@
>>   #include <unistd.h>
>>   #include <sys/ioctl.h>
>>
>> +#include <rte_spinlock.h>
>>   #include <rte_string_fns.h>
>>   #include <rte_ethdev.h>
>>   #include <rte_malloc.h>
>> @@ -58,7 +59,7 @@
>>
>>   #define KNI_REQUEST_MBUF_NUM_MAX      32
>>
>> -#define KNI_MZ_CHECK(mz) do { if (mz) goto fail; } while (0)
>> +#define KNI_MEM_CHECK(cond) do { if (cond) goto kni_fail; } while (0)
>>
>>   /**
>>    * KNI context
>> @@ -66,6 +67,7 @@
>>   struct rte_kni {
>>   	char name[RTE_KNI_NAMESIZE];        /**< KNI interface name */
>>   	uint16_t group_id;                  /**< Group ID of KNI devices */
>> +	unsigned slot_id;                   /**< KNI pool slot ID */
>>   	struct rte_mempool *pktmbuf_pool;   /**< pkt mbuf mempool */
>>   	unsigned mbuf_size;                 /**< mbuf size */
>>
>> @@ -88,10 +90,48 @@ enum kni_ops_status {
>>   	KNI_REQ_REGISTERED,
>>   };
>>
>> +/**
>> +* KNI memzone pool slot
>> +*/
>> +struct rte_kni_memzone_slot{
>> +	unsigned id;
>> +	uint8_t in_use : 1;                    /**< slot in use */
>> +
>> +	//Memzones
>> +	const struct rte_memzone *m_ctx;       /**< KNI ctx */
>> +	const struct rte_memzone *m_tx_q;      /**< TX queue */
>> +	const struct rte_memzone *m_rx_q;      /**< RX queue */
>> +	const struct rte_memzone *m_alloc_q;   /**< Allocated mbufs queue */
>> +	const struct rte_memzone *m_free_q;    /**< To be freed mbufs queue
>> */
>> +	const struct rte_memzone *m_req_q;     /**< Request queue */
>> +	const struct rte_memzone *m_resp_q;    /**< Response queue */
>> +	const struct rte_memzone *m_sync_addr;
>> +
>> +	/* Free linked list */
>> +	struct rte_kni_memzone_slot *next;     /**< Next slot link.list */
>> +};
>> +
>> +/**
>> +* KNI memzone pool
>> +*/
>> +struct rte_kni_memzone_pool{
>> +	uint8_t initialized : 1;            /**< Global KNI pool init flag */
>> +
>> +	unsigned max_ifaces;                /**< Max. num of KNI ifaces */
>> +	struct rte_kni_memzone_slot *slots;        /**< Pool slots */
>> +	rte_spinlock_t mutex;               /**< alloc/release mutex */
>> +
>> +	//Free memzone slots linked-list
>> +	struct rte_kni_memzone_slot *free;         /**< First empty slot */
>> +	struct rte_kni_memzone_slot *free_tail;    /**< Last empty slot */
>> +};
>> +
>> +
>>   static void kni_free_mbufs(struct rte_kni *kni);
>>   static void kni_allocate_mbufs(struct rte_kni *kni);
>>
>>   static volatile int kni_fd = -1;
>> +static struct rte_kni_memzone_pool kni_memzone_pool = {0};
>>
>>   static const struct rte_memzone *
>>   kni_memzone_reserve(const char *name, size_t len, int socket_id,
>> @@ -105,6 +145,154 @@ kni_memzone_reserve(const char *name, size_t len, int socket_id,
>>   	return mz;
>>   }
>>
>> +/* Pool mgmt */
>> +static struct rte_kni_memzone_slot*
>> +kni_memzone_pool_alloc(void)
>> +{
>> +	struct rte_kni_memzone_slot* slot;
>> +
>> +	rte_spinlock_lock(&kni_memzone_pool.mutex);
>> +
>> +	if(!kni_memzone_pool.free) {
>> +		rte_spinlock_unlock(&kni_memzone_pool.mutex);
>> +		return NULL;
>> +	}
>> +
>> +	slot = kni_memzone_pool.free;
>> +	kni_memzone_pool.free = slot->next;
>> +
>> +	if(!kni_memzone_pool.free)
>> +		kni_memzone_pool.free_tail = NULL;
>> +
>> +	rte_spinlock_unlock(&kni_memzone_pool.mutex);
>> +
>> +	return slot;
>> +}
>> +
>> +static void
>> +kni_memzone_pool_dealloc(struct rte_kni_memzone_slot* slot)
>> +{
>> +	rte_spinlock_lock(&kni_memzone_pool.mutex);
>> +
>> +	if(kni_memzone_pool.free)
>> +		kni_memzone_pool.free_tail->next = slot;
>> +	else
>> +		kni_memzone_pool.free = slot;
>> +
>> +	kni_memzone_pool.free_tail = slot;
>> +	slot->next = NULL;
>> +
>> +	rte_spinlock_unlock(&kni_memzone_pool.mutex);
>> +}
>> +
>> +
>> +/* Shall be called before any allocation happens */
>> +void
>> +rte_kni_init(unsigned int max_kni_ifaces)
>> +{
>> +	unsigned i;
>> +	struct rte_kni_memzone_slot* it;
>> +	const struct rte_memzone *mz;
>> +#define OBJNAMSIZ 32
>> +	char obj_name[OBJNAMSIZ];
>> +	char mz_name[RTE_MEMZONE_NAMESIZE];
>> +
>> +	if(max_kni_ifaces == 0) {
>> +		//Panic
>> +		RTE_LOG(ERR, KNI, "Invalid number of max_kni_ifaces %d\n",
>> +							max_kni_ifaces);
>> +		rte_panic("Unable to initialize KNI\n");
>> +	}
>> +
>> +	//Allocate slot objects
>> +	kni_memzone_pool.slots = (struct rte_kni_memzone_slot*)rte_malloc(NULL,
>> +					sizeof(struct rte_kni_memzone_slot) *
>> +					max_kni_ifaces,
>> +					0);
>> +	KNI_MEM_CHECK(kni_memzone_pool.slots == NULL);
>> +
>> +	//Initialize general stuff
>> +	kni_memzone_pool.initialized = 1;
>> +	kni_memzone_pool.max_ifaces = max_kni_ifaces;
>> +	kni_memzone_pool.free = &kni_memzone_pool.slots[0];
>> +
>> +	//Pre-allocate all memzones of the slots; panic on error
>> +	for(i=0; i<max_kni_ifaces; i++) {
>> +
>> +		//Recover current slot
>> +		it = &kni_memzone_pool.slots[i];
>> +		it->id = i;
>> +
>> +		//Allocate KNI context
>> +		snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%d", i);
>> +		mz = kni_memzone_reserve(mz_name, sizeof(struct rte_kni),
>> +					SOCKET_ID_ANY, 0);
>> +		KNI_MEM_CHECK(mz == NULL);
>> +		it->m_ctx = mz;
>> +
>> +		/* TX RING */
>> +		snprintf(obj_name, OBJNAMSIZ, "kni_tx_%d", i);
>> +		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
>> +							SOCKET_ID_ANY, 0);
>> +		KNI_MEM_CHECK(mz == NULL);
>> +		it->m_tx_q = mz;
>> +
>> +		/* RX RING */
>> +		snprintf(obj_name, OBJNAMSIZ, "kni_rx_%d", i);
>> +		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
>> +							SOCKET_ID_ANY, 0);
>> +		KNI_MEM_CHECK(mz == NULL);
>> +		it->m_rx_q = mz;
>> +
>> +		/* ALLOC RING */
>> +		snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%d", i);
>> +		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
>> +							SOCKET_ID_ANY, 0);
>> +		KNI_MEM_CHECK(mz == NULL);
>> +		it->m_alloc_q = mz;
>> +
>> +		/* FREE RING */
>> +		snprintf(obj_name, OBJNAMSIZ, "kni_free_%d", i);
>> +		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
>> +							SOCKET_ID_ANY, 0);
>> +		KNI_MEM_CHECK(mz == NULL);
>> +		it->m_free_q = mz;
>> +
>> +		/* Request RING */
>> +		snprintf(obj_name, OBJNAMSIZ, "kni_req_%d", i);
>> +		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
>> +							SOCKET_ID_ANY, 0);
>> +		KNI_MEM_CHECK(mz == NULL);
>> +		it->m_req_q = mz;
>> +
>> +		/* Response RING */
>> +		snprintf(obj_name, OBJNAMSIZ, "kni_resp_%d", i);
>> +		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
>> +							SOCKET_ID_ANY, 0);
>> +		KNI_MEM_CHECK(mz == NULL);
>> +		it->m_resp_q = mz;
>> +
>> +		/* Req/Resp sync mem area */
>> +		snprintf(obj_name, OBJNAMSIZ, "kni_sync_%d", i);
>> +		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
>> +							SOCKET_ID_ANY, 0);
>> +		KNI_MEM_CHECK(mz == NULL);
>> +		it->m_sync_addr = mz;
>> +
>> +		if(i+1 == max_kni_ifaces) {
>> +			it->next = NULL;
>> +			kni_memzone_pool.free_tail = it;
>> +		}else
>> +			it->next = &kni_memzone_pool.slots[i+1];
>> +	}
>> +
>> +	return;
>> +
>> +kni_fail:
>> +	rte_panic("Unable to allocate memory for max_kni_ifaces:%d. "
>> +		"Increase the amount of hugepages memory\n", max_kni_ifaces);
>> +}
>> +
>>   /* It is deprecated and just for backward compatibility */
>>   struct rte_kni *
>>   rte_kni_create(uint8_t port_id,
>> @@ -140,14 +328,20 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
>>   	struct rte_kni_device_info dev_info;
>>   	struct rte_kni *ctx;
>>   	char intf_name[RTE_KNI_NAMESIZE];
>> -#define OBJNAMSIZ 32
>> -	char obj_name[OBJNAMSIZ];
>>   	char mz_name[RTE_MEMZONE_NAMESIZE];
>>   	const struct rte_memzone *mz;
>> +	struct rte_kni_memzone_slot* slot=NULL;
>>
>>   	if (!pktmbuf_pool || !conf || !conf->name[0])
>>   		return NULL;
>>
>> +	/* Check inited */
>> +	if (kni_memzone_pool.initialized != 1) {
>> +		RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. "
>> +				"Invoke rte_kni_init() first\n");
>> +		return NULL;
>> +	}
>> +
>>   	/* Check FD and open once */
>>   	if (kni_fd < 0) {
>>   		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
>> @@ -158,16 +352,23 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
>>   		}
>>   	}
>>
>> +	/* Get an available slot from the pool */
>> +	slot = kni_memzone_pool_alloc();
>> +	if(!slot) {
>> +		RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; "
>> +			"increase the number of max_kni_ifaces(current %d) or "
>> +			"deallocate unused ones.\n",
>> +			kni_memzone_pool.max_ifaces);
>> +		return NULL;
>> +	}
>> +
>> +	//Recover ctx
>> +	ctx = slot->m_ctx->addr;
>>   	snprintf(intf_name, RTE_KNI_NAMESIZE, "%s", conf->name);
>> -	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%s", intf_name);
>> -	mz = kni_memzone_reserve(mz_name, sizeof(struct rte_kni),
>> -				SOCKET_ID_ANY, 0);
>> -	KNI_MZ_CHECK(mz == NULL);
>> -	ctx = mz->addr;
>>
>>   	if (ctx->in_use) {
>>   		RTE_LOG(ERR, KNI, "KNI %s is in use\n", ctx->name);
>> -		goto fail;
>> +		return NULL;
>>   	}
>>   	memset(ctx, 0, sizeof(struct rte_kni));
>>   	if (ops)
>> @@ -190,83 +391,72 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
>>   	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
>>   		dev_info.bus, dev_info.devid, dev_info.function,
>>   			dev_info.vendor_id, dev_info.device_id);
>> -
>>   	/* TX RING */
>> -	snprintf(obj_name, OBJNAMSIZ, "kni_tx_%s", intf_name);
>> -	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
>> -	KNI_MZ_CHECK(mz == NULL);
>> +	mz = slot->m_tx_q;
>>   	ctx->tx_q = mz->addr;
>>   	kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
>>   	dev_info.tx_phys = mz->phys_addr;
>>
>>   	/* RX RING */
>> -	snprintf(obj_name, OBJNAMSIZ, "kni_rx_%s", intf_name);
>> -	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
>> -	KNI_MZ_CHECK(mz == NULL);
>> +	mz = slot->m_rx_q;
>>   	ctx->rx_q = mz->addr;
>>   	kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
>>   	dev_info.rx_phys = mz->phys_addr;
>>
>>   	/* ALLOC RING */
>> -	snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%s", intf_name);
>> -	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
>> -	KNI_MZ_CHECK(mz == NULL);
>> +	mz = slot->m_alloc_q;
>>   	ctx->alloc_q = mz->addr;
>>   	kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
>>   	dev_info.alloc_phys = mz->phys_addr;
>>
>>   	/* FREE RING */
>> -	snprintf(obj_name, OBJNAMSIZ, "kni_free_%s", intf_name);
>> -	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
>> -	KNI_MZ_CHECK(mz == NULL);
>> +	mz = slot->m_free_q;
>>   	ctx->free_q = mz->addr;
>>   	kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
>>   	dev_info.free_phys = mz->phys_addr;
>>
>>   	/* Request RING */
>> -	snprintf(obj_name, OBJNAMSIZ, "kni_req_%s", intf_name);
>> -	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
>> -	KNI_MZ_CHECK(mz == NULL);
>> +	mz = slot->m_req_q;
>>   	ctx->req_q = mz->addr;
>>   	kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
>>   	dev_info.req_phys = mz->phys_addr;
>>
>>   	/* Response RING */
>> -	snprintf(obj_name, OBJNAMSIZ, "kni_resp_%s", intf_name);
>> -	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
>> -	KNI_MZ_CHECK(mz == NULL);
>> +	mz = slot->m_resp_q;
>>   	ctx->resp_q = mz->addr;
>>   	kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
>>   	dev_info.resp_phys = mz->phys_addr;
>>
>>   	/* Req/Resp sync mem area */
>> -	snprintf(obj_name, OBJNAMSIZ, "kni_sync_%s", intf_name);
>> -	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
>> -	KNI_MZ_CHECK(mz == NULL);
>> +	mz = slot->m_sync_addr;
>>   	ctx->sync_addr = mz->addr;
>>   	dev_info.sync_va = mz->addr;
>>   	dev_info.sync_phys = mz->phys_addr;
>>
>> +
>>   	/* MBUF mempool */
>>   	snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_OBJ_NAME,
>>   		pktmbuf_pool->name);
>>   	mz = rte_memzone_lookup(mz_name);
>> -	KNI_MZ_CHECK(mz == NULL);
>> +	KNI_MEM_CHECK(mz == NULL);
>>   	dev_info.mbuf_va = mz->addr;
>>   	dev_info.mbuf_phys = mz->phys_addr;
>>   	ctx->pktmbuf_pool = pktmbuf_pool;
>>   	ctx->group_id = conf->group_id;
>> +	ctx->slot_id = slot->id;
>>   	ctx->mbuf_size = conf->mbuf_size;
>>
>>   	ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
>> -	KNI_MZ_CHECK(ret < 0);
>> +	KNI_MEM_CHECK(ret < 0);
>>
>>   	ctx->in_use = 1;
>>
>>   	return ctx;
>>
>> -fail:
>> -
>> +kni_fail:
>> +	if(slot)
>> +		kni_memzone_pool_dealloc(&kni_memzone_pool.slots[slot->id]);
>> +
>>   	return NULL;
>>   }
>>
>> @@ -287,6 +477,7 @@ int
>>   rte_kni_release(struct rte_kni *kni)
>>   {
>>   	struct rte_kni_device_info dev_info;
>> +	unsigned slot_id;
>>
>>   	if (!kni || !kni->in_use)
>>   		return -1;
>> @@ -302,8 +493,19 @@ rte_kni_release(struct rte_kni *kni)
>>   	kni_free_fifo(kni->rx_q);
>>   	kni_free_fifo(kni->alloc_q);
>>   	kni_free_fifo(kni->free_q);
>> +
>> +	slot_id = kni->slot_id;
>> +
>> +	//Memset
>>   	memset(kni, 0, sizeof(struct rte_kni));
>>
>> +	//Release memzone
>> +	if(slot_id >= kni_memzone_pool.max_ifaces) {
>> +		rte_panic("KNI pool: corrupted slot ID: %d, max: %d\n",
>> +			slot_id, kni_memzone_pool.max_ifaces);
>> +	}
>> +	kni_memzone_pool_dealloc(&kni_memzone_pool.slots[slot_id]);
>> +
>>   	return 0;
>>   }
>>
>> @@ -437,23 +639,21 @@ rte_kni_get_port_id(struct rte_kni *kni)
>>   struct rte_kni *
>>   rte_kni_get(const char *name)
>>   {
>> -	struct rte_kni *kni;
>> -	const struct rte_memzone *mz;
>> -	char mz_name[RTE_MEMZONE_NAMESIZE];
>> -
>> -	if (!name || !name[0])
>> -		return NULL;
>> -
>> -	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%s", name);
>> -	mz = rte_memzone_lookup(mz_name);
>> -	if (!mz)
>> -		return NULL;
>> -
>> -	kni = mz->addr;
>> -	if (!kni->in_use)
>> -		return NULL;
>> +	unsigned i;
>> +	struct rte_kni_memzone_slot* it;
>> +	struct rte_kni* kni;
>> +
>> +	//Could be improved perf-wise if necessary
>> +	for(i=0; i<kni_memzone_pool.max_ifaces; i++) {
>> +		it = &kni_memzone_pool.slots[i];
>> +		if(it->in_use == 0)
>> +			continue;
>> +		kni = it->m_ctx->addr;
>> +		if(strncmp(kni->name, name, RTE_KNI_NAMESIZE) == 0)
>> +			return kni;
>> +	}
>>
>> -	return kni;
>> +	return NULL;
>>   }
>>
>>   /*
>> diff --git a/lib/librte_kni/rte_kni.h b/lib/librte_kni/rte_kni.h
>> index 1a0b004..0159a1d 100644
>> --- a/lib/librte_kni/rte_kni.h
>> +++ b/lib/librte_kni/rte_kni.h
>> @@ -90,11 +90,27 @@ struct rte_kni_conf {
>>   };
>>
>>   /**
>> + * Initialize and preallocate KNI subsystem
>> + *
>> + * This function is to be executed on the MASTER lcore only, after EAL
>> + * initialization and before any KNI interface is attempted to be
>> + * allocated.
>> + *
>> + * @param max_kni_ifaces
>> + *  The maximum number of KNI interfaces that can coexist concurrently
>> + */
>> +extern void rte_kni_init(unsigned int max_kni_ifaces);
>> +
>> +
>> +/**
>>    * Allocate KNI interface according to the port id, mbuf size, mbuf pool,
>>    * configurations and callbacks for kernel requests.The KNI interface created
>>    * in the kernel space is the net interface the traditional Linux application
>>    * talking to.
>>    *
>> + * rte_kni_alloc() shall not be called before rte_kni_init() has been
>> + * called. rte_kni_alloc() is thread safe.
>> + *
>>    * @param pktmbuf_pool
>>    *  The mempool for allocting mbufs for packets.
>>    * @param conf
>> @@ -138,6 +154,8 @@ extern struct rte_kni *rte_kni_create(uint8_t port_id,
>>    * Release KNI interface according to the context. It will also release the
>>    * paired KNI interface in kernel space. All processing on the specific KNI
>>    * context need to be stopped before calling this interface.
>> + *
>> + * rte_kni_release() is thread safe.
>>    *
>>    * @param kni
>>    *  The pointer to the context of an existent KNI interface.
>> --
>> 1.7.10.4
> Regards,
> Helin
>


