[dpdk-dev] [PATCH] Spelling fixes

Stephen Hemminger stephen at networkplumber.org
Wed May 14 17:18:15 CEST 2014


Fix spelling errors found by codespell

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>

---
Resend, as there was no response to the original posting

 lib/librte_eal/bsdapp/eal/eal.c                               |    2 +-
 lib/librte_eal/bsdapp/eal/include/exec-env/rte_kni_common.h   |    2 +-
 lib/librte_eal/common/include/i686/arch/rte_atomic.h          |   10 +++++-----
 lib/librte_eal/common/include/rte_atomic.h                    |   10 +++++-----
 lib/librte_eal/common/include/rte_eal.h                       |    2 +-
 lib/librte_eal/common/include/rte_interrupts.h                |    2 +-
 lib/librte_eal/linuxapp/eal/eal.c                             |    2 +-
 lib/librte_eal/linuxapp/eal/eal_interrupts.c                  |    4 ++--
 lib/librte_eal/linuxapp/eal/eal_memory.c                      |    2 +-
 lib/librte_eal/linuxapp/eal/eal_xen_memory.c                  |    2 +-
 lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h |    2 +-
 lib/librte_eal/linuxapp/igb_uio/igb_uio.c                     |    2 +-
 lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c           |    2 +-
 lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h                 |    2 +-
 lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c            |    2 +-
 lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c             |    2 +-
 lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c            |    4 ++--
 lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h             |    2 +-
 lib/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c               |    2 +-
 lib/librte_kni/rte_kni.h                                      |    4 ++--
 lib/librte_mempool/rte_mempool.c                              |    4 ++--
 lib/librte_mempool/rte_mempool.h                              |    8 ++++----
 lib/librte_pmd_e1000/e1000/e1000_82571.c                      |    2 +-
 lib/librte_pmd_e1000/e1000/e1000_mbx.c                        |    4 ++--
 lib/librte_pmd_e1000/em_ethdev.c                              |    2 +-
 lib/librte_pmd_e1000/em_rxtx.c                                |    2 +-
 lib/librte_pmd_e1000/igb_ethdev.c                             |    2 +-
 lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c                        |    4 ++--
 lib/librte_pmd_ixgbe/ixgbe_bypass.c                           |    6 +++---
 lib/librte_pmd_ixgbe/ixgbe_bypass_api.h                       |    4 ++--
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c                           |    2 +-
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c                             |    2 +-
 lib/librte_pmd_ixgbe/ixgbe_rxtx.h                             |    4 ++--
 lib/librte_pmd_ring/rte_eth_ring.c                            |    2 +-
 lib/librte_pmd_virtio/virtio_rxtx.c                           |    2 +-
 lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c                         |    2 +-
 lib/librte_pmd_xenvirt/rte_eth_xenvirt.c                      |    2 +-
 lib/librte_ring/rte_ring.h                                    |    2 +-
 38 files changed, 59 insertions(+), 59 deletions(-)

--- a/lib/librte_eal/bsdapp/eal/eal.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/bsdapp/eal/eal.c	2014-05-02 16:31:11.279562742 -0700
@@ -782,7 +782,7 @@ int rte_eal_has_hugepages(void)
 	return !internal_config.no_hugetlbfs;
 }
 
-/* Abstraction for port I/0 privilage */
+/* Abstraction for port I/0 privilege */
 static int
 rte_eal_iopl_init(void)
 {
--- a/lib/librte_eal/bsdapp/eal/include/exec-env/rte_kni_common.h	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/bsdapp/eal/include/exec-env/rte_kni_common.h	2014-05-02 16:31:11.279562742 -0700
@@ -92,7 +92,7 @@ struct rte_kni_request {
 
 /*
  * Fifo struct mapped in a shared memory. It describes a circular buffer FIFO
- * Write and read should wrap arround. Fifo is empty when write == read
+ * Write and read should wrap around. Fifo is empty when write == read
  * Writing should never overwrite the read position
  */
 struct rte_kni_fifo {
--- a/lib/librte_eal/common/include/i686/arch/rte_atomic.h	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/common/include/i686/arch/rte_atomic.h	2014-05-02 16:31:11.279562742 -0700
@@ -216,7 +216,7 @@ rte_atomic64_add(rte_atomic64_t *v, int6
  * @param v
  *   A pointer to the atomic counter.
  * @param dec
- *   The value to be substracted from the counter.
+ *   The value to be subtracted from the counter.
  */
 static inline void
 rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
@@ -287,14 +287,14 @@ rte_atomic64_add_return(rte_atomic64_t *
  * Subtract a 64-bit value from an atomic counter and return the result.
  *
  * Atomically subtracts the 64-bit value (dec) from the atomic counter (v)
- * and returns the value of v after the substraction.
+ * and returns the value of v after the subtraction.
  *
  * @param v
  *   A pointer to the atomic counter.
  * @param dec
- *   The value to be substracted from the counter.
+ *   The value to be subtracted from the counter.
  * @return
- *   The value of v after the substraction.
+ *   The value of v after the subtraction.
  */
 static inline int64_t
 rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
@@ -336,7 +336,7 @@ static inline int rte_atomic64_inc_and_t
  * @param v
  *   A pointer to the atomic counter.
  * @return
- *   True if the result after substraction is 0; false otherwise.
+ *   True if the result after subtraction is 0; false otherwise.
  */
 static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
 {
--- a/lib/librte_eal/common/include/rte_atomic.h	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/common/include/rte_atomic.h	2014-05-02 16:31:11.279562742 -0700
@@ -995,7 +995,7 @@ rte_atomic64_add(rte_atomic64_t *v, int6
  * @param v
  *   A pointer to the atomic counter.
  * @param dec
- *   The value to be substracted from the counter.
+ *   The value to be subtracted from the counter.
  */
 static inline void
 rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
@@ -1050,14 +1050,14 @@ rte_atomic64_add_return(rte_atomic64_t *
  * Subtract a 64-bit value from an atomic counter and return the result.
  *
  * Atomically subtracts the 64-bit value (dec) from the atomic counter (v)
- * and returns the value of v after the substraction.
+ * and returns the value of v after the subtraction.
  *
  * @param v
  *   A pointer to the atomic counter.
  * @param dec
- *   The value to be substracted from the counter.
+ *   The value to be subtracted from the counter.
  * @return
- *   The value of v after the substraction.
+ *   The value of v after the subtraction.
  */
 static inline int64_t
 rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
@@ -1090,7 +1090,7 @@ static inline int rte_atomic64_inc_and_t
  * @param v
  *   A pointer to the atomic counter.
  * @return
- *   True if the result after substraction is 0; false otherwise.
+ *   True if the result after subtraction is 0; false otherwise.
  */
 static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
 {
--- a/lib/librte_eal/common/include/rte_eal.h	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/common/include/rte_eal.h	2014-05-02 16:31:11.279562742 -0700
@@ -93,7 +93,7 @@ struct rte_config {
 } __attribute__((__packed__));
 
 /* Flag definitions for rte_config flags */
-#define EAL_FLG_HIGH_IOPL 1 /**< indicates high IO privilage in a linux env */
+#define EAL_FLG_HIGH_IOPL 1 /**< indicates high IO privilege in a linux env */
 
 /**
  * Get the global configuration structure.
--- a/lib/librte_eal/common/include/rte_interrupts.h	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/common/include/rte_interrupts.h	2014-05-02 16:31:11.279562742 -0700
@@ -45,7 +45,7 @@
 extern "C" {
 #endif
 
-/** Interupt handle */
+/** Interrupt handle */
 struct rte_intr_handle;
 
 /** Function to be registered for the specific interrupt */
--- a/lib/librte_eal/linuxapp/eal/eal.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/linuxapp/eal/eal.c	2014-05-02 16:31:11.279562742 -0700
@@ -943,7 +943,7 @@ rte_eal_mcfg_complete(void)
 }
 
 /*
- * Request iopl priviledge for all RPL, returns 0 on success
+ * Request iopl privilege for all RPL, returns 0 on success
  */
 static int
 rte_eal_iopl_init(void)
--- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c	2014-05-02 16:31:11.279562742 -0700
@@ -276,7 +276,7 @@ rte_intr_enable(struct rte_intr_handle *
 	/* not used at this moment */
 	case RTE_INTR_HANDLE_ALARM:
 		return -1;
-	/* unkown handle type */
+	/* unknown handle type */
 	default:
 		RTE_LOG(ERR, EAL,
 			"Unknown handle type of fd %d\n",
@@ -308,7 +308,7 @@ rte_intr_disable(struct rte_intr_handle
 	/* not used at this moment */
 	case RTE_INTR_HANDLE_ALARM:
 		return -1;
-	/* unkown handle type */
+	/* unknown handle type */
 	default:
 		RTE_LOG(ERR, EAL,
 			"Unknown handle type of fd %d\n",
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c	2014-05-02 16:31:11.279562742 -0700
@@ -221,7 +221,7 @@ aslr_enabled(void)
 }
 
 /*
- * Try to mmap *size bytes in /dev/zero. If it is succesful, return the
+ * Try to mmap *size bytes in /dev/zero. If it is successful, return the
  * pointer to the mmap'd area and keep *size unmodified. Else, retry
  * with a smaller zone: decrease *size by hugepage_sz until it reaches
  * 0. In this case, return NULL. Note: this function returns an address
--- a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c	2014-05-02 16:31:11.279562742 -0700
@@ -74,7 +74,7 @@ static int xen_fd = -1;
 static const char sys_dir_path[] = "/sys/kernel/mm/dom0-mm/memsize-mB";
 
 /*
- * Try to mmap *size bytes in /dev/zero. If it is succesful, return the
+ * Try to mmap *size bytes in /dev/zero. If it is successful, return the
  * pointer to the mmap'd area and keep *size unmodified. Else, retry
  * with a smaller zone: decrease *size by mem_size until it reaches
  * 0. In this case, return NULL. Note: this function returns an address
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h	2014-05-02 16:31:11.279562742 -0700
@@ -92,7 +92,7 @@ struct rte_kni_request {
 
 /*
  * Fifo struct mapped in a shared memory. It describes a circular buffer FIFO
- * Write and read should wrap arround. Fifo is empty when write == read
+ * Write and read should wrap around. Fifo is empty when write == read
  * Writing should never overwrite the read position
  */
 struct rte_kni_fifo {
--- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c	2014-05-02 16:31:11.279562742 -0700
@@ -339,7 +339,7 @@ igbuio_dom0_mmap_phys(struct uio_info *i
 
 /**
  * This is uio device mmap method which will use igbuio mmap for Xen 
- * Dom0 enviroment.
+ * Dom0 environment.
  */
 static int
 igbuio_dom0_pci_mmap(struct uio_info *info, struct vm_area_struct *vma)
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c	2014-05-02 16:31:11.279562742 -0700
@@ -60,7 +60,7 @@ static s32 e1000_null_mbx_transact(struc
  *  @size: Length of buffer
  *  @mbx_id: id of mailbox to read
  *
- *  returns SUCCESS if it successfuly read message from buffer
+ *  returns SUCCESS if it successfully read message from buffer
  **/
 s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
 {
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h	2014-05-02 16:31:11.279562742 -0700
@@ -92,7 +92,7 @@ struct igb_adapter;
 #define IGB_20K_ITR                      196
 #define IGB_70K_ITR                       56
 
-/* Interrupt modes, as used by the IntMode paramter */
+/* Interrupt modes, as used by the IntMode parameter */
 #define IGB_INT_MODE_LEGACY                0
 #define IGB_INT_MODE_MSI                   1
 #define IGB_INT_MODE_MSIX                  2
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c	2014-05-02 16:31:11.279562742 -0700
@@ -1007,7 +1007,7 @@ static void igb_set_interrupt_capability
 		/* start with one vector for every rx queue */
 		numvecs = adapter->num_rx_queues;
 
-		/* if tx handler is seperate add 1 for every tx queue */
+		/* if tx handler is separate add 1 for every tx queue */
 		if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
 			numvecs += adapter->num_tx_queues;
 
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ptp.c	2014-05-02 16:31:11.283562765 -0700
@@ -46,7 +46,7 @@
  *
  * Neither the 82576 nor the 82580 offer registers wide enough to hold
  * nanoseconds time values for very long. For the 82580, SYSTIM always
- * counts nanoseconds, but the upper 24 bits are not availible. The
+ * counts nanoseconds, but the upper 24 bits are not available. The
  * frequency is adjusted by changing the 32 bit fractional nanoseconds
  * register, TIMINCA.
  *
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c	2014-05-02 16:31:11.283562765 -0700
@@ -180,7 +180,7 @@ void igb_vmdq_set_rx_mode(struct net_dev
 		} else {
 			/*
 			 * Write addresses to the MTA, if the attempt fails
-			 * then we should just turn on promiscous mode so
+			 * then we should just turn on promiscuous mode so
 			 * that we can at least receive multicast traffic
 			 */
 			if (igb_write_mc_addr_list(adapter->netdev) != 0)
@@ -190,7 +190,7 @@ void igb_vmdq_set_rx_mode(struct net_dev
 		/*
 		 * Write addresses to available RAR registers, if there is not
 		 * sufficient space to store all the addresses then enable
-		 * unicast promiscous mode
+		 * unicast promiscuous mode
 		 */
 		if (igb_write_vm_addr_list(dev) < 0)
 			vmolr |= E1000_VMOLR_UPE;
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h	2014-05-02 16:31:11.283562765 -0700
@@ -3618,7 +3618,7 @@ static inline u32 mmd_eee_cap_to_ethtool
  * mmd_eee_adv_to_ethtool_adv_t
  * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
  *
- * A small helper function that translates the MMD EEE Advertisment (7.60)
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
  * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
  * settings.
  */
--- a/lib/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_eal/linuxapp/xen_dom0/dom0_mm_misc.c	2014-05-02 16:31:11.283562765 -0700
@@ -224,7 +224,7 @@ dom0_memory_free(struct dom0_mm_data * m
 	if (num_block == 0)
 		return -1;
 
-	/* free memory and destory contiguous region in Xen*/
+	/* free memory and destroy contiguous region in Xen*/
 	for (i = 0; i< num_block; i++) {
 		vstart = mm_data->block_info[i].vir_addr;
 		if (vstart) {
--- a/lib/librte_kni/rte_kni.h	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_kni/rte_kni.h	2014-05-02 16:31:11.283562765 -0700
@@ -140,7 +140,7 @@ extern struct rte_kni *rte_kni_create(ui
  * context need to be stopped before calling this interface.
  *
  * @param kni
- *  The pointer to the context of an existant KNI interface.
+ *  The pointer to the context of an existent KNI interface.
  *
  * @return
  *  - 0 indicates success.
@@ -154,7 +154,7 @@ extern int rte_kni_release(struct rte_kn
  * Finally constructs the response mbuf and puts it back to the resp_q.
  *
  * @param kni
- *  The pointer to the context of an existant KNI interface.
+ *  The pointer to the context of an existent KNI interface.
  *
  * @return
  *  - 0 
--- a/lib/librte_mempool/rte_mempool.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_mempool/rte_mempool.c	2014-05-02 16:31:11.283562765 -0700
@@ -93,7 +93,7 @@ static unsigned get_gcd(unsigned a, unsi
 }
 
 /*
- * Depending on memory configuration, objects addresses are spreaded
+ * Depending on memory configuration, objects addresses are spread
  * between channels and ranks in RAM: the pool allocator will add
  * padding between objects. This function return the new size of the
  * object.
@@ -276,7 +276,7 @@ rte_mempool_calc_obj_size(uint32_t elt_s
 
 	/*
 	 * increase trailer to add padding between objects in order to
-	 * spread them accross memory channels/ranks
+	 * spread them across memory channels/ranks
 	 */
 	if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
 		unsigned new_size;
--- a/lib/librte_mempool/rte_mempool.h	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_mempool/rte_mempool.h	2014-05-02 16:31:11.283562765 -0700
@@ -474,7 +474,7 @@ typedef void (rte_mempool_ctor_t)(struct
  *   never be used. The access to the per-lcore table is of course
  *   faster than the multi-producer/consumer pool. The cache can be
  *   disabled if the cache_size argument is set to 0; it can be useful to
- *   avoid loosing objects in cache. Note that even if not used, the
+ *   avoid losing objects in cache. Note that even if not used, the
  *   memory space for cache is always reserved in a mempool structure,
  *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
  * @param private_data_size
@@ -565,7 +565,7 @@ rte_mempool_create(const char *name, uns
  *   never be used. The access to the per-lcore table is of course
  *   faster than the multi-producer/consumer pool. The cache can be
  *   disabled if the cache_size argument is set to 0; it can be useful to
- *   avoid loosing objects in cache. Note that even if not used, the
+ *   avoid losing objects in cache. Note that even if not used, the
  *   memory space for cache is always reserved in a mempool structure,
  *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
  * @param private_data_size
@@ -666,7 +666,7 @@ rte_mempool_xmem_create(const char *name
  *   never be used. The access to the per-lcore table is of course
  *   faster than the multi-producer/consumer pool. The cache can be
  *   disabled if the cache_size argument is set to 0; it can be useful to
- *   avoid loosing objects in cache. Note that even if not used, the
+ *   avoid losing objects in cache. Note that even if not used, the
  *   memory space for cache is always reserved in a mempool structure,
  *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
  * @param private_data_size
@@ -1332,7 +1332,7 @@ uint32_t rte_mempool_calc_obj_size(uint3
 
 /**
  * Calculate maximum amount of memory required to store given number of objects.
- * Assumes that the memory buffer will be alligned at page boundary.
+ * Assumes that the memory buffer will be aligned at page boundary.
  * Note, that if object size is bigger then page size, then it assumes that
  * we have a subsets of physically continuous  pages big enough to store
  * at least one object.
--- a/lib/librte_pmd_e1000/e1000/e1000_82571.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_e1000/e1000/e1000_82571.c	2014-05-02 16:31:11.283562765 -0700
@@ -395,7 +395,7 @@ STATIC s32 e1000_init_mac_params_82571(s
 	}
 
 	/* Ensure that the inter-port SWSM.SMBI lock bit is clear before
-	 * first NVM or PHY acess. This should be done for single-port
+	 * first NVM or PHY access. This should be done for single-port
 	 * devices, and for one port only on dual-port devices so that
 	 * for those devices we can still use the SMBI lock to synchronize
 	 * inter-port accesses to the PHY & NVM.
--- a/lib/librte_pmd_e1000/e1000/e1000_mbx.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_e1000/e1000/e1000_mbx.c	2014-05-02 16:31:11.283562765 -0700
@@ -68,7 +68,7 @@ STATIC s32 e1000_null_mbx_transact(struc
  *  @size: Length of buffer
  *  @mbx_id: id of mailbox to read
  *
- *  returns SUCCESS if it successfuly read message from buffer
+ *  returns SUCCESS if it successfully read message from buffer
  **/
 s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
 {
@@ -492,7 +492,7 @@ out_no_write:
  *  @size: Length of buffer
  *  @mbx_id: id of mailbox to read
  *
- *  returns SUCCESS if it successfuly read message from buffer
+ *  returns SUCCESS if it successfully read message from buffer
  **/
 STATIC s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size,
 			     u16 E1000_UNUSEDARG mbx_id)
--- a/lib/librte_pmd_e1000/em_ethdev.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_e1000/em_ethdev.c	2014-05-02 16:31:11.283562765 -0700
@@ -644,7 +644,7 @@ em_hardware_init(struct e1000_hw *hw)
 	 *   frames to be received after sending an XOFF.
 	 * - Low water mark works best when it is very near the high water mark.
 	 *   This allows the receiver to restart by sending XON when it has
-	 *   drained a bit. Here we use an arbitary value of 1500 which will
+	 *   drained a bit. Here we use an arbitrary value of 1500 which will
 	 *   restart after one full frame is pulled from the buffer. There
 	 *   could be several smaller frames in the buffer and if so they will
 	 *   not trigger the XON until their total number reduces the buffer
--- a/lib/librte_pmd_e1000/em_rxtx.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_e1000/em_rxtx.c	2014-05-02 16:31:11.283562765 -0700
@@ -1703,7 +1703,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * limit for packet length, jumbo frame of any size
 		 * can be accepted, thus we have to enable scattered
 		 * rx if jumbo frames are enabled (or if buffer size
-		 * is too small to accomodate non-jumbo packets)
+		 * is too small to accommodate non-jumbo packets)
 		 * to avoid splitting packets that don't fit into
 		 * one buffer.
 		 */
--- a/lib/librte_pmd_e1000/igb_ethdev.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_e1000/igb_ethdev.c	2014-05-02 16:31:11.283562765 -0700
@@ -934,7 +934,7 @@ igb_hardware_init(struct e1000_hw *hw)
 	 *   frames to be received after sending an XOFF.
 	 * - Low water mark works best when it is very near the high water mark.
 	 *   This allows the receiver to restart by sending XON when it has
-	 *   drained a bit. Here we use an arbitary value of 1500 which will
+	 *   drained a bit. Here we use an arbitrary value of 1500 which will
 	 *   restart after one full frame is pulled from the buffer. There
 	 *   could be several smaller frames in the buffer and if so they will
 	 *   not trigger the XON until their total number reduces the buffer
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c	2014-05-02 16:31:11.283562765 -0700
@@ -41,7 +41,7 @@ POSSIBILITY OF SUCH DAMAGE.
  *  @size: Length of buffer
  *  @mbx_id: id of mailbox to read
  *
- *  returns SUCCESS if it successfuly read message from buffer
+ *  returns SUCCESS if it successfully read message from buffer
  **/
 s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
 {
@@ -448,7 +448,7 @@ out_no_write:
  *  @size: Length of buffer
  *  @mbx_id: id of mailbox to read
  *
- *  returns SUCCESS if it successfuly read message from buffer
+ *  returns SUCCESS if it successfully read message from buffer
  **/
 STATIC s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
 			     u16 mbx_id)
--- a/lib/librte_pmd_ixgbe/ixgbe_bypass.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_ixgbe/ixgbe_bypass.c	2014-05-02 16:31:11.283562765 -0700
@@ -95,9 +95,9 @@ ixgbe_bypass_set_time(struct ixgbe_adapt
 }
 
 /**
- * ixgbe_bypass_init - Make some enviroment changes for bypass
+ * ixgbe_bypass_init - Make some environment changes for bypass
  *
- * @adapter: pointer to ixgbe_adapter sturcture for access to state bits
+ * @adapter: pointer to ixgbe_adapter structure for access to state bits
  *
  * This function collects all the modifications needed by the bypass
  * driver.
@@ -173,7 +173,7 @@ ixgbe_bypass_state_store(struct rte_eth_
 	if (ret_val)
 		goto exit;
 
-	/* Set AUTO back on so FW can recieve events */
+	/* Set AUTO back on so FW can receive events */
 	ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
 					 BYPASS_MODE_OFF_M, BYPASS_AUTO);
 
--- a/lib/librte_pmd_ixgbe/ixgbe_bypass_api.h	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_ixgbe/ixgbe_bypass_api.h	2014-05-02 16:31:11.283562765 -0700
@@ -155,7 +155,7 @@ static s32 ixgbe_bypass_rw_generic(struc
  *
  * If we send a write we can't be sure it took until we can read back
  * that same register.  It can be a problem as some of the feilds may
- * for valid reasons change inbetween the time wrote the register and
+ * for valid reasons change between the time wrote the register and
  * we read it again to verify.  So this function check everything we
  * can check and then assumes it worked.
  *
@@ -254,7 +254,7 @@ static s32 ixgbe_bypass_set_generic(stru
 }
 
 /**
- *  ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom addres.
+ *  ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom address.
  *
  *  @hw: pointer to hardware structure
  *  @addr: The bypass eeprom address to read.
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c	2014-05-02 16:31:11.283562765 -0700
@@ -1966,7 +1966,7 @@ ixgbe_dev_link_status_print(struct rte_e
 }
 
 /*
- * It executes link_update after knowing an interrupt occured.
+ * It executes link_update after knowing an interrupt occurred.
  *
  * @param dev
  *  Pointer to struct rte_eth_dev.
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c	2014-05-02 16:31:11.283562765 -0700
@@ -2139,7 +2139,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_
 		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
 	/*
-	 * Certain constaints must be met in order to use the bulk buffer
+	 * Certain constraints must be met in order to use the bulk buffer
 	 * allocation Rx burst function.
 	 */
 	use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h	2014-05-02 16:31:11.283562765 -0700
@@ -91,7 +91,7 @@ struct igb_tx_entry_v {
 };
 
 /**
- * continous entry sequence, gather by the same mempool 
+ * continuous entry sequence, gather by the same mempool
  */
 struct igb_tx_entry_seq {
 	const struct rte_mempool* pool;
@@ -165,7 +165,7 @@ struct igb_tx_queue {
 	uint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */
 	struct igb_tx_entry *sw_ring;      /**< virtual address of SW ring. */
 #ifdef RTE_IXGBE_INC_VECTOR
-	/** continous tx entry sequence within the same mempool */
+	/** continuous tx entry sequence within the same mempool */
 	struct igb_tx_entry_seq *sw_ring_seq; 
 #endif
 	volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
--- a/lib/librte_pmd_ring/rte_eth_ring.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_ring/rte_eth_ring.c	2014-05-02 16:31:11.283562765 -0700
@@ -221,7 +221,7 @@ rte_eth_from_rings(struct rte_ring *cons
 	struct rte_eth_dev *eth_dev = NULL;
 	unsigned i;
 
-	/* do some paramter checking */
+	/* do some parameter checking */
 	if (rx_queues == NULL && nb_rx_queues > 0)
 		goto error;
 	if (tx_queues == NULL && nb_tx_queues > 0)
--- a/lib/librte_pmd_virtio/virtio_rxtx.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_virtio/virtio_rxtx.c	2014-05-02 16:31:11.283562765 -0700
@@ -144,7 +144,7 @@ void
 virtio_dev_rxtx_start(struct rte_eth_dev *dev)
 {
 	/*
-	 * Start recieve and transmit vrings
+	 * Start receive and transmit vrings
 	 * -	Setup vring structure for all queues
 	 * -	Initialize descriptor for the rx vring
 	 * -	Allocate blank mbufs for the each rx descriptor
--- a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c	2014-05-02 16:31:11.283562765 -0700
@@ -297,7 +297,7 @@ vmxnet3_xmit_pkts( void *tx_queue, struc
 
 			txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);
 
-			/* Fill the tx decriptor */
+			/* Fill the tx descriptor */
 			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
 			tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
 			txd->addr = tbi->bufPA;
--- a/lib/librte_pmd_xenvirt/rte_eth_xenvirt.c	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_pmd_xenvirt/rte_eth_xenvirt.c	2014-05-02 16:31:11.283562765 -0700
@@ -374,7 +374,7 @@ eth_link_update(struct rte_eth_dev *dev
 
 /*
  * Create shared vring between guest and host.
- * Memory is allocated through grant alloc driver, so it is not physical continous.
+ * Memory is allocated through grant alloc driver, so it is not physical continuous.
  */
 static void *
 gntalloc_vring_create(int queue_type, uint32_t size, int vtidx)
--- a/lib/librte_ring/rte_ring.h	2014-05-02 16:31:11.287562789 -0700
+++ b/lib/librte_ring/rte_ring.h	2014-05-02 16:31:11.287562789 -0700
@@ -405,7 +405,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring
 	}
 
 	/*
-	 * If there are other enqueues in progress that preceeded us,
+	 * If there are other enqueues in progress that preceded us,
 	 * we need to wait for them to complete
 	 */
 	while (unlikely(r->prod.tail != prod_head))

