[dpdk-dev] [PATCH 08/10] cxgbe: add LE-TCAM filtering support

Rahul Lakkireddy rahul.lakkireddy at chelsio.com
Wed Feb 3 09:32:29 CET 2016


Add support for setting LE-TCAM (Maskfull) filters.  An IPv4 filter
occupies one index, while an IPv6 filter occupies 4 indices and must
be aligned on a 4-index boundary.  Filters at a lower index have
higher priority than filters at a higher index.  When a filter is
hit, all filters at higher indices are ignored and the action is
taken immediately.
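
For reference, the index rule boils down to the following check (a
minimal illustrative sketch only, not part of the patch; the helper
name is made up):

    #include <stdbool.h>

    /* An IPv6 filter consumes slots fidx..fidx+3 and must start on a
     * 4-slot boundary; an IPv4 filter consumes only the single slot
     * fidx.
     */
    static bool filter_index_valid(unsigned int fidx,
                                   unsigned int nfilters, bool ipv6)
    {
        unsigned int nslots = ipv6 ? 4 : 1;

        if (ipv6 && (fidx & 0x3))
            return false;

        return fidx + nslots <= nfilters;
    }

The same rule is enforced by cxgbe_set_filter(), which also deletes
any overlapping filter of the other type before reusing its slots.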

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy at chelsio.com>
Signed-off-by: Kumar Sanghvi <kumaras at chelsio.com>
---
 drivers/net/cxgbe/Makefile              |   1 +
 drivers/net/cxgbe/base/adapter.h        |  21 +
 drivers/net/cxgbe/base/common.h         |   2 +
 drivers/net/cxgbe/base/t4_hw.c          |   3 +
 drivers/net/cxgbe/base/t4_msg.h         |  39 ++
 drivers/net/cxgbe/base/t4_tcb.h         |  74 +++
 drivers/net/cxgbe/base/t4fw_interface.h | 145 ++++++
 drivers/net/cxgbe/cxgbe_filter.c        | 802 ++++++++++++++++++++++++++++++++
 drivers/net/cxgbe/cxgbe_filter.h        |  18 +
 drivers/net/cxgbe/cxgbe_main.c          |   6 +
 drivers/net/cxgbe/cxgbe_ofld.h          |   5 +
 11 files changed, 1116 insertions(+)
 create mode 100644 drivers/net/cxgbe/base/t4_tcb.h
 create mode 100644 drivers/net/cxgbe/cxgbe_filter.c

diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile
index f5f5828..3201aff 100644
--- a/drivers/net/cxgbe/Makefile
+++ b/drivers/net/cxgbe/Makefile
@@ -81,6 +81,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c
 SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += clip_tbl.c
 SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += l2t.c
 SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += smt.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_filter.c
 
 # this lib depends upon:
 DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_eal lib/librte_ether
diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index 6af5c8e..a866993 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -575,6 +575,27 @@ static inline void t4_os_write_unlock(rte_rwlock_t *lock)
 }
 
 /**
+ * t4_init_completion - initialize completion
+ * @c: the completion context
+ */
+static inline void t4_init_completion(struct t4_completion *c)
+{
+	c->done = 0;
+	t4_os_lock_init(&c->lock);
+}
+
+/**
+ * t4_complete - set completion as done
+ * @c: the completion context
+ */
+static inline void t4_complete(struct t4_completion *c)
+{
+	t4_os_lock(&c->lock);
+	c->done = 1;
+	t4_os_unlock(&c->lock);
+}
+
+/**
  * ethdev2pinfo - return the port_info structure associated with a rte_eth_dev
  * @dev: the rte_eth_dev
  *
diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h
index 2b39c10..21dca32 100644
--- a/drivers/net/cxgbe/base/common.h
+++ b/drivers/net/cxgbe/base/common.h
@@ -162,7 +162,9 @@ struct tp_params {
 	int vlan_shift;
 	int vnic_shift;
 	int port_shift;
+	int tos_shift;
 	int protocol_shift;
+	int ethertype_shift;
 };
 
 struct vpd_params {
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index de2e6b7..b35876c 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -2574,8 +2574,11 @@ int t4_init_tp_params(struct adapter *adap)
 	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
 	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
 	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+	adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
 	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
 							       F_PROTOCOL);
+	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
+								F_ETHERTYPE);
 
 	/*
 	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h
index 6dc255b..57534f0 100644
--- a/drivers/net/cxgbe/base/t4_msg.h
+++ b/drivers/net/cxgbe/base/t4_msg.h
@@ -35,10 +35,12 @@
 #define T4_MSG_H
 
 enum {
+	CPL_SET_TCB_FIELD     = 0x5,
 	CPL_L2T_WRITE_REQ     = 0x12,
 	CPL_SMT_WRITE_REQ     = 0x14,
 	CPL_L2T_WRITE_RPL     = 0x23,
 	CPL_SMT_WRITE_RPL     = 0x2E,
+	CPL_SET_TCB_RPL       = 0x3A,
 	CPL_SGE_EGR_UPDATE    = 0xA5,
 	CPL_FW4_MSG           = 0xC0,
 	CPL_FW6_MSG           = 0xE0,
@@ -125,6 +127,43 @@ struct work_request_hdr {
 #define WR_HDR_SIZE 0
 #endif
 
+/* cpl_get_tcb.reply_ctrl fields */
+#define S_QUEUENO    0
+#define V_QUEUENO(x) ((x) << S_QUEUENO)
+
+#define S_REPLY_CHAN    14
+#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
+
+#define S_NO_REPLY    15
+#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
+
+struct cpl_set_tcb_field {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 reply_ctrl;
+	__be16 word_cookie;
+	__be64 mask;
+	__be64 val;
+};
+
+/* cpl_set_tcb_field.word_cookie fields */
+#define S_WORD    0
+#define V_WORD(x) ((x) << S_WORD)
+
+#define S_COOKIE    5
+#define M_COOKIE    0x7
+#define V_COOKIE(x) ((x) << S_COOKIE)
+#define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE)
+
+struct cpl_set_tcb_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 rsvd;
+	__u8   cookie;
+	__u8   status;
+	__be64 oldval;
+};
+
 struct cpl_tx_data {
 	union opcode_tid ot;
 	__be32 len;
diff --git a/drivers/net/cxgbe/base/t4_tcb.h b/drivers/net/cxgbe/base/t4_tcb.h
new file mode 100644
index 0000000..36afd56
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4_tcb.h
@@ -0,0 +1,74 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015-2016 Chelsio Communications.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Chelsio Communications nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _T4_TCB_DEFS_H
+#define _T4_TCB_DEFS_H
+
+/* 31:24 */
+#define W_TCB_SMAC_SEL    0
+#define S_TCB_SMAC_SEL    24
+#define M_TCB_SMAC_SEL    0xffULL
+#define V_TCB_SMAC_SEL(x) ((x) << S_TCB_SMAC_SEL)
+
+/* 95:32 */
+#define W_TCB_T_FLAGS    1
+
+/* 105:96 */
+#define W_TCB_RSS_INFO    3
+#define S_TCB_RSS_INFO    0
+#define M_TCB_RSS_INFO    0x3ffULL
+#define V_TCB_RSS_INFO(x) ((x) << S_TCB_RSS_INFO)
+
+/* 191:160 */
+#define W_TCB_TIMESTAMP    5
+#define S_TCB_TIMESTAMP    0
+#define M_TCB_TIMESTAMP    0xffffffffULL
+#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
+
+/* 223:192 */
+#define S_TCB_T_RTT_TS_RECENT_AGE    0
+#define M_TCB_T_RTT_TS_RECENT_AGE    0xffffffffULL
+#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
+
+#define S_TF_MIGRATING    0
+#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)
+
+#define S_TF_NON_OFFLOAD    1
+#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)
+
+#define S_TF_CCTRL_ECE    60
+
+#define S_TF_CCTRL_CWR    61
+
+#define S_TF_CCTRL_RFR    62
+#endif /* _T4_TCB_DEFS_H */
diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h
index 8a8652a..d3e4de5 100644
--- a/drivers/net/cxgbe/base/t4fw_interface.h
+++ b/drivers/net/cxgbe/base/t4fw_interface.h
@@ -82,6 +82,7 @@ enum fw_memtype {
  ********************************/
 
 enum fw_wr_opcodes {
+	FW_FILTER_WR		= 0x02,
 	FW_TP_WR		= 0x05,
 	FW_ETH_TX_PKT_WR	= 0x08,
 	FW_ETH_TX_PKTS_WR	= 0x09,
@@ -156,6 +157,150 @@ struct fw_eth_tx_pkts_wr {
 	__u8   type;
 };
 
+/* filter wr reply code in cookie in CPL_SET_TCB_RPL */
+enum fw_filter_wr_cookie {
+	FW_FILTER_WR_SUCCESS,
+	FW_FILTER_WR_FLT_ADDED,
+	FW_FILTER_WR_FLT_DELETED,
+	FW_FILTER_WR_SMT_TBL_FULL,
+	FW_FILTER_WR_EINVAL,
+};
+
+struct fw_filter_wr {
+	__be32 op_pkd;
+	__be32 len16_pkd;
+	__be64 r3;
+	__be32 tid_to_iq;
+	__be32 del_filter_to_l2tix;
+	__be16 ethtype;
+	__be16 ethtypem;
+	__u8   frag_to_ovlan_vldm;
+	__u8   smac_sel;
+	__be16 rx_chan_rx_rpl_iq;
+	__be32 maci_to_matchtypem;
+	__u8   ptcl;
+	__u8   ptclm;
+	__u8   ttyp;
+	__u8   ttypm;
+	__be16 ivlan;
+	__be16 ivlanm;
+	__be16 ovlan;
+	__be16 ovlanm;
+	__u8   lip[16];
+	__u8   lipm[16];
+	__u8   fip[16];
+	__u8   fipm[16];
+	__be16 lp;
+	__be16 lpm;
+	__be16 fp;
+	__be16 fpm;
+	__be16 r7;
+	__u8   sma[6];
+};
+
+#define S_FW_FILTER_WR_TID	12
+#define V_FW_FILTER_WR_TID(x)	((x) << S_FW_FILTER_WR_TID)
+
+#define S_FW_FILTER_WR_RQTYPE		11
+#define V_FW_FILTER_WR_RQTYPE(x)	((x) << S_FW_FILTER_WR_RQTYPE)
+
+#define S_FW_FILTER_WR_NOREPLY		10
+#define V_FW_FILTER_WR_NOREPLY(x)	((x) << S_FW_FILTER_WR_NOREPLY)
+
+#define S_FW_FILTER_WR_IQ	0
+#define V_FW_FILTER_WR_IQ(x)	((x) << S_FW_FILTER_WR_IQ)
+
+#define S_FW_FILTER_WR_DEL_FILTER	31
+#define V_FW_FILTER_WR_DEL_FILTER(x)	((x) << S_FW_FILTER_WR_DEL_FILTER)
+#define F_FW_FILTER_WR_DEL_FILTER	V_FW_FILTER_WR_DEL_FILTER(1U)
+
+#define S_FW_FILTER_WR_RPTTID		25
+#define V_FW_FILTER_WR_RPTTID(x)	((x) << S_FW_FILTER_WR_RPTTID)
+
+#define S_FW_FILTER_WR_DROP	24
+#define V_FW_FILTER_WR_DROP(x)	((x) << S_FW_FILTER_WR_DROP)
+
+#define S_FW_FILTER_WR_DIRSTEER		23
+#define V_FW_FILTER_WR_DIRSTEER(x)	((x) << S_FW_FILTER_WR_DIRSTEER)
+
+#define S_FW_FILTER_WR_MASKHASH		22
+#define V_FW_FILTER_WR_MASKHASH(x)	((x) << S_FW_FILTER_WR_MASKHASH)
+
+#define S_FW_FILTER_WR_DIRSTEERHASH	21
+#define V_FW_FILTER_WR_DIRSTEERHASH(x)	((x) << S_FW_FILTER_WR_DIRSTEERHASH)
+
+#define S_FW_FILTER_WR_LPBK	20
+#define V_FW_FILTER_WR_LPBK(x)	((x) << S_FW_FILTER_WR_LPBK)
+
+#define S_FW_FILTER_WR_DMAC	19
+#define V_FW_FILTER_WR_DMAC(x)	((x) << S_FW_FILTER_WR_DMAC)
+
+#define S_FW_FILTER_WR_INSVLAN		17
+#define V_FW_FILTER_WR_INSVLAN(x)	((x) << S_FW_FILTER_WR_INSVLAN)
+
+#define S_FW_FILTER_WR_RMVLAN		16
+#define V_FW_FILTER_WR_RMVLAN(x)	((x) << S_FW_FILTER_WR_RMVLAN)
+
+#define S_FW_FILTER_WR_HITCNTS		15
+#define V_FW_FILTER_WR_HITCNTS(x)	((x) << S_FW_FILTER_WR_HITCNTS)
+
+#define S_FW_FILTER_WR_TXCHAN		13
+#define V_FW_FILTER_WR_TXCHAN(x)	((x) << S_FW_FILTER_WR_TXCHAN)
+
+#define S_FW_FILTER_WR_PRIO	12
+#define V_FW_FILTER_WR_PRIO(x)	((x) << S_FW_FILTER_WR_PRIO)
+
+#define S_FW_FILTER_WR_L2TIX	0
+#define V_FW_FILTER_WR_L2TIX(x)	((x) << S_FW_FILTER_WR_L2TIX)
+
+#define S_FW_FILTER_WR_FRAG	7
+#define V_FW_FILTER_WR_FRAG(x)	((x) << S_FW_FILTER_WR_FRAG)
+
+#define S_FW_FILTER_WR_FRAGM	6
+#define V_FW_FILTER_WR_FRAGM(x)	((x) << S_FW_FILTER_WR_FRAGM)
+
+#define S_FW_FILTER_WR_IVLAN_VLD	5
+#define V_FW_FILTER_WR_IVLAN_VLD(x)	((x) << S_FW_FILTER_WR_IVLAN_VLD)
+
+#define S_FW_FILTER_WR_OVLAN_VLD	4
+#define V_FW_FILTER_WR_OVLAN_VLD(x)	((x) << S_FW_FILTER_WR_OVLAN_VLD)
+
+#define S_FW_FILTER_WR_IVLAN_VLDM	3
+#define V_FW_FILTER_WR_IVLAN_VLDM(x)	((x) << S_FW_FILTER_WR_IVLAN_VLDM)
+
+#define S_FW_FILTER_WR_OVLAN_VLDM	2
+#define V_FW_FILTER_WR_OVLAN_VLDM(x)	((x) << S_FW_FILTER_WR_OVLAN_VLDM)
+
+#define S_FW_FILTER_WR_RX_CHAN		15
+#define V_FW_FILTER_WR_RX_CHAN(x)	((x) << S_FW_FILTER_WR_RX_CHAN)
+
+#define S_FW_FILTER_WR_RX_RPL_IQ	0
+#define V_FW_FILTER_WR_RX_RPL_IQ(x)	((x) << S_FW_FILTER_WR_RX_RPL_IQ)
+
+#define S_FW_FILTER_WR_MACI	23
+#define V_FW_FILTER_WR_MACI(x)	((x) << S_FW_FILTER_WR_MACI)
+
+#define S_FW_FILTER_WR_MACIM	14
+#define V_FW_FILTER_WR_MACIM(x)	((x) << S_FW_FILTER_WR_MACIM)
+
+#define S_FW_FILTER_WR_FCOE	13
+#define V_FW_FILTER_WR_FCOE(x)	((x) << S_FW_FILTER_WR_FCOE)
+
+#define S_FW_FILTER_WR_FCOEM	12
+#define V_FW_FILTER_WR_FCOEM(x)	((x) << S_FW_FILTER_WR_FCOEM)
+
+#define S_FW_FILTER_WR_PORT	9
+#define V_FW_FILTER_WR_PORT(x)	((x) << S_FW_FILTER_WR_PORT)
+
+#define S_FW_FILTER_WR_PORTM	6
+#define V_FW_FILTER_WR_PORTM(x)	((x) << S_FW_FILTER_WR_PORTM)
+
+#define S_FW_FILTER_WR_MATCHTYPE	3
+#define V_FW_FILTER_WR_MATCHTYPE(x)	((x) << S_FW_FILTER_WR_MATCHTYPE)
+
+#define S_FW_FILTER_WR_MATCHTYPEM	0
+#define V_FW_FILTER_WR_MATCHTYPEM(x)	((x) << S_FW_FILTER_WR_MATCHTYPEM)
+
 /******************************************************************************
  *  C O M M A N D s
  *********************/
diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c
new file mode 100644
index 0000000..d4e32b1
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_filter.c
@@ -0,0 +1,802 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015-2016 Chelsio Communications.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Chelsio Communications nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_tcb.h"
+#include "t4fw_interface.h"
+#include "l2t.h"
+#include "smt.h"
+#include "clip_tbl.h"
+#include "cxgbe_filter.h"
+
+/**
+ * Validate if the requested filter specification can be set by checking
+ * if the requested features have been enabled
+ */
+static int validate_filter(struct adapter *adapter,
+			   struct ch_filter_specification *fs)
+{
+	u32 fconf, iconf;
+
+	/*
+	 * Check for unconfigured fields being used.
+	 */
+	fconf = adapter->params.tp.vlan_pri_map;
+	iconf = adapter->params.tp.ingress_config;
+
+#define S(_field) \
+	(fs->val._field || fs->mask._field)
+#define U(_mask, _field) \
+	(!(fconf & (_mask)) && S(_field))
+
+	if (U(F_FCOE, fcoe) || U(F_PORT, iport) || U(F_TOS, tos) ||
+	    U(F_ETHERTYPE, ethtype) || U(F_MACMATCH, macidx) ||
+	    U(F_MPSHITTYPE, matchtype) || U(F_FRAGMENTATION, frag) ||
+	    U(F_PROTOCOL, proto) ||
+	    U(F_VNIC_ID, pfvf_vld) ||
+	    U(F_VNIC_ID, ovlan_vld) ||
+	    U(F_VLAN, ivlan_vld))
+		return -EOPNOTSUPP;
+
+	/*
+	 * We need to translate any PF/VF specification into that
+	 * internal format below.
+	 */
+	if (S(pfvf_vld) && S(ovlan_vld))
+		return -EOPNOTSUPP;
+	if ((S(pfvf_vld) && !(iconf & F_VNIC)) ||
+	    (S(ovlan_vld) && (iconf & F_VNIC)))
+		return -EOPNOTSUPP;
+	if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
+		return -ERANGE;
+	fs->mask.pf &= 0x7;
+	fs->mask.vf &= 0x7f;
+
+#undef S
+#undef U
+
+	/*
+	 * If the user is requesting that the filter action loop
+	 * matching packets back out one of our ports, make sure that
+	 * the egress port is in range.
+	 */
+	if (fs->action == FILTER_SWITCH &&
+	    fs->eport >= adapter->params.nports)
+		return -ERANGE;
+
+	/*
+	 * Don't allow various trivially obvious bogus out-of-range
+	 * values ...
+	 */
+	if (fs->val.iport >= adapter->params.nports)
+		return -ERANGE;
+
+	return 0;
+}
+
+/**
+ * Get the queue to which the traffic must be steered.
+ */
+static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
+				      struct ch_filter_specification *fs)
+{
+	struct port_info *pi = ethdev2pinfo(dev);
+	struct adapter *adapter = pi->adapter;
+	unsigned int iq;
+
+	/*
+	 * If the user has requested steering matching Ingress Packets
+	 * to a specific Queue Set, we need to make sure it's in range
+	 * for the port and map that into the Absolute Queue ID of the
+	 * Queue Set's Response Queue.
+	 */
+	if (!fs->dirsteer) {
+		iq = 0;
+	} else {
+		/*
+		 * If the iq id is greater than the number of qsets,
+		 * then assume it is an absolute qid.
+		 */
+		if (fs->iq < pi->n_rx_qsets)
+			iq = adapter->sge.ethrxq[pi->first_qset +
+						 fs->iq].rspq.abs_id;
+		else
+			iq = fs->iq;
+	}
+
+	return iq;
+}
+
+/* Return an error number if the indicated filter isn't writable ... */
+int writable_filter(struct filter_entry *f)
+{
+	if (f->locked)
+		return -EPERM;
+	if (f->pending)
+		return -EBUSY;
+
+	return 0;
+}
+
+/**
+ * Send CPL_SET_TCB_FIELD message
+ */
+static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
+			  u16 word, u64 mask, u64 val, int no_reply)
+{
+	struct rte_mbuf *mbuf;
+	struct cpl_set_tcb_field *req;
+	struct sge_ctrl_txq *ctrlq;
+
+	ctrlq = &adapter->sge.ctrlq[0];
+	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+	BUG_ON(!mbuf);
+
+	mbuf->data_len = sizeof(*req);
+	mbuf->pkt_len = mbuf->data_len;
+
+	req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
+	memset(req, 0, sizeof(*req));
+	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
+	req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
+				      V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
+				      V_NO_REPLY(no_reply));
+	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
+	req->mask = cpu_to_be64(mask);
+	req->val = cpu_to_be64(val);
+
+	t4_mgmt_tx(ctrlq, mbuf);
+}
+
+/**
+ * Set one of the t_flags bits in the TCB.
+ */
+static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
+			  unsigned int bit_pos, unsigned int val, int no_reply)
+{
+	set_tcb_field(adap, ftid,  W_TCB_T_FLAGS, 1ULL << bit_pos,
+		      (unsigned long long)val << bit_pos, no_reply);
+}
+
+/**
+ * Clear a filter and release any of its resources that we own.  This also
+ * clears the filter's "pending" status.
+ */
+void clear_filter(struct filter_entry *f)
+{
+	/*
+	 * If the filter has loopback rewriting rules then we'll need to free
+	 * any existing Layer Two Table (L2T) entries of the filter rule.  The
+	 * firmware will handle freeing up any Source MAC Table (SMT) entries
+	 * used for rewriting Source MAC Addresses in loopback rules.
+	 */
+	if (f->l2t)
+		cxgbe_l2t_release(f->l2t);
+
+	if (f->smt)
+		cxgbe_smt_release(f->smt);
+
+	/*
+	 * The zeroing of the filter rule below clears the filter's valid,
+	 * pending, and locked flags, the l2t pointer, etc., so it's all we
+	 * need for this operation.
+	 */
+	memset(f, 0, sizeof(*f));
+}
+
+/**
+ * Clear all set filters
+ */
+void cxgbe_clear_all_filters(struct adapter *adapter)
+{
+	unsigned int i;
+
+	if (adapter->tids.ftid_tab) {
+		struct filter_entry *f = &adapter->tids.ftid_tab[0];
+
+		for (i = 0; i < adapter->tids.nftids; i++, f++)
+			if (f->valid || f->pending)
+				clear_filter(f);
+	}
+}
+
+/**
+ * Check if entry already filled.
+ */
+static bool is_filter_set(struct tid_info *t, int fidx, int family)
+{
+	bool result = FALSE;
+	int i, max;
+
+	/* IPv6 requires four slots and IPv4 requires only one slot.
+	 * Ensure there are enough slots available.
+	 */
+	max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;
+
+	t4_os_lock(&t->ftid_lock);
+	for (i = fidx; i <= max; i++) {
+		if (rte_bitmap_get(t->ftid_bmap, i)) {
+			result = TRUE;
+			break;
+		}
+	}
+	t4_os_unlock(&t->ftid_lock);
+	return result;
+}
+
+/**
+ * Set the corresponding entry in the bitmap. 4 slots are
+ * marked for IPv6, whereas only 1 slot is marked for IPv4.
+ */
+static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
+{
+	t4_os_lock(&t->ftid_lock);
+	if (rte_bitmap_get(t->ftid_bmap, fidx)) {
+		t4_os_unlock(&t->ftid_lock);
+		return -EBUSY;
+	}
+
+	if (family == FILTER_TYPE_IPV4) {
+		rte_bitmap_set(t->ftid_bmap, fidx);
+	} else {
+		rte_bitmap_set(t->ftid_bmap, fidx);
+		rte_bitmap_set(t->ftid_bmap, fidx + 1);
+		rte_bitmap_set(t->ftid_bmap, fidx + 2);
+		rte_bitmap_set(t->ftid_bmap, fidx + 3);
+	}
+	t4_os_unlock(&t->ftid_lock);
+	return 0;
+}
+
+/**
+ * Clear the corresponding entry in the bitmap. 4 slots are
+ * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
+ */
+static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
+{
+	t4_os_lock(&t->ftid_lock);
+	if (family == FILTER_TYPE_IPV4) {
+		rte_bitmap_clear(t->ftid_bmap, fidx);
+	} else {
+		rte_bitmap_clear(t->ftid_bmap, fidx);
+		rte_bitmap_clear(t->ftid_bmap, fidx + 1);
+		rte_bitmap_clear(t->ftid_bmap, fidx + 2);
+		rte_bitmap_clear(t->ftid_bmap, fidx + 3);
+	}
+	t4_os_unlock(&t->ftid_lock);
+}
+
+/**
+ * t4_mk_filtdelwr - create a delete filter WR
+ * @ftid: the filter ID
+ * @wr: the filter work request to populate
+ * @qid: ingress queue to receive the delete notification
+ *
+ * Creates a filter work request to delete the supplied filter.  If @qid is
+ * negative the delete notification is suppressed.
+ */
+static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
+{
+	memset(wr, 0, sizeof(*wr));
+	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
+	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
+				    V_FW_FILTER_WR_NOREPLY(qid < 0));
+	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
+	if (qid >= 0)
+		wr->rx_chan_rx_rpl_iq =
+				cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+}
+
+/**
+ * Create FW work request to delete the filter at a specified index
+ */
+static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
+{
+	struct adapter *adapter = ethdev2adap(dev);
+	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+	struct rte_mbuf *mbuf;
+	struct fw_filter_wr *fwr;
+	struct sge_ctrl_txq *ctrlq;
+	unsigned int port_id = ethdev2pinfo(dev)->port_id;
+
+	ctrlq = &adapter->sge.ctrlq[port_id];
+	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+	if (!mbuf)
+		return -ENOMEM;
+
+	mbuf->data_len = sizeof(*fwr);
+	mbuf->pkt_len = mbuf->data_len;
+
+	fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
+	t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
+
+	/*
+	 * Mark the filter as "pending" and ship off the Filter Work Request.
+	 * When we get the Work Request Reply we'll clear the pending status.
+	 */
+	f->pending = 1;
+	t4_mgmt_tx(ctrlq, mbuf);
+	return 0;
+}
+
+/**
+ * Delete the filter at the specified index (if valid).  This checks for all
+ * the common problems with doing this, like the filter being locked, currently
+ * pending in another operation, etc.
+ */
+int delete_filter(struct rte_eth_dev *dev, unsigned int fidx)
+{
+	struct adapter *adapter = ethdev2adap(dev);
+	struct filter_entry *f;
+	int ret;
+	unsigned int max_fidx;
+
+	max_fidx = adapter->tids.nftids;
+	if (fidx >= max_fidx)
+		return -ERANGE;
+
+	f = &adapter->tids.ftid_tab[fidx];
+	ret = writable_filter(f);
+	if (ret)
+		return ret;
+	if (f->valid)
+		return del_filter_wr(dev, fidx);
+
+	return 0;
+}
+
+/**
+ * Send a Work Request to write the filter at a specified index.  We construct
+ * a Firmware Filter Work Request to have the work done and put the indicated
+ * filter into "pending" mode, which prevents any further actions against
+ * it until we get a reply from the firmware on the completion status of the
+ * request.
+ */
+int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
+{
+	struct adapter *adapter = ethdev2adap(dev);
+	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+	struct rte_mbuf *mbuf;
+	struct fw_filter_wr *fwr;
+	struct sge_ctrl_txq *ctrlq;
+	unsigned int port_id = ethdev2pinfo(dev)->port_id;
+	int ret;
+
+	ctrlq = &adapter->sge.ctrlq[port_id];
+	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+	if (!mbuf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mbuf->data_len = sizeof(*fwr);
+	mbuf->pkt_len = mbuf->data_len;
+
+	fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
+	memset(fwr, 0, sizeof(*fwr));
+
+	/*
+	 * If the new filter requires loopback Destination MAC and/or VLAN
+	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+	 * the filter.
+	 */
+	if (f->fs.newdmac || f->fs.newvlan) {
+		/* allocate L2T entry for new filter */
+		f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
+						   f->fs.eport, f->fs.dmac);
+		if (!f->l2t) {
+			ret = -ENOMEM;
+			goto error;
+		}
+	}
+
+	/*
+	 * If the new filter requires loopback Source MAC rewriting then
+	 * we need to allocate a SMT entry for the filter.
+	 */
+	if (f->fs.newsmac) {
+		f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
+		if (!f->smt) {
+			if (f->l2t) {
+				cxgbe_l2t_release(f->l2t);
+				f->l2t = NULL;
+			}
+			ret = -ENOMEM;
+			goto error;
+		}
+		f->smtidx = f->smt->idx;
+	}
+
+	/*
+	 * Construct the work request to set the filter.
+	 */
+	fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+	fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
+	fwr->tid_to_iq =
+		cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
+			    V_FW_FILTER_WR_RQTYPE(f->fs.type) |
+			    V_FW_FILTER_WR_NOREPLY(0) |
+			    V_FW_FILTER_WR_IQ(f->fs.iq));
+	fwr->del_filter_to_l2tix =
+		cpu_to_be32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
+			    V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
+			    V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
+			    V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
+			    V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
+			    V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
+			    V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
+			    V_FW_FILTER_WR_INSVLAN(
+				    f->fs.newvlan == VLAN_INSERT ||
+				    f->fs.newvlan == VLAN_REWRITE) |
+			    V_FW_FILTER_WR_RMVLAN(
+				    f->fs.newvlan == VLAN_REMOVE ||
+				    f->fs.newvlan == VLAN_REWRITE) |
+			    V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
+			    V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
+			    V_FW_FILTER_WR_PRIO(f->fs.prio) |
+			    V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
+	fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
+	fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
+	fwr->frag_to_ovlan_vldm =
+		     (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
+		      V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
+		      V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
+		      V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
+		      V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
+		      V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
+	fwr->smac_sel = 0;
+	fwr->rx_chan_rx_rpl_iq =
+		cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
+			    V_FW_FILTER_WR_RX_RPL_IQ(
+				    adapter->sge.fw_evtq.abs_id));
+	fwr->maci_to_matchtypem =
+		cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
+			    V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
+			    V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
+			    V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
+			    V_FW_FILTER_WR_PORT(f->fs.val.iport) |
+			    V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
+			    V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
+			    V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
+	fwr->ptcl = f->fs.val.proto;
+	fwr->ptclm = f->fs.mask.proto;
+	fwr->ttyp = f->fs.val.tos;
+	fwr->ttypm = f->fs.mask.tos;
+	fwr->ivlan = cpu_to_be16(f->fs.val.ivlan);
+	fwr->ivlanm = cpu_to_be16(f->fs.mask.ivlan);
+	fwr->ovlan = cpu_to_be16(f->fs.val.ovlan);
+	fwr->ovlanm = cpu_to_be16(f->fs.mask.ovlan);
+	rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+	rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+	rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+	rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+	fwr->lp = cpu_to_be16(f->fs.val.lport);
+	fwr->lpm = cpu_to_be16(f->fs.mask.lport);
+	fwr->fp = cpu_to_be16(f->fs.val.fport);
+	fwr->fpm = cpu_to_be16(f->fs.mask.fport);
+
+	/*
+	 * Mark the filter as "pending" and ship off the Filter Work Request.
+	 * When we get the Work Request Reply we'll clear the pending status.
+	 */
+	f->pending = 1;
+	t4_mgmt_tx(ctrlq, mbuf);
+	return 0;
+
+error:
+	rte_pktmbuf_free(mbuf);
+out:
+	return ret;
+}
+
+/**
+ * Check a delete filter request for validity and send it to the hardware.
+ * Return 0 on success, an error number otherwise.  We attach any provided
+ * filter operation context to the internal filter specification in order to
+ * facilitate signaling completion of the operation.
+ */
+int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+		     struct ch_filter_specification *fs,
+		     struct filter_ctx *ctx)
+{
+	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+	struct adapter *adapter = pi->adapter;
+	struct filter_entry *f;
+	int ret;
+
+	if (filter_id >= adapter->tids.nftids)
+		return -ERANGE;
+
+	ret = is_filter_set(&adapter->tids, filter_id, fs->type);
+	if (!ret) {
+		dev_warn(adap, "%s: could not find filter entry: %u\n",
+			 __func__, filter_id);
+		return -EINVAL;
+	}
+
+	f = &adapter->tids.ftid_tab[filter_id];
+	ret = writable_filter(f);
+	if (ret)
+		return ret;
+
+	if (f->valid) {
+		f->ctx = ctx;
+		cxgbe_clear_ftid(&adapter->tids,
+				 f->tid - adapter->tids.ftid_base,
+				 f->fs.type ? FILTER_TYPE_IPV6 :
+					      FILTER_TYPE_IPV4);
+		return del_filter_wr(dev, filter_id);
+	}
+
+	/*
+	 * If the caller has passed in a Completion Context then we need to
+	 * mark it as a successful completion so they don't stall waiting
+	 * for it.
+	 */
+	if (ctx) {
+		ctx->result = 0;
+		t4_complete(&ctx->completion);
+	}
+
+	return 0;
+}
+
+/**
+ * Check a Chelsio Filter Request for validity, convert it into our internal
+ * format and send it to the hardware.  Return 0 on success, an error number
+ * otherwise.  We attach any provided filter operation context to the internal
+ * filter specification in order to facilitate signaling completion of the
+ * operation.
+ */
+int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+		     struct ch_filter_specification *fs,
+		     struct filter_ctx *ctx)
+{
+	struct port_info *pi = ethdev2pinfo(dev);
+	struct adapter *adapter = pi->adapter;
+	u32 iconf;
+	unsigned int fidx, iq, fid_bit = 0;
+	struct filter_entry *f;
+	int ret;
+
+	if (filter_id >= adapter->tids.nftids)
+		return -ERANGE;
+
+	ret = validate_filter(adapter, fs);
+	if (ret)
+		return ret;
+
+	ret = is_filter_set(&adapter->tids, filter_id, fs->type);
+	if (ret)
+		return -EBUSY;
+
+	iq = get_filter_steerq(dev, fs);
+
+	/*
+	 * IPv6 filters occupy four slots and must be aligned on
+	 * four-slot boundaries.  IPv4 filters only occupy a single
+	 * slot and have no alignment requirements but writing a new
+	 * IPv4 filter into the middle of an existing IPv6 filter
+	 * requires clearing the old IPv6 filter.
+	 */
+	if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
+		/*
+		 * If our IPv4 filter isn't being written to a
+		 * multiple of four filter index and there's an IPv6
+		 * filter at the multiple of 4 base slot, then we need
+		 * to delete that IPv6 filter ...
+		 */
+		fidx = filter_id & ~0x3;
+		if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
+			f = &adapter->tids.ftid_tab[fidx];
+			ret = delete_filter(dev, fidx);
+			if (ret)
+				return ret;
+			if (f->valid) {
+				fid_bit = f->tid;
+				fid_bit -= adapter->tids.ftid_base;
+				cxgbe_clear_ftid(&adapter->tids,
+						 fid_bit, FILTER_TYPE_IPV6);
+			}
+		}
+	} else { /* IPv6 */
+		/*
+		 * Ensure that the IPv6 filter is aligned on a
+		 * multiple of 4 boundary.
+		 */
+		if (filter_id & 0x3)
+			return -EINVAL;
+
+		/*
+		 * Check all except the base overlapping IPv4 filter
+		 * slots.
+		 */
+		for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
+			f = &adapter->tids.ftid_tab[fidx];
+			ret = delete_filter(dev, fidx);
+			if (ret)
+				return ret;
+			if (f->valid) {
+				fid_bit = f->tid;
+				fid_bit -=  adapter->tids.ftid_base;
+				cxgbe_clear_ftid(&adapter->tids,
+						 fid_bit, FILTER_TYPE_IPV4);
+			}
+		}
+	}
+
+	/*
+	 * Check to make sure that the provided filter index is not
+	 * already in use by someone else
+	 */
+	f = &adapter->tids.ftid_tab[filter_id];
+	if (f->valid)
+		return -EBUSY;
+
+	fidx = adapter->tids.ftid_base + filter_id;
+	fid_bit = filter_id;
+	ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
+			     fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
+	if (ret)
+		return ret;
+
+	/*
+	 * Check to make sure the filter requested is writable ...
+	 */
+	ret = writable_filter(f);
+	if (ret) {
+		/* Clear the bits we have set above */
+		cxgbe_clear_ftid(&adapter->tids, fid_bit,
+				 fs->type ? FILTER_TYPE_IPV6 :
+					    FILTER_TYPE_IPV4);
+		return ret;
+	}
+
+	/*
+	 * Clear out any old resources being used by the filter before
+	 * we start constructing the new filter.
+	 */
+	if (f->valid)
+		clear_filter(f);
+
+	/*
+	 * Convert the filter specification into our internal format.
+	 * We copy the PF/VF specification into the Outer VLAN field
+	 * here so the rest of the code -- including the interface to
+	 * the firmware -- doesn't have to constantly do these checks.
+	 */
+	f->fs = *fs;
+	f->fs.iq = iq;
+	f->dev = dev;
+
+	iconf = adapter->params.tp.ingress_config;
+	if (iconf & F_VNIC) {
+		f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
+		f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
+		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
+		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+	}
+
+	/*
+	 * Attempt to set the filter.  If we don't succeed, we clear
+	 * it and return the failure.
+	 */
+	f->ctx = ctx;
+	f->tid = fidx; /* Save the actual tid */
+	ret = set_filter_wr(dev, filter_id);
+	if (ret) {
+		fid_bit = f->tid - adapter->tids.ftid_base;
+		cxgbe_clear_ftid(&adapter->tids, fid_bit,
+				 fs->type ? FILTER_TYPE_IPV6 :
+					    FILTER_TYPE_IPV4);
+		clear_filter(f);
+	}
+
+	return ret;
+}
+
+/**
+ * Handle a LE-TCAM filter write/deletion reply.
+ */
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+	struct filter_entry *f = NULL;
+	unsigned int tid = GET_TID(rpl);
+	int idx, max_fidx = adap->tids.nftids;
+
+	/* Get the corresponding filter entry for this tid */
+	if (adap->tids.ftid_tab) {
+		/* Check this in normal filter region */
+		idx = tid - adap->tids.ftid_base;
+		if (idx >= max_fidx)
+			return;
+
+		f = &adap->tids.ftid_tab[idx];
+		if (f->tid != tid)
+			return;
+	}
+
+	/* We found the filter entry for this tid */
+	if (f) {
+		unsigned int ret = G_COOKIE(rpl->cookie);
+		struct filter_ctx *ctx;
+
+		/*
+		 * Pull off any filter operation context attached to the
+		 * filter.
+		 */
+		ctx = f->ctx;
+		f->ctx = NULL;
+
+		if (ret == FW_FILTER_WR_FLT_DELETED) {
+			/*
+			 * Clear the filter when we get confirmation from the
+			 * hardware that the filter has been deleted.
+			 */
+			clear_filter(f);
+			if (ctx)
+				ctx->result = 0;
+		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
+			f->pending = 0;  /* asynchronous setup completed */
+			f->valid = 1;
+			if (ctx) {
+				ctx->tid = f->tid;
+				ctx->result = 0;
+			}
+
+			if (f->fs.newsmac) {
+				/* do a set-tcb for smac-sel and CWR bit.. */
+				set_tcb_tflag(adap, f->tid, S_TF_CCTRL_CWR,
+					      1, 1);
+				set_tcb_field(adap, f->tid, W_TCB_SMAC_SEL,
+					      V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
+					      V_TCB_SMAC_SEL(f->smtidx), 1);
+			}
+		} else {
+			/*
+			 * Something went wrong.  Issue a warning about the
+			 * problem and clear everything out.
+			 */
+			dev_warn(adap, "filter %u setup failed with error %u\n",
+				 idx, ret);
+			clear_filter(f);
+			if (ctx)
+				ctx->result = -EINVAL;
+		}
+
+		if (ctx)
+			t4_complete(&ctx->completion);
+	}
+}
diff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h
index b03ccca..96c15d2 100644
--- a/drivers/net/cxgbe/cxgbe_filter.h
+++ b/drivers/net/cxgbe/cxgbe_filter.h
@@ -34,6 +34,8 @@
 #ifndef _CXGBE_FILTER_H_
 #define _CXGBE_FILTER_H_
 
+#include "t4_msg.h"
+
 /*
  * Defined bit width of user definable filter tuples
  */
@@ -232,4 +234,20 @@ struct filter_entry {
 	 */
 	struct ch_filter_specification fs;
 };
+
+struct adapter;
+
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);
+void clear_filter(struct filter_entry *f);
+int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx);
+int delete_filter(struct rte_eth_dev *dev, unsigned int fidx);
+int writable_filter(struct filter_entry *f);
+int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+		     struct ch_filter_specification *fs,
+		     struct filter_ctx *ctx);
+int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+		     struct ch_filter_specification *fs,
+		     struct filter_ctx *ctx);
+
+void cxgbe_clear_all_filters(struct adapter *adapter);
 #endif /* _CXGBE_FILTER_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index e7d017e..dfb6567 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -69,6 +69,7 @@
 #include "clip_tbl.h"
 #include "l2t.h"
 #include "smt.h"
+#include "cxgbe_filter.h"
 
 /**
  * Allocate a chunk of memory. The allocated memory is cleared.
@@ -118,6 +119,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
 		const struct cpl_fw6_msg *msg = (const void *)rsp;
 
 		t4_handle_fw_rpl(q->adapter, msg->data);
+	} else if (opcode == CPL_SET_TCB_RPL) {
+		const struct cpl_set_tcb_rpl *p = (const void *)rsp;
+
+		filter_rpl(q->adapter, p);
 	} else if (opcode == CPL_SMT_WRITE_RPL) {
 		const struct cpl_smt_write_rpl *p = (const void *)rsp;
 
@@ -1232,6 +1237,7 @@ void cxgbe_close(struct adapter *adapter)
 	int i;
 
 	if (adapter->flags & FULL_INIT_DONE) {
+		cxgbe_clear_all_filters(adapter);
 		tid_free(&adapter->tids);
 		t4_cleanup_clip_tbl(adapter);
 		t4_cleanup_l2t(adapter);
diff --git a/drivers/net/cxgbe/cxgbe_ofld.h b/drivers/net/cxgbe/cxgbe_ofld.h
index 19971e7..115472e 100644
--- a/drivers/net/cxgbe/cxgbe_ofld.h
+++ b/drivers/net/cxgbe/cxgbe_ofld.h
@@ -47,6 +47,11 @@
 	(w)->wr.wr_lo = cpu_to_be64(0); \
 } while (0)
 
+#define INIT_TP_WR_MIT_CPL(w, cpl, tid) do { \
+	INIT_TP_WR(w, tid); \
+	OPCODE_TID(w) = cpu_to_be32(MK_OPCODE_TID(cpl, tid)); \
+} while (0)
+
 /*
  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
  */
-- 
2.5.3


