[PATCH v3 04/10] net/ice/base: set speculative execution barrier
Dhanya Pillai
dhanya.r.pillai at intel.com
Tue May 27 15:17:23 CEST 2025
From: Lukasz Krakowiak <lukaszx.krakowiak at intel.com>
Fix issues related to SPECULATIVE_EXECUTION_DATA_LEAK.
This change adds speculative execution barriers to the following functions:
* ice_sched_add_vsi_child_nodes,
* ice_sched_add_vsi_support_nodes,
* ice_sched_move_vsi_to_agg,
* ice_prof_has_mask_idx,
* ice_alloc_prof_mask.
Also, added memory fence (memfence) macro definitions to ice_osdep.h.
Signed-off-by: Lukasz Krakowiak <lukaszx.krakowiak at intel.com>
Signed-off-by: Dhanya Pillai <dhanya.r.pillai at intel.com>
---
drivers/net/intel/ice/base/ice_flex_pipe.c | 2 ++
drivers/net/intel/ice/base/ice_osdep.h | 6 ++++++
drivers/net/intel/ice/base/ice_sched.c | 3 +++
3 files changed, 11 insertions(+)
diff --git a/drivers/net/intel/ice/base/ice_flex_pipe.c b/drivers/net/intel/ice/base/ice_flex_pipe.c
index 6dd5588f85..dc8c92e203 100644
--- a/drivers/net/intel/ice/base/ice_flex_pipe.c
+++ b/drivers/net/intel/ice/base/ice_flex_pipe.c
@@ -1280,6 +1280,7 @@ ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
if (hw->blk[blk].masks.masks[i].in_use &&
hw->blk[blk].masks.masks[i].idx == idx) {
found = true;
+ ice_memfence_read();
if (hw->blk[blk].masks.masks[i].mask == mask)
match = true;
break;
@@ -1648,6 +1649,7 @@ ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
/* if mask is in use and it exactly duplicates the
* desired mask and index, then in can be reused
*/
+ ice_memfence_read();
if (hw->blk[blk].masks.masks[i].mask == mask &&
hw->blk[blk].masks.masks[i].idx == idx) {
found_copy = true;
diff --git a/drivers/net/intel/ice/base/ice_osdep.h b/drivers/net/intel/ice/base/ice_osdep.h
index ad6cde9896..7588ad3dbc 100644
--- a/drivers/net/intel/ice/base/ice_osdep.h
+++ b/drivers/net/intel/ice/base/ice_osdep.h
@@ -203,6 +203,12 @@ struct __rte_packed_begin ice_virt_mem {
#define ice_memset(a, b, c, d) memset((a), (b), (c))
#define ice_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
+/* Memory fence barrier */
+#define ice_memfence_read()
+#define ice_memfence_read_write()
+#define ice_memfence_write()
+
+
/* SW spinlock */
struct ice_lock {
rte_spinlock_t spinlock;
diff --git a/drivers/net/intel/ice/base/ice_sched.c b/drivers/net/intel/ice/base/ice_sched.c
index a8a149f541..be9393a7d6 100644
--- a/drivers/net/intel/ice/base/ice_sched.c
+++ b/drivers/net/intel/ice/base/ice_sched.c
@@ -1748,6 +1748,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
node = node->sibling;
}
} else {
+ ice_memfence_read();
parent = parent->children[0];
}
}
@@ -1840,6 +1841,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
/* The newly added node can be a new parent for the next
* layer nodes
*/
+ ice_memfence_read();
if (num_added)
parent = ice_sched_find_node_by_teid(tc_node,
first_node_teid);
@@ -2431,6 +2433,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
/* The newly added node can be a new parent for the next
* layer nodes
*/
+ ice_memfence_read();
if (num_nodes_added)
parent = ice_sched_find_node_by_teid(tc_node,
first_node_teid);
--
2.43.0
More information about the dev
mailing list