[PATCH v2 13/21] common/idpf: avoid variable 0-init
Soumyadeep Hore
soumyadeep.hore at intel.com
Tue Jun 4 10:06:03 CEST 2024
Don't initialize variables when it is not needed.
Also use 'err' instead of 'status', 'ret_code', 'ret', etc.
for consistency, and rename the return label 'sq_send_command_out'
to 'err_unlock'.
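
For illustration only, a minimal standalone sketch of the convention this
series moves toward: a single 'err' status variable assigned only on
failure, loop counters left uninitialized because the loop assigns them,
and an error label named after the unwind step it performs. The function,
variable, and label names below are hypothetical and are not code from the
driver:

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	/* Illustrative only: mirrors the err/err_unlock convention. */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static int send_example(int num_free, int num_needed)
	{
		int err = 0;	/* only status variable; returned on both paths */
		int i;		/* no 0-init: the for loop assigns it before use */

		pthread_mutex_lock(&lock);

		if (num_free < num_needed) {
			err = -ENOSPC;
			goto err_unlock;	/* label names the unwind action */
		}

		for (i = 0; i < num_needed; i++)
			printf("posting descriptor %d\n", i);

	err_unlock:
		pthread_mutex_unlock(&lock);
		return err;
	}

	int main(void)
	{
		return send_example(8, 4) ? 1 : 0;
	}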
Signed-off-by: Soumyadeep Hore <soumyadeep.hore at intel.com>
---
drivers/common/idpf/base/idpf_controlq.c | 63 +++++++++----------
.../common/idpf/base/idpf_controlq_setup.c | 18 +++---
2 files changed, 39 insertions(+), 42 deletions(-)
diff --git a/drivers/common/idpf/base/idpf_controlq.c b/drivers/common/idpf/base/idpf_controlq.c
index b5ba9c3bd0..bd23e54421 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -61,7 +61,7 @@ static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
*/
static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
{
- int i = 0;
+ int i;
for (i = 0; i < cq->ring_size; i++) {
struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
@@ -134,7 +134,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
{
struct idpf_ctlq_info *cq;
bool is_rxq = false;
- int status = 0;
+ int err;
if (!qinfo->len || !qinfo->buf_size ||
qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
@@ -164,16 +164,16 @@ int idpf_ctlq_add(struct idpf_hw *hw,
is_rxq = true;
/* fallthrough */
case IDPF_CTLQ_TYPE_MAILBOX_TX:
- status = idpf_ctlq_alloc_ring_res(hw, cq);
+ err = idpf_ctlq_alloc_ring_res(hw, cq);
break;
default:
- status = -EINVAL;
+ err = -EINVAL;
break;
}
- if (status)
+ if (err)
#ifdef NVME_CPF
- return status;
+ return err;
#else
goto init_free_q;
#endif
@@ -187,7 +187,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
idpf_calloc(hw, qinfo->len,
sizeof(struct idpf_ctlq_msg *));
if (!cq->bi.tx_msg) {
- status = -ENOMEM;
+ err = -ENOMEM;
goto init_dealloc_q_mem;
}
#endif
@@ -203,17 +203,16 @@ int idpf_ctlq_add(struct idpf_hw *hw,
#ifndef NVME_CPF
*cq_out = cq;
- return status;
+ return 0;
init_dealloc_q_mem:
/* free ring buffers and the ring itself */
idpf_ctlq_dealloc_ring_res(hw, cq);
init_free_q:
idpf_free(hw, cq);
- cq = NULL;
#endif
- return status;
+ return err;
}
/**
@@ -249,8 +248,8 @@ int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
#endif
{
struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
- int ret_code = 0;
- int i = 0;
+ int err;
+ int i;
LIST_INIT(&hw->cq_list_head);
@@ -261,19 +260,19 @@ int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
cq = *(ctlq + i);
#endif
- ret_code = idpf_ctlq_add(hw, qinfo, &cq);
- if (ret_code)
+ err = idpf_ctlq_add(hw, qinfo, &cq);
+ if (err)
goto init_destroy_qs;
}
- return ret_code;
+ return 0;
init_destroy_qs:
LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
idpf_ctlq_info, cq_list)
idpf_ctlq_remove(hw, cq);
- return ret_code;
+ return err;
}
/**
@@ -307,9 +306,9 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
{
struct idpf_ctlq_desc *desc;
- int num_desc_avail = 0;
- int status = 0;
- int i = 0;
+ int num_desc_avail;
+ int err = 0;
+ int i;
if (!cq || !cq->ring_size)
return -ENOBUFS;
@@ -319,8 +318,8 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
/* Ensure there are enough descriptors to send all messages */
num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
- status = -ENOSPC;
- goto sq_send_command_out;
+ err = -ENOSPC;
+ goto err_unlock;
}
for (i = 0; i < num_q_msg; i++) {
@@ -391,10 +390,10 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
wr32(hw, cq->reg.tail, cq->next_to_use);
-sq_send_command_out:
+err_unlock:
idpf_release_lock(&cq->cq_lock);
- return status;
+ return err;
}
/**
@@ -418,9 +417,8 @@ static int __idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
struct idpf_ctlq_msg *msg_status[], bool force)
{
struct idpf_ctlq_desc *desc;
- u16 i = 0, num_to_clean;
+ u16 i, num_to_clean;
u16 ntc, desc_err;
- int ret = 0;
if (!cq || !cq->ring_size)
return -ENOBUFS;
@@ -467,7 +465,7 @@ static int __idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
/* Return number of descriptors actually cleaned */
*clean_count = i;
- return ret;
+ return 0;
}
/**
@@ -534,7 +532,6 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 ntp = cq->next_to_post;
bool buffs_avail = false;
u16 tbp = ntp + 1;
- int status = 0;
int i = 0;
if (*buff_count > cq->ring_size)
@@ -635,7 +632,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
/* return the number of buffers that were not posted */
*buff_count = *buff_count - i;
- return status;
+ return 0;
}
/**
@@ -654,8 +651,8 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
{
u16 num_to_clean, ntc, ret_val, flags;
struct idpf_ctlq_desc *desc;
- int ret_code = 0;
- u16 i = 0;
+ int err = 0;
+ u16 i;
if (!cq || !cq->ring_size)
return -ENOBUFS;
@@ -688,7 +685,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
IDPF_CTLQ_FLAG_FTYPE_S;
if (flags & IDPF_CTLQ_FLAG_ERR)
- ret_code = -EBADMSG;
+ err = -EBADMSG;
q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
@@ -734,7 +731,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
*num_q_msg = i;
if (*num_q_msg == 0)
- ret_code = -ENOMSG;
+ err = -ENOMSG;
- return ret_code;
+ return err;
}
diff --git a/drivers/common/idpf/base/idpf_controlq_setup.c b/drivers/common/idpf/base/idpf_controlq_setup.c
index 21f43c74f5..cd6bcb1cf0 100644
--- a/drivers/common/idpf/base/idpf_controlq_setup.c
+++ b/drivers/common/idpf/base/idpf_controlq_setup.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2023 Intel Corporation
+ * Copyright(c) 2001-2024 Intel Corporation
*/
@@ -34,7 +34,7 @@ static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
struct idpf_ctlq_info *cq)
{
- int i = 0;
+ int i;
/* Do not allocate DMA buffers for transmit queues */
if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
@@ -153,20 +153,20 @@ void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
*/
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
- int ret_code;
+ int err;
/* verify input for valid configuration */
if (!cq->ring_size || !cq->buf_size)
return -EINVAL;
/* allocate the ring memory */
- ret_code = idpf_ctlq_alloc_desc_ring(hw, cq);
- if (ret_code)
- return ret_code;
+ err = idpf_ctlq_alloc_desc_ring(hw, cq);
+ if (err)
+ return err;
/* allocate buffers in the rings */
- ret_code = idpf_ctlq_alloc_bufs(hw, cq);
- if (ret_code)
+ err = idpf_ctlq_alloc_bufs(hw, cq);
+ if (err)
goto idpf_init_cq_free_ring;
/* success! */
@@ -174,5 +174,5 @@ int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
idpf_init_cq_free_ring:
idpf_free_dma_mem(hw, &cq->desc_ring);
- return ret_code;
+ return err;
}
--
2.43.0