RDMA/irdma: Use list_last_entry/list_first_entry
Use list_last_entry() and list_first_entry() instead of open-coding casts of
the list's prev and next pointers. The helpers resolve the containing
structure through container_of(), so they stay correct regardless of where
the list_head member sits inside the structure; a small userspace sketch of
the pattern follows the sign-off tags below.
Link: https://lore.kernel.org/r/20210608211415.680-1-shiraz.saleem@intel.com
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
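
The sketch below is a minimal, self-contained userspace illustration of the
pattern, not irdma code: struct demo_buf and the trimmed list helpers are
made-up stand-ins re-implementing the relevant parts of <linux/list.h>. It
shows that list_first_entry()/list_last_entry() keep working even when the
list_head is not the first member of the structure, which is exactly where a
bare pointer cast would go wrong.

/*
 * Minimal userspace sketch of the list_first_entry()/list_last_entry()
 * pattern; the list helpers are trimmed re-implementations for
 * illustration only.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/*
 * list_entry() is container_of(): subtract the member offset from the
 * list_head pointer to get back to the enclosing structure.
 */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
	list_entry((head)->next, type, member)
#define list_last_entry(head, type, member) \
	list_entry((head)->prev, type, member)

/*
 * The list_head is deliberately NOT the first member: a bare
 * "(struct demo_buf *)head->prev" cast would yield a misaligned pointer
 * here, while list_last_entry() still resolves the structure correctly.
 */
struct demo_buf {
	int id;
	struct list_head list;
};

int main(void)
{
	struct list_head bufl = LIST_HEAD_INIT(bufl);
	struct demo_buf a = { .id = 1 }, b = { .id = 2 };
	struct demo_buf *buf;

	list_add_tail(&a.list, &bufl);
	list_add_tail(&b.list, &bufl);

	if (!list_empty(&bufl)) {
		buf = list_first_entry(&bufl, struct demo_buf, list);
		printf("first entry id = %d\n", buf->id);	/* 1 */
		buf = list_last_entry(&bufl, struct demo_buf, list);
		printf("last entry id = %d\n", buf->id);	/* 2 */
	}
	return 0;
}

Compiled and run, the sketch prints the first and last ids (1 and 2). The
hunks below apply the same helpers, taken from <linux/list.h>, to the irdma
lists.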
diff --git a/drivers/infiniband/hw/irdma/puda.c b/drivers/infiniband/hw/irdma/puda.c
index c0be6e3..58e7d87 100644
--- a/drivers/infiniband/hw/irdma/puda.c
+++ b/drivers/infiniband/hw/irdma/puda.c
@@ -1419,7 +1419,7 @@ irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq, struct irdma_pfpdu *pfpdu,
 error:
 	while (!list_empty(&pbufl)) {
-		buf = (struct irdma_puda_buf *)(pbufl.prev);
+		buf = list_last_entry(&pbufl, struct irdma_puda_buf, list);
 		list_move(&buf->list, rxlist);
 	}
 	if (txbuf)
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 8f04347..b4b91cb 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -425,8 +425,8 @@ struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
 	spin_lock_irqsave(&cqp->req_lock, flags);
 	if (!list_empty(&cqp->cqp_avail_reqs)) {
-		cqp_request = list_entry(cqp->cqp_avail_reqs.next,
-					 struct irdma_cqp_request, list);
+		cqp_request = list_first_entry(&cqp->cqp_avail_reqs,
+					       struct irdma_cqp_request, list);
 		list_del_init(&cqp_request->list);
 	}
 	spin_unlock_irqrestore(&cqp->req_lock, flags);