octeontx2-af: Simplify context writing and reading to hardware
Simplify NIX context reading and writing by using the hardware maximum
context size instead of the individual size of each context type.

Signed-off-by: Subbaraya Sundeep <sbhatta@marvell.com>
Link: https://patch.msgid.link/1761388367-16579-2-git-send-email-sbhatta@marvell.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit 85708c5d5f
parent 1bae0fd900
committed by Paolo Abeni
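As background for the diff below, here is a minimal standalone sketch of the approach the patch takes, assuming nothing beyond what the change itself shows: each context structure is padded out to the 128-byte hardware maximum and checked at build time, so a single fixed length can be used for every context copy instead of a per-type sizeof(). The demo_* structure names, their fields, and the ctx_write() helper are hypothetical stand-ins, not the kernel's own definitions.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define NIX_MAX_CTX_SIZE 128	/* hardware maximum context size, as defined by the patch */

/* Hypothetical context types, each padded up to the full 128 bytes. */
struct demo_cq_ctx {
	uint64_t wqe_base;	/* stand-ins for the real CQ context fields */
	uint64_t head_tail;
	uint64_t padding[14];	/* 16 + 112 = 128 bytes */
};

struct demo_rss_entry {
	uint32_t rq;		/* stand-in for the 20-bit rq field */
	uint32_t reserved;
	uint64_t padding[15];	/* 8 + 120 = 128 bytes */
};

/* Build-time size checks, mirroring the static_assert()s the patch adds. */
static_assert(sizeof(struct demo_cq_ctx) == NIX_MAX_CTX_SIZE, "CQ context must be 128 bytes");
static_assert(sizeof(struct demo_rss_entry) == NIX_MAX_CTX_SIZE, "RSS entry must be 128 bytes");

/* With every type guaranteed to be NIX_MAX_CTX_SIZE, one copy length serves
 * all context types, replacing the per-type sizeof() arguments.
 */
static void ctx_write(void *hw_ctx, const void *sw_ctx)
{
	memcpy(hw_ctx, sw_ctx, NIX_MAX_CTX_SIZE);
}

int main(void)
{
	unsigned char hw_slot[NIX_MAX_CTX_SIZE] = { 0 };
	struct demo_cq_ctx cq = { .wqe_base = 0x1000 };
	struct demo_rss_entry rss = { .rq = 5 };

	ctx_write(hw_slot, &cq);	/* the same call works for any context type */
	ctx_write(hw_slot, &rss);
	return 0;
}

The trade-off visible in the diff is that small entries such as the RSS and MCE structures now carry explicit padding and every copy moves the full 128 bytes, in exchange for a single uniform code path per queue operation.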
@@ -1149,36 +1149,36 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
 	case NIX_AQ_INSTOP_WRITE:
 		if (req->ctype == NIX_AQ_CTYPE_RQ)
 			memcpy(mask, &req->rq_mask,
-			       sizeof(struct nix_rq_ctx_s));
+			       NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
 			memcpy(mask, &req->sq_mask,
-			       sizeof(struct nix_sq_ctx_s));
+			       NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
 			memcpy(mask, &req->cq_mask,
-			       sizeof(struct nix_cq_ctx_s));
+			       NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
 			memcpy(mask, &req->rss_mask,
-			       sizeof(struct nix_rsse_s));
+			       NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
 			memcpy(mask, &req->mce_mask,
-			       sizeof(struct nix_rx_mce_s));
+			       NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
 			memcpy(mask, &req->prof_mask,
-			       sizeof(struct nix_bandprof_s));
+			       NIX_MAX_CTX_SIZE);
 		fallthrough;
 	case NIX_AQ_INSTOP_INIT:
 		if (req->ctype == NIX_AQ_CTYPE_RQ)
-			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
+			memcpy(ctx, &req->rq, NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
-			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
+			memcpy(ctx, &req->sq, NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
-			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
+			memcpy(ctx, &req->cq, NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
-			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
+			memcpy(ctx, &req->rss, NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
-			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+			memcpy(ctx, &req->mce, NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
-			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
+			memcpy(ctx, &req->prof, NIX_MAX_CTX_SIZE);
 		break;
 	case NIX_AQ_INSTOP_NOP:
 	case NIX_AQ_INSTOP_READ:
@@ -1243,22 +1243,22 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
 	if (req->op == NIX_AQ_INSTOP_READ) {
 		if (req->ctype == NIX_AQ_CTYPE_RQ)
 			memcpy(&rsp->rq, ctx,
-			       sizeof(struct nix_rq_ctx_s));
+			       NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
 			memcpy(&rsp->sq, ctx,
-			       sizeof(struct nix_sq_ctx_s));
+			       NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
 			memcpy(&rsp->cq, ctx,
-			       sizeof(struct nix_cq_ctx_s));
+			       NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
 			memcpy(&rsp->rss, ctx,
-			       sizeof(struct nix_rsse_s));
+			       NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
 			memcpy(&rsp->mce, ctx,
-			       sizeof(struct nix_rx_mce_s));
+			       NIX_MAX_CTX_SIZE);
 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
 			memcpy(&rsp->prof, ctx,
-			       sizeof(struct nix_bandprof_s));
+			       NIX_MAX_CTX_SIZE);
 	}
 }
 
@@ -1289,8 +1289,8 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
 	/* Make copy of original context & mask which are required
 	 * for resubmission
 	 */
-	memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
-	memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
+	memcpy(&aq_req.cq_mask, &req->cq_mask, NIX_MAX_CTX_SIZE);
+	memcpy(&aq_req.cq, &req->cq, NIX_MAX_CTX_SIZE);
 
 	/* exclude fields which HW can update */
 	aq_req.cq_mask.cq_err = 0;
@@ -1309,7 +1309,7 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
 	 * updated fields are masked out for request and response
 	 * comparison
 	 */
-	for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
+	for (word = 0; word < NIX_MAX_CTX_SIZE / sizeof(u64);
 	     word++) {
 		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
 			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
@@ -1317,7 +1317,7 @@ static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
 			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
 	}
 
-	if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
+	if (memcmp(&aq_req.cq, &aq_rsp.cq, NIX_MAX_CTX_SIZE))
 		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
 
 	return 0;
@@ -13,6 +13,8 @@
 
 #define RVU_MULTI_BLK_VER	0x7ULL
 
+#define NIX_MAX_CTX_SIZE	128
+
 /* RVU Block Address Enumeration */
 enum rvu_block_addr_e {
 	BLKADDR_RVUM		= 0x0ULL,
@@ -370,8 +372,12 @@ struct nix_cq_ctx_s {
 	u64 qsize          : 4;
 	u64 cq_err_int     : 8;
 	u64 cq_err_int_ena : 8;
+	/* Ensure all context sizes are 128 bytes */
+	u64 padding[12];
 };
 
+static_assert(sizeof(struct nix_cq_ctx_s) == NIX_MAX_CTX_SIZE);
+
 /* CN10K NIX Receive queue context structure */
 struct nix_cn10k_rq_ctx_s {
 	u64 ena : 1;
@@ -460,6 +466,8 @@ struct nix_cn10k_rq_ctx_s {
 	u64 rsvd_1023_960; /* W15 */
 };
 
+static_assert(sizeof(struct nix_cn10k_rq_ctx_s) == NIX_MAX_CTX_SIZE);
+
 /* CN10K NIX Send queue context structure */
 struct nix_cn10k_sq_ctx_s {
 	u64 ena : 1;
@@ -523,6 +531,8 @@ struct nix_cn10k_sq_ctx_s {
 	u64 rsvd_1023_1008 : 16;
 };
 
+static_assert(sizeof(struct nix_cn10k_sq_ctx_s) == NIX_MAX_CTX_SIZE);
+
 /* NIX Receive queue context structure */
 struct nix_rq_ctx_s {
 	u64 ena : 1;
@@ -594,6 +604,8 @@ struct nix_rq_ctx_s {
 	u64 rsvd_1023_960; /* W15 */
 };
 
+static_assert(sizeof(struct nix_rq_ctx_s) == NIX_MAX_CTX_SIZE);
+
 /* NIX sqe sizes */
 enum nix_maxsqesz {
 	NIX_MAXSQESZ_W16 = 0x0,
@@ -668,13 +680,18 @@ struct nix_sq_ctx_s {
 	u64 rsvd_1023_1008 : 16;
 };
 
+static_assert(sizeof(struct nix_sq_ctx_s) == NIX_MAX_CTX_SIZE);
+
 /* NIX Receive side scaling entry structure*/
 struct nix_rsse_s {
 	uint32_t rq		: 20;
 	uint32_t reserved_20_31	: 12;
+
+	/* Ensure all context sizes are minimum 128 bytes */
+	u64 padding[15];
 };
 
+static_assert(sizeof(struct nix_rsse_s) == NIX_MAX_CTX_SIZE);
+
 /* NIX receive multicast/mirror entry structure */
 struct nix_rx_mce_s {
 	uint64_t op : 2;
@@ -684,8 +701,12 @@ struct nix_rx_mce_s {
 	uint64_t rsvd_31_24 : 8;
 	uint64_t pf_func    : 16;
 	uint64_t next       : 16;
+	/* Ensure all context sizes are minimum 128 bytes */
+	u64 padding[15];
 };
 
+static_assert(sizeof(struct nix_rx_mce_s) == NIX_MAX_CTX_SIZE);
+
 enum nix_band_prof_layers {
 	BAND_PROF_LEAF_LAYER = 0,
 	BAND_PROF_INVAL_LAYER = 1,
@@ -769,6 +790,8 @@ struct nix_bandprof_s {
 	uint64_t reserved_1008_1023 : 16;
 };
 
+static_assert(sizeof(struct nix_bandprof_s) == NIX_MAX_CTX_SIZE);
+
 enum nix_lsoalg {
 	NIX_LSOALG_NOP,
 	NIX_LSOALG_ADD_SEGNUM,