@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}
 
-static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+ int pci_chan_id)
{
struct cpl_rx_phys_dsgl *phys_cpl;
 
@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
phys_cpl->rss_hdr_int.qid = htons(qid);
phys_cpl->rss_hdr_int.hash_val = 0;
+ phys_cpl->rss_hdr_int.channel = pci_chan_id;
}
 
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
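
For context: the two hunks above extend dsgl_walk_end() to take the PCI channel id and write it into the rss_hdr_int.channel field of the CPL_RX_PHYS_DSGL request, so the SGE forwards the response on the channel the context's queues actually belong to. A minimal userspace sketch of the new contract, using simplified stand-in types rather than the driver's real cpl_rx_phys_dsgl/dsgl_walk definitions from t4_msg.h:

	/* Sketch only: stand-in types, not the real t4_msg.h layout. */
	#include <stdint.h>
	#include <arpa/inet.h>		/* htons() */

	struct rss_hdr_sketch {
		uint8_t  opcode;
		uint8_t  channel;	/* now carries the PCI channel id */
		uint16_t qid;
		uint32_t hash_val;
	};

	struct phys_dsgl_sketch {
		struct rss_hdr_sketch rss_hdr_int;
	};

	/* Callers must now supply the PCI channel id explicitly. */
	static void dsgl_walk_end_sketch(struct phys_dsgl_sketch *phys_cpl,
					 unsigned short qid, int pci_chan_id)
	{
		phys_cpl->rss_hdr_int.qid      = htons(qid);
		phys_cpl->rss_hdr_int.hash_val = 0;
		phys_cpl->rss_hdr_int.channel  = (uint8_t)pci_chan_id;
	}
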
@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
!!lcb, ctx->tx_qidx);
 
- chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
qid);
chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
((sizeof(chcr_req->wreq)) >> 4)));
@@ -1340,16 +1342,23 @@ static int chcr_device_init(struct chcr_context *ctx)
adap->vres.ncrypto_fc);
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
txq_perchan = ntxq / u_ctx->lldi.nchan;
- rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
- rxq_idx += id % rxq_perchan;
- txq_idx = ctx->dev->tx_channel_id * txq_perchan;
- txq_idx += id % txq_perchan;
spin_lock(&ctx->dev->lock_chcr_dev);
- ctx->rx_qidx = rxq_idx;
- ctx->tx_qidx = txq_idx;
+ ctx->tx_chan_id = ctx->dev->tx_channel_id;
ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
ctx->dev->rx_channel_id = 0;
spin_unlock(&ctx->dev->lock_chcr_dev);
+ rxq_idx = ctx->tx_chan_id * rxq_perchan;
+ rxq_idx += id % rxq_perchan;
+ txq_idx = ctx->tx_chan_id * txq_perchan;
+ txq_idx += id % txq_perchan;
+ ctx->rx_qidx = rxq_idx;
+ ctx->tx_qidx = txq_idx;
+ /* Channel Id used by SGE to forward packet to Host.
+ * Same value should be used in cpl_fw6_pld RSS_CH field
+ * by FW. Driver programs PCI channel ID to be used in fw
+ * at the time of queue allocation with value "pi->tx_chan"
+ */
+ ctx->pci_chan_id = txq_idx / txq_perchan;
}
out:
return err;
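
The chcr_device_init() hunk above is the heart of the change: the transmit channel is still handed out round-robin per context and cached as ctx->tx_chan_id, the queue indices are derived from that cached value rather than from dev->tx_channel_id at request time, and the PCI channel is recovered from the tx queue index as txq_idx / txq_perchan. A standalone sketch of that arithmetic, with assumed queue counts (nchan, ntxq, nrxq are illustrative values, not read from an adapter):

	#include <stdio.h>

	int main(void)
	{
		/* Assumed adapter shape, for illustration only. */
		const int nchan = 2;			/* PCI channels		*/
		const int ntxq  = 8, nrxq = 8;		/* total tx/rx queues	*/
		const int txq_perchan = ntxq / nchan;
		const int rxq_perchan = nrxq / nchan;

		for (int id = 0; id < 4; id++) {	/* hypothetical context ids */
			/* Stand-in for the driver's toggling dev->tx_channel_id. */
			int tx_chan_id = id % nchan;

			int rxq_idx = tx_chan_id * rxq_perchan + id % rxq_perchan;
			int txq_idx = tx_chan_id * txq_perchan + id % txq_perchan;
			/* Channel the tx queue block was carved from. */
			int pci_chan_id = txq_idx / txq_perchan;

			printf("ctx %d: tx_chan %d rxq_idx %d txq_idx %d pci_chan %d\n",
			       id, tx_chan_id, rxq_idx, txq_idx, pci_chan_id);
		}
		return 0;
	}

Since id % txq_perchan is always below txq_perchan, pci_chan_id simply recovers tx_chan_id, i.e. the channel whose queue range the context was mapped into; the create_wreq() hunk above likewise switches FILL_ULPTX_CMD_DEST() to this cached per-context value instead of re-reading dev->tx_channel_id at request time.
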
@@ -2504,6 +2513,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct dsgl_walk dsgl_walk;
unsigned int authsize = crypto_aead_authsize(tfm);
+ struct chcr_context *ctx = a_ctx(tfm);
u32 temp;
 
dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2513,7 +2523,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
- dsgl_walk_end(&dsgl_walk, qid);
+ dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}
 
void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2545,6 +2555,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
unsigned short qid)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+ struct chcr_context *ctx = c_ctx(tfm);
struct dsgl_walk dsgl_walk;
 
dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2553,7 +2565,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
reqctx->dstsg = dsgl_walk.last_sg;
reqctx->dst_ofst = dsgl_walk.last_sg_len;
 
- dsgl_walk_end(&dsgl_walk, qid);
+ dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}
 
void chcr_add_hash_src_ent(struct ahash_request *req,
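
Taken together, the AEAD and cipher dst-entry hunks above resolve the chcr_context from the transform (via the driver's a_ctx()/c_ctx() accessors) and thread ctx->pci_chan_id through to dsgl_walk_end(). A hedged sketch of that call pattern, reusing the stand-in types from the dsgl_walk_end sketch earlier (the context struct below is simplified, not the driver's struct chcr_context):

	/* Simplified context: only the field these hunks rely on. */
	struct chcr_ctx_sketch {
		int pci_chan_id;	/* set once in chcr_device_init() */
	};

	static void add_dst_ent_sketch(struct chcr_ctx_sketch *ctx,
				       struct phys_dsgl_sketch *phys_cpl,
				       unsigned short qid)
	{
		/* Every dst-entry helper now forwards the per-context
		 * PCI channel captured at init time. */
		dsgl_walk_end_sketch(phys_cpl, qid, ctx->pci_chan_id);
	}
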