@@ -29,6 +29,7 @@
 static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);
 static atomic_t dev_count;
+static struct uld_ctx *ctx_rr;
 
 typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
 static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
@@ -49,25 +50,28 @@ static struct cxgb4_uld_info chcr_uld_info = {
 	.rx_handler = chcr_uld_rx_handler,
 };
 
-int assign_chcr_device(struct chcr_dev **dev)
+struct uld_ctx *assign_chcr_device(void)
 {
-	struct uld_ctx *u_ctx;
-	int ret = -ENXIO;
+	struct uld_ctx *u_ctx = NULL;
 
 	/*
-	 * Which device to use if multiple devices are available TODO
-	 * May be select the device based on round robin. One session
-	 * must go to the same device to maintain the ordering.
+	 * When multiple devices are present in the system, select
+	 * a device in round-robin fashion for crypto operations.
+	 * A single session must keep using the same device to
+	 * maintain request-response ordering.
 	 */
-	mutex_lock(&dev_mutex); /* TODO ? */
-	list_for_each_entry(u_ctx, &uld_ctx_list, entry)
-		if (u_ctx->dev) {
-			*dev = u_ctx->dev;
-			ret = 0;
-			break;
+	mutex_lock(&dev_mutex);
+	if (!list_empty(&uld_ctx_list)) {
+		u_ctx = ctx_rr;
+		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
+			ctx_rr = list_first_entry(&uld_ctx_list,
+						  struct uld_ctx,
+						  entry);
+		else
+			ctx_rr = list_next_entry(ctx_rr, entry);
 	}
 	mutex_unlock(&dev_mutex);
-	return ret;
+	return u_ctx;
 }
 
 static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -82,11 +86,27 @@ static int chcr_dev_add(struct uld_ctx *u_ctx)
 	u_ctx->dev = dev;
 	dev->u_ctx = u_ctx;
 	atomic_inc(&dev_count);
+	mutex_lock(&dev_mutex);
+	list_add_tail(&u_ctx->entry, &uld_ctx_list);
+	if (!ctx_rr)
+		ctx_rr = u_ctx;
+	mutex_unlock(&dev_mutex);
 	return 0;
 }
 
 static int chcr_dev_remove(struct uld_ctx *u_ctx)
 {
+	if (ctx_rr == u_ctx) {
+		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
+			ctx_rr = list_first_entry(&uld_ctx_list,
+						  struct uld_ctx,
+						  entry);
+		else
+			ctx_rr = list_next_entry(ctx_rr, entry);
+	}
+	list_del(&u_ctx->entry);
+	if (list_empty(&uld_ctx_list))
+		ctx_rr = NULL;
 	kfree(u_ctx->dev);
 	u_ctx->dev = NULL;
 	atomic_dec(&dev_count);
@@ -139,10 +159,11 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
 		u_ctx = ERR_PTR(-ENOMEM);
 		goto out;
 	}
+	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) {
+		u_ctx = ERR_PTR(-ENOMEM);
+		goto out;
+	}
 	u_ctx->lldi = *lld;
-	mutex_lock(&dev_mutex);
-	list_add_tail(&u_ctx->entry, &uld_ctx_list);
-	mutex_unlock(&dev_mutex);
 out:
 	return u_ctx;
 }
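
For reference, a minimal caller-side sketch (not part of this patch) showing how a user of the old assign_chcr_device(&dev) interface might adapt to the new signature, which now returns the selected uld_ctx directly. The chcr_context layout and function name below are assumptions for illustration only:

/* Illustrative only: adapt a caller to the new return-by-value signature.
 * "struct chcr_context" with a ->dev member is assumed from the caller side.
 */
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx;

	if (!ctx->dev) {
		u_ctx = assign_chcr_device();	/* round-robin pick, may be NULL */
		if (!u_ctx) {
			pr_err("chcr device assignment fails\n");
			return -ENXIO;
		}
		ctx->dev = u_ctx->dev;	/* pin the session to this device */
	}
	return 0;
}

Because the session caches ctx->dev on first use, subsequent requests from that session stay on the same device, preserving request-response ordering while new sessions are spread across devices in round-robin order.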