@@ -1038,6 +1038,8 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 	struct ccp_op op;
 	unsigned int unit_size, dm_offset;
 	bool in_place = false;
+	unsigned int sb_count;
+	enum ccp_aes_type aestype;
 	int ret;
 
 	switch (xts->unit_size) {
@@ -1061,7 +1063,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 		return -EINVAL;
 	}
 
-	if (xts->key_len != AES_KEYSIZE_128)
+	if (xts->key_len == AES_KEYSIZE_128)
+		aestype = CCP_AES_TYPE_128;
+	else
 		return -EINVAL;
 
 	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
@@ -1083,23 +1087,44 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 	op.sb_key = cmd_q->sb_key;
 	op.sb_ctx = cmd_q->sb_ctx;
 	op.init = 1;
+	op.u.xts.type = aestype;
 	op.u.xts.action = xts->action;
 	op.u.xts.unit_size = xts->unit_size;
 
-	/* All supported key sizes fit in a single (32-byte) SB entry
-	 * and must be in little endian format. Use the 256-bit byte
-	 * swap passthru option to convert from big endian to little
-	 * endian.
+	/* A version 3 device only supports 128-bit keys, which fits into a
+	 * single SB entry. A version 5 device uses a 512-bit vector, so two
+	 * SB entries.
 	 */
+	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+		sb_count = CCP_XTS_AES_KEY_SB_COUNT;
+	else
+		sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
+
 	ret = ccp_init_dm_workarea(&key, cmd_q,
-				   CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
+				   sb_count * CCP_SB_BYTES,
 				   DMA_TO_DEVICE);
 	if (ret)
 		return ret;
 
-	dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
-	ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
-	ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
+	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
+		/* All supported key sizes must be in little endian format.
+		 * Use the 256-bit byte swap passthru option to convert from
+		 * big endian to little endian.
+		 */
+		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+		ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+		ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+	} else {
+		/* Version 5 CCPs use a 512-bit space for the key: each portion
+		 * occupies 256 bits, or one entire slot, and is zero-padded.
+		 */
+		unsigned int pad;
+
+		dm_offset = CCP_SB_BYTES;
+		pad = dm_offset - xts->key_len;
+		ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
+		ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
+				xts->key_len);
+	}
+
 	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {