@@ -40,6 +40,19 @@ struct qcom_scm {
 	struct reset_controller_dev reset;
 };
 
+struct qcom_scm_current_perm_info {
+	__le32 vmid;
+	__le32 perm;
+	__le64 ctx;
+	__le32 ctx_size;
+	__le32 unused;
+};
+
+struct qcom_scm_mem_map_info {
+	__le64 mem_addr;
+	__le64 mem_size;
+};
+
 static struct qcom_scm *__scm;
 
 static int qcom_scm_clk_enable(void)
@@ -348,6 +361,88 @@ int qcom_scm_set_remote_state(u32 state, u32 id)
 }
 EXPORT_SYMBOL(qcom_scm_set_remote_state);
 
+/**
+ * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
+ * @mem_addr: mem region whose ownership needs to be reassigned
+ * @mem_sz: size of the region.
+ * @srcvm: vmid bitmap of the current set of owners, each set bit
+ *         indicates a unique owner
+ * @newvm: array of new owners and their corresponding permission
+ *         flags
+ * @dest_cnt: number of owners in the next set.
+ *
+ * Return negative errno on failure, 0 on success, with @srcvm updated.
+ */
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+			unsigned int *srcvm,
+			struct qcom_scm_vmperm *newvm, int dest_cnt)
+{
+	struct qcom_scm_current_perm_info *destvm;
+	struct qcom_scm_mem_map_info *mem_to_map;
+	phys_addr_t mem_to_map_phys;
+	phys_addr_t dest_phys;
+	phys_addr_t ptr_phys;
+	size_t mem_to_map_sz;
+	size_t dest_sz;
+	size_t src_sz;
+	size_t ptr_sz;
+	int next_vm;
+	__le32 *src;
+	void *ptr;
+	int ret;
+	int len;
+	int i;
+
+	src_sz = hweight_long(*srcvm) * sizeof(*src);
+	mem_to_map_sz = sizeof(*mem_to_map);
+	dest_sz = dest_cnt * sizeof(*destvm);
+	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
+			ALIGN(dest_sz, SZ_64);
+
+	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	/* Fill source vmid detail */
+	src = ptr;
+	len = hweight_long(*srcvm);
+	for (i = 0; i < len; i++) {
+		src[i] = cpu_to_le32(ffs(*srcvm) - 1);
+		*srcvm ^= 1 << (ffs(*srcvm) - 1);
+	}
+
+	/* Fill details of mem buff to map */
+	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
+	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
+	mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
+	mem_to_map[0].mem_size = cpu_to_le64(mem_sz);
+
+	next_vm = 0;
+	/* Fill details of the next vmid set */
+	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+	for (i = 0; i < dest_cnt; i++) {
+		destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
+		destvm[i].perm = cpu_to_le32(newvm[i].perm);
+		destvm[i].ctx = 0;
+		destvm[i].ctx_size = 0;
+		next_vm |= BIT(newvm[i].vmid);
+	}
+
+	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
+				    ptr_phys, src_sz, dest_phys, dest_sz);
+	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
+	if (ret) {
+		dev_err(__scm->dev,
+			"Assign memory protection call failed %d.\n", ret);
+		return -EINVAL;
+	}
+
+	*srcvm = next_vm;
+	return 0;
+}
+EXPORT_SYMBOL(qcom_scm_assign_mem);
+
 static int qcom_scm_probe(struct platform_device *pdev)
 {
 	struct qcom_scm *scm;
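
The hunk above only adds the helper itself. As a rough illustration of how a driver might call it, here is a hypothetical sketch (not part of this patch: the function name is invented, and struct qcom_scm_vmperm plus the QCOM_SCM_VMID_*/QCOM_SCM_PERM_* macros are assumed to come from the include/linux/qcom_scm.h changes in this series):

	/* Hand a firmware carveout from Linux (HLOS) to the modem subsystem. */
	static int example_assign_fw_region(struct device *dev,
					    phys_addr_t addr, size_t size)
	{
		/* New owner set: modem MSA, read/write access */
		struct qcom_scm_vmperm newvm[] = {
			{ QCOM_SCM_VMID_MSS_MSA,
			  QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE },
		};
		/* Current owner set: just HLOS (Linux) */
		unsigned int srcvm = BIT(QCOM_SCM_VMID_HLOS);
		int ret;

		ret = qcom_scm_assign_mem(addr, size, &srcvm,
					  newvm, ARRAY_SIZE(newvm));
		if (ret) {
			dev_err(dev, "assign memory failed: %d\n", ret);
			return ret;
		}

		/*
		 * srcvm now describes the new owner set; keep it and pass it
		 * back to qcom_scm_assign_mem() to reclaim the region later.
		 */
		return 0;
	}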