瀏覽代碼

Merge branch 'topic/4.19/am65x' of git://git.ti.com/rpmsg/remoteproc into rpmsg-ti-linux-4.19.y

Pull in the dedicated AM65x remoteproc topic branch that includes various
fixes in the K3 R5F remoteproc driver for kernel crashes in memcpy() and
memset() caused during remoteproc loading into the R5F TCMs and on-chip
SRAMs due to the mapping of these regions as device-type memory.

* 'topic/4.19/am65x' of git://git.ti.com/rpmsg/remoteproc:
  remoteproc/k3-r5: fix memzero issues on reserved SRAM regions
  remoteproc/k3-r5: fix loading into BTCM when using R5 local addresses
  remoteproc/k3-r5: fix unaligned data faults with TCMs during loading

Signed-off-by: Suman Anna <s-anna@ti.com>
Suman Anna 6 年之前
父節點
當前提交
84b28fc8e9
共有 1 個文件被更改,包括 32 次插入、16 次删除
  1. 32 16
      drivers/remoteproc/ti_k3_r5_remoteproc.c

+ 32 - 16
drivers/remoteproc/ti_k3_r5_remoteproc.c

@@ -567,22 +567,21 @@ static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, int len,
 	if (len <= 0)
 		return NULL;
 
-	/* handle R5-view of ATCM addresses first using address 0 */
-	size = core->mem[0].size;
-	if (da >= 0 && ((da + len) <= size)) {
-		offset = da;
-		va = core->mem[0].cpu_addr + offset;
-		return (__force void *)va;
-	}
-
-	/* handle SoC-view addresses for ATCM and BTCM */
+	/* handle both R5 and SoC views of ATCM and BTCM */
 	for (i = 0; i < core->num_mems; i++) {
 		bus_addr = core->mem[i].bus_addr;
 		dev_addr = core->mem[i].dev_addr;
 		size = core->mem[i].size;
 
-		if (da >= bus_addr &&
-		    ((da + len) <= (bus_addr + size))) {
+		/* handle R5-view addresses of TCMs */
+		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+			offset = da - dev_addr;
+			va = core->mem[i].cpu_addr + offset;
+			return (__force void *)va;
+		}
+
+		/* handle SoC-view addresses of TCMs */
+		if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
 			offset = da - bus_addr;
 			va = core->mem[i].cpu_addr + offset;
 			return (__force void *)va;
@@ -958,11 +957,28 @@ static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
 	for (i = 0; i < num_mems; i++) {
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						   mem_names[i]);
-		core->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
-		if (IS_ERR(core->mem[i].cpu_addr)) {
-			dev_err(dev, "failed to parse and map %s memory\n",
+		if (!res) {
+			dev_err(dev, "found no memory resource for %s\n",
 				mem_names[i]);
+			ret = -EINVAL;
+			goto fail;
+		}
+		if (!devm_request_mem_region(dev, res->start,
+					     resource_size(res),
+					     dev_name(dev))) {
+			dev_err(dev, "could not request %s region for resource\n",
+				mem_names[i]);
+			ret = -EBUSY;
+			goto fail;
+		}
+
+		core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+							resource_size(res));
+		if (IS_ERR(core->mem[i].cpu_addr)) {
+			dev_err(dev, "failed to map %s memory\n", mem_names[i]);
 			ret = PTR_ERR(core->mem[i].cpu_addr);
+			devm_release_mem_region(dev, res->start,
+						resource_size(res));
 			goto fail;
 		}
 		core->mem[i].bus_addr = res->start;
@@ -1047,8 +1063,8 @@ static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
 		core->sram[i].bus_addr = res.start;
 		core->sram[i].dev_addr = res.start;
 		core->sram[i].size = resource_size(&res);
-		core->sram[i].cpu_addr = ioremap(res.start,
-						 resource_size(&res));
+		core->sram[i].cpu_addr = ioremap_wc(res.start,
+						    resource_size(&res));
 		if (!core->sram[i].cpu_addr) {
 			dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
 				i, &res.start);