Merge tag 'qcom-drivers-for-4.8' into HEAD

Qualcomm ARM Based Driver Updates for v4.8

* Rework of SCM driver
* Add file patterns for Qualcomm Maintainers entry
* Add worker for wcnss_ctrl signaling
* Fixes for smp2p
* Update smem_state properties to match documentation
* Add SCM Peripheral Authentication service
* Expose SCM PAS command 10 as a reset controller
Andy Gross, 9 years ago
Parent commit: 2cd83c75d4

+ 2 - 2
Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt

@@ -68,7 +68,7 @@ important.
 	Value type: <u32>
 	Definition: must be 2 - denoting the bit in the entry and IRQ flags
 
-- #qcom,state-cells:
+- #qcom,smem-state-cells:
 	Usage: required for outgoing entries
 	Value type: <u32>
 	Definition: must be 1 - denoting the bit in the entry
@@ -92,7 +92,7 @@ wcnss-smp2p {
 	wcnss_smp2p_out: master-kernel {
 		qcom,entry-name = "master-kernel";
 
-		#qcom,state-cells = <1>;
+		#qcom,smem-state-cells = <1>;
 	};
 
 	wcnss_smp2p_in: slave-kernel {

+ 2 - 2
Documentation/devicetree/bindings/soc/qcom/qcom,smsm.txt

@@ -51,7 +51,7 @@ important.
 	Definition: specifies the offset, in words, of the first bit for this
 		    entry
 
-- #qcom,state-cells:
+- #qcom,smem-state-cells:
 	Usage: required for local entry
 	Value type: <u32>
 	Definition: must be 1 - denotes bit number
@@ -91,7 +91,7 @@ smsm {
 	apps_smsm: apps@0 {
 		reg = <0>;
 
-		#qcom,state-cells = <1>;
+		#qcom,smem-state-cells = <1>;
 	};
 
 	wcnss_smsm: wcnss@7 {

+ 1 - 0
MAINTAINERS

@@ -1521,6 +1521,7 @@ M:	David Brown <david.brown@linaro.org>
 L:	linux-arm-msm@vger.kernel.org
 L:	linux-soc@vger.kernel.org
 S:	Maintained
+F:	Documentation/devicetree/bindings/soc/qcom/
 F:	arch/arm/boot/dts/qcom-*.dts
 F:	arch/arm/boot/dts/qcom-*.dtsi
 F:	arch/arm/mach-qcom/

+ 1 - 0
drivers/firmware/Kconfig

@@ -184,6 +184,7 @@ config FW_CFG_SYSFS_CMDLINE
 config QCOM_SCM
 	bool
 	depends on ARM || ARM64
+	select RESET_CONTROLLER
 
 config QCOM_SCM_32
 	def_bool y

+ 194 - 133
drivers/firmware/qcom_scm-32.c

@@ -23,8 +23,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/qcom_scm.h>
-
-#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
 
 #include "qcom_scm.h"
 
@@ -96,44 +95,6 @@ struct qcom_scm_response {
 	__le32 is_complete;
 };
 
-/**
- * alloc_qcom_scm_command() - Allocate an SCM command
- * @cmd_size: size of the command buffer
- * @resp_size: size of the response buffer
- *
- * Allocate an SCM command, including enough room for the command
- * and response headers as well as the command and response buffers.
- *
- * Returns a valid &qcom_scm_command on success or %NULL if the allocation fails.
- */
-static struct qcom_scm_command *alloc_qcom_scm_command(size_t cmd_size, size_t resp_size)
-{
-	struct qcom_scm_command *cmd;
-	size_t len = sizeof(*cmd) + sizeof(struct qcom_scm_response) + cmd_size +
-		resp_size;
-	u32 offset;
-
-	cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
-	if (cmd) {
-		cmd->len = cpu_to_le32(len);
-		offset = offsetof(struct qcom_scm_command, buf);
-		cmd->buf_offset = cpu_to_le32(offset);
-		cmd->resp_hdr_offset = cpu_to_le32(offset + cmd_size);
-	}
-	return cmd;
-}
-
-/**
- * free_qcom_scm_command() - Free an SCM command
- * @cmd: command to free
- *
- * Free an SCM command.
- */
-static inline void free_qcom_scm_command(struct qcom_scm_command *cmd)
-{
-	kfree(cmd);
-}
-
 /**
  * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
  * @cmd: command
@@ -168,23 +129,6 @@ static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response
 	return (void *)rsp + le32_to_cpu(rsp->buf_offset);
 }
 
-static int qcom_scm_remap_error(int err)
-{
-	pr_err("qcom_scm_call failed with error code %d\n", err);
-	switch (err) {
-	case QCOM_SCM_ERROR:
-		return -EIO;
-	case QCOM_SCM_EINVAL_ADDR:
-	case QCOM_SCM_EINVAL_ARG:
-		return -EINVAL;
-	case QCOM_SCM_EOPNOTSUPP:
-		return -EOPNOTSUPP;
-	case QCOM_SCM_ENOMEM:
-		return -ENOMEM;
-	}
-	return -EINVAL;
-}
-
 static u32 smc(u32 cmd_addr)
 {
 	int context_id;
@@ -209,45 +153,9 @@ static u32 smc(u32 cmd_addr)
 	return r0;
 }
 
-static int __qcom_scm_call(const struct qcom_scm_command *cmd)
-{
-	int ret;
-	u32 cmd_addr = virt_to_phys(cmd);
-
-	/*
-	 * Flush the command buffer so that the secure world sees
-	 * the correct data.
-	 */
-	secure_flush_area(cmd, cmd->len);
-
-	ret = smc(cmd_addr);
-	if (ret < 0)
-		ret = qcom_scm_remap_error(ret);
-
-	return ret;
-}
-
-static void qcom_scm_inv_range(unsigned long start, unsigned long end)
-{
-	u32 cacheline_size, ctr;
-
-	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
-	cacheline_size = 4 << ((ctr >> 16) & 0xf);
-
-	start = round_down(start, cacheline_size);
-	end = round_up(end, cacheline_size);
-	outer_inv_range(start, end);
-	while (start < end) {
-		asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
-		     : "memory");
-		start += cacheline_size;
-	}
-	dsb();
-	isb();
-}
-
 /**
  * qcom_scm_call() - Send an SCM command
+ * @dev: struct device
  * @svc_id: service identifier
  * @cmd_id: command identifier
  * @cmd_buf: command buffer
@@ -264,42 +172,59 @@ static void qcom_scm_inv_range(unsigned long start, unsigned long end)
  * and response buffers is taken care of by qcom_scm_call; however, callers are
  * responsible for any other cached buffers passed over to the secure world.
  */
-static int qcom_scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf,
-			size_t cmd_len, void *resp_buf, size_t resp_len)
+static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+			 const void *cmd_buf, size_t cmd_len, void *resp_buf,
+			 size_t resp_len)
 {
 	int ret;
 	struct qcom_scm_command *cmd;
 	struct qcom_scm_response *rsp;
-	unsigned long start, end;
+	size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
+	dma_addr_t cmd_phys;
 
-	cmd = alloc_qcom_scm_command(cmd_len, resp_len);
+	cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
 	if (!cmd)
 		return -ENOMEM;
 
+	cmd->len = cpu_to_le32(alloc_len);
+	cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
+	cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
+
 	cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
 	if (cmd_buf)
 		memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);
 
+	rsp = qcom_scm_command_to_response(cmd);
+
+	cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, cmd_phys)) {
+		kfree(cmd);
+		return -ENOMEM;
+	}
+
 	mutex_lock(&qcom_scm_lock);
-	ret = __qcom_scm_call(cmd);
+	ret = smc(cmd_phys);
+	if (ret < 0)
+		ret = qcom_scm_remap_error(ret);
 	mutex_unlock(&qcom_scm_lock);
 	if (ret)
 		goto out;
 
-	rsp = qcom_scm_command_to_response(cmd);
-	start = (unsigned long)rsp;
-
 	do {
-		qcom_scm_inv_range(start, start + sizeof(*rsp));
+		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
+					sizeof(*rsp), DMA_FROM_DEVICE);
 	} while (!rsp->is_complete);
 
-	end = (unsigned long)qcom_scm_get_response_buffer(rsp) + resp_len;
-	qcom_scm_inv_range(start, end);
-
-	if (resp_buf)
-		memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), resp_len);
+	if (resp_buf) {
+		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
+					le32_to_cpu(rsp->buf_offset),
+					resp_len, DMA_FROM_DEVICE);
+		memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
+		       resp_len);
+	}
 out:
-	free_qcom_scm_command(cmd);
+	dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
+	kfree(cmd);
 	return ret;
 }
 
@@ -342,6 +267,41 @@ static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
 	return r0;
 }
 
+/**
+ * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
+ * @svc_id:	service identifier
+ * @cmd_id:	command identifier
+ * @arg1:	first argument
+ * @arg2:	second argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptable, atomic and SMP safe.
+ */
+static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
+{
+	int context_id;
+
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
+	register u32 r1 asm("r1") = (u32)&context_id;
+	register u32 r2 asm("r2") = arg1;
+	register u32 r3 asm("r3") = arg2;
+
+	asm volatile(
+			__asmeq("%0", "r0")
+			__asmeq("%1", "r0")
+			__asmeq("%2", "r1")
+			__asmeq("%3", "r2")
+			__asmeq("%4", "r3")
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc    #0      @ switch to secure world\n"
+			: "=r" (r0)
+			: "r" (r0), "r" (r1), "r" (r2), "r" (r3)
+			);
+	return r0;
+}
+
 u32 qcom_scm_get_version(void)
 {
 	int context_id;
@@ -378,22 +338,6 @@ u32 qcom_scm_get_version(void)
 }
 EXPORT_SYMBOL(qcom_scm_get_version);
 
-/*
- * Set the cold/warm boot address for one of the CPU cores.
- */
-static int qcom_scm_set_boot_addr(u32 addr, int flags)
-{
-	struct {
-		__le32 flags;
-		__le32 addr;
-	} cmd;
-
-	cmd.addr = cpu_to_le32(addr);
-	cmd.flags = cpu_to_le32(flags);
-	return qcom_scm_call(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
-			&cmd, sizeof(cmd), NULL, 0);
-}
-
 /**
  * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
  * @entry: Entry point function for the cpus
@@ -423,7 +367,8 @@ int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
 			set_cpu_present(cpu, false);
 	}
 
-	return qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
+	return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
+				    flags, virt_to_phys(entry));
 }
 
 /**
@@ -434,11 +379,16 @@ int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
  * Set the Linux entry point for the SCM to transfer control to when coming
  * out of a power down. CPU power down may be executed on cpuidle or hotplug.
  */
-int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
+				  const cpumask_t *cpus)
 {
 	int ret;
 	int flags = 0;
 	int cpu;
+	struct {
+		__le32 flags;
+		__le32 addr;
+	} cmd;
 
 	/*
 	 * Reassign only if we are switching from hotplug entry point
@@ -454,7 +404,10 @@ int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
 	if (!flags)
 		return 0;
 
-	ret = qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
+	cmd.addr = cpu_to_le32(virt_to_phys(entry));
+	cmd.flags = cpu_to_le32(flags);
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
+			    &cmd, sizeof(cmd), NULL, 0);
 	if (!ret) {
 		for_each_cpu(cpu, cpus)
 			qcom_scm_wb[cpu].entry = entry;
@@ -477,25 +430,133 @@ void __qcom_scm_cpu_power_down(u32 flags)
 			flags & QCOM_SCM_FLUSH_FLAG_MASK);
 }
 
-int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
+int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
 {
 	int ret;
 	__le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
 	__le32 ret_val = 0;
 
-	ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &svc_cmd,
-			sizeof(svc_cmd), &ret_val, sizeof(ret_val));
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
+			    &svc_cmd, sizeof(svc_cmd), &ret_val,
+			    sizeof(ret_val));
 	if (ret)
 		return ret;
 
 	return le32_to_cpu(ret_val);
 }
 
-int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
+			u32 req_cnt, u32 *resp)
 {
 	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
 		return -ERANGE;
 
-	return qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
+	return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
 		req, req_cnt * sizeof(*req), resp, sizeof(*resp));
 }
+
+void __qcom_scm_init(void)
+{
+}
+
+bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
+{
+	__le32 out;
+	__le32 in;
+	int ret;
+
+	in = cpu_to_le32(peripheral);
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+			    QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+			    &in, sizeof(in),
+			    &out, sizeof(out));
+
+	return ret ? false : !!out;
+}
+
+int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
+			      dma_addr_t metadata_phys)
+{
+	__le32 scm_ret;
+	int ret;
+	struct {
+		__le32 proc;
+		__le32 image_addr;
+	} request;
+
+	request.proc = cpu_to_le32(peripheral);
+	request.image_addr = cpu_to_le32(metadata_phys);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+			    QCOM_SCM_PAS_INIT_IMAGE_CMD,
+			    &request, sizeof(request),
+			    &scm_ret, sizeof(scm_ret));
+
+	return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
+			     phys_addr_t addr, phys_addr_t size)
+{
+	__le32 scm_ret;
+	int ret;
+	struct {
+		__le32 proc;
+		__le32 addr;
+		__le32 len;
+	} request;
+
+	request.proc = cpu_to_le32(peripheral);
+	request.addr = cpu_to_le32(addr);
+	request.len = cpu_to_le32(size);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+			    QCOM_SCM_PAS_MEM_SETUP_CMD,
+			    &request, sizeof(request),
+			    &scm_ret, sizeof(scm_ret));
+
+	return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
+{
+	__le32 out;
+	__le32 in;
+	int ret;
+
+	in = cpu_to_le32(peripheral);
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+			    QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+			    &in, sizeof(in),
+			    &out, sizeof(out));
+
+	return ret ? : le32_to_cpu(out);
+}
+
+int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
+{
+	__le32 out;
+	__le32 in;
+	int ret;
+
+	in = cpu_to_le32(peripheral);
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+			    QCOM_SCM_PAS_SHUTDOWN_CMD,
+			    &in, sizeof(in),
+			    &out, sizeof(out));
+
+	return ret ? : le32_to_cpu(out);
+}
+
+int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
+{
+	__le32 out;
+	__le32 in = cpu_to_le32(reset);
+	int ret;
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
+			&in, sizeof(in),
+			&out, sizeof(out));
+
+	return ret ? : le32_to_cpu(out);
+}

+ 302 - 5
drivers/firmware/qcom_scm-64.c

@@ -12,7 +12,150 @@
 
 #include <linux/io.h>
 #include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
 #include <linux/qcom_scm.h>
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+#define QCOM_SCM_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
+
+#define MAX_QCOM_SCM_ARGS 10
+#define MAX_QCOM_SCM_RETS 3
+
+enum qcom_scm_arg_types {
+	QCOM_SCM_VAL,
+	QCOM_SCM_RO,
+	QCOM_SCM_RW,
+	QCOM_SCM_BUFVAL,
+};
+
+#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
+			   (((a) & 0x3) << 4) | \
+			   (((b) & 0x3) << 6) | \
+			   (((c) & 0x3) << 8) | \
+			   (((d) & 0x3) << 10) | \
+			   (((e) & 0x3) << 12) | \
+			   (((f) & 0x3) << 14) | \
+			   (((g) & 0x3) << 16) | \
+			   (((h) & 0x3) << 18) | \
+			   (((i) & 0x3) << 20) | \
+			   (((j) & 0x3) << 22) | \
+			   ((num) & 0xf))
+
+#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+/**
+ * struct qcom_scm_desc
+ * @arginfo:	Metadata describing the arguments in args[]
+ * @args:	The array of arguments for the secure syscall
+ * @res:	The values returned by the secure syscall
+ */
+struct qcom_scm_desc {
+	u32 arginfo;
+	u64 args[MAX_QCOM_SCM_ARGS];
+};
+
+static u64 qcom_smccc_convention = -1;
+static DEFINE_MUTEX(qcom_scm_lock);
+
+#define QCOM_SCM_EBUSY_WAIT_MS 30
+#define QCOM_SCM_EBUSY_MAX_RETRY 20
+
+#define N_EXT_QCOM_SCM_ARGS 7
+#define FIRST_EXT_ARG_IDX 3
+#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
+
+/**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev:	device
+ * @svc_id:	service identifier
+ * @cmd_id:	command identifier
+ * @desc:	Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+*/
+static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+			 const struct qcom_scm_desc *desc,
+			 struct arm_smccc_res *res)
+{
+	int arglen = desc->arginfo & 0xf;
+	int retry_count = 0, i;
+	u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
+	u64 cmd, x5 = desc->args[FIRST_EXT_ARG_IDX];
+	dma_addr_t args_phys = 0;
+	void *args_virt = NULL;
+	size_t alloc_len;
+
+	if (unlikely(arglen > N_REGISTER_ARGS)) {
+		alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
+		args_virt = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+
+		if (!args_virt)
+			return -ENOMEM;
+
+		if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
+			__le32 *args = args_virt;
+
+			for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
+				args[i] = cpu_to_le32(desc->args[i +
+						      FIRST_EXT_ARG_IDX]);
+		} else {
+			__le64 *args = args_virt;
+
+			for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
+				args[i] = cpu_to_le64(desc->args[i +
+						      FIRST_EXT_ARG_IDX]);
+		}
+
+		args_phys = dma_map_single(dev, args_virt, alloc_len,
+					   DMA_TO_DEVICE);
+
+		if (dma_mapping_error(dev, args_phys)) {
+			kfree(args_virt);
+			return -ENOMEM;
+		}
+
+		x5 = args_phys;
+	}
+
+	do {
+		mutex_lock(&qcom_scm_lock);
+
+		cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+					 qcom_smccc_convention,
+					 ARM_SMCCC_OWNER_SIP, fn_id);
+
+		do {
+			arm_smccc_smc(cmd, desc->arginfo, desc->args[0],
+				      desc->args[1], desc->args[2], x5, 0, 0,
+				      res);
+		} while (res->a0 == QCOM_SCM_INTERRUPTED);
+
+		mutex_unlock(&qcom_scm_lock);
+
+		if (res->a0 == QCOM_SCM_V2_EBUSY) {
+			if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+				break;
+			msleep(QCOM_SCM_EBUSY_WAIT_MS);
+		}
+	}  while (res->a0 == QCOM_SCM_V2_EBUSY);
+
+	if (args_virt) {
+		dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
+		kfree(args_virt);
+	}
+
+	if (res->a0 < 0)
+		return qcom_scm_remap_error(res->a0);
+
+	return 0;
+}
 
 /**
  * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
@@ -29,13 +172,15 @@ int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
 
 /**
  * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @dev: Device pointer
  * @entry: Entry point function for the cpus
  * @cpus: The cpumask of cpus that will use the entry point
  *
  * Set the Linux entry point for the SCM to transfer control to when coming
  * out of a power down. CPU power down may be executed on cpuidle or hotplug.
  */
-int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
+				  const cpumask_t *cpus)
 {
 	return -ENOTSUPP;
 }
@@ -52,12 +197,164 @@ void __qcom_scm_cpu_power_down(u32 flags)
 {
 }
 
-int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
+int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
 {
-	return -ENOTSUPP;
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.arginfo = QCOM_SCM_ARGS(1);
+	desc.args[0] = QCOM_SCM_FNID(svc_id, cmd_id) |
+			(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
+			    &desc, &res);
+
+	return ret ? : res.a1;
 }
 
-int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
+			u32 req_cnt, u32 *resp)
 {
-	return -ENOTSUPP;
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+		return -ERANGE;
+
+	desc.args[0] = req[0].addr;
+	desc.args[1] = req[0].val;
+	desc.args[2] = req[1].addr;
+	desc.args[3] = req[1].val;
+	desc.args[4] = req[2].addr;
+	desc.args[5] = req[2].val;
+	desc.args[6] = req[3].addr;
+	desc.args[7] = req[3].val;
+	desc.args[8] = req[4].addr;
+	desc.args[9] = req[4].val;
+	desc.arginfo = QCOM_SCM_ARGS(10);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc,
+			    &res);
+	*resp = res.a1;
+
+	return ret;
+}
+
+void __qcom_scm_init(void)
+{
+	u64 cmd;
+	struct arm_smccc_res res;
+	u32 function = QCOM_SCM_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD);
+
+	/* First try a SMC64 call */
+	cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
+				 ARM_SMCCC_OWNER_SIP, function);
+
+	arm_smccc_smc(cmd, QCOM_SCM_ARGS(1), cmd & (~BIT(ARM_SMCCC_TYPE_SHIFT)),
+		      0, 0, 0, 0, 0, &res);
+
+	if (!res.a0 && res.a1)
+		qcom_smccc_convention = ARM_SMCCC_SMC_64;
+	else
+		qcom_smccc_convention = ARM_SMCCC_SMC_32;
+}
+
+bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
+{
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = peripheral;
+	desc.arginfo = QCOM_SCM_ARGS(1);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+				QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+				&desc, &res);
+
+	return ret ? false : !!res.a1;
+}
+
+int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
+			      dma_addr_t metadata_phys)
+{
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = peripheral;
+	desc.args[1] = metadata_phys;
+	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
+				&desc, &res);
+
+	return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
+			      phys_addr_t addr, phys_addr_t size)
+{
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = peripheral;
+	desc.args[1] = addr;
+	desc.args[2] = size;
+	desc.arginfo = QCOM_SCM_ARGS(3);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
+				&desc, &res);
+
+	return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
+{
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = peripheral;
+	desc.arginfo = QCOM_SCM_ARGS(1);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+				QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+				&desc, &res);
+
+	return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
+{
+	int ret;
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+
+	desc.args[0] = peripheral;
+	desc.arginfo = QCOM_SCM_ARGS(1);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
+			&desc, &res);
+
+	return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
+{
+	struct qcom_scm_desc desc = {0};
+	struct arm_smccc_res res;
+	int ret;
+
+	desc.args[0] = reset;
+	desc.args[1] = 0;
+	desc.arginfo = QCOM_SCM_ARGS(2);
+
+	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET, &desc,
+			    &res);
+
+	return ret ? : res.a1;
 }

+ 334 - 11
drivers/firmware/qcom_scm.c

@@ -10,19 +10,64 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
-
+#include <linux/platform_device.h>
+#include <linux/module.h>
 #include <linux/cpumask.h>
 #include <linux/export.h>
+#include <linux/dma-mapping.h>
 #include <linux/types.h>
 #include <linux/qcom_scm.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/reset-controller.h>
 
 #include "qcom_scm.h"
 
+struct qcom_scm {
+	struct device *dev;
+	struct clk *core_clk;
+	struct clk *iface_clk;
+	struct clk *bus_clk;
+	struct reset_controller_dev reset;
+};
+
+static struct qcom_scm *__scm;
+
+static int qcom_scm_clk_enable(void)
+{
+	int ret;
+
+	ret = clk_prepare_enable(__scm->core_clk);
+	if (ret)
+		goto bail;
+
+	ret = clk_prepare_enable(__scm->iface_clk);
+	if (ret)
+		goto disable_core;
+
+	ret = clk_prepare_enable(__scm->bus_clk);
+	if (ret)
+		goto disable_iface;
+
+	return 0;
+
+disable_iface:
+	clk_disable_unprepare(__scm->iface_clk);
+disable_core:
+	clk_disable_unprepare(__scm->core_clk);
+bail:
+	return ret;
+}
+
+static void qcom_scm_clk_disable(void)
+{
+	clk_disable_unprepare(__scm->core_clk);
+	clk_disable_unprepare(__scm->iface_clk);
+	clk_disable_unprepare(__scm->bus_clk);
+}
+
 /**
  * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
  * @entry: Entry point function for the cpus
@@ -47,7 +92,7 @@ EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
  */
 int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
 {
-	return __qcom_scm_set_warm_boot_addr(entry, cpus);
+	return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
 }
 EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
 
@@ -72,12 +117,17 @@ EXPORT_SYMBOL(qcom_scm_cpu_power_down);
  */
 bool qcom_scm_hdcp_available(void)
 {
-	int ret;
+	int ret = qcom_scm_clk_enable();
+
+	if (ret)
+		return ret;
 
-	ret = __qcom_scm_is_call_available(QCOM_SCM_SVC_HDCP,
-		QCOM_SCM_CMD_HDCP);
+	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+						QCOM_SCM_CMD_HDCP);
 
-	return (ret > 0) ? true : false;
+	qcom_scm_clk_disable();
+
+	return ret > 0 ? true : false;
 }
 EXPORT_SYMBOL(qcom_scm_hdcp_available);
 
@@ -91,6 +141,279 @@ EXPORT_SYMBOL(qcom_scm_hdcp_available);
  */
 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
 {
-	return __qcom_scm_hdcp_req(req, req_cnt, resp);
+	int ret = qcom_scm_clk_enable();
+
+	if (ret)
+		return ret;
+
+	ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
+	qcom_scm_clk_disable();
+	return ret;
 }
 EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ *			      available for the given peripheral
+ * @peripheral:	peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+	int ret;
+
+	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+					   QCOM_SCM_PAS_IS_SUPPORTED_CMD);
+	if (ret <= 0)
+		return false;
+
+	return __qcom_scm_pas_supported(__scm->dev, peripheral);
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+/**
+ * qcom_scm_pas_init_image() - Initialize peripheral authentication service
+ *			       state machine for a given peripheral, using the
+ *			       metadata
+ * @peripheral: peripheral id
+ * @metadata:	pointer to memory containing ELF header, program header table
+ *		and optional blob of data used for authenticating the metadata
+ *		and the rest of the firmware
+ * @size:	size of the metadata
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
+{
+	dma_addr_t mdata_phys;
+	void *mdata_buf;
+	int ret;
+
+	/*
+	 * During the scm call memory protection will be enabled for the meta
+	 * data blob, so make sure it's physically contiguous, 4K aligned and
+	 * non-cachable to avoid XPU violations.
+	 */
+	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
+				       GFP_KERNEL);
+	if (!mdata_buf) {
+		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
+		return -ENOMEM;
+	}
+	memcpy(mdata_buf, metadata, size);
+
+	ret = qcom_scm_clk_enable();
+	if (ret)
+		goto free_metadata;
+
+	ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);
+
+	qcom_scm_clk_disable();
+
+free_metadata:
+	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_init_image);
+
+/**
+ * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
+ *			      for firmware loading
+ * @peripheral:	peripheral id
+ * @addr:	start address of memory area to prepare
+ * @size:	size of the memory area to prepare
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+	int ret;
+
+	ret = qcom_scm_clk_enable();
+	if (ret)
+		return ret;
+
+	ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
+	qcom_scm_clk_disable();
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
+
+/**
+ * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
+ *				   and reset the remote processor
+ * @peripheral:	peripheral id
+ *
+ * Return 0 on success.
+ */
+int qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+	int ret;
+
+	ret = qcom_scm_clk_enable();
+	if (ret)
+		return ret;
+
+	ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
+	qcom_scm_clk_disable();
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
+
+/**
+ * qcom_scm_pas_shutdown() - Shut down the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_shutdown(u32 peripheral)
+{
+	int ret;
+
+	ret = qcom_scm_clk_enable();
+	if (ret)
+		return ret;
+
+	ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
+	qcom_scm_clk_disable();
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_shutdown);
+
+static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
+				     unsigned long idx)
+{
+	if (idx != 0)
+		return -EINVAL;
+
+	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
+}
+
+static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
+				       unsigned long idx)
+{
+	if (idx != 0)
+		return -EINVAL;
+
+	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
+}
+
+static const struct reset_control_ops qcom_scm_pas_reset_ops = {
+	.assert = qcom_scm_pas_reset_assert,
+	.deassert = qcom_scm_pas_reset_deassert,
+};
+
+
+static int qcom_scm_probe(struct platform_device *pdev)
+{
+	struct qcom_scm *scm;
+	int ret;
+
+	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
+	if (!scm)
+		return -ENOMEM;
+
+	scm->core_clk = devm_clk_get(&pdev->dev, "core");
+	if (IS_ERR(scm->core_clk)) {
+		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
+			return PTR_ERR(scm->core_clk);
+
+		scm->core_clk = NULL;
+	}
+
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,scm")) {
+		scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
+		if (IS_ERR(scm->iface_clk)) {
+			if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
+				dev_err(&pdev->dev, "failed to acquire iface clk\n");
+			return PTR_ERR(scm->iface_clk);
+		}
+
+		scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
+		if (IS_ERR(scm->bus_clk)) {
+			if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
+				dev_err(&pdev->dev, "failed to acquire bus clk\n");
+			return PTR_ERR(scm->bus_clk);
+		}
+	}
+
+	scm->reset.ops = &qcom_scm_pas_reset_ops;
+	scm->reset.nr_resets = 1;
+	scm->reset.of_node = pdev->dev.of_node;
+	reset_controller_register(&scm->reset);
+
+	/* vote for max clk rate for highest performance */
+	ret = clk_set_rate(scm->core_clk, INT_MAX);
+	if (ret)
+		return ret;
+
+	__scm = scm;
+	__scm->dev = &pdev->dev;
+
+	__qcom_scm_init();
+
+	return 0;
+}
+
+static const struct of_device_id qcom_scm_dt_match[] = {
+	{ .compatible = "qcom,scm-apq8064",},
+	{ .compatible = "qcom,scm-msm8660",},
+	{ .compatible = "qcom,scm-msm8960",},
+	{ .compatible = "qcom,scm",},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
+
+static struct platform_driver qcom_scm_driver = {
+	.driver = {
+		.name	= "qcom_scm",
+		.of_match_table = qcom_scm_dt_match,
+	},
+	.probe = qcom_scm_probe,
+};
+
+static int __init qcom_scm_init(void)
+{
+	struct device_node *np, *fw_np;
+	int ret;
+
+	fw_np = of_find_node_by_name(NULL, "firmware");
+
+	if (!fw_np)
+		return -ENODEV;
+
+	np = of_find_matching_node(fw_np, qcom_scm_dt_match);
+
+	if (!np) {
+		of_node_put(fw_np);
+		return -ENODEV;
+	}
+
+	of_node_put(np);
+
+	ret = of_platform_populate(fw_np, qcom_scm_dt_match, NULL, NULL);
+
+	of_node_put(fw_np);
+
+	if (ret)
+		return ret;
+
+	return platform_driver_register(&qcom_scm_driver);
+}
+
+arch_initcall(qcom_scm_init);
+
+static void __exit qcom_scm_exit(void)
+{
+	platform_driver_unregister(&qcom_scm_driver);
+}
+module_exit(qcom_scm_exit);
+
+MODULE_DESCRIPTION("Qualcomm SCM driver");
+MODULE_LICENSE("GPL v2");
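
The probe above registers a single-bit reset controller backed by SCM PAS command 10 (MSS reset). Below is a hedged sketch of how a consumer (for example a modem remoteproc driver) could drive it through the generic reset framework; it assumes DT wiring along the lines of resets = <&scm 0>; reset-names = "mss_restart" and is illustrative only.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_mss_restart(struct device *dev)
{
	struct reset_control *mss_restart;
	int ret;

	/* "mss_restart" here is a hypothetical reset-names entry */
	mss_restart = devm_reset_control_get(dev, "mss_restart");
	if (IS_ERR(mss_restart))
		return PTR_ERR(mss_restart);

	/* assert() reaches __qcom_scm_pas_mss_reset(dev, 1) via qcom_scm_pas_reset_ops ... */
	ret = reset_control_assert(mss_restart);
	if (ret)
		return ret;

	/* ... and deassert() reaches __qcom_scm_pas_mss_reset(dev, 0) */
	return reset_control_deassert(mss_restart);
}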

+ 43 - 4
drivers/firmware/qcom_scm.h

@@ -19,7 +19,8 @@
 #define QCOM_SCM_FLAG_HLOS		0x01
 #define QCOM_SCM_FLAG_COLDBOOT_MC	0x02
 #define QCOM_SCM_FLAG_WARMBOOT_MC	0x04
-extern int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
+extern int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
+		const cpumask_t *cpus);
 extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
 
 #define QCOM_SCM_CMD_TERMINATE_PC	0x2
@@ -29,14 +30,34 @@ extern void __qcom_scm_cpu_power_down(u32 flags);
 
 #define QCOM_SCM_SVC_INFO		0x6
 #define QCOM_IS_CALL_AVAIL_CMD		0x1
-extern int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id);
+extern int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+		u32 cmd_id);
 
 #define QCOM_SCM_SVC_HDCP		0x11
 #define QCOM_SCM_CMD_HDCP		0x01
-extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
-		u32 *resp);
+extern int __qcom_scm_hdcp_req(struct device *dev,
+		struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
+
+extern void __qcom_scm_init(void);
+
+#define QCOM_SCM_SVC_PIL		0x2
+#define QCOM_SCM_PAS_INIT_IMAGE_CMD	0x1
+#define QCOM_SCM_PAS_MEM_SETUP_CMD	0x2
+#define QCOM_SCM_PAS_AUTH_AND_RESET_CMD	0x5
+#define QCOM_SCM_PAS_SHUTDOWN_CMD	0x6
+#define QCOM_SCM_PAS_IS_SUPPORTED_CMD	0x7
+#define QCOM_SCM_PAS_MSS_RESET		0xa
+extern bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral);
+extern int  __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
+		dma_addr_t metadata_phys);
+extern int  __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
+		phys_addr_t addr, phys_addr_t size);
+extern int  __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral);
+extern int  __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral);
+extern int  __qcom_scm_pas_mss_reset(struct device *dev, bool reset);
 
 /* common error codes */
+#define QCOM_SCM_V2_EBUSY	-12
 #define QCOM_SCM_ENOMEM		-5
 #define QCOM_SCM_EOPNOTSUPP	-4
 #define QCOM_SCM_EINVAL_ADDR	-3
@@ -44,4 +65,22 @@ extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
 #define QCOM_SCM_ERROR		-1
 #define QCOM_SCM_INTERRUPTED	1
 
+static inline int qcom_scm_remap_error(int err)
+{
+	switch (err) {
+	case QCOM_SCM_ERROR:
+		return -EIO;
+	case QCOM_SCM_EINVAL_ADDR:
+	case QCOM_SCM_EINVAL_ARG:
+		return -EINVAL;
+	case QCOM_SCM_EOPNOTSUPP:
+		return -EOPNOTSUPP;
+	case QCOM_SCM_ENOMEM:
+		return -ENOMEM;
+	case QCOM_SCM_V2_EBUSY:
+		return -EBUSY;
+	}
+	return -EINVAL;
+}
+
 #endif

+ 6 - 6
drivers/soc/qcom/smem_state.c

@@ -104,26 +104,26 @@ struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
 
 	if (con_id) {
 		index = of_property_match_string(dev->of_node,
-						 "qcom,state-names",
+						 "qcom,smem-state-names",
 						 con_id);
 		if (index < 0) {
-			dev_err(dev, "missing qcom,state-names\n");
+			dev_err(dev, "missing qcom,smem-state-names\n");
 			return ERR_PTR(index);
 		}
 	}
 
 	ret = of_parse_phandle_with_args(dev->of_node,
-					 "qcom,state",
-					 "#qcom,state-cells",
+					 "qcom,smem-states",
+					 "#qcom,smem-state-cells",
 					 index,
 					 &args);
 	if (ret) {
-		dev_err(dev, "failed to parse qcom,state property\n");
+		dev_err(dev, "failed to parse qcom,smem-states property\n");
 		return ERR_PTR(ret);
 	}
 
 	if (args.args_count != 1) {
-		dev_err(dev, "invalid #qcom,state-cells\n");
+		dev_err(dev, "invalid #qcom,smem-state-cells\n");
 		return ERR_PTR(-EINVAL);
 	}
 

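Consumers resolve the renamed properties through the smem_state API shown above. A minimal sketch of a hypothetical client driver, assuming the qcom_smem_state_get()/qcom_smem_state_update_bits() helpers from smem_state.c and consumer DT of the form qcom,smem-states = <&wcnss_smp2p_out 0>; qcom,smem-state-names = "stop":

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/qcom/smem_state.h>

static int example_signal_stop(struct device *dev)
{
	struct qcom_smem_state *state;
	unsigned int stop_bit;

	/* Matches "stop" in qcom,smem-state-names and parses qcom,smem-states
	 * using #qcom,smem-state-cells, as implemented above.
	 */
	state = qcom_smem_state_get(dev, "stop", &stop_bit);
	if (IS_ERR(state))
		return PTR_ERR(state);

	/* Raise the "stop" bit towards the remote side */
	return qcom_smem_state_update_bits(state, BIT(stop_bit), BIT(stop_bit));
}
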
+ 4 - 3
drivers/soc/qcom/smp2p.c

@@ -196,7 +196,7 @@ static irqreturn_t qcom_smp2p_intr(int irq, void *data)
 	/* Match newly created entries */
 	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
 		list_for_each_entry(entry, &smp2p->inbound, node) {
-			memcpy_fromio(buf, in->entries[i].name, sizeof(buf));
+			memcpy(buf, in->entries[i].name, sizeof(buf));
 			if (!strcmp(buf, entry->name)) {
 				entry->value = &in->entries[i].value;
 				break;
@@ -343,12 +343,13 @@ static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
 
 	/* Allocate an entry from the smem item */
 	strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
-	memcpy_toio(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);
-	out->valid_entries++;
+	memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);
 
 	/* Make the logical entry reference the physical value */
 	entry->value = &out->entries[out->valid_entries].value;
 
+	out->valid_entries++;
+
 	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
 	if (IS_ERR(entry->state)) {
 		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");

+ 1 - 1
drivers/soc/qcom/smsm.c

@@ -495,7 +495,7 @@ static int qcom_smsm_probe(struct platform_device *pdev)
 	if (!smsm->hosts)
 		return -ENOMEM;
 
-	local_node = of_find_node_with_property(pdev->dev.of_node, "#qcom,state-cells");
+	local_node = of_find_node_with_property(pdev->dev.of_node, "#qcom,smem-state-cells");
 	if (!local_node) {
 		dev_err(&pdev->dev, "no state entry\n");
 		return -EINVAL;

+ 106 - 19
drivers/soc/qcom/wcnss_ctrl.c

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016, Linaro Ltd.
  * Copyright (c) 2015, Sony Mobile Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -14,8 +15,16 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/soc/qcom/smd.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/wcnss_ctrl.h>
 
 #define WCNSS_REQUEST_TIMEOUT	(5 * HZ)
+#define WCNSS_CBC_TIMEOUT	(10 * HZ)
+
+#define WCNSS_ACK_DONE_BOOTING	1
+#define WCNSS_ACK_COLD_BOOTING	2
 
 #define NV_FRAGMENT_SIZE	3072
 #define NVBIN_FILE		"wlan/prima/WCNSS_qcom_wlan_nv.bin"
@@ -25,17 +34,19 @@
  * @dev:	device handle
  * @channel:	SMD channel handle
  * @ack:	completion for outstanding requests
+ * @cbc:	completion for cbc complete indication
  * @ack_status:	status of the outstanding request
- * @download_nv_work: worker for uploading nv binary
+ * @probe_work: worker for uploading nv binary
  */
 struct wcnss_ctrl {
 	struct device *dev;
 	struct qcom_smd_channel *channel;
 
 	struct completion ack;
+	struct completion cbc;
 	int ack_status;
 
-	struct work_struct download_nv_work;
+	struct work_struct probe_work;
 };
 
 /* message types */
@@ -48,6 +59,11 @@ enum {
 	WCNSS_UPLOAD_CAL_RESP,
 	WCNSS_DOWNLOAD_CAL_REQ,
 	WCNSS_DOWNLOAD_CAL_RESP,
+	WCNSS_VBAT_LEVEL_IND,
+	WCNSS_BUILD_VERSION_REQ,
+	WCNSS_BUILD_VERSION_RESP,
+	WCNSS_PM_CONFIG_REQ,
+	WCNSS_CBC_COMPLETE_IND,
 };
 
 /**
@@ -128,7 +144,7 @@ static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel,
 			 version->major, version->minor,
 			 version->version, version->revision);
 
-		schedule_work(&wcnss->download_nv_work);
+		complete(&wcnss->ack);
 		break;
 	case WCNSS_DOWNLOAD_NV_RESP:
 		if (count != sizeof(*nvresp)) {
@@ -141,6 +157,10 @@ static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel,
 		wcnss->ack_status = nvresp->status;
 		complete(&wcnss->ack);
 		break;
+	case WCNSS_CBC_COMPLETE_IND:
+		dev_dbg(wcnss->dev, "cold boot complete\n");
+		complete(&wcnss->cbc);
+		break;
 	default:
 		dev_info(wcnss->dev, "unknown message type %d\n", hdr->type);
 		break;
@@ -156,20 +176,32 @@ static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel,
 static int wcnss_request_version(struct wcnss_ctrl *wcnss)
 {
 	struct wcnss_msg_hdr msg;
+	int ret;
 
 	msg.type = WCNSS_VERSION_REQ;
 	msg.len = sizeof(msg);
+	ret = qcom_smd_send(wcnss->channel, &msg, sizeof(msg));
+	if (ret < 0)
+		return ret;
+
+	ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_CBC_TIMEOUT);
+	if (!ret) {
+		dev_err(wcnss->dev, "timeout waiting for version response\n");
+		return -ETIMEDOUT;
+	}
 
-	return qcom_smd_send(wcnss->channel, &msg, sizeof(msg));
+	return 0;
 }
 
 /**
  * wcnss_download_nv() - send nv binary to WCNSS
- * @work:	work struct to acquire wcnss context
+ * @wcnss:	wcnss_ctrl state handle
+ * @expect_cbc:	indicator to caller that a cbc event is expected
+ *
+ * Returns 0 on success. Negative errno on failure.
  */
-static void wcnss_download_nv(struct work_struct *work)
+static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
 {
-	struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, download_nv_work);
 	struct wcnss_download_nv_req *req;
 	const struct firmware *fw;
 	const void *data;
@@ -178,10 +210,10 @@ static void wcnss_download_nv(struct work_struct *work)
 
 	req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL);
 	if (!req)
-		return;
+		return -ENOMEM;
 
 	ret = request_firmware(&fw, NVBIN_FILE, wcnss->dev);
-	if (ret) {
+	if (ret < 0) {
 		dev_err(wcnss->dev, "Failed to load nv file %s: %d\n",
 			NVBIN_FILE, ret);
 		goto free_req;
@@ -207,7 +239,7 @@ static void wcnss_download_nv(struct work_struct *work)
 		memcpy(req->fragment, data, req->frag_size);
 
 		ret = qcom_smd_send(wcnss->channel, req, req->hdr.len);
-		if (ret) {
+		if (ret < 0) {
 			dev_err(wcnss->dev, "failed to send smd packet\n");
 			goto release_fw;
 		}
@@ -220,16 +252,58 @@ static void wcnss_download_nv(struct work_struct *work)
 	} while (left > 0);
 
 	ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_REQUEST_TIMEOUT);
-	if (!ret)
+	if (!ret) {
 		dev_err(wcnss->dev, "timeout waiting for nv upload ack\n");
-	else if (wcnss->ack_status != 1)
-		dev_err(wcnss->dev, "nv upload response failed err: %d\n",
-			wcnss->ack_status);
+		ret = -ETIMEDOUT;
+	} else {
+		*expect_cbc = wcnss->ack_status == WCNSS_ACK_COLD_BOOTING;
+		ret = 0;
+	}
 
 release_fw:
 	release_firmware(fw);
 free_req:
 	kfree(req);
+
+	return ret;
+}
+
+/**
+ * qcom_wcnss_open_channel() - open additional SMD channel to WCNSS
+ * @wcnss:	wcnss handle, retrieved from drvdata
+ * @name:	SMD channel name
+ * @cb:		callback to handle incoming data on the channel
+ */
+struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb)
+{
+	struct wcnss_ctrl *_wcnss = wcnss;
+
+	return qcom_smd_open_channel(_wcnss->channel, name, cb);
+}
+EXPORT_SYMBOL(qcom_wcnss_open_channel);
+
+static void wcnss_async_probe(struct work_struct *work)
+{
+	struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, probe_work);
+	bool expect_cbc;
+	int ret;
+
+	ret = wcnss_request_version(wcnss);
+	if (ret < 0)
+		return;
+
+	ret = wcnss_download_nv(wcnss, &expect_cbc);
+	if (ret < 0)
+		return;
+
+	/* Wait for pending cold boot completion if indicated by the nv downloader */
+	if (expect_cbc) {
+		ret = wait_for_completion_timeout(&wcnss->cbc, WCNSS_REQUEST_TIMEOUT);
+		if (!ret)
+			dev_err(wcnss->dev, "expected cold boot completion\n");
+	}
+
+	of_platform_populate(wcnss->dev->of_node, NULL, NULL, wcnss->dev);
 }
 
 static int wcnss_ctrl_probe(struct qcom_smd_device *sdev)
@@ -244,25 +318,38 @@ static int wcnss_ctrl_probe(struct qcom_smd_device *sdev)
 	wcnss->channel = sdev->channel;
 
 	init_completion(&wcnss->ack);
-	INIT_WORK(&wcnss->download_nv_work, wcnss_download_nv);
+	init_completion(&wcnss->cbc);
+	INIT_WORK(&wcnss->probe_work, wcnss_async_probe);
 
 	qcom_smd_set_drvdata(sdev->channel, wcnss);
+	dev_set_drvdata(&sdev->dev, wcnss);
+
+	schedule_work(&wcnss->probe_work);
+
+	return 0;
+}
+
+static void wcnss_ctrl_remove(struct qcom_smd_device *sdev)
+{
+	struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(sdev->channel);
 
-	return wcnss_request_version(wcnss);
+	cancel_work_sync(&wcnss->probe_work);
+	of_platform_depopulate(&sdev->dev);
 }
 
-static const struct qcom_smd_id wcnss_ctrl_smd_match[] = {
-	{ .name = "WCNSS_CTRL" },
+static const struct of_device_id wcnss_ctrl_of_match[] = {
+	{ .compatible = "qcom,wcnss", },
 	{}
 };
 
 static struct qcom_smd_driver wcnss_ctrl_driver = {
 	.probe = wcnss_ctrl_probe,
+	.remove = wcnss_ctrl_remove,
 	.callback = wcnss_ctrl_smd_callback,
-	.smd_match_table = wcnss_ctrl_smd_match,
 	.driver  = {
 		.name  = "qcom_wcnss_ctrl",
 		.owner = THIS_MODULE,
+		.of_match_table = wcnss_ctrl_of_match,
 	},
 };
 

+ 8 - 0
include/linux/qcom_scm.h

@@ -29,6 +29,14 @@ extern bool qcom_scm_hdcp_available(void);
 extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
 		u32 *resp);
 
+extern bool qcom_scm_pas_supported(u32 peripheral);
+extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
+		size_t size);
+extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
+		phys_addr_t size);
+extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
+extern int qcom_scm_pas_shutdown(u32 peripheral);
+
 #define QCOM_SCM_CPU_PWR_DOWN_L2_ON	0x0
 #define QCOM_SCM_CPU_PWR_DOWN_L2_OFF	0x1
 

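These exports give peripheral loaders a complete PAS flow. A rough sketch of the expected call order follows; EXAMPLE_PAS_ID and the firmware layout are purely illustrative, not values defined by this series.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>

#define EXAMPLE_PAS_ID	1	/* illustrative peripheral id only */

static int example_pas_boot(const void *metadata, size_t metadata_len,
			    phys_addr_t mem_phys, phys_addr_t mem_size)
{
	int ret;

	if (!qcom_scm_pas_supported(EXAMPLE_PAS_ID))
		return -ENOTSUPP;

	/* Hand the ELF/program-header metadata to the PAS state machine */
	ret = qcom_scm_pas_init_image(EXAMPLE_PAS_ID, metadata, metadata_len);
	if (ret)
		return ret;

	/* Describe the physically contiguous region the firmware will occupy */
	ret = qcom_scm_pas_mem_setup(EXAMPLE_PAS_ID, mem_phys, mem_size);
	if (ret)
		return ret;

	/* ... firmware segments are loaded into the prepared region here ... */

	/* Authenticate the image and bring the remote processor out of reset */
	return qcom_scm_pas_auth_and_reset(EXAMPLE_PAS_ID);
}
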
+ 8 - 0
include/linux/soc/qcom/wcnss_ctrl.h

@@ -0,0 +1,8 @@
+#ifndef __WCNSS_CTRL_H__
+#define __WCNSS_CTRL_H__
+
+#include <linux/soc/qcom/smd.h>
+
+struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb);
+
+#endif
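
Since wcnss_ctrl now populates child devices and exports qcom_wcnss_open_channel(), a child function driver can open its own SMD channel using its parent's drvdata. A hypothetical sketch; the channel name and callback are illustrative, only the helper and callback signature come from this series.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/soc/qcom/smd.h>
#include <linux/soc/qcom/wcnss_ctrl.h>

static int example_smd_cb(struct qcom_smd_channel *channel,
			  const void *data, size_t count)
{
	/* consume incoming data for this WCNSS function */
	return 0;
}

static int example_child_probe(struct platform_device *pdev)
{
	/* parent is the wcnss_ctrl device; its drvdata was set in probe above */
	void *wcnss = dev_get_drvdata(pdev->dev.parent);
	struct qcom_smd_channel *channel;

	channel = qcom_wcnss_open_channel(wcnss, "EXAMPLE_CTRL", example_smd_cb);

	return PTR_ERR_OR_ZERO(channel);
}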