qcom_q6v5_mss.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508
  1. /*
  2. * Qualcomm self-authenticating modem subsystem remoteproc driver
  3. *
  4. * Copyright (C) 2016 Linaro Ltd.
  5. * Copyright (C) 2014 Sony Mobile Communications AB
  6. * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License
  10. * version 2 as published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. */
  17. #include <linux/clk.h>
  18. #include <linux/delay.h>
  19. #include <linux/dma-mapping.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/kernel.h>
  22. #include <linux/mfd/syscon.h>
  23. #include <linux/module.h>
  24. #include <linux/of_address.h>
  25. #include <linux/of_device.h>
  26. #include <linux/platform_device.h>
  27. #include <linux/regmap.h>
  28. #include <linux/regulator/consumer.h>
  29. #include <linux/remoteproc.h>
  30. #include <linux/reset.h>
  31. #include <linux/soc/qcom/mdt_loader.h>
  32. #include <linux/iopoll.h>
  33. #include "remoteproc_internal.h"
  34. #include "qcom_common.h"
  35. #include "qcom_q6v5.h"
  36. #include <linux/qcom_scm.h>
/* SMEM item id of the ASCII crash-reason string published by the modem */
#define MPSS_CRASH_REASON_SMEM		421

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18
#define RMB_MBA_MSS_STATUS		0x40
#define RMB_MBA_ALT_RESET		0x44

/* Commands written to RMB_MBA_COMMAND_REG */
#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030
#define QDSP6SS_MEM_PWR_CTL		0x0B0
#define QDSP6SS_STRAP_ACC		0x110

/* AXI Halt Register Offsets (relative to the per-port base in halt_map) */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8

#define HALT_ACK_TIMEOUT_MS		100

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP		BIT(25)
#define QDSP6v56_BHS_ON			BIT(24)
#define QDSP6v56_CLAMP_WL		BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
#define HALT_CHECK_MAX_LOOPS		200
#define QDSP6SS_XO_CBCR			0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL	0x20

/* QDSP6v65 parameters */
#define QDSP6SS_SLEEP			0x3C
#define QDSP6SS_BOOT_CORE_START		0x400
#define QDSP6SS_BOOT_CMD		0x404
#define SLEEP_CHECK_MAX_LOOPS		200
#define BOOT_FSM_TIMEOUT		10000
/* Runtime state for one regulator supply: handle plus the requested levels */
struct reg_info {
	struct regulator *reg;
	int uV;		/* requested voltage; <= 0 means "don't set" */
	int uA;		/* requested load; <= 0 means "don't set" */
};
/*
 * Static per-SoC description of one regulator supply. Tables of these are
 * terminated by an entry with .supply == NULL (see q6v5_regulator_init()).
 */
struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};
/* Per-SoC match data: firmware name, supplies, clock lists and feature flags */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;		/* default MBA firmware name */
	struct qcom_mss_reg_res *proxy_supply;	/* held only across boot */
	struct qcom_mss_reg_res *active_supply;	/* held while running */
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	int version;				/* MSS_* enum value */
	bool need_mem_protection;		/* use SCM assign-mem calls */
	bool has_alt_reset;			/* PDC + pulsed mss_restart */
};
/* Driver instance state for one modem subsystem (MSS) remoteproc */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;		/* QDSP6SS register space */
	void __iomem *rmb_base;		/* RMB (boot status/command) registers */

	struct regmap *halt_map;	/* syscon holding the AXI halt registers */
	u32 halt_q6;			/* per-port offsets within halt_map */
	u32 halt_modem;
	u32 halt_nc;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;		/* shared q6v5 state (IRQs, start/stop) */

	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	bool running;
	bool dump_mba_loaded;		/* MBA kept alive for coredump copying */
	unsigned long dump_segment_mask;	/* segments dumped so far */
	unsigned long dump_complete_mask;	/* all segments expected */

	phys_addr_t mba_phys;		/* reserved memory for the MBA image */
	void *mba_region;
	size_t mba_size;

	phys_addr_t mpss_phys;		/* reserved memory for the MPSS image */
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;
	bool need_mem_protection;	/* route region ownership through SCM */
	bool has_alt_reset;		/* SDM845-style alternate reset scheme */
	int mpss_perm;			/* current VMID bitmap of mpss region */
	int mba_perm;			/* current VMID bitmap of mba region */
	int version;			/* MSS_* enum value */
};
/* Values for rproc_hexagon_res::version / q6v5::version */
enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_SDM845,
};
  168. static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
  169. const struct qcom_mss_reg_res *reg_res)
  170. {
  171. int rc;
  172. int i;
  173. if (!reg_res)
  174. return 0;
  175. for (i = 0; reg_res[i].supply; i++) {
  176. regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
  177. if (IS_ERR(regs[i].reg)) {
  178. rc = PTR_ERR(regs[i].reg);
  179. if (rc != -EPROBE_DEFER)
  180. dev_err(dev, "Failed to get %s\n regulator",
  181. reg_res[i].supply);
  182. return rc;
  183. }
  184. regs[i].uV = reg_res[i].uV;
  185. regs[i].uA = reg_res[i].uA;
  186. }
  187. return i;
  188. }
/*
 * Apply the requested voltage and load for each supply in @regs, then enable
 * it. Returns 0 on success; on failure the supplies configured so far are
 * unwound and a negative errno is returned.
 *
 * NOTE(review): the unwind loop starts at the failing index i, so when
 * regulator_enable() itself fails it calls regulator_disable() on a supply
 * that was never enabled — confirm this is intentional.
 */
static int q6v5_regulator_enable(struct q6v5 *qproc,
				 struct reg_info *regs, int count)
{
	int ret;
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0) {
			ret = regulator_set_voltage(regs[i].reg,
					regs[i].uV, INT_MAX);
			if (ret) {
				dev_err(qproc->dev,
					"Failed to request voltage for %d.\n",
						i);
				goto err;
			}
		}

		if (regs[i].uA > 0) {
			ret = regulator_set_load(regs[i].reg,
						 regs[i].uA);
			if (ret < 0) {
				dev_err(qproc->dev,
					"Failed to set regulator mode\n");
				goto err;
			}
		}

		ret = regulator_enable(regs[i].reg);
		if (ret) {
			dev_err(qproc->dev, "Regulator enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	/* Undo voltage/load requests and disable everything touched so far */
	for (; i >= 0; i--) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}

	return ret;
}
  231. static void q6v5_regulator_disable(struct q6v5 *qproc,
  232. struct reg_info *regs, int count)
  233. {
  234. int i;
  235. for (i = 0; i < count; i++) {
  236. if (regs[i].uV > 0)
  237. regulator_set_voltage(regs[i].reg, 0, INT_MAX);
  238. if (regs[i].uA > 0)
  239. regulator_set_load(regs[i].reg, 0);
  240. regulator_disable(regs[i].reg);
  241. }
  242. }
  243. static int q6v5_clk_enable(struct device *dev,
  244. struct clk **clks, int count)
  245. {
  246. int rc;
  247. int i;
  248. for (i = 0; i < count; i++) {
  249. rc = clk_prepare_enable(clks[i]);
  250. if (rc) {
  251. dev_err(dev, "Clock enable failed\n");
  252. goto err;
  253. }
  254. }
  255. return 0;
  256. err:
  257. for (i--; i >= 0; i--)
  258. clk_disable_unprepare(clks[i]);
  259. return rc;
  260. }
  261. static void q6v5_clk_disable(struct device *dev,
  262. struct clk **clks, int count)
  263. {
  264. int i;
  265. for (i = 0; i < count; i++)
  266. clk_disable_unprepare(clks[i]);
  267. }
/*
 * Hand the region [addr, addr + size) to the modem (@remote_owner == true)
 * or reclaim it for HLOS, via a secure-world (SCM) call.
 *
 * @current_perm is in/out: it holds the current VMID bitmap and is updated
 * by qcom_scm_assign_mem() on success. No-op when memory protection is
 * disabled or the region already belongs to the requested owner.
 */
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
				   bool remote_owner, phys_addr_t addr,
				   size_t size)
{
	struct qcom_scm_vmperm next;

	if (!qproc->need_mem_protection)
		return 0;
	if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
		return 0;
	if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
		return 0;

	next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
	/* modem gets RW, HLOS gets RWX back */
	next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;

	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, &next, 1);
}
  284. static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
  285. {
  286. struct q6v5 *qproc = rproc->priv;
  287. memcpy(qproc->mba_region, fw->data, fw->size);
  288. return 0;
  289. }
  290. static int q6v5_reset_assert(struct q6v5 *qproc)
  291. {
  292. int ret;
  293. if (qproc->has_alt_reset) {
  294. reset_control_assert(qproc->pdc_reset);
  295. ret = reset_control_reset(qproc->mss_restart);
  296. reset_control_deassert(qproc->pdc_reset);
  297. } else {
  298. ret = reset_control_assert(qproc->mss_restart);
  299. }
  300. return ret;
  301. }
  302. static int q6v5_reset_deassert(struct q6v5 *qproc)
  303. {
  304. int ret;
  305. if (qproc->has_alt_reset) {
  306. reset_control_assert(qproc->pdc_reset);
  307. writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
  308. ret = reset_control_reset(qproc->mss_restart);
  309. writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
  310. reset_control_deassert(qproc->pdc_reset);
  311. } else {
  312. ret = reset_control_deassert(qproc->mss_restart);
  313. }
  314. return ret;
  315. }
  316. static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
  317. {
  318. unsigned long timeout;
  319. s32 val;
  320. timeout = jiffies + msecs_to_jiffies(ms);
  321. for (;;) {
  322. val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
  323. if (val)
  324. break;
  325. if (time_after(jiffies, timeout))
  326. return -ETIMEDOUT;
  327. msleep(1);
  328. }
  329. return val;
  330. }
  331. static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
  332. {
  333. unsigned long timeout;
  334. s32 val;
  335. timeout = jiffies + msecs_to_jiffies(ms);
  336. for (;;) {
  337. val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
  338. if (val < 0)
  339. break;
  340. if (!status && val)
  341. break;
  342. else if (status && val == status)
  343. break;
  344. if (time_after(jiffies, timeout))
  345. return -ETIMEDOUT;
  346. msleep(1);
  347. }
  348. return val;
  349. }
/*
 * Power up and release the Hexagon core following the version-specific
 * sequence, then wait for the PBL to report success.
 *
 * SDM845 uses a hardware boot FSM; MSM8996 and older parts are brought up
 * by hand-sequencing the QDSP6SS power-control and reset registers. The
 * exact register write order below is the documented bring-up sequence —
 * do not reorder.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Request the sleep clock and wait for CLKOFF to clear */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & BIT(31)), 1,
					 SLEEP_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* FSM completion is reported through the RMB MSS status reg */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996) {
		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & BIT(31)), 1,
					 HALT_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to ensure the write landed before the delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
		for (i = 19; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base +
						QDSP6SS_MEM_PWR_CTL);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Pre-v56 bring-up (MSM8916/MSM8974 class) */
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
/*
 * Halt one AXI bus port through the halt register block at @offset in
 * @halt_map: request the halt, wait up to HALT_ACK_TIMEOUT_MS for the ack,
 * verify the port reports idle, then clear the request again (the port
 * stays halted until the next reset).
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		/* Stop on read error, ack received, or timeout */
		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
		if (ret || val || time_after(jiffies, timeout))
			break;

		msleep(1);
	}

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
/*
 * Feed the MPSS metadata blob (modem.mdt) to the MBA for authentication.
 *
 * The blob is copied into a physically contiguous DMA buffer, ownership of
 * that buffer is handed to the modem, and the MBA is told via the RMB
 * registers to authenticate it. Modem access is revoked again before the
 * buffer is freed, regardless of the authentication result.
 *
 * Returns 0 on success (positive RMB status values are mapped to 0) or a
 * negative errno.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	int mdata_perm;
	int xferop_ret;
	void *ptr;
	int ret;

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
				      true, phys, fw->size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Point the MBA at the metadata and kick off authentication */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
					     false, phys, fw->size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);

	return ret < 0 ? ret : 0;
}
  556. static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
  557. {
  558. if (phdr->p_type != PT_LOAD)
  559. return false;
  560. if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
  561. return false;
  562. if (!phdr->p_memsz)
  563. return false;
  564. return true;
  565. }
/*
 * Power up the subsystem and boot the MBA (Modem Boot Authenticator).
 *
 * Enables proxy and active supplies/clocks, deasserts the MSS reset, grants
 * the modem access to the MBA region, releases the Q6 core and waits for
 * the MBA to report XPU unlock. On any failure the sequence is unwound in
 * reverse through the goto ladder below.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_irqs;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Tell the boot ROM where the MBA image lives, then release the core */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	/* Any non-zero MBA status ends the wait; only XPU-unlock is success */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);

assert_reset:
	q6v5_reset_assert(qproc);

disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);

disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);

disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}
/*
 * Undo q6v5_mba_load(): halt the AXI ports, reclaim memory ownership for
 * HLOS and release clocks, regulators and resets.
 *
 * NOTE(review): a non-zero return from qcom_q6v5_unprepare() is treated as
 * "proxy resources are still held here" — confirm that contract against
 * qcom_q6v5.c.
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	/* Return the MPSS region to HLOS */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
				      false, qproc->mpss_phys,
				      qproc->mpss_size);
	WARN_ON(ret);

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}
/*
 * Load and authenticate the MPSS (modem) firmware.
 *
 * Loads modem.mdt, has the MBA authenticate its headers, copies every valid
 * PT_LOAD segment (modem.bNN files) into the reserved MPSS region, hands
 * the region over to the modem and waits for the MBA to report
 * authentication of the complete image.
 */
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = PHYS_ADDR_MAX;
	phys_addr_t max_addr = 0;
	bool relocate = false;
	char seg_name[10];
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	ret = request_firmware(&fw, "modem.mdt", qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load modem.mdt\n");
		return ret;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	/* First pass: find the load-address window and relocation need */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	qproc->mpss_reloc = mpss_reloc;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		/* Reject segments that fall outside the reserved region */
		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz) {
			snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
			ret = request_firmware(&seg_fw, seg_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", seg_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		/* Zero the bss-style tail beyond the file contents */
		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		size += phdr->p_memsz;
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	/* Publish start address and total length, then request validation */
	boot_addr = relocate ? qproc->mpss_phys : min_addr;
	writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
	writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
	writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);

	/* Positive RMB status means success; map it to 0 for the caller */
	return ret < 0 ? ret : 0;
}
/*
 * Coredump callback for one registered MPSS segment.
 *
 * Copies @segment->size bytes of the segment into @dest.  If the modem
 * region is not accessible yet, the MBA is loaded first to unlock it.
 * When the segment cannot be mapped (or the MBA load failed) the
 * destination is filled with 0xff instead of real data.  Progress is
 * tracked in dump_segment_mask; once it matches dump_complete_mask the
 * MBA is reclaimed again.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	/* segment->priv carries the segment index (see register_dump_segments) */
	unsigned long mask = BIT((unsigned long)segment->priv);
	void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded)
		ret = q6v5_mba_load(qproc);

	if (!ptr || ret)
		memset(dest, 0xff, segment->size);
	else
		memcpy(dest, ptr, segment->size);

	qproc->dump_segment_mask |= mask;

	/* Reclaim mba after copying segments */
	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
		if (qproc->dump_mba_loaded)
			q6v5_mba_reclaim(qproc);
	}
}
/*
 * rproc .start handler: boot the modem subsystem.
 *
 * Boots the MBA firmware, loads and authenticates the MPSS image through
 * it, then waits up to 5 s for the modem's start acknowledgment.  On
 * success, ownership of the MBA buffer is handed back to Linux (a failure
 * there is logged but not fatal).  On failure, ownership of the mpss
 * region is returned to Linux and the MBA is torn down.
 *
 * Returns 0 on success or a negative errno.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/* Modem is up; the MBA buffer can be returned to Linux */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->dump_segment_mask = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, qproc->mpss_phys,
						qproc->mpss_size);
	WARN_ON(xfermemop_ret);
	q6v5_mba_reclaim(qproc);

	return ret;
}
  856. static int q6v5_stop(struct rproc *rproc)
  857. {
  858. struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
  859. int ret;
  860. qproc->running = false;
  861. ret = qcom_q6v5_request_stop(&qproc->q6v5);
  862. if (ret == -ETIMEDOUT)
  863. dev_err(qproc->dev, "timed out on wait\n");
  864. q6v5_mba_reclaim(qproc);
  865. return 0;
  866. }
  867. static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
  868. {
  869. struct q6v5 *qproc = rproc->priv;
  870. int offset;
  871. offset = da - qproc->mpss_reloc;
  872. if (offset < 0 || offset + len > qproc->mpss_size)
  873. return NULL;
  874. return qproc->mpss_region + offset;
  875. }
  876. static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
  877. const struct firmware *mba_fw)
  878. {
  879. const struct firmware *fw;
  880. const struct elf32_phdr *phdrs;
  881. const struct elf32_phdr *phdr;
  882. const struct elf32_hdr *ehdr;
  883. struct q6v5 *qproc = rproc->priv;
  884. unsigned long i;
  885. int ret;
  886. ret = request_firmware(&fw, "modem.mdt", qproc->dev);
  887. if (ret < 0) {
  888. dev_err(qproc->dev, "unable to load modem.mdt\n");
  889. return ret;
  890. }
  891. ehdr = (struct elf32_hdr *)fw->data;
  892. phdrs = (struct elf32_phdr *)(ehdr + 1);
  893. qproc->dump_complete_mask = 0;
  894. for (i = 0; i < ehdr->e_phnum; i++) {
  895. phdr = &phdrs[i];
  896. if (!q6v5_phdr_valid(phdr))
  897. continue;
  898. ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
  899. phdr->p_memsz,
  900. qcom_q6v5_dump_segment,
  901. (void *)i);
  902. if (ret)
  903. break;
  904. qproc->dump_complete_mask |= BIT(i);
  905. }
  906. release_firmware(fw);
  907. return ret;
  908. }
/* Operations exposed to the remoteproc core for the modem subsystem */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};
/*
 * MSA handover callback (passed to qcom_q6v5_init): once the modem has
 * taken over voting for its own resources, release the proxy clocks and
 * regulators that were held on its behalf during boot.
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
}
/*
 * Map the "qdsp6" and "rmb" register regions and parse the
 * "qcom,halt-regs" property (a syscon phandle plus three register
 * offsets: Q6, modem and NC halt ports).
 *
 * Returns 0 on success or a negative errno.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	return 0;
}
  952. static int q6v5_init_clocks(struct device *dev, struct clk **clks,
  953. char **clk_names)
  954. {
  955. int i;
  956. if (!clk_names)
  957. return 0;
  958. for (i = 0; clk_names[i]; i++) {
  959. clks[i] = devm_clk_get(dev, clk_names[i]);
  960. if (IS_ERR(clks[i])) {
  961. int rc = PTR_ERR(clks[i]);
  962. if (rc != -EPROBE_DEFER)
  963. dev_err(dev, "Failed to get %s clock\n",
  964. clk_names[i]);
  965. return rc;
  966. }
  967. }
  968. return i;
  969. }
  970. static int q6v5_init_reset(struct q6v5 *qproc)
  971. {
  972. qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
  973. "mss_restart");
  974. if (IS_ERR(qproc->mss_restart)) {
  975. dev_err(qproc->dev, "failed to acquire mss restart\n");
  976. return PTR_ERR(qproc->mss_restart);
  977. }
  978. if (qproc->has_alt_reset) {
  979. qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
  980. "pdc_reset");
  981. if (IS_ERR(qproc->pdc_reset)) {
  982. dev_err(qproc->dev, "failed to acquire pdc reset\n");
  983. return PTR_ERR(qproc->pdc_reset);
  984. }
  985. }
  986. return 0;
  987. }
  988. static int q6v5_alloc_memory_region(struct q6v5 *qproc)
  989. {
  990. struct device_node *child;
  991. struct device_node *node;
  992. struct resource r;
  993. int ret;
  994. child = of_get_child_by_name(qproc->dev->of_node, "mba");
  995. node = of_parse_phandle(child, "memory-region", 0);
  996. ret = of_address_to_resource(node, 0, &r);
  997. if (ret) {
  998. dev_err(qproc->dev, "unable to resolve mba region\n");
  999. return ret;
  1000. }
  1001. of_node_put(node);
  1002. qproc->mba_phys = r.start;
  1003. qproc->mba_size = resource_size(&r);
  1004. qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
  1005. if (!qproc->mba_region) {
  1006. dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
  1007. &r.start, qproc->mba_size);
  1008. return -EBUSY;
  1009. }
  1010. child = of_get_child_by_name(qproc->dev->of_node, "mpss");
  1011. node = of_parse_phandle(child, "memory-region", 0);
  1012. ret = of_address_to_resource(node, 0, &r);
  1013. if (ret) {
  1014. dev_err(qproc->dev, "unable to resolve mpss region\n");
  1015. return ret;
  1016. }
  1017. of_node_put(node);
  1018. qproc->mpss_phys = qproc->mpss_reloc = r.start;
  1019. qproc->mpss_size = resource_size(&r);
  1020. qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
  1021. if (!qproc->mpss_region) {
  1022. dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
  1023. &r.start, qproc->mpss_size);
  1024. return -EBUSY;
  1025. }
  1026. return 0;
  1027. }
  1028. static int q6v5_probe(struct platform_device *pdev)
  1029. {
  1030. const struct rproc_hexagon_res *desc;
  1031. struct q6v5 *qproc;
  1032. struct rproc *rproc;
  1033. int ret;
  1034. desc = of_device_get_match_data(&pdev->dev);
  1035. if (!desc)
  1036. return -EINVAL;
  1037. if (desc->need_mem_protection && !qcom_scm_is_available())
  1038. return -EPROBE_DEFER;
  1039. rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
  1040. desc->hexagon_mba_image, sizeof(*qproc));
  1041. if (!rproc) {
  1042. dev_err(&pdev->dev, "failed to allocate rproc\n");
  1043. return -ENOMEM;
  1044. }
  1045. qproc = (struct q6v5 *)rproc->priv;
  1046. qproc->dev = &pdev->dev;
  1047. qproc->rproc = rproc;
  1048. platform_set_drvdata(pdev, qproc);
  1049. ret = q6v5_init_mem(qproc, pdev);
  1050. if (ret)
  1051. goto free_rproc;
  1052. ret = q6v5_alloc_memory_region(qproc);
  1053. if (ret)
  1054. goto free_rproc;
  1055. ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
  1056. desc->proxy_clk_names);
  1057. if (ret < 0) {
  1058. dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
  1059. goto free_rproc;
  1060. }
  1061. qproc->proxy_clk_count = ret;
  1062. ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
  1063. desc->reset_clk_names);
  1064. if (ret < 0) {
  1065. dev_err(&pdev->dev, "Failed to get reset clocks.\n");
  1066. goto free_rproc;
  1067. }
  1068. qproc->reset_clk_count = ret;
  1069. ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
  1070. desc->active_clk_names);
  1071. if (ret < 0) {
  1072. dev_err(&pdev->dev, "Failed to get active clocks.\n");
  1073. goto free_rproc;
  1074. }
  1075. qproc->active_clk_count = ret;
  1076. ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
  1077. desc->proxy_supply);
  1078. if (ret < 0) {
  1079. dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
  1080. goto free_rproc;
  1081. }
  1082. qproc->proxy_reg_count = ret;
  1083. ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
  1084. desc->active_supply);
  1085. if (ret < 0) {
  1086. dev_err(&pdev->dev, "Failed to get active regulators.\n");
  1087. goto free_rproc;
  1088. }
  1089. qproc->active_reg_count = ret;
  1090. qproc->has_alt_reset = desc->has_alt_reset;
  1091. ret = q6v5_init_reset(qproc);
  1092. if (ret)
  1093. goto free_rproc;
  1094. qproc->version = desc->version;
  1095. qproc->need_mem_protection = desc->need_mem_protection;
  1096. ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
  1097. qcom_msa_handover);
  1098. if (ret)
  1099. goto free_rproc;
  1100. qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
  1101. qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
  1102. qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
  1103. qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
  1104. qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
  1105. qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
  1106. ret = rproc_add(rproc);
  1107. if (ret)
  1108. goto free_rproc;
  1109. return 0;
  1110. free_rproc:
  1111. rproc_free(rproc);
  1112. return ret;
  1113. }
/*
 * Unbind: unregister the remoteproc, tear down the subdevices added in
 * q6v5_probe(), then release the rproc.
 */
static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);

	rproc_del(qproc->rproc);

	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
	qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
	qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);

	rproc_free(qproc->rproc);

	return 0;
}
/* SDM845: SCM memory protection and the alternate (PDC) reset sequence */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
			"xo",
			"prng",
			NULL
	},
	.reset_clk_names = (char*[]){
			"iface",
			"snoc_axi",
			NULL
	},
	.active_clk_names = (char*[]){
			"bus",
			"mem",
			"gpll0_mss",
			"mnoc_axi",
			NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.version = MSS_SDM845,
};
/* MSM8996: SCM memory protection, standard reset sequence */
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
			"xo",
			"pnoc",
			NULL
	},
	.active_clk_names = (char*[]){
			"iface",
			"bus",
			"mem",
			"gpll0_mss_clk",
			NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.version = MSS_MSM8996,
};
/* MSM8916: no SCM memory protection; proxy regulators voted during boot */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.version = MSS_MSM8916,
};
/* MSM8974: split MBA image name; both proxy and active supplies needed */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.version = MSS_MSM8974,
};
/*
 * Device-tree match table.  The legacy "qcom,q6v5-pil" compatible maps to
 * the MSM8916 resource set.
 */
static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);
/* Platform driver glue and module metadata */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");