pm_domains.c

/*
 * Rockchip Generic power domain support.
 *
 * Copyright (c) 2015 ROCKCHIP, Co. Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/err.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <dt-bindings/power/rk3288-power.h>
#include <dt-bindings/power/rk3328-power.h>
#include <dt-bindings/power/rk3368-power.h>
#include <dt-bindings/power/rk3399-power.h>

struct rockchip_domain_info {
        int pwr_mask;
        int status_mask;
        int req_mask;
        int idle_mask;
        int ack_mask;
        bool active_wakeup;
        int pwr_w_mask;
        int req_w_mask;
};

struct rockchip_pmu_info {
        u32 pwr_offset;
        u32 status_offset;
        u32 req_offset;
        u32 idle_offset;
        u32 ack_offset;

        u32 core_pwrcnt_offset;
        u32 gpu_pwrcnt_offset;

        unsigned int core_power_transition_time;
        unsigned int gpu_power_transition_time;

        int num_domains;
        const struct rockchip_domain_info *domain_info;
};

#define MAX_QOS_REGS_NUM        5
#define QOS_PRIORITY            0x08
#define QOS_MODE                0x0c
#define QOS_BANDWIDTH           0x10
#define QOS_SATURATION          0x14
#define QOS_EXTCONTROL          0x18

struct rockchip_pm_domain {
        struct generic_pm_domain genpd;
        const struct rockchip_domain_info *info;
        struct rockchip_pmu *pmu;
        int num_qos;
        struct regmap **qos_regmap;
        u32 *qos_save_regs[MAX_QOS_REGS_NUM];
        int num_clks;
        struct clk *clks[];
};

struct rockchip_pmu {
        struct device *dev;
        struct regmap *regmap;
        const struct rockchip_pmu_info *info;
        struct mutex mutex; /* mutex lock for pmu */
        struct genpd_onecell_data genpd_data;
        struct generic_pm_domain *domains[];
};

#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)

#define DOMAIN(pwr, status, req, idle, ack, wakeup)             \
{                                                               \
        .pwr_mask = (pwr >= 0) ? BIT(pwr) : 0,                  \
        .status_mask = (status >= 0) ? BIT(status) : 0,         \
        .req_mask = (req >= 0) ? BIT(req) : 0,                  \
        .idle_mask = (idle >= 0) ? BIT(idle) : 0,               \
        .ack_mask = (ack >= 0) ? BIT(ack) : 0,                  \
        .active_wakeup = wakeup,                                \
}
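
/*
 * DOMAIN_M describes domains whose power/idle-request registers carry
 * write-enable bits in the upper 16 bits (pwr_w_mask/req_w_mask), so they
 * can be updated with a plain regmap_write() instead of a read-modify-write.
 */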
#define DOMAIN_M(pwr, status, req, idle, ack, wakeup)           \
{                                                               \
        .pwr_w_mask = (pwr >= 0) ? BIT(pwr + 16) : 0,           \
        .pwr_mask = (pwr >= 0) ? BIT(pwr) : 0,                  \
        .status_mask = (status >= 0) ? BIT(status) : 0,         \
        .req_w_mask = (req >= 0) ? BIT(req + 16) : 0,           \
        .req_mask = (req >= 0) ? BIT(req) : 0,                  \
        .idle_mask = (idle >= 0) ? BIT(idle) : 0,               \
        .ack_mask = (ack >= 0) ? BIT(ack) : 0,                  \
        .active_wakeup = wakeup,                                \
}

#define DOMAIN_RK3288(pwr, status, req, wakeup)                 \
        DOMAIN(pwr, status, req, req, (req) + 16, wakeup)

#define DOMAIN_RK3328(pwr, status, req, wakeup)                 \
        DOMAIN_M(pwr, pwr, req, (req) + 10, req, wakeup)

#define DOMAIN_RK3368(pwr, status, req, wakeup)                 \
        DOMAIN(pwr, status, req, (req) + 16, req, wakeup)

#define DOMAIN_RK3399(pwr, status, req, wakeup)                 \
        DOMAIN(pwr, status, req, req, req, wakeup)
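
/*
 * Check the PMU idle status register: the domain is reported idle only when
 * all of its idle_mask bits are set.
 */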
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
        struct rockchip_pmu *pmu = pd->pmu;
        const struct rockchip_domain_info *pd_info = pd->info;
        unsigned int val;

        regmap_read(pmu->regmap, pmu->info->idle_offset, &val);
        return (val & pd_info->idle_mask) == pd_info->idle_mask;
}

static unsigned int rockchip_pmu_read_ack(struct rockchip_pmu *pmu)
{
        unsigned int val;

        regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
        return val;
}
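
/*
 * Request the domain's bus interface to enter (or leave) idle, then poll the
 * PMU ack and idle status registers until the request has taken effect.
 */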
static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
                                         bool idle)
{
        const struct rockchip_domain_info *pd_info = pd->info;
        struct generic_pm_domain *genpd = &pd->genpd;
        struct rockchip_pmu *pmu = pd->pmu;
        unsigned int target_ack;
        unsigned int val;
        bool is_idle;
        int ret;

        if (pd_info->req_mask == 0)
                return 0;
        else if (pd_info->req_w_mask)
                regmap_write(pmu->regmap, pmu->info->req_offset,
                             idle ? (pd_info->req_mask | pd_info->req_w_mask) :
                             pd_info->req_w_mask);
        else
                regmap_update_bits(pmu->regmap, pmu->info->req_offset,
                                   pd_info->req_mask, idle ? -1U : 0);

        dsb(sy);

        /* Wait until idle_ack = 1 */
        target_ack = idle ? pd_info->ack_mask : 0;
        ret = readx_poll_timeout_atomic(rockchip_pmu_read_ack, pmu, val,
                                        (val & pd_info->ack_mask) == target_ack,
                                        0, 10000);
        if (ret) {
                dev_err(pmu->dev,
                        "failed to get ack on domain '%s', val=0x%x\n",
                        genpd->name, val);
                return ret;
        }

        ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_idle, pd,
                                        is_idle, is_idle == idle, 0, 10000);
        if (ret) {
                dev_err(pmu->dev,
                        "failed to set idle on domain '%s', val=%d\n",
                        genpd->name, is_idle);
                return ret;
        }

        return 0;
}
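
/*
 * The QoS registers of each regmap referenced by the domain's "pm_qos"
 * phandles are saved before the domain is powered off and written back
 * after it is powered on again (see rockchip_pd_power()).
 */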
static int rockchip_pmu_save_qos(struct rockchip_pm_domain *pd)
{
        int i;

        for (i = 0; i < pd->num_qos; i++) {
                regmap_read(pd->qos_regmap[i],
                            QOS_PRIORITY,
                            &pd->qos_save_regs[0][i]);
                regmap_read(pd->qos_regmap[i],
                            QOS_MODE,
                            &pd->qos_save_regs[1][i]);
                regmap_read(pd->qos_regmap[i],
                            QOS_BANDWIDTH,
                            &pd->qos_save_regs[2][i]);
                regmap_read(pd->qos_regmap[i],
                            QOS_SATURATION,
                            &pd->qos_save_regs[3][i]);
                regmap_read(pd->qos_regmap[i],
                            QOS_EXTCONTROL,
                            &pd->qos_save_regs[4][i]);
        }
        return 0;
}

static int rockchip_pmu_restore_qos(struct rockchip_pm_domain *pd)
{
        int i;

        for (i = 0; i < pd->num_qos; i++) {
                regmap_write(pd->qos_regmap[i],
                             QOS_PRIORITY,
                             pd->qos_save_regs[0][i]);
                regmap_write(pd->qos_regmap[i],
                             QOS_MODE,
                             pd->qos_save_regs[1][i]);
                regmap_write(pd->qos_regmap[i],
                             QOS_BANDWIDTH,
                             pd->qos_save_regs[2][i]);
                regmap_write(pd->qos_regmap[i],
                             QOS_SATURATION,
                             pd->qos_save_regs[3][i]);
                regmap_write(pd->qos_regmap[i],
                             QOS_EXTCONTROL,
                             pd->qos_save_regs[4][i]);
        }
        return 0;
}

static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
{
        struct rockchip_pmu *pmu = pd->pmu;
        unsigned int val;

        /* check idle status for idle-only domains */
        if (pd->info->status_mask == 0)
                return !rockchip_pmu_domain_is_idle(pd);

        regmap_read(pmu->regmap, pmu->info->status_offset, &val);

        /* 1'b0: power on, 1'b1: power off */
        return !(val & pd->info->status_mask);
}
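
/*
 * Set or clear the domain's power-down bit (using the write-enable mask when
 * the PMU provides one) and poll the status until the domain has reached the
 * requested state.
 */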
static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
                                             bool on)
{
        struct rockchip_pmu *pmu = pd->pmu;
        struct generic_pm_domain *genpd = &pd->genpd;
        bool is_on;

        if (pd->info->pwr_mask == 0)
                return;
        else if (pd->info->pwr_w_mask)
                regmap_write(pmu->regmap, pmu->info->pwr_offset,
                             on ? pd->info->pwr_mask :
                             (pd->info->pwr_mask | pd->info->pwr_w_mask));
        else
                regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
                                   pd->info->pwr_mask, on ? 0 : -1U);

        dsb(sy);

        if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
                                      is_on == on, 0, 10000)) {
                dev_err(pmu->dev,
                        "failed to set domain '%s', val=%d\n",
                        genpd->name, is_on);
                return;
        }
}
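
/*
 * Common on/off path: the domain clocks are enabled for the duration of the
 * transition; before powering down, the QoS state is saved and the bus
 * interface idled, and the reverse is done after powering up.
 */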
static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
{
        int i;

        mutex_lock(&pd->pmu->mutex);

        if (rockchip_pmu_domain_is_on(pd) != power_on) {
                for (i = 0; i < pd->num_clks; i++)
                        clk_enable(pd->clks[i]);

                if (!power_on) {
                        rockchip_pmu_save_qos(pd);

                        /* if powering down, idle request to NIU first */
                        rockchip_pmu_set_idle_request(pd, true);
                }

                rockchip_do_pmu_set_power_domain(pd, power_on);

                if (power_on) {
                        /* if powering up, leave idle mode */
                        rockchip_pmu_set_idle_request(pd, false);

                        rockchip_pmu_restore_qos(pd);
                }

                for (i = pd->num_clks - 1; i >= 0; i--)
                        clk_disable(pd->clks[i]);
        }

        mutex_unlock(&pd->pmu->mutex);
        return 0;
}

static int rockchip_pd_power_on(struct generic_pm_domain *domain)
{
        struct rockchip_pm_domain *pd = to_rockchip_pd(domain);

        return rockchip_pd_power(pd, true);
}

static int rockchip_pd_power_off(struct generic_pm_domain *domain)
{
        struct rockchip_pm_domain *pd = to_rockchip_pd(domain);

        return rockchip_pd_power(pd, false);
}
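
/*
 * Create a pm_clk list for the attaching device and populate it with all
 * clocks from its DT node, so genpd can manage them (GENPD_FLAG_PM_CLK).
 */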
static int rockchip_pd_attach_dev(struct generic_pm_domain *genpd,
                                  struct device *dev)
{
        struct clk *clk;
        int i;
        int error;

        dev_dbg(dev, "attaching to power domain '%s'\n", genpd->name);

        error = pm_clk_create(dev);
        if (error) {
                dev_err(dev, "pm_clk_create failed %d\n", error);
                return error;
        }

        i = 0;
        while ((clk = of_clk_get(dev->of_node, i++)) && !IS_ERR(clk)) {
                dev_dbg(dev, "adding clock '%pC' to list of PM clocks\n", clk);
                error = pm_clk_add_clk(dev, clk);
                if (error) {
                        dev_err(dev, "pm_clk_add_clk failed %d\n", error);
                        clk_put(clk);
                        pm_clk_destroy(dev);
                        return error;
                }
        }

        return 0;
}

static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd,
                                   struct device *dev)
{
        dev_dbg(dev, "detaching from power domain '%s'\n", genpd->name);

        pm_clk_destroy(dev);
}

static bool rockchip_active_wakeup(struct device *dev)
{
        struct generic_pm_domain *genpd;
        struct rockchip_pm_domain *pd;

        genpd = pd_to_genpd(dev->pm_domain);
        pd = container_of(genpd, struct rockchip_pm_domain, genpd);

        return pd->info->active_wakeup;
}
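
/*
 * Parse one power-domain DT node ("reg", "clocks", "pm_qos"), power the
 * domain on, and register it with the genpd framework.
 */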
static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
                                      struct device_node *node)
{
        const struct rockchip_domain_info *pd_info;
        struct rockchip_pm_domain *pd;
        struct device_node *qos_node;
        struct clk *clk;
        int clk_cnt;
        int i, j;
        u32 id;
        int error;

        error = of_property_read_u32(node, "reg", &id);
        if (error) {
                dev_err(pmu->dev,
                        "%s: failed to retrieve domain id (reg): %d\n",
                        node->name, error);
                return -EINVAL;
        }

        if (id >= pmu->info->num_domains) {
                dev_err(pmu->dev, "%s: invalid domain id %d\n",
                        node->name, id);
                return -EINVAL;
        }

        pd_info = &pmu->info->domain_info[id];
        if (!pd_info) {
                dev_err(pmu->dev, "%s: undefined domain id %d\n",
                        node->name, id);
                return -EINVAL;
        }

        clk_cnt = of_count_phandle_with_args(node, "clocks", "#clock-cells");
        pd = devm_kzalloc(pmu->dev,
                          sizeof(*pd) + clk_cnt * sizeof(pd->clks[0]),
                          GFP_KERNEL);
        if (!pd)
                return -ENOMEM;

        pd->info = pd_info;
        pd->pmu = pmu;

        for (i = 0; i < clk_cnt; i++) {
                clk = of_clk_get(node, i);
                if (IS_ERR(clk)) {
                        error = PTR_ERR(clk);
                        dev_err(pmu->dev,
                                "%s: failed to get clk at index %d: %d\n",
                                node->name, i, error);
                        goto err_out;
                }

                error = clk_prepare(clk);
                if (error) {
                        dev_err(pmu->dev,
                                "%s: failed to prepare clk %pC (index %d): %d\n",
                                node->name, clk, i, error);
                        clk_put(clk);
                        goto err_out;
                }

                pd->clks[pd->num_clks++] = clk;

                dev_dbg(pmu->dev, "added clock '%pC' to domain '%s'\n",
                        clk, node->name);
        }

        pd->num_qos = of_count_phandle_with_args(node, "pm_qos",
                                                 NULL);

        if (pd->num_qos > 0) {
                pd->qos_regmap = devm_kcalloc(pmu->dev, pd->num_qos,
                                              sizeof(*pd->qos_regmap),
                                              GFP_KERNEL);
                if (!pd->qos_regmap) {
                        error = -ENOMEM;
                        goto err_out;
                }

                for (j = 0; j < MAX_QOS_REGS_NUM; j++) {
                        pd->qos_save_regs[j] = devm_kcalloc(pmu->dev,
                                                            pd->num_qos,
                                                            sizeof(u32),
                                                            GFP_KERNEL);
                        if (!pd->qos_save_regs[j]) {
                                error = -ENOMEM;
                                goto err_out;
                        }
                }

                for (j = 0; j < pd->num_qos; j++) {
                        qos_node = of_parse_phandle(node, "pm_qos", j);
                        if (!qos_node) {
                                error = -ENODEV;
                                goto err_out;
                        }
                        pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);
                        if (IS_ERR(pd->qos_regmap[j])) {
                                error = -ENODEV;
                                of_node_put(qos_node);
                                goto err_out;
                        }
                        of_node_put(qos_node);
                }
        }

        error = rockchip_pd_power(pd, true);
        if (error) {
                dev_err(pmu->dev,
                        "failed to power on domain '%s': %d\n",
                        node->name, error);
                goto err_out;
        }

        pd->genpd.name = node->name;
        pd->genpd.power_off = rockchip_pd_power_off;
        pd->genpd.power_on = rockchip_pd_power_on;
        pd->genpd.attach_dev = rockchip_pd_attach_dev;
        pd->genpd.detach_dev = rockchip_pd_detach_dev;
        pd->genpd.dev_ops.active_wakeup = rockchip_active_wakeup;
        pd->genpd.flags = GENPD_FLAG_PM_CLK;
        pm_genpd_init(&pd->genpd, NULL, false);

        pmu->genpd_data.domains[id] = &pd->genpd;
        return 0;

err_out:
        while (--i >= 0) {
                clk_unprepare(pd->clks[i]);
                clk_put(pd->clks[i]);
        }
        return error;
}

static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
{
        int i, ret;

        /*
         * We're in the error cleanup already, so we only complain,
         * but won't emit another error on top of the original one.
         */
        ret = pm_genpd_remove(&pd->genpd);
        if (ret < 0)
                dev_err(pd->pmu->dev, "failed to remove domain '%s' : %d - state may be inconsistent\n",
                        pd->genpd.name, ret);

        for (i = 0; i < pd->num_clks; i++) {
                clk_unprepare(pd->clks[i]);
                clk_put(pd->clks[i]);
        }

        /* protect the zeroing of pd->num_clks */
        mutex_lock(&pd->pmu->mutex);
        pd->num_clks = 0;
        mutex_unlock(&pd->pmu->mutex);

        /* devm will free our memory */
}

static void rockchip_pm_domain_cleanup(struct rockchip_pmu *pmu)
{
        struct generic_pm_domain *genpd;
        struct rockchip_pm_domain *pd;
        int i;

        for (i = 0; i < pmu->genpd_data.num_domains; i++) {
                genpd = pmu->genpd_data.domains[i];
                if (genpd) {
                        pd = to_rockchip_pd(genpd);
                        rockchip_pm_remove_one_domain(pd);
                }
        }

        /* devm will free our memory */
}

static void rockchip_configure_pd_cnt(struct rockchip_pmu *pmu,
                                      u32 domain_reg_offset,
                                      unsigned int count)
{
        /* First configure domain power down transition count ... */
        regmap_write(pmu->regmap, domain_reg_offset, count);
        /* ... and then power up count. */
        regmap_write(pmu->regmap, domain_reg_offset + 4, count);
}
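
/*
 * Walk the children of @parent, register each child domain, and link it to
 * its parent as a genpd subdomain; recurse to cover nested subdomains.
 */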
static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
                                     struct device_node *parent)
{
        struct device_node *np;
        struct generic_pm_domain *child_domain, *parent_domain;
        int error;

        for_each_child_of_node(parent, np) {
                u32 idx;

                error = of_property_read_u32(parent, "reg", &idx);
                if (error) {
                        dev_err(pmu->dev,
                                "%s: failed to retrieve domain id (reg): %d\n",
                                parent->name, error);
                        goto err_out;
                }
                parent_domain = pmu->genpd_data.domains[idx];

                error = rockchip_pm_add_one_domain(pmu, np);
                if (error) {
                        dev_err(pmu->dev, "failed to handle node %s: %d\n",
                                np->name, error);
                        goto err_out;
                }

                error = of_property_read_u32(np, "reg", &idx);
                if (error) {
                        dev_err(pmu->dev,
                                "%s: failed to retrieve domain id (reg): %d\n",
                                np->name, error);
                        goto err_out;
                }
                child_domain = pmu->genpd_data.domains[idx];

                error = pm_genpd_add_subdomain(parent_domain, child_domain);
                if (error) {
                        dev_err(pmu->dev, "%s failed to add subdomain %s: %d\n",
                                parent_domain->name, child_domain->name, error);
                        goto err_out;
                } else {
                        dev_dbg(pmu->dev, "%s add subdomain: %s\n",
                                parent_domain->name, child_domain->name);
                }

                rockchip_pm_add_subdomain(pmu, np);
        }

        return 0;

err_out:
        of_node_put(np);
        return error;
}
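
/*
 * Probe: look up the PMU regmap via the syscon parent, program the CORE/GPU
 * power transition counters where applicable, register every child domain
 * (and its subdomains), and expose them through a genpd onecell provider.
 */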
static int rockchip_pm_domain_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        struct device_node *node;
        struct device *parent;
        struct rockchip_pmu *pmu;
        const struct of_device_id *match;
        const struct rockchip_pmu_info *pmu_info;
        int error;

        if (!np) {
                dev_err(dev, "device tree node not found\n");
                return -ENODEV;
        }

        match = of_match_device(dev->driver->of_match_table, dev);
        if (!match || !match->data) {
                dev_err(dev, "missing pmu data\n");
                return -EINVAL;
        }

        pmu_info = match->data;

        pmu = devm_kzalloc(dev,
                           sizeof(*pmu) +
                                pmu_info->num_domains * sizeof(pmu->domains[0]),
                           GFP_KERNEL);
        if (!pmu)
                return -ENOMEM;

        pmu->dev = &pdev->dev;
        mutex_init(&pmu->mutex);

        pmu->info = pmu_info;

        pmu->genpd_data.domains = pmu->domains;
        pmu->genpd_data.num_domains = pmu_info->num_domains;

        parent = dev->parent;
        if (!parent) {
                dev_err(dev, "no parent for syscon devices\n");
                return -ENODEV;
        }

        pmu->regmap = syscon_node_to_regmap(parent->of_node);
        if (IS_ERR(pmu->regmap)) {
                dev_err(dev, "no regmap available\n");
                return PTR_ERR(pmu->regmap);
        }

        /*
         * Configure power up and down transition delays for CORE
         * and GPU domains.
         */
        if (pmu_info->core_power_transition_time)
                rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
                                          pmu_info->core_power_transition_time);
        if (pmu_info->gpu_pwrcnt_offset)
                rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
                                          pmu_info->gpu_power_transition_time);

        error = -ENODEV;

        for_each_available_child_of_node(np, node) {
                error = rockchip_pm_add_one_domain(pmu, node);
                if (error) {
                        dev_err(dev, "failed to handle node %s: %d\n",
                                node->name, error);
                        of_node_put(node);
                        goto err_out;
                }

                error = rockchip_pm_add_subdomain(pmu, node);
                if (error < 0) {
                        dev_err(dev, "failed to handle subdomain node %s: %d\n",
                                node->name, error);
                        of_node_put(node);
                        goto err_out;
                }
        }

        if (error) {
                dev_dbg(dev, "no power domains defined\n");
                goto err_out;
        }

        error = of_genpd_add_provider_onecell(np, &pmu->genpd_data);
        if (error) {
                dev_err(dev, "failed to add provider: %d\n", error);
                goto err_out;
        }

        return 0;

err_out:
        rockchip_pm_domain_cleanup(pmu);
        return error;
}

static const struct rockchip_domain_info rk3288_pm_domains[] = {
        [RK3288_PD_VIO]         = DOMAIN_RK3288(7, 7, 4, false),
        [RK3288_PD_HEVC]        = DOMAIN_RK3288(14, 10, 9, false),
        [RK3288_PD_VIDEO]       = DOMAIN_RK3288(8, 8, 3, false),
        [RK3288_PD_GPU]         = DOMAIN_RK3288(9, 9, 2, false),
};

static const struct rockchip_domain_info rk3328_pm_domains[] = {
        [RK3328_PD_CORE]        = DOMAIN_RK3328(-1, 0, 0, false),
        [RK3328_PD_GPU]         = DOMAIN_RK3328(-1, 1, 1, false),
        [RK3328_PD_BUS]         = DOMAIN_RK3328(-1, 2, 2, true),
        [RK3328_PD_MSCH]        = DOMAIN_RK3328(-1, 3, 3, true),
        [RK3328_PD_PERI]        = DOMAIN_RK3328(-1, 4, 4, true),
        [RK3328_PD_VIDEO]       = DOMAIN_RK3328(-1, 5, 5, false),
        [RK3328_PD_HEVC]        = DOMAIN_RK3328(-1, 6, 6, false),
        [RK3328_PD_VIO]         = DOMAIN_RK3328(-1, 8, 8, false),
        [RK3328_PD_VPU]         = DOMAIN_RK3328(-1, 9, 9, false),
};

static const struct rockchip_domain_info rk3368_pm_domains[] = {
        [RK3368_PD_PERI]        = DOMAIN_RK3368(13, 12, 6, true),
        [RK3368_PD_VIO]         = DOMAIN_RK3368(15, 14, 8, false),
        [RK3368_PD_VIDEO]       = DOMAIN_RK3368(14, 13, 7, false),
        [RK3368_PD_GPU_0]       = DOMAIN_RK3368(16, 15, 2, false),
        [RK3368_PD_GPU_1]       = DOMAIN_RK3368(17, 16, 2, false),
};

static const struct rockchip_domain_info rk3399_pm_domains[] = {
        [RK3399_PD_TCPD0]       = DOMAIN_RK3399(8, 8, -1, false),
        [RK3399_PD_TCPD1]       = DOMAIN_RK3399(9, 9, -1, false),
        [RK3399_PD_CCI]         = DOMAIN_RK3399(10, 10, -1, true),
        [RK3399_PD_CCI0]        = DOMAIN_RK3399(-1, -1, 15, true),
        [RK3399_PD_CCI1]        = DOMAIN_RK3399(-1, -1, 16, true),
        [RK3399_PD_PERILP]      = DOMAIN_RK3399(11, 11, 1, true),
        [RK3399_PD_PERIHP]      = DOMAIN_RK3399(12, 12, 2, true),
        [RK3399_PD_CENTER]      = DOMAIN_RK3399(13, 13, 14, true),
        [RK3399_PD_VIO]         = DOMAIN_RK3399(14, 14, 17, false),
        [RK3399_PD_GPU]         = DOMAIN_RK3399(15, 15, 0, false),
        [RK3399_PD_VCODEC]      = DOMAIN_RK3399(16, 16, 3, false),
        [RK3399_PD_VDU]         = DOMAIN_RK3399(17, 17, 4, false),
        [RK3399_PD_RGA]         = DOMAIN_RK3399(18, 18, 5, false),
        [RK3399_PD_IEP]         = DOMAIN_RK3399(19, 19, 6, false),
        [RK3399_PD_VO]          = DOMAIN_RK3399(20, 20, -1, false),
        [RK3399_PD_VOPB]        = DOMAIN_RK3399(-1, -1, 7, false),
        [RK3399_PD_VOPL]        = DOMAIN_RK3399(-1, -1, 8, false),
        [RK3399_PD_ISP0]        = DOMAIN_RK3399(22, 22, 9, false),
        [RK3399_PD_ISP1]        = DOMAIN_RK3399(23, 23, 10, false),
        [RK3399_PD_HDCP]        = DOMAIN_RK3399(24, 24, 11, false),
        [RK3399_PD_GMAC]        = DOMAIN_RK3399(25, 25, 23, true),
        [RK3399_PD_EMMC]        = DOMAIN_RK3399(26, 26, 24, true),
        [RK3399_PD_USB3]        = DOMAIN_RK3399(27, 27, 12, true),
        [RK3399_PD_EDP]         = DOMAIN_RK3399(28, 28, 22, false),
        [RK3399_PD_GIC]         = DOMAIN_RK3399(29, 29, 27, true),
        [RK3399_PD_SD]          = DOMAIN_RK3399(30, 30, 28, true),
        [RK3399_PD_SDIOAUDIO]   = DOMAIN_RK3399(31, 31, 29, true),
};

static const struct rockchip_pmu_info rk3288_pmu = {
        .pwr_offset = 0x08,
        .status_offset = 0x0c,
        .req_offset = 0x10,
        .idle_offset = 0x14,
        .ack_offset = 0x14,

        .core_pwrcnt_offset = 0x34,
        .gpu_pwrcnt_offset = 0x3c,

        .core_power_transition_time = 24, /* 1us */
        .gpu_power_transition_time = 24, /* 1us */

        .num_domains = ARRAY_SIZE(rk3288_pm_domains),
        .domain_info = rk3288_pm_domains,
};

static const struct rockchip_pmu_info rk3328_pmu = {
        .req_offset = 0x414,
        .idle_offset = 0x484,
        .ack_offset = 0x484,

        .num_domains = ARRAY_SIZE(rk3328_pm_domains),
        .domain_info = rk3328_pm_domains,
};

static const struct rockchip_pmu_info rk3368_pmu = {
        .pwr_offset = 0x0c,
        .status_offset = 0x10,
        .req_offset = 0x3c,
        .idle_offset = 0x40,
        .ack_offset = 0x40,

        .core_pwrcnt_offset = 0x48,
        .gpu_pwrcnt_offset = 0x50,

        .core_power_transition_time = 24,
        .gpu_power_transition_time = 24,

        .num_domains = ARRAY_SIZE(rk3368_pm_domains),
        .domain_info = rk3368_pm_domains,
};

static const struct rockchip_pmu_info rk3399_pmu = {
        .pwr_offset = 0x14,
        .status_offset = 0x18,
        .req_offset = 0x60,
        .idle_offset = 0x64,
        .ack_offset = 0x68,

        /* ARM Trusted Firmware manages power transition times */

        .num_domains = ARRAY_SIZE(rk3399_pm_domains),
        .domain_info = rk3399_pm_domains,
};

static const struct of_device_id rockchip_pm_domain_dt_match[] = {
        {
                .compatible = "rockchip,rk3288-power-controller",
                .data = (void *)&rk3288_pmu,
        },
        {
                .compatible = "rockchip,rk3328-power-controller",
                .data = (void *)&rk3328_pmu,
        },
        {
                .compatible = "rockchip,rk3368-power-controller",
                .data = (void *)&rk3368_pmu,
        },
        {
                .compatible = "rockchip,rk3399-power-controller",
                .data = (void *)&rk3399_pmu,
        },
        { /* sentinel */ },
};

static struct platform_driver rockchip_pm_domain_driver = {
        .probe = rockchip_pm_domain_probe,
        .driver = {
                .name = "rockchip-pm-domain",
                .of_match_table = rockchip_pm_domain_dt_match,
                /*
                 * We can't forcibly eject devices from the power domain,
                 * so we can't really remove power domains once they
                 * were added.
                 */
                .suppress_bind_attrs = true,
        },
};

static int __init rockchip_pm_domain_drv_register(void)
{
        return platform_driver_register(&rockchip_pm_domain_driver);
}
postcore_initcall(rockchip_pm_domain_drv_register);