acpi_lpss.c

/*
 * ACPI support for Intel Lynxpoint LPSS.
 *
 * Copyright (C) 2013, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/platform_data/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/delay.h>

#include "internal.h"

ACPI_MODULE_NAME("acpi_lpss");

#ifdef CONFIG_X86_INTEL_LPSS

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>

#define LPSS_ADDR(desc) ((unsigned long)&desc)

#define LPSS_CLK_SIZE 0x04
#define LPSS_LTR_SIZE 0x18

/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_CLK_DIVIDER_DEF_MASK (BIT(1) | BIT(16))
#define LPSS_RESETS 0x04
#define LPSS_RESETS_RESET_FUNC BIT(0)
#define LPSS_RESETS_RESET_APB BIT(1)
#define LPSS_GENERAL 0x08
#define LPSS_GENERAL_LTR_MODE_SW BIT(2)
#define LPSS_GENERAL_UART_RTS_OVRD BIT(3)
#define LPSS_SW_LTR 0x10
#define LPSS_AUTO_LTR 0x14
#define LPSS_LTR_SNOOP_REQ BIT(15)
#define LPSS_LTR_SNOOP_MASK 0x0000FFFF
#define LPSS_LTR_SNOOP_LAT_1US 0x800
#define LPSS_LTR_SNOOP_LAT_32US 0xC00
#define LPSS_LTR_SNOOP_LAT_SHIFT 5
#define LPSS_LTR_SNOOP_LAT_CUTOFF 3000
#define LPSS_LTR_MAX_VAL 0x3FF
#define LPSS_TX_INT 0x20
#define LPSS_TX_INT_MASK BIT(1)

#define LPSS_PRV_REG_COUNT 9

/* LPSS Flags */
#define LPSS_CLK BIT(0)
#define LPSS_CLK_GATE BIT(1)
#define LPSS_CLK_DIVIDER BIT(2)
#define LPSS_LTR BIT(3)
#define LPSS_SAVE_CTX BIT(4)
#define LPSS_NO_D3_DELAY BIT(5)

struct lpss_private_data;

struct lpss_device_desc {
        unsigned int flags;
        const char *clk_con_id;
        unsigned int prv_offset;
        size_t prv_size_override;
        struct property_entry *properties;
        void (*setup)(struct lpss_private_data *pdata);
};

static const struct lpss_device_desc lpss_dma_desc = {
        .flags = LPSS_CLK,
};

struct lpss_private_data {
        struct acpi_device *adev;
        void __iomem *mmio_base;
        resource_size_t mmio_size;
        unsigned int fixed_clk_rate;
        struct clk *clk;
        const struct lpss_device_desc *dev_desc;
        u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};

/* LPSS run time quirks */
static unsigned int lpss_quirks;

/*
 * LPSS_QUIRK_ALWAYS_POWER_ON: override the power state of the LPSS DMA device.
 *
 * The LPSS DMA controller has neither a _PS0 nor a _PS3 method. Moreover,
 * it can be powered off automatically whenever the last LPSS device goes down.
 * When the island has no power, any access to the DMA controller will hang the
 * system. The behaviour has been observed on some HP laptops based on Intel
 * BayTrail as well as on the Asus T100TA transformer.
 *
 * This quirk overrides the power state of the entire LPSS island to keep the
 * DMA controller powered on whenever we have at least one other device in use.
 */
#define LPSS_QUIRK_ALWAYS_POWER_ON BIT(0)

/* UART Component Parameter Register */
#define LPSS_UART_CPR 0xF4
/* Auto Flow Control Enable bit */
#define LPSS_UART_CPR_AFCE BIT(4)

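/*
 * Mask the UART TX interrupt in the private register space and, when the UART
 * does not implement auto flow control (CPR.AFCE is clear), set the RTS
 * override bit in the GENERAL register.
 */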
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
        unsigned int offset;
        u32 val;

        offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
        val = readl(pdata->mmio_base + offset);
        writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);

        val = readl(pdata->mmio_base + LPSS_UART_CPR);
        if (!(val & LPSS_UART_CPR_AFCE)) {
                offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
                val = readl(pdata->mmio_base + offset);
                val |= LPSS_GENERAL_UART_RTS_OVRD;
                writel(val, pdata->mmio_base + offset);
        }
}

static void lpss_deassert_reset(struct lpss_private_data *pdata)
{
        unsigned int offset;
        u32 val;

        offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
        val = readl(pdata->mmio_base + offset);
        val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
        writel(val, pdata->mmio_base + offset);
}

/*
 * BYT PWM used for backlight control by the i915 driver on systems without
 * the Crystal Cove PMIC.
 */
static struct pwm_lookup byt_pwm_lookup[] = {
        PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
                               "pwm_backlight", 0, PWM_POLARITY_NORMAL,
                               "pwm-lpss-platform"),
};

static void byt_pwm_setup(struct lpss_private_data *pdata)
{
        struct acpi_device *adev = pdata->adev;

        /* Only call pwm_add_table for the first PWM controller */
        if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
                return;

        if (!acpi_dev_present("INT33FD", NULL, -1))
                pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}

#define LPSS_I2C_ENABLE 0x6c

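/*
 * Deassert the I2C controller resets, record a 133 MHz fixed clock rate when
 * the first private register reads back non-zero, and write 0 to the ENABLE
 * register so the controller starts out disabled.
 */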
static void byt_i2c_setup(struct lpss_private_data *pdata)
{
        lpss_deassert_reset(pdata);

        if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
                pdata->fixed_clk_rate = 133000000;

        writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
}

/* BSW PWM used for backlight control by the i915 driver */
static struct pwm_lookup bsw_pwm_lookup[] = {
        PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
                               "pwm_backlight", 0, PWM_POLARITY_NORMAL,
                               "pwm-lpss-platform"),
};

static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
        struct acpi_device *adev = pdata->adev;

        /* Only call pwm_add_table for the first PWM controller */
        if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
                return;

        pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}

static const struct lpss_device_desc lpt_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
        .prv_offset = 0x800,
};

static const struct lpss_device_desc lpt_i2c_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
        .prv_offset = 0x800,
};

static struct property_entry uart_properties[] = {
        PROPERTY_ENTRY_U32("reg-io-width", 4),
        PROPERTY_ENTRY_U32("reg-shift", 2),
        PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
        { },
};

static const struct lpss_device_desc lpt_uart_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
        .clk_con_id = "baudclk",
        .prv_offset = 0x800,
        .setup = lpss_uart_setup,
        .properties = uart_properties,
};

static const struct lpss_device_desc lpt_sdio_dev_desc = {
        .flags = LPSS_LTR,
        .prv_offset = 0x1000,
        .prv_size_override = 0x1018,
};

static const struct lpss_device_desc byt_pwm_dev_desc = {
        .flags = LPSS_SAVE_CTX,
        .setup = byt_pwm_setup,
};

static const struct lpss_device_desc bsw_pwm_dev_desc = {
        .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
        .setup = bsw_pwm_setup,
};

static const struct lpss_device_desc byt_uart_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
        .clk_con_id = "baudclk",
        .prv_offset = 0x800,
        .setup = lpss_uart_setup,
        .properties = uart_properties,
};

static const struct lpss_device_desc bsw_uart_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
                        | LPSS_NO_D3_DELAY,
        .clk_con_id = "baudclk",
        .prv_offset = 0x800,
        .setup = lpss_uart_setup,
        .properties = uart_properties,
};

static const struct lpss_device_desc byt_spi_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
        .prv_offset = 0x400,
};

static const struct lpss_device_desc byt_sdio_dev_desc = {
        .flags = LPSS_CLK,
};

static const struct lpss_device_desc byt_i2c_dev_desc = {
        .flags = LPSS_CLK | LPSS_SAVE_CTX,
        .prv_offset = 0x800,
        .setup = byt_i2c_setup,
};

static const struct lpss_device_desc bsw_i2c_dev_desc = {
        .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
        .prv_offset = 0x800,
        .setup = byt_i2c_setup,
};

static const struct lpss_device_desc bsw_spi_dev_desc = {
        .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
                        | LPSS_NO_D3_DELAY,
        .prv_offset = 0x400,
        .setup = lpss_deassert_reset,
};

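/* CPU models on which acpi_lpss_init() applies LPSS_QUIRK_ALWAYS_POWER_ON. */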
#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id lpss_cpu_ids[] = {
        ICPU(INTEL_FAM6_ATOM_SILVERMONT1),	/* Valleyview, Bay Trail */
        ICPU(INTEL_FAM6_ATOM_AIRMONT),		/* Braswell, Cherry Trail */
        {}
};

#else

#define LPSS_ADDR(desc) (0UL)

#endif /* CONFIG_X86_INTEL_LPSS */

static const struct acpi_device_id acpi_lpss_device_ids[] = {
        /* Generic LPSS devices */
        { "INTL9C60", LPSS_ADDR(lpss_dma_desc) },

        /* Lynxpoint LPSS devices */
        { "INT33C0", LPSS_ADDR(lpt_dev_desc) },
        { "INT33C1", LPSS_ADDR(lpt_dev_desc) },
        { "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
        { "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
        { "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
        { "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
        { "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },
        { "INT33C7", },

        /* BayTrail LPSS devices */
        { "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
        { "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
        { "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
        { "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
        { "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },
        { "INT33B2", },
        { "INT33FC", },

        /* Braswell LPSS devices */
        { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
        { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
        { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
        { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },

        /* Broadwell LPSS devices */
        { "INT3430", LPSS_ADDR(lpt_dev_desc) },
        { "INT3431", LPSS_ADDR(lpt_dev_desc) },
        { "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
        { "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
        { "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
        { "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
        { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
        { "INT3437", },

        /* Wildcat Point LPSS devices */
        { "INT3438", LPSS_ADDR(lpt_dev_desc) },

        { }
};

#ifdef CONFIG_X86_INTEL_LPSS

static int is_memory(struct acpi_resource *res, void *not_used)
{
        struct resource r;

        return !acpi_dev_resource_memory(res, &r);
}

/* LPSS main clock device. */
static struct platform_device *lpss_clk_dev;

static inline void lpt_register_clock_device(void)
{
        lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
}

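/*
 * Register the per-device clock tree: either a fixed-rate clock (when the
 * setup hook latched a rate), or an optional gate followed by a fractional
 * divider and an "-update" gate, parented to the shared LPSS clock provided
 * by the "clk-lpt" platform device.
 */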
static int register_device_clock(struct acpi_device *adev,
                                 struct lpss_private_data *pdata)
{
        const struct lpss_device_desc *dev_desc = pdata->dev_desc;
        const char *devname = dev_name(&adev->dev);
        struct clk *clk = ERR_PTR(-ENODEV);
        struct lpss_clk_data *clk_data;
        const char *parent, *clk_name;
        void __iomem *prv_base;

        if (!lpss_clk_dev)
                lpt_register_clock_device();

        clk_data = platform_get_drvdata(lpss_clk_dev);
        if (!clk_data)
                return -ENODEV;
        clk = clk_data->clk;

        if (!pdata->mmio_base
            || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
                return -ENODATA;

        parent = clk_data->name;
        prv_base = pdata->mmio_base + dev_desc->prv_offset;

        if (pdata->fixed_clk_rate) {
                clk = clk_register_fixed_rate(NULL, devname, parent, 0,
                                              pdata->fixed_clk_rate);
                goto out;
        }

        if (dev_desc->flags & LPSS_CLK_GATE) {
                clk = clk_register_gate(NULL, devname, parent, 0,
                                        prv_base, 0, 0, NULL);
                parent = devname;
        }

        if (dev_desc->flags & LPSS_CLK_DIVIDER) {
                /* Prevent division by zero */
                if (!readl(prv_base))
                        writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);

                clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
                if (!clk_name)
                        return -ENOMEM;
                clk = clk_register_fractional_divider(NULL, clk_name, parent,
                                                      0, prv_base,
                                                      1, 15, 16, 15, 0, NULL);
                parent = clk_name;

                clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
                if (!clk_name) {
                        kfree(parent);
                        return -ENOMEM;
                }
                clk = clk_register_gate(NULL, clk_name, parent,
                                        CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
                                        prv_base, 31, 0, NULL);
                kfree(parent);
                kfree(clk_name);
        }

out:
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        pdata->clk = clk;
        clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
        return 0;
}

static int acpi_lpss_create_device(struct acpi_device *adev,
                                   const struct acpi_device_id *id)
{
        const struct lpss_device_desc *dev_desc;
        struct lpss_private_data *pdata;
        struct resource_entry *rentry;
        struct list_head resource_list;
        struct platform_device *pdev;
        int ret;

        dev_desc = (const struct lpss_device_desc *)id->driver_data;
        if (!dev_desc) {
                pdev = acpi_create_platform_device(adev, NULL);
                return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
        }

        pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return -ENOMEM;

        INIT_LIST_HEAD(&resource_list);
        ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
        if (ret < 0)
                goto err_out;

        list_for_each_entry(rentry, &resource_list, node)
                if (resource_type(rentry->res) == IORESOURCE_MEM) {
                        if (dev_desc->prv_size_override)
                                pdata->mmio_size = dev_desc->prv_size_override;
                        else
                                pdata->mmio_size = resource_size(rentry->res);
                        pdata->mmio_base = ioremap(rentry->res->start,
                                                   pdata->mmio_size);
                        break;
                }

        acpi_dev_free_resource_list(&resource_list);

        if (!pdata->mmio_base) {
                /* Skip the device, but continue the namespace scan. */
                ret = 0;
                goto err_out;
        }

        pdata->adev = adev;
        pdata->dev_desc = dev_desc;

        if (dev_desc->setup)
                dev_desc->setup(pdata);

        if (dev_desc->flags & LPSS_CLK) {
                ret = register_device_clock(adev, pdata);
                if (ret) {
                        /* Skip the device, but continue the namespace scan. */
                        ret = 0;
                        goto err_out;
                }
        }

        /*
         * This works around a known issue in ACPI tables where LPSS devices
         * have _PS0 and _PS3 without _PSC (and no power resources), so
         * acpi_bus_init_power() will assume that the BIOS has put them into D0.
         */
        ret = acpi_device_fix_up_power(adev);
        if (ret) {
                /* Skip the device, but continue the namespace scan. */
                ret = 0;
                goto err_out;
        }

        adev->driver_data = pdata;
        pdev = acpi_create_platform_device(adev, dev_desc->properties);
        if (!IS_ERR_OR_NULL(pdev))
                return 1;

        ret = PTR_ERR(pdev);
        adev->driver_data = NULL;

err_out:
        kfree(pdata);
        return ret;
}

static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
{
        return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
                             unsigned int reg)
{
        writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

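/*
 * Read a private register on behalf of the LTR sysfs attributes; bail out
 * with -EAGAIN while the device is runtime suspended so the MMIO space of a
 * powered-down device is never touched.
 */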
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
        struct acpi_device *adev;
        struct lpss_private_data *pdata;
        unsigned long flags;
        int ret;

        ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
        if (WARN_ON(ret))
                return ret;

        spin_lock_irqsave(&dev->power.lock, flags);
        if (pm_runtime_suspended(dev)) {
                ret = -EAGAIN;
                goto out;
        }
        pdata = acpi_driver_data(adev);
        if (WARN_ON(!pdata || !pdata->mmio_base)) {
                ret = -ENODEV;
                goto out;
        }
        *val = __lpss_reg_read(pdata, reg);

out:
        spin_unlock_irqrestore(&dev->power.lock, flags);
        return ret;
}

static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        u32 ltr_value = 0;
        unsigned int reg;
        int ret;

        reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
        ret = lpss_reg_read(dev, reg, &ltr_value);
        if (ret)
                return ret;

        return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
}

static ssize_t lpss_ltr_mode_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        u32 ltr_mode = 0;
        char *outstr;
        int ret;

        ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
        if (ret)
                return ret;

        outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
        return sprintf(buf, "%s\n", outstr);
}

static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);

static struct attribute *lpss_attrs[] = {
        &dev_attr_auto_ltr.attr,
        &dev_attr_sw_ltr.attr,
        &dev_attr_ltr_mode.attr,
        NULL,
};

static const struct attribute_group lpss_attr_group = {
        .attrs = lpss_attrs,
        .name = "lpss_ltr",
};

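/*
 * Latency tolerance callback installed by acpi_lpss_bind(): a negative value
 * switches the device back to automatic LTR, otherwise the value is encoded
 * into the SW_LTR register (1 us granularity, or 32 us above LPSS_LTR_MAX_VAL)
 * and software LTR mode is enabled in the GENERAL register.
 */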
static void acpi_lpss_set_ltr(struct device *dev, s32 val)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        u32 ltr_mode, ltr_val;

        ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
        if (val < 0) {
                if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
                        ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
                        __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
                }
                return;
        }
        ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
        if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
                ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
                val = LPSS_LTR_MAX_VAL;
        } else if (val > LPSS_LTR_MAX_VAL) {
                ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
                val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
        } else {
                ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
        }
        ltr_val |= val;
        __lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
        if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
                ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
                __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
        }
}

#ifdef CONFIG_PM
/**
 * acpi_lpss_save_ctx() - Save the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Most LPSS devices have private registers which may lose their context when
 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
 * the prv_reg_ctx array.
 */
static void acpi_lpss_save_ctx(struct device *dev,
                               struct lpss_private_data *pdata)
{
        unsigned int i;

        for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
                unsigned long offset = i * sizeof(u32);

                pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
                dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
                        pdata->prv_reg_ctx[i], offset);
        }
}

/**
 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Restores the registers that were previously stored with acpi_lpss_save_ctx().
 */
static void acpi_lpss_restore_ctx(struct device *dev,
                                  struct lpss_private_data *pdata)
{
        unsigned int i;

        for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
                unsigned long offset = i * sizeof(u32);

                __lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
                dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
                        pdata->prv_reg_ctx[i], offset);
        }
}

static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
{
        /*
         * The following delay is needed or the subsequent write operations may
         * fail. The LPSS devices are actually PCI devices and the PCI spec
         * expects a 10ms delay before the device can be accessed after a D3 to
         * D0 transition. However, some platforms, such as BSW, do not need
         * this delay.
         */
        unsigned int delay = 10;	/* default 10ms delay */

        if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
                delay = 0;

        msleep(delay);
}

static int acpi_lpss_activate(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;

        ret = acpi_dev_runtime_resume(dev);
        if (ret)
                return ret;

        acpi_lpss_d3_to_d0_delay(pdata);

        /*
         * This is called only at the ->probe() stage, where a device is either
         * in a known state defined by the BIOS or, most likely, powered off.
         * Because of this we have to deassert the reset line to be sure that
         * ->probe() will recognize the device.
         */
        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
                lpss_deassert_reset(pdata);

        return 0;
}

static void acpi_lpss_dismiss(struct device *dev)
{
        acpi_dev_runtime_suspend(dev);
}

#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_suspend_late(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;

        ret = pm_generic_suspend_late(dev);
        if (ret)
                return ret;

        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
                acpi_lpss_save_ctx(dev, pdata);

        return acpi_dev_suspend_late(dev);
}

static int acpi_lpss_resume_early(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;

        ret = acpi_dev_resume_early(dev);
        if (ret)
                return ret;

        acpi_lpss_d3_to_d0_delay(pdata);

        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
                acpi_lpss_restore_ctx(dev, pdata);

        return pm_generic_resume_early(dev);
}
#endif /* CONFIG_PM_SLEEP */

/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP 0xA0
#define LPSS_IOSF_UNIT_LPIO1 0xAB
#define LPSS_IOSF_UNIT_LPIO2 0xAC

#define LPSS_IOSF_PMCSR 0x84
#define LPSS_PMCSR_D0 0
#define LPSS_PMCSR_D3hot 3
#define LPSS_PMCSR_Dx_MASK GENMASK(1, 0)

#define LPSS_IOSF_GPIODEF0 0x154
#define LPSS_GPIODEF0_DMA1_D3 BIT(2)
#define LPSS_GPIODEF0_DMA2_D3 BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK GENMASK(3, 2)
#define LPSS_GPIODEF0_DMA_LLP BIT(13)

static DEFINE_MUTEX(lpss_iosf_mutex);

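/*
 * With LPSS_QUIRK_ALWAYS_POWER_ON set, the two helpers below put the LPSS DMA
 * controllers and the island endpoint into D3hot only once every other LPSS
 * device is already powered down, and bring them back up before any device
 * resumes.
 */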
static void lpss_iosf_enter_d3_state(void)
{
        u32 value1 = 0;
        u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
        u32 value2 = LPSS_PMCSR_D3hot;
        u32 mask2 = LPSS_PMCSR_Dx_MASK;
        /*
         * The PMC provides information about the actual status of the LPSS
         * devices. Here we read the values related to the LPSS power island,
         * i.e. the LPSS devices, excluding both LPSS DMA controllers, along
         * with the SCC domain.
         */
        u32 func_dis, d3_sts_0, pmc_status, pmc_mask = 0xfe000ffe;
        int ret;

        ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
        if (ret)
                return;

        mutex_lock(&lpss_iosf_mutex);

        ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
        if (ret)
                goto exit;

        /*
         * Get the status of the entire LPSS power island on a per-device
         * basis. Shut down both LPSS DMA controllers if and only if all other
         * devices are already in D3hot.
         */
        pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask;
        if (pmc_status)
                goto exit;

        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
                        LPSS_IOSF_PMCSR, value2, mask2);
        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
                        LPSS_IOSF_PMCSR, value2, mask2);
        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
                        LPSS_IOSF_GPIODEF0, value1, mask1);
exit:
        mutex_unlock(&lpss_iosf_mutex);
}

static void lpss_iosf_exit_d3_state(void)
{
        u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
                     LPSS_GPIODEF0_DMA_LLP;
        u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
        u32 value2 = LPSS_PMCSR_D0;
        u32 mask2 = LPSS_PMCSR_Dx_MASK;

        mutex_lock(&lpss_iosf_mutex);

        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
                        LPSS_IOSF_GPIODEF0, value1, mask1);
        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
                        LPSS_IOSF_PMCSR, value2, mask2);
        iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
                        LPSS_IOSF_PMCSR, value2, mask2);

        mutex_unlock(&lpss_iosf_mutex);
}

static int acpi_lpss_runtime_suspend(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;

        ret = pm_generic_runtime_suspend(dev);
        if (ret)
                return ret;

        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
                acpi_lpss_save_ctx(dev, pdata);

        ret = acpi_dev_runtime_suspend(dev);

        /*
         * This call must be last in the sequence, otherwise the PMC will
         * return the wrong status for devices that are about to be powered
         * off. See lpss_iosf_enter_d3_state() for further information.
         */
        if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
                lpss_iosf_enter_d3_state();

        return ret;
}

static int acpi_lpss_runtime_resume(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;

        /*
         * This call is kept first to be symmetric with
         * acpi_lpss_runtime_suspend().
         */
        if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
                lpss_iosf_exit_d3_state();

        ret = acpi_dev_runtime_resume(dev);
        if (ret)
                return ret;

        acpi_lpss_d3_to_d0_delay(pdata);

        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
                acpi_lpss_restore_ctx(dev, pdata);

        return pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */

static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
        .activate = acpi_lpss_activate,
        .dismiss = acpi_lpss_dismiss,
#endif
        .ops = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
                .prepare = acpi_subsys_prepare,
                .complete = pm_complete_with_resume_check,
                .suspend = acpi_subsys_suspend,
                .suspend_late = acpi_lpss_suspend_late,
                .resume_early = acpi_lpss_resume_early,
                .freeze = acpi_subsys_freeze,
                .poweroff = acpi_subsys_suspend,
                .poweroff_late = acpi_lpss_suspend_late,
                .restore_early = acpi_lpss_resume_early,
#endif
                .runtime_suspend = acpi_lpss_runtime_suspend,
                .runtime_resume = acpi_lpss_runtime_resume,
#endif
        },
};

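/*
 * Platform bus notifier: attach the LPSS PM domain when a matching platform
 * device is added or bound to a driver, expose the lpss_ltr sysfs group for
 * devices with the LPSS_LTR flag, and tear both down on removal or unbind.
 */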
static int acpi_lpss_platform_notify(struct notifier_block *nb,
                                     unsigned long action, void *data)
{
        struct platform_device *pdev = to_platform_device(data);
        struct lpss_private_data *pdata;
        struct acpi_device *adev;
        const struct acpi_device_id *id;

        id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
        if (!id || !id->driver_data)
                return 0;

        if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
                return 0;

        pdata = acpi_driver_data(adev);
        if (!pdata)
                return 0;

        if (pdata->mmio_base &&
            pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
                dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
                return 0;
        }

        switch (action) {
        case BUS_NOTIFY_BIND_DRIVER:
                dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
                break;
        case BUS_NOTIFY_DRIVER_NOT_BOUND:
        case BUS_NOTIFY_UNBOUND_DRIVER:
                dev_pm_domain_set(&pdev->dev, NULL);
                break;
        case BUS_NOTIFY_ADD_DEVICE:
                dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
                if (pdata->dev_desc->flags & LPSS_LTR)
                        return sysfs_create_group(&pdev->dev.kobj,
                                                  &lpss_attr_group);
                break;
        case BUS_NOTIFY_DEL_DEVICE:
                if (pdata->dev_desc->flags & LPSS_LTR)
                        sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
                dev_pm_domain_set(&pdev->dev, NULL);
                break;
        default:
                break;
        }

        return 0;
}

static struct notifier_block acpi_lpss_nb = {
        .notifier_call = acpi_lpss_platform_notify,
};

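/*
 * Scan handler ->bind() callback: install acpi_lpss_set_ltr() as the latency
 * tolerance handler for devices that expose the LTR registers in their
 * private MMIO space.
 */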
static void acpi_lpss_bind(struct device *dev)
{
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

        if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
                return;

        if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
                dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
        else
                dev_err(dev, "MMIO size insufficient to access LTR\n");
}

static void acpi_lpss_unbind(struct device *dev)
{
        dev->power.set_latency_tolerance = NULL;
}

static struct acpi_scan_handler lpss_handler = {
        .ids = acpi_lpss_device_ids,
        .attach = acpi_lpss_create_device,
        .bind = acpi_lpss_bind,
        .unbind = acpi_lpss_unbind,
};

void __init acpi_lpss_init(void)
{
        const struct x86_cpu_id *id;
        int ret;

        ret = lpt_clk_init();
        if (ret)
                return;

        id = x86_match_cpu(lpss_cpu_ids);
        if (id)
                lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;

        bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
        acpi_scan_add_handler(&lpss_handler);
}

#else

static struct acpi_scan_handler lpss_handler = {
        .ids = acpi_lpss_device_ids,
};

void __init acpi_lpss_init(void)
{
        acpi_scan_add_handler(&lpss_handler);
}

#endif /* CONFIG_X86_INTEL_LPSS */