llcc-slice.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/llcc-qcom.h>

#define ACTIVATE                      BIT(0)
#define DEACTIVATE                    BIT(1)
#define ACT_CTRL_OPCODE_ACTIVATE      BIT(0)
#define ACT_CTRL_OPCODE_DEACTIVATE    BIT(1)
#define ACT_CTRL_ACT_TRIG             BIT(0)
#define ACT_CTRL_OPCODE_SHIFT         0x01
#define ATTR1_PROBE_TARGET_WAYS_SHIFT 0x02
#define ATTR1_FIXED_SIZE_SHIFT        0x03
#define ATTR1_PRIORITY_SHIFT          0x04
#define ATTR1_MAX_CAP_SHIFT           0x10
#define ATTR0_RES_WAYS_MASK           GENMASK(11, 0)
#define ATTR0_BONUS_WAYS_MASK         GENMASK(27, 16)
#define ATTR0_BONUS_WAYS_SHIFT        0x10
#define LLCC_STATUS_READ_DELAY        100

#define CACHE_LINE_SIZE_SHIFT         6

#define LLCC_COMMON_STATUS0           0x0003000c
#define LLCC_LB_CNT_MASK              GENMASK(31, 28)
#define LLCC_LB_CNT_SHIFT             28

#define MAX_CAP_TO_BYTES(n)           (n * SZ_1K)
#define LLCC_TRP_ACT_CTRLn(n)         (n * SZ_4K)
#define LLCC_TRP_STATUSn(n)           (4 + n * SZ_4K)
#define LLCC_TRP_ATTR0_CFGn(n)        (0x21000 + SZ_8 * n)
#define LLCC_TRP_ATTR1_CFGn(n)        (0x21004 + SZ_8 * n)

#define BANK_OFFSET_STRIDE            0x80000

static struct llcc_drv_data *drv_data;

static const struct regmap_config llcc_regmap_config = {
        .reg_bits = 32,
        .reg_stride = 4,
        .val_bits = 32,
        .fast_io = true,
};

/**
 * llcc_slice_getd - get llcc slice descriptor
 * @uid: usecase_id for the client
 *
 * A pointer to llcc slice descriptor will be returned on success and
 * an error pointer is returned on failure
 */
struct llcc_slice_desc *llcc_slice_getd(u32 uid)
{
        const struct llcc_slice_config *cfg;
        struct llcc_slice_desc *desc;
        u32 sz, count;

        cfg = drv_data->cfg;
        sz = drv_data->cfg_size;

        for (count = 0; cfg && count < sz; count++, cfg++)
                if (cfg->usecase_id == uid)
                        break;

        if (count == sz || !cfg)
                return ERR_PTR(-ENODEV);

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return ERR_PTR(-ENOMEM);

        desc->slice_id = cfg->slice_id;
        desc->slice_size = cfg->max_cap;

        return desc;
}
EXPORT_SYMBOL_GPL(llcc_slice_getd);

/**
 * llcc_slice_putd - put llcc slice descriptor
 * @desc: Pointer to llcc slice descriptor
 */
void llcc_slice_putd(struct llcc_slice_desc *desc)
{
        kfree(desc);
}
EXPORT_SYMBOL_GPL(llcc_slice_putd);

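/*
 * Helper for slice (de)activation: write the requested opcode with the
 * ACT_TRIG bit set, clear the trigger, then poll the slice status register
 * until the queried state bit clears or LLCC_STATUS_READ_DELAY expires.
 */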
static int llcc_update_act_ctrl(u32 sid,
                                u32 act_ctrl_reg_val, u32 status)
{
        u32 act_ctrl_reg;
        u32 status_reg;
        u32 slice_status;
        int ret;

        act_ctrl_reg = LLCC_TRP_ACT_CTRLn(sid);
        status_reg = LLCC_TRP_STATUSn(sid);

        /* Set the ACTIVE trigger */
        act_ctrl_reg_val |= ACT_CTRL_ACT_TRIG;
        ret = regmap_write(drv_data->bcast_regmap, act_ctrl_reg,
                           act_ctrl_reg_val);
        if (ret)
                return ret;

        /* Clear the ACTIVE trigger */
        act_ctrl_reg_val &= ~ACT_CTRL_ACT_TRIG;
        ret = regmap_write(drv_data->bcast_regmap, act_ctrl_reg,
                           act_ctrl_reg_val);
        if (ret)
                return ret;

        ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg,
                                       slice_status, !(slice_status & status),
                                       0, LLCC_STATUS_READ_DELAY);
        return ret;
}

/**
 * llcc_slice_activate - Activate the llcc slice
 * @desc: Pointer to llcc slice descriptor
 *
 * A value of zero will be returned on success and a negative errno will
 * be returned in error cases
 */
int llcc_slice_activate(struct llcc_slice_desc *desc)
{
        int ret;
        u32 act_ctrl_val;

        mutex_lock(&drv_data->lock);
        if (test_bit(desc->slice_id, drv_data->bitmap)) {
                mutex_unlock(&drv_data->lock);
                return 0;
        }

        act_ctrl_val = ACT_CTRL_OPCODE_ACTIVATE << ACT_CTRL_OPCODE_SHIFT;

        ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
                                   DEACTIVATE);
        if (ret) {
                mutex_unlock(&drv_data->lock);
                return ret;
        }

        __set_bit(desc->slice_id, drv_data->bitmap);
        mutex_unlock(&drv_data->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(llcc_slice_activate);

/**
 * llcc_slice_deactivate - Deactivate the llcc slice
 * @desc: Pointer to llcc slice descriptor
 *
 * A value of zero will be returned on success and a negative errno will
 * be returned in error cases
 */
int llcc_slice_deactivate(struct llcc_slice_desc *desc)
{
        u32 act_ctrl_val;
        int ret;

        mutex_lock(&drv_data->lock);
        if (!test_bit(desc->slice_id, drv_data->bitmap)) {
                mutex_unlock(&drv_data->lock);
                return 0;
        }

        act_ctrl_val = ACT_CTRL_OPCODE_DEACTIVATE << ACT_CTRL_OPCODE_SHIFT;

        ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
                                   ACTIVATE);
        if (ret) {
                mutex_unlock(&drv_data->lock);
                return ret;
        }

        __clear_bit(desc->slice_id, drv_data->bitmap);
        mutex_unlock(&drv_data->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(llcc_slice_deactivate);

/**
 * llcc_get_slice_id - return the slice id
 * @desc: Pointer to llcc slice descriptor
 */
int llcc_get_slice_id(struct llcc_slice_desc *desc)
{
        return desc->slice_id;
}
EXPORT_SYMBOL_GPL(llcc_get_slice_id);

/**
 * llcc_get_slice_size - return the slice size
 * @desc: Pointer to llcc slice descriptor
 */
size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
{
        return desc->slice_size;
}
EXPORT_SYMBOL_GPL(llcc_get_slice_size);

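/*
 * A minimal sketch (not part of this driver) of how a client is expected to
 * use the exported slice API above; the usecase id and error handling are
 * illustrative assumptions only.
 *
 *      struct llcc_slice_desc *desc;
 *
 *      desc = llcc_slice_getd(uid);
 *      if (IS_ERR(desc))
 *              return PTR_ERR(desc);
 *
 *      ret = llcc_slice_activate(desc);
 *      ...
 *      program client hardware using llcc_get_slice_id(desc) and
 *      llcc_get_slice_size(desc)
 *      ...
 *      llcc_slice_deactivate(desc);
 *      llcc_slice_putd(desc);
 */
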
static int qcom_llcc_cfg_program(struct platform_device *pdev)
{
        int i;
        u32 attr1_cfg;
        u32 attr0_cfg;
        u32 attr1_val;
        u32 attr0_val;
        u32 max_cap_cacheline;
        u32 sz;
        int ret = 0;
        const struct llcc_slice_config *llcc_table;
        struct llcc_slice_desc desc;

        sz = drv_data->cfg_size;
        llcc_table = drv_data->cfg;

        for (i = 0; i < sz; i++) {
                attr1_cfg = LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
                attr0_cfg = LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);

                attr1_val = llcc_table[i].cache_mode;
                attr1_val |= llcc_table[i].probe_target_ways <<
                                ATTR1_PROBE_TARGET_WAYS_SHIFT;
                attr1_val |= llcc_table[i].fixed_size <<
                                ATTR1_FIXED_SIZE_SHIFT;
                attr1_val |= llcc_table[i].priority <<
                                ATTR1_PRIORITY_SHIFT;

                max_cap_cacheline = MAX_CAP_TO_BYTES(llcc_table[i].max_cap);

                /*
                 * LLCC instances can vary for each target.
                 * The SW writes to broadcast register which gets propagated
                 * to each llcc instance (llcc0,.. llccN).
                 * Since the size of the memory is divided equally amongst the
                 * llcc instances, we need to configure the max cap accordingly.
                 */
                max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
                max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
                attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;

                attr0_val = llcc_table[i].res_ways & ATTR0_RES_WAYS_MASK;
                attr0_val |= llcc_table[i].bonus_ways << ATTR0_BONUS_WAYS_SHIFT;

                ret = regmap_write(drv_data->bcast_regmap, attr1_cfg,
                                   attr1_val);
                if (ret)
                        return ret;

                ret = regmap_write(drv_data->bcast_regmap, attr0_cfg,
                                   attr0_val);
                if (ret)
                        return ret;

                if (llcc_table[i].activate_on_init) {
                        desc.slice_id = llcc_table[i].slice_id;
                        ret = llcc_slice_activate(&desc);
                }
        }

        return ret;
}

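/*
 * Worked example (hypothetical numbers) for the max-cap scaling done in
 * qcom_llcc_cfg_program() above: a slice with max_cap = 3072 KB on a target
 * reporting four LLCC banks is programmed as (3072 * SZ_1K) / 4 = 786432
 * bytes per bank, which shifted right by CACHE_LINE_SIZE_SHIFT (6) gives
 * 12288 cache lines in the ATTR1 max-cap field.
 */
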
int qcom_llcc_probe(struct platform_device *pdev,
                    const struct llcc_slice_config *llcc_cfg, u32 sz)
{
        u32 num_banks;
        struct device *dev = &pdev->dev;
        struct resource *llcc_banks_res, *llcc_bcast_res;
        void __iomem *llcc_banks_base, *llcc_bcast_base;
        int ret, i;
        struct platform_device *llcc_edac;

        drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
        if (!drv_data)
                return -ENOMEM;

        llcc_banks_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                      "llcc_base");
        llcc_banks_base = devm_ioremap_resource(&pdev->dev, llcc_banks_res);
        if (IS_ERR(llcc_banks_base))
                return PTR_ERR(llcc_banks_base);

        drv_data->regmap = devm_regmap_init_mmio(dev, llcc_banks_base,
                                                 &llcc_regmap_config);
        if (IS_ERR(drv_data->regmap))
                return PTR_ERR(drv_data->regmap);

        llcc_bcast_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                      "llcc_broadcast_base");
        llcc_bcast_base = devm_ioremap_resource(&pdev->dev, llcc_bcast_res);
        if (IS_ERR(llcc_bcast_base))
                return PTR_ERR(llcc_bcast_base);

        drv_data->bcast_regmap = devm_regmap_init_mmio(dev, llcc_bcast_base,
                                                       &llcc_regmap_config);
        if (IS_ERR(drv_data->bcast_regmap))
                return PTR_ERR(drv_data->bcast_regmap);

        ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0,
                          &num_banks);
        if (ret)
                return ret;

        num_banks &= LLCC_LB_CNT_MASK;
        num_banks >>= LLCC_LB_CNT_SHIFT;
        drv_data->num_banks = num_banks;

        for (i = 0; i < sz; i++)
                if (llcc_cfg[i].slice_id > drv_data->max_slices)
                        drv_data->max_slices = llcc_cfg[i].slice_id;

        drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
                                         GFP_KERNEL);
        if (!drv_data->offsets)
                return -ENOMEM;

        for (i = 0; i < num_banks; i++)
                drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;

        drv_data->bitmap = devm_kcalloc(dev,
                BITS_TO_LONGS(drv_data->max_slices), sizeof(unsigned long),
                GFP_KERNEL);
        if (!drv_data->bitmap)
                return -ENOMEM;

        drv_data->cfg = llcc_cfg;
        drv_data->cfg_size = sz;
        mutex_init(&drv_data->lock);
        platform_set_drvdata(pdev, drv_data);

        ret = qcom_llcc_cfg_program(pdev);
        if (ret)
                return ret;

        drv_data->ecc_irq = platform_get_irq(pdev, 0);
        if (drv_data->ecc_irq >= 0) {
                llcc_edac = platform_device_register_data(&pdev->dev,
                                                "qcom_llcc_edac", -1, drv_data,
                                                sizeof(*drv_data));
                if (IS_ERR(llcc_edac))
                        dev_err(dev, "Failed to register llcc edac driver\n");
        }

        return ret;
}
EXPORT_SYMBOL_GPL(qcom_llcc_probe);

MODULE_LICENSE("GPL v2");
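
/*
 * A minimal sketch (not part of this file) of how a SoC-specific front end
 * is expected to hook into qcom_llcc_probe(): the table contents, field
 * values and function names below are illustrative assumptions only.
 *
 *      static const struct llcc_slice_config my_soc_data[] = {
 *              {
 *                      .usecase_id = 1,
 *                      .slice_id = 1,
 *                      .max_cap = 3072,
 *                      .priority = 1,
 *                      .bonus_ways = 0xff0,
 *                      .cache_mode = 0,
 *                      .activate_on_init = true,
 *              },
 *      };
 *
 *      static int my_soc_llcc_probe(struct platform_device *pdev)
 *      {
 *              return qcom_llcc_probe(pdev, my_soc_data,
 *                                     ARRAY_SIZE(my_soc_data));
 *      }
 */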