i915_sysfs.c

/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"
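
/* The sysfs files hang off the device of the DRM minor; its drvdata
 * points back at the minor, from which we recover dev_priv.
 */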
static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
	struct drm_minor *minor = dev_get_drvdata(kdev);
	return to_i915(minor->dev);
}

#ifdef CONFIG_PM
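/*
 * Convert a raw residency counter into milliseconds. By default the
 * counter ticks in 1.28us units (hence units/div = 128/100000); VLV/CHV
 * count CZ clock cycles instead, and Gen9 LP parts tick at 833.33ns.
 */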
static u32 calc_residency(struct drm_i915_private *dev_priv,
			  i915_reg_t reg)
{
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL;
	u32 ret;

	if (!intel_enable_rc6())
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		units = 1;
		div = dev_priv->czclk_freq;

		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;
	} else if (IS_GEN9_LP(dev_priv)) {
		units = 1;
		div = 1200;		/* 833.33ns */
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
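
/* rc6_enable reports the mask of enabled RC6 states (RC6/RC6p/RC6pp). */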
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs = media_rc6_attrs
};
#endif
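
/* L3 parity log accesses must be dword aligned and within the log. */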
static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
{
	if (!HAS_L3_DPF(dev_priv))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct drm_device *dev = &dev_priv->drm;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(dev_priv, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&dev->struct_mutex);

	return count;
}
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gem_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(dev_priv))
		return -ENXIO;

	ret = l3_access_valid(dev_priv, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&dev->struct_mutex);
			return -ENOMEM;
		}
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&dev->struct_mutex);

	return count;
}
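
/* One l3_parity file per L3 slice; ->private carries the slice index. */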
static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};
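
/*
 * gt_act_freq_mhz is the frequency the GT is actually running at, read
 * back from the hardware (Punit status on VLV/CHV, CAGF elsewhere).
 */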
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	int ret;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		u32 rpstat = I915_READ(GEN6_RPSTAT1);
		if (IS_GEN9(dev_priv))
			ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		ret = intel_gpu_freq(dev_priv, ret);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
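
/*
 * gt_cur_freq_mhz is the last frequency requested by the driver, which
 * can differ from the actual frequency above, e.g. while the GT is idle
 * or power limited.
 */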
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.cur_freq));
}

static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.boost_freq));
}
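
/* The boost frequency must map to a valid hardware opcode within the
 * absolute (not soft-limited) min/max range.
 */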
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(dev_priv, val);
	if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
		return -EINVAL;

	mutex_lock(&dev_priv->rps.hw_lock);
	dev_priv->rps.boost_freq = val;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}
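
/* RPe is the "efficient" frequency recommended by the Punit on VLV/CHV. */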
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.efficient_freq));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.max_freq_softlimit));
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	dev_priv->rps.max_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return count;
}
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			intel_gpu_freq(dev_priv,
				       dev_priv->rps.min_freq_softlimit));
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		intel_runtime_pm_put(dev_priv);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev_priv, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return count;
}
static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 val;

	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
	else
		BUG();

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};
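
/* Reading the error file dumps the last recorded GPU error state;
 * writing anything to it clears the recorded state.
 */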
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
	if (ret)
		return ret;

	error_priv.i915 = dev_priv;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_destroy_error_state(dev_priv);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

static void i915_setup_error_capture(struct device *kdev)
{
	if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
		DRM_ERROR("error_state sysfs setup failed\n");
}

static void i915_teardown_error_capture(struct device *kdev)
{
	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}
#else
static void i915_setup_error_capture(struct device *kdev) {}
static void i915_teardown_error_capture(struct device *kdev) {}
#endif
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev_priv)) {
		ret = device_create_bin_file(kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev_priv) > 1) {
			ret = device_create_bin_file(kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
	else if (INTEL_GEN(dev_priv) >= 6)
		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	i915_setup_error_capture(kdev);
}
void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;

	i915_teardown_error_capture(kdev);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sysfs_remove_files(&kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&kdev->kobj, gen6_attrs);
	device_remove_bin_file(kdev,  &dpf_attrs_1);
	device_remove_bin_file(kdev,  &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
#endif
}