base.c

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "conn.h"
#include "dp.h"
#include "head.h"
#include "ior.h"
#include "outp.h"

#include <core/client.h>
#include <core/notify.h>
#include <core/oproxy.h>

#include <subdev/bios.h>
#include <subdev/bios/dcb.h>

#include <nvif/class.h>
#include <nvif/cl0046.h>
#include <nvif/event.h>
#include <nvif/unpack.h>

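/* A registered vblank notifier is enabled/disabled by toggling the
 * hardware vblank interrupt on the head it targets.
 */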
static void
nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int id)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_head *head = nvkm_head_find(disp, id);
	if (head)
		head->func->vblank_put(head);
}

static void
nvkm_disp_vblank_init(struct nvkm_event *event, int type, int id)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	struct nvkm_head *head = nvkm_head_find(disp, id);
	if (head)
		head->func->vblank_get(head);
}

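/* Validate a client's vblank notifier request against the available heads. */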
static int
nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
		      struct nvkm_notify *notify)
{
	struct nvkm_disp *disp =
		container_of(notify->event, typeof(*disp), vblank);
	union {
		struct nvif_notify_head_req_v0 v0;
	} *req = data;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, false))) {
		notify->size = sizeof(struct nvif_notify_head_rep_v0);
		if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) {
			notify->types = 1;
			notify->index = req->v0.head;
			return 0;
		}
	}

	return ret;
}

static const struct nvkm_event_func
nvkm_disp_vblank_func = {
	.ctor = nvkm_disp_vblank_ctor,
	.init = nvkm_disp_vblank_init,
	.fini = nvkm_disp_vblank_fini,
};

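/* Broadcast a vblank event for a head to all registered notifiers. */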
void
nvkm_disp_vblank(struct nvkm_disp *disp, int head)
{
	struct nvif_notify_head_rep_v0 rep = {};
	nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep));
}

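/* Validate a client's hotplug notifier request: the connector must exist
 * and must actually support hotplug detection.
 */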
static int
nvkm_disp_hpd_ctor(struct nvkm_object *object, void *data, u32 size,
		   struct nvkm_notify *notify)
{
	struct nvkm_disp *disp =
		container_of(notify->event, typeof(*disp), hpd);
	union {
		struct nvif_notify_conn_req_v0 v0;
	} *req = data;
	struct nvkm_outp *outp;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, false))) {
		notify->size = sizeof(struct nvif_notify_conn_rep_v0);
		list_for_each_entry(outp, &disp->outp, head) {
			if (ret = -ENXIO, outp->conn->index == req->v0.conn) {
				if (ret = -ENODEV, outp->conn->hpd.event) {
					notify->types = req->v0.mask;
					notify->index = req->v0.conn;
					ret = 0;
				}
				break;
			}
		}
	}

	return ret;
}

static const struct nvkm_event_func
nvkm_disp_hpd_func = {
	.ctor = nvkm_disp_hpd_ctor
};

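/* Map a notifier type requested by a client to the matching event object. */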
int
nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
{
	struct nvkm_disp *disp = nvkm_disp(object->engine);
	switch (type) {
	case NV04_DISP_NTFY_VBLANK:
		*event = &disp->vblank;
		return 0;
	case NV04_DISP_NTFY_CONN:
		*event = &disp->hpd;
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

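/* Destructor hook for the oproxy wrapper: release exclusive ownership of
 * the display when the owning client's object goes away.
 */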
static void
nvkm_disp_class_del(struct nvkm_oproxy *oproxy)
{
	struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine);
	mutex_lock(&disp->engine.subdev.mutex);
	if (disp->client == oproxy)
		disp->client = NULL;
	mutex_unlock(&disp->engine.subdev.mutex);
}

static const struct nvkm_oproxy_func
nvkm_disp_class = {
	.dtor[1] = nvkm_disp_class_del,
};

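/* Only a single client may own the display at any time.  The root object
 * is wrapped in an oproxy so that ownership is dropped on destruction.
 */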
static int
nvkm_disp_class_new(struct nvkm_device *device,
		    const struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	const struct nvkm_disp_oclass *sclass = oclass->engn;
	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
	struct nvkm_oproxy *oproxy;
	int ret;

	ret = nvkm_oproxy_new_(&nvkm_disp_class, oclass, &oproxy);
	if (ret)
		return ret;
	*pobject = &oproxy->base;

	mutex_lock(&disp->engine.subdev.mutex);
	if (disp->client) {
		mutex_unlock(&disp->engine.subdev.mutex);
		return -EBUSY;
	}
	disp->client = oproxy;
	mutex_unlock(&disp->engine.subdev.mutex);

	return sclass->ctor(disp, oclass, data, size, &oproxy->object);
}

static const struct nvkm_device_oclass
nvkm_disp_sclass = {
	.ctor = nvkm_disp_class_new,
};

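/* Expose the chipset-specific root class as the display's only user class. */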
static int
nvkm_disp_class_get(struct nvkm_oclass *oclass, int index,
		    const struct nvkm_device_oclass **class)
{
	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
	if (index == 0) {
		const struct nvkm_disp_oclass *root = disp->func->root(disp);
		oclass->base = root->base;
		oclass->engn = root;
		*class = &nvkm_disp_sclass;
		return 0;
	}
	return 1;
}

static void
nvkm_disp_intr(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	disp->func->intr(disp);
}

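/* Suspend/resume: fini tears down output paths before connectors, while
 * init brings connectors back up before the output paths that use them.
 */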
static int
nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_conn *conn;
	struct nvkm_outp *outp;

	list_for_each_entry(outp, &disp->outp, head) {
		nvkm_outp_fini(outp);
	}

	list_for_each_entry(conn, &disp->conn, head) {
		nvkm_conn_fini(conn);
	}

	return 0;
}

static int
nvkm_disp_init(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_conn *conn;
	struct nvkm_outp *outp;

	list_for_each_entry(conn, &disp->conn, head) {
		nvkm_conn_init(conn);
	}

	list_for_each_entry(outp, &disp->outp, head) {
		nvkm_outp_init(outp);
	}

	return 0;
}

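/* One-time setup: parse each display path from the VBIOS DCB table into
 * an output path object, group output paths into connector objects, then
 * size and create the hotplug and vblank event sources.
 */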
static int
nvkm_disp_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvkm_outp *outp, *outt, *pair;
	struct nvkm_conn *conn;
	struct nvkm_head *head;
	struct nvbios_connE connE;
	struct dcb_output dcbE;
	u8 hpd = 0, ver, hdr;
	u32 data;
	int ret, i;

	/* Create output path objects for each VBIOS display path. */
	i = -1;
	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
		if (ver < 0x40) /* No support for chipsets prior to NV50. */
			break;
		if (dcbE.type == DCB_OUTPUT_UNUSED)
			continue;
		if (dcbE.type == DCB_OUTPUT_EOL)
			break;
		outp = NULL;

		switch (dcbE.type) {
		case DCB_OUTPUT_ANALOG:
		case DCB_OUTPUT_TV:
		case DCB_OUTPUT_TMDS:
		case DCB_OUTPUT_LVDS:
			ret = nvkm_outp_new(disp, i, &dcbE, &outp);
			break;
		case DCB_OUTPUT_DP:
			ret = nvkm_dp_new(disp, i, &dcbE, &outp);
			break;
		default:
			nvkm_warn(subdev, "dcb %d type %d unknown\n",
				  i, dcbE.type);
			continue;
		}

		if (ret) {
			if (outp) {
				if (ret != -ENODEV)
					OUTP_ERR(outp, "ctor failed: %d", ret);
				else
					OUTP_DBG(outp, "not supported");
				nvkm_outp_del(&outp);
				continue;
			}
			nvkm_error(subdev, "failed to create outp %d\n", i);
			continue;
		}

		list_add_tail(&outp->head, &disp->outp);
		hpd = max(hpd, (u8)(dcbE.connector + 1));
	}

	/* Create connector objects based on available output paths. */
	list_for_each_entry_safe(outp, outt, &disp->outp, head) {
		/* VBIOS data *should* give us the most useful information. */
		data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
				     &connE);

		/* No bios connector data... */
		if (!data) {
			/* Heuristic: anything with the same ccb index is
			 * considered to be on the same connector, any
			 * output path without an associated ccb entry will
			 * be put on its own connector.
			 */
			int ccb_index = outp->info.i2c_index;
			if (ccb_index != 0xf) {
				list_for_each_entry(pair, &disp->outp, head) {
					if (pair->info.i2c_index == ccb_index) {
						outp->conn = pair->conn;
						break;
					}
				}
			}

			/* Connector shared with another output path. */
			if (outp->conn)
				continue;

			memset(&connE, 0x00, sizeof(connE));
			connE.type = DCB_CONNECTOR_NONE;
			i = -1;
		} else {
			i = outp->info.connector;
		}

		/* Check that we haven't already created this connector. */
		list_for_each_entry(conn, &disp->conn, head) {
			if (conn->index == outp->info.connector) {
				outp->conn = conn;
				break;
			}
		}

		if (outp->conn)
			continue;

		/* Apparently we need to create a new one! */
		ret = nvkm_conn_new(disp, i, &connE, &outp->conn);
		if (ret) {
			nvkm_error(&disp->engine.subdev,
				   "failed to create outp %d conn: %d\n",
				   outp->index, ret);
			nvkm_conn_del(&outp->conn);
			list_del(&outp->head);
			nvkm_outp_del(&outp);
			continue;
		}

		list_add_tail(&outp->conn->head, &disp->conn);
	}

	ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd);
	if (ret)
		return ret;

	i = 0;
	list_for_each_entry(head, &disp->head, head)
		i = max(i, head->id + 1);

	return nvkm_event_init(&nvkm_disp_vblank_func, 1, i, &disp->vblank);
}

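/* Teardown mirrors construction: chipset-specific state first, then the
 * event sources, then the connector, output path, IOR and head lists.
 */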
static void *
nvkm_disp_dtor(struct nvkm_engine *engine)
{
	struct nvkm_disp *disp = nvkm_disp(engine);
	struct nvkm_conn *conn;
	struct nvkm_outp *outp;
	void *data = disp;

	if (disp->func->dtor)
		data = disp->func->dtor(disp);

	nvkm_event_fini(&disp->vblank);
	nvkm_event_fini(&disp->hpd);

	while (!list_empty(&disp->conn)) {
		conn = list_first_entry(&disp->conn, typeof(*conn), head);
		list_del(&conn->head);
		nvkm_conn_del(&conn);
	}

	while (!list_empty(&disp->outp)) {
		outp = list_first_entry(&disp->outp, typeof(*outp), head);
		list_del(&outp->head);
		nvkm_outp_del(&outp);
	}

	while (!list_empty(&disp->ior)) {
		struct nvkm_ior *ior =
			list_first_entry(&disp->ior, typeof(*ior), head);
		nvkm_ior_del(&ior);
	}

	while (!list_empty(&disp->head)) {
		struct nvkm_head *head =
			list_first_entry(&disp->head, typeof(*head), head);
		nvkm_head_del(&head);
	}

	return data;
}

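/* Hooks wiring the display into the common engine layer. */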
static const struct nvkm_engine_func
nvkm_disp = {
	.dtor = nvkm_disp_dtor,
	.oneinit = nvkm_disp_oneinit,
	.init = nvkm_disp_init,
	.fini = nvkm_disp_fini,
	.intr = nvkm_disp_intr,
	.base.sclass = nvkm_disp_class_get,
};

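/* Construct a display that is embedded inside a chipset-specific
 * structure.  Callers that don't need to embed can use nvkm_disp_new_()
 * below instead.
 */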
int
nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
	       int index, struct nvkm_disp *disp)
{
	disp->func = func;
	INIT_LIST_HEAD(&disp->head);
	INIT_LIST_HEAD(&disp->ior);
	INIT_LIST_HEAD(&disp->outp);
	INIT_LIST_HEAD(&disp->conn);
	return nvkm_engine_ctor(&nvkm_disp, device, index, true, &disp->engine);
}

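/* Allocate and construct a display engine.  A chipset implementation
 * would typically call this from its own constructor; a minimal sketch,
 * with hypothetical foo_disp_* names standing in for a real chipset:
 *
 *	static const struct nvkm_disp_func
 *	foo_disp = {
 *		.intr = foo_disp_intr,
 *		.root = foo_disp_root,
 *	};
 *
 *	int
 *	foo_disp_new(struct nvkm_device *device, int index,
 *		     struct nvkm_disp **pdisp)
 *	{
 *		return nvkm_disp_new_(&foo_disp, device, index, pdisp);
 *	}
 */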
int
nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
	       int index, struct nvkm_disp **pdisp)
{
	if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
		return -ENOMEM;
	return nvkm_disp_ctor(func, device, index, *pdisp);
}