/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "conn.h"
#include "outp.h"

#include <core/client.h>
#include <core/notify.h>
#include <core/oproxy.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>

#include <nvif/class.h>
#include <nvif/cl0046.h>
#include <nvif/event.h>
#include <nvif/unpack.h>
  36. static void
  37. nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int head)
  38. {
  39. struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
  40. disp->func->head.vblank_fini(disp, head);
  41. }
  42. static void
  43. nvkm_disp_vblank_init(struct nvkm_event *event, int type, int head)
  44. {
  45. struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
  46. disp->func->head.vblank_init(disp, head);
  47. }
  48. static int
  49. nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
  50. struct nvkm_notify *notify)
  51. {
  52. struct nvkm_disp *disp =
  53. container_of(notify->event, typeof(*disp), vblank);
  54. union {
  55. struct nvif_notify_head_req_v0 v0;
  56. } *req = data;
  57. int ret = -ENOSYS;
  58. if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, false))) {
  59. notify->size = sizeof(struct nvif_notify_head_rep_v0);
  60. if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) {
  61. notify->types = 1;
  62. notify->index = req->v0.head;
  63. return 0;
  64. }
  65. }
  66. return ret;
  67. }
/* Event ops for per-head vblank notifiers. */
static const struct nvkm_event_func
nvkm_disp_vblank_func = {
	.ctor = nvkm_disp_vblank_ctor,
	.init = nvkm_disp_vblank_init,
	.fini = nvkm_disp_vblank_fini,
};
  74. void
  75. nvkm_disp_vblank(struct nvkm_disp *disp, int head)
  76. {
  77. struct nvif_notify_head_rep_v0 rep = {};
  78. nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep));
  79. }
/* Hotplug notifier constructor: unpack the userspace request and bind
 * the notifier to the requested connector, if it exists and has an HPD
 * event source.
 */
static int
nvkm_disp_hpd_ctor(struct nvkm_object *object, void *data, u32 size,
		   struct nvkm_notify *notify)
{
	struct nvkm_disp *disp =
		container_of(notify->event, typeof(*disp), hpd);
	union {
		struct nvif_notify_conn_req_v0 v0;
	} *req = data;
	struct nvkm_output *outp;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, false))) {
		notify->size = sizeof(struct nvif_notify_conn_rep_v0);
		/* Search output paths for one routed to the requested
		 * connector.  The comma operator preloads the error code
		 * for each test: -ENXIO if the connector isn't found,
		 * -ENODEV if it has no HPD event source.
		 */
		list_for_each_entry(outp, &disp->outp, head) {
			if (ret = -ENXIO, outp->conn->index == req->v0.conn) {
				if (ret = -ENODEV, outp->conn->hpd.event) {
					notify->types = req->v0.mask;
					notify->index = req->v0.conn;
					ret = 0;
				}
				break;
			}
		}
	}

	return ret;
}
/* Event ops for connector hotplug notifiers; init/fini are not needed
 * as HPD interrupts are managed elsewhere.
 */
static const struct nvkm_event_func
nvkm_disp_hpd_func = {
	.ctor = nvkm_disp_hpd_ctor
};
  110. int
  111. nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
  112. {
  113. struct nvkm_disp *disp = nvkm_disp(object->engine);
  114. switch (type) {
  115. case NV04_DISP_NTFY_VBLANK:
  116. *event = &disp->vblank;
  117. return 0;
  118. case NV04_DISP_NTFY_CONN:
  119. *event = &disp->hpd;
  120. return 0;
  121. default:
  122. break;
  123. }
  124. return -EINVAL;
  125. }
  126. static void
  127. nvkm_disp_class_del(struct nvkm_oproxy *oproxy)
  128. {
  129. struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine);
  130. mutex_lock(&disp->engine.subdev.mutex);
  131. if (disp->client == oproxy)
  132. disp->client = NULL;
  133. mutex_unlock(&disp->engine.subdev.mutex);
  134. }
/* Proxy ops: only a late destructor hook, used to clear the
 * exclusive-client slot when the user object goes away.
 */
static const struct nvkm_oproxy_func
nvkm_disp_class = {
	.dtor[1] = nvkm_disp_class_del,
};
  139. static int
  140. nvkm_disp_class_new(struct nvkm_device *device,
  141. const struct nvkm_oclass *oclass, void *data, u32 size,
  142. struct nvkm_object **pobject)
  143. {
  144. const struct nvkm_disp_oclass *sclass = oclass->engn;
  145. struct nvkm_disp *disp = nvkm_disp(oclass->engine);
  146. struct nvkm_oproxy *oproxy;
  147. int ret;
  148. ret = nvkm_oproxy_new_(&nvkm_disp_class, oclass, &oproxy);
  149. if (ret)
  150. return ret;
  151. *pobject = &oproxy->base;
  152. mutex_lock(&disp->engine.subdev.mutex);
  153. if (disp->client) {
  154. mutex_unlock(&disp->engine.subdev.mutex);
  155. return -EBUSY;
  156. }
  157. disp->client = oproxy;
  158. mutex_unlock(&disp->engine.subdev.mutex);
  159. return sclass->ctor(disp, oclass, data, size, &oproxy->object);
  160. }
/* Device-level class ops for the display's root object. */
static const struct nvkm_device_oclass
nvkm_disp_sclass = {
	.ctor = nvkm_disp_class_new,
};
  165. static int
  166. nvkm_disp_class_get(struct nvkm_oclass *oclass, int index,
  167. const struct nvkm_device_oclass **class)
  168. {
  169. struct nvkm_disp *disp = nvkm_disp(oclass->engine);
  170. if (index == 0) {
  171. const struct nvkm_disp_oclass *root = disp->func->root(disp);
  172. oclass->base = root->base;
  173. oclass->engn = root;
  174. *class = &nvkm_disp_sclass;
  175. return 0;
  176. }
  177. return 1;
  178. }
  179. static void
  180. nvkm_disp_intr(struct nvkm_engine *engine)
  181. {
  182. struct nvkm_disp *disp = nvkm_disp(engine);
  183. disp->func->intr(disp);
  184. }
  185. static int
  186. nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
  187. {
  188. struct nvkm_disp *disp = nvkm_disp(engine);
  189. struct nvkm_connector *conn;
  190. struct nvkm_output *outp;
  191. list_for_each_entry(outp, &disp->outp, head) {
  192. nvkm_output_fini(outp);
  193. }
  194. list_for_each_entry(conn, &disp->conn, head) {
  195. nvkm_connector_fini(conn);
  196. }
  197. return 0;
  198. }
  199. static int
  200. nvkm_disp_init(struct nvkm_engine *engine)
  201. {
  202. struct nvkm_disp *disp = nvkm_disp(engine);
  203. struct nvkm_connector *conn;
  204. struct nvkm_output *outp;
  205. list_for_each_entry(conn, &disp->conn, head) {
  206. nvkm_connector_init(conn);
  207. }
  208. list_for_each_entry(outp, &disp->outp, head) {
  209. nvkm_output_init(outp);
  210. }
  211. return 0;
  212. }
  213. static void *
  214. nvkm_disp_dtor(struct nvkm_engine *engine)
  215. {
  216. struct nvkm_disp *disp = nvkm_disp(engine);
  217. struct nvkm_connector *conn;
  218. struct nvkm_output *outp;
  219. void *data = disp;
  220. if (disp->func->dtor)
  221. data = disp->func->dtor(disp);
  222. nvkm_event_fini(&disp->vblank);
  223. nvkm_event_fini(&disp->hpd);
  224. while (!list_empty(&disp->outp)) {
  225. outp = list_first_entry(&disp->outp, typeof(*outp), head);
  226. list_del(&outp->head);
  227. nvkm_output_del(&outp);
  228. }
  229. while (!list_empty(&disp->conn)) {
  230. conn = list_first_entry(&disp->conn, typeof(*conn), head);
  231. list_del(&conn->head);
  232. nvkm_connector_del(&conn);
  233. }
  234. return data;
  235. }
/* Common engine ops shared by all display implementations. */
static const struct nvkm_engine_func
nvkm_disp = {
	.dtor = nvkm_disp_dtor,
	.init = nvkm_disp_init,
	.fini = nvkm_disp_fini,
	.intr = nvkm_disp_intr,
	.base.sclass = nvkm_disp_class_get,
};
/* Common display constructor: registers the engine, builds output-path
 * objects from the VBIOS DCB table, derives connector objects from
 * those outputs, and initialises the hotplug and vblank event sources.
 *
 * The caller owns 'disp' (usually embedded in a chipset-specific
 * structure); on failure the engine dtor is responsible for cleanup.
 */
int
nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
	       int index, int heads, struct nvkm_disp *disp)
{
	struct nvkm_bios *bios = device->bios;
	struct nvkm_output *outp, *outt, *pair;
	struct nvkm_connector *conn;
	struct nvbios_connE connE;
	struct dcb_output dcbE;
	u8 hpd = 0, ver, hdr;	/* hpd: highest connector id + 1 seen so far */
	u32 data;
	int ret, i;

	INIT_LIST_HEAD(&disp->outp);
	INIT_LIST_HEAD(&disp->conn);
	disp->func = func;
	disp->head.nr = heads;

	ret = nvkm_engine_ctor(&nvkm_disp, device, index, true, &disp->engine);
	if (ret)
		return ret;

	/* create output objects for each display path in the vbios */
	i = -1;
	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
		const struct nvkm_disp_func_outp *outps;
		int (*ctor)(struct nvkm_disp *, int, struct dcb_output *,
			    struct nvkm_output **);

		if (dcbE.type == DCB_OUTPUT_UNUSED)
			continue;
		if (dcbE.type == DCB_OUTPUT_EOL)
			break;
		outp = NULL;

		/* Pick the constructor table for on-chip vs external
		 * (bridge chip) encoders.
		 */
		switch (dcbE.location) {
		case 0: outps = &disp->func->outp.internal; break;
		case 1: outps = &disp->func->outp.external; break;
		default:
			nvkm_warn(&disp->engine.subdev,
				  "dcb %d locn %d unknown\n", i, dcbE.location);
			continue;
		}

		/* Select the per-protocol constructor; a NULL slot means
		 * this implementation doesn't support the protocol.
		 */
		switch (dcbE.type) {
		case DCB_OUTPUT_ANALOG: ctor = outps->crt ; break;
		case DCB_OUTPUT_TV    : ctor = outps->tv  ; break;
		case DCB_OUTPUT_TMDS  : ctor = outps->tmds; break;
		case DCB_OUTPUT_LVDS  : ctor = outps->lvds; break;
		case DCB_OUTPUT_DP    : ctor = outps->dp  ; break;
		default:
			nvkm_warn(&disp->engine.subdev,
				  "dcb %d type %d unknown\n", i, dcbE.type);
			continue;
		}

		if (ctor)
			ret = ctor(disp, i, &dcbE, &outp);
		else
			ret = -ENODEV;

		if (ret) {
			/* -ENODEV is a quiet "not supported"; anything
			 * else is a real failure worth logging.
			 */
			if (ret == -ENODEV) {
				nvkm_debug(&disp->engine.subdev,
					   "dcb %d %d/%d not supported\n",
					   i, dcbE.location, dcbE.type);
				continue;
			}
			nvkm_error(&disp->engine.subdev,
				   "failed to create output %d\n", i);
			nvkm_output_del(&outp);
			continue;
		}

		list_add_tail(&outp->head, &disp->outp);
		hpd = max(hpd, (u8)(dcbE.connector + 1));
	}

	/* create connector objects based on the outputs we support */
	list_for_each_entry_safe(outp, outt, &disp->outp, head) {
		/* bios data *should* give us the most useful information */
		data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
				     &connE);

		/* no bios connector data... */
		if (!data) {
			/* heuristic: anything with the same ccb index is
			 * considered to be on the same connector, any
			 * output path without an associated ccb entry will
			 * be put on its own connector
			 */
			int ccb_index = outp->info.i2c_index;
			if (ccb_index != 0xf) {
				list_for_each_entry(pair, &disp->outp, head) {
					if (pair->info.i2c_index == ccb_index) {
						outp->conn = pair->conn;
						break;
					}
				}
			}

			/* connector shared with another output path */
			if (outp->conn)
				continue;

			memset(&connE, 0x00, sizeof(connE));
			connE.type = DCB_CONNECTOR_NONE;
			i = -1;	/* i == -1 marks a synthesised connector */
		} else {
			i = outp->info.connector;
		}

		/* check that we haven't already created this connector */
		list_for_each_entry(conn, &disp->conn, head) {
			if (conn->index == outp->info.connector) {
				outp->conn = conn;
				break;
			}
		}

		if (outp->conn)
			continue;

		/* apparently we need to create a new one! */
		ret = nvkm_connector_new(disp, i, &connE, &outp->conn);
		if (ret) {
			nvkm_error(&disp->engine.subdev,
				   "failed to create output %d conn: %d\n",
				   outp->index, ret);
			/* Drop the orphaned output path entirely; the safe
			 * iterator allows deletion mid-walk.
			 */
			nvkm_connector_del(&outp->conn);
			list_del(&outp->head);
			nvkm_output_del(&outp);
			continue;
		}

		list_add_tail(&outp->conn->head, &disp->conn);
	}

	/* 3 hotplug event types (per nvif mask bits), indexed by connector. */
	ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd);
	if (ret)
		return ret;

	/* 1 vblank event type, indexed by head. */
	ret = nvkm_event_init(&nvkm_disp_vblank_func, 1, heads, &disp->vblank);
	if (ret)
		return ret;

	return 0;
}
  372. int
  373. nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
  374. int index, int heads, struct nvkm_disp **pdisp)
  375. {
  376. if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
  377. return -ENOMEM;
  378. return nvkm_disp_ctor(func, device, index, heads, *pdisp);
  379. }