/*
 * Copyright (C) STMicroelectronics SA 2014
 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
 *          Fabien Dessenne <fabien.dessenne@st.com>
 *          for STMicroelectronics.
 * License terms: GNU General Public License (GPL), version 2
 */
#include <linux/clk.h>
#include <linux/dma-mapping.h>

#include "sti_compositor.h"
#include "sti_gdp.h"
#include "sti_layer.h"
#include "sti_vtg.h"

/* Flags OR'ed into the gam_gdp_ctl control word */
#define ALPHASWITCH     BIT(6)
#define ENA_COLOR_FILL  BIT(8)
#define BIGNOTLITTLE    BIT(23)
#define WAIT_NEXT_VSYNC BIT(31)

/* GDP color formats (hardware format codes for gam_gdp_ctl) */
#define GDP_RGB565      0x00
#define GDP_RGB888      0x01
#define GDP_RGB888_32   0x02
#define GDP_XBGR8888    (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB8565    0x04
#define GDP_ARGB8888    0x05
#define GDP_ABGR8888    (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB1555    0x06
#define GDP_ARGB4444    0x07
#define GDP_CLUT8       0x0B
#define GDP_YCBR888     0x10
#define GDP_YCBR422R    0x12
#define GDP_AYCBR8888   0x15

/*
 * GDP register offsets, relative to the layer's register base.
 * They also describe the in-memory layout of struct sti_gdp_node below.
 */
#define GAM_GDP_CTL_OFFSET      0x00
#define GAM_GDP_AGC_OFFSET      0x04
#define GAM_GDP_VPO_OFFSET      0x0C
#define GAM_GDP_VPS_OFFSET      0x10
#define GAM_GDP_PML_OFFSET      0x14
#define GAM_GDP_PMP_OFFSET      0x18
#define GAM_GDP_SIZE_OFFSET     0x1C
#define GAM_GDP_NVN_OFFSET      0x24
#define GAM_GDP_KEY1_OFFSET     0x28
#define GAM_GDP_KEY2_OFFSET     0x2C
#define GAM_GDP_PPT_OFFSET      0x34
#define GAM_GDP_CML_OFFSET      0x3C
#define GAM_GDP_MST_OFFSET      0x68

#define GAM_GDP_ALPHARANGE_255  BIT(5)
#define GAM_GDP_AGC_FULL_RANGE  0x00808080
#define GAM_GDP_PPT_IGNORE      (BIT(1) | BIT(0))
/* Source width/height are clamped to this 11-bit maximum */
#define GAM_GDP_SIZE_MAX        0x7FF

#define GDP_NODE_NB_BANK        2
#define GDP_NODE_PER_FIELD      2
/*
 * Hardware node descriptor, read directly by the GDP via DMA.
 * The field order mirrors the GAM_GDP_*_OFFSET register map, with the
 * reservedN members padding the gaps, so the layout must not be changed.
 */
struct sti_gdp_node {
	u32 gam_gdp_ctl;	/* control: format code + flags (WAIT_NEXT_VSYNC, ...) */
	u32 gam_gdp_agc;	/* alpha gain constant (set to GAM_GDP_AGC_FULL_RANGE) */
	u32 reserved1;
	u32 gam_gdp_vpo;	/* viewport origin: (ydo << 16) | xdo */
	u32 gam_gdp_vps;	/* viewport stop: (yds << 16) | xds */
	u32 gam_gdp_pml;	/* pixmap memory location (DMA address of pixels) */
	u32 gam_gdp_pmp;	/* pixmap pitch in bytes */
	u32 gam_gdp_size;	/* source size: (height << 16) | width */
	u32 reserved2;
	u32 gam_gdp_nvn;	/* DMA address of the next node in the chain */
	u32 gam_gdp_key1;	/* color key 1 (not programmed here) */
	u32 gam_gdp_key2;	/* color key 2 (not programmed here) */
	u32 reserved3;
	u32 gam_gdp_ppt;	/* properties: GAM_GDP_PPT_IGNORE makes mixer skip node */
	u32 reserved4;
	u32 gam_gdp_cml;	/* CLUT memory location (not programmed here) */
};
/*
 * One bank of GDP nodes: a top-field/bottom-field pair, each tracked by
 * both its CPU virtual address (for writes) and its DMA address (for the
 * hardware NVN chaining).
 */
struct sti_gdp_node_list {
	struct sti_gdp_node *top_field;		/* CPU address of top-field node */
	dma_addr_t top_field_paddr;		/* DMA address of top-field node */
	struct sti_gdp_node *btm_field;		/* CPU address of bottom-field node */
	dma_addr_t btm_field_paddr;		/* DMA address of bottom-field node */
};
/**
 * STI GDP structure
 *
 * @layer:           layer structure (must stay first: to_sti_gdp() and
 *                   sti_gdp_create() rely on it)
 * @clk_pix:         pixel clock for the current gdp
 * @clk_main_parent: gdp parent clock if main path used
 * @clk_aux_parent:  gdp parent clock if aux path used
 * @vtg_field_nb:    callback for VTG FIELD (top or bottom) notification
 * @is_curr_top:     true if the current node processed is the top field
 * @node_list:       array of node list (double-banked so one bank can be
 *                   rebuilt while the HW reads the other)
 */
struct sti_gdp {
	struct sti_layer layer;
	struct clk *clk_pix;
	struct clk *clk_main_parent;
	struct clk *clk_aux_parent;
	struct notifier_block vtg_field_nb;
	bool is_curr_top;
	struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
};

/* Upcast from the embedded layer to its containing sti_gdp */
#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)
/* DRM fourcc codes the GDP can scan out; see sti_gdp_fourcc2format() */
static const uint32_t gdp_supported_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_AYUV,
	DRM_FORMAT_YUV444,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_C8,
};
/* get_formats hook: return the static table of supported DRM fourccs */
static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
{
	return gdp_supported_formats;
}
/* get_nb_formats hook: number of entries in gdp_supported_formats */
static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
{
	return ARRAY_SIZE(gdp_supported_formats);
}
  118. static int sti_gdp_fourcc2format(int fourcc)
  119. {
  120. switch (fourcc) {
  121. case DRM_FORMAT_XRGB8888:
  122. return GDP_RGB888_32;
  123. case DRM_FORMAT_XBGR8888:
  124. return GDP_XBGR8888;
  125. case DRM_FORMAT_ARGB8888:
  126. return GDP_ARGB8888;
  127. case DRM_FORMAT_ABGR8888:
  128. return GDP_ABGR8888;
  129. case DRM_FORMAT_ARGB4444:
  130. return GDP_ARGB4444;
  131. case DRM_FORMAT_ARGB1555:
  132. return GDP_ARGB1555;
  133. case DRM_FORMAT_RGB565:
  134. return GDP_RGB565;
  135. case DRM_FORMAT_RGB888:
  136. return GDP_RGB888;
  137. case DRM_FORMAT_AYUV:
  138. return GDP_AYCBR8888;
  139. case DRM_FORMAT_YUV444:
  140. return GDP_YCBR888;
  141. case DRM_FORMAT_VYUY:
  142. return GDP_YCBR422R;
  143. case DRM_FORMAT_C8:
  144. return GDP_CLUT8;
  145. }
  146. return -1;
  147. }
  148. static int sti_gdp_get_alpharange(int format)
  149. {
  150. switch (format) {
  151. case GDP_ARGB8565:
  152. case GDP_ARGB8888:
  153. case GDP_AYCBR8888:
  154. case GDP_ABGR8888:
  155. return GAM_GDP_ALPHARANGE_255;
  156. }
  157. return 0;
  158. }
  159. /**
  160. * sti_gdp_get_free_nodes
  161. * @layer: gdp layer
  162. *
  163. * Look for a GDP node list that is not currently read by the HW.
  164. *
  165. * RETURNS:
  166. * Pointer to the free GDP node list
  167. */
  168. static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
  169. {
  170. int hw_nvn;
  171. struct sti_gdp *gdp = to_sti_gdp(layer);
  172. unsigned int i;
  173. hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
  174. if (!hw_nvn)
  175. goto end;
  176. for (i = 0; i < GDP_NODE_NB_BANK; i++)
  177. if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
  178. (hw_nvn != gdp->node_list[i].top_field_paddr))
  179. return &gdp->node_list[i];
  180. /* in hazardious cases restart with the first node */
  181. DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
  182. sti_layer_to_str(layer), hw_nvn);
  183. end:
  184. return &gdp->node_list[0];
  185. }
  186. /**
  187. * sti_gdp_get_current_nodes
  188. * @layer: GDP layer
  189. *
  190. * Look for GDP nodes that are currently read by the HW.
  191. *
  192. * RETURNS:
  193. * Pointer to the current GDP node list
  194. */
  195. static
  196. struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
  197. {
  198. int hw_nvn;
  199. struct sti_gdp *gdp = to_sti_gdp(layer);
  200. unsigned int i;
  201. hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
  202. if (!hw_nvn)
  203. goto end;
  204. for (i = 0; i < GDP_NODE_NB_BANK; i++)
  205. if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
  206. (hw_nvn == gdp->node_list[i].top_field_paddr))
  207. return &gdp->node_list[i];
  208. end:
  209. DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
  210. hw_nvn, sti_layer_to_str(layer));
  211. return NULL;
  212. }
/**
 * sti_gdp_prepare_layer
 * @layer: gdp layer
 * @first_prepare: true if it is the first time this function is called
 *
 * Update the free GDP node list according to the layer properties: build
 * the top-field node from the layer parameters, clone it into the bottom
 * field, chain the two nodes together, and on first call register the VTG
 * notifier and set up the pixel clock.
 *
 * RETURNS:
 * 0 on success, 1 on error (unsupported format, notifier or clock failure).
 */
static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
{
	struct sti_gdp_node_list *list;
	struct sti_gdp_node *top_field, *btm_field;
	struct drm_display_mode *mode = layer->mode;
	struct device *dev = layer->dev;
	struct sti_gdp *gdp = to_sti_gdp(layer);
	struct sti_compositor *compo = dev_get_drvdata(dev);
	int format;
	unsigned int depth, bpp;
	int rate = mode->clock * 1000;	/* mode->clock is kHz; clk API wants Hz */
	int res;
	u32 ydo, xdo, yds, xds;

	/* Work on the bank the HW is not currently reading */
	list = sti_gdp_get_free_nodes(layer);
	top_field = list->top_field;
	btm_field = list->btm_field;

	dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
			sti_layer_to_str(layer), top_field, btm_field);

	/* Build the top field from layer params */
	top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
	top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
	format = sti_gdp_fourcc2format(layer->format);
	if (format == -1) {
		DRM_ERROR("Format not supported by GDP %.4s\n",
				(char *)&layer->format);
		return 1;
	}
	top_field->gam_gdp_ctl |= format;
	top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
	/* clear the 'skip this node' flag a previous disable may have set */
	top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;

	/* pixel memory location: fb base + plane offset + (src_x, src_y) */
	drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
	top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
	top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
	top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];

	/* input parameters */
	top_field->gam_gdp_pmp = layer->pitches[0];
	top_field->gam_gdp_size =
			clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
			clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);

	/* output parameters: destination rectangle in VTG line/pixel counts */
	ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
	yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
	xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
	xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
	top_field->gam_gdp_vpo = (ydo << 16) | xdo;
	top_field->gam_gdp_vps = (yds << 16) | xds;

	/* Same content and chained together */
	memcpy(btm_field, top_field, sizeof(*btm_field));
	top_field->gam_gdp_nvn = list->btm_field_paddr;
	btm_field->gam_gdp_nvn = list->top_field_paddr;

	/* Interlaced mode: bottom field starts one line (pitch) further */
	if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
		btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
				layer->pitches[0];

	if (first_prepare) {
		/* Register gdp callback */
		if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
				compo->vtg_main : compo->vtg_aux,
				&gdp->vtg_field_nb, layer->mixer_id)) {
			DRM_ERROR("Cannot register VTG notifier\n");
			return 1;
		}

		/* Set and enable gdp clock */
		if (gdp->clk_pix) {
			struct clk *clkp;
			/* According to the mixer used, the gdp pixel clock
			 * should have a different parent clock. */
			if (layer->mixer_id == STI_MIXER_MAIN)
				clkp = gdp->clk_main_parent;
			else
				clkp = gdp->clk_aux_parent;

			if (clkp)
				clk_set_parent(gdp->clk_pix, clkp);

			res = clk_set_rate(gdp->clk_pix, rate);
			if (res < 0) {
				DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
						rate);
				return 1;
			}

			if (clk_prepare_enable(gdp->clk_pix)) {
				DRM_ERROR("Failed to prepare/enable gdp\n");
				return 1;
			}
		}
	}
	return 0;
}
/**
 * sti_gdp_commit_layer
 * @layer: gdp layer
 *
 * Update the NVN field of the 'right' field of the current GDP node (being
 * used by the HW) with the address of the updated ('free') top field GDP node.
 * - In interlaced mode the 'right' field is the bottom field as we update
 *   frames starting from their top field
 * - In progressive mode, we update both bottom and top fields which are
 *   equal nodes.
 * At the next VSYNC, the updated node list will be used by the HW.
 *
 * RETURNS:
 * 0 on success.
 */
static int sti_gdp_commit_layer(struct sti_layer *layer)
{
	struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
	struct sti_gdp_node *updated_top_node = updated_list->top_field;
	struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
	struct sti_gdp *gdp = to_sti_gdp(layer);
	u32 dma_updated_top = updated_list->top_field_paddr;
	u32 dma_updated_btm = updated_list->btm_field_paddr;
	struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);

	dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
			sti_layer_to_str(layer),
			updated_top_node, updated_btm_node);
	dev_dbg(layer->dev, "Current NVN:0x%X\n",
			readl(layer->regs + GAM_GDP_NVN_OFFSET));
	dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
			(unsigned long)layer->paddr,
			readl(layer->regs + GAM_GDP_PML_OFFSET));

	if (curr_list == NULL) {
		/* First update or invalid node should directly write in the
		 * hw register */
		DRM_DEBUG_DRIVER("%s first update (or invalid node)",
				sti_layer_to_str(layer));
		/* pick the field matching the one currently being scanned */
		writel(gdp->is_curr_top == true ?
				dma_updated_btm : dma_updated_top,
				layer->regs + GAM_GDP_NVN_OFFSET);
		return 0;
	}

	if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (gdp->is_curr_top == true) {
			/* Do not update in the middle of the frame, but
			 * postpone the update after the bottom field has
			 * been displayed */
			curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
		} else {
			/* Direct update to avoid one frame delay */
			writel(dma_updated_top,
					layer->regs + GAM_GDP_NVN_OFFSET);
		}
	} else {
		/* Direct update for progressive to avoid one frame delay */
		writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
	}

	return 0;
}
  370. /**
  371. * sti_gdp_disable_layer
  372. * @lay: gdp layer
  373. *
  374. * Disable a GDP.
  375. *
  376. * RETURNS:
  377. * 0 on success.
  378. */
  379. static int sti_gdp_disable_layer(struct sti_layer *layer)
  380. {
  381. unsigned int i;
  382. struct sti_gdp *gdp = to_sti_gdp(layer);
  383. struct sti_compositor *compo = dev_get_drvdata(layer->dev);
  384. DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
  385. /* Set the nodes as 'to be ignored on mixer' */
  386. for (i = 0; i < GDP_NODE_NB_BANK; i++) {
  387. gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
  388. gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
  389. }
  390. if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ?
  391. compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
  392. DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
  393. if (gdp->clk_pix)
  394. clk_disable_unprepare(gdp->clk_pix);
  395. return 0;
  396. }
  397. /**
  398. * sti_gdp_field_cb
  399. * @nb: notifier block
  400. * @event: event message
  401. * @data: private data
  402. *
  403. * Handle VTG top field and bottom field event.
  404. *
  405. * RETURNS:
  406. * 0 on success.
  407. */
  408. int sti_gdp_field_cb(struct notifier_block *nb,
  409. unsigned long event, void *data)
  410. {
  411. struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
  412. switch (event) {
  413. case VTG_TOP_FIELD_EVENT:
  414. gdp->is_curr_top = true;
  415. break;
  416. case VTG_BOTTOM_FIELD_EVENT:
  417. gdp->is_curr_top = false;
  418. break;
  419. default:
  420. DRM_ERROR("unsupported event: %lu\n", event);
  421. break;
  422. }
  423. return 0;
  424. }
/*
 * One-time GDP setup: allocate and carve the DMA node banks, and on
 * STiH407 fetch the per-GDP pixel clock and its possible parents.
 */
static void sti_gdp_init(struct sti_layer *layer)
{
	struct sti_gdp *gdp = to_sti_gdp(layer);
	struct device_node *np = layer->dev->of_node;
	dma_addr_t dma_addr;
	void *base;
	unsigned int i, size;

	/* Allocate all the nodes within a single memory page */
	size = sizeof(struct sti_gdp_node) *
			GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;

	base = dma_alloc_writecombine(layer->dev,
			size, &dma_addr, GFP_KERNEL | GFP_DMA);
	if (!base) {
		DRM_ERROR("Failed to allocate memory for GDP node\n");
		return;
	}
	memset(base, 0, size);

	/* Carve the allocation into top/bottom node pairs, one pair per
	 * bank; nodes must be 16-byte aligned for the HW.
	 * NOTE(review): on alignment failure the buffer is not freed and
	 * the layer is left partially initialized — confirm acceptable. */
	for (i = 0; i < GDP_NODE_NB_BANK; i++) {
		if (dma_addr & 0xF) {
			DRM_ERROR("Mem alignment failed\n");
			return;
		}
		gdp->node_list[i].top_field = base;
		gdp->node_list[i].top_field_paddr = dma_addr;

		DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
		base += sizeof(struct sti_gdp_node);
		dma_addr += sizeof(struct sti_gdp_node);

		if (dma_addr & 0xF) {
			DRM_ERROR("Mem alignment failed\n");
			return;
		}
		gdp->node_list[i].btm_field = base;
		gdp->node_list[i].btm_field_paddr = dma_addr;

		DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
		base += sizeof(struct sti_gdp_node);
		dma_addr += sizeof(struct sti_gdp_node);
	}

	if (of_device_is_compatible(np, "st,stih407-compositor")) {
		/* GDP of STiH407 chip have its own pixel clock */
		char *clk_name;

		switch (layer->desc) {
		case STI_GDP_0:
			clk_name = "pix_gdp1";
			break;
		case STI_GDP_1:
			clk_name = "pix_gdp2";
			break;
		case STI_GDP_2:
			clk_name = "pix_gdp3";
			break;
		case STI_GDP_3:
			clk_name = "pix_gdp4";
			break;
		default:
			DRM_ERROR("GDP id not recognized\n");
			return;
		}

		/* failures are logged but non-fatal: gdp->clk_* stay as the
		 * IS_ERR() values devm_clk_get() returned */
		gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
		if (IS_ERR(gdp->clk_pix))
			DRM_ERROR("Cannot get %s clock\n", clk_name);

		gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent");
		if (IS_ERR(gdp->clk_main_parent))
			DRM_ERROR("Cannot get main_parent clock\n");

		gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent");
		if (IS_ERR(gdp->clk_aux_parent))
			DRM_ERROR("Cannot get aux_parent clock\n");
	}
}
/* Layer operations provided by a GDP layer */
static const struct sti_layer_funcs gdp_ops = {
	.get_formats = sti_gdp_get_formats,
	.get_nb_formats = sti_gdp_get_nb_formats,
	.init = sti_gdp_init,
	.prepare = sti_gdp_prepare_layer,
	.commit = sti_gdp_commit_layer,
	.disable = sti_gdp_disable_layer,
};
  501. struct sti_layer *sti_gdp_create(struct device *dev, int id)
  502. {
  503. struct sti_gdp *gdp;
  504. gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
  505. if (!gdp) {
  506. DRM_ERROR("Failed to allocate memory for GDP\n");
  507. return NULL;
  508. }
  509. gdp->layer.ops = &gdp_ops;
  510. gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
  511. return (struct sti_layer *)gdp;
  512. }