/* hfi_venus.c */
/*
 * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
 * Copyright (C) 2017 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>

#include "core.h"
#include "hfi_cmds.h"
#include "hfi_msgs.h"
#include "hfi_venus.h"
#include "hfi_venus_io.h"
/* Masks for the fields packed into a queue header's type word */
#define HFI_MASK_QHDR_TX_TYPE		0xff000000
#define HFI_MASK_QHDR_RX_TYPE		0x00ff0000
#define HFI_MASK_QHDR_PRI_TYPE		0x0000ff00
#define HFI_MASK_QHDR_ID_TYPE		0x000000ff

/* Queue identifiers OR-ed into the queue header type word */
#define HFI_HOST_TO_CTRL_CMD_Q		0
#define HFI_CTRL_TO_HOST_MSG_Q		1
#define HFI_CTRL_TO_HOST_DBG_Q		2
#define HFI_MASK_QHDR_STATUS		0x000000ff

/* Three shared interface queues: command, message and debug */
#define IFACEQ_NUM			3
#define IFACEQ_CMD_IDX			0
#define IFACEQ_MSG_IDX			1
#define IFACEQ_DBG_IDX			2
#define IFACEQ_MAX_BUF_COUNT		50
#define IFACEQ_MAX_PARALLEL_CLNTS	16
/* Default queue header type word before the queue id is OR-ed in */
#define IFACEQ_DFLT_QHDR		0x01010000

#define POLL_INTERVAL_US		50

/* Packet size bounds used for sizing queues and scratch buffers */
#define IFACEQ_MAX_PKT_SIZE		1024
#define IFACEQ_MED_PKT_SIZE		768
#define IFACEQ_MIN_PKT_SIZE		8
#define IFACEQ_VAR_SMALL_PKT_SIZE	100
#define IFACEQ_VAR_LARGE_PKT_SIZE	512
#define IFACEQ_VAR_HUGE_PKT_SIZE	(1024 * 12)
/* Remote (TrustZone) video subsystem states passed to qcom_scm_set_remote_state() */
enum tzbsp_video_state {
	TZBSP_VIDEO_STATE_SUSPEND = 0,
	TZBSP_VIDEO_STATE_RESUME
};
/* Header at the start of the shared-memory region describing all queues */
struct hfi_queue_table_header {
	u32 version;		/* table layout version, 0 here */
	u32 size;		/* total size of table header + queue headers */
	u32 qhdr0_offset;	/* offset of first struct hfi_queue_header */
	u32 qhdr_size;		/* sizeof(struct hfi_queue_header) */
	u32 num_q;		/* number of queue headers that follow */
	u32 num_active_q;	/* number of queues currently in use */
};
/*
 * Per-queue header shared with the firmware. Indices are in 32-bit words.
 * Both host and firmware read/write these fields, hence the explicit
 * memory barriers around accesses in the read/write paths below.
 */
struct hfi_queue_header {
	u32 status;		/* 1 when the queue is active */
	u32 start_addr;		/* device address of the queue payload */
	u32 type;		/* IFACEQ_DFLT_QHDR | queue id */
	u32 q_size;		/* queue size in 32-bit words */
	u32 pkt_size;		/* 0 means variable-sized packets */
	u32 pkt_drop_cnt;
	u32 rx_wm;		/* receive watermark */
	u32 tx_wm;		/* transmit watermark */
	u32 rx_req;		/* set to request an interrupt on receive */
	u32 tx_req;		/* set to request an interrupt on transmit */
	u32 rx_irq_status;
	u32 tx_irq_status;
	u32 read_idx;		/* consumer index, in words */
	u32 write_idx;		/* producer index, in words */
};
/* Size of the queue table: one table header plus IFACEQ_NUM queue headers */
#define IFACEQ_TABLE_SIZE	\
	(sizeof(struct hfi_queue_table_header) +	\
	 sizeof(struct hfi_queue_header) * IFACEQ_NUM)

/* Payload size of a single interface queue */
#define IFACEQ_QUEUE_SIZE	(IFACEQ_MAX_PKT_SIZE *	\
	IFACEQ_MAX_BUF_COUNT * IFACEQ_MAX_PARALLEL_CLNTS)

/* Address of queue header @i within the table at kernel VA @ptr */
#define IFACEQ_GET_QHDR_START_ADDR(ptr, i)	\
	(void *)(((ptr) + sizeof(struct hfi_queue_table_header)) +	\
		((i) * sizeof(struct hfi_queue_header)))

#define QDSS_SIZE		SZ_4K
#define SFR_SIZE		SZ_4K
#define QUEUE_SIZE		\
	(IFACEQ_TABLE_SIZE + (IFACEQ_QUEUE_SIZE * IFACEQ_NUM))

/* All shared regions are 4K aligned; the whole window is 1M aligned */
#define ALIGNED_QDSS_SIZE	ALIGN(QDSS_SIZE, SZ_4K)
#define ALIGNED_SFR_SIZE	ALIGN(SFR_SIZE, SZ_4K)
#define ALIGNED_QUEUE_SIZE	ALIGN(QUEUE_SIZE, SZ_4K)
#define SHARED_QSIZE		ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
				      ALIGNED_QDSS_SIZE, SZ_1M)
/* Descriptor for a DMA-coherent allocation shared with the firmware */
struct mem_desc {
	dma_addr_t da;		/* device address */
	void *kva;		/* kernel virtual address */
	u32 size;		/* allocation size, 4K aligned */
	unsigned long attrs;	/* DMA attrs used at alloc time, for free */
};
/* One interface queue: its shared header plus its payload memory */
struct iface_queue {
	struct hfi_queue_header *qhdr;	/* points into the ifaceq table */
	struct mem_desc qmem;		/* queue payload region */
};
/* Host-side view of the firmware lifecycle */
enum venus_state {
	VENUS_STATE_DEINIT = 1,
	VENUS_STATE_INIT,
};
/* Per-device HFI state, stored as the core's private HFI pointer */
struct venus_hfi_device {
	struct venus_core *core;
	u32 irq_status;			/* latched wrapper interrupt status */
	u32 last_packet_type;		/* last command written to cmd queue */
	bool power_enabled;
	bool suspended;
	enum venus_state state;
	/* serialize read / write to the shared memory */
	struct mutex lock;
	struct completion pwr_collapse_prep;
	struct completion release_resource;
	struct mem_desc ifaceq_table;	/* queue table + all queue payloads */
	struct mem_desc sfr;		/* firmware status/failure report buffer */
	struct iface_queue queues[IFACEQ_NUM];
	u8 pkt_buf[IFACEQ_VAR_HUGE_PKT_SIZE];	/* msg queue scratch buffer */
	u8 dbg_buf[IFACEQ_VAR_HUGE_PKT_SIZE];	/* dbg queue scratch buffer */
};
/* Debug / behavior knobs (compile-time defaults, not exported as params here) */
static bool venus_pkt_debug;		/* hex-dump every queue packet */
static int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
static bool venus_sys_idle_indicator;	/* forced on for 4xx in set_default_properties */
static bool venus_fw_low_power_mode = true;
static int venus_hw_rsp_timeout = 1000;	/* firmware response timeout, ms */
static bool venus_fw_coverage;
/* Update the firmware lifecycle state under the shared-memory lock. */
static void venus_set_state(struct venus_hfi_device *hdev,
			    enum venus_state state)
{
	mutex_lock(&hdev->lock);
	hdev->state = state;
	mutex_unlock(&hdev->lock);
}
/* Queue traffic is only allowed while the firmware is not deinitialized. */
static bool venus_is_valid_state(struct venus_hfi_device *hdev)
{
	return hdev->state != VENUS_STATE_DEINIT;
}
  143. static void venus_dump_packet(struct venus_hfi_device *hdev, const void *packet)
  144. {
  145. size_t pkt_size = *(u32 *)packet;
  146. if (!venus_pkt_debug)
  147. return;
  148. print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, packet,
  149. pkt_size, true);
  150. }
/*
 * venus_write_queue() - copy one HFI packet into a shared ring queue
 * @hdev:	HFI device
 * @queue:	destination queue (normally the host->firmware command queue)
 * @packet:	packet to write; first u32 is the packet size in bytes
 * @rx_req:	out: 1 if the firmware asked to be interrupted on receive
 *
 * Indices in the queue header are in 32-bit words and wrap at q_size.
 * Memory barriers order our accesses against the firmware's, which reads
 * and writes the same header fields.
 *
 * Return: 0 on success, -EINVAL on a bad queue/packet, -ENOSPC when the
 * queue is full (tx_req is set so the firmware signals us when draining).
 */
static int venus_write_queue(struct venus_hfi_device *hdev,
			     struct iface_queue *queue,
			     void *packet, u32 *rx_req)
{
	struct hfi_queue_header *qhdr;
	u32 dwords, new_wr_idx;
	u32 empty_space, rd_idx, wr_idx, qsize;
	u32 *wr_ptr;

	if (!queue->qmem.kva)
		return -EINVAL;

	qhdr = queue->qhdr;
	if (!qhdr)
		return -EINVAL;

	venus_dump_packet(hdev, packet);

	/* packet size in bytes -> size in 32-bit words */
	dwords = (*(u32 *)packet) >> 2;
	if (!dwords)
		return -EINVAL;

	rd_idx = qhdr->read_idx;
	wr_idx = qhdr->write_idx;
	qsize = qhdr->q_size;
	/* ensure rd/wr indices's are read from memory */
	rmb();

	if (wr_idx >= rd_idx)
		empty_space = qsize - (wr_idx - rd_idx);
	else
		empty_space = rd_idx - wr_idx;

	if (empty_space <= dwords) {
		/* queue full: ask firmware to interrupt us when it drains */
		qhdr->tx_req = 1;
		/* ensure tx_req is updated in memory */
		wmb();
		return -ENOSPC;
	}

	qhdr->tx_req = 0;
	/* ensure tx_req is updated in memory */
	wmb();

	new_wr_idx = wr_idx + dwords;
	wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
	if (new_wr_idx < qsize) {
		memcpy(wr_ptr, packet, dwords << 2);
	} else {
		/* write wraps: split the copy at the end of the ring */
		size_t len;

		new_wr_idx -= qsize;
		len = (dwords - new_wr_idx) << 2;
		memcpy(wr_ptr, packet, len);
		memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2);
	}

	/* make sure packet is written before updating the write index */
	wmb();

	qhdr->write_idx = new_wr_idx;
	*rx_req = qhdr->rx_req ? 1 : 0;
	/* make sure write index is updated before an interrupt is raised */
	mb();

	return 0;
}
/*
 * venus_read_queue() - copy one HFI packet out of a shared ring queue
 * @hdev:	HFI device
 * @queue:	source queue (message or debug queue)
 * @pkt:	destination buffer, at least IFACEQ_VAR_HUGE_PKT_SIZE bytes
 * @tx_req:	out: 1 if the firmware asked to be interrupted after we read
 *
 * Return: 0 on success, -EINVAL on a bad queue/packet header, -ENODATA
 * when the queue is empty, -EBADMSG when a corrupt packet was dropped
 * (the queue is then fast-forwarded to the firmware's write index).
 */
static int venus_read_queue(struct venus_hfi_device *hdev,
			    struct iface_queue *queue, void *pkt, u32 *tx_req)
{
	struct hfi_queue_header *qhdr;
	u32 dwords, new_rd_idx;
	u32 rd_idx, wr_idx, type, qsize;
	u32 *rd_ptr;
	u32 recv_request = 0;
	int ret = 0;

	if (!queue->qmem.kva)
		return -EINVAL;

	qhdr = queue->qhdr;
	if (!qhdr)
		return -EINVAL;

	type = qhdr->type;
	rd_idx = qhdr->read_idx;
	wr_idx = qhdr->write_idx;
	qsize = qhdr->q_size;

	/* make sure data is valid before using it */
	rmb();

	/*
	 * Do not set receive request for debug queue, if set, Venus generates
	 * interrupt for debug messages even when there is no response message
	 * available. In general debug queue will not become full as it is being
	 * emptied out for every interrupt from Venus. Venus will anyway
	 * generates interrupt if it is full.
	 */
	if (type & HFI_CTRL_TO_HOST_MSG_Q)
		recv_request = 1;

	if (rd_idx == wr_idx) {
		/* queue empty: request an interrupt for the next message */
		qhdr->rx_req = recv_request;
		*tx_req = 0;
		/* update rx_req field in memory */
		wmb();
		return -ENODATA;
	}

	rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
	/* first word of the packet is its size in bytes */
	dwords = *rd_ptr >> 2;
	if (!dwords)
		return -EINVAL;

	new_rd_idx = rd_idx + dwords;
	/* sanity-check packet size and read index before copying */
	if (((dwords << 2) <= IFACEQ_VAR_HUGE_PKT_SIZE) && rd_idx <= qsize) {
		if (new_rd_idx < qsize) {
			memcpy(pkt, rd_ptr, dwords << 2);
		} else {
			/* read wraps: split the copy at the end of the ring */
			size_t len;

			new_rd_idx -= qsize;
			len = (dwords - new_rd_idx) << 2;
			memcpy(pkt, rd_ptr, len);
			memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
		}
	} else {
		/* bad packet received, dropping */
		new_rd_idx = qhdr->write_idx;
		ret = -EBADMSG;
	}

	/* ensure the packet is read before updating read index */
	rmb();

	qhdr->read_idx = new_rd_idx;
	/* ensure updating read index */
	wmb();

	rd_idx = qhdr->read_idx;
	wr_idx = qhdr->write_idx;
	/* ensure rd/wr indices are read from memory */
	rmb();

	/* only re-arm rx_req if the queue is now empty */
	if (rd_idx != wr_idx)
		qhdr->rx_req = 0;
	else
		qhdr->rx_req = recv_request;

	*tx_req = qhdr->tx_req ? 1 : 0;

	/* ensure rx_req is stored to memory and tx_req is loaded from memory */
	mb();

	venus_dump_packet(hdev, pkt);

	return ret;
}
  280. static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
  281. u32 size)
  282. {
  283. struct device *dev = hdev->core->dev;
  284. desc->attrs = DMA_ATTR_WRITE_COMBINE;
  285. desc->size = ALIGN(size, SZ_4K);
  286. desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
  287. desc->attrs);
  288. if (!desc->kva)
  289. return -ENOMEM;
  290. return 0;
  291. }
  292. static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem)
  293. {
  294. struct device *dev = hdev->core->dev;
  295. dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs);
  296. }
  297. static void venus_writel(struct venus_hfi_device *hdev, u32 reg, u32 value)
  298. {
  299. writel(value, hdev->core->base + reg);
  300. }
  301. static u32 venus_readl(struct venus_hfi_device *hdev, u32 reg)
  302. {
  303. return readl(hdev->core->base + reg);
  304. }
  305. static void venus_set_registers(struct venus_hfi_device *hdev)
  306. {
  307. const struct venus_resources *res = hdev->core->res;
  308. const struct reg_val *tbl = res->reg_tbl;
  309. unsigned int count = res->reg_tbl_size;
  310. unsigned int i;
  311. for (i = 0; i < count; i++)
  312. venus_writel(hdev, tbl[i].reg, tbl[i].value);
  313. }
/* Raise the host-to-ARM9 soft interrupt to tell the firmware to poll its queues. */
static void venus_soft_int(struct venus_hfi_device *hdev)
{
	venus_writel(hdev, CPU_IC_SOFTINT, BIT(CPU_IC_SOFTINT_H2A_SHIFT));
}
  318. static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
  319. void *pkt)
  320. {
  321. struct device *dev = hdev->core->dev;
  322. struct hfi_pkt_hdr *cmd_packet;
  323. struct iface_queue *queue;
  324. u32 rx_req;
  325. int ret;
  326. if (!venus_is_valid_state(hdev))
  327. return -EINVAL;
  328. cmd_packet = (struct hfi_pkt_hdr *)pkt;
  329. hdev->last_packet_type = cmd_packet->pkt_type;
  330. queue = &hdev->queues[IFACEQ_CMD_IDX];
  331. ret = venus_write_queue(hdev, queue, pkt, &rx_req);
  332. if (ret) {
  333. dev_err(dev, "write to iface cmd queue failed (%d)\n", ret);
  334. return ret;
  335. }
  336. if (rx_req)
  337. venus_soft_int(hdev);
  338. return 0;
  339. }
/* Locked wrapper around venus_iface_cmdq_write_nolock(). */
static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt)
{
	int ret;

	mutex_lock(&hdev->lock);
	ret = venus_iface_cmdq_write_nolock(hdev, pkt);
	mutex_unlock(&hdev->lock);

	return ret;
}
  348. static int venus_hfi_core_set_resource(struct venus_core *core, u32 id,
  349. u32 size, u32 addr, void *cookie)
  350. {
  351. struct venus_hfi_device *hdev = to_hfi_priv(core);
  352. struct hfi_sys_set_resource_pkt *pkt;
  353. u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
  354. int ret;
  355. if (id == VIDC_RESOURCE_NONE)
  356. return 0;
  357. pkt = (struct hfi_sys_set_resource_pkt *)packet;
  358. ret = pkt_sys_set_resource(pkt, id, size, addr, cookie);
  359. if (ret)
  360. return ret;
  361. ret = venus_iface_cmdq_write(hdev, pkt);
  362. if (ret)
  363. return ret;
  364. return 0;
  365. }
/*
 * venus_boot_core() - start the firmware and wait for it to come up
 *
 * Sets the CTRL_INIT bit, unmasks the wrapper interrupt, then polls the
 * control status register until the firmware reports non-zero status or
 * @max_tries polls (500-1000us apart) have elapsed.
 *
 * Return: 0 on success, -EINVAL if the firmware rejects the UC region,
 * -ETIMEDOUT if the firmware never signalled readiness.
 */
static int venus_boot_core(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	static const unsigned int max_tries = 100;
	u32 ctrl_status = 0;
	unsigned int count = 0;
	int ret = 0;

	venus_writel(hdev, VIDC_CTRL_INIT, BIT(VIDC_CTRL_INIT_CTRL_SHIFT));
	venus_writel(hdev, WRAPPER_INTR_MASK, WRAPPER_INTR_MASK_A2HVCODEC_MASK);
	venus_writel(hdev, CPU_CS_SCIACMDARG3, 1);

	while (!ctrl_status && count < max_tries) {
		ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
		/* error status 4: firmware rejected the UC region setup */
		if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
			dev_err(dev, "invalid setting for UC_REGION\n");
			ret = -EINVAL;
			break;
		}

		usleep_range(500, 1000);
		count++;
	}

	if (count >= max_tries)
		ret = -ETIMEDOUT;

	return ret;
}
  390. static u32 venus_hwversion(struct venus_hfi_device *hdev)
  391. {
  392. struct device *dev = hdev->core->dev;
  393. u32 ver = venus_readl(hdev, WRAPPER_HW_VERSION);
  394. u32 major, minor, step;
  395. major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK;
  396. major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
  397. minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK;
  398. minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
  399. step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK;
  400. dev_dbg(dev, "venus hw version %x.%x.%x\n", major, minor, step);
  401. return major;
  402. }
/*
 * venus_run() - reprogram shared-memory addresses and boot the firmware
 *
 * Must be called after every power-up: the register file is reset by the
 * regulator cycle, so the queue table, UC region and SFR addresses have
 * to be written again before venus_boot_core().
 *
 * Return: 0 on success or the error from venus_boot_core().
 */
static int venus_run(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	int ret;

	/*
	 * Re-program all of the registers that get reset as a result of
	 * regulator_disable() and _enable()
	 */
	venus_set_registers(hdev);

	venus_writel(hdev, UC_REGION_ADDR, hdev->ifaceq_table.da);
	venus_writel(hdev, UC_REGION_SIZE, SHARED_QSIZE);
	venus_writel(hdev, CPU_CS_SCIACMDARG2, hdev->ifaceq_table.da);
	venus_writel(hdev, CPU_CS_SCIACMDARG1, 0x01);
	/* SFR is optional: only advertise it if the allocation succeeded */
	if (hdev->sfr.da)
		venus_writel(hdev, SFR_ADDR, hdev->sfr.da);

	ret = venus_boot_core(hdev);
	if (ret) {
		dev_err(dev, "failed to reset venus core\n");
		return ret;
	}

	venus_hwversion(hdev);

	return 0;
}
/*
 * venus_halt_axi() - stop all AXI bus traffic from the Venus subsystem
 *
 * V4 hardware halts via the wrapper CPU AXI halt register; older parts
 * go through the VBIF halt-request/ack handshake. Used before power
 * collapse and after a firmware SYS_ERROR.
 *
 * Return: 0 on success, -ETIMEDOUT (from readl_poll_timeout) if the
 * halt is never acknowledged.
 */
static int venus_halt_axi(struct venus_hfi_device *hdev)
{
	void __iomem *base = hdev->core->base;
	struct device *dev = hdev->core->dev;
	u32 val;
	int ret;

	if (IS_V4(hdev->core)) {
		val = venus_readl(hdev, WRAPPER_CPU_AXI_HALT);
		val |= WRAPPER_CPU_AXI_HALT_HALT;
		venus_writel(hdev, WRAPPER_CPU_AXI_HALT, val);

		ret = readl_poll_timeout(base + WRAPPER_CPU_AXI_HALT_STATUS,
					 val,
					 val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE,
					 POLL_INTERVAL_US,
					 VBIF_AXI_HALT_ACK_TIMEOUT_US);
		if (ret) {
			dev_err(dev, "AXI bus port halt timeout\n");
			return ret;
		}

		return 0;
	}

	/* Halt AXI and AXI IMEM VBIF Access */
	val = venus_readl(hdev, VBIF_AXI_HALT_CTRL0);
	val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
	venus_writel(hdev, VBIF_AXI_HALT_CTRL0, val);

	/* Request for AXI bus port halt */
	ret = readl_poll_timeout(base + VBIF_AXI_HALT_CTRL1, val,
				 val & VBIF_AXI_HALT_CTRL1_HALT_ACK,
				 POLL_INTERVAL_US,
				 VBIF_AXI_HALT_ACK_TIMEOUT_US);
	if (ret) {
		dev_err(dev, "AXI bus port halt timeout\n");
		return ret;
	}

	return 0;
}
  462. static int venus_power_off(struct venus_hfi_device *hdev)
  463. {
  464. int ret;
  465. if (!hdev->power_enabled)
  466. return 0;
  467. ret = qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_SUSPEND, 0);
  468. if (ret)
  469. return ret;
  470. ret = venus_halt_axi(hdev);
  471. if (ret)
  472. return ret;
  473. hdev->power_enabled = false;
  474. return 0;
  475. }
/*
 * venus_power_on() - resume the video subsystem and reboot the firmware
 *
 * Asks TrustZone to resume the video state, then reprograms and boots
 * the core via venus_run(). On boot failure the remote state is put
 * back to suspend. No-op if power is already on.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int venus_power_on(struct venus_hfi_device *hdev)
{
	int ret;

	if (hdev->power_enabled)
		return 0;

	ret = qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_RESUME, 0);
	if (ret)
		goto err;

	ret = venus_run(hdev);
	if (ret)
		goto err_suspend;

	hdev->power_enabled = true;

	return 0;

err_suspend:
	/* best-effort rollback of the remote state */
	qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_SUSPEND, 0);
err:
	hdev->power_enabled = false;
	return ret;
}
  495. static int venus_iface_msgq_read_nolock(struct venus_hfi_device *hdev,
  496. void *pkt)
  497. {
  498. struct iface_queue *queue;
  499. u32 tx_req;
  500. int ret;
  501. if (!venus_is_valid_state(hdev))
  502. return -EINVAL;
  503. queue = &hdev->queues[IFACEQ_MSG_IDX];
  504. ret = venus_read_queue(hdev, queue, pkt, &tx_req);
  505. if (ret)
  506. return ret;
  507. if (tx_req)
  508. venus_soft_int(hdev);
  509. return 0;
  510. }
/* Locked wrapper around venus_iface_msgq_read_nolock(). */
static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt)
{
	int ret;

	mutex_lock(&hdev->lock);
	ret = venus_iface_msgq_read_nolock(hdev, pkt);
	mutex_unlock(&hdev->lock);

	return ret;
}
  519. static int venus_iface_dbgq_read_nolock(struct venus_hfi_device *hdev,
  520. void *pkt)
  521. {
  522. struct iface_queue *queue;
  523. u32 tx_req;
  524. int ret;
  525. ret = venus_is_valid_state(hdev);
  526. if (!ret)
  527. return -EINVAL;
  528. queue = &hdev->queues[IFACEQ_DBG_IDX];
  529. ret = venus_read_queue(hdev, queue, pkt, &tx_req);
  530. if (ret)
  531. return ret;
  532. if (tx_req)
  533. venus_soft_int(hdev);
  534. return 0;
  535. }
/* Locked wrapper around venus_iface_dbgq_read_nolock(); rejects NULL @pkt. */
static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt)
{
	int ret;

	if (!pkt)
		return -EINVAL;

	mutex_lock(&hdev->lock);
	ret = venus_iface_dbgq_read_nolock(hdev, pkt);
	mutex_unlock(&hdev->lock);

	return ret;
}
  546. static void venus_set_qhdr_defaults(struct hfi_queue_header *qhdr)
  547. {
  548. qhdr->status = 1;
  549. qhdr->type = IFACEQ_DFLT_QHDR;
  550. qhdr->q_size = IFACEQ_QUEUE_SIZE / 4;
  551. qhdr->pkt_size = 0;
  552. qhdr->rx_wm = 1;
  553. qhdr->tx_wm = 1;
  554. qhdr->rx_req = 1;
  555. qhdr->tx_req = 0;
  556. qhdr->rx_irq_status = 0;
  557. qhdr->tx_irq_status = 0;
  558. qhdr->read_idx = 0;
  559. qhdr->write_idx = 0;
  560. }
/*
 * Free the shared queue table and SFR buffer and clear all bookkeeping,
 * serialized against concurrent queue access via hdev->lock.
 */
static void venus_interface_queues_release(struct venus_hfi_device *hdev)
{
	mutex_lock(&hdev->lock);

	venus_free(hdev, &hdev->ifaceq_table);
	venus_free(hdev, &hdev->sfr);

	/* clear stale pointers so later accesses fail -EINVAL, not crash */
	memset(hdev->queues, 0, sizeof(hdev->queues));
	memset(&hdev->ifaceq_table, 0, sizeof(hdev->ifaceq_table));
	memset(&hdev->sfr, 0, sizeof(hdev->sfr));

	mutex_unlock(&hdev->lock);
}
/*
 * venus_interface_queues_init() - allocate and lay out the shared queues
 *
 * Allocates one DMA region holding the queue table header, the three
 * queue headers and the three queue payloads back to back, initializes
 * each header, then (best effort) allocates the SFR buffer. SFR
 * allocation failure is not fatal: hdev->sfr.da stays 0 and venus_run()
 * simply skips programming SFR_ADDR.
 *
 * Return: 0 on success, negative errno if the queue region allocation fails.
 */
static int venus_interface_queues_init(struct venus_hfi_device *hdev)
{
	struct hfi_queue_table_header *tbl_hdr;
	struct iface_queue *queue;
	struct hfi_sfr *sfr;
	struct mem_desc desc = {0};
	unsigned int offset;
	unsigned int i;
	int ret;

	ret = venus_alloc(hdev, &desc, ALIGNED_QUEUE_SIZE);
	if (ret)
		return ret;

	hdev->ifaceq_table = desc;
	/* payloads start right after the table header + queue headers */
	offset = IFACEQ_TABLE_SIZE;

	for (i = 0; i < IFACEQ_NUM; i++) {
		queue = &hdev->queues[i];
		queue->qmem.da = desc.da + offset;
		queue->qmem.kva = desc.kva + offset;
		queue->qmem.size = IFACEQ_QUEUE_SIZE;
		offset += queue->qmem.size;
		queue->qhdr =
			IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);

		venus_set_qhdr_defaults(queue->qhdr);

		queue->qhdr->start_addr = queue->qmem.da;

		/* tag each header with its queue id */
		if (i == IFACEQ_CMD_IDX)
			queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
		else if (i == IFACEQ_MSG_IDX)
			queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
		else if (i == IFACEQ_DBG_IDX)
			queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
	}

	tbl_hdr = hdev->ifaceq_table.kva;
	tbl_hdr->version = 0;
	tbl_hdr->size = IFACEQ_TABLE_SIZE;
	tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
	tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
	tbl_hdr->num_q = IFACEQ_NUM;
	tbl_hdr->num_active_q = IFACEQ_NUM;

	/*
	 * Set receive request to zero on debug queue as there is no
	 * need of interrupt from video hardware for debug messages
	 */
	queue = &hdev->queues[IFACEQ_DBG_IDX];
	queue->qhdr->rx_req = 0;

	ret = venus_alloc(hdev, &desc, ALIGNED_SFR_SIZE);
	if (ret) {
		hdev->sfr.da = 0;
	} else {
		hdev->sfr = desc;
		sfr = hdev->sfr.kva;
		sfr->buf_size = ALIGNED_SFR_SIZE;
	}

	/* ensure table and queue header structs are settled in memory */
	wmb();

	return 0;
}
  627. static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug)
  628. {
  629. struct hfi_sys_set_property_pkt *pkt;
  630. u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
  631. int ret;
  632. pkt = (struct hfi_sys_set_property_pkt *)packet;
  633. pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);
  634. ret = venus_iface_cmdq_write(hdev, pkt);
  635. if (ret)
  636. return ret;
  637. return 0;
  638. }
  639. static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode)
  640. {
  641. struct hfi_sys_set_property_pkt *pkt;
  642. u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
  643. int ret;
  644. pkt = (struct hfi_sys_set_property_pkt *)packet;
  645. pkt_sys_coverage_config(pkt, mode);
  646. ret = venus_iface_cmdq_write(hdev, pkt);
  647. if (ret)
  648. return ret;
  649. return 0;
  650. }
  651. static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
  652. bool enable)
  653. {
  654. struct hfi_sys_set_property_pkt *pkt;
  655. u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
  656. int ret;
  657. if (!enable)
  658. return 0;
  659. pkt = (struct hfi_sys_set_property_pkt *)packet;
  660. pkt_sys_idle_indicator(pkt, enable);
  661. ret = venus_iface_cmdq_write(hdev, pkt);
  662. if (ret)
  663. return ret;
  664. return 0;
  665. }
  666. static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
  667. bool enable)
  668. {
  669. struct hfi_sys_set_property_pkt *pkt;
  670. u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
  671. int ret;
  672. pkt = (struct hfi_sys_set_property_pkt *)packet;
  673. pkt_sys_power_control(pkt, enable);
  674. ret = venus_iface_cmdq_write(hdev, pkt);
  675. if (ret)
  676. return ret;
  677. return 0;
  678. }
/*
 * venus_get_queue_size() - number of pending words in queue @index
 *
 * Return: distance between read and write indices (0 means empty),
 * or -EINVAL for a bad index or uninitialized queue.
 *
 * NOTE(review): read_idx/write_idx are u32, so the subtraction wraps
 * before abs() sees it as int — fine for the "is it empty" use in
 * venus_are_queues_empty(), but the magnitude is suspect for large
 * wrapped differences; confirm if ever used for more than a zero test.
 */
static int venus_get_queue_size(struct venus_hfi_device *hdev,
				unsigned int index)
{
	struct hfi_queue_header *qhdr;

	if (index >= IFACEQ_NUM)
		return -EINVAL;

	qhdr = hdev->queues[index].qhdr;
	if (!qhdr)
		return -EINVAL;

	return abs(qhdr->read_idx - qhdr->write_idx);
}
/*
 * venus_sys_set_default_properties() - push the default system properties
 * (debug level, idle indicator, power control) after firmware init.
 *
 * Each failure only warns and the sequence continues; the return value
 * is the status of the last operation (power control).
 */
static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	int ret;

	ret = venus_sys_set_debug(hdev, venus_fw_debug);
	if (ret)
		dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);

	/*
	 * Idle indicator is disabled by default on some 4xx firmware versions,
	 * enable it explicitly in order to make suspend functional by checking
	 * WFI (wait-for-interrupt) bit.
	 */
	if (IS_V4(hdev->core))
		venus_sys_idle_indicator = true;

	ret = venus_sys_set_idle_message(hdev, venus_sys_idle_indicator);
	if (ret)
		dev_warn(dev, "setting idle response ON failed (%d)\n", ret);

	ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
	if (ret)
		dev_warn(dev, "setting hw power collapse ON failed (%d)\n",
			 ret);

	return ret;
}
  713. static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type)
  714. {
  715. struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
  716. struct hfi_session_pkt pkt;
  717. pkt_session_cmd(&pkt, pkt_type, inst);
  718. return venus_iface_cmdq_write(hdev, &pkt);
  719. }
  720. static void venus_flush_debug_queue(struct venus_hfi_device *hdev)
  721. {
  722. struct device *dev = hdev->core->dev;
  723. void *packet = hdev->dbg_buf;
  724. while (!venus_iface_dbgq_read(hdev, packet)) {
  725. struct hfi_msg_sys_coverage_pkt *pkt = packet;
  726. if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
  727. struct hfi_msg_sys_debug_pkt *pkt = packet;
  728. dev_dbg(dev, "%s", pkt->msg_data);
  729. }
  730. }
  731. }
/*
 * venus_prepare_power_collapse() - ask the firmware to prepare for
 * power collapse via a SYS_PC_PREP packet
 * @wait: when true, block until the firmware acknowledges (the ISR
 *	  thread completes pwr_collapse_prep) or the response timeout.
 *
 * Return: 0 on success, negative errno on write failure, -ETIMEDOUT if
 * no acknowledgment arrived (the debug queue is flushed for clues).
 */
static int venus_prepare_power_collapse(struct venus_hfi_device *hdev,
					bool wait)
{
	unsigned long timeout = msecs_to_jiffies(venus_hw_rsp_timeout);
	struct hfi_sys_pc_prep_pkt pkt;
	int ret;

	init_completion(&hdev->pwr_collapse_prep);

	pkt_sys_pc_prep(&pkt);

	ret = venus_iface_cmdq_write(hdev, &pkt);
	if (ret)
		return ret;

	if (!wait)
		return 0;

	ret = wait_for_completion_timeout(&hdev->pwr_collapse_prep, timeout);
	if (!ret) {
		venus_flush_debug_queue(hdev);
		return -ETIMEDOUT;
	}

	return 0;
}
  752. static int venus_are_queues_empty(struct venus_hfi_device *hdev)
  753. {
  754. int ret1, ret2;
  755. ret1 = venus_get_queue_size(hdev, IFACEQ_MSG_IDX);
  756. if (ret1 < 0)
  757. return ret1;
  758. ret2 = venus_get_queue_size(hdev, IFACEQ_CMD_IDX);
  759. if (ret2 < 0)
  760. return ret2;
  761. if (!ret1 && !ret2)
  762. return 1;
  763. return 0;
  764. }
/*
 * venus_sfr_print() - log the firmware's subsystem failure reason buffer
 *
 * Silently returns if the SFR buffer was never allocated. Because the
 * firmware may be mid-crash, the string may lack a terminator; one is
 * forced at the end of the buffer before printing.
 */
static void venus_sfr_print(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	struct hfi_sfr *sfr = hdev->sfr.kva;
	void *p;

	if (!sfr)
		return;

	p = memchr(sfr->data, '\0', sfr->buf_size);
	/*
	 * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates
	 * that Venus is in the process of crashing.
	 */
	if (!p)
		sfr->data[sfr->buf_size - 1] = '\0';

	dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
}
  781. static void venus_process_msg_sys_error(struct venus_hfi_device *hdev,
  782. void *packet)
  783. {
  784. struct hfi_msg_event_notify_pkt *event_pkt = packet;
  785. if (event_pkt->event_id != HFI_EVENT_SYS_ERROR)
  786. return;
  787. venus_set_state(hdev, VENUS_STATE_DEINIT);
  788. /*
  789. * Once SYS_ERROR received from HW, it is safe to halt the AXI.
  790. * With SYS_ERROR, Venus FW may have crashed and HW might be
  791. * active and causing unnecessary transactions. Hence it is
  792. * safe to stop all AXI transactions from venus subsystem.
  793. */
  794. venus_halt_axi(hdev);
  795. venus_sfr_print(hdev);
  796. }
/*
 * Threaded half of the Venus interrupt handler: check the watchdog
 * status latched by venus_isr(), drain the message queue dispatching
 * each response, then flush the firmware debug queue.
 */
static irqreturn_t venus_isr_thread(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	const struct venus_resources *res;
	void *pkt;
	u32 msg_ret;

	if (!hdev)
		return IRQ_NONE;

	res = hdev->core->res;
	pkt = hdev->pkt_buf;

	/* Watchdog bit set by venus_isr(): firmware is unresponsive */
	if (hdev->irq_status & WRAPPER_INTR_STATUS_A2HWD_MASK) {
		venus_sfr_print(hdev);
		hfi_process_watchdog_timeout(core);
	}

	while (!venus_iface_msgq_read(hdev, pkt)) {
		msg_ret = hfi_process_msg_packet(core, pkt);
		switch (msg_ret) {
		case HFI_MSG_EVENT_NOTIFY:
			venus_process_msg_sys_error(hdev, pkt);
			break;
		case HFI_MSG_SYS_INIT:
			/* Firmware is up: hand over the video memory region */
			venus_hfi_core_set_resource(core, res->vmem_id,
						    res->vmem_size,
						    res->vmem_addr,
						    hdev);
			break;
		case HFI_MSG_SYS_RELEASE_RESOURCE:
			complete(&hdev->release_resource);
			break;
		case HFI_MSG_SYS_PC_PREP:
			/* Wakes venus_prepare_power_collapse() waiters */
			complete(&hdev->pwr_collapse_prep);
			break;
		default:
			break;
		}
	}

	venus_flush_debug_queue(hdev);

	return IRQ_HANDLED;
}
/*
 * Hard-IRQ half: latch the wrapper interrupt status for the threaded
 * handler, then ack the interrupt. Always wakes the thread; the thread
 * re-checks the latched status itself.
 */
static irqreturn_t venus_isr(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	u32 status;

	if (!hdev)
		return IRQ_NONE;

	status = venus_readl(hdev, WRAPPER_INTR_STATUS);

	/* Only latch causes the threaded handler knows how to process */
	if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
	    status & WRAPPER_INTR_STATUS_A2HWD_MASK ||
	    status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
		hdev->irq_status = status;

	/* Ack: clear the A2H soft interrupt, then the wrapper status bits */
	venus_writel(hdev, CPU_CS_A2HSOFTINTCLR, 1);
	venus_writel(hdev, WRAPPER_INTR_CLEAR, status);

	return IRQ_WAKE_THREAD;
}
  851. static int venus_core_init(struct venus_core *core)
  852. {
  853. struct venus_hfi_device *hdev = to_hfi_priv(core);
  854. struct device *dev = core->dev;
  855. struct hfi_sys_get_property_pkt version_pkt;
  856. struct hfi_sys_init_pkt pkt;
  857. int ret;
  858. pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX);
  859. venus_set_state(hdev, VENUS_STATE_INIT);
  860. ret = venus_iface_cmdq_write(hdev, &pkt);
  861. if (ret)
  862. return ret;
  863. pkt_sys_image_version(&version_pkt);
  864. ret = venus_iface_cmdq_write(hdev, &version_pkt);
  865. if (ret)
  866. dev_warn(dev, "failed to send image version pkt to fw\n");
  867. ret = venus_sys_set_default_properties(hdev);
  868. if (ret)
  869. return ret;
  870. return 0;
  871. }
  872. static int venus_core_deinit(struct venus_core *core)
  873. {
  874. struct venus_hfi_device *hdev = to_hfi_priv(core);
  875. venus_set_state(hdev, VENUS_STATE_DEINIT);
  876. hdev->suspended = true;
  877. hdev->power_enabled = false;
  878. return 0;
  879. }
  880. static int venus_core_ping(struct venus_core *core, u32 cookie)
  881. {
  882. struct venus_hfi_device *hdev = to_hfi_priv(core);
  883. struct hfi_sys_ping_pkt pkt;
  884. pkt_sys_ping(&pkt, cookie);
  885. return venus_iface_cmdq_write(hdev, &pkt);
  886. }
  887. static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
  888. {
  889. struct venus_hfi_device *hdev = to_hfi_priv(core);
  890. struct hfi_sys_test_ssr_pkt pkt;
  891. int ret;
  892. ret = pkt_sys_ssr_cmd(&pkt, trigger_type);
  893. if (ret)
  894. return ret;
  895. return venus_iface_cmdq_write(hdev, &pkt);
  896. }
  897. static int venus_session_init(struct venus_inst *inst, u32 session_type,
  898. u32 codec)
  899. {
  900. struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
  901. struct hfi_session_init_pkt pkt;
  902. int ret;
  903. ret = pkt_session_init(&pkt, inst, session_type, codec);
  904. if (ret)
  905. goto err;
  906. ret = venus_iface_cmdq_write(hdev, &pkt);
  907. if (ret)
  908. goto err;
  909. return 0;
  910. err:
  911. venus_flush_debug_queue(hdev);
  912. return ret;
  913. }
  914. static int venus_session_end(struct venus_inst *inst)
  915. {
  916. struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
  917. struct device *dev = hdev->core->dev;
  918. if (venus_fw_coverage) {
  919. if (venus_sys_set_coverage(hdev, venus_fw_coverage))
  920. dev_warn(dev, "fw coverage msg ON failed\n");
  921. }
  922. return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END);
  923. }
  924. static int venus_session_abort(struct venus_inst *inst)
  925. {
  926. struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
  927. venus_flush_debug_queue(hdev);
  928. return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT);
  929. }
  930. static int venus_session_flush(struct venus_inst *inst, u32 flush_mode)
  931. {
  932. struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
  933. struct hfi_session_flush_pkt pkt;
  934. int ret;
  935. ret = pkt_session_flush(&pkt, inst, flush_mode);
  936. if (ret)
  937. return ret;
  938. return venus_iface_cmdq_write(hdev, &pkt);
  939. }
  940. static int venus_session_start(struct venus_inst *inst)
  941. {
  942. return venus_session_cmd(inst, HFI_CMD_SESSION_START);
  943. }
  944. static int venus_session_stop(struct venus_inst *inst)
  945. {
  946. return venus_session_cmd(inst, HFI_CMD_SESSION_STOP);
  947. }
  948. static int venus_session_continue(struct venus_inst *inst)
  949. {
  950. return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE);
  951. }
  952. static int venus_session_etb(struct venus_inst *inst,
  953. struct hfi_frame_data *in_frame)
  954. {
  955. struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
  956. u32 session_type = inst->session_type;
  957. int ret;
  958. if (session_type == VIDC_SESSION_TYPE_DEC) {
  959. struct hfi_session_empty_buffer_compressed_pkt pkt;
  960. ret = pkt_session_etb_decoder(&pkt, inst, in_frame);
  961. if (ret)
  962. return ret;
  963. ret = venus_iface_cmdq_write(hdev, &pkt);
  964. } else if (session_type == VIDC_SESSION_TYPE_ENC) {
  965. struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt;
  966. ret = pkt_session_etb_encoder(&pkt, inst, in_frame);
  967. if (ret)
  968. return ret;
  969. ret = venus_iface_cmdq_write(hdev, &pkt);
  970. } else {
  971. ret = -EINVAL;
  972. }
  973. return ret;
  974. }
  975. static int venus_session_ftb(struct venus_inst *inst,
  976. struct hfi_frame_data *out_frame)
  977. {
  978. struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
  979. struct hfi_session_fill_buffer_pkt pkt;
  980. int ret;
  981. ret = pkt_session_ftb(&pkt, inst, out_frame);
  982. if (ret)
  983. return ret;
  984. return venus_iface_cmdq_write(hdev, &pkt);
  985. }
  986. static int venus_session_set_buffers(struct venus_inst *inst,
  987. struct hfi_buffer_desc *bd)
  988. {
  989. struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
  990. struct hfi_session_set_buffers_pkt *pkt;
  991. u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
  992. int ret;
  993. if (bd->buffer_type == HFI_BUFFER_INPUT)
  994. return 0;
  995. pkt = (struct hfi_session_set_buffers_pkt *)packet;
  996. ret = pkt_session_set_buffers(pkt, inst, bd);
  997. if (ret)
  998. return ret;
  999. return venus_iface_cmdq_write(hdev, pkt);
  1000. }
  1001. static int venus_session_unset_buffers(struct venus_inst *inst,
  1002. struct hfi_buffer_desc *bd)
  1003. {
  1004. struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
  1005. struct hfi_session_release_buffer_pkt *pkt;
  1006. u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
  1007. int ret;
  1008. if (bd->buffer_type == HFI_BUFFER_INPUT)
  1009. return 0;
  1010. pkt = (struct hfi_session_release_buffer_pkt *)packet;
  1011. ret = pkt_session_unset_buffers(pkt, inst, bd);
  1012. if (ret)
  1013. return ret;
  1014. return venus_iface_cmdq_write(hdev, pkt);
  1015. }
  1016. static int venus_session_load_res(struct venus_inst *inst)
  1017. {
  1018. return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES);
  1019. }
  1020. static int venus_session_release_res(struct venus_inst *inst)
  1021. {
  1022. return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES);
  1023. }
  1024. static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
  1025. u32 seq_hdr_len)
  1026. {
  1027. struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
  1028. struct hfi_session_parse_sequence_header_pkt *pkt;
  1029. u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
  1030. int ret;
  1031. pkt = (struct hfi_session_parse_sequence_header_pkt *)packet;
  1032. ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len);
  1033. if (ret)
  1034. return ret;
  1035. ret = venus_iface_cmdq_write(hdev, pkt);
  1036. if (ret)
  1037. return ret;
  1038. return 0;
  1039. }
  1040. static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
  1041. u32 seq_hdr_len)
  1042. {
  1043. struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
  1044. struct hfi_session_get_sequence_header_pkt *pkt;
  1045. u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
  1046. int ret;
  1047. pkt = (struct hfi_session_get_sequence_header_pkt *)packet;
  1048. ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len);
  1049. if (ret)
  1050. return ret;
  1051. return venus_iface_cmdq_write(hdev, pkt);
  1052. }
  1053. static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
  1054. void *pdata)
  1055. {
  1056. struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
  1057. struct hfi_session_set_property_pkt *pkt;
  1058. u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
  1059. int ret;
  1060. pkt = (struct hfi_session_set_property_pkt *)packet;
  1061. ret = pkt_session_set_property(pkt, inst, ptype, pdata);
  1062. if (ret)
  1063. return ret;
  1064. return venus_iface_cmdq_write(hdev, pkt);
  1065. }
  1066. static int venus_session_get_property(struct venus_inst *inst, u32 ptype)
  1067. {
  1068. struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
  1069. struct hfi_session_get_property_pkt pkt;
  1070. int ret;
  1071. ret = pkt_session_get_property(&pkt, inst, ptype);
  1072. if (ret)
  1073. return ret;
  1074. return venus_iface_cmdq_write(hdev, &pkt);
  1075. }
/*
 * Power the core back on if it was suspended.
 *
 * Note the unlock label sits *before* the !ret check: the early-out
 * path (!hdev->suspended) arrives here with ret == 0 and harmlessly
 * re-clears the already-false suspended flag.
 */
static int venus_resume(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	int ret = 0;

	mutex_lock(&hdev->lock);

	if (!hdev->suspended)
		goto unlock;

	ret = venus_power_on(hdev);

unlock:
	if (!ret)
		hdev->suspended = false;

	mutex_unlock(&hdev->lock);

	return ret;
}
/*
 * Suspend sequence for Venus 1xx: validate the state, send PC_PREP and
 * wait for the ack, then verify the hardware is actually ready to lose
 * power (queues drained, PC_READY set) before cutting it.
 */
static int venus_suspend_1xx(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	struct device *dev = core->dev;
	u32 ctrl_status;
	int ret;

	if (!hdev->power_enabled || hdev->suspended)
		return 0;

	mutex_lock(&hdev->lock);
	ret = venus_is_valid_state(hdev);
	mutex_unlock(&hdev->lock);

	if (!ret) {
		dev_err(dev, "bad state, cannot suspend\n");
		return -EINVAL;
	}

	/* Waits for the firmware's PC_PREP ack (wait == true) */
	ret = venus_prepare_power_collapse(hdev, true);
	if (ret) {
		dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
		return ret;
	}

	mutex_lock(&hdev->lock);

	/* A later command slipped in after PC_PREP: abort the suspend */
	if (hdev->last_packet_type != HFI_CMD_SYS_PC_PREP) {
		mutex_unlock(&hdev->lock);
		return -EINVAL;
	}

	/* Both interface queues must be drained before power-off */
	ret = venus_are_queues_empty(hdev);
	if (ret < 0 || !ret) {
		mutex_unlock(&hdev->lock);
		return -EINVAL;
	}

	/* Hardware must report PC_READY in the control status register */
	ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
	if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
		mutex_unlock(&hdev->lock);
		return -EINVAL;
	}

	ret = venus_power_off(hdev);
	if (ret) {
		mutex_unlock(&hdev->lock);
		return ret;
	}

	hdev->suspended = true;

	mutex_unlock(&hdev->lock);

	return 0;
}
  1134. static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
  1135. {
  1136. u32 ctrl_status, cpu_status;
  1137. cpu_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
  1138. ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
  1139. if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
  1140. ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
  1141. return true;
  1142. return false;
  1143. }
  1144. static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
  1145. {
  1146. u32 ctrl_status, cpu_status;
  1147. cpu_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
  1148. ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
  1149. if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
  1150. ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
  1151. return true;
  1152. return false;
  1153. }
/*
 * Suspend sequence for Venus 3xx/4xx: poll for idle, send PC_PREP
 * without waiting for an ack, then poll for PC_READY before cutting
 * power.
 */
static int venus_suspend_3xx(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	struct device *dev = core->dev;
	bool val;
	int ret;

	if (!hdev->power_enabled || hdev->suspended)
		return 0;

	mutex_lock(&hdev->lock);
	ret = venus_is_valid_state(hdev);
	mutex_unlock(&hdev->lock);

	if (!ret) {
		dev_err(dev, "bad state, cannot suspend\n");
		return -EINVAL;
	}

	/*
	 * Power collapse sequence for Venus 3xx and 4xx versions:
	 * 1. Check for ARM9 and video core to be idle by checking WFI bit
	 *    (bit 0) in CPU status register and by checking Idle (bit 30) in
	 *    Control status register for video core.
	 * 2. Send a command to prepare for power collapse.
	 * 3. Check for WFI and PC_READY bits.
	 */

	/* Step 1: poll every 1.5ms, give up after 150ms */
	ret = readx_poll_timeout(venus_cpu_and_video_core_idle, hdev, val, val,
				 1500, 100 * 1500);
	if (ret)
		return ret;

	/* Step 2: fire-and-forget (wait == false); step 3 polls instead */
	ret = venus_prepare_power_collapse(hdev, false);
	if (ret) {
		dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
		return ret;
	}

	/* Step 3 */
	ret = readx_poll_timeout(venus_cpu_idle_and_pc_ready, hdev, val, val,
				 1500, 100 * 1500);
	if (ret)
		return ret;

	mutex_lock(&hdev->lock);

	ret = venus_power_off(hdev);
	if (ret) {
		dev_err(dev, "venus_power_off (%d)\n", ret);
		mutex_unlock(&hdev->lock);
		return ret;
	}

	hdev->suspended = true;

	mutex_unlock(&hdev->lock);

	return 0;
}
  1201. static int venus_suspend(struct venus_core *core)
  1202. {
  1203. if (IS_V3(core) || IS_V4(core))
  1204. return venus_suspend_3xx(core);
  1205. return venus_suspend_1xx(core);
  1206. }
/* HFI backend callbacks exposed to the core via venus_hfi_create(). */
static const struct hfi_ops venus_hfi_ops = {
	.core_init = venus_core_init,
	.core_deinit = venus_core_deinit,
	.core_ping = venus_core_ping,
	.core_trigger_ssr = venus_core_trigger_ssr,
	.session_init = venus_session_init,
	.session_end = venus_session_end,
	.session_abort = venus_session_abort,
	.session_flush = venus_session_flush,
	.session_start = venus_session_start,
	.session_stop = venus_session_stop,
	.session_continue = venus_session_continue,
	.session_etb = venus_session_etb,
	.session_ftb = venus_session_ftb,
	.session_set_buffers = venus_session_set_buffers,
	.session_unset_buffers = venus_session_unset_buffers,
	.session_load_res = venus_session_load_res,
	.session_release_res = venus_session_release_res,
	.session_parse_seq_hdr = venus_session_parse_seq_hdr,
	.session_get_seq_hdr = venus_session_get_seq_hdr,
	.session_set_property = venus_session_set_property,
	.session_get_property = venus_session_get_property,
	.resume = venus_resume,
	.suspend = venus_suspend,
	.isr = venus_isr,
	.isr_thread = venus_isr_thread,
};
  1234. void venus_hfi_destroy(struct venus_core *core)
  1235. {
  1236. struct venus_hfi_device *hdev = to_hfi_priv(core);
  1237. venus_interface_queues_release(hdev);
  1238. mutex_destroy(&hdev->lock);
  1239. kfree(hdev);
  1240. core->priv = NULL;
  1241. core->ops = NULL;
  1242. }
  1243. int venus_hfi_create(struct venus_core *core)
  1244. {
  1245. struct venus_hfi_device *hdev;
  1246. int ret;
  1247. hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
  1248. if (!hdev)
  1249. return -ENOMEM;
  1250. mutex_init(&hdev->lock);
  1251. hdev->core = core;
  1252. hdev->suspended = true;
  1253. core->priv = hdev;
  1254. core->ops = &venus_hfi_ops;
  1255. core->core_caps = ENC_ROTATION_CAPABILITY | ENC_SCALING_CAPABILITY |
  1256. ENC_DEINTERLACE_CAPABILITY |
  1257. DEC_MULTI_STREAM_CAPABILITY;
  1258. ret = venus_interface_queues_init(hdev);
  1259. if (ret)
  1260. goto err_kfree;
  1261. return 0;
  1262. err_kfree:
  1263. kfree(hdev);
  1264. core->priv = NULL;
  1265. core->ops = NULL;
  1266. return ret;
  1267. }