cx18-mailbox.c

/*
 * cx18 mailbox functions
 *
 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <stdarg.h>

#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-scb.h"
#include "cx18-irq.h"
#include "cx18-mailbox.h"
#include "cx18-queue.h"
#include "cx18-streams.h"
#include "cx18-alsa-pcm.h" /* FIXME make configurable */

static const char *rpu_str[] = { "APU", "CPU", "EPU", "HPU" };

#define API_FAST (1 << 2) /* Short timeout */
#define API_SLOW (1 << 3) /* Additional 300ms timeout */

struct cx18_api_info {
        u32 cmd;
        u8 flags;         /* Flags, see above */
        u8 rpu;           /* Processing unit */
        const char *name; /* The name of the command */
};

#define API_ENTRY(rpu, x, f) { (x), (f), (rpu), #x }

static const struct cx18_api_info api_info[] = {
        /* MPEG encoder API */
        API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
        API_ENTRY(CPU, CX18_EPU_DEBUG, 0),
        API_ENTRY(CPU, CX18_CREATE_TASK, 0),
        API_ENTRY(CPU, CX18_DESTROY_TASK, 0),
        API_ENTRY(CPU, CX18_CPU_CAPTURE_START, API_SLOW),
        API_ENTRY(CPU, CX18_CPU_CAPTURE_STOP, API_SLOW),
        API_ENTRY(CPU, CX18_CPU_CAPTURE_PAUSE, 0),
        API_ENTRY(CPU, CX18_CPU_CAPTURE_RESUME, 0),
        API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
        API_ENTRY(CPU, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 0),
        API_ENTRY(CPU, CX18_CPU_SET_VIDEO_IN, 0),
        API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RATE, 0),
        API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RESOLUTION, 0),
        API_ENTRY(CPU, CX18_CPU_SET_FILTER_PARAM, 0),
        API_ENTRY(CPU, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 0),
        API_ENTRY(CPU, CX18_CPU_SET_MEDIAN_CORING, 0),
        API_ENTRY(CPU, CX18_CPU_SET_INDEXTABLE, 0),
        API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PARAMETERS, 0),
        API_ENTRY(CPU, CX18_CPU_SET_VIDEO_MUTE, 0),
        API_ENTRY(CPU, CX18_CPU_SET_AUDIO_MUTE, 0),
        API_ENTRY(CPU, CX18_CPU_SET_MISC_PARAMETERS, 0),
        API_ENTRY(CPU, CX18_CPU_SET_RAW_VBI_PARAM, API_SLOW),
        API_ENTRY(CPU, CX18_CPU_SET_CAPTURE_LINE_NO, 0),
        API_ENTRY(CPU, CX18_CPU_SET_COPYRIGHT, 0),
        API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PID, 0),
        API_ENTRY(CPU, CX18_CPU_SET_VIDEO_PID, 0),
        API_ENTRY(CPU, CX18_CPU_SET_VER_CROP_LINE, 0),
        API_ENTRY(CPU, CX18_CPU_SET_GOP_STRUCTURE, 0),
        API_ENTRY(CPU, CX18_CPU_SET_SCENE_CHANGE_DETECTION, 0),
        API_ENTRY(CPU, CX18_CPU_SET_ASPECT_RATIO, 0),
        API_ENTRY(CPU, CX18_CPU_SET_SKIP_INPUT_FRAME, 0),
        API_ENTRY(CPU, CX18_CPU_SET_SLICED_VBI_PARAM, 0),
        API_ENTRY(CPU, CX18_CPU_SET_USERDATA_PLACE_HOLDER, 0),
        API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS, 0),
        API_ENTRY(CPU, CX18_CPU_SET_VFC_PARAM, 0),
        API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK, 0),
        API_ENTRY(CPU, CX18_CPU_DE_SET_MDL, API_FAST),
        API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL, API_SLOW),
        API_ENTRY(APU, CX18_APU_START, 0),
        API_ENTRY(APU, CX18_APU_STOP, 0),
        API_ENTRY(APU, CX18_APU_RESETAI, 0),
        API_ENTRY(CPU, CX18_CPU_DEBUG_PEEK32, 0),
        API_ENTRY(0, 0, 0),
};
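
/* Look up the flags, destination RPU and name registered for an API command code */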
static const struct cx18_api_info *find_api_info(u32 cmd)
{
        int i;

        for (i = 0; api_info[i].cmd; i++)
                if (api_info[i].cmd == cmd)
                        return &api_info[i];
        return NULL;
}

/* Call with buf of n*11+1 bytes */
static char *u32arr2hex(u32 data[], int n, char *buf)
{
        char *p;
        int i;

        for (i = 0, p = buf; i < n; i++, p += 11) {
                /* kernel snprintf() appends '\0' always */
                snprintf(p, 12, " %#010x", data[i]);
        }
        *p = '\0';
        return buf;
}
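
/* Dump a mailbox's request, ack, cmd, error and argument words when API debugging is enabled */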
static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
{
        char argstr[MAX_MB_ARGUMENTS*11+1];

        if (!(cx18_debug & CX18_DBGFLG_API))
                return;

        CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s\n",
                       name, mb->request, mb->ack, mb->cmd, mb->error,
                       u32arr2hex(mb->args, MAX_MB_ARGUMENTS, argstr));
}

/*
 * Functions that run in a work_queue work handling context
 */
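
/* Pass the captured TS data in an MDL's buffer(s) to the DVB software demux */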
static void cx18_mdl_send_to_dvb(struct cx18_stream *s, struct cx18_mdl *mdl)
{
        struct cx18_buffer *buf;

        if (s->dvb == NULL || !s->dvb->enabled || mdl->bytesused == 0)
                return;

        /* We ignore mdl and buf readpos accounting here - it doesn't matter */

        /* The likely case */
        if (list_is_singular(&mdl->buf_list)) {
                buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
                                       list);
                if (buf->bytesused)
                        dvb_dmx_swfilter(&s->dvb->demux,
                                         buf->buf, buf->bytesused);
                return;
        }

        list_for_each_entry(buf, &mdl->buf_list, list) {
                if (buf->bytesused == 0)
                        break;
                dvb_dmx_swfilter(&s->dvb->demux, buf->buf, buf->bytesused);
        }
}
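
/* Copy an MDL's buffer(s) into the current videobuf buffer and complete it once a full frame has accumulated */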
static void cx18_mdl_send_to_videobuf(struct cx18_stream *s,
                                      struct cx18_mdl *mdl)
{
        struct cx18_videobuf_buffer *vb_buf;
        struct cx18_buffer *buf;
        u8 *p;
        u32 offset = 0;
        int dispatch = 0;

        if (mdl->bytesused == 0)
                return;

        /* Acquire a videobuf buffer, clone to it and release it */
        spin_lock(&s->vb_lock);
        if (list_empty(&s->vb_capture))
                goto out;

        vb_buf = list_first_entry(&s->vb_capture, struct cx18_videobuf_buffer,
                                  vb.queue);

        p = videobuf_to_vmalloc(&vb_buf->vb);
        if (!p)
                goto out;

        offset = vb_buf->bytes_used;
        list_for_each_entry(buf, &mdl->buf_list, list) {
                if (buf->bytesused == 0)
                        break;

                if ((offset + buf->bytesused) <= vb_buf->vb.bsize) {
                        memcpy(p + offset, buf->buf, buf->bytesused);
                        offset += buf->bytesused;
                        vb_buf->bytes_used += buf->bytesused;
                }
        }

        /* If we've filled the buffer as per the caller's resolution, dispatch it */
        if (vb_buf->bytes_used >= s->vb_bytes_per_frame) {
                dispatch = 1;
                vb_buf->bytes_used = 0;
        }

        if (dispatch) {
                v4l2_get_timestamp(&vb_buf->vb.ts);
                list_del(&vb_buf->vb.queue);
                vb_buf->vb.state = VIDEOBUF_DONE;
                wake_up(&vb_buf->vb.done);
        }

        mod_timer(&s->vb_timeout, msecs_to_jiffies(2000) + jiffies);

out:
        spin_unlock(&s->vb_lock);
}
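
/* Announce the PCM data in an MDL's buffer(s) to the cx18-alsa module */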
static void cx18_mdl_send_to_alsa(struct cx18 *cx, struct cx18_stream *s,
                                  struct cx18_mdl *mdl)
{
        struct cx18_buffer *buf;

        if (mdl->bytesused == 0)
                return;

        /* We ignore mdl and buf readpos accounting here - it doesn't matter */

        /* The likely case */
        if (list_is_singular(&mdl->buf_list)) {
                buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
                                       list);
                if (buf->bytesused)
                        cx->pcm_announce_callback(cx->alsa, buf->buf,
                                                  buf->bytesused);
                return;
        }

        list_for_each_entry(buf, &mdl->buf_list, list) {
                if (buf->bytesused == 0)
                        break;
                cx->pcm_announce_callback(cx->alsa, buf->buf, buf->bytesused);
        }
}
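
/*
 * Process a CX18_EPU_DMA_DONE work order: hand each acknowledged MDL's data
 * to the right consumer (DVB, ALSA, videobuf or the stream's full queue) and
 * recycle the MDLs back to the firmware.
 */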
static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
{
        u32 handle, mdl_ack_count, id;
        struct cx18_mailbox *mb;
        struct cx18_mdl_ack *mdl_ack;
        struct cx18_stream *s;
        struct cx18_mdl *mdl;
        int i;

        mb = &order->mb;
        handle = mb->args[0];
        s = cx18_handle_to_stream(cx, handle);

        if (s == NULL) {
                CX18_WARN("Got DMA done notification for unknown/inactive handle %d, %s mailbox seq no %d\n",
                          handle,
                          (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
                          "stale" : "good", mb->request);
                return;
        }

        mdl_ack_count = mb->args[2];
        mdl_ack = order->mdl_ack;
        for (i = 0; i < mdl_ack_count; i++, mdl_ack++) {
                id = mdl_ack->id;
                /*
                 * Simple integrity check for processing a stale (and possibly
                 * inconsistent mailbox): make sure the MDL id is in the
                 * valid range for the stream.
                 *
                 * We go through the trouble of dealing with stale mailboxes
                 * because most of the time, the mailbox data is still valid and
                 * unchanged (and in practice the firmware ping-pongs the
                 * two mdl_ack buffers so mdl_acks are not stale).
                 *
                 * There are occasions when we get a half changed mailbox,
                 * which this check catches for a handle & id mismatch. If the
                 * handle and id do correspond, the worst case is that we
                 * completely lost the old MDL, but pick up the new MDL
                 * early (but the new mdl_ack is guaranteed to be good in this
                 * case as the firmware wouldn't point us to a new mdl_ack until
                 * it's filled in).
                 *
                 * cx18_queue_get_mdl() will detect the lost MDLs
                 * and send them back to q_free for fw rotation eventually.
                 */
                if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
                    !(id >= s->mdl_base_idx &&
                      id < (s->mdl_base_idx + s->buffers))) {
                        CX18_WARN("Fell behind! Ignoring stale mailbox with inconsistent data. Lost MDL for mailbox seq no %d\n",
                                  mb->request);
                        break;
                }
                mdl = cx18_queue_get_mdl(s, id, mdl_ack->data_used);

                CX18_DEBUG_HI_DMA("DMA DONE for %s (MDL %d)\n", s->name, id);
                if (mdl == NULL) {
                        CX18_WARN("Could not find MDL %d for stream %s\n",
                                  id, s->name);
                        continue;
                }

                CX18_DEBUG_HI_DMA("%s recv bytesused = %d\n",
                                  s->name, mdl->bytesused);

                if (s->type == CX18_ENC_STREAM_TYPE_TS) {
                        cx18_mdl_send_to_dvb(s, mdl);
                        cx18_enqueue(s, mdl, &s->q_free);
                } else if (s->type == CX18_ENC_STREAM_TYPE_PCM) {
                        /* Pass the data to cx18-alsa */
                        if (cx->pcm_announce_callback != NULL) {
                                cx18_mdl_send_to_alsa(cx, s, mdl);
                                cx18_enqueue(s, mdl, &s->q_free);
                        } else {
                                cx18_enqueue(s, mdl, &s->q_full);
                        }
                } else if (s->type == CX18_ENC_STREAM_TYPE_YUV) {
                        cx18_mdl_send_to_videobuf(s, mdl);
                        cx18_enqueue(s, mdl, &s->q_free);
                } else {
                        cx18_enqueue(s, mdl, &s->q_full);
                        if (s->type == CX18_ENC_STREAM_TYPE_IDX)
                                cx18_stream_rotate_idx_mdls(cx);
                }
        }

        /* Put as many MDLs as possible back into fw use */
        cx18_stream_load_fw_queue(s);

        wake_up(&cx->dma_waitq);
        if (s->id != -1)
                wake_up(&s->waitq);
}
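
/*
 * Log a CX18_EPU_DEBUG message string from the firmware; until the firmware
 * is marked loaded, also report the firmware version embedded in the message.
 */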
static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order)
{
        char *p;
        char *str = order->str;

        CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str);
        p = strchr(str, '.');
        if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
                CX18_INFO("FW version: %s\n", p - 1);
}
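
/* Dispatch a deferred incoming mailbox command to its handler, based on the originating RPU and command */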
static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order)
{
        switch (order->rpu) {
        case CPU:
        {
                switch (order->mb.cmd) {
                case CX18_EPU_DMA_DONE:
                        epu_dma_done(cx, order);
                        break;
                case CX18_EPU_DEBUG:
                        epu_debug(cx, order);
                        break;
                default:
                        CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
                                  order->mb.cmd);
                        break;
                }
                break;
        }
        case APU:
                CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
                          order->mb.cmd);
                break;
        default:
                break;
        }
}

static
void free_in_work_order(struct cx18 *cx, struct cx18_in_work_order *order)
{
        atomic_set(&order->pending, 0);
}

void cx18_in_work_handler(struct work_struct *work)
{
        struct cx18_in_work_order *order =
                container_of(work, struct cx18_in_work_order, work);
        struct cx18 *cx = order->cx;

        epu_cmd(cx, order);
        free_in_work_order(cx, order);
}

/*
 * Functions that run in an interrupt handling context
 */
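
/* Ack an incoming mailbox back to the sending RPU, unless that RPU has already timed out and self-ack'ed it */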
static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
        struct cx18_mailbox __iomem *ack_mb;
        u32 ack_irq, req;

        switch (order->rpu) {
        case APU:
                ack_irq = IRQ_EPU_TO_APU_ACK;
                ack_mb = &cx->scb->apu2epu_mb;
                break;
        case CPU:
                ack_irq = IRQ_EPU_TO_CPU_ACK;
                ack_mb = &cx->scb->cpu2epu_mb;
                break;
        default:
                CX18_WARN("Unhandled RPU (%d) for command %x ack\n",
                          order->rpu, order->mb.cmd);
                return;
        }

        req = order->mb.request;
        /* Don't ack if the RPU has gotten impatient and timed us out */
        if (req != cx18_readl(cx, &ack_mb->request) ||
            req == cx18_readl(cx, &ack_mb->ack)) {
                CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our incoming %s to EPU mailbox (sequence no. %u) while processing\n",
                                rpu_str[order->rpu], rpu_str[order->rpu], req);
                order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
                return;
        }
        cx18_writel(cx, req, &ack_mb->ack);
        cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
        return;
}
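
/*
 * Validate a DMA done mailbox and copy its mdl_ack array out of encoder
 * memory; returns > 0 if the order needs further handling in the work queue.
 */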
static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
        u32 handle, mdl_ack_offset, mdl_ack_count;
        struct cx18_mailbox *mb;
        int i;

        mb = &order->mb;
        handle = mb->args[0];
        mdl_ack_offset = mb->args[1];
        mdl_ack_count = mb->args[2];

        if (handle == CX18_INVALID_TASK_HANDLE ||
            mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) {
                if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
                        mb_ack_irq(cx, order);
                return -1;
        }

        for (i = 0; i < sizeof(struct cx18_mdl_ack) * mdl_ack_count; i += sizeof(u32))
                ((u32 *)order->mdl_ack)[i / sizeof(u32)] =
                        cx18_readl(cx, cx->enc_mem + mdl_ack_offset + i);

        if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
                mb_ack_irq(cx, order);
        return 1;
}
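
/*
 * Copy the firmware's debug string out of encoder memory; returns > 0 if a
 * string was retrieved and the order should be handed to the work queue.
 */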
static
int epu_debug_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
        u32 str_offset;
        char *str = order->str;

        str[0] = '\0';
        str_offset = order->mb.args[1];
        if (str_offset) {
                cx18_setup_page(cx, str_offset);
                cx18_memcpy_fromio(cx, str, cx->enc_mem + str_offset, 252);
                str[252] = '\0';
                cx18_setup_page(cx, SCB_OFFSET);
        }

        if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
                mb_ack_irq(cx, order);

        return str_offset ? 1 : 0;
}

static inline
int epu_cmd_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
        int ret = -1;

        switch (order->rpu) {
        case CPU:
        {
                switch (order->mb.cmd) {
                case CX18_EPU_DMA_DONE:
                        ret = epu_dma_done_irq(cx, order);
                        break;
                case CX18_EPU_DEBUG:
                        ret = epu_debug_irq(cx, order);
                        break;
                default:
                        CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
                                  order->mb.cmd);
                        break;
                }
                break;
        }
        case APU:
                CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
                          order->mb.cmd);
                break;
        default:
                break;
        }
        return ret;
}

static inline
struct cx18_in_work_order *alloc_in_work_order_irq(struct cx18 *cx)
{
        int i;
        struct cx18_in_work_order *order = NULL;

        for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
                /*
                 * We only need "pending" atomic to inspect its contents,
                 * and need not do a check and set because:
                 * 1. Any work handler thread only clears "pending" and only
                 * on one, particular work order at a time, per handler thread.
                 * 2. "pending" is only set here, and we're serialized because
                 * we're called in an IRQ handler context.
                 */
                if (atomic_read(&cx->in_work_order[i].pending) == 0) {
                        order = &cx->in_work_order[i];
                        atomic_set(&order->pending, 1);
                        break;
                }
        }
        return order;
}
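
/*
 * IRQ entry point for an incoming XPU to EPU mailbox: snapshot the mailbox
 * into a work order, flag stale/self-ack'ed mailboxes, and queue the order
 * for deferred processing when needed.
 */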
void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
{
        struct cx18_mailbox __iomem *mb;
        struct cx18_mailbox *order_mb;
        struct cx18_in_work_order *order;
        int submit;
        int i;

        switch (rpu) {
        case CPU:
                mb = &cx->scb->cpu2epu_mb;
                break;
        case APU:
                mb = &cx->scb->apu2epu_mb;
                break;
        default:
                return;
        }

        order = alloc_in_work_order_irq(cx);
        if (order == NULL) {
                CX18_WARN("Unable to find blank work order form to schedule incoming mailbox command processing\n");
                return;
        }

        order->flags = 0;
        order->rpu = rpu;
        order_mb = &order->mb;

        /* mb->cmd and mb->args[0] through mb->args[2] */
        for (i = 0; i < 4; i++)
                (&order_mb->cmd)[i] = cx18_readl(cx, &mb->cmd + i);

        /* mb->request and mb->ack. N.B. we want to read mb->ack last */
        for (i = 0; i < 2; i++)
                (&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);

        if (order_mb->request == order_mb->ack) {
                CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our incoming %s to EPU mailbox (sequence no. %u)\n",
                                rpu_str[rpu], rpu_str[rpu], order_mb->request);
                if (cx18_debug & CX18_DBGFLG_WARN)
                        dump_mb(cx, order_mb, "incoming");
                order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
        }

        /*
         * Individual EPU command processing is responsible for ack-ing
         * a non-stale mailbox as soon as possible
         */
        submit = epu_cmd_irq(cx, order);
        if (submit > 0) {
                queue_work(cx->in_work_queue, &order->work);
        }
}

/*
 * Functions called from a non-interrupt, non work_queue context
 */
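
/*
 * Send a command to an XPU via its EPU to XPU mailbox and wait for the ack:
 * wait out any still-busy mailbox, fill in cmd/args/request, raise the SW1
 * interrupt, sleep until the XPU acks (or a timeout expires), then collect
 * the returned arguments and error code.
 */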
static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
        const struct cx18_api_info *info = find_api_info(cmd);
        u32 irq, req, ack, err;
        struct cx18_mailbox __iomem *mb;
        wait_queue_head_t *waitq;
        struct mutex *mb_lock;
        unsigned long int t0, timeout, ret;
        int i;
        char argstr[MAX_MB_ARGUMENTS*11+1];
        DEFINE_WAIT(w);

        if (info == NULL) {
                CX18_WARN("unknown cmd %x\n", cmd);
                return -EINVAL;
        }

        if (cx18_debug & CX18_DBGFLG_API) { /* only call u32arr2hex if needed */
                if (cmd == CX18_CPU_DE_SET_MDL) {
                        if (cx18_debug & CX18_DBGFLG_HIGHVOL)
                                CX18_DEBUG_HI_API("%s\tcmd %#010x args%s\n",
                                                  info->name, cmd,
                                                  u32arr2hex(data, args, argstr));
                } else
                        CX18_DEBUG_API("%s\tcmd %#010x args%s\n",
                                       info->name, cmd,
                                       u32arr2hex(data, args, argstr));
        }

        switch (info->rpu) {
        case APU:
                waitq = &cx->mb_apu_waitq;
                mb_lock = &cx->epu2apu_mb_lock;
                irq = IRQ_EPU_TO_APU;
                mb = &cx->scb->epu2apu_mb;
                break;
        case CPU:
                waitq = &cx->mb_cpu_waitq;
                mb_lock = &cx->epu2cpu_mb_lock;
                irq = IRQ_EPU_TO_CPU;
                mb = &cx->scb->epu2cpu_mb;
                break;
        default:
                CX18_WARN("Unknown RPU (%d) for API call\n", info->rpu);
                return -EINVAL;
        }

        mutex_lock(mb_lock);
        /*
         * Wait for an in-use mailbox to complete
         *
         * If the XPU is responding with Ack's, the mailbox shouldn't be in
         * a busy state, since we serialize access to it on our end.
         *
         * If the wait for ack after sending a previous command was interrupted
         * by a signal, we may get here and find a busy mailbox. After waiting,
         * mark it "not busy" from our end, if the XPU hasn't ack'ed it still.
         */
        req = cx18_readl(cx, &mb->request);
        timeout = msecs_to_jiffies(10);
        ret = wait_event_timeout(*waitq,
                                 (ack = cx18_readl(cx, &mb->ack)) == req,
                                 timeout);
        if (req != ack) {
                /* waited long enough, make the mbox "not busy" from our end */
                cx18_writel(cx, req, &mb->ack);
                CX18_ERR("mbox was found stuck busy when setting up for %s; clearing busy and trying to proceed\n",
                         info->name);
        } else if (ret != timeout)
                CX18_DEBUG_API("waited %u msecs for busy mbox to be acked\n",
                               jiffies_to_msecs(timeout-ret));

        /* Build the outgoing mailbox */
        req = ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1;

        cx18_writel(cx, cmd, &mb->cmd);
        for (i = 0; i < args; i++)
                cx18_writel(cx, data[i], &mb->args[i]);
        cx18_writel(cx, 0, &mb->error);
        cx18_writel(cx, req, &mb->request);
        cx18_writel(cx, req - 1, &mb->ack); /* ensure ack & req are distinct */

        /*
         * Notify the XPU and wait for it to send an Ack back
         */
        timeout = msecs_to_jiffies((info->flags & API_FAST) ? 10 : 20);

        CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n",
                          irq, info->name);

        /* So we don't miss the wakeup, prepare to wait before notifying fw */
        prepare_to_wait(waitq, &w, TASK_UNINTERRUPTIBLE);
        cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);

        t0 = jiffies;
        ack = cx18_readl(cx, &mb->ack);
        if (ack != req) {
                schedule_timeout(timeout);
                ret = jiffies - t0;
                ack = cx18_readl(cx, &mb->ack);
        } else {
                ret = jiffies - t0;
        }

        finish_wait(waitq, &w);

        if (req != ack) {
                mutex_unlock(mb_lock);
                if (ret >= timeout) {
                        /* Timed out */
                        CX18_DEBUG_WARN("sending %s timed out waiting %d msecs for RPU acknowledgment\n",
                                        info->name, jiffies_to_msecs(ret));
                } else {
                        CX18_DEBUG_WARN("woken up before mailbox ack was ready after submitting %s to RPU. only waited %d msecs on req %u but awakened with unmatched ack %u\n",
                                        info->name,
                                        jiffies_to_msecs(ret),
                                        req, ack);
                }
                return -EINVAL;
        }

        if (ret >= timeout)
                CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment sending %s; timed out waiting %d msecs\n",
                                info->name, jiffies_to_msecs(ret));
        else
                CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
                                  jiffies_to_msecs(ret), info->name);

        /* Collect data returned by the XPU */
        for (i = 0; i < MAX_MB_ARGUMENTS; i++)
                data[i] = cx18_readl(cx, &mb->args[i]);
        err = cx18_readl(cx, &mb->error);
        mutex_unlock(mb_lock);

        /*
         * Wait for XPU to perform extra actions for the caller in some cases.
         * e.g. CX18_CPU_DE_RELEASE_MDL will cause the CPU to send all MDLs
         * back in a burst shortly thereafter
         */
        if (info->flags & API_SLOW)
                cx18_msleep_timeout(300, 0);

        if (err)
                CX18_DEBUG_API("mailbox error %08x for command %s\n", err,
                               info->name);
        return err ? -EIO : 0;
}

int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
        return cx18_api_call(cx, cmd, args, data);
}
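
/* Program the spatial, temporal and median filters from the driver's cached DNR filter settings */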
static int cx18_set_filter_param(struct cx18_stream *s)
{
        struct cx18 *cx = s->cx;
        u32 mode;
        int ret;

        mode = (cx->filter_mode & 1) ? 2 : (cx->spatial_strength ? 1 : 0);
        ret = cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
                        s->handle, 1, mode, cx->spatial_strength);
        mode = (cx->filter_mode & 2) ? 2 : (cx->temporal_strength ? 1 : 0);
        ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
                                    s->handle, 0, mode, cx->temporal_strength);
        ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
                                    s->handle, 2, cx->filter_mode >> 2, 0);
        return ret;
}
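
/* Translate generic cx2341x encoder commands into the corresponding CX23418 firmware API calls */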
int cx18_api_func(void *priv, u32 cmd, int in, int out,
                  u32 data[CX2341X_MBOX_MAX_DATA])
{
        struct cx18_stream *s = priv;
        struct cx18 *cx = s->cx;

        switch (cmd) {
        case CX2341X_ENC_SET_OUTPUT_PORT:
                return 0;
        case CX2341X_ENC_SET_FRAME_RATE:
                return cx18_vapi(cx, CX18_CPU_SET_VIDEO_IN, 6,
                                 s->handle, 0, 0, 0, 0, data[0]);
        case CX2341X_ENC_SET_FRAME_SIZE:
                return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RESOLUTION, 3,
                                 s->handle, data[1], data[0]);
        case CX2341X_ENC_SET_STREAM_TYPE:
                return cx18_vapi(cx, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 2,
                                 s->handle, data[0]);
        case CX2341X_ENC_SET_ASPECT_RATIO:
                return cx18_vapi(cx, CX18_CPU_SET_ASPECT_RATIO, 2,
                                 s->handle, data[0]);
        case CX2341X_ENC_SET_GOP_PROPERTIES:
                return cx18_vapi(cx, CX18_CPU_SET_GOP_STRUCTURE, 3,
                                 s->handle, data[0], data[1]);
        case CX2341X_ENC_SET_GOP_CLOSURE:
                return 0;
        case CX2341X_ENC_SET_AUDIO_PROPERTIES:
                return cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2,
                                 s->handle, data[0]);
        case CX2341X_ENC_MUTE_AUDIO:
                return cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
                                 s->handle, data[0]);
        case CX2341X_ENC_SET_BIT_RATE:
                return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RATE, 5,
                                 s->handle, data[0], data[1], data[2], data[3]);
        case CX2341X_ENC_MUTE_VIDEO:
                return cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2,
                                 s->handle, data[0]);
        case CX2341X_ENC_SET_FRAME_DROP_RATE:
                return cx18_vapi(cx, CX18_CPU_SET_SKIP_INPUT_FRAME, 2,
                                 s->handle, data[0]);
        case CX2341X_ENC_MISC:
                return cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 4,
                                 s->handle, data[0], data[1], data[2]);
        case CX2341X_ENC_SET_DNR_FILTER_MODE:
                cx->filter_mode = (data[0] & 3) | (data[1] << 2);
                return cx18_set_filter_param(s);
        case CX2341X_ENC_SET_DNR_FILTER_PROPS:
                cx->spatial_strength = data[0];
                cx->temporal_strength = data[1];
                return cx18_set_filter_param(s);
        case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
                return cx18_vapi(cx, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 3,
                                 s->handle, data[0], data[1]);
        case CX2341X_ENC_SET_CORING_LEVELS:
                return cx18_vapi(cx, CX18_CPU_SET_MEDIAN_CORING, 5,
                                 s->handle, data[0], data[1], data[2], data[3]);
        }
        CX18_WARN("Unknown cmd %x\n", cmd);
        return 0;
}
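
/* Issue an API command built from varargs and leave the XPU's returned arguments in data[] for the caller */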
int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS],
                     u32 cmd, int args, ...)
{
        va_list ap;
        int i;

        va_start(ap, args);
        for (i = 0; i < args; i++)
                data[i] = va_arg(ap, u32);
        va_end(ap);

        return cx18_api(cx, cmd, args, data);
}
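
/* Issue an API command built from varargs, discarding whatever arguments the XPU returns */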
int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...)
{
        u32 data[MAX_MB_ARGUMENTS];
        va_list ap;
        int i;

        if (cx == NULL) {
                CX18_ERR("cx == NULL (cmd=%x)\n", cmd);
                return 0;
        }
        if (args > MAX_MB_ARGUMENTS) {
                CX18_ERR("args too big (cmd=%x)\n", cmd);
                args = MAX_MB_ARGUMENTS;
        }
        va_start(ap, args);
        for (i = 0; i < args; i++)
                data[i] = va_arg(ap, u32);
        va_end(ap);
        return cx18_api(cx, cmd, args, data);
}