cx18-mailbox.c

/*
 *  cx18 mailbox functions
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *  Copyright (C) 2008  Andy Walls <awalls@md.metrocast.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 *  02111-1307  USA
 */

#include <stdarg.h>

#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-scb.h"
#include "cx18-irq.h"
#include "cx18-mailbox.h"
#include "cx18-queue.h"
#include "cx18-streams.h"
#include "cx18-alsa-pcm.h" /* FIXME make configurable */

static const char *rpu_str[] = { "APU", "CPU", "EPU", "HPU" };

#define API_FAST (1 << 2) /* Short timeout */
#define API_SLOW (1 << 3) /* Additional 300ms timeout */

struct cx18_api_info {
	u32 cmd;
	u8 flags;		/* Flags, see above */
	u8 rpu;			/* Processing unit */
	const char *name;	/* The name of the command */
};

#define API_ENTRY(rpu, x, f) { (x), (f), (rpu), #x }
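
/*
 * For example, API_ENTRY(CPU, CX18_CREATE_TASK, 0) expands to
 * { CX18_CREATE_TASK, 0, CPU, "CX18_CREATE_TASK" }: the command word, its
 * timeout flags, the target processing unit, and a printable name for
 * debug output.
 */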

static const struct cx18_api_info api_info[] = {
	/* MPEG encoder API */
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
	API_ENTRY(CPU, CX18_EPU_DEBUG, 0),
	API_ENTRY(CPU, CX18_CREATE_TASK, 0),
	API_ENTRY(CPU, CX18_DESTROY_TASK, 0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_START, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_STOP, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_PAUSE, 0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_RESUME, 0),
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_IN, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RATE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RESOLUTION, 0),
	API_ENTRY(CPU, CX18_CPU_SET_FILTER_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_MEDIAN_CORING, 0),
	API_ENTRY(CPU, CX18_CPU_SET_INDEXTABLE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PARAMETERS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_MUTE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_MUTE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_MISC_PARAMETERS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_RAW_VBI_PARAM, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_SET_CAPTURE_LINE_NO, 0),
	API_ENTRY(CPU, CX18_CPU_SET_COPYRIGHT, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PID, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_PID, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VER_CROP_LINE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_GOP_STRUCTURE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SCENE_CHANGE_DETECTION, 0),
	API_ENTRY(CPU, CX18_CPU_SET_ASPECT_RATIO, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SKIP_INPUT_FRAME, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SLICED_VBI_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_SET_USERDATA_PLACE_HOLDER, 0),
	API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VFC_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK, 0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL, API_FAST),
	API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL, API_SLOW),
	API_ENTRY(APU, CX18_APU_START, 0),
	API_ENTRY(APU, CX18_APU_STOP, 0),
	API_ENTRY(APU, CX18_APU_RESETAI, 0),
	API_ENTRY(CPU, CX18_CPU_DEBUG_PEEK32, 0),
	API_ENTRY(0, 0, 0),
};
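
/* The all-zero entry above terminates the table for find_api_info() below */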

static const struct cx18_api_info *find_api_info(u32 cmd)
{
	int i;

	for (i = 0; api_info[i].cmd; i++)
		if (api_info[i].cmd == cmd)
			return &api_info[i];
	return NULL;
}

/* Call with buf of n*11+1 bytes */
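/*
 * " %#010x" renders as a leading space, "0x" and 8 zero-padded hex digits,
 * i.e. 11 characters per u32, plus 1 byte for the terminating NUL.
 */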
static char *u32arr2hex(u32 data[], int n, char *buf)
{
	char *p;
	int i;

	for (i = 0, p = buf; i < n; i++, p += 11) {
		/* kernel snprintf() appends '\0' always */
		snprintf(p, 12, " %#010x", data[i]);
	}
	*p = '\0';
	return buf;
}

static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
{
	char argstr[MAX_MB_ARGUMENTS*11+1];

	if (!(cx18_debug & CX18_DBGFLG_API))
		return;

	CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s\n",
		       name, mb->request, mb->ack, mb->cmd, mb->error,
		       u32arr2hex(mb->args, MAX_MB_ARGUMENTS, argstr));
}

/*
 * Functions that run in a work_queue work handling context
 */

static void cx18_mdl_send_to_dvb(struct cx18_stream *s, struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	if (s->dvb == NULL || !s->dvb->enabled || mdl->bytesused == 0)
		return;

	/* We ignore mdl and buf readpos accounting here - it doesn't matter */

	/* The likely case */
	if (list_is_singular(&mdl->buf_list)) {
		buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
				       list);
		if (buf->bytesused)
			dvb_dmx_swfilter(&s->dvb->demux,
					 buf->buf, buf->bytesused);
		return;
	}

	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			break;
		dvb_dmx_swfilter(&s->dvb->demux, buf->buf, buf->bytesused);
	}
}

static void cx18_mdl_send_to_videobuf(struct cx18_stream *s,
				      struct cx18_mdl *mdl)
{
	struct cx18_videobuf_buffer *vb_buf;
	struct cx18_buffer *buf;
	u8 *p;
	u32 offset = 0;
	int dispatch = 0;

	if (mdl->bytesused == 0)
		return;

	/* Acquire a videobuf buffer, clone to it and release it */
	spin_lock(&s->vb_lock);
	if (list_empty(&s->vb_capture))
		goto out;

	vb_buf = list_first_entry(&s->vb_capture, struct cx18_videobuf_buffer,
				  vb.queue);

	p = videobuf_to_vmalloc(&vb_buf->vb);
	if (!p)
		goto out;

	offset = vb_buf->bytes_used;
	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			break;

		if ((offset + buf->bytesused) <= vb_buf->vb.bsize) {
			memcpy(p + offset, buf->buf, buf->bytesused);
			offset += buf->bytesused;
			vb_buf->bytes_used += buf->bytesused;
		}
	}

	/* If we've filled the buffer as per the caller's resolution, dispatch it */
	if (vb_buf->bytes_used >= s->vb_bytes_per_frame) {
		dispatch = 1;
		vb_buf->bytes_used = 0;
	}

	if (dispatch) {
		v4l2_get_timestamp(&vb_buf->vb.ts);
		list_del(&vb_buf->vb.queue);
		vb_buf->vb.state = VIDEOBUF_DONE;
		wake_up(&vb_buf->vb.done);
	}

	mod_timer(&s->vb_timeout, msecs_to_jiffies(2000) + jiffies);

out:
	spin_unlock(&s->vb_lock);
}

static void cx18_mdl_send_to_alsa(struct cx18 *cx, struct cx18_stream *s,
				  struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	if (mdl->bytesused == 0)
		return;

	/* We ignore mdl and buf readpos accounting here - it doesn't matter */

	/* The likely case */
	if (list_is_singular(&mdl->buf_list)) {
		buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
				       list);
		if (buf->bytesused)
			cx->pcm_announce_callback(cx->alsa, buf->buf,
						  buf->bytesused);
		return;
	}

	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			break;
		cx->pcm_announce_callback(cx->alsa, buf->buf, buf->bytesused);
	}
}
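
/*
 * epu_dma_done() routes each MDL the firmware has finished filling to the
 * right consumer for the stream type: the DVB demux for TS, the ALSA
 * announce callback for PCM, videobuf for YUV, or the stream's q_full
 * queue for data read through the V4L2 device node.
 */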

static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 handle, mdl_ack_count, id;
	struct cx18_mailbox *mb;
	struct cx18_mdl_ack *mdl_ack;
	struct cx18_stream *s;
	struct cx18_mdl *mdl;
	int i;

	mb = &order->mb;
	handle = mb->args[0];
	s = cx18_handle_to_stream(cx, handle);

	if (s == NULL) {
		CX18_WARN("Got DMA done notification for unknown/inactive handle %d, %s mailbox seq no %d\n",
			  handle,
			  (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
			  "stale" : "good", mb->request);
		return;
	}

	mdl_ack_count = mb->args[2];
	mdl_ack = order->mdl_ack;
	for (i = 0; i < mdl_ack_count; i++, mdl_ack++) {
		id = mdl_ack->id;
		/*
		 * Simple integrity check for processing a stale (and possibly
		 * inconsistent mailbox): make sure the MDL id is in the
		 * valid range for the stream.
		 *
		 * We go through the trouble of dealing with stale mailboxes
		 * because most of the time, the mailbox data is still valid and
		 * unchanged (and in practice the firmware ping-pongs the
		 * two mdl_ack buffers so mdl_acks are not stale).
		 *
		 * There are occasions when we get a half changed mailbox,
		 * which this check catches for a handle & id mismatch.  If the
		 * handle and id do correspond, the worst case is that we
		 * completely lost the old MDL, but pick up the new MDL
		 * early (but the new mdl_ack is guaranteed to be good in this
		 * case as the firmware wouldn't point us to a new mdl_ack until
		 * it's filled in).
		 *
		 * cx18_queue_get_mdl() will detect the lost MDLs
		 * and send them back to q_free for fw rotation eventually.
		 */
		if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
		    !(id >= s->mdl_base_idx &&
		      id < (s->mdl_base_idx + s->buffers))) {
			CX18_WARN("Fell behind! Ignoring stale mailbox with inconsistent data. Lost MDL for mailbox seq no %d\n",
				  mb->request);
			break;
		}
		mdl = cx18_queue_get_mdl(s, id, mdl_ack->data_used);

		CX18_DEBUG_HI_DMA("DMA DONE for %s (MDL %d)\n", s->name, id);
		if (mdl == NULL) {
			CX18_WARN("Could not find MDL %d for stream %s\n",
				  id, s->name);
			continue;
		}

		CX18_DEBUG_HI_DMA("%s recv bytesused = %d\n",
				  s->name, mdl->bytesused);

		if (s->type == CX18_ENC_STREAM_TYPE_TS) {
			cx18_mdl_send_to_dvb(s, mdl);
			cx18_enqueue(s, mdl, &s->q_free);
		} else if (s->type == CX18_ENC_STREAM_TYPE_PCM) {
			/* Pass the data to cx18-alsa */
			if (cx->pcm_announce_callback != NULL) {
				cx18_mdl_send_to_alsa(cx, s, mdl);
				cx18_enqueue(s, mdl, &s->q_free);
			} else {
				cx18_enqueue(s, mdl, &s->q_full);
			}
		} else if (s->type == CX18_ENC_STREAM_TYPE_YUV) {
			cx18_mdl_send_to_videobuf(s, mdl);
			cx18_enqueue(s, mdl, &s->q_free);
		} else {
			cx18_enqueue(s, mdl, &s->q_full);
			if (s->type == CX18_ENC_STREAM_TYPE_IDX)
				cx18_stream_rotate_idx_mdls(cx);
		}
	}

	/* Put as many MDLs as possible back into fw use */
	cx18_stream_load_fw_queue(s);

	wake_up(&cx->dma_waitq);
	if (s->id != -1)
		wake_up(&s->waitq);
}

static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order)
{
	char *p;
	char *str = order->str;

	CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str);
	p = strchr(str, '.');
	if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
		CX18_INFO("FW version: %s\n", p - 1);
}

static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order)
{
	switch (order->rpu) {
	case CPU:
	{
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			epu_dma_done(cx, order);
			break;
		case CX18_EPU_DEBUG:
			epu_debug(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	}
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
}

static
void free_in_work_order(struct cx18 *cx, struct cx18_in_work_order *order)
{
	atomic_set(&order->pending, 0);
}

void cx18_in_work_handler(struct work_struct *work)
{
	struct cx18_in_work_order *order =
			container_of(work, struct cx18_in_work_order, work);
	struct cx18 *cx = order->cx;

	epu_cmd(cx, order);
	free_in_work_order(cx, order);
}

/*
 * Functions that run in an interrupt handling context
 */

static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	struct cx18_mailbox __iomem *ack_mb;
	u32 ack_irq, req;

	switch (order->rpu) {
	case APU:
		ack_irq = IRQ_EPU_TO_APU_ACK;
		ack_mb = &cx->scb->apu2epu_mb;
		break;
	case CPU:
		ack_irq = IRQ_EPU_TO_CPU_ACK;
		ack_mb = &cx->scb->cpu2epu_mb;
		break;
	default:
		CX18_WARN("Unhandled RPU (%d) for command %x ack\n",
			  order->rpu, order->mb.cmd);
		return;
	}

	req = order->mb.request;
	/* Don't ack if the RPU has gotten impatient and timed us out */
	if (req != cx18_readl(cx, &ack_mb->request) ||
	    req == cx18_readl(cx, &ack_mb->ack)) {
		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our incoming %s to EPU mailbox (sequence no. %u) while processing\n",
				rpu_str[order->rpu], rpu_str[order->rpu], req);
		order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
		return;
	}

	cx18_writel(cx, req, &ack_mb->ack);
	cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
	return;
}

static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 handle, mdl_ack_offset, mdl_ack_count;
	struct cx18_mailbox *mb;
	int i;

	mb = &order->mb;
	handle = mb->args[0];
	mdl_ack_offset = mb->args[1];
	mdl_ack_count = mb->args[2];

	if (handle == CX18_INVALID_TASK_HANDLE ||
	    mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) {
		if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
			mb_ack_irq(cx, order);
		return -1;
	}

	for (i = 0; i < sizeof(struct cx18_mdl_ack) * mdl_ack_count;
	     i += sizeof(u32))
		((u32 *)order->mdl_ack)[i / sizeof(u32)] =
			cx18_readl(cx, cx->enc_mem + mdl_ack_offset + i);

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);
	return 1;
}

static
int epu_debug_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 str_offset;
	char *str = order->str;

	str[0] = '\0';
	str_offset = order->mb.args[1];
	if (str_offset) {
		cx18_setup_page(cx, str_offset);
		cx18_memcpy_fromio(cx, str, cx->enc_mem + str_offset, 252);
		str[252] = '\0';
		cx18_setup_page(cx, SCB_OFFSET);
	}

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);

	return str_offset ? 1 : 0;
}

static inline
int epu_cmd_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	int ret = -1;

	switch (order->rpu) {
	case CPU:
	{
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			ret = epu_dma_done_irq(cx, order);
			break;
		case CX18_EPU_DEBUG:
			ret = epu_debug_irq(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	}
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
	return ret;
}

static inline
struct cx18_in_work_order *alloc_in_work_order_irq(struct cx18 *cx)
{
	int i;
	struct cx18_in_work_order *order = NULL;

	for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
		/*
		 * We only need "pending" atomic to inspect its contents,
		 * and need not do a check and set because:
		 * 1. Any work handler thread only clears "pending" and only
		 * on one, particular work order at a time, per handler thread.
		 * 2. "pending" is only set here, and we're serialized because
		 * we're called in an IRQ handler context.
		 */
		if (atomic_read(&cx->in_work_order[i].pending) == 0) {
			order = &cx->in_work_order[i];
			atomic_set(&order->pending, 1);
			break;
		}
	}
	return order;
}

void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
{
	struct cx18_mailbox __iomem *mb;
	struct cx18_mailbox *order_mb;
	struct cx18_in_work_order *order;
	int submit;
	int i;

	switch (rpu) {
	case CPU:
		mb = &cx->scb->cpu2epu_mb;
		break;
	case APU:
		mb = &cx->scb->apu2epu_mb;
		break;
	default:
		return;
	}

	order = alloc_in_work_order_irq(cx);
	if (order == NULL) {
		CX18_WARN("Unable to find blank work order form to schedule incoming mailbox command processing\n");
		return;
	}

	order->flags = 0;
	order->rpu = rpu;
	order_mb = &order->mb;

	/* mb->cmd and mb->args[0] through mb->args[2] */
	for (i = 0; i < 4; i++)
		(&order_mb->cmd)[i] = cx18_readl(cx, &mb->cmd + i);

	/* mb->request and mb->ack. N.B. we want to read mb->ack last */
	for (i = 0; i < 2; i++)
		(&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);

	if (order_mb->request == order_mb->ack) {
		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our incoming %s to EPU mailbox (sequence no. %u)\n",
				rpu_str[rpu], rpu_str[rpu],
				order_mb->request);
		if (cx18_debug & CX18_DBGFLG_WARN)
			dump_mb(cx, order_mb, "incoming");
		order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
	}

	/*
	 * Individual EPU command processing is responsible for ack-ing
	 * a non-stale mailbox as soon as possible
	 */
	submit = epu_cmd_irq(cx, order);
	if (submit > 0) {
		queue_work(cx->in_work_queue, &order->work);
	}
}

/*
 * Functions called from a non-interrupt, non work_queue context
 */
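
/*
 * cx18_api_call() below implements the outgoing half of the mailbox
 * handshake: write cmd, args and a fresh request sequence number into the
 * EPU-to-XPU mailbox, raise the SW1 interrupt, then wait for the XPU to
 * copy the request number into the ack field before reading back the
 * returned args and error code.
 */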

static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	const struct cx18_api_info *info = find_api_info(cmd);
	u32 irq, req, ack, err;
	struct cx18_mailbox __iomem *mb;
	wait_queue_head_t *waitq;
	struct mutex *mb_lock;
	unsigned long int t0, timeout, ret;
	int i;
	char argstr[MAX_MB_ARGUMENTS*11+1];
	DEFINE_WAIT(w);

	if (info == NULL) {
		CX18_WARN("unknown cmd %x\n", cmd);
		return -EINVAL;
	}

	if (cx18_debug & CX18_DBGFLG_API) { /* only call u32arr2hex if needed */
		if (cmd == CX18_CPU_DE_SET_MDL) {
			if (cx18_debug & CX18_DBGFLG_HIGHVOL)
				CX18_DEBUG_HI_API("%s\tcmd %#010x args%s\n",
						  info->name, cmd,
						  u32arr2hex(data, args, argstr));
		} else
			CX18_DEBUG_API("%s\tcmd %#010x args%s\n",
				       info->name, cmd,
				       u32arr2hex(data, args, argstr));
	}

	switch (info->rpu) {
	case APU:
		waitq = &cx->mb_apu_waitq;
		mb_lock = &cx->epu2apu_mb_lock;
		irq = IRQ_EPU_TO_APU;
		mb = &cx->scb->epu2apu_mb;
		break;
	case CPU:
		waitq = &cx->mb_cpu_waitq;
		mb_lock = &cx->epu2cpu_mb_lock;
		irq = IRQ_EPU_TO_CPU;
		mb = &cx->scb->epu2cpu_mb;
		break;
	default:
		CX18_WARN("Unknown RPU (%d) for API call\n", info->rpu);
		return -EINVAL;
	}

	mutex_lock(mb_lock);
	/*
	 * Wait for an in-use mailbox to complete
	 *
	 * If the XPU is responding with Ack's, the mailbox shouldn't be in
	 * a busy state, since we serialize access to it on our end.
	 *
	 * If the wait for ack after sending a previous command was interrupted
	 * by a signal, we may get here and find a busy mailbox.  After waiting,
	 * mark it "not busy" from our end, if the XPU hasn't ack'ed it still.
	 */
	req = cx18_readl(cx, &mb->request);
	timeout = msecs_to_jiffies(10);
	ret = wait_event_timeout(*waitq,
				 (ack = cx18_readl(cx, &mb->ack)) == req,
				 timeout);
	if (req != ack) {
		/* waited long enough, make the mbox "not busy" from our end */
		cx18_writel(cx, req, &mb->ack);
		CX18_ERR("mbox was found stuck busy when setting up for %s; clearing busy and trying to proceed\n",
			 info->name);
	} else if (ret != timeout)
		CX18_DEBUG_API("waited %u msecs for busy mbox to be acked\n",
			       jiffies_to_msecs(timeout-ret));

	/* Build the outgoing mailbox */
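	/*
	 * The request sequence number cycles through 1..0xfffffffe: when the
	 * old value is 0xfffffffe or 0xffffffff it wraps back to 1, so the
	 * new request is never 0 and never 0xffffffff.
	 */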
	req = ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1;

	cx18_writel(cx, cmd, &mb->cmd);
	for (i = 0; i < args; i++)
		cx18_writel(cx, data[i], &mb->args[i]);
	cx18_writel(cx, 0, &mb->error);
	cx18_writel(cx, req, &mb->request);
	cx18_writel(cx, req - 1, &mb->ack); /* ensure ack & req are distinct */

	/*
	 * Notify the XPU and wait for it to send an Ack back
	 */
	timeout = msecs_to_jiffies((info->flags & API_FAST) ? 10 : 20);

	CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n",
			  irq, info->name);

	/* So we don't miss the wakeup, prepare to wait before notifying fw */
	prepare_to_wait(waitq, &w, TASK_UNINTERRUPTIBLE);
	cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);

	t0 = jiffies;
	ack = cx18_readl(cx, &mb->ack);
	if (ack != req) {
		schedule_timeout(timeout);
		ret = jiffies - t0;
		ack = cx18_readl(cx, &mb->ack);
	} else {
		ret = jiffies - t0;
	}

	finish_wait(waitq, &w);

	if (req != ack) {
		mutex_unlock(mb_lock);
		if (ret >= timeout) {
			/* Timed out */
			CX18_DEBUG_WARN("sending %s timed out waiting %d msecs for RPU acknowledgment\n",
					info->name, jiffies_to_msecs(ret));
		} else {
			CX18_DEBUG_WARN("woken up before mailbox ack was ready after submitting %s to RPU. only waited %d msecs on req %u but awakened with unmatched ack %u\n",
					info->name,
					jiffies_to_msecs(ret),
					req, ack);
		}
		return -EINVAL;
	}

	if (ret >= timeout)
		CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment sending %s; timed out waiting %d msecs\n",
				info->name, jiffies_to_msecs(ret));
	else
		CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
				  jiffies_to_msecs(ret), info->name);

	/* Collect data returned by the XPU */
	for (i = 0; i < MAX_MB_ARGUMENTS; i++)
		data[i] = cx18_readl(cx, &mb->args[i]);
	err = cx18_readl(cx, &mb->error);
	mutex_unlock(mb_lock);

	/*
	 * Wait for XPU to perform extra actions for the caller in some cases.
	 * e.g. CX18_CPU_DE_RELEASE_MDL will cause the CPU to send all MDLs
	 * back in a burst shortly thereafter
	 */
	if (info->flags & API_SLOW)
		cx18_msleep_timeout(300, 0);

	if (err)
		CX18_DEBUG_API("mailbox error %08x for command %s\n", err,
			       info->name);
	return err ? -EIO : 0;
}

int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	return cx18_api_call(cx, cmd, args, data);
}

static int cx18_set_filter_param(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	u32 mode;
	int ret;

	mode = (cx->filter_mode & 1) ? 2 : (cx->spatial_strength ? 1 : 0);
	ret = cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
			s->handle, 1, mode, cx->spatial_strength);
	mode = (cx->filter_mode & 2) ? 2 : (cx->temporal_strength ? 1 : 0);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
				    s->handle, 0, mode, cx->temporal_strength);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
				    s->handle, 2, cx->filter_mode >> 2, 0);
	return ret;
}
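
/*
 * cx18_api_func() is used as the mailbox callback for the cx2341x encoder
 * helper layer (it matches the cx2341x_mbox_func signature); it translates
 * generic CX2341X encoder commands into the corresponding CX18 firmware
 * API calls, or ignores the ones that do not apply to this hardware.
 */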

int cx18_api_func(void *priv, u32 cmd, int in, int out,
		  u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct cx18_stream *s = priv;
	struct cx18 *cx = s->cx;

	switch (cmd) {
	case CX2341X_ENC_SET_OUTPUT_PORT:
		return 0;
	case CX2341X_ENC_SET_FRAME_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_IN, 6,
				 s->handle, 0, 0, 0, 0, data[0]);
	case CX2341X_ENC_SET_FRAME_SIZE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RESOLUTION, 3,
				 s->handle, data[1], data[0]);
	case CX2341X_ENC_SET_STREAM_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_ASPECT_RATIO:
		return cx18_vapi(cx, CX18_CPU_SET_ASPECT_RATIO, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_GOP_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_GOP_STRUCTURE, 3,
				 s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_GOP_CLOSURE:
		return 0;
	case CX2341X_ENC_SET_AUDIO_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_MUTE_AUDIO:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_BIT_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RATE, 5,
				 s->handle, data[0], data[1], data[2], data[3]);
	case CX2341X_ENC_MUTE_VIDEO:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_FRAME_DROP_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_SKIP_INPUT_FRAME, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_MISC:
		return cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 4,
				 s->handle, data[0], data[1], data[2]);
	case CX2341X_ENC_SET_DNR_FILTER_MODE:
		cx->filter_mode = (data[0] & 3) | (data[1] << 2);
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_DNR_FILTER_PROPS:
		cx->spatial_strength = data[0];
		cx->temporal_strength = data[1];
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 3,
				 s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_CORING_LEVELS:
		return cx18_vapi(cx, CX18_CPU_SET_MEDIAN_CORING, 5,
				 s->handle, data[0], data[1], data[2], data[3]);
	}
	CX18_WARN("Unknown cmd %x\n", cmd);
	return 0;
}

int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS],
		     u32 cmd, int args, ...)
{
	va_list ap;
	int i;

	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);

	return cx18_api(cx, cmd, args, data);
}

int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...)
{
	u32 data[MAX_MB_ARGUMENTS];
	va_list ap;
	int i;

	if (cx == NULL) {
		CX18_ERR("cx == NULL (cmd=%x)\n", cmd);
		return 0;
	}
	if (args > MAX_MB_ARGUMENTS) {
		CX18_ERR("args too big (cmd=%x)\n", cmd);
		args = MAX_MB_ARGUMENTS;
	}

	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);

	return cx18_api(cx, cmd, args, data);
}
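
/*
 * Illustrative use of the vararg wrappers (the argument lists below are
 * examples only, not taken from this file):
 *
 *	cx18_vapi(cx, CX18_CPU_CAPTURE_PAUSE, 1, s->handle);
 *	cx18_vapi_result(cx, data, CX18_CPU_GET_ENC_PTS, 1, s->handle);
 *
 * cx18_vapi() fills a local data[] that the caller never sees, so any
 * values the firmware writes back are discarded; cx18_vapi_result()
 * returns them in the caller-supplied data[].
 */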