request_manager.c

/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);

static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))

/* Define this to return the request status compatible to old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

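/* Set up the input queue at index iq_no: allocate the DMA-coherent
 * descriptor ring and the host-side request list, program the IQ
 * registers, and start the delayed work that periodically reaps
 * fetched instructions if completions stall (see check_db_timeout()).
 */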
/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    union oct_txpciq txpciq,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 iq_no = (u32)txpciq.s.q_no;
	u32 q_size;
	struct cavium_wq *db_wq;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
	else if (OCTEON_CN23XX_PF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf)));
	else if (OCTEON_CN23XX_VF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_vf)));

	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->oct_dev = oct;

	iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to
	 * Octeon but have yet to be fetched by Octeon.
	 */
	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
					numa_node);
	if (!iq->request_list)
		iq->request_list = vmalloc(sizeof(*iq->request_list) *
					   num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->txpciq.u64 = txpciq.u64;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);
	spin_lock_init(&iq->post_lock);

	spin_lock_init(&iq->iq_flush_running_lock);

	oct->io_qmask.iq |= BIT_ULL(iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
						     WQ_MEM_RECLAIM,
						     0);
	if (!oct->check_db_wq[iq_no].wq) {
		vfree(iq->request_list);
		iq->request_list = NULL;
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}

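/* Tear down input queue iq_no: stop the doorbell-check work, free the
 * request list and the descriptor ring, and release the queue slot.
 */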
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn6xxx));
	else if (OCTEON_CN23XX_PF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_pf));
	else if (OCTEON_CN23XX_VF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_vf));

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		oct->io_qmask.iq &= ~(1ULL << iq_no);
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		oct->num_iqs--;
		return 0;
	}
	return 1;
}

/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    int ifidx,
		    int q_index,
		    union oct_txpciq txpciq,
		    u32 num_descs,
		    void *app_ctx)
{
	u32 iq_no = (u32)txpciq.s.q_no;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
			iq_no);
		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
	    vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
	if (!oct->instr_queue[iq_no])
		oct->instr_queue[iq_no] =
		    vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	memset(oct->instr_queue[iq_no], 0,
	       sizeof(struct octeon_instr_queue));

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	oct->num_iqs++;
	if (oct->fn_list.enable_io_queues(oct))
		return 1;

	return 0;
}

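/* Wait for all active IQs to drain, sleeping one jiffy per pass for up
 * to ~1000 passes and nudging the doorbell-timeout check on queues
 * that still have work. Returns the number of instructions still
 * pending when the wait gives up, 0 once everything has been fetched.
 */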
int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			pending =
			    atomic_read(&oct->instr_queue[i]->instr_pending);
			if (pending)
				__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}

static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		mmiowb();
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
		return;
	}
}

static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

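/* Copy one command into the next free ring slot and advance the write
 * index. One descriptor is held in reserve so the write index can
 * never catch up to Octeon's read index on a full ring: the post fails
 * at max_count - 1 pending, and IQ_SEND_STOP is returned one slot
 * earlier to tell the caller to stop queueing.
 */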
static inline struct iq_post_status
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if queue gets full before Octeon could fetch any instr.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	iq->host_write_index = incr_index(iq->host_write_index, 1,
					  iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}

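/* Register the destructor for buffers posted with the given request
 * type; lio_process_iq_request_list() invokes it once Octeon has
 * fetched the instruction (e.g. the LiquidIO netdev code registers its
 * packet-buffer free routines for the REQTYPE_*NET* types).
 */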
int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}

static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq, u32 napi_budget)
{
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	struct octeon_instr_irh *irh;
	unsigned long flags;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf     = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;

			if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd3.irh;
			else
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd2.irh;
			if (irh->rflag) {
				/* We're expecting a response from Octeon.
				 * Add sc to the ordered soft command
				 * response list; it is up to
				 * lio_process_ordered_list() to process it.
				 */
				spin_lock_irqsave
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
				atomic_inc(&oct->response_list
					   [OCTEON_ORDERED_SC_LIST].
					   pending_req_count);
				list_add_tail(&sc->node, &oct->response_list
					[OCTEON_ORDERED_SC_LIST].head);
				spin_unlock_irqrestore
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
			} else {
				if (sc->callback) {
					/* This callback must not sleep */
					sc->callback(oct, OCTEON_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

skip_this:
		inst_count++;
		old = incr_index(old, 1, iq->max_count);

		if ((napi_budget) && (inst_count >= napi_budget))
			break;
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
						   bytes_compl);
	iq->flush_index = old;

	return inst_count;
}

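/* Reap instructions Octeon has fetched since the last flush, bounded
 * by napi_budget when one is given (0 means unbounded). Returns 1 if
 * the queue caught up (or the flush lock was contended), 0 if the
 * budget ran out with work left over.
 */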
/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 napi_budget)
{
	u32 inst_processed = 0;
	u32 tot_inst_processed = 0;
	int tx_done = 1;

	if (!spin_trylock(&iq->iq_flush_running_lock))
		return tx_done;

	spin_lock_bh(&iq->lock);

	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	do {
		/* Process any outstanding IQ packets. */
		if (iq->flush_index == iq->octeon_read_index)
			break;

		if (napi_budget)
			inst_processed =
				lio_process_iq_request_list(oct, iq,
							    napi_budget -
							    tot_inst_processed);
		else
			inst_processed =
				lio_process_iq_request_list(oct, iq, 0);

		if (inst_processed) {
			atomic_sub(inst_processed, &iq->instr_pending);
			iq->stats.instr_processed += inst_processed;
		}

		tot_inst_processed += inst_processed;
		inst_processed = 0;

	} while (tot_inst_processed < napi_budget);

	if (napi_budget && (tot_inst_processed >= napi_budget))
		tx_done = 0;

	iq->last_db_time = jiffies;

	spin_unlock_bh(&iq->lock);

	spin_unlock(&iq->iq_flush_running_lock);

	return tx_done;
}

/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	if (!oct)
		return;

	iq = oct->instr_queue[iq_no];
	if (!iq)
		return;

	/* Return immediately if no work is pending */
	if (!atomic_read(&iq->instr_pending))
		return;
	/* If jiffies - last_db_time < db_timeout do nothing */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Flush the instruction queue */
	octeon_flush_iq(oct, iq, 0);

	lio_enable_irq(NULL, iq);
}

/* Called by the Poll thread at regular intervals to check the instruction
 * queue for commands to be posted and for commands that were fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	u64 iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
	u32 delay = 10;

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}

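/* Post a single command to IQ iq_no under the post lock, record buf in
 * the request list so it can be reclaimed on completion, and ring the
 * doorbell if force_db is set. Returns the IQ_SEND_* status from
 * __post_command2().
 */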
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Get the lock and prevent other tasks and tx interrupt handler from
	 * running.
	 */
	spin_lock_bh(&iq->post_lock);

	st = __post_command2(iq, cmd);

	if (st.status != IQ_SEND_FAILED) {
		octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (force_db)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	spin_unlock_bh(&iq->post_lock);

	/* This is only done here to expedite packets being flushed
	 * for cases where there are no IQ completion interrupts.
	 */

	return st.status;
}

void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u8 opcode,
			    u8 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	WARN_ON(opcode > 15);
	WARN_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

		ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

		pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

		pki_ih3->w = 1;
		pki_ih3->raw = 1;
		pki_ih3->utag = 1;
		pki_ih3->uqpg =
			oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
		pki_ih3->utt = 1;
		pki_ih3->tag = LIO_CONTROL;
		pki_ih3->tagtype = ATOMIC_TAG;
		pki_ih3->qpg =
			oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
		pki_ih3->pm = 0x7;
		pki_ih3->sl = 8;

		if (sc->datasize)
			ih3->dlengsz = sc->datasize;

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd3.ossp[0] = ossp0;
		sc->cmd.cmd3.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/*PKI IH3*/
			/* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
			ih3->fsz = LIO_SOFTCMDRESP_IH3;
		} else {
			irh->rflag = 0;
			/*PKI IH3*/
			/* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */
			ih3->fsz = LIO_PCICMD_O3;
		}

	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		ih2->tagtype = ATOMIC_TAG;
		ih2->tag = LIO_CONTROL;
		ih2->raw = 1;
		ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

		if (sc->datasize) {
			ih2->dlengsz = sc->datasize;
			ih2->rs = 1;
		}

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd2.ossp[0] = ossp0;
		sc->cmd.cmd2.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
			ih2->fsz = LIO_SOFTCMDRESP_IH2;
		} else {
			irh->rflag = 0;
			/* irh + ossp[0] + ossp[1] = 24 bytes */
			ih2->fsz = LIO_PCICMD_O2;
		}
	}
}

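/* Plug the DMA data/response pointers prepared by
 * octeon_alloc_soft_command() into the command, seed the completion
 * status word, and post the command with an immediate doorbell ring.
 */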
int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	u32 len;

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
		if (ih3->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd3.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd3.rptr = sc->dmarptr;
		}
		len = (u32)ih3->dlengsz;
	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		if (ih2->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd2.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd2.rptr = sc->dmarptr;
		}
		len = (u32)ih2->dlengsz;
	}

	if (sc->wait_time)
		sc->timeout = jiffies + sc->wait_time;

	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				    len, REQTYPE_SOFT_COMMAND));
}

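/* Preallocate MAX_SOFT_COMMAND_BUFFERS DMA-coherent buffers of
 * SOFT_COMMAND_BUFFER_SIZE bytes each and chain them on the device's
 * soft command free list.
 */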
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	int i;
	u64 dma_addr;
	struct octeon_soft_command *sc;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc) {
			octeon_free_sc_buffer_pool(oct);
			return 1;
		}

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}

int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);

		sc = (struct octeon_soft_command *)tmp;

		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	return 0;
}

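/* Take one buffer from the pool and carve it up in place: the
 * octeon_soft_command header first, then the optional context, data,
 * and response regions, with data and rdata each aligned to a 128-byte
 * boundary. The status word is the last 8 bytes of the response
 * region. Returns NULL if the pool is empty.
 */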
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	WARN_ON((offset + datasize + rdatasize + ctxsize) >
	       SOFT_COMMAND_BUFFER_SIZE);

	spin_lock_bh(&oct->sc_buf_pool.lock);

	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock_bh(&oct->sc_buf_pool.lock);
		return NULL;
	}

	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;

	list_del(tmp);

	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		WARN_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}

void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);
}