be_cmds.c
/**
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@avagotech.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <scsi/iscsi_proto.h>

#include "be_main.h"
#include "be.h"
#include "be_mgmt.h"
int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
{
        u32 sreset;
        u8 *pci_reset_offset = 0;
        u8 *pci_online0_offset = 0;
        u8 *pci_online1_offset = 0;
        u32 pconline0 = 0;
        u32 pconline1 = 0;
        u32 i;

        pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
        pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
        pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;
        sreset = readl((void *)pci_reset_offset);
        sreset |= BE2_SET_RESET;
        writel(sreset, (void *)pci_reset_offset);

        i = 0;
        while (sreset & BE2_SET_RESET) {
                if (i > 64)
                        break;
                msleep(100);
                sreset = readl((void *)pci_reset_offset);
                i++;
        }

        if (sreset & BE2_SET_RESET) {
                printk(KERN_ERR DRV_NAME " Soft Reset did not deassert\n");
                return -EIO;
        }

        pconline1 = BE2_MPU_IRAM_ONLINE;
        writel(pconline0, (void *)pci_online0_offset);
        writel(pconline1, (void *)pci_online1_offset);

        sreset |= BE2_SET_RESET;
        writel(sreset, (void *)pci_reset_offset);

        i = 0;
        while (sreset & BE2_SET_RESET) {
                if (i > 64)
                        break;
                msleep(1);
                sreset = readl((void *)pci_reset_offset);
                i++;
        }
        if (sreset & BE2_SET_RESET) {
                printk(KERN_ERR DRV_NAME
                       " MPU Online Soft Reset did not deassert\n");
                return -EIO;
        }
        return 0;
}
int be_chk_reset_complete(struct beiscsi_hba *phba)
{
        unsigned int num_loop;
        u8 *mpu_sem = 0;
        u32 status;

        num_loop = 1000;
        mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
        msleep(5000);

        while (num_loop) {
                status = readl((void *)mpu_sem);

                if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
                        break;
                msleep(60);
                num_loop--;
        }

        if ((status & 0x80000000) || (!num_loop)) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BC_%d : Failed in be_chk_reset_complete, "
                            "status = 0x%x\n", status);
                return -EIO;
        }

        return 0;
}
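/*
 * A minimal decode sketch for the semaphore word polled above, assuming
 * the BE2/BE3 POST conventions used by related Emulex BE2 code (bit 31
 * set on a POST error, the low 16 bits advancing to 0xC000 once ARM
 * firmware is ready); the stage value's meaning is an assumption here:
 *
 *        status = readl((void *)mpu_sem);
 *        if (status & 0x80000000)                  // POST error reported by FW
 *                // reset failed
 *        else if ((status & 0x0000FFFF) == 0xC000) // FW-ready POST stage
 *                // reset complete
 */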
void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
{
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
        u32 val = 0;

        set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state);
        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
        /* ring the doorbell only after the request and tag state are written */
        wmb();
        iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}
unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
{
        unsigned int tag = 0;

        spin_lock(&phba->ctrl.mcc_lock);
        if (phba->ctrl.mcc_tag_available) {
                tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
                phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
                phba->ctrl.mcc_numtag[tag] = 0;
                phba->ctrl.ptag_state[tag].tag_state = 0;
        }
        if (tag) {
                phba->ctrl.mcc_tag_available--;
                if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
                        phba->ctrl.mcc_alloc_index = 0;
                else
                        phba->ctrl.mcc_alloc_index++;
        }
        spin_unlock(&phba->ctrl.mcc_lock);
        return tag;
}
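/*
 * Typical MCC command lifecycle built from the helpers in this file (a
 * sketch modeled on be_cmd_set_vlan() at the bottom of this file; the
 * payload preparation step is command specific and elided here):
 *
 *        tag = alloc_mcc_tag(phba);
 *        if (!tag)
 *                return 0;                 // no free tag, retry later
 *        wrb = wrb_from_mccq(phba);
 *        req = embedded_payload(wrb);
 *        wrb->tag0 |= tag;
 *        // ... fill in req for the specific command ...
 *        be_mcc_notify(phba, tag);         // ring the MCCQ doorbell
 *        rc = beiscsi_mccq_compl(phba, tag, NULL, NULL); // wait, free tag
 */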
/*
 * beiscsi_mccq_compl()- Wait for completion of MBX
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * Waits for MBX completion with the passed TAG.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
int beiscsi_mccq_compl(struct beiscsi_hba *phba,
                       uint32_t tag, struct be_mcc_wrb **wrb,
                       struct be_dma_mem *mbx_cmd_mem)
{
        int rc = 0;
        uint32_t mcc_tag_response;
        uint16_t status = 0, addl_status = 0, wrb_num = 0;
        struct be_mcc_wrb *temp_wrb;
        struct be_cmd_req_hdr *mbx_hdr;
        struct be_cmd_resp_hdr *mbx_resp_hdr;
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;

        if (beiscsi_error(phba)) {
                free_mcc_tag(&phba->ctrl, tag);
                return -EPERM;
        }

        /* wait for the mccq completion */
        rc = wait_event_interruptible_timeout(
                                phba->ctrl.mcc_wait[tag],
                                phba->ctrl.mcc_numtag[tag],
                                msecs_to_jiffies(
                                BEISCSI_HOST_MBX_TIMEOUT));
        /**
         * If the MBOX cmd timeout expired, the tag and resources allocated
         * for the cmd are not freed until FW returns the completion.
         */
        if (rc <= 0) {
                struct be_dma_mem *tag_mem;

                /**
                 * PCI/DMA memory allocated and posted in non-embedded mode
                 * will have mbx_cmd_mem != NULL.
                 * Save virtual and bus addresses for the command so that it
                 * can be freed later.
                 **/
                tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
                if (mbx_cmd_mem) {
                        tag_mem->size = mbx_cmd_mem->size;
                        tag_mem->va = mbx_cmd_mem->va;
                        tag_mem->dma = mbx_cmd_mem->dma;
                } else
                        tag_mem->size = 0;

                /* first make tag_mem_state visible to all */
                wmb();
                set_bit(MCC_TAG_STATE_TIMEOUT,
                        &phba->ctrl.ptag_state[tag].tag_state);

                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
                            BEISCSI_LOG_CONFIG,
                            "BC_%d : MBX Cmd Completion timed out\n");
                return -EBUSY;
        }

        rc = 0;
        mcc_tag_response = phba->ctrl.mcc_numtag[tag];
        status = (mcc_tag_response & CQE_STATUS_MASK);
        addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
                       CQE_STATUS_ADDL_SHIFT);

        if (mbx_cmd_mem) {
                mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
        } else {
                wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
                          CQE_STATUS_WRB_SHIFT;
                temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
                mbx_hdr = embedded_payload(temp_wrb);

                if (wrb)
                        *wrb = temp_wrb;
        }

        if (status || addl_status) {
                beiscsi_log(phba, KERN_WARNING,
                            BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
                            BEISCSI_LOG_CONFIG,
                            "BC_%d : MBX Cmd Failed for "
                            "Subsys : %d Opcode : %d with "
                            "Status : %d and Extd_Status : %d\n",
                            mbx_hdr->subsystem,
                            mbx_hdr->opcode,
                            status, addl_status);
                rc = -EIO;
                if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
                        mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
                        beiscsi_log(phba, KERN_WARNING,
                                    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
                                    BEISCSI_LOG_CONFIG,
                                    "BC_%d : Insufficient Buffer Error "
                                    "Resp_Len : %d Actual_Resp_Len : %d\n",
                                    mbx_resp_hdr->response_length,
                                    mbx_resp_hdr->actual_resp_len);
                        rc = -EAGAIN;
                }
        }
        free_mcc_tag(&phba->ctrl, tag);
        return rc;
}
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
{
        spin_lock(&ctrl->mcc_lock);
        tag = tag & 0x000000FF;
        ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
        if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
                ctrl->mcc_free_index = 0;
        else
                ctrl->mcc_free_index++;
        ctrl->mcc_tag_available++;
        spin_unlock(&ctrl->mcc_lock);
}
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
        if (compl->flags != 0) {
                compl->flags = le32_to_cpu(compl->flags);
                WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
                return true;
        } else
                return false;
}

static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
        compl->flags = 0;
}
/*
 * be_mcc_compl_process()- Check the MBX completion status
 * @ctrl: Function specific MBX data structure
 * @compl: Completion status of MBX Command
 *
 * Check for the MBX completion status when the BMBX method is used.
 *
 * return
 * Success: Zero
 * Failure: Non-Zero
 **/
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
                                struct be_mcc_compl *compl)
{
        u16 compl_status, extd_status;
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
        struct be_cmd_resp_hdr *resp_hdr;

        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                       CQE_STATUS_COMPL_MASK;
        if (compl_status != MCC_STATUS_SUCCESS) {
                extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                              CQE_STATUS_EXTD_MASK;

                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                            "BC_%d : error in cmd completion: "
                            "Subsystem : %d Opcode : %d "
                            "status(compl/extd)=%d/%d\n",
                            hdr->subsystem, hdr->opcode,
                            compl_status, extd_status);

                if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
                        resp_hdr = (struct be_cmd_resp_hdr *)hdr;
                        if (resp_hdr->response_length)
                                return 0;
                }
                return -EINVAL;
        }
        return 0;
}
int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
                             struct be_mcc_compl *compl)
{
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        u16 compl_status, extd_status;
        struct be_dma_mem *tag_mem;
        unsigned short tag;

        be_dws_le_to_cpu(compl, 4);
        tag = (compl->tag0 & 0x000000FF);

        if (!test_bit(MCC_TAG_STATE_RUNNING,
                      &ctrl->ptag_state[tag].tag_state)) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX |
                            BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
                            "BC_%d : MBX cmd completed but not posted\n");
                return 0;
        }

        if (test_bit(MCC_TAG_STATE_TIMEOUT,
                     &ctrl->ptag_state[tag].tag_state)) {
                beiscsi_log(phba, KERN_WARNING,
                            BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
                            BEISCSI_LOG_CONFIG,
                            "BC_%d : MBX Completion for timeout Command from FW\n");
                /**
                 * Check for the size before freeing resource.
                 * Only for non-embedded cmd, PCI resource is allocated.
                 **/
                tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
                if (tag_mem->size)
                        pci_free_consistent(ctrl->pdev, tag_mem->size,
                                            tag_mem->va, tag_mem->dma);
                free_mcc_tag(ctrl, tag);
                return 0;
        }

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                       CQE_STATUS_COMPL_MASK;
        /* The ctrl.mcc_numtag[tag] is filled with
         * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
         * [7:0] = compl_status
         */
        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                      CQE_STATUS_EXTD_MASK;
        ctrl->mcc_numtag[tag] = 0x80000000;
        ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
        ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
        ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);

        /* write ordering implied in wake_up_interruptible */
        clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);
        wake_up_interruptible(&ctrl->mcc_wait[tag]);
        return 0;
}
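/*
 * Example decode of the mcc_numtag[] word assembled above, per the
 * layout comment ([31] valid, [23:16] wrb, [15:8] extd_status,
 * [7:0] compl_status): a completion with wrb 0x12, extd_status 0x02 and
 * compl_status 0x01 is stored as 0x80120201, and beiscsi_mccq_compl()
 * recovers the fields with
 *
 *        status      = word & CQE_STATUS_MASK;                       // 0x01
 *        addl_status = (word & CQE_STATUS_ADDL_MASK)
 *                        >> CQE_STATUS_ADDL_SHIFT;                   // 0x02
 *        wrb_num     = (word & CQE_STATUS_WRB_MASK)
 *                        >> CQE_STATUS_WRB_SHIFT;                    // 0x12
 */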
static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
{
        struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
        struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

        if (be_mcc_compl_is_new(compl)) {
                queue_tail_inc(mcc_cq);
                return compl;
        }
        return NULL;
}
/**
 * beiscsi_fail_session(): Close the session with an appropriate error
 * @cls_session: ptr to session
 **/
void beiscsi_fail_session(struct iscsi_cls_session *cls_session)
{
        iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}
static void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
                struct be_async_event_link_state *evt)
{
        if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
            ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
             (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
                phba->state = BE_ADAPTER_LINK_DOWN;

                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
                            "BC_%d : Link Down on Port %d\n",
                            evt->physical_port);

                iscsi_host_for_each_session(phba->shost,
                                            beiscsi_fail_session);
        } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
                   ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
                    (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
                phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
                phba->get_boot = BE_GET_BOOT_RETRIES;

                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
                            "BC_%d : Link UP on Port %d\n",
                            evt->physical_port);
        }
}
static char *beiscsi_port_misconf_event_msg[] = {
        "Physical Link is functional.",
        "Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
        "Optics of two types installed - Remove one optic or install matching pair of optics.",
        "Incompatible optics - Replace with compatible optics for card to function.",
        "Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
        "Uncertified optics - Replace with Avago Certified optics to enable link operation."
};
static void beiscsi_process_async_sli(struct beiscsi_hba *phba,
                                      struct be_mcc_compl *compl)
{
        struct be_async_event_sli *async_sli;
        u8 evt_type, state, old_state, le;
        char *sev = KERN_WARNING;
        char *msg = NULL;

        evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT;
        evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK;

        /* processing only MISCONFIGURED physical port event */
        if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
                return;

        async_sli = (struct be_async_event_sli *)compl;
        state = async_sli->event_data1 >>
                (phba->fw_config.phys_port * 8) & 0xff;
        le = async_sli->event_data2 >>
             (phba->fw_config.phys_port * 8) & 0xff;

        old_state = phba->optic_state;
        phba->optic_state = state;

        if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) {
                /* fw is reporting a state we don't know, log and return */
                __beiscsi_log(phba, KERN_ERR,
                              "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
                              phba->port_name, async_sli->event_data1);
                return;
        }

        if (ASYNC_SLI_LINK_EFFECT_VALID(le)) {
                /* log link effect for unqualified-4, uncertified-5 optics */
                if (state > 3)
                        msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ?
                                " Link is non-operational." :
                                " Link is operational.";
                /* 1 - info */
                if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1)
                        sev = KERN_INFO;
                /* 2 - error */
                if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2)
                        sev = KERN_ERR;
        }

        if (old_state != phba->optic_state)
                __beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n",
                              phba->port_name,
                              beiscsi_port_misconf_event_msg[state],
                              !msg ? "" : msg);
}
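/*
 * Worked example of the per-port extraction above: event_data1 and
 * event_data2 carry one byte per physical port. On phys_port 1 with
 * event_data1 == 0x00000200,
 *
 *        state = (0x00000200 >> (1 * 8)) & 0xff = 0x02
 *
 * which indexes "Optics of two types installed ..." in the message
 * table above.
 */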
void beiscsi_process_async_event(struct beiscsi_hba *phba,
                                 struct be_mcc_compl *compl)
{
        char *sev = KERN_INFO;
        u8 evt_code;

        /* interpret flags as an async trailer */
        evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT;
        evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK;

        switch (evt_code) {
        case ASYNC_EVENT_CODE_LINK_STATE:
                beiscsi_async_link_state_process(phba,
                                (struct be_async_event_link_state *)compl);
                break;
        case ASYNC_EVENT_CODE_ISCSI:
                phba->state |= BE_ADAPTER_CHECK_BOOT;
                phba->get_boot = BE_GET_BOOT_RETRIES;
                sev = KERN_ERR;
                break;
        case ASYNC_EVENT_CODE_SLI:
                beiscsi_process_async_sli(phba, compl);
                break;
        default:
                /* event not registered */
                sev = KERN_ERR;
        }

        beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                    "BC_%d : ASYNC Event: status 0x%08x flags 0x%08x\n",
                    compl->status, compl->flags);
}
int beiscsi_process_mcc(struct beiscsi_hba *phba)
{
        struct be_mcc_compl *compl;
        int num = 0, status = 0;
        struct be_ctrl_info *ctrl = &phba->ctrl;

        spin_lock_bh(&phba->ctrl.mcc_cq_lock);
        while ((compl = be_mcc_compl_get(phba))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        beiscsi_process_async_event(phba, compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                        status = be_mcc_compl_process(ctrl, compl);
                        atomic_dec(&phba->ctrl.mcc_obj.q.used);
                }
                be_mcc_compl_use(compl);
                num++;
        }

        if (num)
                hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0);

        spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
        return status;
}
/*
 * be_mcc_wait_compl()- Wait for MBX completion
 * @phba: driver private structure
 *
 * Wait until no more pending MCC requests are present.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 *
 **/
static int be_mcc_wait_compl(struct beiscsi_hba *phba)
{
        int i, status;

        for (i = 0; i < mcc_timeout; i++) {
                if (beiscsi_error(phba))
                        return -EIO;

                status = beiscsi_process_mcc(phba);
                if (status)
                        return status;

                if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
                        break;
                udelay(100);
        }
        if (i == mcc_timeout) {
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                            "BC_%d : FW Timed Out\n");
                phba->fw_timeout = true;
                beiscsi_ue_detect(phba);
                return -EBUSY;
        }
        return 0;
}
/*
 * be_mcc_notify_wait()- Notify and wait for Compl
 * @phba: driver private structure
 *
 * Notify MCC requests and wait for completion
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
int be_mcc_notify_wait(struct beiscsi_hba *phba, unsigned int tag)
{
        be_mcc_notify(phba, tag);
        return be_mcc_wait_compl(phba);
}
/*
 * be_mbox_db_ready_wait()- Check ready status
 * @ctrl: Function specific MBX data structure
 *
 * Check for the ready status of FW to send BMBX
 * commands to adapter.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
/* wait 30s for generic non-flash MBOX operation */
#define BEISCSI_MBX_RDY_BIT_TIMEOUT 30000
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        unsigned long timeout;
        u32 ready;

        /*
         * This BMBX busy-wait path is used during init only.
         * For the commands executed during init, 5s should suffice,
         * so the 30s budget above is generous.
         */
        timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);

        do {
                if (beiscsi_error(phba))
                        return -EIO;

                ready = ioread32(db);
                if (ready == 0xffffffff)
                        return -EIO;

                ready &= MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        return 0;

                if (time_after(jiffies, timeout))
                        break;
                msleep(20);
        } while (!ready);

        beiscsi_log(phba, KERN_ERR,
                    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                    "BC_%d : FW Timed Out\n");

        phba->fw_timeout = true;
        beiscsi_ue_detect(phba);

        return -EBUSY;
}
/*
 * be_mbox_notify: Notify adapter of new BMBX command
 * @ctrl: Function specific MBX data structure
 *
 * Ring doorbell to inform adapter of a BMBX command
 * to process
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
        int status;
        u32 val = 0;
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);

        status = be_mbox_db_ready_wait(ctrl);
        if (status)
                return status;

        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val |= MPU_MAILBOX_DB_HI_MASK;
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(ctrl);
        if (status)
                return status;

        val = 0;
        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val &= ~MPU_MAILBOX_DB_HI_MASK;
        val |= (u32)(mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(ctrl);
        if (status)
                return status;

        /* RDY is set; small delay before CQE read. */
        udelay(1);

        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(ctrl, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status) {
                        beiscsi_log(phba, KERN_ERR,
                                    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                                    "BC_%d : After be_mcc_compl_process\n");
                        return status;
                }
        } else {
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                            "BC_%d : Invalid Mailbox Completion\n");
                return -EBUSY;
        }
        return 0;
}
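/*
 * Worked example of the two-step BMBX address handoff above, for a
 * 16-byte-aligned mbox_mem->dma of 0x0000001234567890 (per the bit
 * comments in be_mbox_notify_wait() below, bits [31:2] of each write
 * carry address bits [63:34] and [33:4] respectively):
 *
 *        high write: upper_32_bits(dma) = 0x00000012, so
 *                    val = MPU_MAILBOX_DB_HI_MASK | ((0x12 >> 2) << 2)
 *                        = MPU_MAILBOX_DB_HI_MASK | 0x10
 *        low write:  val = ((u32)(dma >> 4)) << 2
 *                        = 0x23456789 << 2 = 0x8D159E24
 */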
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct beiscsi_hba *phba)
{
        int status;
        u32 val = 0;
        void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;
        struct be_ctrl_info *ctrl = &phba->ctrl;

        status = be_mbox_db_ready_wait(ctrl);
        if (status)
                return status;

        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(ctrl);
        if (status != 0)
                return status;

        val = 0;
        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
        val |= (u32)(mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(ctrl);
        if (status != 0)
                return status;

        /* A cq entry has been made now */
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(ctrl, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status)
                        return status;
        } else {
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                            "BC_%d : invalid mailbox completion\n");
                return -EBUSY;
        }
        return 0;
}
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
                        bool embedded, u8 sge_cnt)
{
        if (embedded)
                wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
        else
                wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
                                 MCC_WRB_SGE_CNT_SHIFT;
        wrb->payload_length = payload_len;
        be_dws_cpu_to_le(wrb, 8);
}

void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                        u8 subsystem, u8 opcode, int cmd_len)
{
        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
        req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
                                      struct be_dma_mem *mem)
{
        int i, buf_pages;
        u64 dma = (u64)mem->dma;

        buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
        for (i = 0; i < buf_pages; i++) {
                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
                dma += PAGE_SIZE_4K;
        }
}
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
        const u32 round = 10;
        u32 multiplier;

        if (usec_delay == 0)
                multiplier = 0;
        else {
                u32 interrupt_rate = 1000000 / usec_delay;

                if (interrupt_rate == 0)
                        multiplier = 1023;
                else {
                        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
                        multiplier /= interrupt_rate;
                        /* round the multiplier to the closest value */
                        multiplier = (multiplier + round / 2) / round;
                        multiplier = min(multiplier, (u32)1023);
                }
        }
        return multiplier;
}
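/*
 * Worked example for the conversion above: eq_delay = 8us gives
 * interrupt_rate = 1000000 / 8 = 125000, so
 *
 *        multiplier = (651042 - 125000) * 10 / 125000 = 42
 *        multiplier = (42 + 5) / 10 = 4
 *
 * i.e. realistic interrupt rates encode to small delay multipliers,
 * clamped at 1023.
 */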
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
        return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
{
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
        struct be_mcc_wrb *wrb;

        WARN_ON(atomic_read(&mccq->used) >= mccq->len);
        wrb = queue_head_node(mccq);
        memset(wrb, 0, sizeof(*wrb));
        wrb->tag0 = (mccq->head & 0x000000FF) << 16;
        queue_head_inc(mccq);
        atomic_inc(&mccq->used);
        return wrb;
}
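/*
 * Note on the tag0 value set above: the MCCQ head index is stashed in
 * bits [23:16] of tag0, which is how be_mcc_compl_process_isr() copies
 * it into mcc_numtag[] and beiscsi_mccq_compl() recovers the WRB number
 * via CQE_STATUS_WRB_MASK. Callers later OR the MCC tag into bits
 * [7:0] (see be_cmd_set_vlan()).
 */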
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *eq, int eq_delay)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_eq_create *req = embedded_payload(wrb);
        struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;

        mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                           OPCODE_COMMON_EQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_eq_context, func, req->context,
                      PCI_FUNC(ctrl->pdev->devfn));
        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
                      __ilog2_u32(eq->len / 256));
        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
                      eq_delay_to_mult(eq_delay));
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
        if (!status) {
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }
        mutex_unlock(&ctrl->mbox_lock);
        return status;
}
/**
 * be_cmd_fw_initialize()- Initialize FW
 * @ctrl: Pointer to function control structure
 *
 * Send FW initialize pattern for the function.
 *
 * return
 * Success: 0
 * Failure: Non-Zero value
 **/
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        int status;
        u8 *endian_check;

        mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        endian_check = (u8 *)wrb;
        *endian_check++ = 0xFF;
        *endian_check++ = 0x12;
        *endian_check++ = 0x34;
        *endian_check++ = 0xFF;
        *endian_check++ = 0xFF;
        *endian_check++ = 0x56;
        *endian_check++ = 0x78;
        *endian_check++ = 0xFF;
        be_dws_cpu_to_le(wrb, sizeof(*wrb));

        status = be_mbox_notify(ctrl);
        if (status)
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BC_%d : be_cmd_fw_initialize Failed\n");

        mutex_unlock(&ctrl->mbox_lock);
        return status;
}
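/*
 * The 8-byte pattern written above (FF 12 34 FF FF 56 78 FF) is the
 * BMBX "FW initialize" signature. The endian_check name suggests the
 * adapter uses the embedded 0x12/0x34/0x56/0x78 marker bytes to verify
 * host byte ordering before accepting further mailbox traffic; that
 * reading is an assumption, though the values themselves come from the
 * BE2/BE3 interface. be_cmd_fw_uninit() below writes the matching
 * teardown pattern (FF AA BB FF FF CC DD FF).
 */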
/**
 * be_cmd_fw_uninit()- Uninitialize FW
 * @ctrl: Pointer to function control structure
 *
 * Send FW uninitialize pattern for the function
 *
 * return
 * Success: 0
 * Failure: Non-Zero value
 **/
int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        int status;
        u8 *endian_check;

        mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        endian_check = (u8 *)wrb;
        *endian_check++ = 0xFF;
        *endian_check++ = 0xAA;
        *endian_check++ = 0xBB;
        *endian_check++ = 0xFF;
        *endian_check++ = 0xFF;
        *endian_check++ = 0xCC;
        *endian_check++ = 0xDD;
        *endian_check = 0xFF;
        be_dws_cpu_to_le(wrb, sizeof(*wrb));

        status = be_mbox_notify(ctrl);
        if (status)
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BC_%d : be_cmd_fw_uninit Failed\n");

        mutex_unlock(&ctrl->mbox_lock);
        return status;
}
int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *cq, struct be_queue_info *eq,
                          bool sol_evts, bool no_delay, int coalesce_wm)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_cq_create *req = embedded_payload(wrb);
        struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt = &req->context;
        int status;

        mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                           OPCODE_COMMON_CQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
        if (is_chip_be2_be3r(phba)) {
                AMAP_SET_BITS(struct amap_cq_context, coalescwm,
                              ctxt, coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
                AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
                              __ilog2_u32(cq->len / 256));
                AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
                AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
                AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
                              PCI_FUNC(ctrl->pdev->devfn));
        } else {
                req->hdr.version = MBX_CMD_VER2;
                req->page_size = 1;
                AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
                              ctxt, coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
                              ctxt, no_delay);
                AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
                              __ilog2_u32(cq->len / 256));
                AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
                AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
        }

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
        if (!status) {
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        } else
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BC_%d : In be_cmd_cq_create, status=0x%08x\n",
                            status);

        mutex_unlock(&ctrl->mbox_lock);

        return status;
}
static u32 be_encoded_q_len(int q_len)
{
        u32 len_encoded = fls(q_len);   /* log2(len) + 1 */

        if (len_encoded == 16)
                len_encoded = 0;
        return len_encoded;
}
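/*
 * Worked examples for the encoding above: q_len = 256 gives
 * fls(256) = 9, q_len = 1024 gives 11, and the maximum q_len = 32768
 * gives fls() = 16, which is remapped to the special encoding 0.
 */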
int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
                            struct be_queue_info *mccq,
                            struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create_ext *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        struct be_ctrl_info *ctrl;
        void *ctxt;
        int status;

        mutex_lock(&phba->ctrl.mbox_lock);
        ctrl = &phba->ctrl;
        wrb = wrb_from_mbox(&ctrl->mbox_mem);
        memset(wrb, 0, sizeof(*wrb));
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                           OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
        req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
        req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;

        AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
                      PCI_FUNC(phba->pcidev->devfn));
        AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
                      be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(phba);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }
        mutex_unlock(&phba->ctrl.mbox_lock);

        return status;
}
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
                          int queue_type)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        u8 subsys = 0, opcode = 0;
        int status;

        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
                    "BC_%d : In beiscsi_cmd_q_destroy "
                    "queue_type : %d\n", queue_type);

        mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        case QTYPE_WRBQ:
                subsys = CMD_SUBSYSTEM_ISCSI;
                opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
                break;
        case QTYPE_DPDUQ:
                subsys = CMD_SUBSYSTEM_ISCSI;
                opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
                break;
        case QTYPE_SGL:
                subsys = CMD_SUBSYSTEM_ISCSI;
                opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
                break;
        default:
                mutex_unlock(&ctrl->mbox_lock);
                BUG();
                return -ENXIO;
        }
        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
        if (queue_type != QTYPE_SGL)
                req->id = cpu_to_le16(q->id);

        status = be_mbox_notify(ctrl);

        mutex_unlock(&ctrl->mbox_lock);
        return status;
}
/**
 * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
 * @ctrl: ptr to ctrl_info
 * @cq: Completion Queue
 * @dq: Default Queue
 * @length: ring size
 * @entry_size: size of each entry in DEFQ
 * @is_header: Header or Data DEFQ
 * @ulp_num: Bind to which ULP
 *
 * Create HDR/Data DEFQ for the passed ULP. Unsolicited PDUs are posted
 * on this queue by the FW.
 *
 * return
 * Success: 0
 * Failure: Non-Zero Value
 *
 **/
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
                                    struct be_queue_info *cq,
                                    struct be_queue_info *dq, int length,
                                    int entry_size, uint8_t is_header,
                                    uint8_t ulp_num)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_defq_create_req *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &dq->dma_mem;
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        void *ctxt = &req->context;
        int status;

        mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        if (phba->fw_config.dual_ulp_aware) {
                req->ulp_num = ulp_num;
                req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
                req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
        }

        if (is_chip_be2_be3r(phba)) {
                AMAP_SET_BITS(struct amap_be_default_pdu_context,
                              rx_pdid, ctxt, 0);
                AMAP_SET_BITS(struct amap_be_default_pdu_context,
                              rx_pdid_valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_be_default_pdu_context,
                              pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
                AMAP_SET_BITS(struct amap_be_default_pdu_context,
                              ring_size, ctxt,
                              be_encoded_q_len(length /
                                               sizeof(struct phys_addr)));
                AMAP_SET_BITS(struct amap_be_default_pdu_context,
                              default_buffer_size, ctxt, entry_size);
                AMAP_SET_BITS(struct amap_be_default_pdu_context,
                              cq_id_recv, ctxt, cq->id);
        } else {
                AMAP_SET_BITS(struct amap_default_pdu_context_ext,
                              rx_pdid, ctxt, 0);
                AMAP_SET_BITS(struct amap_default_pdu_context_ext,
                              rx_pdid_valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_default_pdu_context_ext,
                              ring_size, ctxt,
                              be_encoded_q_len(length /
                                               sizeof(struct phys_addr)));
                AMAP_SET_BITS(struct amap_default_pdu_context_ext,
                              default_buffer_size, ctxt, entry_size);
                AMAP_SET_BITS(struct amap_default_pdu_context_ext,
                              cq_id_recv, ctxt, cq->id);
        }

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
        if (!status) {
                struct be_ring *defq_ring;
                struct be_defq_create_resp *resp = embedded_payload(wrb);

                dq->id = le16_to_cpu(resp->id);
                dq->created = true;
                if (is_header)
                        defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
                else
                        defq_ring = &phba->phwi_ctrlr->
                                    default_pdu_data[ulp_num];

                defq_ring->id = dq->id;

                if (!phba->fw_config.dual_ulp_aware) {
                        defq_ring->ulp_num = BEISCSI_ULP0;
                        defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
                } else {
                        defq_ring->ulp_num = resp->ulp_num;
                        defq_ring->doorbell_offset = resp->doorbell_offset;
                }
        }
        mutex_unlock(&ctrl->mbox_lock);

        return status;
}
/**
 * be_cmd_wrbq_create()- Create WRBQ
 * @ctrl: ptr to ctrl_info
 * @q_mem: memory details for the queue
 * @wrbq: queue info
 * @pwrb_context: ptr to wrb_context
 * @ulp_num: ULP on which the WRBQ is to be created
 *
 * Create WRBQ on the passed ULP_NUM.
 *
 **/
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
                       struct be_dma_mem *q_mem,
                       struct be_queue_info *wrbq,
                       struct hwi_wrb_context *pwrb_context,
                       uint8_t ulp_num)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_wrbq_create_req *req = embedded_payload(wrb);
        struct be_wrbq_create_resp *resp = embedded_payload(wrb);
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        int status;

        mutex_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

        if (phba->fw_config.dual_ulp_aware) {
                req->ulp_num = ulp_num;
                req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
                req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
        }

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
        if (!status) {
                wrbq->id = le16_to_cpu(resp->cid);
                wrbq->created = true;

                pwrb_context->cid = wrbq->id;
                if (!phba->fw_config.dual_ulp_aware) {
                        pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
                        pwrb_context->ulp_num = BEISCSI_ULP0;
                } else {
                        pwrb_context->ulp_num = resp->ulp_num;
                        pwrb_context->doorbell_offset = resp->doorbell_offset;
                }
        }
        mutex_unlock(&ctrl->mbox_lock);
        return status;
}
int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
                                   struct be_dma_mem *q_mem)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_post_template_pages_req *req = embedded_payload(wrb);
        int status;

        mutex_lock(&ctrl->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                           OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
                           sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
        mutex_unlock(&ctrl->mbox_lock);
        return status;
}

int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_remove_template_pages_req *req = embedded_payload(wrb);
        int status;

        mutex_lock(&ctrl->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                           OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
                           sizeof(*req));

        req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;

        status = be_mbox_notify(ctrl);
        mutex_unlock(&ctrl->mbox_lock);
        return status;
}
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
                                struct be_dma_mem *q_mem,
                                u32 page_offset, u32 num_pages)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_post_sgl_pages_req *req = embedded_payload(wrb);
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        int status;
        unsigned int curr_pages;
        u32 internal_page_offset = 0;
        u32 temp_num_pages = num_pages;

        if (num_pages == 0xff)
                num_pages = 1;

        mutex_lock(&ctrl->mbox_lock);
        do {
                memset(wrb, 0, sizeof(*wrb));
                be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
                be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                                   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
                                   sizeof(*req));
                curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
                                                pages);
                req->num_pages = min(num_pages, curr_pages);
                req->page_offset = page_offset;
                be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
                q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
                internal_page_offset += req->num_pages;
                page_offset += req->num_pages;
                num_pages -= req->num_pages;

                if (temp_num_pages == 0xff)
                        req->num_pages = temp_num_pages;

                status = be_mbox_notify(ctrl);
                if (status) {
                        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                                    "BC_%d : FW CMD to map iscsi frags failed.\n");
                        goto error;
                }
        } while (num_pages > 0);
error:
        mutex_unlock(&ctrl->mbox_lock);
        if (status != 0)
                beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
        return status;
}
int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
{
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_post_sgl_pages_req *req = embedded_payload(wrb);
        int status;

        mutex_lock(&ctrl->mbox_lock);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                           OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
        status = be_mbox_notify_wait(phba);

        mutex_unlock(&ctrl->mbox_lock);
        return status;
}
/**
 * be_cmd_set_vlan()- Configure VLAN parameters on the adapter
 * @phba: device priv structure instance
 * @vlan_tag: TAG to be set
 *
 * Set the VLAN_TAG for the adapter or disable VLAN on the adapter.
 *
 * returns
 * TAG for the MBX Cmd
 **/
int be_cmd_set_vlan(struct beiscsi_hba *phba,
                    uint16_t vlan_tag)
{
        unsigned int tag = 0;
        struct be_mcc_wrb *wrb;
        struct be_cmd_set_vlan_req *req;
        struct be_ctrl_info *ctrl = &phba->ctrl;

        if (mutex_lock_interruptible(&ctrl->mbox_lock))
                return 0;
        tag = alloc_mcc_tag(phba);
        if (!tag) {
                mutex_unlock(&ctrl->mbox_lock);
                return tag;
        }

        wrb = wrb_from_mccq(phba);
        req = embedded_payload(wrb);
        wrb->tag0 |= tag;
        be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
                           sizeof(*req));

        req->interface_hndl = phba->interface_handle;
        req->vlan_priority = vlan_tag;

        be_mcc_notify(phba, tag);
        mutex_unlock(&ctrl->mbox_lock);

        return tag;
}
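/*
 * Callers pair the tag returned above with beiscsi_mccq_compl() to
 * block for the completion; a minimal usage sketch (error handling
 * beyond the tag check elided):
 *
 *        tag = be_cmd_set_vlan(phba, vlan_tag);
 *        if (!tag)
 *                return -EBUSY;    // no free tag or lock interrupted
 *        rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
 */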