fjes_hw.c

/*
 * FUJITSU Extended Socket Network Device driver
 * Copyright (c) 2015 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include "fjes_hw.h"
#include "fjes.h"
#include "fjes_trace.h"
static void fjes_hw_update_zone_task(struct work_struct *);
static void fjes_hw_epstop_task(struct work_struct *);

/* supported MTU list */
const u32 fjes_support_mtu[] = {
	FJES_MTU_DEFINE(8 * 1024),
	FJES_MTU_DEFINE(16 * 1024),
	FJES_MTU_DEFINE(32 * 1024),
	FJES_MTU_DEFINE(64 * 1024),
	0
};

u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg)
{
	u8 *base = hw->base;
	u32 value = 0;

	value = readl(&base[reg]);

	return value;
}
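
/* Map the device's register window: reserve the MMIO range described by
 * hw->hw_res for this driver and ioremap it for register access.
 */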
static u8 *fjes_hw_iomap(struct fjes_hw *hw)
{
	u8 *base;

	if (!request_mem_region(hw->hw_res.start, hw->hw_res.size,
				fjes_driver_name)) {
		pr_err("request_mem_region failed\n");
		return NULL;
	}

	base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);

	return base;
}

static void fjes_hw_iounmap(struct fjes_hw *hw)
{
	iounmap(hw->base);
	release_mem_region(hw->hw_res.start, hw->hw_res.size);
}
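
/* Request a device reset through DCTL, then poll the register until the
 * reset bit clears or FJES_DEVICE_RESET_TIMEOUT expires.
 */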
int fjes_hw_reset(struct fjes_hw *hw)
{
	union REG_DCTL dctl;
	int timeout;

	dctl.reg = 0;
	dctl.bits.reset = 1;
	wr32(XSCT_DCTL, dctl.reg);

	timeout = FJES_DEVICE_RESET_TIMEOUT * 1000;
	dctl.reg = rd32(XSCT_DCTL);
	while ((dctl.bits.reset == 1) && (timeout > 0)) {
		msleep(1000);
		dctl.reg = rd32(XSCT_DCTL);
		timeout -= 1000;
	}

	return timeout > 0 ? 0 : -EIO;
}
static int fjes_hw_get_max_epid(struct fjes_hw *hw)
{
	union REG_MAX_EP info;

	info.reg = rd32(XSCT_MAX_EP);

	return info.bits.maxep;
}

static int fjes_hw_get_my_epid(struct fjes_hw *hw)
{
	union REG_OWNER_EPID info;

	info.reg = rd32(XSCT_OWNER_EPID);

	return info.bits.epid;
}

static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw)
{
	size_t size;

	size = sizeof(struct fjes_device_shared_info) +
	    (sizeof(u8) * hw->max_epid);
	hw->hw_info.share = kzalloc(size, GFP_KERNEL);
	if (!hw->hw_info.share)
		return -ENOMEM;

	hw->hw_info.share->epnum = hw->max_epid;

	return 0;
}
static void fjes_hw_free_shared_status_region(struct fjes_hw *hw)
{
	kfree(hw->hw_info.share);
	hw->hw_info.share = NULL;
}

static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh)
{
	void *mem;

	mem = vzalloc(EP_BUFFER_SIZE);
	if (!mem)
		return -ENOMEM;

	epbh->buffer = mem;
	epbh->size = EP_BUFFER_SIZE;

	epbh->info = (union ep_buffer_info *)mem;
	epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));

	return 0;
}

static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
{
	vfree(epbh->buffer);
	epbh->buffer = NULL;
	epbh->size = 0;

	epbh->info = NULL;
	epbh->ring = NULL;
}
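
/* Initialize the version-0 EP buffer header for the given MAC address and
 * MTU, preserving any VLAN IDs that were already configured in the buffer.
 */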
void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
{
	union ep_buffer_info *info = epbh->info;
	u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
	int i;

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
		vlan_id[i] = info->v1i.vlan_id[i];

	memset(info, 0, sizeof(union ep_buffer_info));

	info->v1i.version = 0;  /* version 0 */
	for (i = 0; i < ETH_ALEN; i++)
		info->v1i.mac_addr[i] = mac_addr[i];
	info->v1i.head = 0;
	info->v1i.tail = 1;

	info->v1i.info_size = sizeof(union ep_buffer_info);
	info->v1i.buffer_size = epbh->size - info->v1i.info_size;

	info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu);
	info->v1i.count_max =
	    EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max);

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
		info->v1i.vlan_id[i] = vlan_id[i];

	info->v1i.rx_status |= FJES_RX_MTU_CHANGING_DONE;
}
void
fjes_hw_init_command_registers(struct fjes_hw *hw,
			       struct fjes_device_command_param *param)
{
	/* Request Buffer length */
	wr32(XSCT_REQBL, (__le32)(param->req_len));
	/* Response Buffer Length */
	wr32(XSCT_RESPBL, (__le32)(param->res_len));

	/* Request Buffer Address */
	wr32(XSCT_REQBAL,
	     (__le32)(param->req_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_REQBAH,
	     (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));

	/* Response Buffer Address */
	wr32(XSCT_RESPBAL,
	     (__le32)(param->res_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_RESPBAH,
	     (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));

	/* Share status address */
	wr32(XSCT_SHSTSAL,
	     (__le32)(param->share_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_SHSTSAH,
	     (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
}
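
/* Allocate the per-EP shared buffer pairs and the command request/response
 * buffers, then program their physical addresses into the device registers.
 */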
static int fjes_hw_setup(struct fjes_hw *hw)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	unsigned long flags;
	size_t mem_size;
	int result;
	int epidx;
	void *buf;

	hw->hw_info.max_epid = &hw->max_epid;
	hw->hw_info.my_epid = &hw->my_epid;

	buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
		      GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	hw->ep_shm_info = (struct ep_share_mem_info *)buf;

	mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
	hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!(hw->hw_info.req_buf))
		return -ENOMEM;

	hw->hw_info.req_buf_size = mem_size;

	mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
	hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!(hw->hw_info.res_buf))
		return -ENOMEM;

	hw->hw_info.res_buf_size = mem_size;

	result = fjes_hw_alloc_shared_status_region(hw);
	if (result)
		return result;

	hw->hw_info.buffer_share_bit = 0;
	hw->hw_info.buffer_unshare_reserve_bit = 0;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx != hw->my_epid) {
			buf_pair = &hw->ep_shm_info[epidx];

			result = fjes_hw_alloc_epbuf(&buf_pair->tx);
			if (result)
				return result;

			result = fjes_hw_alloc_epbuf(&buf_pair->rx);
			if (result)
				return result;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&buf_pair->tx, mac,
					    fjes_support_mtu[0]);
			fjes_hw_setup_epbuf(&buf_pair->rx, mac,
					    fjes_support_mtu[0]);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}
	}

	memset(&param, 0, sizeof(param));

	param.req_len = hw->hw_info.req_buf_size;
	param.req_start = __pa(hw->hw_info.req_buf);
	param.res_len = hw->hw_info.res_buf_size;
	param.res_start = __pa(hw->hw_info.res_buf);

	param.share_start = __pa(hw->hw_info.share->ep_status);

	fjes_hw_init_command_registers(hw, &param);

	return 0;
}
static void fjes_hw_cleanup(struct fjes_hw *hw)
{
	int epidx;

	if (!hw->ep_shm_info)
		return;

	fjes_hw_free_shared_status_region(hw);

	kfree(hw->hw_info.req_buf);
	hw->hw_info.req_buf = NULL;

	kfree(hw->hw_info.res_buf);
	hw->hw_info.res_buf = NULL;

	for (epidx = 0; epidx < hw->max_epid ; epidx++) {
		if (epidx == hw->my_epid)
			continue;
		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
	}

	kfree(hw->ep_shm_info);
	hw->ep_shm_info = NULL;
}
int fjes_hw_init(struct fjes_hw *hw)
{
	int ret;

	hw->base = fjes_hw_iomap(hw);
	if (!hw->base)
		return -EIO;

	ret = fjes_hw_reset(hw);
	if (ret)
		return ret;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

	INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task);
	INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);

	mutex_init(&hw->hw_info.lock);
	spin_lock_init(&hw->rx_status_lock);

	hw->max_epid = fjes_hw_get_max_epid(hw);
	hw->my_epid = fjes_hw_get_my_epid(hw);

	if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
		return -ENXIO;

	ret = fjes_hw_setup(hw);

	hw->hw_info.trace = vzalloc(FJES_DEBUG_BUFFER_SIZE);
	hw->hw_info.trace_size = FJES_DEBUG_BUFFER_SIZE;

	return ret;
}
void fjes_hw_exit(struct fjes_hw *hw)
{
	int ret;

	if (hw->base) {
		if (hw->debug_mode) {
			/* disable debug mode */
			mutex_lock(&hw->hw_info.lock);
			fjes_hw_stop_debug(hw);
			mutex_unlock(&hw->hw_info.lock);
		}
		vfree(hw->hw_info.trace);
		hw->hw_info.trace = NULL;
		hw->hw_info.trace_size = 0;
		hw->debug_mode = 0;

		ret = fjes_hw_reset(hw);
		if (ret)
			pr_err("%s: reset error", __func__);

		fjes_hw_iounmap(hw);
		hw->base = NULL;
	}

	fjes_hw_cleanup(hw);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);
}
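
/* Kick a device command by writing the request code to CR, then either poll
 * CS for completion (1 s granularity, up to FJES_COMMAND_REQ_TIMEOUT) or
 * decode the error immediately reported back in CR.
 */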
static enum fjes_dev_command_response_e
fjes_hw_issue_request_command(struct fjes_hw *hw,
			      enum fjes_dev_command_request_type type)
{
	enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
	union REG_CR cr;
	union REG_CS cs;
	int timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;

	cr.reg = 0;
	cr.bits.req_start = 1;
	cr.bits.req_code = type;
	wr32(XSCT_CR, cr.reg);
	cr.reg = rd32(XSCT_CR);

	if (cr.bits.error == 0) {
		timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
		cs.reg = rd32(XSCT_CS);

		while ((cs.bits.complete != 1) && timeout > 0) {
			msleep(1000);
			cs.reg = rd32(XSCT_CS);
			timeout -= 1000;
		}

		if (cs.bits.complete == 1)
			ret = FJES_CMD_STATUS_NORMAL;
		else if (timeout <= 0)
			ret = FJES_CMD_STATUS_TIMEOUT;
	} else {
		switch (cr.bits.err_info) {
		case FJES_CMD_REQ_ERR_INFO_PARAM:
			ret = FJES_CMD_STATUS_ERROR_PARAM;
			break;
		case FJES_CMD_REQ_ERR_INFO_STATUS:
			ret = FJES_CMD_STATUS_ERROR_STATUS;
			break;
		default:
			ret = FJES_CMD_STATUS_UNKNOWN;
			break;
		}
	}

	trace_fjes_hw_issue_request_command(&cr, &cs, timeout, ret);

	return ret;
}
int fjes_hw_request_info(struct fjes_hw *hw)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int result;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN;

	res_buf->info.length = 0;
	res_buf->info.code = 0;

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);
	trace_fjes_hw_request_info(hw, res_buf);

	result = 0;

	if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
		res_buf->info.length) {
		trace_fjes_hw_request_info_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->info.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_request_info_err("Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_ERROR_STATUS:
			result = -EPERM;
			break;
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}
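
/* Share this EP's TX/RX buffers with dest_epid: the SHARE_BUFFER request
 * carries the physical address of every page in both buffers, and the
 * command is retried with a per-EP backoff while the peer reports BUSY.
 */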
int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
			       struct ep_share_mem_info *buf_pair)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int page_count;
	int timeout;
	int i, idx;
	void *addr;
	int result;

	if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
		return 0;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
						buf_pair->tx.size,
						buf_pair->rx.size);
	req_buf->share_buffer.epid = dest_epid;

	idx = 0;
	req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size;
	page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)(buf_pair->tx.buffer)) +
				(i * EP_BUFFER_INFO_SIZE);
		req_buf->share_buffer.buffer[idx++] =
			(__le64)(page_to_phys(vmalloc_to_page(addr)) +
					offset_in_page(addr));
	}

	req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size;
	page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)(buf_pair->rx.buffer)) +
				(i * EP_BUFFER_INFO_SIZE);
		req_buf->share_buffer.buffer[idx++] =
			(__le64)(page_to_phys(vmalloc_to_page(addr)) +
					offset_in_page(addr));
	}

	res_buf->share_buffer.length = 0;
	res_buf->share_buffer.code = 0;

	trace_fjes_hw_register_buff_addr_req(req_buf, buf_pair);

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);

	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
	while ((ret == FJES_CMD_STATUS_NORMAL) &&
	       (res_buf->share_buffer.length ==
		FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) &&
	       (res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) &&
	       (timeout > 0)) {
		msleep(200 + hw->my_epid * 20);
		timeout -= (200 + hw->my_epid * 20);

		res_buf->share_buffer.length = 0;
		res_buf->share_buffer.code = 0;

		ret = fjes_hw_issue_request_command(
				hw, FJES_CMD_REQ_SHARE_BUFFER);
	}

	result = 0;

	trace_fjes_hw_register_buff_addr(res_buf, timeout);

	if (res_buf->share_buffer.length !=
			FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) {
		trace_fjes_hw_register_buff_addr_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->share_buffer.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
			break;
		case FJES_CMD_REQ_RES_CODE_BUSY:
			trace_fjes_hw_register_buff_addr_err("Busy Timeout");
			result = -EBUSY;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_register_buff_addr_err("Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}
int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	struct fjes_device_shared_info *share = hw->hw_info.share;
	enum fjes_dev_command_response_e ret;
	int timeout;
	int result;

	if (!hw->base)
		return -EPERM;

	if (!req_buf || !res_buf || !share)
		return -EPERM;

	if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
		return 0;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->unshare_buffer.length =
			FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN;
	req_buf->unshare_buffer.epid = dest_epid;

	res_buf->unshare_buffer.length = 0;
	res_buf->unshare_buffer.code = 0;

	trace_fjes_hw_unregister_buff_addr_req(req_buf);
	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);

	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
	while ((ret == FJES_CMD_STATUS_NORMAL) &&
	       (res_buf->unshare_buffer.length ==
		FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) &&
	       (res_buf->unshare_buffer.code ==
		FJES_CMD_REQ_RES_CODE_BUSY) &&
	       (timeout > 0)) {
		msleep(200 + hw->my_epid * 20);
		timeout -= (200 + hw->my_epid * 20);

		res_buf->unshare_buffer.length = 0;
		res_buf->unshare_buffer.code = 0;

		ret =
		fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
	}

	result = 0;

	trace_fjes_hw_unregister_buff_addr(res_buf, timeout);

	if (res_buf->unshare_buffer.length !=
			FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
		trace_fjes_hw_unregister_buff_addr_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->unshare_buffer.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
			break;
		case FJES_CMD_REQ_RES_CODE_BUSY:
			trace_fjes_hw_unregister_buff_addr_err("Busy Timeout");
			result = -EBUSY;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_unregister_buff_addr_err("Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}
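
/* Interrupt generation (IG), status (IS) and mask (IMS/IMC) register helpers. */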
int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid,
			    enum REG_ICTL_MASK mask)
{
	u32 ig = mask | dest_epid;

	wr32(XSCT_IG, cpu_to_le32(ig));

	return 0;
}

u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw)
{
	u32 cur_is;

	cur_is = rd32(XSCT_IS);

	return cur_is;
}

void fjes_hw_set_irqmask(struct fjes_hw *hw,
			 enum REG_ICTL_MASK intr_mask, bool mask)
{
	if (mask)
		wr32(XSCT_IMS, intr_mask);
	else
		wr32(XSCT_IMC, intr_mask);
}
bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid)
{
	if (epid >= hw->max_epid)
		return false;

	if ((hw->ep_shm_info[epid].es_status !=
			FJES_ZONING_STATUS_ENABLE) ||
		(hw->ep_shm_info[hw->my_epid].zone ==
			FJES_ZONING_ZONE_TYPE_NONE))
		return false;
	else
		return (hw->ep_shm_info[epid].zone ==
				hw->ep_shm_info[hw->my_epid].zone);
}

int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share,
			   int dest_epid)
{
	int value = false;

	if (dest_epid < share->epnum)
		value = share->ep_status[dest_epid];

	return value;
}
static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid)
{
	return test_bit(src_epid, &hw->txrx_stop_req_bit);
}

static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid)
{
	return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status &
			FJES_RX_STOP_REQ_DONE);
}

enum ep_partner_status
fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
{
	enum ep_partner_status status;

	if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) {
		if (fjes_hw_epid_is_stop_requested(hw, epid)) {
			status = EP_PARTNER_WAITING;
		} else {
			if (fjes_hw_epid_is_stop_process_done(hw, epid))
				status = EP_PARTNER_COMPLETE;
			else
				status = EP_PARTNER_SHARED;
		}
	} else {
		status = EP_PARTNER_UNSHARE;
	}

	return status;
}
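
/* Ask every shared partner EP to stop TX/RX: raise the stop-request
 * interrupt toward shared partners and flag all EPs as stop-requested.
 */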
void fjes_hw_raise_epstop(struct fjes_hw *hw)
{
	enum ep_partner_status status;
	unsigned long flags;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		status = fjes_hw_get_partner_ep_status(hw, epidx);
		switch (status) {
		case EP_PARTNER_SHARED:
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_TXRX_STOP_REQ);
			hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;
			break;
		default:
			break;
		}

		set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
		set_bit(epidx, &hw->txrx_stop_req_bit);

		spin_lock_irqsave(&hw->rx_status_lock, flags);
		hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_STOP_REQ_REQUEST;
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);
	}
}
int fjes_hw_wait_epstop(struct fjes_hw *hw)
{
	enum ep_partner_status status;
	union ep_buffer_info *info;
	int wait_time = 0;
	int epidx;

	while (hw->hw_info.buffer_unshare_reserve_bit &&
	       (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			status = fjes_hw_epid_is_shared(hw->hw_info.share,
							epidx);
			info = hw->ep_shm_info[epidx].rx.info;
			if ((!status ||
			     (info->v1i.rx_status &
			      FJES_RX_STOP_REQ_DONE)) &&
			    test_bit(epidx,
				     &hw->hw_info.buffer_unshare_reserve_bit)) {
				clear_bit(epidx,
					  &hw->hw_info.buffer_unshare_reserve_bit);
			}
		}

		msleep(100);
		wait_time += 100;
	}

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;
		if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit))
			clear_bit(epidx,
				  &hw->hw_info.buffer_unshare_reserve_bit);
	}

	return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)
			? 0 : -EBUSY;
}
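
/* EP buffer attribute checks used when validating a partner's shared buffer
 * (format version, negotiated MTU, and accepted VLAN IDs).
 */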
bool fjes_hw_check_epbuf_version(struct epbuf_handler *epbh, u32 version)
{
	union ep_buffer_info *info = epbh->info;

	return (info->common.version == version);
}

bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
{
	union ep_buffer_info *info = epbh->info;

	return ((info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu)) &&
		info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE);
}

bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	bool ret = false;
	int i;

	if (vlan_id == 0) {
		ret = true;
	} else {
		for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
			if (vlan_id == info->v1i.vlan_id[i]) {
				ret = true;
				break;
			}
		}
	}
	return ret;
}
bool fjes_hw_set_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	int i;

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
		if (info->v1i.vlan_id[i] == 0) {
			info->v1i.vlan_id[i] = vlan_id;
			return true;
		}
	}
	return false;
}

void fjes_hw_del_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	int i;

	if (0 != vlan_id) {
		for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
			if (vlan_id == info->v1i.vlan_id[i])
				info->v1i.vlan_id[i] = 0;
		}
	}
}
bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
{
	union ep_buffer_info *info = epbh->info;

	if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
		return true;

	if (info->v1i.count_max == 0)
		return true;

	return EP_RING_EMPTY(info->v1i.head, info->v1i.tail,
			     info->v1i.count_max);
}

void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh,
				       size_t *psize)
{
	union ep_buffer_info *info = epbh->info;
	struct esmem_frame *ring_frame;
	void *frame;

	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
							(info->v1i.head,
							 info->v1i.count_max) *
							info->v1i.frame_max]);

	*psize = (size_t)ring_frame->frame_size;

	frame = ring_frame->frame_data;

	return frame;
}
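
/* Ring consumer/producer helpers: curpkt_drop advances the RX head past the
 * current frame; tx_pkt_send copies a frame at the tail and advances it.
 */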
void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh)
{
	union ep_buffer_info *info = epbh->info;

	if (fjes_hw_epbuf_rx_is_empty(epbh))
		return;

	EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max);
}

int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh,
			      void *frame, size_t size)
{
	union ep_buffer_info *info = epbh->info;
	struct esmem_frame *ring_frame;

	if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max))
		return -ENOBUFS;

	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
							(info->v1i.tail - 1,
							 info->v1i.count_max) *
							info->v1i.frame_max]);

	ring_frame->frame_size = size;

	memcpy((void *)(ring_frame->frame_data), (void *)frame, size);

	EP_RING_INDEX_INC(epbh->info->v1i.tail, info->v1i.count_max);

	return 0;
}
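
/* Zone update worker: re-query zoning information from the device and, for
 * each partner EP, register buffers, unregister buffers, or request TX/RX
 * stop depending on how its zone membership changed.
 */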
static void fjes_hw_update_zone_task(struct work_struct *work)
{
	struct fjes_hw *hw = container_of(work,
			struct fjes_hw, update_zone_task);

	struct my_s {u8 es_status; u8 zone; } *info;
	union fjes_device_command_res *res_buf;
	enum ep_partner_status pstatus;

	struct fjes_adapter *adapter;
	struct net_device *netdev;
	unsigned long flags;

	ulong unshare_bit = 0;
	ulong share_bit = 0;
	ulong irq_bit = 0;

	int epidx;
	int ret;

	adapter = (struct fjes_adapter *)hw->back;
	netdev = adapter->netdev;
	res_buf = hw->hw_info.res_buf;
	info = (struct my_s *)&res_buf->info.info;

	mutex_lock(&hw->hw_info.lock);

	ret = fjes_hw_request_info(hw);
	switch (ret) {
	case -ENOMSG:
	case -EBUSY:
	default:
		if (!work_pending(&adapter->force_close_task)) {
			adapter->force_reset = true;
			schedule_work(&adapter->force_close_task);
		}
		break;

	case 0:
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid) {
				hw->ep_shm_info[epidx].es_status =
					info[epidx].es_status;
				hw->ep_shm_info[epidx].zone =
					info[epidx].zone;
				continue;
			}

			pstatus = fjes_hw_get_partner_ep_status(hw, epidx);
			switch (pstatus) {
			case EP_PARTNER_UNSHARE:
			default:
				if ((info[epidx].zone !=
					FJES_ZONING_ZONE_TYPE_NONE) &&
				    (info[epidx].es_status ==
					FJES_ZONING_STATUS_ENABLE) &&
				    (info[epidx].zone ==
					info[hw->my_epid].zone))
					set_bit(epidx, &share_bit);
				else
					set_bit(epidx, &unshare_bit);
				break;

			case EP_PARTNER_COMPLETE:
			case EP_PARTNER_WAITING:
				if ((info[epidx].zone ==
					FJES_ZONING_ZONE_TYPE_NONE) ||
				    (info[epidx].es_status !=
					FJES_ZONING_STATUS_ENABLE) ||
				    (info[epidx].zone !=
					info[hw->my_epid].zone)) {
					set_bit(epidx,
						&adapter->unshare_watch_bitmask);
					set_bit(epidx,
						&hw->hw_info.buffer_unshare_reserve_bit);
				}
				break;

			case EP_PARTNER_SHARED:
				if ((info[epidx].zone ==
					FJES_ZONING_ZONE_TYPE_NONE) ||
				    (info[epidx].es_status !=
					FJES_ZONING_STATUS_ENABLE) ||
				    (info[epidx].zone !=
					info[hw->my_epid].zone))
					set_bit(epidx, &irq_bit);
				break;
			}

			hw->ep_shm_info[epidx].es_status =
				info[epidx].es_status;
			hw->ep_shm_info[epidx].zone = info[epidx].zone;
		}
		break;
	}

	mutex_unlock(&hw->hw_info.lock);

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (test_bit(epidx, &share_bit)) {
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);

			mutex_lock(&hw->hw_info.lock);

			ret = fjes_hw_register_buff_addr(
				hw, epidx, &hw->ep_shm_info[epidx]);

			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
						&adapter->force_close_task);
				}
				break;
			}
			mutex_unlock(&hw->hw_info.lock);

			hw->ep_shm_info[epidx].ep_stats
					      .com_regist_buf_exec += 1;
		}

		if (test_bit(epidx, &unshare_bit)) {
			mutex_lock(&hw->hw_info.lock);

			ret = fjes_hw_unregister_buff_addr(hw, epidx);

			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
						&adapter->force_close_task);
				}
				break;
			}

			mutex_unlock(&hw->hw_info.lock);

			hw->ep_shm_info[epidx].ep_stats
					      .com_unregist_buf_exec += 1;

			if (ret == 0) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);
			}
		}

		if (test_bit(epidx, &irq_bit)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_TXRX_STOP_REQ);

			hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;

			set_bit(epidx, &hw->txrx_stop_req_bit);
			spin_lock_irqsave(&hw->rx_status_lock, flags);
			hw->ep_shm_info[epidx].tx.
				info->v1i.rx_status |=
					FJES_RX_STOP_REQ_REQUEST;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
			set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
		}
	}

	if (irq_bit || adapter->unshare_watch_bitmask) {
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
	}
}
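
/* EP-stop worker: acknowledge each pending stop request by setting
 * FJES_RX_STOP_REQ_DONE in the TX buffer status and schedule the adapter's
 * unshare watch task for that EP.
 */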
static void fjes_hw_epstop_task(struct work_struct *work)
{
	struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
	struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
	unsigned long flags;

	ulong remain_bit;
	int epid_bit;

	while ((remain_bit = hw->epstop_req_bit)) {
		for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
			if (remain_bit & 1) {
				spin_lock_irqsave(&hw->rx_status_lock, flags);
				hw->ep_shm_info[epid_bit].
					tx.info->v1i.rx_status |=
						FJES_RX_STOP_REQ_DONE;
				spin_unlock_irqrestore(&hw->rx_status_lock,
						       flags);

				clear_bit(epid_bit, &hw->epstop_req_bit);
				set_bit(epid_bit,
					&adapter->unshare_watch_bitmask);

				if (!work_pending(&adapter->unshare_watch_task))
					queue_work(
						adapter->control_wq,
						&adapter->unshare_watch_task);
			}
		}
	}
}
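
/* Hand the vmalloc'ed trace buffer to the device page by page and start
 * firmware debug tracing in the currently selected debug mode.
 */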
int fjes_hw_start_debug(struct fjes_hw *hw)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int page_count;
	int result = 0;
	void *addr;
	int i;

	if (!hw->hw_info.trace)
		return -EPERM;
	memset(hw->hw_info.trace, 0, FJES_DEBUG_BUFFER_SIZE);

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->start_trace.length =
		FJES_DEV_COMMAND_START_DBG_REQ_LEN(hw->hw_info.trace_size);
	req_buf->start_trace.mode = hw->debug_mode;
	req_buf->start_trace.buffer_len = hw->hw_info.trace_size;
	page_count = hw->hw_info.trace_size / FJES_DEBUG_PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)hw->hw_info.trace) + i * FJES_DEBUG_PAGE_SIZE;
		req_buf->start_trace.buffer[i] =
			(__le64)(page_to_phys(vmalloc_to_page(addr)) +
				offset_in_page(addr));
	}

	res_buf->start_trace.length = 0;
	res_buf->start_trace.code = 0;

	trace_fjes_hw_start_debug_req(req_buf);
	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_START_DEBUG);
	trace_fjes_hw_start_debug(res_buf);

	if (res_buf->start_trace.length !=
		FJES_DEV_COMMAND_START_DBG_RES_LEN) {
		result = -ENOMSG;
		trace_fjes_hw_start_debug_err("Invalid res_buf");
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->start_trace.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			trace_fjes_hw_start_debug_err("Busy Timeout");
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}
int fjes_hw_stop_debug(struct fjes_hw *hw)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int result = 0;

	if (!hw->hw_info.trace)
		return -EPERM;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);
	req_buf->stop_trace.length = FJES_DEV_COMMAND_STOP_DBG_REQ_LEN;

	res_buf->stop_trace.length = 0;
	res_buf->stop_trace.code = 0;

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_STOP_DEBUG);
	trace_fjes_hw_stop_debug(res_buf);

	if (res_buf->stop_trace.length != FJES_DEV_COMMAND_STOP_DBG_RES_LEN) {
		trace_fjes_hw_stop_debug_err("Invalid res_buf");
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->stop_trace.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			hw->debug_mode = 0;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			result = -EBUSY;
			trace_fjes_hw_stop_debug_err("Busy Timeout");
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}