// SPDX-License-Identifier: GPL-2.0+
/*
 * bdc_ep.c - BRCM BDC USB3.0 device controller endpoint related functions
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * Author: Ashwini Pahuja
 *
 * Based on drivers under drivers/usb/
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmapool.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include <linux/usb/composite.h>

#include "bdc.h"
#include "bdc_ep.h"
#include "bdc_cmd.h"
#include "bdc_dbg.h"

static const char * const ep0_state_string[] = {
	"WAIT_FOR_SETUP",
	"WAIT_FOR_DATA_START",
	"WAIT_FOR_DATA_XMIT",
	"WAIT_FOR_STATUS_START",
	"WAIT_FOR_STATUS_XMIT",
	"STATUS_PENDING"
};

/* Free the bdl during ep disable */
static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
{
	struct bd_list *bd_list = &ep->bd_list;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	int index;

	dev_dbg(bdc->dev, "%s ep:%s num_tabs:%d\n",
			__func__, ep->name, num_tabs);

	if (!bd_list->bd_table_array) {
		dev_dbg(bdc->dev, "%s already freed\n", ep->name);
		return;
	}
	for (index = 0; index < num_tabs; index++) {
		/*
		 * Check if the bd_table struct is allocated; if so, check
		 * whether the bd memory has been allocated, then free the
		 * dma_pool entry along with the bd_table struct memory
		 */
		bd_table = bd_list->bd_table_array[index];
		dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index);
		if (!bd_table) {
			dev_dbg(bdc->dev, "bd_table not allocated\n");
			continue;
		}
		if (!bd_table->start_bd) {
			dev_dbg(bdc->dev, "bd dma pool not allocated\n");
			continue;
		}
		dev_dbg(bdc->dev,
			"Free dma pool start_bd:%p dma:%llx\n",
			bd_table->start_bd,
			(unsigned long long)bd_table->dma);
		dma_pool_free(bdc->bd_table_pool,
				bd_table->start_bd,
				bd_table->dma);
		/* Free the bd_table structure */
		kfree(bd_table);
	}
	/* Free the bd table array */
	kfree(ep->bd_list.bd_table_array);
}

/*
 * chain the tables, by inserting a chain bd at the end of prev_table, pointing
 * to next_table
 */
static inline void chain_table(struct bd_table *prev_table,
					struct bd_table *next_table,
					u32 bd_p_tab)
{
	/* Chain the prev table to next table */
	prev_table->start_bd[bd_p_tab-1].offset[0] =
			cpu_to_le32(lower_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab-1].offset[1] =
			cpu_to_le32(upper_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab-1].offset[2] =
			0x0;

	prev_table->start_bd[bd_p_tab-1].offset[3] =
			cpu_to_le32(MARK_CHAIN_BD);
}

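/*
 * Illustrative layout of the resulting ring (a sketch, not a verbatim
 * hardware diagram): ep_bd_list_alloc() below chains each table to the
 * next and the last table back to the first, so with two tables of
 * bd_p_tab BDs each the list looks like:
 *
 *   table0: [xfr bd] ... [xfr bd] [chain bd -> table1]
 *   table1: [xfr bd] ... [xfr bd] [chain bd -> table0]
 */
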
/* Allocate the bdl for ep, during config ep */
static int ep_bd_list_alloc(struct bdc_ep *ep)
{
	struct bd_table *prev_table = NULL;
	int index, num_tabs, bd_p_tab;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	dma_addr_t dma;

	if (usb_endpoint_xfer_isoc(ep->desc))
		num_tabs = NUM_TABLES_ISOCH;
	else
		num_tabs = NUM_TABLES;

	bd_p_tab = NUM_BDS_PER_TABLE;
	/* if there is only 1 table in bd list then loop chain to self */
	dev_dbg(bdc->dev,
		"%s ep:%p num_tabs:%d\n",
		__func__, ep, num_tabs);

	/* Allocate memory for table array */
	ep->bd_list.bd_table_array = kcalloc(num_tabs,
					sizeof(struct bd_table *),
					GFP_ATOMIC);
	if (!ep->bd_list.bd_table_array)
		return -ENOMEM;

	/* Allocate memory for each table */
	for (index = 0; index < num_tabs; index++) {
		/* Allocate memory for bd_table structure */
		bd_table = kzalloc(sizeof(struct bd_table), GFP_ATOMIC);
		if (!bd_table)
			goto fail;

		bd_table->start_bd = dma_pool_zalloc(bdc->bd_table_pool,
							GFP_ATOMIC,
							&dma);
		if (!bd_table->start_bd) {
			kfree(bd_table);
			goto fail;
		}

		bd_table->dma = dma;
		dev_dbg(bdc->dev,
			"index:%d start_bd:%p dma=%08llx prev_table:%p\n",
			index, bd_table->start_bd,
			(unsigned long long)bd_table->dma, prev_table);

		ep->bd_list.bd_table_array[index] = bd_table;
		if (prev_table)
			chain_table(prev_table, bd_table, bd_p_tab);

		prev_table = bd_table;
	}
	chain_table(prev_table, ep->bd_list.bd_table_array[0], bd_p_tab);
	/* Memory allocation is successful, now init the internal fields */
	ep->bd_list.num_tabs = num_tabs;
	ep->bd_list.max_bdi = (num_tabs * bd_p_tab) - 1;
	ep->bd_list.num_bds_table = bd_p_tab;
	ep->bd_list.eqp_bdi = 0;
	ep->bd_list.hwd_bdi = 0;

	return 0;
fail:
	/* Free the bd_table_array, bd_table struct, bd's */
	ep_bd_list_free(ep, num_tabs);

	return -ENOMEM;
}

/* returns how many bd's are needed for this transfer */
static inline int bd_needed_req(struct bdc_req *req)
{
	int bd_needed = 0;
	int remaining;

	/* 1 bd needed for 0 byte transfer */
	if (req->usb_req.length == 0)
		return 1;

	/* remaining bytes after transferring all max BD size BD's */
	remaining = req->usb_req.length % BD_MAX_BUFF_SIZE;
	if (remaining)
		bd_needed++;

	/* How many maximum BUFF size BD's ? */
	remaining = req->usb_req.length / BD_MAX_BUFF_SIZE;
	bd_needed += remaining;

	return bd_needed;
}

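/*
 * Worked example (illustrative figures; the real BD_MAX_BUFF_SIZE is
 * defined in the driver headers): assuming a 64KB buffer per BD, a
 * 100KB request needs one full-size BD plus one BD for the remaining
 * 36KB, so bd_needed_req() returns 2; a zero-length request still
 * consumes one BD.
 */
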
/* returns the bd index(bdi) corresponding to bd dma address */
static int bd_add_to_bdi(struct bdc_ep *ep, dma_addr_t bd_dma_addr)
{
	struct bd_list *bd_list = &ep->bd_list;
	dma_addr_t dma_first_bd, dma_last_bd;
	struct bdc *bdc = ep->bdc;
	struct bd_table *bd_table;
	bool found = false;
	int tbi, bdi;

	dma_first_bd = dma_last_bd = 0;
	dev_dbg(bdc->dev, "%s %llx\n",
			__func__, (unsigned long long)bd_dma_addr);
	/*
	 * Find the table this bd_dma_addr belongs to: go through the table
	 * array and compare the addresses of the first and last bd of each
	 * table
	 */
	for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
		bd_table = bd_list->bd_table_array[tbi];
		dma_first_bd = bd_table->dma;
		dma_last_bd = bd_table->dma +
					(sizeof(struct bdc_bd) *
					(bd_list->num_bds_table - 1));
		dev_dbg(bdc->dev, "dma_first_bd:%llx dma_last_bd:%llx\n",
				(unsigned long long)dma_first_bd,
				(unsigned long long)dma_last_bd);
		if (bd_dma_addr >= dma_first_bd && bd_dma_addr <= dma_last_bd) {
			found = true;
			break;
		}
	}
	if (unlikely(!found)) {
		dev_err(bdc->dev, "%s FATAL err, bd not found\n", __func__);
		return -EINVAL;
	}
	/* Now we know the table, find the bdi */
	bdi = (bd_dma_addr - dma_first_bd) / sizeof(struct bdc_bd);

	/* return the global bdi, to compare with ep eqp_bdi */
	return (bdi + (tbi * bd_list->num_bds_table));
}

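/*
 * Illustrative mapping (hypothetical numbers): with 32 BDs per table,
 * a dma address that falls in table 2 at local index 5 yields a global
 * bdi of 5 + (2 * 32) = 69, which can then be compared directly against
 * eqp_bdi/hwd_bdi.
 */
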
/* returns the table index(tbi) of the given bdi */
static int bdi_to_tbi(struct bdc_ep *ep, int bdi)
{
	int tbi;

	tbi = bdi / ep->bd_list.num_bds_table;
	dev_vdbg(ep->bdc->dev,
		"bdi:%d num_bds_table:%d tbi:%d\n",
		bdi, ep->bd_list.num_bds_table, tbi);

	return tbi;
}

/* Find the bdi of the last bd in the transfer */
static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi)
{
	int end_bdi;

	end_bdi = next_hwd_bdi - 1;
	if (end_bdi < 0)
		end_bdi = ep->bd_list.max_bdi - 1;
	else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
		end_bdi--;

	return end_bdi;
}

/*
 * How many transfer bd's are available on this ep bdl; chain bds are not
 * counted in available bds
 */
static int bd_available_ep(struct bdc_ep *ep)
{
	struct bd_list *bd_list = &ep->bd_list;
	int available1, available2;
	struct bdc *bdc = ep->bdc;
	int chain_bd1, chain_bd2;
	int available_bd = 0;

	available1 = available2 = chain_bd1 = chain_bd2 = 0;
	/* if empty then we have all bd's available - number of chain bd's */
	if (bd_list->eqp_bdi == bd_list->hwd_bdi)
		return bd_list->max_bdi - bd_list->num_tabs;

	/*
	 * Depending upon where the eqp and dqp pointers are, calculate the
	 * number of available bd's
	 */
	if (bd_list->hwd_bdi < bd_list->eqp_bdi) {
		/* available bd's are from eqp..max_bds + 0..dqp - chain_bds */
		available1 = bd_list->max_bdi - bd_list->eqp_bdi;
		available2 = bd_list->hwd_bdi;
		chain_bd1 = available1 / bd_list->num_bds_table;
		chain_bd2 = available2 / bd_list->num_bds_table;
		dev_vdbg(bdc->dev, "chain_bd1:%d chain_bd2:%d\n",
						chain_bd1, chain_bd2);
		available_bd = available1 + available2 - chain_bd1 - chain_bd2;
	} else {
		/* available bd's are from eqp..dqp - number of chain bd's */
		available1 = bd_list->hwd_bdi - bd_list->eqp_bdi;
		/* if gap between eqp and dqp is less than NUM_BDS_PER_TABLE */
		if ((bd_list->hwd_bdi - bd_list->eqp_bdi)
					<= bd_list->num_bds_table) {
			/* If there is any chain bd in between */
			if (bdi_to_tbi(ep, bd_list->hwd_bdi)
					!= bdi_to_tbi(ep, bd_list->eqp_bdi)) {
				available_bd = available1 - 1;
			}
		} else {
			chain_bd1 = available1 / bd_list->num_bds_table;
			available_bd = available1 - chain_bd1;
		}
	}
	/*
	 * we need to keep one extra bd to check if ring is full or empty so
	 * reduce by 1
	 */
	available_bd--;
	dev_vdbg(bdc->dev, "available_bd:%d\n", available_bd);

	return available_bd;
}

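/*
 * Worked example (hypothetical ring of 2 tables * 32 BDs, max_bdi = 63):
 * with eqp_bdi = 5 and hwd_bdi = 50 the gap is 45, which spans more
 * than one table, so one chain bd sits inside it:
 * available_bd = 45 - (45 / 32) - 1 = 43.
 */
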
/* Notify the hardware after queueing the bd to bdl */
void bdc_notify_xfr(struct bdc *bdc, u32 epnum)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[epnum];

	dev_vdbg(bdc->dev, "%s epnum:%d\n", __func__, epnum);
	/*
	 * We don't have any way to check if ep state is running,
	 * except the software flags.
	 */
	if (unlikely(ep->flags & BDC_EP_STOP))
		ep->flags &= ~BDC_EP_STOP;

	bdc_writel(bdc->regs, BDC_XSFNTF, epnum);
}

/* returns the bd corresponding to bdi */
static struct bdc_bd *bdi_to_bd(struct bdc_ep *ep, int bdi)
{
	int tbi = bdi_to_tbi(ep, bdi);
	int local_bdi = 0;

	local_bdi = bdi - (tbi * ep->bd_list.num_bds_table);
	dev_vdbg(ep->bdc->dev,
		"%s bdi:%d local_bdi:%d\n",
		__func__, bdi, local_bdi);

	return (ep->bd_list.bd_table_array[tbi]->start_bd + local_bdi);
}

/* Advance the enqueue pointer */
static void ep_bdlist_eqp_adv(struct bdc_ep *ep)
{
	ep->bd_list.eqp_bdi++;
	/* if it's chain bd, then move to next */
	if (((ep->bd_list.eqp_bdi + 1) % ep->bd_list.num_bds_table) == 0)
		ep->bd_list.eqp_bdi++;

	/* if the eqp is pointing to last + 1 then move back to 0 */
	if (ep->bd_list.eqp_bdi == (ep->bd_list.max_bdi + 1))
		ep->bd_list.eqp_bdi = 0;
}

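/*
 * Example walk (assuming 32 BDs per table, 2 tables, max_bdi = 63):
 * advancing from bdi 30 lands on 32, skipping the chain bd at 31;
 * advancing from 62 skips the chain bd at 63 and wraps back to 0.
 */
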
/* Setup the first bd for ep0 transfer */
static int setup_first_bd_ep0(struct bdc *bdc, struct bdc_req *req, u32 *dword3)
{
	u16 wValue;
	u32 req_len;

	req->ep->dir = 0;
	req_len = req->usb_req.length;
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_START:
		*dword3 |= BD_TYPE_DS;
		if (bdc->setup_pkt.bRequestType & USB_DIR_IN)
			*dword3 |= BD_DIR_IN;

		/* check if zlp will be needed */
		wValue = le16_to_cpu(bdc->setup_pkt.wValue);
		if ((wValue > req_len) &&
				(req_len % bdc->gadget.ep0->maxpacket == 0)) {
			dev_dbg(bdc->dev, "ZLP needed wVal:%d len:%d MaxP:%d\n",
					wValue, req_len,
					bdc->gadget.ep0->maxpacket);
			bdc->zlp_needed = true;
		}
		break;

	case WAIT_FOR_STATUS_START:
		*dword3 |= BD_TYPE_SS;
		if (!le16_to_cpu(bdc->setup_pkt.wLength) ||
				!(bdc->setup_pkt.bRequestType & USB_DIR_IN))
			*dword3 |= BD_DIR_IN;
		break;
	default:
		dev_err(bdc->dev,
			"Unknown ep0 state for queueing bd ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
		return -EINVAL;
	}

	return 0;
}

/* Setup the bd dma descriptor for a given request */
static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
{
	dma_addr_t buf_add = req->usb_req.dma;
	u32 maxp, tfs, dword2, dword3;
	struct bd_transfer *bd_xfr;
	struct bd_list *bd_list;
	struct bdc_ep *ep;
	struct bdc_bd *bd;
	int ret, bdnum;
	u32 req_len;

	ep = req->ep;
	bd_list = &ep->bd_list;
	bd_xfr = &req->bd_xfr;
	bd_xfr->req = req;
	bd_xfr->start_bdi = bd_list->eqp_bdi;
	bd = bdi_to_bd(ep, bd_list->eqp_bdi);
	req_len = req->usb_req.length;
	maxp = usb_endpoint_maxp(ep->desc);
	tfs = roundup(req->usb_req.length, maxp);
	tfs = tfs/maxp;
	dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
				__func__, ep->name, num_bds, tfs, req_len, bd);

	for (bdnum = 0; bdnum < num_bds; bdnum++) {
		dword2 = dword3 = 0;
		/* First bd */
		if (!bdnum) {
			dword3 |= BD_SOT|BD_SBF|(tfs<<BD_TFS_SHIFT);
			dword2 |= BD_LTF;
			/* format of first bd for ep0 differs from the others */
			if (ep->ep_num == 1) {
				ret = setup_first_bd_ep0(bdc, req, &dword3);
				if (ret)
					return ret;
			}
		}
		if (!req->ep->dir)
			dword3 |= BD_ISP;

		if (req_len > BD_MAX_BUFF_SIZE) {
			dword2 |= BD_MAX_BUFF_SIZE;
			req_len -= BD_MAX_BUFF_SIZE;
		} else {
			/* this should be the last bd */
			dword2 |= req_len;
			dword3 |= BD_IOC;
			dword3 |= BD_EOT;
		}
		/* Currently only 1 INT target is supported */
		dword2 |= BD_INTR_TARGET(0);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		if (unlikely(!bd)) {
			dev_err(bdc->dev, "Err bd pointing to wrong addr\n");
			return -EINVAL;
		}
		/* write bd */
		bd->offset[0] = cpu_to_le32(lower_32_bits(buf_add));
		bd->offset[1] = cpu_to_le32(upper_32_bits(buf_add));
		bd->offset[2] = cpu_to_le32(dword2);
		bd->offset[3] = cpu_to_le32(dword3);
		/* advance eqp pointer */
		ep_bdlist_eqp_adv(ep);
		/* advance the buff pointer */
		buf_add += BD_MAX_BUFF_SIZE;
		dev_vdbg(bdc->dev, "buf_add:%08llx req_len:%d bd:%p eqp:%d\n",
				(unsigned long long)buf_add, req_len, bd,
				ep->bd_list.eqp_bdi);
		bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
		bd->offset[3] = cpu_to_le32(BD_SBF);
	}
	/* clear the STOP BD fetch bit from the first bd of this xfr */
	bd = bdi_to_bd(ep, bd_xfr->start_bdi);
	bd->offset[3] &= cpu_to_le32(~BD_SBF);
	/* the new eqp will be next hw dqp */
	bd_xfr->num_bds = num_bds;
	bd_xfr->next_hwd_bdi = ep->bd_list.eqp_bdi;
	/* everything is written correctly before notifying the HW */
	wmb();

	return 0;
}

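/*
 * Sketch of the BD encoding this produces for a two-BD OUT transfer
 * (illustrative; the exact bit positions live in the BD_* macros in
 * the driver headers):
 *
 *   bd0: offset[0..1] = buffer dma, offset[2] = BD_MAX_BUFF_SIZE | BD_LTF
 *        | intr target, offset[3] = BD_SOT | BD_ISP | tfs << BD_TFS_SHIFT
 *        (BD_SBF is set while building and cleared from bd0 last)
 *   bd1: offset[0..1] = buffer dma + max buff size, offset[2] =
 *        remaining length | intr target, offset[3] = BD_ISP | BD_IOC | BD_EOT
 */
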
/* Queue the xfr */
static int bdc_queue_xfr(struct bdc *bdc, struct bdc_req *req)
{
	int num_bds, bd_available;
	struct bdc_ep *ep;
	int ret;

	ep = req->ep;
	dev_dbg(bdc->dev, "%s req:%p\n", __func__, req);
	dev_dbg(bdc->dev, "eqp_bdi:%d hwd_bdi:%d\n",
			ep->bd_list.eqp_bdi, ep->bd_list.hwd_bdi);

	num_bds = bd_needed_req(req);
	bd_available = bd_available_ep(ep);

	/* how many bd's are available on ep */
	if (num_bds > bd_available)
		return -ENOMEM;

	ret = setup_bd_list_xfr(bdc, req, num_bds);
	if (ret)
		return ret;
	list_add_tail(&req->queue, &ep->queue);
	bdc_dbg_bd_list(bdc, ep);
	bdc_notify_xfr(bdc, ep->ep_num);

	return 0;
}

/* callback to gadget layer when xfr completes */
static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
						int status)
{
	struct bdc *bdc = ep->bdc;

	/*
	 * &req->queue and &req->usb_req can never be NULL for a non-NULL
	 * req, so checking req alone is sufficient
	 */
	if (req == NULL)
		return;

	dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
	list_del(&req->queue);
	req->usb_req.status = status;
	usb_gadget_unmap_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (req->usb_req.complete) {
		spin_unlock(&bdc->lock);
		usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
		spin_lock(&bdc->lock);
	}
}

/* Disable the endpoint */
int bdc_ep_disable(struct bdc_ep *ep)
{
	struct bdc_req *req;
	struct bdc *bdc;
	int ret;

	ret = 0;
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s() ep->ep_num=%d\n", __func__, ep->ep_num);
	/* Stop the endpoint */
	ret = bdc_stop_ep(bdc, ep->ep_num);

	/*
	 * Intentionally don't check the ret value of stop; it can fail in
	 * disconnect scenarios, so continue with dconfig
	 */
	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
	/* deconfigure the endpoint */
	ret = bdc_dconfig_ep(bdc, ep);
	if (ret)
		dev_warn(bdc->dev,
			"dconfig fail but continue with memory free\n");

	ep->flags = 0;
	/* ep0 memory is not freed, but reused on next connect sr */
	if (ep->ep_num == 1)
		return 0;

	/* Free the bdl memory */
	ep_bd_list_free(ep, ep->bd_list.num_tabs);
	ep->desc = NULL;
	ep->comp_desc = NULL;
	ep->usb_ep.desc = NULL;
	ep->ep_type = 0;

	return ret;
}

/* Enable the ep */
int bdc_ep_enable(struct bdc_ep *ep)
{
	struct bdc *bdc;
	int ret = 0;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s NUM_TABLES:%d %d\n",
					__func__, NUM_TABLES, NUM_TABLES_ISOCH);

	ret = ep_bd_list_alloc(ep);
	if (ret) {
		dev_err(bdc->dev, "ep bd list allocation failed:%d\n", ret);
		return -ENOMEM;
	}
	bdc_dbg_bd_list(bdc, ep);
	/* only for ep0: config ep is called for ep0 from connect event */
	ep->flags |= BDC_EP_ENABLED;
	if (ep->ep_num == 1)
		return ret;

	/* Issue a configure endpoint command */
	ret = bdc_config_ep(bdc, ep);
	if (ret)
		return ret;

	ep->usb_ep.maxpacket = usb_endpoint_maxp(ep->desc);
	ep->usb_ep.desc = ep->desc;
	ep->usb_ep.comp_desc = ep->comp_desc;
	ep->ep_type = usb_endpoint_type(ep->desc);
	ep->flags |= BDC_EP_ENABLED;

	return 0;
}

/* EP0 related code */

/* Queue a status stage BD */
static int ep0_queue_status_stage(struct bdc *bdc)
{
	struct bdc_req *status_req;
	struct bdc_ep *ep;

	status_req = &bdc->status_req;
	ep = bdc->bdc_ep_array[1];
	status_req->ep = ep;
	status_req->usb_req.length = 0;
	status_req->usb_req.status = -EINPROGRESS;
	status_req->usb_req.actual = 0;
	status_req->usb_req.complete = NULL;
	bdc_queue_xfr(bdc, status_req);

	return 0;
}

/* Queue xfr on ep0 */
static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s()\n", __func__);
	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	if (bdc->delayed_status) {
		bdc->delayed_status = false;
		/* the status stage was delayed; queue it now */
		if (bdc->ep0_state == WAIT_FOR_STATUS_START) {
			/* Queue a status stage BD */
			ep0_queue_status_stage(bdc);
			bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
			return 0;
		}
	} else {
		/*
		 * If delayed status is false and a 0 length transfer is
		 * requested, i.e. for the status stage of some setup request,
		 * then just return from here; the status stage is queued
		 * independently
		 */
		if (req->usb_req.length == 0)
			return 0;
	}
	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed %s\n", ep->name);
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}

/* Queue data stage */
static int ep0_queue_data_stage(struct bdc *bdc)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.ep = ep;
	bdc->ep0_req.usb_req.complete = NULL;

	return ep0_queue(ep, &bdc->ep0_req);
}

/* Queue req on ep */
static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
{
	struct bdc *bdc;
	int ret = 0;

	if (!req || !ep->usb_ep.desc)
		return -EINVAL;

	bdc = ep->bdc;
	req->usb_req.actual = 0;
	req->usb_req.status = -EINPROGRESS;
	req->epnum = ep->ep_num;

	ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
	if (ret) {
		dev_err(bdc->dev, "dma mapping failed\n");
		return ret;
	}

	return bdc_queue_xfr(bdc, req);
}

/* Dequeue a request from ep */
static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
{
	int start_bdi, end_bdi, tbi, eqp_bdi, curr_hw_dqpi;
	bool start_pending, end_pending;
	bool first_remove = false;
	struct bdc_req *first_req;
	struct bdc_bd *bd_start;
	struct bd_table *table;
	dma_addr_t next_bd_dma;
	u64 deq_ptr_64 = 0;
	struct bdc *bdc;
	u32 tmp_32;
	int ret;

	bdc = ep->bdc;
	start_pending = end_pending = false;
	eqp_bdi = ep->bd_list.eqp_bdi - 1;

	if (eqp_bdi < 0)
		eqp_bdi = ep->bd_list.max_bdi;

	start_bdi = req->bd_xfr.start_bdi;
	end_bdi = find_end_bdi(ep, req->bd_xfr.next_hwd_bdi);

	dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
				__func__, ep->name, start_bdi, end_bdi);
	dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n",
				ep, (void *)ep->usb_ep.desc);
	/* Stop the ep to see where the HW is */
	ret = bdc_stop_ep(bdc, ep->ep_num);

	/* if there is an issue with stopping ep, then no need to go further */
	if (ret)
		return 0;

	/*
	 * After the endpoint is stopped, there can be 3 cases: the request
	 * is processed, pending, or in the middle of processing
	 */

	/* The current hw dequeue pointer */
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0);
	deq_ptr_64 = tmp_32;
	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1);
	deq_ptr_64 |= ((u64)tmp_32 << 32);

	/* we have the dma addr of next bd that will be fetched by hardware */
	curr_hw_dqpi = bd_add_to_bdi(ep, deq_ptr_64);
	if (curr_hw_dqpi < 0)
		return curr_hw_dqpi;

	/*
	 * curr_hw_dqpi points to the actual dqp of HW and HW owns bd's from
	 * curr_hw_dqbdi..eqp_bdi.
	 */

	/* Check if start_bdi and end_bdi are in range of HW owned BD's */
	if (curr_hw_dqpi > eqp_bdi) {
		/* there is a wrap from last to 0 */
		if (start_bdi >= curr_hw_dqpi || start_bdi <= eqp_bdi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) {
			end_pending = true;
		}
	} else {
		if (start_bdi >= curr_hw_dqpi) {
			start_pending = true;
			end_pending = true;
		} else if (end_bdi >= curr_hw_dqpi) {
			end_pending = true;
		}
	}
	dev_dbg(bdc->dev,
		"start_pending:%d end_pending:%d speed:%d\n",
		start_pending, end_pending, bdc->gadget.speed);

	/* If everything from start to end has been processed, we cannot deq req */
	if (!start_pending && !end_pending)
		return -EINVAL;

	/*
	 * if ep_dequeue is called after disconnect then just return
	 * success from here
	 */
	if (bdc->gadget.speed == USB_SPEED_UNKNOWN)
		return 0;

	tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
	table = ep->bd_list.bd_table_array[tbi];
	next_bd_dma = table->dma +
			sizeof(struct bdc_bd)*(req->bd_xfr.next_hwd_bdi -
					tbi * ep->bd_list.num_bds_table);

	first_req = list_first_entry(&ep->queue, struct bdc_req,
			queue);

	if (req == first_req)
		first_remove = true;

	/*
	 * Due to a HW limitation we need to bypass chain bd's and issue
	 * ep_bla; in case start is pending and this is the first request in
	 * the list, then issue ep_bla instead of marking as chain bd
	 */
	if (start_pending && !first_remove) {
		/*
		 * Mark the start bd as Chain bd, and point the chain
		 * bd to next_bd_dma
		 */
		bd_start = bdi_to_bd(ep, start_bdi);
		bd_start->offset[0] = cpu_to_le32(lower_32_bits(next_bd_dma));
		bd_start->offset[1] = cpu_to_le32(upper_32_bits(next_bd_dma));
		bd_start->offset[2] = 0x0;
		bd_start->offset[3] = cpu_to_le32(MARK_CHAIN_BD);
		bdc_dbg_bd_list(bdc, ep);
	} else if (end_pending) {
		/*
		 * The transfer is stopped in the middle, move the
		 * HW deq pointer to next_bd_dma
		 */
		ret = bdc_ep_bla(bdc, ep, next_bd_dma);
		if (ret) {
			dev_err(bdc->dev, "error in ep_bla:%d\n", ret);
			return ret;
		}
	}

	return 0;
}

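/*
 * Illustrative dequeue scenario (hypothetical indices, max_bdi = 63):
 * with eqp_bdi = 9 and curr_hw_dqpi = 50 the HW owns bds 50..63 and
 * 0..9 (a wrap), so a request with start_bdi = 55 is fully pending,
 * while one with start_bdi = 20 and end_bdi = 52 was caught
 * mid-processing and only end_pending is set.
 */
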
/* Halt/Clear the ep based on value */
static int ep_set_halt(struct bdc_ep *ep, u32 value)
{
	struct bdc *bdc;
	int ret;

	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);

	if (value) {
		dev_dbg(bdc->dev, "Halt\n");
		if (ep->ep_num == 1)
			bdc->ep0_state = WAIT_FOR_SETUP;

		ret = bdc_ep_set_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to set STALL on %s\n",
				ep->name);
		else
			ep->flags |= BDC_EP_STALL;
	} else {
		/* Clear */
		dev_dbg(bdc->dev, "Before Clear\n");
		ret = bdc_ep_clear_stall(bdc, ep->ep_num);
		if (ret)
			dev_err(bdc->dev, "failed to clear STALL on %s\n",
				ep->name);
		else
			ep->flags &= ~BDC_EP_STALL;

		dev_dbg(bdc->dev, "After Clear\n");
	}

	return ret;
}

/* Free all the ep */
void bdc_free_ep(struct bdc *bdc)
{
	struct bdc_ep *ep;
	u8 epnum;

	dev_dbg(bdc->dev, "%s\n", __func__);
	for (epnum = 1; epnum < bdc->num_eps; epnum++) {
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			continue;

		if (ep->flags & BDC_EP_ENABLED)
			ep_bd_list_free(ep, ep->bd_list.num_tabs);

		/* ep0 is not in this gadget list */
		if (epnum != 1)
			list_del(&ep->usb_ep.ep_list);

		kfree(ep);
	}
}

/* USB2 spec, section 7.1.20 */
static int bdc_set_test_mode(struct bdc *bdc)
{
	u32 usb2_pm;

	usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
	usb2_pm &= ~BDC_PTC_MASK;
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->test_mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		usb2_pm |= bdc->test_mode << 28;
		break;
	default:
		return -EINVAL;
	}
	dev_dbg(bdc->dev, "usb2_pm=%08x\n", usb2_pm);
	bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);

	return 0;
}

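/*
 * Example (test selector values per USB 2.0 spec, Table 9-7): for
 * Test_J the host sends SET_FEATURE(TEST_MODE) with wIndex = 0x0100,
 * so bdc->test_mode = 1 and the shift above programs the port test
 * control field (bits 31:28 of BDC_USPPM2, assuming that is what
 * BDC_PTC_MASK covers) with that selector.
 */
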
/*
 * Helper function to handle Transfer status report with status as either
 * success or short
 */
static void handle_xsr_succ_status(struct bdc *bdc, struct bdc_ep *ep,
					struct bdc_sr *sreport)
{
	int short_bdi, start_bdi, end_bdi, max_len_bds, chain_bds;
	struct bd_list *bd_list = &ep->bd_list;
	int actual_length, length_short;
	struct bd_transfer *bd_xfr;
	struct bdc_bd *short_bd;
	struct bdc_req *req;
	u64 deq_ptr_64 = 0;
	int status = 0;
	int sr_status;
	u32 tmp_32;

	dev_dbg(bdc->dev, "%s ep:%p\n", __func__, ep);
	bdc_dbg_srr(bdc, 0);
	/* do not process this sr if the ignore flag is set */
	if (ep->ignore_next_sr) {
		ep->ignore_next_sr = false;
		return;
	}

	if (unlikely(list_empty(&ep->queue))) {
		dev_warn(bdc->dev, "xfr srr with no BD's queued\n");
		return;
	}
	req = list_entry(ep->queue.next, struct bdc_req,
			queue);

	bd_xfr = &req->bd_xfr;
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));

	/*
	 * If sr_status is short and this transfer has more than 1 bd then it
	 * needs special handling; this is only applicable for bulk and ctrl
	 */
	if (sr_status == XSF_SHORT && bd_xfr->num_bds > 1) {
		/*
		 * This is a multi bd xfr; let's see which bd
		 * caused the short transfer and how many bytes have been
		 * transferred so far.
		 */
		tmp_32 = le32_to_cpu(sreport->offset[0]);
		deq_ptr_64 = tmp_32;
		tmp_32 = le32_to_cpu(sreport->offset[1]);
		deq_ptr_64 |= ((u64)tmp_32 << 32);
		short_bdi = bd_add_to_bdi(ep, deq_ptr_64);
		if (unlikely(short_bdi < 0))
			dev_warn(bdc->dev, "bd doesn't exist?\n");

		start_bdi = bd_xfr->start_bdi;
		/*
		 * We know start_bdi and short_bdi; count how many xfr
		 * bds are in between
		 */
		if (start_bdi <= short_bdi) {
			max_len_bds = short_bdi - start_bdi;
			if (max_len_bds <= bd_list->num_bds_table) {
				if (bdi_to_tbi(ep, start_bdi) !=
						bdi_to_tbi(ep, short_bdi))
					max_len_bds--;
			} else {
				chain_bds = max_len_bds/bd_list->num_bds_table;
				max_len_bds -= chain_bds;
			}
		} else {
			/* there is a wrap in the ring within a xfr */
			chain_bds = (bd_list->max_bdi - start_bdi)/
							bd_list->num_bds_table;
			chain_bds += short_bdi/bd_list->num_bds_table;
			max_len_bds = bd_list->max_bdi - start_bdi;
			max_len_bds += short_bdi;
			max_len_bds -= chain_bds;
		}
		/* max_len_bds is the number of full length bds */
		end_bdi = find_end_bdi(ep, bd_xfr->next_hwd_bdi);
		if (end_bdi != short_bdi)
			ep->ignore_next_sr = true;

		actual_length = max_len_bds * BD_MAX_BUFF_SIZE;
		short_bd = bdi_to_bd(ep, short_bdi);
		/* length queued */
		length_short = le32_to_cpu(short_bd->offset[2]) & 0x1FFFFF;
		/* actual length transferred */
		length_short -= SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		actual_length += length_short;
		req->usb_req.actual = actual_length;
	} else {
		req->usb_req.actual = req->usb_req.length -
			SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
		dev_dbg(bdc->dev,
			"len=%d actual=%d bd_xfr->next_hwd_bdi:%d\n",
			req->usb_req.length, req->usb_req.actual,
			bd_xfr->next_hwd_bdi);
	}

	/* Update the dequeue pointer */
	ep->bd_list.hwd_bdi = bd_xfr->next_hwd_bdi;
	if (req->usb_req.actual < req->usb_req.length) {
		dev_dbg(bdc->dev, "short xfr on %d\n", ep->ep_num);
		if (req->usb_req.short_not_ok)
			status = -EREMOTEIO;
	}
	bdc_req_complete(ep, bd_xfr->req, status);
}

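/*
 * Worked example for the short-transfer math above (hypothetical
 * numbers, 64KB BDs): a 3-BD bulk transfer stops short in its 3rd bd
 * after 1KB. max_len_bds = 2 full bds, so actual_length starts at
 * 2 * 64KB; length_short is the short bd's queued length (64KB) minus
 * the residue reported in the SR (63KB), i.e. 1KB, giving
 * usb_req.actual = 128KB + 1KB.
 */
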
/* EP0 setup related packet handlers */

/*
 * Setup packet received, just store the packet and process on next DS or SS
 * started SR
 */
void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	u32 len;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
	/* Store received setup packet */
	setup_pkt = &bdc->setup_pkt;
	memcpy(setup_pkt, &sreport->offset[0], sizeof(*setup_pkt));
	len = le16_to_cpu(setup_pkt->wLength);
	if (!len)
		bdc->ep0_state = WAIT_FOR_STATUS_START;
	else
		bdc->ep0_state = WAIT_FOR_DATA_START;

	dev_dbg(bdc->dev,
		"%s exit ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
}

/* Stall ep0 */
static void ep0_stall(struct bdc *bdc)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[1];
	struct bdc_req *req;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->delayed_status = false;
	ep_set_halt(ep, 1);

	/* de-queue any pending requests */
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct bdc_req,
				queue);
		bdc_req_complete(ep, req, -ESHUTDOWN);
	}
}

/* SET_ADDRESS handler */
static int ep0_set_address(struct bdc *bdc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = bdc->gadget.state;
	int ret = 0;
	u32 addr;

	addr = le16_to_cpu(ctrl->wValue);
	dev_dbg(bdc->dev,
		"%s addr:%d dev state:%d\n",
		__func__, addr, state);

	if (addr > 127)
		return -EINVAL;

	switch (state) {
	case USB_STATE_DEFAULT:
	case USB_STATE_ADDRESS:
		/* Issue Address device command */
		ret = bdc_address_device(bdc, addr);
		if (ret)
			return ret;

		if (addr)
			usb_gadget_set_state(&bdc->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&bdc->gadget, USB_STATE_DEFAULT);

		bdc->dev_addr = addr;
		break;
	default:
		dev_warn(bdc->dev,
			"SET Address in wrong device state %d\n",
			state);
		ret = -EINVAL;
	}

	return ret;
}

/* Handler for SET/CLEAR FEATURE requests for device */
static int ep0_handle_feature_dev(struct bdc *bdc, u16 wValue,
					u16 wIndex, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	u32 usppms = 0;

	dev_dbg(bdc->dev, "%s set:%d dev state:%d\n",
					__func__, set, state);
	switch (wValue) {
	case USB_DEVICE_REMOTE_WAKEUP:
		dev_dbg(bdc->dev, "USB_DEVICE_REMOTE_WAKEUP\n");
		if (set)
			bdc->devstatus |= REMOTE_WAKE_ENABLE;
		else
			bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
		break;

	case USB_DEVICE_TEST_MODE:
		dev_dbg(bdc->dev, "USB_DEVICE_TEST_MODE\n");
		if ((wIndex & 0xFF) ||
				(bdc->gadget.speed != USB_SPEED_HIGH) || !set)
			return -EINVAL;

		bdc->test_mode = wIndex >> 8;
		break;

	case USB_DEVICE_U1_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_U1_ENABLE\n");

		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;

		usppms = bdc_readl(bdc->regs, BDC_USPPMS);
		if (set) {
			/* clear previous u1t */
			usppms &= ~BDC_U1T(BDC_U1T_MASK);
			usppms |= BDC_U1T(U1_TIMEOUT);
			usppms |= BDC_U1E | BDC_PORT_W1S;
			bdc->devstatus |= (1 << USB_DEV_STAT_U1_ENABLED);
		} else {
			usppms &= ~BDC_U1E;
			usppms |= BDC_PORT_W1S;
			bdc->devstatus &= ~(1 << USB_DEV_STAT_U1_ENABLED);
		}
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case USB_DEVICE_U2_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_U2_ENABLE\n");

		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;

		usppms = bdc_readl(bdc->regs, BDC_USPPMS);
		if (set) {
			usppms |= BDC_U2E;
			usppms |= BDC_U2A;
			bdc->devstatus |= (1 << USB_DEV_STAT_U2_ENABLED);
		} else {
			usppms &= ~BDC_U2E;
			usppms &= ~BDC_U2A;
			bdc->devstatus &= ~(1 << USB_DEV_STAT_U2_ENABLED);
		}
		bdc_writel(bdc->regs, BDC_USPPMS, usppms);
		break;

	case USB_DEVICE_LTM_ENABLE:
		dev_dbg(bdc->dev, "USB_DEVICE_LTM_ENABLE?\n");
		if (bdc->gadget.speed != USB_SPEED_SUPER ||
						state != USB_STATE_CONFIGURED)
			return -EINVAL;
		break;
	default:
		dev_err(bdc->dev, "Unknown wValue:%d\n", wValue);
		return -EOPNOTSUPP;
	} /* USB_RECIP_DEVICE end */

	return 0;
}

/* SET/CLEAR FEATURE handler */
static int ep0_handle_feature(struct bdc *bdc,
				struct usb_ctrlrequest *setup_pkt, bool set)
{
	enum usb_device_state state = bdc->gadget.state;
	struct bdc_ep *ep;
	u16 wValue;
	u16 wIndex;
	int epnum;

	wValue = le16_to_cpu(setup_pkt->wValue);
	wIndex = le16_to_cpu(setup_pkt->wIndex);

	dev_dbg(bdc->dev,
		"%s wValue=%d wIndex=%d devstate=%08x speed=%d set=%d\n",
		__func__, wValue, wIndex, state,
		bdc->gadget.speed, set);

	switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		return ep0_handle_feature_dev(bdc, wValue, wIndex, set);
	case USB_RECIP_INTERFACE:
		dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
		/* USB3 spec, sec 9.4.9 */
		if (wValue != USB_INTRF_FUNC_SUSPEND)
			return -EINVAL;

		/* USB3 spec, Table 9-8 */
		if (set) {
			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW) {
				dev_dbg(bdc->dev, "SET REMOTE_WAKEUP\n");
				bdc->devstatus |= REMOTE_WAKE_ENABLE;
			} else {
				dev_dbg(bdc->dev, "CLEAR REMOTE_WAKEUP\n");
				bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
			}
		}
		break;

	case USB_RECIP_ENDPOINT:
		dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
		if (wValue != USB_ENDPOINT_HALT)
			return -EINVAL;

		epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
		if (epnum) {
			if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
				epnum = epnum * 2 + 1;
			else
				epnum *= 2;
		} else {
			epnum = 1; /*EP0*/
		}
		/*
		 * If CLEAR_FEATURE on ep0 then don't do anything as the stall
		 * condition on ep0 has already been cleared when SETUP packet
		 * was received.
		 */
		if (epnum == 1 && !set) {
			dev_dbg(bdc->dev, "ep0 stall already cleared\n");
			return 0;
		}
		dev_dbg(bdc->dev, "epnum=%d\n", epnum);
		ep = bdc->bdc_ep_array[epnum];
		if (!ep)
			return -EINVAL;

		return ep_set_halt(ep, set);
	default:
		dev_err(bdc->dev, "Unknown recipient\n");
		return -EINVAL;
	}

	return 0;
}

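/*
 * Illustrative index mapping used above (and in ep0_handle_status):
 * IN and OUT endpoints share one array, so wIndex 0x81 (EP1 IN) maps
 * to bdc_ep_array[1 * 2 + 1] = bdc_ep_array[3], wIndex 0x02 (EP2 OUT)
 * maps to bdc_ep_array[4], and ep0 always lives at index 1.
 */
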
/* GET_STATUS request handler */
static int ep0_handle_status(struct bdc *bdc,
				struct usb_ctrlrequest *setup_pkt)
{
	enum usb_device_state state = bdc->gadget.state;
	struct bdc_ep *ep;
	u16 usb_status = 0;
	u32 epnum;
	u16 wIndex;

	/* USB2.0 spec sec 9.4.5 */
	if (state == USB_STATE_DEFAULT)
		return -EINVAL;

	wIndex = le16_to_cpu(setup_pkt->wIndex);
	dev_dbg(bdc->dev, "%s\n", __func__);
	usb_status = bdc->devstatus;
	switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		dev_dbg(bdc->dev,
			"USB_RECIP_DEVICE devstatus:%08x\n",
			bdc->devstatus);
		/* USB3 spec, sec 9.4.5 */
		if (bdc->gadget.speed == USB_SPEED_SUPER)
			usb_status &= ~REMOTE_WAKE_ENABLE;
		break;

	case USB_RECIP_INTERFACE:
		dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
		if (bdc->gadget.speed == USB_SPEED_SUPER) {
			/*
			 * This should come from func for Func remote wkup
			 * usb_status |=1;
			 */
			if (bdc->devstatus & REMOTE_WAKE_ENABLE)
				usb_status |= REMOTE_WAKE_ENABLE;
		} else {
			usb_status = 0;
		}
		break;

	case USB_RECIP_ENDPOINT:
		dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
		epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
		if (epnum) {
			if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
				epnum = epnum*2 + 1;
			else
				epnum *= 2;
		} else {
			epnum = 1; /* EP0 */
		}

		ep = bdc->bdc_ep_array[epnum];
		if (!ep) {
			dev_err(bdc->dev, "ISSUE, GET_STATUS for invalid EP?\n");
			return -EINVAL;
		}
		if (ep->flags & BDC_EP_STALL)
			usb_status |= 1 << USB_ENDPOINT_HALT;

		break;
	default:
		dev_err(bdc->dev, "Unknown recipient for get_status\n");
		return -EINVAL;
	}
	/* prepare a data stage for GET_STATUS */
	dev_dbg(bdc->dev, "usb_status=%08x\n", usb_status);
	*(__le16 *)bdc->ep0_response_buff = cpu_to_le16(usb_status);
	bdc->ep0_req.usb_req.length = 2;
	bdc->ep0_req.usb_req.buf = &bdc->ep0_response_buff;
	ep0_queue_data_stage(bdc);

	return 0;
}

static void ep0_set_sel_cmpl(struct usb_ep *_ep, struct usb_request *_req)
{
	/* ep0_set_sel_cmpl */
}

/* Queue data stage to handle 6 byte SET_SEL request */
static int ep0_set_sel(struct bdc *bdc,
				struct usb_ctrlrequest *setup_pkt)
{
	struct bdc_ep *ep;
	u16 wLength;

	dev_dbg(bdc->dev, "%s\n", __func__);
	wLength = le16_to_cpu(setup_pkt->wLength);
	if (unlikely(wLength != 6)) {
		dev_err(bdc->dev, "%s Wrong wLength:%d\n", __func__, wLength);
		return -EINVAL;
	}
	ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.ep = ep;
	bdc->ep0_req.usb_req.length = 6;
	bdc->ep0_req.usb_req.buf = bdc->ep0_response_buff;
	bdc->ep0_req.usb_req.complete = ep0_set_sel_cmpl;
	ep0_queue_data_stage(bdc);

	return 0;
}

/*
 * Queue a 0 byte bd only if wLength is more than the length and length is
 * a multiple of MaxPacket
 */
static int ep0_queue_zlp(struct bdc *bdc)
{
	int ret;

	dev_dbg(bdc->dev, "%s\n", __func__);
	bdc->ep0_req.ep = bdc->bdc_ep_array[1];
	bdc->ep0_req.usb_req.length = 0;
	bdc->ep0_req.usb_req.complete = NULL;
	bdc->ep0_state = WAIT_FOR_DATA_START;
	ret = bdc_queue_xfr(bdc, &bdc->ep0_req);
	if (ret) {
		dev_err(bdc->dev, "err queueing zlp :%d\n", ret);
		return ret;
	}
	bdc->ep0_state = WAIT_FOR_DATA_XMIT;

	return 0;
}

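/*
 * Example of when this path is taken (illustrative): zlp_needed was set
 * in setup_first_bd_ep0() because the host requested more data than the
 * gadget queued and the queued length is an exact multiple of MaxPacket
 * (say a 128-byte response with MaxPacket 64); the babble handler in
 * bdc_sr_xsf() then calls ep0_queue_zlp() to terminate the data stage
 * with a zero-length packet.
 */
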
/* Control request handler */
static int handle_control_request(struct bdc *bdc)
{
	enum usb_device_state state = bdc->gadget.state;
	struct usb_ctrlrequest *setup_pkt;
	int delegate_setup = 0;
	int ret = 0;
	int config = 0;

	setup_pkt = &bdc->setup_pkt;
	dev_dbg(bdc->dev, "%s\n", __func__);
	if ((setup_pkt->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup_pkt->bRequest) {
		case USB_REQ_SET_ADDRESS:
			dev_dbg(bdc->dev, "USB_REQ_SET_ADDRESS\n");
			ret = ep0_set_address(bdc, setup_pkt);
			bdc->devstatus &= DEVSTATUS_CLEAR;
			break;

		case USB_REQ_SET_CONFIGURATION:
			dev_dbg(bdc->dev, "USB_REQ_SET_CONFIGURATION\n");
			if (state == USB_STATE_ADDRESS) {
				usb_gadget_set_state(&bdc->gadget,
							USB_STATE_CONFIGURED);
			} else if (state == USB_STATE_CONFIGURED) {
				/*
				 * USB2 spec sec 9.4.7, if wValue is 0 then dev
				 * is moved to addressed state
				 */
				config = le16_to_cpu(setup_pkt->wValue);
				if (!config)
					usb_gadget_set_state(
							&bdc->gadget,
							USB_STATE_ADDRESS);
			}
			delegate_setup = 1;
			break;

		case USB_REQ_SET_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_SET_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 1);
			break;

		case USB_REQ_CLEAR_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_CLEAR_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 0);
			break;

		case USB_REQ_GET_STATUS:
			dev_dbg(bdc->dev, "USB_REQ_GET_STATUS\n");
			ret = ep0_handle_status(bdc, setup_pkt);
			break;

		case USB_REQ_SET_SEL:
			dev_dbg(bdc->dev, "USB_REQ_SET_SEL\n");
			ret = ep0_set_sel(bdc, setup_pkt);
			break;

		case USB_REQ_SET_ISOCH_DELAY:
			dev_warn(bdc->dev,
				"USB_REQ_SET_ISOCH_DELAY not handled\n");
			ret = 0;
			break;
		default:
			delegate_setup = 1;
		}
	} else {
		delegate_setup = 1;
	}

	if (delegate_setup) {
		spin_unlock(&bdc->lock);
		ret = bdc->gadget_driver->setup(&bdc->gadget, setup_pkt);
		spin_lock(&bdc->lock);
	}

	return ret;
}

/* EP0: Data stage started */
void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev, "%s\n", __func__);
	ep = bdc->bdc_ep_array[1];
	/* If ep0 was stalled, then clear it first */
	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}
	if (bdc->ep0_state != WAIT_FOR_DATA_START)
		dev_warn(bdc->dev,
			"Data stage not expected ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	ret = handle_control_request(bdc);
	if (ret == USB_GADGET_DELAYED_STATUS) {
		/*
		 * The ep0 state will remain WAIT_FOR_DATA_START till
		 * we receive ep_queue on ep0
		 */
		bdc->delayed_status = true;
		return;
	}
	if (!ret) {
		bdc->ep0_state = WAIT_FOR_DATA_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s\n", ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	ep0_stall(bdc);
}

/* EP0: status stage started */
void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev,
		"%s ep0_state:%s\n",
		__func__, ep0_state_string[bdc->ep0_state]);
	ep = bdc->bdc_ep_array[1];

	/* check if a ZLP was queued */
	if (bdc->zlp_needed)
		bdc->zlp_needed = false;

	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}

	if ((bdc->ep0_state != WAIT_FOR_STATUS_START) &&
				(bdc->ep0_state != WAIT_FOR_DATA_XMIT))
		dev_err(bdc->dev,
			"Status stage recv but ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	/* check if data stage is in progress ? */
	if (bdc->ep0_state == WAIT_FOR_DATA_XMIT) {
		bdc->ep0_state = STATUS_PENDING;
		/* Status stage will be queued upon Data stage transmit event */
		dev_dbg(bdc->dev,
			"status started but data not transmitted yet\n");
		return;
	}
	setup_pkt = &bdc->setup_pkt;

	/*
	 * For a 2 stage setup, process the setup only now; for a 3 stage
	 * setup the data stage has already been handled
	 */
	if (!le16_to_cpu(setup_pkt->wLength)) {
		ret = handle_control_request(bdc);
		if (ret == USB_GADGET_DELAYED_STATUS) {
			bdc->delayed_status = true;
			/* ep0_state will remain WAIT_FOR_STATUS_START */
			return;
		}
	}
	if (!ret) {
		/* Queue a status stage BD */
		ep0_queue_status_stage(bdc);
		bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
		dev_dbg(bdc->dev,
			"ep0_state:%s\n", ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	ep0_stall(bdc);
}

/* Helper function to update ep0 upon SR with xsf_succ or xsf_short */
static void ep0_xsf_complete(struct bdc *bdc, struct bdc_sr *sreport)
{
	dev_dbg(bdc->dev, "%s\n", __func__);
	switch (bdc->ep0_state) {
	case WAIT_FOR_DATA_XMIT:
		bdc->ep0_state = WAIT_FOR_STATUS_START;
		break;
	case WAIT_FOR_STATUS_XMIT:
		bdc->ep0_state = WAIT_FOR_SETUP;
		if (bdc->test_mode) {
			int ret;

			dev_dbg(bdc->dev, "test_mode:%d\n", bdc->test_mode);
			ret = bdc_set_test_mode(bdc);
			if (ret < 0) {
				dev_err(bdc->dev, "Err in setting Test mode\n");
				return;
			}
			bdc->test_mode = 0;
		}
		break;
	case STATUS_PENDING:
		bdc_xsf_ep0_status_start(bdc, sreport);
		break;

	default:
		dev_err(bdc->dev,
			"Unknown ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);
	}
}


/* xfr completion status report handler */
void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep;
	u32 sr_status;
	u8 ep_num;

	ep_num = (le32_to_cpu(sreport->offset[3])>>4) & 0x1f;
	ep = bdc->bdc_ep_array[ep_num];
	if (!ep || !(ep->flags & BDC_EP_ENABLED)) {
		dev_err(bdc->dev, "xsf for ep not enabled\n");
		return;
	}
	/*
	 * check if this transfer is after link went from U3->U0 due
	 * to remote wakeup
	 */
	if (bdc->devstatus & FUNC_WAKE_ISSUED) {
		bdc->devstatus &= ~(FUNC_WAKE_ISSUED);
		dev_dbg(bdc->dev, "%s clearing FUNC_WAKE_ISSUED flag\n",
								__func__);
	}
	sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
	dev_dbg_ratelimited(bdc->dev, "%s sr_status=%d ep:%s\n",
					__func__, sr_status, ep->name);

	switch (sr_status) {
	case XSF_SUCC:
	case XSF_SHORT:
		handle_xsr_succ_status(bdc, ep, sreport);
		if (ep_num == 1)
			ep0_xsf_complete(bdc, sreport);
		break;

	case XSF_SETUP_RECV:
	case XSF_DATA_START:
	case XSF_STATUS_START:
		if (ep_num != 1) {
			dev_err(bdc->dev,
				"ep0 related packets on non ep0 endpoint\n");
			return;
		}
		bdc->sr_xsf_ep0[sr_status - XSF_SETUP_RECV](bdc, sreport);
		break;

	case XSF_BABB:
		if (ep_num == 1) {
			dev_dbg(bdc->dev, "Babble on ep0 zlp_need:%d\n",
							bdc->zlp_needed);
			/*
			 * If the last completed transfer had wLength > data
			 * length, and the length is a multiple of MaxPacket,
			 * then queue a ZLP.
			 */
			if (bdc->zlp_needed) {
				/* queue a zero length bd */
				ep0_queue_zlp(bdc);
				return;
			}
		}
		dev_warn(bdc->dev, "Babble on ep not handled\n");
		break;
	default:
		dev_warn(bdc->dev, "sr status not handled:%x\n", sr_status);
		break;
	}
}
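
/*
 * bdc->sr_xsf_ep0[] is indexed by (sr_status - XSF_SETUP_RECV) above, so
 * it must hold the three ep0 handlers in event order. A sketch of the
 * presumed setup done elsewhere in the driver (the setup- and data-stage
 * handler names are inferred; only bdc_xsf_ep0_status_start() appears in
 * this section):
 *
 *	bdc->sr_xsf_ep0[0] = bdc_xsf_ep0_setup_recv;	// XSF_SETUP_RECV
 *	bdc->sr_xsf_ep0[1] = bdc_xsf_ep0_data_start;	// XSF_DATA_START
 *	bdc->sr_xsf_ep0[2] = bdc_xsf_ep0_status_start;	// XSF_STATUS_START
 */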

static int bdc_gadget_ep_queue(struct usb_ep *_ep,
				struct usb_request *_req, gfp_t gfp_flags)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_ep->desc)
		return -ESHUTDOWN;

	if (!_req || !_req->complete || !_req->buf)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%p req:%p\n", __func__, ep, req);
	dev_dbg(bdc->dev, "queuing request %p to %s length %d zero:%d\n",
				_req, ep->name, _req->length, _req->zero);

	if (!ep->usb_ep.desc) {
		dev_warn(bdc->dev,
			"trying to queue req %p to disabled %s\n",
			_req, ep->name);
		return -ESHUTDOWN;
	}

	if (_req->length > MAX_XFR_LEN) {
		dev_warn(bdc->dev,
			"req length > supported MAX:%d requested:%d\n",
			MAX_XFR_LEN, _req->length);
		return -EOPNOTSUPP;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	if (ep == bdc->bdc_ep_array[1])
		ret = ep0_queue(ep, req);
	else
		ret = ep_queue(ep, req);

	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
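
/*
 * Caller-side view (illustrative only): a function driver reaches
 * bdc_gadget_ep_queue() through the gadget core, with my_complete() as a
 * hypothetical completion callback:
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *
 *	req->buf = buf;
 *	req->length = len;		// must not exceed MAX_XFR_LEN
 *	req->complete = my_complete;	// required, checked above
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 */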

static int bdc_gadget_ep_dequeue(struct usb_ep *_ep,
				  struct usb_request *_req)
{
	struct bdc_req *req;
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !_req)
		return -EINVAL;

	ep = to_bdc_ep(_ep);
	req = to_bdc_req(_req);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
	bdc_dbg_bd_list(bdc, ep);
	spin_lock_irqsave(&bdc->lock, flags);
	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->usb_req == _req)
			break;
	}
	if (&req->usb_req != _req) {
		spin_unlock_irqrestore(&bdc->lock, flags);
		dev_err(bdc->dev, "usb_req != req\n");
		return -EINVAL;
	}
	ret = ep_dequeue(ep, req);
	if (ret) {
		ret = -EOPNOTSUPP;
		goto err;
	}
	bdc_req_complete(ep, req, -ECONNRESET);

err:
	bdc_dbg_bd_list(bdc, ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
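
/*
 * As in other UDC drivers, a successfully dequeued request is completed
 * with -ECONNRESET so the caller can distinguish cancellation from a
 * normal completion (0) or an endpoint shutdown (-ESHUTDOWN).
 */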

static int bdc_gadget_ep_set_halt(struct usb_ep *_ep, int value)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;
	dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
	spin_lock_irqsave(&bdc->lock, flags);
	if (usb_endpoint_xfer_isoc(ep->usb_ep.desc))
		ret = -EINVAL;
	else if (!list_empty(&ep->queue))
		ret = -EAGAIN;
	else
		ret = ep_set_halt(ep, value);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}
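
/*
 * This follows the usb_ep_set_halt() contract: isochronous endpoints
 * cannot be halted (-EINVAL) and an endpoint with requests still queued
 * is refused with -EAGAIN; both setting and clearing the halt go through
 * the same ep_set_halt() helper, distinguished by 'value'.
 */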

static struct usb_request *bdc_gadget_alloc_request(struct usb_ep *_ep,
						     gfp_t gfp_flags)
{
	struct bdc_req *req;
	struct bdc_ep *ep;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	ep = to_bdc_ep(_ep);
	req->ep = ep;
	req->epnum = ep->ep_num;
	req->usb_req.dma = DMA_ADDR_INVALID;
	dev_dbg(ep->bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);

	return &req->usb_req;
}
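
/*
 * req->usb_req.dma is preset to DMA_ADDR_INVALID, presumably so the
 * mapping code can tell an unmapped request from one the caller already
 * mapped. Matching caller-side pattern (illustrative):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	// ... use req ...
 *	usb_ep_free_request(ep, req);
 */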

static void bdc_gadget_free_request(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct bdc_req *req;

	req = to_bdc_req(_req);
	kfree(req);
}

/* endpoint operations */

/* configure endpoint and also allocate resources */
static int bdc_gadget_ep_enable(struct usb_ep *_ep,
				const struct usb_endpoint_descriptor *desc)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("bdc_gadget_ep_enable invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("bdc_gadget_ep_enable missing wMaxPacketSize\n");
		return -EINVAL;
	}

	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Sanity check, upper layer will not send enable for ep0 */
	if (ep == bdc->bdc_ep_array[1])
		return -EINVAL;

	if (!bdc->gadget_driver
	    || bdc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	dev_dbg(bdc->dev, "%s Enabling %s\n", __func__, ep->name);
	spin_lock_irqsave(&bdc->lock, flags);
	ep->desc = desc;
	ep->comp_desc = _ep->comp_desc;
	ret = bdc_ep_enable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}

static int bdc_gadget_ep_disable(struct usb_ep *_ep)
{
	unsigned long flags;
	struct bdc_ep *ep;
	struct bdc *bdc;
	int ret;

	if (!_ep) {
		pr_debug("bdc: invalid parameters\n");
		return -EINVAL;
	}
	ep = to_bdc_ep(_ep);
	bdc = ep->bdc;

	/* Upper layer will not call this for ep0, but do a sanity check */
	if (ep == bdc->bdc_ep_array[1]) {
		dev_warn(bdc->dev, "%s called for ep0\n", __func__);
		return -EINVAL;
	}
	dev_dbg(bdc->dev,
		"%s() ep:%s ep->flags:%08x\n",
		__func__, ep->name, ep->flags);

	if (!(ep->flags & BDC_EP_ENABLED)) {
		dev_warn(bdc->dev, "%s is already disabled\n", ep->name);
		return 0;
	}
	spin_lock_irqsave(&bdc->lock, flags);
	ret = bdc_ep_disable(ep);
	spin_unlock_irqrestore(&bdc->lock, flags);

	return ret;
}

static const struct usb_ep_ops bdc_gadget_ep_ops = {
	.enable = bdc_gadget_ep_enable,
	.disable = bdc_gadget_ep_disable,
	.alloc_request = bdc_gadget_alloc_request,
	.free_request = bdc_gadget_free_request,
	.queue = bdc_gadget_ep_queue,
	.dequeue = bdc_gadget_ep_dequeue,
	.set_halt = bdc_gadget_ep_set_halt
};
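
/*
 * The gadget core dispatches through this table: usb_ep_enable() ->
 * .enable, usb_ep_queue() -> .queue, and so on. Each callback receives
 * the usb_ep embedded in a struct bdc_ep and recovers the container via
 * to_bdc_ep().
 */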

/* dir = 1 is IN */
static int init_ep(struct bdc *bdc, u32 epnum, u32 dir)
{
	struct bdc_ep *ep;

	dev_dbg(bdc->dev, "%s epnum=%d dir=%d\n", __func__, epnum, dir);
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->bdc = bdc;
	ep->dir = dir;

	if (dir)
		ep->usb_ep.caps.dir_in = true;
	else
		ep->usb_ep.caps.dir_out = true;

	/* ep->ep_num is the index inside bdc->bdc_ep_array */
	if (epnum == 1) {
		ep->ep_num = 1;
		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1);
		usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE);
		ep->usb_ep.caps.type_control = true;
		ep->comp_desc = NULL;
		bdc->gadget.ep0 = &ep->usb_ep;
	} else {
		if (dir)
			ep->ep_num = epnum * 2 - 1;
		else
			ep->ep_num = epnum * 2 - 2;

		bdc->bdc_ep_array[ep->ep_num] = ep;
		snprintf(ep->name, sizeof(ep->name), "ep%d%s", epnum - 1,
			 dir & 1 ? "in" : "out");

		usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
		ep->usb_ep.caps.type_iso = true;
		ep->usb_ep.caps.type_bulk = true;
		ep->usb_ep.caps.type_int = true;
		ep->usb_ep.max_streams = 0;
		list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list);
	}
	ep->usb_ep.ops = &bdc_gadget_ep_ops;
	ep->usb_ep.name = ep->name;
	ep->flags = 0;
	ep->ignore_next_sr = false;
	dev_dbg(bdc->dev, "ep=%p ep->usb_ep.name=%s epnum=%d ep->epnum=%d\n",
				ep, ep->usb_ep.name, epnum, ep->ep_num);

	INIT_LIST_HEAD(&ep->queue);

	return 0;
}
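
/*
 * Worked example of the ep_num mapping above (indices into
 * bdc->bdc_ep_array):
 *
 *	epnum 1 (ep0)	-> ep_num 1
 *	epnum 2 OUT/IN	-> ep_num 2 / 3		(ep1out / ep1in)
 *	epnum 3 OUT/IN	-> ep_num 4 / 5		(ep2out / ep2in)
 *	epnum n OUT/IN	-> ep_num 2n-2 / 2n-1
 */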

/* Init all ep */
int bdc_init_ep(struct bdc *bdc)
{
	u8 epnum;
	int ret;

	dev_dbg(bdc->dev, "%s()\n", __func__);
	INIT_LIST_HEAD(&bdc->gadget.ep_list);
	/* init ep0 */
	ret = init_ep(bdc, 1, 0);
	if (ret) {
		dev_err(bdc->dev, "init ep0 failed:%d\n", ret);
		return ret;
	}

	for (epnum = 2; epnum <= bdc->num_eps / 2; epnum++) {
		/* OUT */
		ret = init_ep(bdc, epnum, 0);
		if (ret) {
			dev_err(bdc->dev,
				"init ep failed for:%d error: %d\n",
				epnum, ret);
			return ret;
		}

		/* IN */
		ret = init_ep(bdc, epnum, 1);
		if (ret) {
			dev_err(bdc->dev,
				"init ep failed for:%d error: %d\n",
				epnum, ret);
			return ret;
		}
	}

	return 0;
}
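
/*
 * Example: if bdc->num_eps were 32, the loop above would create OUT/IN
 * pairs for epnum 2..16, i.e. ep_num 2..31, which together with ep0 at
 * index 1 fills bdc_ep_array[1..31].
 */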