isp1760-hcd.c

  1. /*
  2. * Driver for the NXP ISP1760 chip
  3. *
  4. * However, the code might contain some bugs. What doesn't work for sure is:
  5. * - ISO
  6. * - OTG
  7. * The interrupt line is configured as active low, level.
  8. *
  9. * (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
  10. *
  11. * (c) 2011 Arvid Brodin <arvid.brodin@enea.com>
  12. *
  13. */
  14. #include <linux/module.h>
  15. #include <linux/kernel.h>
  16. #include <linux/slab.h>
  17. #include <linux/list.h>
  18. #include <linux/usb.h>
  19. #include <linux/usb/hcd.h>
  20. #include <linux/debugfs.h>
  21. #include <linux/uaccess.h>
  22. #include <linux/io.h>
  23. #include <linux/mm.h>
  24. #include <linux/timer.h>
  25. #include <asm/unaligned.h>
  26. #include <asm/cacheflush.h>
  27. #include <linux/gpio.h>
  28. #include "isp1760-hcd.h"
  29. static struct kmem_cache *qtd_cachep;
  30. static struct kmem_cache *qh_cachep;
  31. static struct kmem_cache *urb_listitem_cachep;
  32. enum queue_head_types {
  33. QH_CONTROL,
  34. QH_BULK,
  35. QH_INTERRUPT,
  36. QH_END
  37. };
  38. struct isp1760_hcd {
  39. u32 hcs_params;
  40. spinlock_t lock;
  41. struct slotinfo atl_slots[32];
  42. int atl_done_map;
  43. struct slotinfo int_slots[32];
  44. int int_done_map;
  45. struct memory_chunk memory_pool[BLOCKS];
  46. struct list_head qh_list[QH_END];
  47. /* periodic schedule support */
  48. #define DEFAULT_I_TDPS 1024
  49. unsigned periodic_size;
  50. unsigned i_thresh;
  51. unsigned long reset_done;
  52. unsigned long next_statechange;
  53. unsigned int devflags;
  54. int rst_gpio;
  55. };
  56. static inline struct isp1760_hcd *hcd_to_priv(struct usb_hcd *hcd)
  57. {
  58. return (struct isp1760_hcd *) (hcd->hcd_priv);
  59. }
  60. /* Section 2.2 Host Controller Capability Registers */
  61. #define HC_LENGTH(p) (((p)>>00)&0x00ff) /* bits 7:0 */
  62. #define HC_VERSION(p) (((p)>>16)&0xffff) /* bits 31:16 */
  63. #define HCS_INDICATOR(p) ((p)&(1 << 16)) /* true: has port indicators */
  64. #define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */
  65. #define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
  66. #define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */
  67. #define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */
  68. /* Section 2.3 Host Controller Operational Registers */
  69. #define CMD_LRESET (1<<7) /* partial reset (no ports, etc) */
  70. #define CMD_RESET (1<<1) /* reset HC not bus */
  71. #define CMD_RUN (1<<0) /* start/stop HC */
  72. #define STS_PCD (1<<2) /* port change detect */
  73. #define FLAG_CF (1<<0) /* true: we'll support "high speed" */
  74. #define PORT_OWNER (1<<13) /* true: companion hc owns this port */
  75. #define PORT_POWER (1<<12) /* true: has power (see PPC) */
  76. #define PORT_USB11(x) (((x) & (3 << 10)) == (1 << 10)) /* USB 1.1 device */
  77. #define PORT_RESET (1<<8) /* reset port */
  78. #define PORT_SUSPEND (1<<7) /* suspend port */
  79. #define PORT_RESUME (1<<6) /* resume it */
  80. #define PORT_PE (1<<2) /* port enable */
  81. #define PORT_CSC (1<<1) /* connect status change */
  82. #define PORT_CONNECT (1<<0) /* device connected */
  83. #define PORT_RWC_BITS (PORT_CSC)
  84. struct isp1760_qtd {
  85. u8 packet_type;
  86. void *data_buffer;
  87. u32 payload_addr;
  88. /* the rest is HCD-private */
  89. struct list_head qtd_list;
  90. struct urb *urb;
  91. size_t length;
  92. size_t actual_length;
  93. /* QTD_ENQUEUED: waiting for transfer (inactive) */
  94. /* QTD_PAYLOAD_ALLOC: chip mem has been allocated for payload */
  95. /* QTD_XFER_STARTED: valid ptd has been written to isp176x - only
  96. interrupt handler may touch this qtd! */
  97. /* QTD_XFER_COMPLETE: payload has been transferred successfully */
  98. /* QTD_RETIRE: transfer error/abort qtd */
  99. #define QTD_ENQUEUED 0
  100. #define QTD_PAYLOAD_ALLOC 1
  101. #define QTD_XFER_STARTED 2
  102. #define QTD_XFER_COMPLETE 3
  103. #define QTD_RETIRE 4
  104. u32 status;
  105. };
  106. /* Queue head, one for each active endpoint */
  107. struct isp1760_qh {
  108. struct list_head qh_list;
  109. struct list_head qtd_list;
  110. u32 toggle;
  111. u32 ping;
  112. int slot;
  113. int tt_buffer_dirty; /* See USB2.0 spec section 11.17.5 */
  114. };
  115. struct urb_listitem {
  116. struct list_head urb_list;
  117. struct urb *urb;
  118. };
  119. /*
  120. * Access functions for isp176x registers (addresses 0..0x03FF).
  121. */
  122. static u32 reg_read32(void __iomem *base, u32 reg)
  123. {
  124. return readl(base + reg);
  125. }
  126. static void reg_write32(void __iomem *base, u32 reg, u32 val)
  127. {
  128. writel(val, base + reg);
  129. }
  130. /*
  131. * Access functions for isp176x memory (offset >= 0x0400).
  132. *
  133. * bank_reads8() reads memory locations prefetched by an earlier write to
  134. * HC_MEMORY_REG (see isp176x datasheet). Unless you want to do fancy multi-
  135. * bank optimizations, you should use the more generic mem_reads8() below.
  136. *
  137. * For access to ptd memory, use the specialized ptd_read() and ptd_write()
  138. * below.
  139. *
  140. * These functions copy data to/from the device via MMIO. memcpy_{to|from}io()
  141. * doesn't quite work because some platforms have to enforce 32-bit access
  142. */
  143. static void bank_reads8(void __iomem *src_base, u32 src_offset, u32 bank_addr,
  144. __u32 *dst, u32 bytes)
  145. {
  146. __u32 __iomem *src;
  147. u32 val;
  148. __u8 *src_byteptr;
  149. __u8 *dst_byteptr;
  150. src = src_base + (bank_addr | src_offset);
  151. if (src_offset < PAYLOAD_OFFSET) {
  152. while (bytes >= 4) {
  153. *dst = le32_to_cpu(__raw_readl(src));
  154. bytes -= 4;
  155. src++;
  156. dst++;
  157. }
  158. } else {
  159. while (bytes >= 4) {
  160. *dst = __raw_readl(src);
  161. bytes -= 4;
  162. src++;
  163. dst++;
  164. }
  165. }
  166. if (!bytes)
  167. return;
  168. /* in case we have 3, 2 or 1 bytes left. The dst buffer may not be fully
  169. * allocated.
  170. */
  171. if (src_offset < PAYLOAD_OFFSET)
  172. val = le32_to_cpu(__raw_readl(src));
  173. else
  174. val = __raw_readl(src);
  175. dst_byteptr = (void *) dst;
  176. src_byteptr = (void *) &val;
  177. while (bytes > 0) {
  178. *dst_byteptr = *src_byteptr;
  179. dst_byteptr++;
  180. src_byteptr++;
  181. bytes--;
  182. }
  183. }
  184. static void mem_reads8(void __iomem *src_base, u32 src_offset, void *dst,
  185. u32 bytes)
  186. {
  187. reg_write32(src_base, HC_MEMORY_REG, src_offset + ISP_BANK(0));
  188. ndelay(90);
  189. bank_reads8(src_base, src_offset, ISP_BANK(0), dst, bytes);
  190. }
  191. static void mem_writes8(void __iomem *dst_base, u32 dst_offset,
  192. __u32 const *src, u32 bytes)
  193. {
  194. __u32 __iomem *dst;
  195. dst = dst_base + dst_offset;
  196. if (dst_offset < PAYLOAD_OFFSET) {
  197. while (bytes >= 4) {
  198. __raw_writel(cpu_to_le32(*src), dst);
  199. bytes -= 4;
  200. src++;
  201. dst++;
  202. }
  203. } else {
  204. while (bytes >= 4) {
  205. __raw_writel(*src, dst);
  206. bytes -= 4;
  207. src++;
  208. dst++;
  209. }
  210. }
  211. if (!bytes)
  212. return;
  213. /* in case we have 3, 2 or 1 bytes left. The buffer is allocated and the
  214. * extra bytes should not be read by the HW.
  215. */
  216. if (dst_offset < PAYLOAD_OFFSET)
  217. __raw_writel(cpu_to_le32(*src), dst);
  218. else
  219. __raw_writel(*src, dst);
  220. }
  221. /*
  222. * Read and write ptds. 'ptd_offset' should be one of ISO_PTD_OFFSET,
  223. * INT_PTD_OFFSET, and ATL_PTD_OFFSET. 'slot' should be less than 32.
  224. */
  225. static void ptd_read(void __iomem *base, u32 ptd_offset, u32 slot,
  226. struct ptd *ptd)
  227. {
  228. reg_write32(base, HC_MEMORY_REG,
  229. ISP_BANK(0) + ptd_offset + slot*sizeof(*ptd));
  230. ndelay(90);
  231. bank_reads8(base, ptd_offset + slot*sizeof(*ptd), ISP_BANK(0),
  232. (void *) ptd, sizeof(*ptd));
  233. }
  234. static void ptd_write(void __iomem *base, u32 ptd_offset, u32 slot,
  235. struct ptd *ptd)
  236. {
  237. mem_writes8(base, ptd_offset + slot*sizeof(*ptd) + sizeof(ptd->dw0),
  238. &ptd->dw1, 7*sizeof(ptd->dw1));
  239. /* Make sure dw0 gets written last (after other dw's and after payload)
  240. since it contains the enable bit */
  241. wmb();
  242. mem_writes8(base, ptd_offset + slot*sizeof(*ptd), &ptd->dw0,
  243. sizeof(ptd->dw0));
  244. }
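/*
 * A minimal sketch (not part of the driver) of how ptd_read() and the DW0/DW3
 * status bits are meant to be combined; the errata-2 workaround further down
 * does essentially this. 'base' is assumed to be the mapped chip window and
 * 'slot' a valid ATL slot index.
 */
static bool __maybe_unused atl_ptd_completed(void __iomem *base, u32 slot)
{
	struct ptd ptd;

	/* Fetch the PTD that the hardware may have updated in place */
	ptd_read(base, ATL_PTD_OFFSET, slot, &ptd);

	/* A PTD the chip is done with has both VALID and ACTIVE cleared */
	return !FROM_DW0_VALID(ptd.dw0) && !FROM_DW3_ACTIVE(ptd.dw3);
}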
  245. /* memory management of the 60 KiB on the chip from 0x1000 to 0xffff */
  246. static void init_memory(struct isp1760_hcd *priv)
  247. {
  248. int i, curr;
  249. u32 payload_addr;
  250. payload_addr = PAYLOAD_OFFSET;
  251. for (i = 0; i < BLOCK_1_NUM; i++) {
  252. priv->memory_pool[i].start = payload_addr;
  253. priv->memory_pool[i].size = BLOCK_1_SIZE;
  254. priv->memory_pool[i].free = 1;
  255. payload_addr += priv->memory_pool[i].size;
  256. }
  257. curr = i;
  258. for (i = 0; i < BLOCK_2_NUM; i++) {
  259. priv->memory_pool[curr + i].start = payload_addr;
  260. priv->memory_pool[curr + i].size = BLOCK_2_SIZE;
  261. priv->memory_pool[curr + i].free = 1;
  262. payload_addr += priv->memory_pool[curr + i].size;
  263. }
  264. curr = i;
  265. for (i = 0; i < BLOCK_3_NUM; i++) {
  266. priv->memory_pool[curr + i].start = payload_addr;
  267. priv->memory_pool[curr + i].size = BLOCK_3_SIZE;
  268. priv->memory_pool[curr + i].free = 1;
  269. payload_addr += priv->memory_pool[curr + i].size;
  270. }
  271. WARN_ON(payload_addr - priv->memory_pool[0].start > PAYLOAD_AREA_SIZE);
  272. }
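/*
 * First-fit allocation from the fixed pool set up by init_memory(): take the
 * first free block that is large enough for the qtd's payload. On failure,
 * qtd->payload_addr simply stays 0 and the caller tries again on a later
 * scheduling pass.
 */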
  273. static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
  274. {
  275. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  276. int i;
  277. WARN_ON(qtd->payload_addr);
  278. if (!qtd->length)
  279. return;
  280. for (i = 0; i < BLOCKS; i++) {
  281. if (priv->memory_pool[i].size >= qtd->length &&
  282. priv->memory_pool[i].free) {
  283. priv->memory_pool[i].free = 0;
  284. qtd->payload_addr = priv->memory_pool[i].start;
  285. return;
  286. }
  287. }
  288. }
  289. static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
  290. {
  291. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  292. int i;
  293. if (!qtd->payload_addr)
  294. return;
  295. for (i = 0; i < BLOCKS; i++) {
  296. if (priv->memory_pool[i].start == qtd->payload_addr) {
  297. WARN_ON(priv->memory_pool[i].free);
  298. priv->memory_pool[i].free = 1;
  299. qtd->payload_addr = 0;
  300. return;
  301. }
  302. }
  303. dev_err(hcd->self.controller, "%s: Invalid pointer: %08x\n",
  304. __func__, qtd->payload_addr);
  305. WARN_ON(1);
  306. qtd->payload_addr = 0;
  307. }
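/*
 * Busy-wait until (register value & mask) == done, polling in 1 us steps for
 * at most 'usec' microseconds. Returns 0 on success, -ENODEV if the register
 * reads back as all ones (chip gone), -ETIMEDOUT otherwise.
 */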
  308. static int handshake(struct usb_hcd *hcd, u32 reg,
  309. u32 mask, u32 done, int usec)
  310. {
  311. u32 result;
  312. do {
  313. result = reg_read32(hcd->regs, reg);
  314. if (result == ~0)
  315. return -ENODEV;
  316. result &= mask;
  317. if (result == done)
  318. return 0;
  319. udelay(1);
  320. usec--;
  321. } while (usec > 0);
  322. return -ETIMEDOUT;
  323. }
  324. /* reset a non-running (STS_HALT == 1) controller */
  325. static int ehci_reset(struct usb_hcd *hcd)
  326. {
  327. int retval;
  328. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  329. u32 command = reg_read32(hcd->regs, HC_USBCMD);
  330. command |= CMD_RESET;
  331. reg_write32(hcd->regs, HC_USBCMD, command);
  332. hcd->state = HC_STATE_HALT;
  333. priv->next_statechange = jiffies;
  334. retval = handshake(hcd, HC_USBCMD,
  335. CMD_RESET, 0, 250 * 1000);
  336. return retval;
  337. }
  338. static struct isp1760_qh *qh_alloc(gfp_t flags)
  339. {
  340. struct isp1760_qh *qh;
  341. qh = kmem_cache_zalloc(qh_cachep, flags);
  342. if (!qh)
  343. return NULL;
  344. INIT_LIST_HEAD(&qh->qh_list);
  345. INIT_LIST_HEAD(&qh->qtd_list);
  346. qh->slot = -1;
  347. return qh;
  348. }
  349. static void qh_free(struct isp1760_qh *qh)
  350. {
  351. WARN_ON(!list_empty(&qh->qtd_list));
  352. WARN_ON(qh->slot > -1);
  353. kmem_cache_free(qh_cachep, qh);
  354. }
  355. /* one-time init, only for memory state */
  356. static int priv_init(struct usb_hcd *hcd)
  357. {
  358. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  359. u32 hcc_params;
  360. int i;
  361. spin_lock_init(&priv->lock);
  362. for (i = 0; i < QH_END; i++)
  363. INIT_LIST_HEAD(&priv->qh_list[i]);
  364. /*
  365. * hw default: 1K periodic list heads, one per frame.
  366. * periodic_size can shrink by USBCMD update if hcc_params allows.
  367. */
  368. priv->periodic_size = DEFAULT_I_TDPS;
  369. /* controllers may cache some of the periodic schedule ... */
  370. hcc_params = reg_read32(hcd->regs, HC_HCCPARAMS);
  371. /* full frame cache */
  372. if (HCC_ISOC_CACHE(hcc_params))
  373. priv->i_thresh = 8;
  374. else /* N microframes cached */
  375. priv->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
  376. return 0;
  377. }
  378. static int isp1760_hc_setup(struct usb_hcd *hcd)
  379. {
  380. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  381. int result;
  382. u32 scratch, hwmode;
  383. /* low-level chip reset */
  384. if (gpio_is_valid(priv->rst_gpio)) {
  385. unsigned int rst_lvl;
  386. rst_lvl = (priv->devflags &
  387. ISP1760_FLAG_RESET_ACTIVE_HIGH) ? 1 : 0;
  388. gpio_set_value(priv->rst_gpio, rst_lvl);
  389. mdelay(50);
  390. gpio_set_value(priv->rst_gpio, !rst_lvl);
  391. }
  392. /* Setup HW Mode Control: This assumes a level active-low interrupt */
  393. hwmode = HW_DATA_BUS_32BIT;
  394. if (priv->devflags & ISP1760_FLAG_BUS_WIDTH_16)
  395. hwmode &= ~HW_DATA_BUS_32BIT;
  396. if (priv->devflags & ISP1760_FLAG_ANALOG_OC)
  397. hwmode |= HW_ANA_DIGI_OC;
  398. if (priv->devflags & ISP1760_FLAG_DACK_POL_HIGH)
  399. hwmode |= HW_DACK_POL_HIGH;
  400. if (priv->devflags & ISP1760_FLAG_DREQ_POL_HIGH)
  401. hwmode |= HW_DREQ_POL_HIGH;
  402. if (priv->devflags & ISP1760_FLAG_INTR_POL_HIGH)
  403. hwmode |= HW_INTR_HIGH_ACT;
  404. if (priv->devflags & ISP1760_FLAG_INTR_EDGE_TRIG)
  405. hwmode |= HW_INTR_EDGE_TRIG;
  406. /*
  407. * We have to set this first in case we're in 16-bit mode.
  408. * Write it twice to ensure correct upper bits if switching
  409. * to 16-bit mode.
  410. */
  411. reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
  412. reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
  413. reg_write32(hcd->regs, HC_SCRATCH_REG, 0xdeadbabe);
  414. /* Change bus pattern */
  415. scratch = reg_read32(hcd->regs, HC_CHIP_ID_REG);
  416. scratch = reg_read32(hcd->regs, HC_SCRATCH_REG);
  417. if (scratch != 0xdeadbabe) {
  418. dev_err(hcd->self.controller, "Scratch test failed.\n");
  419. return -ENODEV;
  420. }
  421. /* pre reset */
  422. reg_write32(hcd->regs, HC_BUFFER_STATUS_REG, 0);
  423. reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
  424. reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
  425. reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, NO_TRANSFER_ACTIVE);
  426. /* reset */
  427. reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_ALL);
  428. mdelay(100);
  429. reg_write32(hcd->regs, HC_RESET_REG, SW_RESET_RESET_HC);
  430. mdelay(100);
  431. result = ehci_reset(hcd);
  432. if (result)
  433. return result;
  434. /* Step 11 passed */
  435. dev_info(hcd->self.controller, "bus width: %d, oc: %s\n",
  436. (priv->devflags & ISP1760_FLAG_BUS_WIDTH_16) ?
  437. 16 : 32, (priv->devflags & ISP1760_FLAG_ANALOG_OC) ?
  438. "analog" : "digital");
  439. /* ATL reset */
  440. reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode | ALL_ATX_RESET);
  441. mdelay(10);
  442. reg_write32(hcd->regs, HC_HW_MODE_CTRL, hwmode);
  443. reg_write32(hcd->regs, HC_INTERRUPT_ENABLE, INTERRUPT_ENABLE_MASK);
  444. /*
  445. * PORT 1 Control register of the ISP1760 is the OTG control
  446. * register on ISP1761. Since there is no OTG or device controller
  447. * support in this driver, we use port 1 as a "normal" USB host port on
  448. * both chips.
  449. */
  450. reg_write32(hcd->regs, HC_PORT1_CTRL, PORT1_POWER | PORT1_INIT2);
  451. mdelay(10);
  452. priv->hcs_params = reg_read32(hcd->regs, HC_HCSPARAMS);
  453. return priv_init(hcd);
  454. }
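/*
 * Convert a payload address as used by the driver (an offset into the chip's
 * memory window) into the form the PTD DW2 data-start field expects: relative
 * to the start of on-chip memory at 0x400, in 8-byte units.
 */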
  455. static u32 base_to_chip(u32 base)
  456. {
  457. return ((base - 0x400) >> 3);
  458. }
  459. static int last_qtd_of_urb(struct isp1760_qtd *qtd, struct isp1760_qh *qh)
  460. {
  461. struct urb *urb;
  462. if (list_is_last(&qtd->qtd_list, &qh->qtd_list))
  463. return 1;
  464. urb = qtd->urb;
  465. qtd = list_entry(qtd->qtd_list.next, typeof(*qtd), qtd_list);
  466. return (qtd->urb != urb);
  467. }
  468. /* magic numbers that can affect system performance */
  469. #define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
  470. #define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
  471. #define EHCI_TUNE_RL_TT 0
  472. #define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
  473. #define EHCI_TUNE_MULT_TT 1
  474. #define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
  475. static void create_ptd_atl(struct isp1760_qh *qh,
  476. struct isp1760_qtd *qtd, struct ptd *ptd)
  477. {
  478. u32 maxpacket;
  479. u32 multi;
  480. u32 rl = RL_COUNTER;
  481. u32 nak = NAK_COUNTER;
  482. memset(ptd, 0, sizeof(*ptd));
  483. /* according to 3.6.2, max packet len can not be > 0x400 */
  484. maxpacket = usb_maxpacket(qtd->urb->dev, qtd->urb->pipe,
  485. usb_pipeout(qtd->urb->pipe));
  486. multi = 1 + ((maxpacket >> 11) & 0x3);
  487. maxpacket &= 0x7ff;
  488. /* DW0 */
  489. ptd->dw0 = DW0_VALID_BIT;
  490. ptd->dw0 |= TO_DW0_LENGTH(qtd->length);
  491. ptd->dw0 |= TO_DW0_MAXPACKET(maxpacket);
  492. ptd->dw0 |= TO_DW0_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe));
  493. /* DW1 */
  494. ptd->dw1 = usb_pipeendpoint(qtd->urb->pipe) >> 1;
  495. ptd->dw1 |= TO_DW1_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe));
  496. ptd->dw1 |= TO_DW1_PID_TOKEN(qtd->packet_type);
  497. if (usb_pipebulk(qtd->urb->pipe))
  498. ptd->dw1 |= DW1_TRANS_BULK;
  499. else if (usb_pipeint(qtd->urb->pipe))
  500. ptd->dw1 |= DW1_TRANS_INT;
  501. if (qtd->urb->dev->speed != USB_SPEED_HIGH) {
  502. /* split transaction */
  503. ptd->dw1 |= DW1_TRANS_SPLIT;
  504. if (qtd->urb->dev->speed == USB_SPEED_LOW)
  505. ptd->dw1 |= DW1_SE_USB_LOSPEED;
  506. ptd->dw1 |= TO_DW1_PORT_NUM(qtd->urb->dev->ttport);
  507. ptd->dw1 |= TO_DW1_HUB_NUM(qtd->urb->dev->tt->hub->devnum);
  508. /* SE bit for Split INT transfers */
  509. if (usb_pipeint(qtd->urb->pipe) &&
  510. (qtd->urb->dev->speed == USB_SPEED_LOW))
  511. ptd->dw1 |= 2 << 16;
  512. rl = 0;
  513. nak = 0;
  514. } else {
  515. ptd->dw0 |= TO_DW0_MULTI(multi);
  516. if (usb_pipecontrol(qtd->urb->pipe) ||
  517. usb_pipebulk(qtd->urb->pipe))
  518. ptd->dw3 |= TO_DW3_PING(qh->ping);
  519. }
  520. /* DW2 */
  521. ptd->dw2 = 0;
  522. ptd->dw2 |= TO_DW2_DATA_START_ADDR(base_to_chip(qtd->payload_addr));
  523. ptd->dw2 |= TO_DW2_RL(rl);
  524. /* DW3 */
  525. ptd->dw3 |= TO_DW3_NAKCOUNT(nak);
  526. ptd->dw3 |= TO_DW3_DATA_TOGGLE(qh->toggle);
  527. if (usb_pipecontrol(qtd->urb->pipe)) {
  528. if (qtd->data_buffer == qtd->urb->setup_packet)
  529. ptd->dw3 &= ~TO_DW3_DATA_TOGGLE(1);
  530. else if (last_qtd_of_urb(qtd, qh))
  531. ptd->dw3 |= TO_DW3_DATA_TOGGLE(1);
  532. }
  533. ptd->dw3 |= DW3_ACTIVE_BIT;
  534. /* Cerr */
  535. ptd->dw3 |= TO_DW3_CERR(ERR_COUNTER);
  536. }
  537. static void transform_add_int(struct isp1760_qh *qh,
  538. struct isp1760_qtd *qtd, struct ptd *ptd)
  539. {
  540. u32 usof;
  541. u32 period;
  542. /*
  543. * Most of this is guessing. ISP1761 datasheet is quite unclear, and
  544. * the algorithm from the original Philips driver code, which was
  545. * pretty much used in this driver before as well, is quite horrendous
  546. * and, I believe, incorrect. The code below follows the datasheet and
  547. * USB2.0 spec as far as I can tell, and plug/unplug seems to be much
  548. * more reliable this way (fingers crossed...).
  549. */
  550. if (qtd->urb->dev->speed == USB_SPEED_HIGH) {
  551. /* urb->interval is in units of microframes (1/8 ms) */
  552. period = qtd->urb->interval >> 3;
  553. if (qtd->urb->interval > 4)
  554. usof = 0x01; /* One bit set =>
  555. interval 1 ms * uFrame-match */
  556. else if (qtd->urb->interval > 2)
  557. usof = 0x22; /* Two bits set => interval 1/2 ms */
  558. else if (qtd->urb->interval > 1)
  559. usof = 0x55; /* Four bits set => interval 1/4 ms */
  560. else
  561. usof = 0xff; /* All bits set => interval 1/8 ms */
  562. } else {
  563. /* urb->interval is in units of frames (1 ms) */
  564. period = qtd->urb->interval;
  565. usof = 0x0f; /* Execute Start Split on any of the
  566. four first uFrames */
  567. /*
  568. * First 8 bits in dw5 is uSCS and "specifies which uSOF the
  569. * complete split needs to be sent. Valid only for IN." Also,
  570. * "All bits can be set to one for every transfer." (p 82,
  571. * ISP1761 data sheet.) 0x1c is from Philips driver. Where did
  572. * that number come from? 0xff seems to work fine...
  573. */
  574. /* ptd->dw5 = 0x1c; */
  575. ptd->dw5 = 0xff; /* Execute Complete Split on any uFrame */
  576. }
  577. period = period >> 1; /* Ensure equal or shorter period than requested */
  578. period &= 0xf8; /* Mask off too large values and lowest unused 3 bits */
  579. ptd->dw2 |= period;
  580. ptd->dw4 = usof;
  581. }
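/*
 * Two worked examples of the mapping above (numbers follow from the code, not
 * from the datasheet): a full-speed endpoint with urb->interval = 32 frames
 * ends up with period (32 >> 1) & 0xf8 = 16, i.e. it is polled every 16 ms,
 * with uSOF = 0x0f (start split in any of the first four microframes); a
 * high-speed endpoint with urb->interval = 16 microframes gets period
 * ((16 >> 3) >> 1) & 0xf8 = 0, which presumably means it is serviced every
 * frame, with uSOF = 0x01.
 */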
  582. static void create_ptd_int(struct isp1760_qh *qh,
  583. struct isp1760_qtd *qtd, struct ptd *ptd)
  584. {
  585. create_ptd_atl(qh, qtd, ptd);
  586. transform_add_int(qh, qtd, ptd);
  587. }
  588. static void isp1760_urb_done(struct usb_hcd *hcd, struct urb *urb)
  589. __releases(priv->lock)
  590. __acquires(priv->lock)
  591. {
  592. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  593. if (!urb->unlinked) {
  594. if (urb->status == -EINPROGRESS)
  595. urb->status = 0;
  596. }
  597. if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
  598. void *ptr;
  599. for (ptr = urb->transfer_buffer;
  600. ptr < urb->transfer_buffer + urb->transfer_buffer_length;
  601. ptr += PAGE_SIZE)
  602. flush_dcache_page(virt_to_page(ptr));
  603. }
  604. /* complete() can reenter this HCD */
  605. usb_hcd_unlink_urb_from_ep(hcd, urb);
  606. spin_unlock(&priv->lock);
  607. usb_hcd_giveback_urb(hcd, urb, urb->status);
  608. spin_lock(&priv->lock);
  609. }
  610. static struct isp1760_qtd *qtd_alloc(gfp_t flags, struct urb *urb,
  611. u8 packet_type)
  612. {
  613. struct isp1760_qtd *qtd;
  614. qtd = kmem_cache_zalloc(qtd_cachep, flags);
  615. if (!qtd)
  616. return NULL;
  617. INIT_LIST_HEAD(&qtd->qtd_list);
  618. qtd->urb = urb;
  619. qtd->packet_type = packet_type;
  620. qtd->status = QTD_ENQUEUED;
  621. qtd->actual_length = 0;
  622. return qtd;
  623. }
  624. static void qtd_free(struct isp1760_qtd *qtd)
  625. {
  626. WARN_ON(qtd->payload_addr);
  627. kmem_cache_free(qtd_cachep, qtd);
  628. }
  629. static void start_bus_transfer(struct usb_hcd *hcd, u32 ptd_offset, int slot,
  630. struct slotinfo *slots, struct isp1760_qtd *qtd,
  631. struct isp1760_qh *qh, struct ptd *ptd)
  632. {
  633. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  634. int skip_map;
  635. WARN_ON((slot < 0) || (slot > 31));
  636. WARN_ON(qtd->length && !qtd->payload_addr);
  637. WARN_ON(slots[slot].qtd);
  638. WARN_ON(slots[slot].qh);
  639. WARN_ON(qtd->status != QTD_PAYLOAD_ALLOC);
  640. /* Make sure done map has not triggered from some unlinked transfer */
  641. if (ptd_offset == ATL_PTD_OFFSET) {
  642. priv->atl_done_map |= reg_read32(hcd->regs,
  643. HC_ATL_PTD_DONEMAP_REG);
  644. priv->atl_done_map &= ~(1 << slot);
  645. } else {
  646. priv->int_done_map |= reg_read32(hcd->regs,
  647. HC_INT_PTD_DONEMAP_REG);
  648. priv->int_done_map &= ~(1 << slot);
  649. }
  650. qh->slot = slot;
  651. qtd->status = QTD_XFER_STARTED;
  652. slots[slot].timestamp = jiffies;
  653. slots[slot].qtd = qtd;
  654. slots[slot].qh = qh;
  655. ptd_write(hcd->regs, ptd_offset, slot, ptd);
  656. if (ptd_offset == ATL_PTD_OFFSET) {
  657. skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
  658. skip_map &= ~(1 << qh->slot);
  659. reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
  660. } else {
  661. skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
  662. skip_map &= ~(1 << qh->slot);
  663. reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
  664. }
  665. }
  666. static int is_short_bulk(struct isp1760_qtd *qtd)
  667. {
  668. return (usb_pipebulk(qtd->urb->pipe) &&
  669. (qtd->actual_length < qtd->length));
  670. }
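/*
 * Walk the queue head's qtd list and deal with every qtd the hardware has
 * finished: copy IN payloads out of chip memory, account transferred bytes,
 * free the payload memory, and put URBs whose last qtd has completed on
 * 'urb_list' so they can be given back after the scan (urb_done drops the
 * private lock, so it must not be called from in here). A retired qtd
 * propagates QTD_RETIRE to the remaining qtds of the same URB.
 */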
  671. static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
  672. struct list_head *urb_list)
  673. {
  674. int last_qtd;
  675. struct isp1760_qtd *qtd, *qtd_next;
  676. struct urb_listitem *urb_listitem;
  677. list_for_each_entry_safe(qtd, qtd_next, &qh->qtd_list, qtd_list) {
  678. if (qtd->status < QTD_XFER_COMPLETE)
  679. break;
  680. last_qtd = last_qtd_of_urb(qtd, qh);
  681. if ((!last_qtd) && (qtd->status == QTD_RETIRE))
  682. qtd_next->status = QTD_RETIRE;
  683. if (qtd->status == QTD_XFER_COMPLETE) {
  684. if (qtd->actual_length) {
  685. switch (qtd->packet_type) {
  686. case IN_PID:
  687. mem_reads8(hcd->regs, qtd->payload_addr,
  688. qtd->data_buffer,
  689. qtd->actual_length);
  690. /* Fall through (?) */
  691. case OUT_PID:
  692. qtd->urb->actual_length +=
  693. qtd->actual_length;
  694. /* Fall through ... */
  695. case SETUP_PID:
  696. break;
  697. }
  698. }
  699. if (is_short_bulk(qtd)) {
  700. if (qtd->urb->transfer_flags & URB_SHORT_NOT_OK)
  701. qtd->urb->status = -EREMOTEIO;
  702. if (!last_qtd)
  703. qtd_next->status = QTD_RETIRE;
  704. }
  705. }
  706. if (qtd->payload_addr)
  707. free_mem(hcd, qtd);
  708. if (last_qtd) {
  709. if ((qtd->status == QTD_RETIRE) &&
  710. (qtd->urb->status == -EINPROGRESS))
  711. qtd->urb->status = -EPIPE;
  712. /* Defer calling of urb_done() since it releases lock */
  713. urb_listitem = kmem_cache_zalloc(urb_listitem_cachep,
  714. GFP_ATOMIC);
  715. if (unlikely(!urb_listitem))
  716. break; /* Try again on next call */
  717. urb_listitem->urb = qtd->urb;
  718. list_add_tail(&urb_listitem->urb_list, urb_list);
  719. }
  720. list_del(&qtd->qtd_list);
  721. qtd_free(qtd);
  722. }
  723. }
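/*
 * Feed up to ENQUEUE_DEPTH qtds of one queue head to the chip: allocate
 * payload memory, copy SETUP/OUT data into it, and, if this endpoint does not
 * already own a PTD slot, build a PTD and start the transfer in the first
 * free ATL or INT slot.
 */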
  724. #define ENQUEUE_DEPTH 2
  725. static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
  726. {
  727. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  728. int ptd_offset;
  729. struct slotinfo *slots;
  730. int curr_slot, free_slot;
  731. int n;
  732. struct ptd ptd;
  733. struct isp1760_qtd *qtd;
  734. if (unlikely(list_empty(&qh->qtd_list))) {
  735. WARN_ON(1);
  736. return;
  737. }
  738. /* Make sure this endpoint's TT buffer is clean before queueing ptds */
  739. if (qh->tt_buffer_dirty)
  740. return;
  741. if (usb_pipeint(list_entry(qh->qtd_list.next, struct isp1760_qtd,
  742. qtd_list)->urb->pipe)) {
  743. ptd_offset = INT_PTD_OFFSET;
  744. slots = priv->int_slots;
  745. } else {
  746. ptd_offset = ATL_PTD_OFFSET;
  747. slots = priv->atl_slots;
  748. }
  749. free_slot = -1;
  750. for (curr_slot = 0; curr_slot < 32; curr_slot++) {
  751. if ((free_slot == -1) && (slots[curr_slot].qtd == NULL))
  752. free_slot = curr_slot;
  753. if (slots[curr_slot].qh == qh)
  754. break;
  755. }
  756. n = 0;
  757. list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
  758. if (qtd->status == QTD_ENQUEUED) {
  759. WARN_ON(qtd->payload_addr);
  760. alloc_mem(hcd, qtd);
  761. if ((qtd->length) && (!qtd->payload_addr))
  762. break;
  763. if ((qtd->length) &&
  764. ((qtd->packet_type == SETUP_PID) ||
  765. (qtd->packet_type == OUT_PID))) {
  766. mem_writes8(hcd->regs, qtd->payload_addr,
  767. qtd->data_buffer, qtd->length);
  768. }
  769. qtd->status = QTD_PAYLOAD_ALLOC;
  770. }
  771. if (qtd->status == QTD_PAYLOAD_ALLOC) {
  772. /*
  773. if ((curr_slot > 31) && (free_slot == -1))
  774. dev_dbg(hcd->self.controller, "%s: No slot "
  775. "available for transfer\n", __func__);
  776. */
  777. /* Start xfer for this endpoint if not already done */
  778. if ((curr_slot > 31) && (free_slot > -1)) {
  779. if (usb_pipeint(qtd->urb->pipe))
  780. create_ptd_int(qh, qtd, &ptd);
  781. else
  782. create_ptd_atl(qh, qtd, &ptd);
  783. start_bus_transfer(hcd, ptd_offset, free_slot,
  784. slots, qtd, qh, &ptd);
  785. curr_slot = free_slot;
  786. }
  787. n++;
  788. if (n >= ENQUEUE_DEPTH)
  789. break;
  790. }
  791. }
  792. }
  793. static void schedule_ptds(struct usb_hcd *hcd)
  794. {
  795. struct isp1760_hcd *priv;
  796. struct isp1760_qh *qh, *qh_next;
  797. struct list_head *ep_queue;
  798. LIST_HEAD(urb_list);
  799. struct urb_listitem *urb_listitem, *urb_listitem_next;
  800. int i;
  801. if (!hcd) {
  802. WARN_ON(1);
  803. return;
  804. }
  805. priv = hcd_to_priv(hcd);
  806. /*
  807. * check finished/retired xfers, transfer payloads, call urb_done()
  808. */
  809. for (i = 0; i < QH_END; i++) {
  810. ep_queue = &priv->qh_list[i];
  811. list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list) {
  812. collect_qtds(hcd, qh, &urb_list);
  813. if (list_empty(&qh->qtd_list))
  814. list_del(&qh->qh_list);
  815. }
  816. }
  817. list_for_each_entry_safe(urb_listitem, urb_listitem_next, &urb_list,
  818. urb_list) {
  819. isp1760_urb_done(hcd, urb_listitem->urb);
  820. kmem_cache_free(urb_listitem_cachep, urb_listitem);
  821. }
  822. /*
  823. * Schedule packets for transfer.
  824. *
  825. * According to USB2.0 specification:
  826. *
  827. * 1st prio: interrupt xfers, up to 80 % of bandwidth
  828. * 2nd prio: control xfers
  829. * 3rd prio: bulk xfers
  830. *
  831. * ... but let's use a simpler scheme here (mostly because ISP1761 doc
  832. * is very unclear on how to prioritize traffic):
  833. *
  834. * 1) Enqueue any queued control transfers, as long as payload chip mem
  835. * and PTD ATL slots are available.
  836. * 2) Enqueue any queued INT transfers, as long as payload chip mem
  837. * and PTD INT slots are available.
  838. * 3) Enqueue any queued bulk transfers, as long as payload chip mem
  839. * and PTD ATL slots are available.
  840. *
  841. * Use double buffering (ENQUEUE_DEPTH==2) as a compromise between
  842. * conservation of chip mem and performance.
  843. *
  844. * I'm sure this scheme could be improved upon!
  845. */
  846. for (i = 0; i < QH_END; i++) {
  847. ep_queue = &priv->qh_list[i];
  848. list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list)
  849. enqueue_qtds(hcd, qh);
  850. }
  851. }
  852. #define PTD_STATE_QTD_DONE 1
  853. #define PTD_STATE_QTD_RELOAD 2
  854. #define PTD_STATE_URB_RETIRE 3
  855. static int check_int_transfer(struct usb_hcd *hcd, struct ptd *ptd,
  856. struct urb *urb)
  857. {
  858. __dw dw4;
  859. int i;
  860. dw4 = ptd->dw4;
  861. dw4 >>= 8;
  862. /* FIXME: ISP1761 datasheet does not say what to do with these. Do we
  863. need to handle these errors? Is it done in hardware? */
  864. if (ptd->dw3 & DW3_HALT_BIT) {
  865. urb->status = -EPROTO; /* Default unknown error */
  866. for (i = 0; i < 8; i++) {
  867. switch (dw4 & 0x7) {
  868. case INT_UNDERRUN:
  869. dev_dbg(hcd->self.controller, "%s: underrun "
  870. "during uFrame %d\n",
  871. __func__, i);
  872. urb->status = -ECOMM; /* Could not write data */
  873. break;
  874. case INT_EXACT:
  875. dev_dbg(hcd->self.controller, "%s: transaction "
  876. "error during uFrame %d\n",
  877. __func__, i);
  878. urb->status = -EPROTO; /* timeout, bad CRC, PID
  879. error etc. */
  880. break;
  881. case INT_BABBLE:
  882. dev_dbg(hcd->self.controller, "%s: babble "
  883. "error during uFrame %d\n",
  884. __func__, i);
  885. urb->status = -EOVERFLOW;
  886. break;
  887. }
  888. dw4 >>= 3;
  889. }
  890. return PTD_STATE_URB_RETIRE;
  891. }
  892. return PTD_STATE_QTD_DONE;
  893. }
  894. static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd,
  895. struct urb *urb)
  896. {
  897. WARN_ON(!ptd);
  898. if (ptd->dw3 & DW3_HALT_BIT) {
  899. if (ptd->dw3 & DW3_BABBLE_BIT)
  900. urb->status = -EOVERFLOW;
  901. else if (FROM_DW3_CERR(ptd->dw3))
  902. urb->status = -EPIPE; /* Stall */
  903. else if (ptd->dw3 & DW3_ERROR_BIT)
  904. urb->status = -EPROTO; /* XactErr */
  905. else
  906. urb->status = -EPROTO; /* Unknown */
  907. /*
  908. dev_dbg(hcd->self.controller, "%s: ptd error:\n"
  909. " dw0: %08x dw1: %08x dw2: %08x dw3: %08x\n"
  910. " dw4: %08x dw5: %08x dw6: %08x dw7: %08x\n",
  911. __func__,
  912. ptd->dw0, ptd->dw1, ptd->dw2, ptd->dw3,
  913. ptd->dw4, ptd->dw5, ptd->dw6, ptd->dw7);
  914. */
  915. return PTD_STATE_URB_RETIRE;
  916. }
  917. if ((ptd->dw3 & DW3_ERROR_BIT) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
  918. /* Transfer Error, *but* active and no HALT -> reload */
  919. dev_dbg(hcd->self.controller, "PID error; reloading ptd\n");
  920. return PTD_STATE_QTD_RELOAD;
  921. }
  922. if (!FROM_DW3_NAKCOUNT(ptd->dw3) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
  923. /*
  924. * NAKs are handled in HW by the chip, usually when the device is not
  925. * able to send data fast enough. This happens mostly on slower
  926. * hardware.
  927. */
  928. return PTD_STATE_QTD_RELOAD;
  929. }
  930. return PTD_STATE_QTD_DONE;
  931. }
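/*
 * Handle every slot flagged in the INT and ATL done maps: read the PTD back,
 * classify the outcome (qtd done, reload the PTD, or retire the whole URB),
 * update the endpoint's data toggle and ping state, and start the next
 * pending transfer for that queue head if one is ready. If anything was
 * handled, run the scheduler afterwards.
 */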
  932. static void handle_done_ptds(struct usb_hcd *hcd)
  933. {
  934. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  935. struct ptd ptd;
  936. struct isp1760_qh *qh;
  937. int slot;
  938. int state;
  939. struct slotinfo *slots;
  940. u32 ptd_offset;
  941. struct isp1760_qtd *qtd;
  942. int modified;
  943. int skip_map;
  944. skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
  945. priv->int_done_map &= ~skip_map;
  946. skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
  947. priv->atl_done_map &= ~skip_map;
  948. modified = priv->int_done_map || priv->atl_done_map;
  949. while (priv->int_done_map || priv->atl_done_map) {
  950. if (priv->int_done_map) {
  951. /* INT ptd */
  952. slot = __ffs(priv->int_done_map);
  953. priv->int_done_map &= ~(1 << slot);
  954. slots = priv->int_slots;
  955. /* This should not trigger, and could be removed if
  956. no one has any problems with it triggering: */
  957. if (!slots[slot].qh) {
  958. WARN_ON(1);
  959. continue;
  960. }
  961. ptd_offset = INT_PTD_OFFSET;
  962. ptd_read(hcd->regs, INT_PTD_OFFSET, slot, &ptd);
  963. state = check_int_transfer(hcd, &ptd,
  964. slots[slot].qtd->urb);
  965. } else {
  966. /* ATL ptd */
  967. slot = __ffs(priv->atl_done_map);
  968. priv->atl_done_map &= ~(1 << slot);
  969. slots = priv->atl_slots;
  970. /* This should not trigger, and could be removed if
  971. no one has any problems with it triggering: */
  972. if (!slots[slot].qh) {
  973. WARN_ON(1);
  974. continue;
  975. }
  976. ptd_offset = ATL_PTD_OFFSET;
  977. ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
  978. state = check_atl_transfer(hcd, &ptd,
  979. slots[slot].qtd->urb);
  980. }
  981. qtd = slots[slot].qtd;
  982. slots[slot].qtd = NULL;
  983. qh = slots[slot].qh;
  984. slots[slot].qh = NULL;
  985. qh->slot = -1;
  986. WARN_ON(qtd->status != QTD_XFER_STARTED);
  987. switch (state) {
  988. case PTD_STATE_QTD_DONE:
  989. if ((usb_pipeint(qtd->urb->pipe)) &&
  990. (qtd->urb->dev->speed != USB_SPEED_HIGH))
  991. qtd->actual_length =
  992. FROM_DW3_SCS_NRBYTESTRANSFERRED(ptd.dw3);
  993. else
  994. qtd->actual_length =
  995. FROM_DW3_NRBYTESTRANSFERRED(ptd.dw3);
  996. qtd->status = QTD_XFER_COMPLETE;
  997. if (list_is_last(&qtd->qtd_list, &qh->qtd_list) ||
  998. is_short_bulk(qtd))
  999. qtd = NULL;
  1000. else
  1001. qtd = list_entry(qtd->qtd_list.next,
  1002. typeof(*qtd), qtd_list);
  1003. qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
  1004. qh->ping = FROM_DW3_PING(ptd.dw3);
  1005. break;
  1006. case PTD_STATE_QTD_RELOAD: /* QTD_RETRY, for atls only */
  1007. qtd->status = QTD_PAYLOAD_ALLOC;
  1008. ptd.dw0 |= DW0_VALID_BIT;
  1009. /* RL counter = ERR counter */
  1010. ptd.dw3 &= ~TO_DW3_NAKCOUNT(0xf);
  1011. ptd.dw3 |= TO_DW3_NAKCOUNT(FROM_DW2_RL(ptd.dw2));
  1012. ptd.dw3 &= ~TO_DW3_CERR(3);
  1013. ptd.dw3 |= TO_DW3_CERR(ERR_COUNTER);
  1014. qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
  1015. qh->ping = FROM_DW3_PING(ptd.dw3);
  1016. break;
  1017. case PTD_STATE_URB_RETIRE:
  1018. qtd->status = QTD_RETIRE;
  1019. if ((qtd->urb->dev->speed != USB_SPEED_HIGH) &&
  1020. (qtd->urb->status != -EPIPE) &&
  1021. (qtd->urb->status != -EREMOTEIO)) {
  1022. qh->tt_buffer_dirty = 1;
  1023. if (usb_hub_clear_tt_buffer(qtd->urb))
  1024. /* Clear failed; let's hope things work
  1025. anyway */
  1026. qh->tt_buffer_dirty = 0;
  1027. }
  1028. qtd = NULL;
  1029. qh->toggle = 0;
  1030. qh->ping = 0;
  1031. break;
  1032. default:
  1033. WARN_ON(1);
  1034. continue;
  1035. }
  1036. if (qtd && (qtd->status == QTD_PAYLOAD_ALLOC)) {
  1037. if (slots == priv->int_slots) {
  1038. if (state == PTD_STATE_QTD_RELOAD)
  1039. dev_err(hcd->self.controller,
  1040. "%s: PTD_STATE_QTD_RELOAD on "
  1041. "interrupt packet\n", __func__);
  1042. if (state != PTD_STATE_QTD_RELOAD)
  1043. create_ptd_int(qh, qtd, &ptd);
  1044. } else {
  1045. if (state != PTD_STATE_QTD_RELOAD)
  1046. create_ptd_atl(qh, qtd, &ptd);
  1047. }
  1048. start_bus_transfer(hcd, ptd_offset, slot, slots, qtd,
  1049. qh, &ptd);
  1050. }
  1051. }
  1052. if (modified)
  1053. schedule_ptds(hcd);
  1054. }
  1055. static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
  1056. {
  1057. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  1058. u32 imask;
  1059. irqreturn_t irqret = IRQ_NONE;
  1060. spin_lock(&priv->lock);
  1061. if (!(hcd->state & HC_STATE_RUNNING))
  1062. goto leave;
  1063. imask = reg_read32(hcd->regs, HC_INTERRUPT_REG);
  1064. if (unlikely(!imask))
  1065. goto leave;
  1066. reg_write32(hcd->regs, HC_INTERRUPT_REG, imask); /* Clear */
  1067. priv->int_done_map |= reg_read32(hcd->regs, HC_INT_PTD_DONEMAP_REG);
  1068. priv->atl_done_map |= reg_read32(hcd->regs, HC_ATL_PTD_DONEMAP_REG);
  1069. handle_done_ptds(hcd);
  1070. irqret = IRQ_HANDLED;
  1071. leave:
  1072. spin_unlock(&priv->lock);
  1073. return irqret;
  1074. }
  1075. /*
  1076. * Workaround for problem described in chip errata 2:
  1077. *
  1078. * Sometimes interrupts are not generated when ATL (not INT?) completion occurs.
  1079. * One solution suggested in the errata is to use SOF interrupts _instead_of_
  1080. * ATL done interrupts (the "instead of" might be important since it seems
  1081. * enabling ATL interrupts also causes the chip to sometimes - rarely - "forget"
  1082. * to set the PTD's done bit in addition to not generating an interrupt!).
  1083. *
  1084. * So if we use SOF + ATL interrupts, we sometimes get stale PTDs since their
  1085. * done bit is not being set. This is bad - it blocks the endpoint until reboot.
  1086. *
  1087. * If we use SOF interrupts only, we get latency between ptd completion and the
  1088. * actual handling. This is very noticeable in testusb runs, which take several
  1089. * minutes longer without ATL interrupts.
  1090. *
  1091. * A better solution is to run the code below every SLOT_CHECK_PERIOD ms. If it
  1092. * finds active ATL slots which are older than SLOT_TIMEOUT ms, it checks the
  1093. * slot's ACTIVE and VALID bits. If these are not set, the ptd is considered
  1094. * completed and its done map bit is set.
  1095. *
  1096. * The values of SLOT_TIMEOUT and SLOT_CHECK_PERIOD have been arbitrarily chosen
  1097. * not to cause too much lag when this HW bug occurs, while still hopefully
  1098. * ensuring that the check does not falsely trigger.
  1099. */
  1100. #define SLOT_TIMEOUT 300
  1101. #define SLOT_CHECK_PERIOD 200
  1102. static struct timer_list errata2_timer;
  1103. static void errata2_function(unsigned long data)
  1104. {
  1105. struct usb_hcd *hcd = (struct usb_hcd *) data;
  1106. struct isp1760_hcd *priv = hcd_to_priv(hcd);
  1107. int slot;
  1108. struct ptd ptd;
  1109. unsigned long spinflags;
  1110. spin_lock_irqsave(&priv->lock, spinflags);
  1111. for (slot = 0; slot < 32; slot++)
  1112. if (priv->atl_slots[slot].qh && time_after(jiffies,
  1113. priv->atl_slots[slot].timestamp +
  1114. SLOT_TIMEOUT * HZ / 1000)) {
  1115. ptd_read(hcd->regs, ATL_PTD_OFFSET, slot, &ptd);
  1116. if (!FROM_DW0_VALID(ptd.dw0) &&
  1117. !FROM_DW3_ACTIVE(ptd.dw3))
  1118. priv->atl_done_map |= 1 << slot;
  1119. }
  1120. if (priv->atl_done_map)
  1121. handle_done_ptds(hcd);
  1122. spin_unlock_irqrestore(&priv->lock, spinflags);
  1123. errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
  1124. add_timer(&errata2_timer);
  1125. }
  1126. static int isp1760_run(struct usb_hcd *hcd)
  1127. {
  1128. int retval;
  1129. u32 temp;
  1130. u32 command;
  1131. u32 chipid;
  1132. hcd->uses_new_polling = 1;
  1133. hcd->state = HC_STATE_RUNNING;
  1134. /* Set PTD interrupt AND & OR maps */
  1135. reg_write32(hcd->regs, HC_ATL_IRQ_MASK_AND_REG, 0);
  1136. reg_write32(hcd->regs, HC_ATL_IRQ_MASK_OR_REG, 0xffffffff);
  1137. reg_write32(hcd->regs, HC_INT_IRQ_MASK_AND_REG, 0);
  1138. reg_write32(hcd->regs, HC_INT_IRQ_MASK_OR_REG, 0xffffffff);
  1139. reg_write32(hcd->regs, HC_ISO_IRQ_MASK_AND_REG, 0);
  1140. reg_write32(hcd->regs, HC_ISO_IRQ_MASK_OR_REG, 0xffffffff);
  1141. /* step 23 passed */
  1142. temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
  1143. reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp | HW_GLOBAL_INTR_EN);
  1144. command = reg_read32(hcd->regs, HC_USBCMD);
  1145. command &= ~(CMD_LRESET|CMD_RESET);
  1146. command |= CMD_RUN;
  1147. reg_write32(hcd->regs, HC_USBCMD, command);
  1148. retval = handshake(hcd, HC_USBCMD, CMD_RUN, CMD_RUN, 250 * 1000);
  1149. if (retval)
  1150. return retval;
  1151. /*
  1152. * XXX
  1153. * Spec says to write FLAG_CF as last config action, priv code grabs
  1154. * the semaphore while doing so.
  1155. */
  1156. down_write(&ehci_cf_port_reset_rwsem);
  1157. reg_write32(hcd->regs, HC_CONFIGFLAG, FLAG_CF);
  1158. retval = handshake(hcd, HC_CONFIGFLAG, FLAG_CF, FLAG_CF, 250 * 1000);
  1159. up_write(&ehci_cf_port_reset_rwsem);
  1160. if (retval)
  1161. return retval;
  1162. init_timer(&errata2_timer);
  1163. errata2_timer.function = errata2_function;
  1164. errata2_timer.data = (unsigned long) hcd;
  1165. errata2_timer.expires = jiffies + SLOT_CHECK_PERIOD * HZ / 1000;
  1166. add_timer(&errata2_timer);
  1167. chipid = reg_read32(hcd->regs, HC_CHIP_ID_REG);
  1168. dev_info(hcd->self.controller, "USB ISP %04x HW rev. %d started\n",
  1169. chipid & 0xffff, chipid >> 16);
  1170. /* PTD Register Init Part 2, Step 28 */
  1171. /* Setup registers controlling PTD checking */
  1172. reg_write32(hcd->regs, HC_ATL_PTD_LASTPTD_REG, 0x80000000);
  1173. reg_write32(hcd->regs, HC_INT_PTD_LASTPTD_REG, 0x80000000);
  1174. reg_write32(hcd->regs, HC_ISO_PTD_LASTPTD_REG, 0x00000001);
  1175. reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, 0xffffffff);
  1176. reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, 0xffffffff);
  1177. reg_write32(hcd->regs, HC_ISO_PTD_SKIPMAP_REG, 0xffffffff);
  1178. reg_write32(hcd->regs, HC_BUFFER_STATUS_REG,
  1179. ATL_BUF_FILL | INT_BUF_FILL);
  1180. /* GRR this is run-once init(), being done every time the HC starts.
  1181. * So long as they're part of class devices, we can't do it in init()
  1182. * since the class device isn't created that early.
  1183. */
  1184. return 0;
  1185. }
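/*
 * Attach a data buffer to the qtd, clamping the length to MAX_PAYLOAD_SIZE;
 * the return value tells the caller how much of the buffer this qtd covers,
 * so larger transfers are split across several qtds by packetize_urb().
 */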
  1186. static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len)
  1187. {
  1188. qtd->data_buffer = databuffer;
  1189. if (len > MAX_PAYLOAD_SIZE)
  1190. len = MAX_PAYLOAD_SIZE;
  1191. qtd->length = len;
  1192. return qtd->length;
  1193. }
  1194. static void qtd_list_free(struct list_head *qtd_list)
  1195. {
  1196. struct isp1760_qtd *qtd, *qtd_next;
  1197. list_for_each_entry_safe(qtd, qtd_next, qtd_list, qtd_list) {
  1198. list_del(&qtd->qtd_list);
  1199. qtd_free(qtd);
  1200. }
  1201. }
  1202. /*
  1203. * Packetize urb->transfer_buffer into list of packets of size wMaxPacketSize.
  1204. * Also calculate the PID type (SETUP/IN/OUT) for each packet.
  1205. */
  1206. #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
  1207. static void packetize_urb(struct usb_hcd *hcd,
  1208. struct urb *urb, struct list_head *head, gfp_t flags)
  1209. {
  1210. struct isp1760_qtd *qtd;
  1211. void *buf;
  1212. int len, maxpacketsize;
  1213. u8 packet_type;
  1214. /*
  1215. * URBs map to sequences of QTDs: one logical transaction
  1216. */
  1217. if (!urb->transfer_buffer && urb->transfer_buffer_length) {
  1218. /* XXX This looks like usb storage / SCSI bug */
  1219. dev_err(hcd->self.controller,
  1220. "buf is null, dma is %08lx len is %d\n",
  1221. (long unsigned)urb->transfer_dma,
  1222. urb->transfer_buffer_length);
  1223. WARN_ON(1);
  1224. }
  1225. if (usb_pipein(urb->pipe))
  1226. packet_type = IN_PID;
  1227. else
  1228. packet_type = OUT_PID;
  1229. if (usb_pipecontrol(urb->pipe)) {
  1230. qtd = qtd_alloc(flags, urb, SETUP_PID);
  1231. if (!qtd)
  1232. goto cleanup;
  1233. qtd_fill(qtd, urb->setup_packet, sizeof(struct usb_ctrlrequest));
  1234. list_add_tail(&qtd->qtd_list, head);
  1235. /* for zero length DATA stages, STATUS is always IN */
  1236. if (urb->transfer_buffer_length == 0)
  1237. packet_type = IN_PID;
  1238. }
  1239. maxpacketsize = max_packet(usb_maxpacket(urb->dev, urb->pipe,
  1240. usb_pipeout(urb->pipe)));
  1241. /*
  1242. * buffer gets wrapped in one or more qtds;
  1243. * last one may be "short" (including zero len)
  1244. * and may serve as a control status ack
  1245. */
  1246. buf = urb->transfer_buffer;
  1247. len = urb->transfer_buffer_length;
  1248. for (;;) {
  1249. int this_qtd_len;
  1250. qtd = qtd_alloc(flags, urb, packet_type);
  1251. if (!qtd)
  1252. goto cleanup;
  1253. this_qtd_len = qtd_fill(qtd, buf, len);
  1254. list_add_tail(&qtd->qtd_list, head);
  1255. len -= this_qtd_len;
  1256. buf += this_qtd_len;
  1257. if (len <= 0)
  1258. break;
  1259. }
  1260. /*
  1261. * control requests may need a terminating data "status" ack;
  1262. * bulk ones may need a terminating short packet (zero length).
  1263. */
  1264. if (urb->transfer_buffer_length != 0) {
  1265. int one_more = 0;
  1266. if (usb_pipecontrol(urb->pipe)) {
  1267. one_more = 1;
  1268. if (packet_type == IN_PID)
  1269. packet_type = OUT_PID;
  1270. else
  1271. packet_type = IN_PID;
  1272. } else if (usb_pipebulk(urb->pipe)
  1273. && (urb->transfer_flags & URB_ZERO_PACKET)
  1274. && !(urb->transfer_buffer_length %
  1275. maxpacketsize)) {
  1276. one_more = 1;
  1277. }
  1278. if (one_more) {
  1279. qtd = qtd_alloc(flags, urb, packet_type);
  1280. if (!qtd)
  1281. goto cleanup;
  1282. /* never any data in such packets */
  1283. qtd_fill(qtd, NULL, 0);
  1284. list_add_tail(&qtd->qtd_list, head);
  1285. }
  1286. }
  1287. return;
  1288. cleanup:
  1289. qtd_list_free(head);
  1290. }
static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
		gfp_t mem_flags)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	struct list_head *ep_queue;
	struct isp1760_qh *qh, *qhit;
	unsigned long spinflags;
	LIST_HEAD(new_qtds);
	int retval;
	int qh_in_queue;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ep_queue = &priv->qh_list[QH_CONTROL];
		break;
	case PIPE_BULK:
		ep_queue = &priv->qh_list[QH_BULK];
		break;
	case PIPE_INTERRUPT:
		if (urb->interval < 0)
			return -EINVAL;
		/* FIXME: Check bandwidth */
		ep_queue = &priv->qh_list[QH_INTERRUPT];
		break;
	case PIPE_ISOCHRONOUS:
		dev_err(hcd->self.controller, "%s: isochronous USB packets "
				"not yet supported\n", __func__);
		return -EPIPE;
	default:
		dev_err(hcd->self.controller, "%s: unknown pipe type\n",
				__func__);
		return -EPIPE;
	}

	if (usb_pipein(urb->pipe))
		urb->actual_length = 0;

	packetize_urb(hcd, urb, &new_qtds, mem_flags);
	if (list_empty(&new_qtds))
		return -ENOMEM;

	retval = 0;
	spin_lock_irqsave(&priv->lock, spinflags);

	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
		retval = -ESHUTDOWN;
		qtd_list_free(&new_qtds);
		goto out;
	}
	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval) {
		qtd_list_free(&new_qtds);
		goto out;
	}

	qh = urb->ep->hcpriv;
	if (qh) {
		qh_in_queue = 0;
		list_for_each_entry(qhit, ep_queue, qh_list) {
			if (qhit == qh) {
				qh_in_queue = 1;
				break;
			}
		}
		if (!qh_in_queue)
			list_add_tail(&qh->qh_list, ep_queue);
	} else {
		qh = qh_alloc(GFP_ATOMIC);
		if (!qh) {
			retval = -ENOMEM;
			usb_hcd_unlink_urb_from_ep(hcd, urb);
			qtd_list_free(&new_qtds);
			goto out;
		}
		list_add_tail(&qh->qh_list, ep_queue);
		urb->ep->hcpriv = qh;
	}

	list_splice_tail(&new_qtds, &qh->qtd_list);
	schedule_ptds(hcd);

out:
	spin_unlock_irqrestore(&priv->lock, spinflags);
	return retval;
}

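/*
 * Force an in-flight transfer off the chip by setting the skip bit for its
 * ATL or INT slot and clearing the slot's bookkeeping.
 */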
static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
		struct isp1760_qh *qh)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	int skip_map;

	WARN_ON(qh->slot == -1);

	/* We need to forcefully reclaim the slot since some transfers never
	   return, e.g. interrupt transfers and NAKed bulk transfers. */
	if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe)) {
		skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
		skip_map |= (1 << qh->slot);
		reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
		priv->atl_slots[qh->slot].qh = NULL;
		priv->atl_slots[qh->slot].qtd = NULL;
	} else {
		skip_map = reg_read32(hcd->regs, HC_INT_PTD_SKIPMAP_REG);
		skip_map |= (1 << qh->slot);
		reg_write32(hcd->regs, HC_INT_PTD_SKIPMAP_REG, skip_map);
		priv->int_slots[qh->slot].qh = NULL;
		priv->int_slots[qh->slot].qtd = NULL;
	}

	qh->slot = -1;
}

/*
 * Retire the qtds beginning at 'qtd' and belonging all to the same urb, killing
 * any active transfer belonging to the urb in the process.
 */
static void dequeue_urb_from_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh,
		struct isp1760_qtd *qtd)
{
	struct urb *urb;
	int urb_was_running;

	urb = qtd->urb;
	urb_was_running = 0;
	list_for_each_entry_from(qtd, &qh->qtd_list, qtd_list) {
		if (qtd->urb != urb)
			break;

		if (qtd->status >= QTD_XFER_STARTED)
			urb_was_running = 1;
		if (last_qtd_of_urb(qtd, qh) &&
				(qtd->status >= QTD_XFER_COMPLETE))
			urb_was_running = 0;

		if (qtd->status == QTD_XFER_STARTED)
			kill_transfer(hcd, urb, qh);
		qtd->status = QTD_RETIRE;
	}

	if ((urb->dev->speed != USB_SPEED_HIGH) && urb_was_running) {
		qh->tt_buffer_dirty = 1;
		if (usb_hub_clear_tt_buffer(urb))
			/* Clear failed; let's hope things work anyway */
			qh->tt_buffer_dirty = 0;
	}
}

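/*
 * HCD entry point for cancelling an urb: retire its qtds, record the status
 * requested by usbcore and let the scheduler give the urb back.
 */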
static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
		int status)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	unsigned long spinflags;
	struct isp1760_qh *qh;
	struct isp1760_qtd *qtd;
	int retval = 0;

	spin_lock_irqsave(&priv->lock, spinflags);
	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (retval)
		goto out;

	qh = urb->ep->hcpriv;
	if (!qh) {
		retval = -EINVAL;
		goto out;
	}

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list)
		if (qtd->urb == urb) {
			dequeue_urb_from_qtd(hcd, qh, qtd);
			list_move(&qtd->qtd_list, &qh->qtd_list);
			break;
		}

	urb->status = status;
	schedule_ptds(hcd);

out:
	spin_unlock_irqrestore(&priv->lock, spinflags);
	return retval;
}

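/*
 * Release the qh of an endpoint usbcore is done with: unlink it from
 * whichever queue-head list it sits on and free it.
 */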
static void isp1760_endpoint_disable(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	unsigned long spinflags;
	struct isp1760_qh *qh, *qh_iter;
	int i;

	spin_lock_irqsave(&priv->lock, spinflags);

	qh = ep->hcpriv;
	if (!qh)
		goto out;

	WARN_ON(!list_empty(&qh->qtd_list));

	for (i = 0; i < QH_END; i++)
		list_for_each_entry(qh_iter, &priv->qh_list[i], qh_list)
			if (qh_iter == qh) {
				list_del(&qh_iter->qh_list);
				i = QH_END;
				break;
			}
	qh_free(qh);
	ep->hcpriv = NULL;

	schedule_ptds(hcd);

out:
	spin_unlock_irqrestore(&priv->lock, spinflags);
}

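/*
 * Root hub status polling: report a change on the root port by setting
 * bit 1 of *buf, following the EHCI root hub convention.
 */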
static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	u32 temp, status = 0;
	u32 mask;
	int retval = 1;
	unsigned long flags;

	/* if !PM_RUNTIME, root hub timers won't get shut down ... */
	if (!HC_IS_RUNNING(hcd->state))
		return 0;

	/* init status to no-changes */
	buf[0] = 0;
	mask = PORT_CSC;

	spin_lock_irqsave(&priv->lock, flags);
	temp = reg_read32(hcd->regs, HC_PORTSC1);

	if (temp & PORT_OWNER) {
		if (temp & PORT_CSC) {
			temp &= ~PORT_CSC;
			reg_write32(hcd->regs, HC_PORTSC1, temp);
			goto done;
		}
	}

	/*
	 * Return status information even for ports with OWNER set.
	 * Otherwise khubd wouldn't see the disconnect event when a
	 * high-speed device is switched over to the companion
	 * controller by the user.
	 */
	if ((temp & mask) != 0
			|| ((temp & PORT_RESUME) != 0
				&& time_after_eq(jiffies, priv->reset_done))) {
		buf[0] |= 1 << (0 + 1);
		status = STS_PCD;
	}
	/* FIXME autosuspend idle root hubs */
done:
	spin_unlock_irqrestore(&priv->lock, flags);
	return status ? retval : 0;
}

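/* Build the root hub descriptor from the controller's HCS parameters. */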
static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
		struct usb_hub_descriptor *desc)
{
	int ports = HCS_N_PORTS(priv->hcs_params);
	u16 temp;

	desc->bDescriptorType = 0x29;
	/* EHCI 1.0, 2.3.9 says 20ms max */
	desc->bPwrOn2PwrGood = 10;
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = ports;
	temp = 1 + (ports / 8);
	desc->bDescLength = 7 + 2 * temp;

	/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
	memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
	memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);

	/* per-port overcurrent reporting */
	temp = 0x0008;
	if (HCS_PPC(priv->hcs_params))
		/* per-port power control */
		temp |= 0x0001;
	else
		/* no power switching */
		temp |= 0x0002;
	desc->wHubCharacteristics = cpu_to_le16(temp);
}

#define PORT_WAKE_BITS	(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)

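/*
 * Called after a port reset has finished: if the device did not enable as
 * high speed, mark the port as owned by a companion controller (PORT_OWNER),
 * mirroring the EHCI handoff scheme.
 */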
static int check_reset_complete(struct usb_hcd *hcd, int index,
		int port_status)
{
	if (!(port_status & PORT_CONNECT))
		return port_status;

	/* if reset finished and it's still not enabled -- handoff */
	if (!(port_status & PORT_PE)) {
		dev_info(hcd->self.controller,
				"port %d full speed --> companion\n",
				index + 1);

		port_status |= PORT_OWNER;
		port_status &= ~PORT_RWC_BITS;
		reg_write32(hcd->regs, HC_PORTSC1, port_status);
	} else
		dev_info(hcd->self.controller, "port %d high speed\n",
				index + 1);

	return port_status;
}

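/*
 * Root hub control requests, modelled on the EHCI root hub: hub and port
 * features are handled in software on top of the HC_PORTSC1 register.
 */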
static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
		u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	int ports = HCS_N_PORTS(priv->hcs_params);
	u32 temp, status;
	unsigned long flags;
	int retval = 0;
	unsigned selector;

	/*
	 * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
	 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
	 * (track current state ourselves) ... blink for diagnostics,
	 * power, "this is the one", etc. EHCI spec supports this.
	 */

	spin_lock_irqsave(&priv->lock, flags);
	switch (typeReq) {
	case ClearHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case ClearPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = reg_read32(hcd->regs, HC_PORTSC1);

		/*
		 * Even if OWNER is set, so the port is owned by the
		 * companion controller, khubd needs to be able to clear
		 * the port-change status bits (especially
		 * USB_PORT_STAT_C_CONNECTION).
		 */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_PE);
			break;
		case USB_PORT_FEAT_C_ENABLE:
			/* XXX error? */
			break;
		case USB_PORT_FEAT_SUSPEND:
			if (temp & PORT_RESET)
				goto error;

			if (temp & PORT_SUSPEND) {
				if ((temp & PORT_PE) == 0)
					goto error;
				/* resume signaling for 20 msec */
				temp &= ~(PORT_RWC_BITS);
				reg_write32(hcd->regs, HC_PORTSC1,
							temp | PORT_RESUME);
				priv->reset_done = jiffies +
					msecs_to_jiffies(20);
			}
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			/* we auto-clear this feature */
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(priv->hcs_params))
				reg_write32(hcd->regs, HC_PORTSC1,
							temp & ~PORT_POWER);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_CSC);
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			/* XXX error? */
			break;
		case USB_PORT_FEAT_C_RESET:
			/* GetPortStatus clears reset */
			break;
		default:
			goto error;
		}
		reg_read32(hcd->regs, HC_USBCMD);
		break;
	case GetHubDescriptor:
		isp1760_hub_descriptor(priv, (struct usb_hub_descriptor *)
			buf);
		break;
	case GetHubStatus:
		/* no hub-wide feature/status flags */
		memset(buf, 0, 4);
		break;
	case GetPortStatus:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		status = 0;
		temp = reg_read32(hcd->regs, HC_PORTSC1);

		/* wPortChange bits */
		if (temp & PORT_CSC)
			status |= USB_PORT_STAT_C_CONNECTION << 16;

		/* whoever resumes must GetPortStatus to complete it!! */
		if (temp & PORT_RESUME) {
			dev_err(hcd->self.controller, "Port resume should be skipped.\n");

			/* Remote Wakeup received? */
			if (!priv->reset_done) {
				/* resume signaling for 20 msec */
				priv->reset_done = jiffies
						+ msecs_to_jiffies(20);
				/* check the port again */
				mod_timer(&hcd->rh_timer, priv->reset_done);
			}

			/* resume completed? */
			else if (time_after_eq(jiffies,
					priv->reset_done)) {
				status |= USB_PORT_STAT_C_SUSPEND << 16;
				priv->reset_done = 0;

				/* stop resume signaling */
				temp = reg_read32(hcd->regs, HC_PORTSC1);
				reg_write32(hcd->regs, HC_PORTSC1,
					temp & ~(PORT_RWC_BITS | PORT_RESUME));
				retval = handshake(hcd, HC_PORTSC1,
					PORT_RESUME, 0, 2000 /* 2msec */);
				if (retval != 0) {
					dev_err(hcd->self.controller,
						"port %d resume error %d\n",
						wIndex + 1, retval);
					goto error;
				}
				temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
			}
		}

		/* whoever resets must GetPortStatus to complete it!! */
		if ((temp & PORT_RESET)
				&& time_after_eq(jiffies,
					priv->reset_done)) {
			status |= USB_PORT_STAT_C_RESET << 16;
			priv->reset_done = 0;

			/* force reset to complete */
			reg_write32(hcd->regs, HC_PORTSC1, temp & ~PORT_RESET);
			/* REVISIT: some hardware needs 550+ usec to clear
			 * this bit; seems too long to spin routinely...
			 */
			retval = handshake(hcd, HC_PORTSC1,
					PORT_RESET, 0, 750);
			if (retval != 0) {
				dev_err(hcd->self.controller, "port %d reset error %d\n",
						wIndex + 1, retval);
				goto error;
			}

			/* see what we found out */
			temp = check_reset_complete(hcd, wIndex,
					reg_read32(hcd->regs, HC_PORTSC1));
		}
		/*
		 * Even if OWNER is set, there's no harm letting khubd
		 * see the wPortStatus values (they should all be 0 except
		 * for PORT_POWER anyway).
		 */

		if (temp & PORT_OWNER)
			dev_err(hcd->self.controller, "PORT_OWNER is set\n");

		if (temp & PORT_CONNECT) {
			status |= USB_PORT_STAT_CONNECTION;
			/* status may be from integrated TT */
			status |= USB_PORT_STAT_HIGH_SPEED;
		}
		if (temp & PORT_PE)
			status |= USB_PORT_STAT_ENABLE;
		if (temp & (PORT_SUSPEND|PORT_RESUME))
			status |= USB_PORT_STAT_SUSPEND;
		if (temp & PORT_RESET)
			status |= USB_PORT_STAT_RESET;
		if (temp & PORT_POWER)
			status |= USB_PORT_STAT_POWER;

		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
		break;
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case SetPortFeature:
		selector = wIndex >> 8;
		wIndex &= 0xff;
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = reg_read32(hcd->regs, HC_PORTSC1);
		if (temp & PORT_OWNER)
			break;

		/* temp &= ~PORT_RWC_BITS; */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_PE);
			break;
		case USB_PORT_FEAT_SUSPEND:
			if ((temp & PORT_PE) == 0
					|| (temp & PORT_RESET) != 0)
				goto error;

			reg_write32(hcd->regs, HC_PORTSC1, temp | PORT_SUSPEND);
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(priv->hcs_params))
				reg_write32(hcd->regs, HC_PORTSC1,
							temp | PORT_POWER);
			break;
		case USB_PORT_FEAT_RESET:
			if (temp & PORT_RESUME)
				goto error;
			/* line status bits may report this as low speed,
			 * which can be fine if this root hub has a
			 * transaction translator built in.
			 */
			if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT
					&& PORT_USB11(temp)) {
				temp |= PORT_OWNER;
			} else {
				temp |= PORT_RESET;
				temp &= ~PORT_PE;

				/*
				 * caller must wait, then call GetPortStatus
				 * usb 2.0 spec says 50 ms resets on root
				 */
				priv->reset_done = jiffies +
					msecs_to_jiffies(50);
			}
			reg_write32(hcd->regs, HC_PORTSC1, temp);
			break;
		default:
			goto error;
		}
		reg_read32(hcd->regs, HC_USBCMD);
		break;

	default:
error:
		/* "stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&priv->lock, flags);
	return retval;
}

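/* Current frame number, derived from the microframe index register. */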
static int isp1760_get_frame(struct usb_hcd *hcd)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	u32 fr;

	fr = reg_read32(hcd->regs, HC_FRINDEX);
	return (fr >> 3) % priv->periodic_size;
}

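/*
 * Stop the controller: cancel the errata2 timer, power down the root hub
 * port, reset the core and mask the global interrupt.
 */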
static void isp1760_stop(struct usb_hcd *hcd)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	u32 temp;

	del_timer(&errata2_timer);

	isp1760_hub_control(hcd, ClearPortFeature, USB_PORT_FEAT_POWER, 1,
			NULL, 0);
	mdelay(20);

	spin_lock_irq(&priv->lock);
	ehci_reset(hcd);
	/* Disable IRQ */
	temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
	reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp &= ~HW_GLOBAL_INTR_EN);
	spin_unlock_irq(&priv->lock);

	reg_write32(hcd->regs, HC_CONFIGFLAG, 0);
}

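/* Shutdown hook: stop the controller, mask interrupts and clear the run bit. */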
static void isp1760_shutdown(struct usb_hcd *hcd)
{
	u32 command, temp;

	isp1760_stop(hcd);
	temp = reg_read32(hcd->regs, HC_HW_MODE_CTRL);
	reg_write32(hcd->regs, HC_HW_MODE_CTRL, temp &= ~HW_GLOBAL_INTR_EN);

	command = reg_read32(hcd->regs, HC_USBCMD);
	command &= ~CMD_RUN;
	reg_write32(hcd->regs, HC_USBCMD, command);
}

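/*
 * Called by usbcore once the hub's Clear-TT-Buffer request for this endpoint
 * has completed; the qh may be scheduled again.
 */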
static void isp1760_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct isp1760_hcd *priv = hcd_to_priv(hcd);
	struct isp1760_qh *qh = ep->hcpriv;
	unsigned long spinflags;

	if (!qh)
		return;

	spin_lock_irqsave(&priv->lock, spinflags);
	qh->tt_buffer_dirty = 0;
	schedule_ptds(hcd);
	spin_unlock_irqrestore(&priv->lock, spinflags);
}

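/* hc_driver glue: the entry points usbcore uses to drive this controller. */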
static const struct hc_driver isp1760_hc_driver = {
	.description		= "isp1760-hcd",
	.product_desc		= "NXP ISP1760 USB Host Controller",
	.hcd_priv_size		= sizeof(struct isp1760_hcd),
	.irq			= isp1760_irq,
	.flags			= HCD_MEMORY | HCD_USB2,
	.reset			= isp1760_hc_setup,
	.start			= isp1760_run,
	.stop			= isp1760_stop,
	.shutdown		= isp1760_shutdown,
	.urb_enqueue		= isp1760_urb_enqueue,
	.urb_dequeue		= isp1760_urb_dequeue,
	.endpoint_disable	= isp1760_endpoint_disable,
	.get_frame_number	= isp1760_get_frame,
	.hub_status_data	= isp1760_hub_status_data,
	.hub_control		= isp1760_hub_control,
	.clear_tt_buffer_complete	= isp1760_clear_tt_buffer_complete,
};

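/* Create the slab caches used for urb list items, qtds and qhs. */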
int __init init_kmem_once(void)
{
	urb_listitem_cachep = kmem_cache_create("isp1760_urb_listitem",
			sizeof(struct urb_listitem), 0, SLAB_TEMPORARY |
			SLAB_MEM_SPREAD, NULL);

	if (!urb_listitem_cachep)
		return -ENOMEM;

	qtd_cachep = kmem_cache_create("isp1760_qtd",
			sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY |
			SLAB_MEM_SPREAD, NULL);

	if (!qtd_cachep) {
		kmem_cache_destroy(urb_listitem_cachep);
		return -ENOMEM;
	}

	qh_cachep = kmem_cache_create("isp1760_qh", sizeof(struct isp1760_qh),
			0, SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);

	if (!qh_cachep) {
		kmem_cache_destroy(qtd_cachep);
		kmem_cache_destroy(urb_listitem_cachep);
		return -ENOMEM;
	}

	return 0;
}

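/* Tear down the slab caches created by init_kmem_once(). */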
void deinit_kmem_cache(void)
{
	kmem_cache_destroy(qtd_cachep);
	kmem_cache_destroy(qh_cachep);
	kmem_cache_destroy(urb_listitem_cachep);
}

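/*
 * Bus-glue entry point: map the controller's register window, then create
 * and register the usb_hcd. dev->dma_mask is cleared on purpose, since the
 * driver copies payloads into the chip's local memory instead of using DMA.
 */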
struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
				 int irq, unsigned long irqflags,
				 int rst_gpio,
				 struct device *dev, const char *busname,
				 unsigned int devflags)
{
	struct usb_hcd *hcd;
	struct isp1760_hcd *priv;
	int ret;

	if (usb_disabled())
		return ERR_PTR(-ENODEV);

	/* prevent usb-core allocating DMA pages */
	dev->dma_mask = NULL;

	hcd = usb_create_hcd(&isp1760_hc_driver, dev, dev_name(dev));
	if (!hcd)
		return ERR_PTR(-ENOMEM);

	priv = hcd_to_priv(hcd);
	priv->devflags = devflags;
	priv->rst_gpio = rst_gpio;

	init_memory(priv);
	hcd->regs = ioremap(res_start, res_len);
	if (!hcd->regs) {
		ret = -EIO;
		goto err_put;
	}

	hcd->irq = irq;
	hcd->rsrc_start = res_start;
	hcd->rsrc_len = res_len;

	ret = usb_add_hcd(hcd, irq, irqflags);
	if (ret)
		goto err_unmap;
	device_wakeup_enable(hcd->self.controller);

	return hcd;

err_unmap:
	iounmap(hcd->regs);

err_put:
	usb_put_hcd(hcd);

	return ERR_PTR(ret);
}

MODULE_DESCRIPTION("Driver for the ISP1760 USB-controller from NXP");
MODULE_AUTHOR("Sebastian Siewior <bigeasy@linutronix.de>");
MODULE_LICENSE("GPL v2");