usbtest.c

  1. #include <linux/kernel.h>
  2. #include <linux/errno.h>
  3. #include <linux/init.h>
  4. #include <linux/slab.h>
  5. #include <linux/mm.h>
  6. #include <linux/module.h>
  7. #include <linux/moduleparam.h>
  8. #include <linux/scatterlist.h>
  9. #include <linux/mutex.h>
  10. #include <linux/timer.h>
  11. #include <linux/usb.h>
  12. #define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */
  13. /*-------------------------------------------------------------------------*/
  14. static int override_alt = -1;
  15. module_param_named(alt, override_alt, int, 0644);
  16. MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection");
  17. static void complicated_callback(struct urb *urb);
  18. /*-------------------------------------------------------------------------*/
  19. /* FIXME make these public somewhere; usbdevfs.h? */
  20. /* Parameter for usbtest driver. */
  21. struct usbtest_param_32 {
  22. /* inputs */
  23. __u32 test_num; /* 0..(TEST_CASES-1) */
  24. __u32 iterations;
  25. __u32 length;
  26. __u32 vary;
  27. __u32 sglen;
  28. /* outputs */
  29. __s32 duration_sec;
  30. __s32 duration_usec;
  31. };
  32. /*
  33. * Compat parameter to the usbtest driver.
  34. * This supports older user space binaries compiled with 64 bit compiler.
  35. */
  36. struct usbtest_param_64 {
  37. /* inputs */
  38. __u32 test_num; /* 0..(TEST_CASES-1) */
  39. __u32 iterations;
  40. __u32 length;
  41. __u32 vary;
  42. __u32 sglen;
  43. /* outputs */
  44. __s64 duration_sec;
  45. __s64 duration_usec;
  46. };
  47. /* IOCTL interface to the driver. */
  48. #define USBTEST_REQUEST_32 _IOWR('U', 100, struct usbtest_param_32)
  49. /* COMPAT IOCTL interface to the driver. */
  50. #define USBTEST_REQUEST_64 _IOWR('U', 100, struct usbtest_param_64)
  51. /*-------------------------------------------------------------------------*/
  52. #define GENERIC /* let probe() bind using module params */
  53. /* Some devices that can be used for testing will have "real" drivers.
  54. * Entries for those need to be enabled here by hand, after disabling
  55. * that "real" driver.
  56. */
  57. //#define IBOT2 /* grab iBOT2 webcams */
  58. //#define KEYSPAN_19Qi /* grab un-renumerated serial adapter */
  59. /*-------------------------------------------------------------------------*/
  60. struct usbtest_info {
  61. const char *name;
  62. u8 ep_in; /* bulk/intr source */
  63. u8 ep_out; /* bulk/intr sink */
  64. unsigned autoconf:1;
  65. unsigned ctrl_out:1;
  66. unsigned iso:1; /* try iso in/out */
  67. unsigned intr:1; /* try interrupt in/out */
  68. int alt;
  69. };
  70. /* this is accessed only through usbfs ioctl calls.
  71. * one ioctl to issue a test ... one lock per device.
  72. * tests create other threads if they need them.
  73. * urbs and buffers are allocated dynamically,
  74. * and data generated deterministically.
  75. */
  76. struct usbtest_dev {
  77. struct usb_interface *intf;
  78. struct usbtest_info *info;
  79. int in_pipe;
  80. int out_pipe;
  81. int in_iso_pipe;
  82. int out_iso_pipe;
  83. int in_int_pipe;
  84. int out_int_pipe;
  85. struct usb_endpoint_descriptor *iso_in, *iso_out;
  86. struct usb_endpoint_descriptor *int_in, *int_out;
  87. struct mutex lock;
  88. #define TBUF_SIZE 256
  89. u8 *buf;
  90. };
  91. static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
  92. {
  93. return interface_to_usbdev(test->intf);
  94. }
  95. /* set up all urbs so they can be used with either bulk or interrupt */
  96. #define INTERRUPT_RATE 1 /* msec/transfer */
  97. #define ERROR(tdev, fmt, args...) \
  98. dev_err(&(tdev)->intf->dev , fmt , ## args)
  99. #define WARNING(tdev, fmt, args...) \
  100. dev_warn(&(tdev)->intf->dev , fmt , ## args)
  101. #define GUARD_BYTE 0xA5
  102. #define MAX_SGLEN 128
  103. /*-------------------------------------------------------------------------*/
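/* Scan the interface's altsettings for endpoints we can test with: a bulk
 * in/out pair, plus interrupt and isochronous endpoints when the device
 * info asks for them.  Selects that altsetting and records the pipes.
 */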
  104. static int
  105. get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
  106. {
  107. int tmp;
  108. struct usb_host_interface *alt;
  109. struct usb_host_endpoint *in, *out;
  110. struct usb_host_endpoint *iso_in, *iso_out;
  111. struct usb_host_endpoint *int_in, *int_out;
  112. struct usb_device *udev;
  113. for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
  114. unsigned ep;
  115. in = out = NULL;
  116. iso_in = iso_out = NULL;
  117. int_in = int_out = NULL;
  118. alt = intf->altsetting + tmp;
  119. if (override_alt >= 0 &&
  120. override_alt != alt->desc.bAlternateSetting)
  121. continue;
  122. /* take the first altsetting with in-bulk + out-bulk;
  123. * ignore other endpoints and altsettings.
  124. */
  125. for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
  126. struct usb_host_endpoint *e;
  127. e = alt->endpoint + ep;
  128. switch (usb_endpoint_type(&e->desc)) {
  129. case USB_ENDPOINT_XFER_BULK:
  130. break;
  131. case USB_ENDPOINT_XFER_INT:
  132. if (dev->info->intr)
  133. goto try_intr;
  134. case USB_ENDPOINT_XFER_ISOC:
  135. if (dev->info->iso)
  136. goto try_iso;
  137. /* FALLTHROUGH */
  138. default:
  139. continue;
  140. }
  141. if (usb_endpoint_dir_in(&e->desc)) {
  142. if (!in)
  143. in = e;
  144. } else {
  145. if (!out)
  146. out = e;
  147. }
  148. continue;
  149. try_intr:
  150. if (usb_endpoint_dir_in(&e->desc)) {
  151. if (!int_in)
  152. int_in = e;
  153. } else {
  154. if (!int_out)
  155. int_out = e;
  156. }
  157. continue;
  158. try_iso:
  159. if (usb_endpoint_dir_in(&e->desc)) {
  160. if (!iso_in)
  161. iso_in = e;
  162. } else {
  163. if (!iso_out)
  164. iso_out = e;
  165. }
  166. }
  167. if ((in && out) || iso_in || iso_out || int_in || int_out)
  168. goto found;
  169. }
  170. return -EINVAL;
  171. found:
  172. udev = testdev_to_usbdev(dev);
  173. dev->info->alt = alt->desc.bAlternateSetting;
  174. if (alt->desc.bAlternateSetting != 0) {
  175. tmp = usb_set_interface(udev,
  176. alt->desc.bInterfaceNumber,
  177. alt->desc.bAlternateSetting);
  178. if (tmp < 0)
  179. return tmp;
  180. }
  181. if (in) {
  182. dev->in_pipe = usb_rcvbulkpipe(udev,
  183. in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
  184. dev->out_pipe = usb_sndbulkpipe(udev,
  185. out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
  186. }
  187. if (iso_in) {
  188. dev->iso_in = &iso_in->desc;
  189. dev->in_iso_pipe = usb_rcvisocpipe(udev,
  190. iso_in->desc.bEndpointAddress
  191. & USB_ENDPOINT_NUMBER_MASK);
  192. }
  193. if (iso_out) {
  194. dev->iso_out = &iso_out->desc;
  195. dev->out_iso_pipe = usb_sndisocpipe(udev,
  196. iso_out->desc.bEndpointAddress
  197. & USB_ENDPOINT_NUMBER_MASK);
  198. }
  199. if (int_in) {
  200. dev->int_in = &int_in->desc;
  201. dev->in_int_pipe = usb_rcvintpipe(udev,
  202. int_in->desc.bEndpointAddress
  203. & USB_ENDPOINT_NUMBER_MASK);
  204. }
  205. if (int_out) {
  206. dev->int_out = &int_out->desc;
  207. dev->out_int_pipe = usb_sndintpipe(udev,
  208. int_out->desc.bEndpointAddress
  209. & USB_ENDPOINT_NUMBER_MASK);
  210. }
  211. return 0;
  212. }
  213. /*-------------------------------------------------------------------------*/
  214. /* Support for testing basic non-queued I/O streams.
  215. *
  216. * These just package urbs as requests that can be easily canceled.
  217. * Each urb's data buffer is dynamically allocated; callers can fill
  218. * them with non-zero test data (or test for it) when appropriate.
  219. */
  220. static void simple_callback(struct urb *urb)
  221. {
  222. complete(urb->context);
  223. }
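/* Allocate an urb and its transfer buffer; a nonzero 'offset' mis-aligns
 * the buffer to exercise unaligned transfers, with the skipped prefix
 * filled with guard bytes that are checked later.
 */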
  224. static struct urb *usbtest_alloc_urb(
  225. struct usb_device *udev,
  226. int pipe,
  227. unsigned long bytes,
  228. unsigned transfer_flags,
  229. unsigned offset,
  230. u8 bInterval,
  231. usb_complete_t complete_fn)
  232. {
  233. struct urb *urb;
  234. urb = usb_alloc_urb(0, GFP_KERNEL);
  235. if (!urb)
  236. return urb;
  237. if (bInterval)
  238. usb_fill_int_urb(urb, udev, pipe, NULL, bytes, complete_fn,
  239. NULL, bInterval);
  240. else
  241. usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, complete_fn,
  242. NULL);
  243. urb->interval = (udev->speed == USB_SPEED_HIGH)
  244. ? (INTERRUPT_RATE << 3)
  245. : INTERRUPT_RATE;
  246. urb->transfer_flags = transfer_flags;
  247. if (usb_pipein(pipe))
  248. urb->transfer_flags |= URB_SHORT_NOT_OK;
  249. if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
  250. urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
  251. GFP_KERNEL, &urb->transfer_dma);
  252. else
  253. urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
  254. if (!urb->transfer_buffer) {
  255. usb_free_urb(urb);
  256. return NULL;
  257. }
  258. /* To test unaligned transfers add an offset and fill the
  259. unused memory with a guard value */
  260. if (offset) {
  261. memset(urb->transfer_buffer, GUARD_BYTE, offset);
  262. urb->transfer_buffer += offset;
  263. if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
  264. urb->transfer_dma += offset;
  265. }
  266. /* For inbound transfers use guard byte so that test fails if
  267. data not correctly copied */
  268. memset(urb->transfer_buffer,
  269. usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
  270. bytes);
  271. return urb;
  272. }
  273. static struct urb *simple_alloc_urb(
  274. struct usb_device *udev,
  275. int pipe,
  276. unsigned long bytes,
  277. u8 bInterval)
  278. {
  279. return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0,
  280. bInterval, simple_callback);
  281. }
  282. static struct urb *complicated_alloc_urb(
  283. struct usb_device *udev,
  284. int pipe,
  285. unsigned long bytes,
  286. u8 bInterval)
  287. {
  288. return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0,
  289. bInterval, complicated_callback);
  290. }
  291. static unsigned pattern;
  292. static unsigned mod_pattern;
  293. module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
  294. MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
  295. static unsigned get_maxpacket(struct usb_device *udev, int pipe)
  296. {
  297. struct usb_host_endpoint *ep;
  298. ep = usb_pipe_endpoint(udev, pipe);
  299. return le16_to_cpup(&ep->desc.wMaxPacketSize);
  300. }
  301. static void simple_fill_buf(struct urb *urb)
  302. {
  303. unsigned i;
  304. u8 *buf = urb->transfer_buffer;
  305. unsigned len = urb->transfer_buffer_length;
  306. unsigned maxpacket;
  307. switch (pattern) {
  308. default:
  309. /* FALLTHROUGH */
  310. case 0:
  311. memset(buf, 0, len);
  312. break;
  313. case 1: /* mod63 */
  314. maxpacket = get_maxpacket(urb->dev, urb->pipe);
  315. for (i = 0; i < len; i++)
  316. *buf++ = (u8) ((i % maxpacket) % 63);
  317. break;
  318. }
  319. }
  320. static inline unsigned long buffer_offset(void *buf)
  321. {
  322. return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
  323. }
  324. static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
  325. {
  326. u8 *buf = urb->transfer_buffer;
  327. u8 *guard = buf - buffer_offset(buf);
  328. unsigned i;
  329. for (i = 0; guard < buf; i++, guard++) {
  330. if (*guard != GUARD_BYTE) {
  331. ERROR(tdev, "guard byte[%d] %d (not %d)\n",
  332. i, *guard, GUARD_BYTE);
  333. return -EINVAL;
  334. }
  335. }
  336. return 0;
  337. }
  338. static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
  339. {
  340. unsigned i;
  341. u8 expected;
  342. u8 *buf = urb->transfer_buffer;
  343. unsigned len = urb->actual_length;
  344. unsigned maxpacket = get_maxpacket(urb->dev, urb->pipe);
  345. int ret = check_guard_bytes(tdev, urb);
  346. if (ret)
  347. return ret;
  348. for (i = 0; i < len; i++, buf++) {
  349. switch (pattern) {
  350. /* all-zeroes has no synchronization issues */
  351. case 0:
  352. expected = 0;
  353. break;
  354. /* mod63 stays in sync with short-terminated transfers,
  355. * or otherwise when host and gadget agree on how large
  356. * each usb transfer request should be. resync is done
  357. * with set_interface or set_config.
  358. */
  359. case 1: /* mod63 */
  360. expected = (i % maxpacket) % 63;
  361. break;
  362. /* always fail unsupported patterns */
  363. default:
  364. expected = !*buf;
  365. break;
  366. }
  367. if (*buf == expected)
  368. continue;
  369. ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
  370. return -EINVAL;
  371. }
  372. return 0;
  373. }
  374. static void simple_free_urb(struct urb *urb)
  375. {
  376. unsigned long offset = buffer_offset(urb->transfer_buffer);
  377. if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
  378. usb_free_coherent(
  379. urb->dev,
  380. urb->transfer_buffer_length + offset,
  381. urb->transfer_buffer - offset,
  382. urb->transfer_dma - offset);
  383. else
  384. kfree(urb->transfer_buffer - offset);
  385. usb_free_urb(urb);
  386. }
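/* Submit the urb synchronously, 'iterations' times, waiting up to
 * SIMPLE_IO_TIMEOUT for each completion; 'vary' changes the transfer
 * length between iterations, IN data is checked against the pattern,
 * and the final status is compared with 'expected'.
 */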
  387. static int simple_io(
  388. struct usbtest_dev *tdev,
  389. struct urb *urb,
  390. int iterations,
  391. int vary,
  392. int expected,
  393. const char *label
  394. )
  395. {
  396. struct usb_device *udev = urb->dev;
  397. int max = urb->transfer_buffer_length;
  398. struct completion completion;
  399. int retval = 0;
  400. unsigned long expire;
  401. urb->context = &completion;
  402. while (retval == 0 && iterations-- > 0) {
  403. init_completion(&completion);
  404. if (usb_pipeout(urb->pipe)) {
  405. simple_fill_buf(urb);
  406. urb->transfer_flags |= URB_ZERO_PACKET;
  407. }
  408. retval = usb_submit_urb(urb, GFP_KERNEL);
  409. if (retval != 0)
  410. break;
  411. expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT);
  412. if (!wait_for_completion_timeout(&completion, expire)) {
  413. usb_kill_urb(urb);
  414. retval = (urb->status == -ENOENT ?
  415. -ETIMEDOUT : urb->status);
  416. } else {
  417. retval = urb->status;
  418. }
  419. urb->dev = udev;
  420. if (retval == 0 && usb_pipein(urb->pipe))
  421. retval = simple_check_buf(tdev, urb);
  422. if (vary) {
  423. int len = urb->transfer_buffer_length;
  424. len += vary;
  425. len %= max;
  426. if (len == 0)
  427. len = (vary < max) ? vary : max;
  428. urb->transfer_buffer_length = len;
  429. }
  430. /* FIXME if endpoint halted, clear halt (and log) */
  431. }
  432. urb->transfer_buffer_length = max;
  433. if (expected != retval)
  434. dev_err(&udev->dev,
  435. "%s failed, iterations left %d, status %d (not %d)\n",
  436. label, iterations, retval, expected);
  437. return retval;
  438. }
  439. /*-------------------------------------------------------------------------*/
  440. /* We use scatterlist primitives to test queued I/O.
  441. * Yes, this also tests the scatterlist primitives.
  442. */
  443. static void free_sglist(struct scatterlist *sg, int nents)
  444. {
  445. unsigned i;
  446. if (!sg)
  447. return;
  448. for (i = 0; i < nents; i++) {
  449. if (!sg_page(&sg[i]))
  450. continue;
  451. kfree(sg_virt(&sg[i]));
  452. }
  453. kfree(sg);
  454. }
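/* Build an nents-entry scatterlist of kmalloc'd buffers, each pre-filled
 * with the selected test pattern; 'vary' steps the entry size modulo 'max'.
 */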
  455. static struct scatterlist *
  456. alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
  457. {
  458. struct scatterlist *sg;
  459. unsigned i;
  460. unsigned size = max;
  461. unsigned maxpacket =
  462. get_maxpacket(interface_to_usbdev(dev->intf), pipe);
  463. if (max == 0)
  464. return NULL;
  465. sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
  466. if (!sg)
  467. return NULL;
  468. sg_init_table(sg, nents);
  469. for (i = 0; i < nents; i++) {
  470. char *buf;
  471. unsigned j;
  472. buf = kzalloc(size, GFP_KERNEL);
  473. if (!buf) {
  474. free_sglist(sg, i);
  475. return NULL;
  476. }
  477. /* kmalloc pages are always physically contiguous! */
  478. sg_set_buf(&sg[i], buf, size);
  479. switch (pattern) {
  480. case 0:
  481. /* already zeroed */
  482. break;
  483. case 1:
  484. for (j = 0; j < size; j++)
  485. *buf++ = (u8) ((j % maxpacket) % 63);
  486. break;
  487. }
  488. if (vary) {
  489. size += vary;
  490. size %= max;
  491. if (size == 0)
  492. size = (vary < max) ? vary : max;
  493. }
  494. }
  495. return sg;
  496. }
  497. static void sg_timeout(unsigned long _req)
  498. {
  499. struct usb_sg_request *req = (struct usb_sg_request *) _req;
  500. req->status = -ETIMEDOUT;
  501. usb_sg_cancel(req);
  502. }
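/* Run usb_sg_init()/usb_sg_wait() over the scatterlist 'iterations' times,
 * canceling the request from a timer if it doesn't finish within
 * SIMPLE_IO_TIMEOUT.
 */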
  503. static int perform_sglist(
  504. struct usbtest_dev *tdev,
  505. unsigned iterations,
  506. int pipe,
  507. struct usb_sg_request *req,
  508. struct scatterlist *sg,
  509. int nents
  510. )
  511. {
  512. struct usb_device *udev = testdev_to_usbdev(tdev);
  513. int retval = 0;
  514. struct timer_list sg_timer;
  515. setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
  516. while (retval == 0 && iterations-- > 0) {
  517. retval = usb_sg_init(req, udev, pipe,
  518. (udev->speed == USB_SPEED_HIGH)
  519. ? (INTERRUPT_RATE << 3)
  520. : INTERRUPT_RATE,
  521. sg, nents, 0, GFP_KERNEL);
  522. if (retval)
  523. break;
  524. mod_timer(&sg_timer, jiffies +
  525. msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
  526. usb_sg_wait(req);
  527. del_timer_sync(&sg_timer);
  528. retval = req->status;
  529. /* FIXME check resulting data pattern */
  530. /* FIXME if endpoint halted, clear halt (and log) */
  531. }
  532. /* FIXME for unlink or fault handling tests, don't report
  533. * failure if retval is as we expected ...
  534. */
  535. if (retval)
  536. ERROR(tdev, "perform_sglist failed, "
  537. "iterations left %d, status %d\n",
  538. iterations, retval);
  539. return retval;
  540. }
  541. /*-------------------------------------------------------------------------*/
  542. /* unqueued control message testing
  543. *
  544. * there's a nice set of device functional requirements in chapter 9 of the
  545. * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
  546. * special test firmware.
  547. *
  548. * we know the device is configured (or suspended) by the time it's visible
  549. * through usbfs. we can't change that, so we won't test enumeration (which
  550. * worked 'well enough' to get here, this time), power management (ditto),
  551. * or remote wakeup (which needs human interaction).
  552. */
  553. static unsigned realworld = 1;
  554. module_param(realworld, uint, 0);
  555. MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
  556. static int get_altsetting(struct usbtest_dev *dev)
  557. {
  558. struct usb_interface *iface = dev->intf;
  559. struct usb_device *udev = interface_to_usbdev(iface);
  560. int retval;
  561. retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
  562. USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
  563. 0, iface->altsetting[0].desc.bInterfaceNumber,
  564. dev->buf, 1, USB_CTRL_GET_TIMEOUT);
  565. switch (retval) {
  566. case 1:
  567. return dev->buf[0];
  568. case 0:
  569. retval = -ERANGE;
  570. /* FALLTHROUGH */
  571. default:
  572. return retval;
  573. }
  574. }
  575. static int set_altsetting(struct usbtest_dev *dev, int alternate)
  576. {
  577. struct usb_interface *iface = dev->intf;
  578. struct usb_device *udev;
  579. if (alternate < 0 || alternate >= 256)
  580. return -EINVAL;
  581. udev = interface_to_usbdev(iface);
  582. return usb_set_interface(udev,
  583. iface->altsetting[0].desc.bInterfaceNumber,
  584. alternate);
  585. }
  586. static int is_good_config(struct usbtest_dev *tdev, int len)
  587. {
  588. struct usb_config_descriptor *config;
  589. if (len < sizeof(*config))
  590. return 0;
  591. config = (struct usb_config_descriptor *) tdev->buf;
  592. switch (config->bDescriptorType) {
  593. case USB_DT_CONFIG:
  594. case USB_DT_OTHER_SPEED_CONFIG:
  595. if (config->bLength != 9) {
  596. ERROR(tdev, "bogus config descriptor length\n");
  597. return 0;
  598. }
  599. /* this bit 'must be 1' but often isn't */
  600. if (!realworld && !(config->bmAttributes & 0x80)) {
  601. ERROR(tdev, "high bit of config attributes not set\n");
  602. return 0;
  603. }
  604. if (config->bmAttributes & 0x1f) { /* reserved == 0 */
  605. ERROR(tdev, "reserved config bits set\n");
  606. return 0;
  607. }
  608. break;
  609. default:
  610. return 0;
  611. }
  612. if (le16_to_cpu(config->wTotalLength) == len) /* read it all */
  613. return 1;
  614. if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */
  615. return 1;
  616. ERROR(tdev, "bogus config descriptor read size\n");
  617. return 0;
  618. }
  619. static int is_good_ext(struct usbtest_dev *tdev, u8 *buf)
  620. {
  621. struct usb_ext_cap_descriptor *ext;
  622. u32 attr;
  623. ext = (struct usb_ext_cap_descriptor *) buf;
  624. if (ext->bLength != USB_DT_USB_EXT_CAP_SIZE) {
  625. ERROR(tdev, "bogus usb 2.0 extension descriptor length\n");
  626. return 0;
  627. }
  628. attr = le32_to_cpu(ext->bmAttributes);
  629. /* bits[1:15] are used and the others are reserved */
  630. if (attr & ~0xfffe) { /* reserved == 0 */
  631. ERROR(tdev, "reserved bits set\n");
  632. return 0;
  633. }
  634. return 1;
  635. }
  636. static int is_good_ss_cap(struct usbtest_dev *tdev, u8 *buf)
  637. {
  638. struct usb_ss_cap_descriptor *ss;
  639. ss = (struct usb_ss_cap_descriptor *) buf;
  640. if (ss->bLength != USB_DT_USB_SS_CAP_SIZE) {
  641. ERROR(tdev, "bogus superspeed device capability descriptor length\n");
  642. return 0;
  643. }
  644. /*
  645. * only bit[1] of bmAttributes is used for LTM and others are
  646. * reserved
  647. */
  648. if (ss->bmAttributes & ~0x02) { /* reserved == 0 */
  649. ERROR(tdev, "reserved bits set in bmAttributes\n");
  650. return 0;
  651. }
  652. /* bits[0:3] of wSpeedSupported are used and the others are reserved */
  653. if (le16_to_cpu(ss->wSpeedSupported) & ~0x0f) { /* reserved == 0 */
  654. ERROR(tdev, "reserved bits set in wSpeedSupported\n");
  655. return 0;
  656. }
  657. return 1;
  658. }
  659. static int is_good_con_id(struct usbtest_dev *tdev, u8 *buf)
  660. {
  661. struct usb_ss_container_id_descriptor *con_id;
  662. con_id = (struct usb_ss_container_id_descriptor *) buf;
  663. if (con_id->bLength != USB_DT_USB_SS_CONTN_ID_SIZE) {
  664. ERROR(tdev, "bogus container id descriptor length\n");
  665. return 0;
  666. }
  667. if (con_id->bReserved) { /* reserved == 0 */
  668. ERROR(tdev, "reserved bits set\n");
  669. return 0;
  670. }
  671. return 1;
  672. }
  673. /* sanity test for standard requests working with usb_control_msg() and some
  674. * of the utility functions which use it.
  675. *
  676. * this doesn't test how endpoint halts behave or data toggles get set, since
  677. * we won't do I/O to bulk/interrupt endpoints here (which is how to change
  678. * halt or toggle). toggle testing is impractical without support from hcds.
  679. *
  680. * this avoids failing devices linux would normally work with, by not testing
  681. * config/altsetting operations for devices that only support their defaults.
  682. * such devices rarely support those needless operations.
  683. *
  684. * NOTE that since this is a sanity test, it's not examining boundary cases
  685. * to see if usbcore, hcd, and device all behave right. such testing would
  686. * involve varied read sizes and other operation sequences.
  687. */
  688. static int ch9_postconfig(struct usbtest_dev *dev)
  689. {
  690. struct usb_interface *iface = dev->intf;
  691. struct usb_device *udev = interface_to_usbdev(iface);
  692. int i, alt, retval;
  693. /* [9.2.3] if there's more than one altsetting, we need to be able to
  694. * set and get each one. mostly trusts the descriptors from usbcore.
  695. */
  696. for (i = 0; i < iface->num_altsetting; i++) {
  697. /* 9.2.3 constrains the range here */
  698. alt = iface->altsetting[i].desc.bAlternateSetting;
  699. if (alt < 0 || alt >= iface->num_altsetting) {
  700. dev_err(&iface->dev,
  701. "invalid alt [%d].bAltSetting = %d\n",
  702. i, alt);
  703. }
  704. /* [real world] get/set unimplemented if there's only one */
  705. if (realworld && iface->num_altsetting == 1)
  706. continue;
  707. /* [9.4.10] set_interface */
  708. retval = set_altsetting(dev, alt);
  709. if (retval) {
  710. dev_err(&iface->dev, "can't set_interface = %d, %d\n",
  711. alt, retval);
  712. return retval;
  713. }
  714. /* [9.4.4] get_interface always works */
  715. retval = get_altsetting(dev);
  716. if (retval != alt) {
  717. dev_err(&iface->dev, "get alt should be %d, was %d\n",
  718. alt, retval);
  719. return (retval < 0) ? retval : -EDOM;
  720. }
  721. }
  722. /* [real world] get_config unimplemented if there's only one */
  723. if (!realworld || udev->descriptor.bNumConfigurations != 1) {
  724. int expected = udev->actconfig->desc.bConfigurationValue;
  725. /* [9.4.2] get_configuration always works
  726. * ... although some cheap devices (like one TI Hub I've got)
  727. * won't return config descriptors except before set_config.
  728. */
  729. retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
  730. USB_REQ_GET_CONFIGURATION,
  731. USB_DIR_IN | USB_RECIP_DEVICE,
  732. 0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
  733. if (retval != 1 || dev->buf[0] != expected) {
  734. dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
  735. retval, dev->buf[0], expected);
  736. return (retval < 0) ? retval : -EDOM;
  737. }
  738. }
  739. /* there's always [9.4.3] a device descriptor [9.6.1] */
  740. retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
  741. dev->buf, sizeof(udev->descriptor));
  742. if (retval != sizeof(udev->descriptor)) {
  743. dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
  744. return (retval < 0) ? retval : -EDOM;
  745. }
  746. /*
  747. * there's always [9.4.3] a bos device descriptor [9.6.2] in USB
  748. * 3.0 spec
  749. */
  750. if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0210) {
  751. struct usb_bos_descriptor *bos = NULL;
  752. struct usb_dev_cap_header *header = NULL;
  753. unsigned total, num, length;
  754. u8 *buf;
  755. retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
  756. sizeof(*udev->bos->desc));
  757. if (retval != sizeof(*udev->bos->desc)) {
  758. dev_err(&iface->dev, "bos descriptor --> %d\n", retval);
  759. return (retval < 0) ? retval : -EDOM;
  760. }
  761. bos = (struct usb_bos_descriptor *)dev->buf;
  762. total = le16_to_cpu(bos->wTotalLength);
  763. num = bos->bNumDeviceCaps;
  764. if (total > TBUF_SIZE)
  765. total = TBUF_SIZE;
  766. /*
  767. * get generic device-level capability descriptors [9.6.2]
  768. * in USB 3.0 spec
  769. */
  770. retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
  771. total);
  772. if (retval != total) {
  773. dev_err(&iface->dev, "bos descriptor set --> %d\n",
  774. retval);
  775. return (retval < 0) ? retval : -EDOM;
  776. }
  777. length = sizeof(*udev->bos->desc);
  778. buf = dev->buf;
  779. for (i = 0; i < num; i++) {
  780. buf += length;
  781. if (buf + sizeof(struct usb_dev_cap_header) >
  782. dev->buf + total)
  783. break;
  784. header = (struct usb_dev_cap_header *)buf;
  785. length = header->bLength;
  786. if (header->bDescriptorType !=
  787. USB_DT_DEVICE_CAPABILITY) {
  788. dev_warn(&udev->dev, "not device capability descriptor, skip\n");
  789. continue;
  790. }
  791. switch (header->bDevCapabilityType) {
  792. case USB_CAP_TYPE_EXT:
  793. if (buf + USB_DT_USB_EXT_CAP_SIZE >
  794. dev->buf + total ||
  795. !is_good_ext(dev, buf)) {
  796. dev_err(&iface->dev, "bogus usb 2.0 extension descriptor\n");
  797. return -EDOM;
  798. }
  799. break;
  800. case USB_SS_CAP_TYPE:
  801. if (buf + USB_DT_USB_SS_CAP_SIZE >
  802. dev->buf + total ||
  803. !is_good_ss_cap(dev, buf)) {
  804. dev_err(&iface->dev, "bogus superspeed device capability descriptor\n");
  805. return -EDOM;
  806. }
  807. break;
  808. case CONTAINER_ID_TYPE:
  809. if (buf + USB_DT_USB_SS_CONTN_ID_SIZE >
  810. dev->buf + total ||
  811. !is_good_con_id(dev, buf)) {
  812. dev_err(&iface->dev, "bogus container id descriptor\n");
  813. return -EDOM;
  814. }
  815. break;
  816. default:
  817. break;
  818. }
  819. }
  820. }
  821. /* there's always [9.4.3] at least one config descriptor [9.6.3] */
  822. for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
  823. retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
  824. dev->buf, TBUF_SIZE);
  825. if (!is_good_config(dev, retval)) {
  826. dev_err(&iface->dev,
  827. "config [%d] descriptor --> %d\n",
  828. i, retval);
  829. return (retval < 0) ? retval : -EDOM;
  830. }
  831. /* FIXME cross-checking udev->config[i] to make sure usbcore
  832. * parsed it right (etc) would be good testing paranoia
  833. */
  834. }
  835. /* and sometimes [9.2.6.6] speed dependent descriptors */
  836. if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
  837. struct usb_qualifier_descriptor *d = NULL;
  838. /* device qualifier [9.6.2] */
  839. retval = usb_get_descriptor(udev,
  840. USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
  841. sizeof(struct usb_qualifier_descriptor));
  842. if (retval == -EPIPE) {
  843. if (udev->speed == USB_SPEED_HIGH) {
  844. dev_err(&iface->dev,
  845. "hs dev qualifier --> %d\n",
  846. retval);
  847. return (retval < 0) ? retval : -EDOM;
  848. }
  849. /* usb2.0 but not high-speed capable; fine */
  850. } else if (retval != sizeof(struct usb_qualifier_descriptor)) {
  851. dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
  852. return (retval < 0) ? retval : -EDOM;
  853. } else
  854. d = (struct usb_qualifier_descriptor *) dev->buf;
  855. /* might not have [9.6.2] any other-speed configs [9.6.4] */
  856. if (d) {
  857. unsigned max = d->bNumConfigurations;
  858. for (i = 0; i < max; i++) {
  859. retval = usb_get_descriptor(udev,
  860. USB_DT_OTHER_SPEED_CONFIG, i,
  861. dev->buf, TBUF_SIZE);
  862. if (!is_good_config(dev, retval)) {
  863. dev_err(&iface->dev,
  864. "other speed config --> %d\n",
  865. retval);
  866. return (retval < 0) ? retval : -EDOM;
  867. }
  868. }
  869. }
  870. }
  871. /* FIXME fetch strings from at least the device descriptor */
  872. /* [9.4.5] get_status always works */
  873. retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
  874. if (retval) {
  875. dev_err(&iface->dev, "get dev status --> %d\n", retval);
  876. return retval;
  877. }
  878. /* FIXME configuration.bmAttributes says if we could try to set/clear
  879. * the device's remote wakeup feature ... if we can, test that here
  880. */
  881. retval = usb_get_status(udev, USB_RECIP_INTERFACE,
  882. iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
  883. if (retval) {
  884. dev_err(&iface->dev, "get interface status --> %d\n", retval);
  885. return retval;
  886. }
  887. /* FIXME get status for each endpoint in the interface */
  888. return 0;
  889. }
  890. /*-------------------------------------------------------------------------*/
  891. /* use ch9 requests to test whether:
  892. * (a) queues work for control, keeping N subtests queued and
  893. * active (auto-resubmit) for M loops through the queue.
  894. * (b) protocol stalls (control-only) will autorecover.
  895. * it's not like bulk/intr; no halt clearing.
  896. * (c) short control reads are reported and handled.
  897. * (d) queues are always processed in-order
  898. */
  899. struct ctrl_ctx {
  900. spinlock_t lock;
  901. struct usbtest_dev *dev;
  902. struct completion complete;
  903. unsigned count;
  904. unsigned pending;
  905. int status;
  906. struct urb **urb;
  907. struct usbtest_param_32 *param;
  908. int last;
  909. };
  910. #define NUM_SUBCASES 16 /* how many test subcases here? */
  911. struct subcase {
  912. struct usb_ctrlrequest setup;
  913. int number;
  914. int expected;
  915. };
  916. static void ctrl_complete(struct urb *urb)
  917. {
  918. struct ctrl_ctx *ctx = urb->context;
  919. struct usb_ctrlrequest *reqp;
  920. struct subcase *subcase;
  921. int status = urb->status;
  922. reqp = (struct usb_ctrlrequest *)urb->setup_packet;
  923. subcase = container_of(reqp, struct subcase, setup);
  924. spin_lock(&ctx->lock);
  925. ctx->count--;
  926. ctx->pending--;
  927. /* queue must transfer and complete in fifo order, unless
  928. * usb_unlink_urb() is used to unlink something not at the
  929. * physical queue head (not tested).
  930. */
  931. if (subcase->number > 0) {
  932. if ((subcase->number - ctx->last) != 1) {
  933. ERROR(ctx->dev,
  934. "subcase %d completed out of order, last %d\n",
  935. subcase->number, ctx->last);
  936. status = -EDOM;
  937. ctx->last = subcase->number;
  938. goto error;
  939. }
  940. }
  941. ctx->last = subcase->number;
  942. /* succeed or fault in only one way? */
  943. if (status == subcase->expected)
  944. status = 0;
  945. /* async unlink for cleanup? */
  946. else if (status != -ECONNRESET) {
  947. /* some faults are allowed, not required */
  948. if (subcase->expected > 0 && (
  949. ((status == -subcase->expected /* happened */
  950. || status == 0)))) /* didn't */
  951. status = 0;
  952. /* sometimes more than one fault is allowed */
  953. else if (subcase->number == 12 && status == -EPIPE)
  954. status = 0;
  955. else
  956. ERROR(ctx->dev, "subtest %d error, status %d\n",
  957. subcase->number, status);
  958. }
  959. /* unexpected status codes mean errors; ideally, in hardware */
  960. if (status) {
  961. error:
  962. if (ctx->status == 0) {
  963. int i;
  964. ctx->status = status;
  965. ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
  966. "%d left, subcase %d, len %d/%d\n",
  967. reqp->bRequestType, reqp->bRequest,
  968. status, ctx->count, subcase->number,
  969. urb->actual_length,
  970. urb->transfer_buffer_length);
  971. /* FIXME this "unlink everything" exit route should
  972. * be a separate test case.
  973. */
  974. /* unlink whatever's still pending */
  975. for (i = 1; i < ctx->param->sglen; i++) {
  976. struct urb *u = ctx->urb[
  977. (i + subcase->number)
  978. % ctx->param->sglen];
  979. if (u == urb || !u->dev)
  980. continue;
  981. spin_unlock(&ctx->lock);
  982. status = usb_unlink_urb(u);
  983. spin_lock(&ctx->lock);
  984. switch (status) {
  985. case -EINPROGRESS:
  986. case -EBUSY:
  987. case -EIDRM:
  988. continue;
  989. default:
  990. ERROR(ctx->dev, "urb unlink --> %d\n",
  991. status);
  992. }
  993. }
  994. status = ctx->status;
  995. }
  996. }
  997. /* resubmit if we need to, else mark this as done */
  998. if ((status == 0) && (ctx->pending < ctx->count)) {
  999. status = usb_submit_urb(urb, GFP_ATOMIC);
  1000. if (status != 0) {
  1001. ERROR(ctx->dev,
  1002. "can't resubmit ctrl %02x.%02x, err %d\n",
  1003. reqp->bRequestType, reqp->bRequest, status);
  1004. urb->dev = NULL;
  1005. } else
  1006. ctx->pending++;
  1007. } else
  1008. urb->dev = NULL;
  1009. /* signal completion when nothing's queued */
  1010. if (ctx->pending == 0)
  1011. complete(&ctx->complete);
  1012. spin_unlock(&ctx->lock);
  1013. }
  1014. static int
  1015. test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param)
  1016. {
  1017. struct usb_device *udev = testdev_to_usbdev(dev);
  1018. struct urb **urb;
  1019. struct ctrl_ctx context;
  1020. int i;
  1021. if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
  1022. return -EOPNOTSUPP;
  1023. spin_lock_init(&context.lock);
  1024. context.dev = dev;
  1025. init_completion(&context.complete);
  1026. context.count = param->sglen * param->iterations;
  1027. context.pending = 0;
  1028. context.status = -ENOMEM;
  1029. context.param = param;
  1030. context.last = -1;
  1031. /* allocate and init the urbs we'll queue.
  1032. * as with bulk/intr sglists, sglen is the queue depth; it also
  1033. * controls which subtests run (more tests than sglen) or rerun.
  1034. */
  1035. urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
  1036. if (!urb)
  1037. return -ENOMEM;
  1038. for (i = 0; i < param->sglen; i++) {
  1039. int pipe = usb_rcvctrlpipe(udev, 0);
  1040. unsigned len;
  1041. struct urb *u;
  1042. struct usb_ctrlrequest req;
  1043. struct subcase *reqp;
  1044. /* sign of this variable means:
  1045. * -: tested code must return this (negative) error code
  1046. * +: tested code may return this (negative too) error code
  1047. */
  1048. int expected = 0;
  1049. /* requests here are mostly expected to succeed on any
  1050. * device, but some are chosen to trigger protocol stalls
  1051. * or short reads.
  1052. */
  1053. memset(&req, 0, sizeof(req));
  1054. req.bRequest = USB_REQ_GET_DESCRIPTOR;
  1055. req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
  1056. switch (i % NUM_SUBCASES) {
  1057. case 0: /* get device descriptor */
  1058. req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
  1059. len = sizeof(struct usb_device_descriptor);
  1060. break;
  1061. case 1: /* get first config descriptor (only) */
  1062. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  1063. len = sizeof(struct usb_config_descriptor);
  1064. break;
  1065. case 2: /* get altsetting (OFTEN STALLS) */
  1066. req.bRequest = USB_REQ_GET_INTERFACE;
  1067. req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
  1068. /* index = 0 means first interface */
  1069. len = 1;
  1070. expected = EPIPE;
  1071. break;
  1072. case 3: /* get interface status */
  1073. req.bRequest = USB_REQ_GET_STATUS;
  1074. req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
  1075. /* interface 0 */
  1076. len = 2;
  1077. break;
  1078. case 4: /* get device status */
  1079. req.bRequest = USB_REQ_GET_STATUS;
  1080. req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
  1081. len = 2;
  1082. break;
  1083. case 5: /* get device qualifier (MAY STALL) */
  1084. req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
  1085. len = sizeof(struct usb_qualifier_descriptor);
  1086. if (udev->speed != USB_SPEED_HIGH)
  1087. expected = EPIPE;
  1088. break;
  1089. case 6: /* get first config descriptor, plus interface */
  1090. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  1091. len = sizeof(struct usb_config_descriptor);
  1092. len += sizeof(struct usb_interface_descriptor);
  1093. break;
  1094. case 7: /* get interface descriptor (ALWAYS STALLS) */
  1095. req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
  1096. /* interface == 0 */
  1097. len = sizeof(struct usb_interface_descriptor);
  1098. expected = -EPIPE;
  1099. break;
  1100. /* NOTE: two consecutive stalls in the queue here.
  1101. * that tests fault recovery a bit more aggressively. */
  1102. case 8: /* clear endpoint halt (MAY STALL) */
  1103. req.bRequest = USB_REQ_CLEAR_FEATURE;
  1104. req.bRequestType = USB_RECIP_ENDPOINT;
  1105. /* wValue 0 == ep halt */
  1106. /* wIndex 0 == ep0 (shouldn't halt!) */
  1107. len = 0;
  1108. pipe = usb_sndctrlpipe(udev, 0);
  1109. expected = EPIPE;
  1110. break;
  1111. case 9: /* get endpoint status */
  1112. req.bRequest = USB_REQ_GET_STATUS;
  1113. req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
  1114. /* endpoint 0 */
  1115. len = 2;
  1116. break;
  1117. case 10: /* trigger short read (EREMOTEIO) */
  1118. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  1119. len = 1024;
  1120. expected = -EREMOTEIO;
  1121. break;
  1122. /* NOTE: two consecutive _different_ faults in the queue. */
  1123. case 11: /* get endpoint descriptor (ALWAYS STALLS) */
  1124. req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
  1125. /* endpoint == 0 */
  1126. len = sizeof(struct usb_interface_descriptor);
  1127. expected = EPIPE;
  1128. break;
  1129. /* NOTE: sometimes even a third fault in the queue! */
  1130. case 12: /* get string 0 descriptor (MAY STALL) */
  1131. req.wValue = cpu_to_le16(USB_DT_STRING << 8);
  1132. /* string == 0, for language IDs */
  1133. len = sizeof(struct usb_interface_descriptor);
  1134. /* may succeed when > 4 languages */
  1135. expected = EREMOTEIO; /* or EPIPE, if no strings */
  1136. break;
  1137. case 13: /* short read, resembling case 10 */
  1138. req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
  1139. /* last data packet "should" be DATA1, not DATA0 */
  1140. if (udev->speed == USB_SPEED_SUPER)
  1141. len = 1024 - 512;
  1142. else
  1143. len = 1024 - udev->descriptor.bMaxPacketSize0;
  1144. expected = -EREMOTEIO;
  1145. break;
  1146. case 14: /* short read; try to fill the last packet */
  1147. req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
  1148. /* device descriptor size == 18 bytes */
  1149. len = udev->descriptor.bMaxPacketSize0;
  1150. if (udev->speed == USB_SPEED_SUPER)
  1151. len = 512;
  1152. switch (len) {
  1153. case 8:
  1154. len = 24;
  1155. break;
  1156. case 16:
  1157. len = 32;
  1158. break;
  1159. }
  1160. expected = -EREMOTEIO;
  1161. break;
  1162. case 15:
  1163. req.wValue = cpu_to_le16(USB_DT_BOS << 8);
  1164. if (udev->bos)
  1165. len = le16_to_cpu(udev->bos->desc->wTotalLength);
  1166. else
  1167. len = sizeof(struct usb_bos_descriptor);
  1168. if (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0201)
  1169. expected = -EPIPE;
  1170. break;
  1171. default:
  1172. ERROR(dev, "bogus number of ctrl queue testcases!\n");
  1173. context.status = -EINVAL;
  1174. goto cleanup;
  1175. }
  1176. req.wLength = cpu_to_le16(len);
  1177. urb[i] = u = simple_alloc_urb(udev, pipe, len, 0);
  1178. if (!u)
  1179. goto cleanup;
  1180. reqp = kmalloc(sizeof(*reqp), GFP_KERNEL);
  1181. if (!reqp)
  1182. goto cleanup;
  1183. reqp->setup = req;
  1184. reqp->number = i % NUM_SUBCASES;
  1185. reqp->expected = expected;
  1186. u->setup_packet = (char *) &reqp->setup;
  1187. u->context = &context;
  1188. u->complete = ctrl_complete;
  1189. }
  1190. /* queue the urbs */
  1191. context.urb = urb;
  1192. spin_lock_irq(&context.lock);
  1193. for (i = 0; i < param->sglen; i++) {
  1194. context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
  1195. if (context.status != 0) {
  1196. ERROR(dev, "can't submit urb[%d], status %d\n",
  1197. i, context.status);
  1198. context.count = context.pending;
  1199. break;
  1200. }
  1201. context.pending++;
  1202. }
  1203. spin_unlock_irq(&context.lock);
  1204. /* FIXME set timer and time out; provide a disconnect hook */
  1205. /* wait for the last one to complete */
  1206. if (context.pending > 0)
  1207. wait_for_completion(&context.complete);
  1208. cleanup:
  1209. for (i = 0; i < param->sglen; i++) {
  1210. if (!urb[i])
  1211. continue;
  1212. urb[i]->dev = udev;
  1213. kfree(urb[i]->setup_packet);
  1214. simple_free_urb(urb[i]);
  1215. }
  1216. kfree(urb);
  1217. return context.status;
  1218. }
  1219. #undef NUM_SUBCASES
  1220. /*-------------------------------------------------------------------------*/
  1221. static void unlink1_callback(struct urb *urb)
  1222. {
  1223. int status = urb->status;
  1224. /* we "know" -EPIPE (stall) never happens */
  1225. if (!status)
  1226. status = usb_submit_urb(urb, GFP_ATOMIC);
  1227. if (status) {
  1228. urb->status = status;
  1229. complete(urb->context);
  1230. }
  1231. }
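/* Submit one urb, let it run briefly, then unlink it -- asynchronously via
 * usb_unlink_urb() (retrying while it races completion) or synchronously
 * via usb_kill_urb() -- and verify the expected unlink status.
 */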
  1232. static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
  1233. {
  1234. struct urb *urb;
  1235. struct completion completion;
  1236. int retval = 0;
  1237. init_completion(&completion);
  1238. urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size, 0);
  1239. if (!urb)
  1240. return -ENOMEM;
  1241. urb->context = &completion;
  1242. urb->complete = unlink1_callback;
  1243. if (usb_pipeout(urb->pipe)) {
  1244. simple_fill_buf(urb);
  1245. urb->transfer_flags |= URB_ZERO_PACKET;
  1246. }
  1247. /* keep the endpoint busy. there are lots of hc/hcd-internal
  1248. * states, and testing should get to all of them over time.
  1249. *
  1250. * FIXME want additional tests for when endpoint is STALLing
  1251. * due to errors, or is just NAKing requests.
  1252. */
  1253. retval = usb_submit_urb(urb, GFP_KERNEL);
  1254. if (retval != 0) {
  1255. dev_err(&dev->intf->dev, "submit fail %d\n", retval);
  1256. return retval;
  1257. }
  1258. /* unlinking that should always work. variable delay tests more
  1259. * hcd states and code paths, even with little other system load.
  1260. */
  1261. msleep(jiffies % (2 * INTERRUPT_RATE));
  1262. if (async) {
  1263. while (!completion_done(&completion)) {
  1264. retval = usb_unlink_urb(urb);
  1265. if (retval == 0 && usb_pipein(urb->pipe))
  1266. retval = simple_check_buf(dev, urb);
  1267. switch (retval) {
  1268. case -EBUSY:
  1269. case -EIDRM:
  1270. /* we can't unlink urbs while they're completing
  1271. * or if they've completed, and we haven't
  1272. * resubmitted. "normal" drivers would prevent
  1273. * resubmission, but since we're testing unlink
  1274. * paths, we can't.
  1275. */
  1276. ERROR(dev, "unlink retry\n");
  1277. continue;
  1278. case 0:
  1279. case -EINPROGRESS:
  1280. break;
  1281. default:
  1282. dev_err(&dev->intf->dev,
  1283. "unlink fail %d\n", retval);
  1284. return retval;
  1285. }
  1286. break;
  1287. }
  1288. } else
  1289. usb_kill_urb(urb);
  1290. wait_for_completion(&completion);
  1291. retval = urb->status;
  1292. simple_free_urb(urb);
  1293. if (async)
  1294. return (retval == -ECONNRESET) ? 0 : retval - 1000;
  1295. else
  1296. return (retval == -ENOENT || retval == -EPERM) ?
  1297. 0 : retval - 2000;
  1298. }
  1299. static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
  1300. {
  1301. int retval = 0;
  1302. /* test sync and async paths */
  1303. retval = unlink1(dev, pipe, len, 1);
  1304. if (!retval)
  1305. retval = unlink1(dev, pipe, len, 0);
  1306. return retval;
  1307. }
/*-------------------------------------------------------------------------*/

struct queued_ctx {
	struct completion	complete;
	atomic_t		pending;
	unsigned		num;
	int			status;
	struct urb		**urbs;
};

static void unlink_queued_callback(struct urb *urb)
{
	int			status = urb->status;
	struct queued_ctx	*ctx = urb->context;

	if (ctx->status)
		goto done;
	if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
		if (status == -ECONNRESET)
			goto done;
		/* What error should we report if the URB completed normally? */
	}
	if (status != 0)
		ctx->status = status;

 done:
	if (atomic_dec_and_test(&ctx->pending))
		complete(&ctx->complete);
}
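/* unlink_queued() (case 24) queues 'num' bulk URBs sharing one coherent
 * buffer, then unlinks the URBs at positions num-4 and num-2 while the
 * queue is in flight; those two must complete with -ECONNRESET, the rest
 * normally.  ctx.pending starts at 1 so the completion can't fire before
 * the submit loop finishes; that extra count is dropped after submission.
 */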
  1333. static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
  1334. unsigned size)
  1335. {
  1336. struct queued_ctx ctx;
  1337. struct usb_device *udev = testdev_to_usbdev(dev);
  1338. void *buf;
  1339. dma_addr_t buf_dma;
  1340. int i;
  1341. int retval = -ENOMEM;
  1342. init_completion(&ctx.complete);
  1343. atomic_set(&ctx.pending, 1); /* One more than the actual value */
  1344. ctx.num = num;
  1345. ctx.status = 0;
  1346. buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
  1347. if (!buf)
  1348. return retval;
  1349. memset(buf, 0, size);
  1350. /* Allocate and init the urbs we'll queue */
  1351. ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
  1352. if (!ctx.urbs)
  1353. goto free_buf;
  1354. for (i = 0; i < num; i++) {
  1355. ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
  1356. if (!ctx.urbs[i])
  1357. goto free_urbs;
  1358. usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
  1359. unlink_queued_callback, &ctx);
  1360. ctx.urbs[i]->transfer_dma = buf_dma;
  1361. ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
  1362. if (usb_pipeout(ctx.urbs[i]->pipe)) {
  1363. simple_fill_buf(ctx.urbs[i]);
  1364. ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
  1365. }
  1366. }
  1367. /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
  1368. for (i = 0; i < num; i++) {
  1369. atomic_inc(&ctx.pending);
  1370. retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
  1371. if (retval != 0) {
  1372. dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
  1373. i, retval);
  1374. atomic_dec(&ctx.pending);
  1375. ctx.status = retval;
  1376. break;
  1377. }
  1378. }
  1379. if (i == num) {
  1380. usb_unlink_urb(ctx.urbs[num - 4]);
  1381. usb_unlink_urb(ctx.urbs[num - 2]);
  1382. } else {
  1383. while (--i >= 0)
  1384. usb_unlink_urb(ctx.urbs[i]);
  1385. }
  1386. if (atomic_dec_and_test(&ctx.pending)) /* The extra count */
  1387. complete(&ctx.complete);
  1388. wait_for_completion(&ctx.complete);
  1389. retval = ctx.status;
  1390. free_urbs:
  1391. for (i = 0; i < num; i++)
  1392. usb_free_urb(ctx.urbs[i]);
  1393. kfree(ctx.urbs);
  1394. free_buf:
  1395. usb_free_coherent(udev, size, buf, buf_dma);
  1396. return retval;
  1397. }
  1398. /*-------------------------------------------------------------------------*/
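/* Endpoint halt tests (case 13).  verify_not_halted()/verify_halted()
 * check both views of the halt state: the GET_STATUS(endpoint) halt bit
 * and the actual behavior of I/O on the endpoint (simple_io() should
 * succeed when not halted and fail with -EPIPE while halted).
 */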
static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;
	u16	status;

	/* shouldn't look or act halted */
	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
				ep, retval);
		return retval;
	}
	if (status != 0) {
		ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
		return -EINVAL;
	}
	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
	if (retval != 0)
		return -EINVAL;
	return 0;
}
static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;
	u16	status;

	/* should look and act halted */
	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
				ep, retval);
		return retval;
	}
	if (status != 1) {
		ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
		return -EINVAL;
	}
	retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
	if (retval != -EPIPE)
		return -EINVAL;
	retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
	if (retval != -EPIPE)
		return -EINVAL;
	return 0;
}
static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
	int	retval;

	/* shouldn't look or act halted now */
	retval = verify_not_halted(tdev, ep, urb);
	if (retval < 0)
		return retval;

	/* set halt (protocol test only), verify it worked */
	retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
			USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
			USB_ENDPOINT_HALT, ep,
			NULL, 0, USB_CTRL_SET_TIMEOUT);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
		return retval;
	}

	retval = verify_halted(tdev, ep, urb);
	if (retval < 0) {
		int ret;

		/* clear halt anyways, else further tests will fail */
		ret = usb_clear_halt(urb->dev, urb->pipe);
		if (ret)
			ERROR(tdev, "ep %02x couldn't clear halt, %d\n",
			      ep, ret);
		return retval;
	}

	/* clear halt (tests API + protocol), verify it worked */
	retval = usb_clear_halt(urb->dev, urb->pipe);
	if (retval < 0) {
		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
		return retval;
	}

	retval = verify_not_halted(tdev, ep, urb);
	if (retval < 0)
		return retval;

	/* NOTE: could also verify SET_INTERFACE clear halts ... */

	return 0;
}
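/* halt_simple() runs the set/verify/clear halt sequence on the device's
 * IN and/or OUT test pipes.  The URB is sized for the largest bulk
 * maxpacket: 1024 bytes on SuperSpeed, 512 otherwise.
 */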
static int halt_simple(struct usbtest_dev *dev)
{
	int			ep;
	int			retval = 0;
	struct urb		*urb;
	struct usb_device	*udev = testdev_to_usbdev(dev);

	if (udev->speed == USB_SPEED_SUPER)
		urb = simple_alloc_urb(udev, 0, 1024, 0);
	else
		urb = simple_alloc_urb(udev, 0, 512, 0);
	if (urb == NULL)
		return -ENOMEM;

	if (dev->in_pipe) {
		ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
		urb->pipe = dev->in_pipe;
		retval = test_halt(dev, ep, urb);
		if (retval < 0)
			goto done;
	}

	if (dev->out_pipe) {
		ep = usb_pipeendpoint(dev->out_pipe);
		urb->pipe = dev->out_pipe;
		retval = test_halt(dev, ep, urb);
	}
done:
	simple_free_urb(urb);
	return retval;
}
/*-------------------------------------------------------------------------*/

/* Control OUT tests use the vendor control requests from Intel's
 * USB 2.0 compliance test device:  write a buffer, read it back.
 *
 * Intel's spec only _requires_ that it work for one packet, which
 * is pretty weak.  Some HCDs place limits here; most devices will
 * need to be able to handle more than one OUT data packet.  We'll
 * try whatever we're told to try.
 */
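/* The write step uses vendor request 0x5b (OUT) and the readback uses
 * 0x5c (IN), as below; 'vary' grows the length each iteration, wrapping
 * back to 1 (or 0 outside "realworld" mode), and a nonzero 'offset'
 * deliberately misaligns the buffer for the unaligned variant (case 21).
 */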
  1517. static int ctrl_out(struct usbtest_dev *dev,
  1518. unsigned count, unsigned length, unsigned vary, unsigned offset)
  1519. {
  1520. unsigned i, j, len;
  1521. int retval;
  1522. u8 *buf;
  1523. char *what = "?";
  1524. struct usb_device *udev;
  1525. if (length < 1 || length > 0xffff || vary >= length)
  1526. return -EINVAL;
  1527. buf = kmalloc(length + offset, GFP_KERNEL);
  1528. if (!buf)
  1529. return -ENOMEM;
  1530. buf += offset;
  1531. udev = testdev_to_usbdev(dev);
  1532. len = length;
  1533. retval = 0;
  1534. /* NOTE: hardware might well act differently if we pushed it
  1535. * with lots back-to-back queued requests.
  1536. */
  1537. for (i = 0; i < count; i++) {
  1538. /* write patterned data */
  1539. for (j = 0; j < len; j++)
  1540. buf[j] = (u8)(i + j);
  1541. retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
  1542. 0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
  1543. 0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
  1544. if (retval != len) {
  1545. what = "write";
  1546. if (retval >= 0) {
  1547. ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
  1548. retval, len);
  1549. retval = -EBADMSG;
  1550. }
  1551. break;
  1552. }
  1553. /* read it back -- assuming nothing intervened!! */
  1554. retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
  1555. 0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
  1556. 0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
  1557. if (retval != len) {
  1558. what = "read";
  1559. if (retval >= 0) {
  1560. ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
  1561. retval, len);
  1562. retval = -EBADMSG;
  1563. }
  1564. break;
  1565. }
  1566. /* fail if we can't verify */
  1567. for (j = 0; j < len; j++) {
  1568. if (buf[j] != (u8)(i + j)) {
  1569. ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
  1570. j, buf[j], (u8)(i + j));
  1571. retval = -EBADMSG;
  1572. break;
  1573. }
  1574. }
  1575. if (retval < 0) {
  1576. what = "verify";
  1577. break;
  1578. }
  1579. len += vary;
  1580. /* [real world] the "zero bytes IN" case isn't really used.
  1581. * hardware can easily trip up in this weird case, since its
  1582. * status stage is IN, not OUT like other ep0in transfers.
  1583. */
  1584. if (len > length)
  1585. len = realworld ? 1 : 0;
  1586. }
  1587. if (retval < 0)
  1588. ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
  1589. what, retval, i);
  1590. kfree(buf - offset);
  1591. return retval;
  1592. }
/*-------------------------------------------------------------------------*/

/* ISO/BULK tests ... mimics common usage
 *  - buffer length is split into N packets (mostly maxpacket sized)
 *  - multi-buffers according to sglen
 */

struct transfer_context {
	unsigned		count;
	unsigned		pending;
	spinlock_t		lock;
	struct completion	done;
	int			submit_error;
	unsigned long		errors;
	unsigned long		packet_count;
	struct usbtest_dev	*dev;
	bool			is_iso;
};
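/* complicated_callback() is shared by the ISO and queued-bulk tests: it
 * tallies per-packet and per-URB errors, resubmits the URB while more
 * iterations remain (ctx->count), and completes ctx->done once the last
 * pending URB has retired.
 */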
static void complicated_callback(struct urb *urb)
{
	struct transfer_context	*ctx = urb->context;

	spin_lock(&ctx->lock);
	ctx->count--;

	ctx->packet_count += urb->number_of_packets;
	if (urb->error_count > 0)
		ctx->errors += urb->error_count;
	else if (urb->status != 0)
		ctx->errors += (ctx->is_iso ? urb->number_of_packets : 1);
	else if (urb->actual_length != urb->transfer_buffer_length)
		ctx->errors++;
	else if (check_guard_bytes(ctx->dev, urb) != 0)
		ctx->errors++;

	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
			&& !ctx->submit_error) {
		int status = usb_submit_urb(urb, GFP_ATOMIC);
		switch (status) {
		case 0:
			goto done;
		default:
			dev_err(&ctx->dev->intf->dev,
					"resubmit err %d\n",
					status);
			/* FALLTHROUGH */
		case -ENODEV:			/* disconnected */
		case -ESHUTDOWN:		/* endpoint disabled */
			ctx->submit_error = 1;
			break;
		}
	}

	ctx->pending--;
	if (ctx->pending == 0) {
		if (ctx->errors)
			dev_err(&ctx->dev->intf->dev,
				"during the test, %lu errors out of %lu\n",
				ctx->errors, ctx->packet_count);
		complete(&ctx->done);
	}
done:
	spin_unlock(&ctx->lock);
}
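/* iso_alloc_urb() splits 'bytes' across maxpacket-sized ISO packets.  The
 * maxpacket math decodes the high-speed "high bandwidth" encoding: bits
 * 10:0 give the packet size and bits 12:11 the number of additional
 * transactions per microframe.  A nonzero 'offset' prefixes the buffer
 * with guard bytes to catch copy/DMA errors on misaligned transfers.
 */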
static struct urb *iso_alloc_urb(
	struct usb_device		*udev,
	int				pipe,
	struct usb_endpoint_descriptor	*desc,
	long				bytes,
	unsigned			offset
)
{
	struct urb		*urb;
	unsigned		i, maxp, packets;

	if (bytes < 0 || !desc)
		return NULL;
	maxp = 0x7ff & usb_endpoint_maxp(desc);
	maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
	packets = DIV_ROUND_UP(bytes, maxp);

	urb = usb_alloc_urb(packets, GFP_KERNEL);
	if (!urb)
		return urb;
	urb->dev = udev;
	urb->pipe = pipe;

	urb->number_of_packets = packets;
	urb->transfer_buffer_length = bytes;
	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
							GFP_KERNEL,
							&urb->transfer_dma);
	if (!urb->transfer_buffer) {
		usb_free_urb(urb);
		return NULL;
	}
	if (offset) {
		memset(urb->transfer_buffer, GUARD_BYTE, offset);
		urb->transfer_buffer += offset;
		urb->transfer_dma += offset;
	}
	/* For inbound transfers use guard byte so that test fails if
	 * data not correctly copied */
	memset(urb->transfer_buffer,
			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
			bytes);

	for (i = 0; i < packets; i++) {
		/* here, only the last packet will be short */
		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
		bytes -= urb->iso_frame_desc[i].length;
		urb->iso_frame_desc[i].offset = maxp * i;
	}

	urb->complete = complicated_callback;
	/* urb->context = SET BY CALLER */
	urb->interval = 1 << (desc->bInterval - 1);
	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
	return urb;
}
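/* test_queue() drives both the ISO tests (cases 15/16/22/23, desc != NULL)
 * and the bulk performance tests (cases 27/28, desc == NULL): it allocates
 * sglen URBs, submits them all, and lets complicated_callback() keep them
 * queued for the requested number of iterations.  Any submit error fails
 * the test; for ISO, up to 10% of packets may fail before -EIO is
 * returned, since isochronous transfers are expected to fail occasionally.
 */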
  1702. static int
  1703. test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param,
  1704. int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
  1705. {
  1706. struct transfer_context context;
  1707. struct usb_device *udev;
  1708. unsigned i;
  1709. unsigned long packets = 0;
  1710. int status = 0;
  1711. struct urb *urbs[param->sglen];
  1712. memset(&context, 0, sizeof(context));
  1713. context.count = param->iterations * param->sglen;
  1714. context.dev = dev;
  1715. context.is_iso = !!desc;
  1716. init_completion(&context.done);
  1717. spin_lock_init(&context.lock);
  1718. udev = testdev_to_usbdev(dev);
  1719. for (i = 0; i < param->sglen; i++) {
  1720. if (context.is_iso)
  1721. urbs[i] = iso_alloc_urb(udev, pipe, desc,
  1722. param->length, offset);
  1723. else
  1724. urbs[i] = complicated_alloc_urb(udev, pipe,
  1725. param->length, 0);
  1726. if (!urbs[i]) {
  1727. status = -ENOMEM;
  1728. goto fail;
  1729. }
  1730. packets += urbs[i]->number_of_packets;
  1731. urbs[i]->context = &context;
  1732. }
  1733. packets *= param->iterations;
  1734. if (context.is_iso) {
  1735. dev_info(&dev->intf->dev,
  1736. "iso period %d %sframes, wMaxPacket %d, transactions: %d\n",
  1737. 1 << (desc->bInterval - 1),
  1738. (udev->speed == USB_SPEED_HIGH) ? "micro" : "",
  1739. usb_endpoint_maxp(desc) & 0x7ff,
  1740. 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11)));
  1741. dev_info(&dev->intf->dev,
  1742. "total %lu msec (%lu packets)\n",
  1743. (packets * (1 << (desc->bInterval - 1)))
  1744. / ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
  1745. packets);
  1746. }
  1747. spin_lock_irq(&context.lock);
  1748. for (i = 0; i < param->sglen; i++) {
  1749. ++context.pending;
  1750. status = usb_submit_urb(urbs[i], GFP_ATOMIC);
  1751. if (status < 0) {
  1752. ERROR(dev, "submit iso[%d], error %d\n", i, status);
  1753. if (i == 0) {
  1754. spin_unlock_irq(&context.lock);
  1755. goto fail;
  1756. }
  1757. simple_free_urb(urbs[i]);
  1758. urbs[i] = NULL;
  1759. context.pending--;
  1760. context.submit_error = 1;
  1761. break;
  1762. }
  1763. }
  1764. spin_unlock_irq(&context.lock);
  1765. wait_for_completion(&context.done);
  1766. for (i = 0; i < param->sglen; i++) {
  1767. if (urbs[i])
  1768. simple_free_urb(urbs[i]);
  1769. }
  1770. /*
  1771. * Isochronous transfers are expected to fail sometimes. As an
  1772. * arbitrary limit, we will report an error if any submissions
  1773. * fail or if the transfer failure rate is > 10%.
  1774. */
  1775. if (status != 0)
  1776. ;
  1777. else if (context.submit_error)
  1778. status = -EACCES;
  1779. else if (context.errors >
  1780. (context.is_iso ? context.packet_count / 10 : 0))
  1781. status = -EIO;
  1782. return status;
  1783. fail:
  1784. for (i = 0; i < param->sglen; i++) {
  1785. if (urbs[i])
  1786. simple_free_urb(urbs[i]);
  1787. }
  1788. return status;
  1789. }
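/* Cases 17-20: the same simple bulk I/O, but with the transfer buffer
 * deliberately offset by one byte so HCD buffer alignment and DMA mapping
 * paths (core-mapped, or premapped via URB_NO_TRANSFER_DMA_MAP) get
 * exercised.
 */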
static int test_unaligned_bulk(
	struct usbtest_dev *tdev,
	int pipe,
	unsigned length,
	int iterations,
	unsigned transfer_flags,
	const char *label)
{
	int retval;
	struct urb *urb = usbtest_alloc_urb(testdev_to_usbdev(tdev),
			pipe, length, transfer_flags, 1, 0, simple_callback);

	if (!urb)
		return -ENOMEM;

	retval = simple_io(tdev, urb, iterations, 0, 0, label);
	simple_free_urb(urb);
	return retval;
}
  1807. /* Run tests. */
  1808. static int
  1809. usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param)
  1810. {
  1811. struct usbtest_dev *dev = usb_get_intfdata(intf);
  1812. struct usb_device *udev = testdev_to_usbdev(dev);
  1813. struct urb *urb;
  1814. struct scatterlist *sg;
  1815. struct usb_sg_request req;
  1816. unsigned i;
  1817. int retval = -EOPNOTSUPP;
  1818. if (param->iterations <= 0)
  1819. return -EINVAL;
  1820. /*
  1821. * Just a bunch of test cases that every HCD is expected to handle.
  1822. *
  1823. * Some may need specific firmware, though it'd be good to have
  1824. * one firmware image to handle all the test cases.
  1825. *
  1826. * FIXME add more tests! cancel requests, verify the data, control
  1827. * queueing, concurrent read+write threads, and so on.
  1828. */
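	/* A test that doesn't apply to this device/firmware simply breaks
	 * out of the switch without touching retval, so it reports the
	 * default -EOPNOTSUPP.
	 */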
  1829. switch (param->test_num) {
  1830. case 0:
  1831. dev_info(&intf->dev, "TEST 0: NOP\n");
  1832. retval = 0;
  1833. break;
  1834. /* Simple non-queued bulk I/O tests */
  1835. case 1:
  1836. if (dev->out_pipe == 0)
  1837. break;
  1838. dev_info(&intf->dev,
  1839. "TEST 1: write %d bytes %u times\n",
  1840. param->length, param->iterations);
  1841. urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0);
  1842. if (!urb) {
  1843. retval = -ENOMEM;
  1844. break;
  1845. }
  1846. /* FIRMWARE: bulk sink (maybe accepts short writes) */
  1847. retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
  1848. simple_free_urb(urb);
  1849. break;
  1850. case 2:
  1851. if (dev->in_pipe == 0)
  1852. break;
  1853. dev_info(&intf->dev,
  1854. "TEST 2: read %d bytes %u times\n",
  1855. param->length, param->iterations);
  1856. urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0);
  1857. if (!urb) {
  1858. retval = -ENOMEM;
  1859. break;
  1860. }
  1861. /* FIRMWARE: bulk source (maybe generates short writes) */
  1862. retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
  1863. simple_free_urb(urb);
  1864. break;
  1865. case 3:
  1866. if (dev->out_pipe == 0 || param->vary == 0)
  1867. break;
  1868. dev_info(&intf->dev,
  1869. "TEST 3: write/%d 0..%d bytes %u times\n",
  1870. param->vary, param->length, param->iterations);
  1871. urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0);
  1872. if (!urb) {
  1873. retval = -ENOMEM;
  1874. break;
  1875. }
  1876. /* FIRMWARE: bulk sink (maybe accepts short writes) */
  1877. retval = simple_io(dev, urb, param->iterations, param->vary,
  1878. 0, "test3");
  1879. simple_free_urb(urb);
  1880. break;
  1881. case 4:
  1882. if (dev->in_pipe == 0 || param->vary == 0)
  1883. break;
  1884. dev_info(&intf->dev,
  1885. "TEST 4: read/%d 0..%d bytes %u times\n",
  1886. param->vary, param->length, param->iterations);
  1887. urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0);
  1888. if (!urb) {
  1889. retval = -ENOMEM;
  1890. break;
  1891. }
  1892. /* FIRMWARE: bulk source (maybe generates short writes) */
  1893. retval = simple_io(dev, urb, param->iterations, param->vary,
  1894. 0, "test4");
  1895. simple_free_urb(urb);
  1896. break;
  1897. /* Queued bulk I/O tests */
  1898. case 5:
  1899. if (dev->out_pipe == 0 || param->sglen == 0)
  1900. break;
  1901. dev_info(&intf->dev,
  1902. "TEST 5: write %d sglists %d entries of %d bytes\n",
  1903. param->iterations,
  1904. param->sglen, param->length);
  1905. sg = alloc_sglist(param->sglen, param->length,
  1906. 0, dev, dev->out_pipe);
  1907. if (!sg) {
  1908. retval = -ENOMEM;
  1909. break;
  1910. }
  1911. /* FIRMWARE: bulk sink (maybe accepts short writes) */
  1912. retval = perform_sglist(dev, param->iterations, dev->out_pipe,
  1913. &req, sg, param->sglen);
  1914. free_sglist(sg, param->sglen);
  1915. break;
  1916. case 6:
  1917. if (dev->in_pipe == 0 || param->sglen == 0)
  1918. break;
  1919. dev_info(&intf->dev,
  1920. "TEST 6: read %d sglists %d entries of %d bytes\n",
  1921. param->iterations,
  1922. param->sglen, param->length);
  1923. sg = alloc_sglist(param->sglen, param->length,
  1924. 0, dev, dev->in_pipe);
  1925. if (!sg) {
  1926. retval = -ENOMEM;
  1927. break;
  1928. }
  1929. /* FIRMWARE: bulk source (maybe generates short writes) */
  1930. retval = perform_sglist(dev, param->iterations, dev->in_pipe,
  1931. &req, sg, param->sglen);
  1932. free_sglist(sg, param->sglen);
  1933. break;
  1934. case 7:
  1935. if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
  1936. break;
  1937. dev_info(&intf->dev,
  1938. "TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
  1939. param->vary, param->iterations,
  1940. param->sglen, param->length);
  1941. sg = alloc_sglist(param->sglen, param->length,
  1942. param->vary, dev, dev->out_pipe);
  1943. if (!sg) {
  1944. retval = -ENOMEM;
  1945. break;
  1946. }
  1947. /* FIRMWARE: bulk sink (maybe accepts short writes) */
  1948. retval = perform_sglist(dev, param->iterations, dev->out_pipe,
  1949. &req, sg, param->sglen);
  1950. free_sglist(sg, param->sglen);
  1951. break;
  1952. case 8:
  1953. if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
  1954. break;
  1955. dev_info(&intf->dev,
  1956. "TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
  1957. param->vary, param->iterations,
  1958. param->sglen, param->length);
  1959. sg = alloc_sglist(param->sglen, param->length,
  1960. param->vary, dev, dev->in_pipe);
  1961. if (!sg) {
  1962. retval = -ENOMEM;
  1963. break;
  1964. }
  1965. /* FIRMWARE: bulk source (maybe generates short writes) */
  1966. retval = perform_sglist(dev, param->iterations, dev->in_pipe,
  1967. &req, sg, param->sglen);
  1968. free_sglist(sg, param->sglen);
  1969. break;
  1970. /* non-queued sanity tests for control (chapter 9 subset) */
  1971. case 9:
  1972. retval = 0;
  1973. dev_info(&intf->dev,
  1974. "TEST 9: ch9 (subset) control tests, %d times\n",
  1975. param->iterations);
  1976. for (i = param->iterations; retval == 0 && i--; /* NOP */)
  1977. retval = ch9_postconfig(dev);
  1978. if (retval)
  1979. dev_err(&intf->dev, "ch9 subset failed, "
  1980. "iterations left %d\n", i);
  1981. break;
  1982. /* queued control messaging */
  1983. case 10:
  1984. retval = 0;
  1985. dev_info(&intf->dev,
  1986. "TEST 10: queue %d control calls, %d times\n",
  1987. param->sglen,
  1988. param->iterations);
  1989. retval = test_ctrl_queue(dev, param);
  1990. break;
  1991. /* simple non-queued unlinks (ring with one urb) */
  1992. case 11:
  1993. if (dev->in_pipe == 0 || !param->length)
  1994. break;
  1995. retval = 0;
  1996. dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
  1997. param->iterations, param->length);
  1998. for (i = param->iterations; retval == 0 && i--; /* NOP */)
  1999. retval = unlink_simple(dev, dev->in_pipe,
  2000. param->length);
  2001. if (retval)
  2002. dev_err(&intf->dev, "unlink reads failed %d, "
  2003. "iterations left %d\n", retval, i);
  2004. break;
  2005. case 12:
  2006. if (dev->out_pipe == 0 || !param->length)
  2007. break;
  2008. retval = 0;
  2009. dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
  2010. param->iterations, param->length);
  2011. for (i = param->iterations; retval == 0 && i--; /* NOP */)
  2012. retval = unlink_simple(dev, dev->out_pipe,
  2013. param->length);
  2014. if (retval)
  2015. dev_err(&intf->dev, "unlink writes failed %d, "
  2016. "iterations left %d\n", retval, i);
  2017. break;
  2018. /* ep halt tests */
  2019. case 13:
  2020. if (dev->out_pipe == 0 && dev->in_pipe == 0)
  2021. break;
  2022. retval = 0;
  2023. dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
  2024. param->iterations);
  2025. for (i = param->iterations; retval == 0 && i--; /* NOP */)
  2026. retval = halt_simple(dev);
  2027. if (retval)
  2028. ERROR(dev, "halts failed, iterations left %d\n", i);
  2029. break;
  2030. /* control write tests */
  2031. case 14:
  2032. if (!dev->info->ctrl_out)
  2033. break;
  2034. dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
  2035. param->iterations,
  2036. realworld ? 1 : 0, param->length,
  2037. param->vary);
  2038. retval = ctrl_out(dev, param->iterations,
  2039. param->length, param->vary, 0);
  2040. break;
  2041. /* iso write tests */
  2042. case 15:
  2043. if (dev->out_iso_pipe == 0 || param->sglen == 0)
  2044. break;
  2045. dev_info(&intf->dev,
  2046. "TEST 15: write %d iso, %d entries of %d bytes\n",
  2047. param->iterations,
  2048. param->sglen, param->length);
  2049. /* FIRMWARE: iso sink */
  2050. retval = test_queue(dev, param,
  2051. dev->out_iso_pipe, dev->iso_out, 0);
  2052. break;
  2053. /* iso read tests */
  2054. case 16:
  2055. if (dev->in_iso_pipe == 0 || param->sglen == 0)
  2056. break;
  2057. dev_info(&intf->dev,
  2058. "TEST 16: read %d iso, %d entries of %d bytes\n",
  2059. param->iterations,
  2060. param->sglen, param->length);
  2061. /* FIRMWARE: iso source */
  2062. retval = test_queue(dev, param,
  2063. dev->in_iso_pipe, dev->iso_in, 0);
  2064. break;
  2065. /* FIXME scatterlist cancel (needs helper thread) */
  2066. /* Tests for bulk I/O using DMA mapping by core and odd address */
  2067. case 17:
  2068. if (dev->out_pipe == 0)
  2069. break;
  2070. dev_info(&intf->dev,
  2071. "TEST 17: write odd addr %d bytes %u times core map\n",
  2072. param->length, param->iterations);
  2073. retval = test_unaligned_bulk(
  2074. dev, dev->out_pipe,
  2075. param->length, param->iterations,
  2076. 0, "test17");
  2077. break;
  2078. case 18:
  2079. if (dev->in_pipe == 0)
  2080. break;
  2081. dev_info(&intf->dev,
  2082. "TEST 18: read odd addr %d bytes %u times core map\n",
  2083. param->length, param->iterations);
  2084. retval = test_unaligned_bulk(
  2085. dev, dev->in_pipe,
  2086. param->length, param->iterations,
  2087. 0, "test18");
  2088. break;
  2089. /* Tests for bulk I/O using premapped coherent buffer and odd address */
  2090. case 19:
  2091. if (dev->out_pipe == 0)
  2092. break;
  2093. dev_info(&intf->dev,
  2094. "TEST 19: write odd addr %d bytes %u times premapped\n",
  2095. param->length, param->iterations);
  2096. retval = test_unaligned_bulk(
  2097. dev, dev->out_pipe,
  2098. param->length, param->iterations,
  2099. URB_NO_TRANSFER_DMA_MAP, "test19");
  2100. break;
  2101. case 20:
  2102. if (dev->in_pipe == 0)
  2103. break;
  2104. dev_info(&intf->dev,
  2105. "TEST 20: read odd addr %d bytes %u times premapped\n",
  2106. param->length, param->iterations);
  2107. retval = test_unaligned_bulk(
  2108. dev, dev->in_pipe,
  2109. param->length, param->iterations,
  2110. URB_NO_TRANSFER_DMA_MAP, "test20");
  2111. break;
  2112. /* control write tests with unaligned buffer */
  2113. case 21:
  2114. if (!dev->info->ctrl_out)
  2115. break;
  2116. dev_info(&intf->dev,
  2117. "TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
  2118. param->iterations,
  2119. realworld ? 1 : 0, param->length,
  2120. param->vary);
  2121. retval = ctrl_out(dev, param->iterations,
  2122. param->length, param->vary, 1);
  2123. break;
  2124. /* unaligned iso tests */
  2125. case 22:
  2126. if (dev->out_iso_pipe == 0 || param->sglen == 0)
  2127. break;
  2128. dev_info(&intf->dev,
  2129. "TEST 22: write %d iso odd, %d entries of %d bytes\n",
  2130. param->iterations,
  2131. param->sglen, param->length);
  2132. retval = test_queue(dev, param,
  2133. dev->out_iso_pipe, dev->iso_out, 1);
  2134. break;
  2135. case 23:
  2136. if (dev->in_iso_pipe == 0 || param->sglen == 0)
  2137. break;
  2138. dev_info(&intf->dev,
  2139. "TEST 23: read %d iso odd, %d entries of %d bytes\n",
  2140. param->iterations,
  2141. param->sglen, param->length);
  2142. retval = test_queue(dev, param,
  2143. dev->in_iso_pipe, dev->iso_in, 1);
  2144. break;
  2145. /* unlink URBs from a bulk-OUT queue */
  2146. case 24:
  2147. if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
  2148. break;
  2149. retval = 0;
  2150. dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
  2151. "%d %d-byte writes\n",
  2152. param->iterations, param->sglen, param->length);
  2153. for (i = param->iterations; retval == 0 && i > 0; --i) {
  2154. retval = unlink_queued(dev, dev->out_pipe,
  2155. param->sglen, param->length);
  2156. if (retval) {
  2157. dev_err(&intf->dev,
  2158. "unlink queued writes failed %d, "
  2159. "iterations left %d\n", retval, i);
  2160. break;
  2161. }
  2162. }
  2163. break;
  2164. /* Simple non-queued interrupt I/O tests */
  2165. case 25:
  2166. if (dev->out_int_pipe == 0)
  2167. break;
  2168. dev_info(&intf->dev,
  2169. "TEST 25: write %d bytes %u times\n",
  2170. param->length, param->iterations);
  2171. urb = simple_alloc_urb(udev, dev->out_int_pipe, param->length,
  2172. dev->int_out->bInterval);
  2173. if (!urb) {
  2174. retval = -ENOMEM;
  2175. break;
  2176. }
  2177. /* FIRMWARE: interrupt sink (maybe accepts short writes) */
  2178. retval = simple_io(dev, urb, param->iterations, 0, 0, "test25");
  2179. simple_free_urb(urb);
  2180. break;
  2181. case 26:
  2182. if (dev->in_int_pipe == 0)
  2183. break;
  2184. dev_info(&intf->dev,
  2185. "TEST 26: read %d bytes %u times\n",
  2186. param->length, param->iterations);
  2187. urb = simple_alloc_urb(udev, dev->in_int_pipe, param->length,
  2188. dev->int_in->bInterval);
  2189. if (!urb) {
  2190. retval = -ENOMEM;
  2191. break;
  2192. }
  2193. /* FIRMWARE: interrupt source (maybe generates short writes) */
  2194. retval = simple_io(dev, urb, param->iterations, 0, 0, "test26");
  2195. simple_free_urb(urb);
  2196. break;
  2197. case 27:
  2198. /* We do performance test, so ignore data compare */
  2199. if (dev->out_pipe == 0 || param->sglen == 0 || pattern != 0)
  2200. break;
  2201. dev_info(&intf->dev,
  2202. "TEST 27: bulk write %dMbytes\n", (param->iterations *
  2203. param->sglen * param->length) / (1024 * 1024));
  2204. retval = test_queue(dev, param,
  2205. dev->out_pipe, NULL, 0);
  2206. break;
  2207. case 28:
  2208. if (dev->in_pipe == 0 || param->sglen == 0 || pattern != 0)
  2209. break;
  2210. dev_info(&intf->dev,
  2211. "TEST 28: bulk read %dMbytes\n", (param->iterations *
  2212. param->sglen * param->length) / (1024 * 1024));
  2213. retval = test_queue(dev, param,
  2214. dev->in_pipe, NULL, 0);
  2215. break;
  2216. }
  2217. return retval;
  2218. }
/*-------------------------------------------------------------------------*/

/* We only have this one interface to user space, through usbfs.
 * User mode code can scan usbfs to find N different devices (maybe on
 * different busses) to use when testing, and allocate one thread per
 * test.  So discovery is simplified, and we have no device naming issues.
 *
 * Don't use these only as stress/load tests.  Use them along with other
 * USB bus activity:  plugging, unplugging, mousing, mp3 playback, video
 * capture, and so on.  Run different tests at different times, in
 * different sequences.  Nothing here should interact with other devices,
 * except indirectly by consuming USB bandwidth and CPU resources for test
 * threads and request completion.  But the only way to know that for sure
 * is to test when HC queues are in use by many devices.
 *
 * WARNING:  Because usbfs grabs udev->dev.sem before calling this ioctl(),
 * it locks out usbcore in certain code paths.  Notably, if you disconnect
 * the device-under-test, hub_wq will block forever waiting for the
 * ioctl to complete ... so that usb_disconnect() can abort the pending
 * urbs and then call usbtest_disconnect().  To abort a test, you're best
 * off just killing the userspace task and waiting for it to exit.
 */
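/* usbtest_ioctl() accepts both the 32-bit and 64-bit layouts of the test
 * parameters, normalizes them to usbtest_param_32 for usbtest_do_ioctl(),
 * and reports the wall-clock duration of the run back in whichever layout
 * the caller used.  The usual user-space driver for this interface is
 * tools/usb/testusb.c in the kernel source tree, which issues these
 * requests through usbfs (USBDEVFS_IOCTL).
 */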
  2240. static int
  2241. usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
  2242. {
  2243. struct usbtest_dev *dev = usb_get_intfdata(intf);
  2244. struct usbtest_param_64 *param_64 = buf;
  2245. struct usbtest_param_32 temp;
  2246. struct usbtest_param_32 *param_32 = buf;
  2247. struct timespec64 start;
  2248. struct timespec64 end;
  2249. struct timespec64 duration;
  2250. int retval = -EOPNOTSUPP;
  2251. /* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
  2252. pattern = mod_pattern;
  2253. if (mutex_lock_interruptible(&dev->lock))
  2254. return -ERESTARTSYS;
  2255. /* FIXME: What if a system sleep starts while a test is running? */
  2256. /* some devices, like ez-usb default devices, need a non-default
  2257. * altsetting to have any active endpoints. some tests change
  2258. * altsettings; force a default so most tests don't need to check.
  2259. */
  2260. if (dev->info->alt >= 0) {
  2261. if (intf->altsetting->desc.bInterfaceNumber) {
  2262. retval = -ENODEV;
  2263. goto free_mutex;
  2264. }
  2265. retval = set_altsetting(dev, dev->info->alt);
  2266. if (retval) {
  2267. dev_err(&intf->dev,
  2268. "set altsetting to %d failed, %d\n",
  2269. dev->info->alt, retval);
  2270. goto free_mutex;
  2271. }
  2272. }
  2273. switch (code) {
  2274. case USBTEST_REQUEST_64:
  2275. temp.test_num = param_64->test_num;
  2276. temp.iterations = param_64->iterations;
  2277. temp.length = param_64->length;
  2278. temp.sglen = param_64->sglen;
  2279. temp.vary = param_64->vary;
  2280. param_32 = &temp;
  2281. break;
  2282. case USBTEST_REQUEST_32:
  2283. break;
  2284. default:
  2285. retval = -EOPNOTSUPP;
  2286. goto free_mutex;
  2287. }
  2288. ktime_get_ts64(&start);
  2289. retval = usbtest_do_ioctl(intf, param_32);
  2290. if (retval)
  2291. goto free_mutex;
  2292. ktime_get_ts64(&end);
  2293. duration = timespec64_sub(end, start);
  2294. temp.duration_sec = duration.tv_sec;
  2295. temp.duration_usec = duration.tv_nsec/NSEC_PER_USEC;
  2296. switch (code) {
  2297. case USBTEST_REQUEST_32:
  2298. param_32->duration_sec = temp.duration_sec;
  2299. param_32->duration_usec = temp.duration_usec;
  2300. break;
  2301. case USBTEST_REQUEST_64:
  2302. param_64->duration_sec = temp.duration_sec;
  2303. param_64->duration_usec = temp.duration_usec;
  2304. break;
  2305. }
  2306. free_mutex:
  2307. mutex_unlock(&dev->lock);
  2308. return retval;
  2309. }
/*-------------------------------------------------------------------------*/

static unsigned force_interrupt;
module_param(force_interrupt, uint, 0);
MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");

#ifdef GENERIC
static unsigned short vendor;
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");

static unsigned short product;
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "product code (from vendor)");
#endif
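/* Probe binds to entries from the id table (or, with GENERIC, to devices
 * named by the vendor/product module parameters), allocates the per-device
 * state and scratch buffer, and picks test pipes: interrupt endpoints for
 * low-speed devices or when force_interrupt is set; otherwise endpoints
 * discovered by get_endpoints() when autoconf (or an altsetting override)
 * applies, else the fixed ep_in/ep_out numbers from the info struct.
 */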
  2322. static int
  2323. usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
  2324. {
  2325. struct usb_device *udev;
  2326. struct usbtest_dev *dev;
  2327. struct usbtest_info *info;
  2328. char *rtest, *wtest;
  2329. char *irtest, *iwtest;
  2330. char *intrtest, *intwtest;
  2331. udev = interface_to_usbdev(intf);
  2332. #ifdef GENERIC
  2333. /* specify devices by module parameters? */
  2334. if (id->match_flags == 0) {
  2335. /* vendor match required, product match optional */
  2336. if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
  2337. return -ENODEV;
  2338. if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
  2339. return -ENODEV;
  2340. dev_info(&intf->dev, "matched module params, "
  2341. "vend=0x%04x prod=0x%04x\n",
  2342. le16_to_cpu(udev->descriptor.idVendor),
  2343. le16_to_cpu(udev->descriptor.idProduct));
  2344. }
  2345. #endif
  2346. dev = kzalloc(sizeof(*dev), GFP_KERNEL);
  2347. if (!dev)
  2348. return -ENOMEM;
  2349. info = (struct usbtest_info *) id->driver_info;
  2350. dev->info = info;
  2351. mutex_init(&dev->lock);
  2352. dev->intf = intf;
  2353. /* cacheline-aligned scratch for i/o */
  2354. dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
  2355. if (dev->buf == NULL) {
  2356. kfree(dev);
  2357. return -ENOMEM;
  2358. }
  2359. /* NOTE this doesn't yet test the handful of difference that are
  2360. * visible with high speed interrupts: bigger maxpacket (1K) and
  2361. * "high bandwidth" modes (up to 3 packets/uframe).
  2362. */
  2363. rtest = wtest = "";
  2364. irtest = iwtest = "";
  2365. intrtest = intwtest = "";
  2366. if (force_interrupt || udev->speed == USB_SPEED_LOW) {
  2367. if (info->ep_in) {
  2368. dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
  2369. rtest = " intr-in";
  2370. }
  2371. if (info->ep_out) {
  2372. dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
  2373. wtest = " intr-out";
  2374. }
  2375. } else {
  2376. if (override_alt >= 0 || info->autoconf) {
  2377. int status;
  2378. status = get_endpoints(dev, intf);
  2379. if (status < 0) {
  2380. WARNING(dev, "couldn't get endpoints, %d\n",
  2381. status);
  2382. kfree(dev->buf);
  2383. kfree(dev);
  2384. return status;
  2385. }
  2386. /* may find bulk or ISO pipes */
  2387. } else {
  2388. if (info->ep_in)
  2389. dev->in_pipe = usb_rcvbulkpipe(udev,
  2390. info->ep_in);
  2391. if (info->ep_out)
  2392. dev->out_pipe = usb_sndbulkpipe(udev,
  2393. info->ep_out);
  2394. }
  2395. if (dev->in_pipe)
  2396. rtest = " bulk-in";
  2397. if (dev->out_pipe)
  2398. wtest = " bulk-out";
  2399. if (dev->in_iso_pipe)
  2400. irtest = " iso-in";
  2401. if (dev->out_iso_pipe)
  2402. iwtest = " iso-out";
  2403. if (dev->in_int_pipe)
  2404. intrtest = " int-in";
  2405. if (dev->out_int_pipe)
  2406. intwtest = " int-out";
  2407. }
  2408. usb_set_intfdata(intf, dev);
  2409. dev_info(&intf->dev, "%s\n", info->name);
  2410. dev_info(&intf->dev, "%s {control%s%s%s%s%s%s%s} tests%s\n",
  2411. usb_speed_string(udev->speed),
  2412. info->ctrl_out ? " in/out" : "",
  2413. rtest, wtest,
  2414. irtest, iwtest,
  2415. intrtest, intwtest,
  2416. info->alt >= 0 ? " (+alt)" : "");
  2417. return 0;
  2418. }
static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
{
	return 0;
}

static int usbtest_resume(struct usb_interface *intf)
{
	return 0;
}

static void usbtest_disconnect(struct usb_interface *intf)
{
	struct usbtest_dev	*dev = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	dev_dbg(&intf->dev, "disconnect\n");
	kfree(dev);
}
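/* The empty suspend/resume methods above presumably exist so the USB core
 * doesn't have to unbind and rebind the interface around system sleep;
 * the driver itself has no device state to save or restore.
 */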
  2434. /* Basic testing only needs a device that can source or sink bulk traffic.
  2435. * Any device can test control transfers (default with GENERIC binding).
  2436. *
  2437. * Several entries work with the default EP0 implementation that's built
  2438. * into EZ-USB chips. There's a default vendor ID which can be overridden
  2439. * by (very) small config EEPROMS, but otherwise all these devices act
  2440. * identically until firmware is loaded: only EP0 works. It turns out
  2441. * to be easy to make other endpoints work, without modifying that EP0
  2442. * behavior. For now, we expect that kind of firmware.
  2443. */
  2444. /* an21xx or fx versions of ez-usb */
  2445. static struct usbtest_info ez1_info = {
  2446. .name = "EZ-USB device",
  2447. .ep_in = 2,
  2448. .ep_out = 2,
  2449. .alt = 1,
  2450. };
  2451. /* fx2 version of ez-usb */
  2452. static struct usbtest_info ez2_info = {
  2453. .name = "FX2 device",
  2454. .ep_in = 6,
  2455. .ep_out = 2,
  2456. .alt = 1,
  2457. };
/* ezusb family device with dedicated usb test firmware */
  2460. static struct usbtest_info fw_info = {
  2461. .name = "usb test device",
  2462. .ep_in = 2,
  2463. .ep_out = 2,
  2464. .alt = 1,
  2465. .autoconf = 1, /* iso and ctrl_out need autoconf */
  2466. .ctrl_out = 1,
  2467. .iso = 1, /* iso_ep's are #8 in/out */
  2468. };
  2469. /* peripheral running Linux and 'zero.c' test firmware, or
  2470. * its user-mode cousin. different versions of this use
  2471. * different hardware with the same vendor/product codes.
  2472. * host side MUST rely on the endpoint descriptors.
  2473. */
  2474. static struct usbtest_info gz_info = {
  2475. .name = "Linux gadget zero",
  2476. .autoconf = 1,
  2477. .ctrl_out = 1,
  2478. .iso = 1,
  2479. .intr = 1,
  2480. .alt = 0,
  2481. };
  2482. static struct usbtest_info um_info = {
  2483. .name = "Linux user mode test driver",
  2484. .autoconf = 1,
  2485. .alt = -1,
  2486. };
  2487. static struct usbtest_info um2_info = {
  2488. .name = "Linux user mode ISO test driver",
  2489. .autoconf = 1,
  2490. .iso = 1,
  2491. .alt = -1,
  2492. };
  2493. #ifdef IBOT2
  2494. /* this is a nice source of high speed bulk data;
  2495. * uses an FX2, with firmware provided in the device
  2496. */
  2497. static struct usbtest_info ibot2_info = {
  2498. .name = "iBOT2 webcam",
  2499. .ep_in = 2,
  2500. .alt = -1,
  2501. };
  2502. #endif
  2503. #ifdef GENERIC
  2504. /* we can use any device to test control traffic */
  2505. static struct usbtest_info generic_info = {
  2506. .name = "Generic USB device",
  2507. .alt = -1,
  2508. };
  2509. #endif
  2510. static const struct usb_device_id id_table[] = {
  2511. /*-------------------------------------------------------------*/
  2512. /* EZ-USB devices which download firmware to replace (or in our
  2513. * case augment) the default device implementation.
  2514. */
  2515. /* generic EZ-USB FX controller */
  2516. { USB_DEVICE(0x0547, 0x2235),
  2517. .driver_info = (unsigned long) &ez1_info,
  2518. },
  2519. /* CY3671 development board with EZ-USB FX */
  2520. { USB_DEVICE(0x0547, 0x0080),
  2521. .driver_info = (unsigned long) &ez1_info,
  2522. },
  2523. /* generic EZ-USB FX2 controller (or development board) */
  2524. { USB_DEVICE(0x04b4, 0x8613),
  2525. .driver_info = (unsigned long) &ez2_info,
  2526. },
  2527. /* re-enumerated usb test device firmware */
  2528. { USB_DEVICE(0xfff0, 0xfff0),
  2529. .driver_info = (unsigned long) &fw_info,
  2530. },
  2531. /* "Gadget Zero" firmware runs under Linux */
  2532. { USB_DEVICE(0x0525, 0xa4a0),
  2533. .driver_info = (unsigned long) &gz_info,
  2534. },
  2535. /* so does a user-mode variant */
  2536. { USB_DEVICE(0x0525, 0xa4a4),
  2537. .driver_info = (unsigned long) &um_info,
  2538. },
  2539. /* ... and a user-mode variant that talks iso */
  2540. { USB_DEVICE(0x0525, 0xa4a3),
  2541. .driver_info = (unsigned long) &um2_info,
  2542. },
  2543. #ifdef KEYSPAN_19Qi
  2544. /* Keyspan 19qi uses an21xx (original EZ-USB) */
  2545. /* this does not coexist with the real Keyspan 19qi driver! */
  2546. { USB_DEVICE(0x06cd, 0x010b),
  2547. .driver_info = (unsigned long) &ez1_info,
  2548. },
  2549. #endif
  2550. /*-------------------------------------------------------------*/
  2551. #ifdef IBOT2
  2552. /* iBOT2 makes a nice source of high speed bulk-in data */
  2553. /* this does not coexist with a real iBOT2 driver! */
  2554. { USB_DEVICE(0x0b62, 0x0059),
  2555. .driver_info = (unsigned long) &ibot2_info,
  2556. },
  2557. #endif
  2558. /*-------------------------------------------------------------*/
  2559. #ifdef GENERIC
  2560. /* module params can specify devices to use for control tests */
  2561. { .driver_info = (unsigned long) &generic_info, },
  2562. #endif
  2563. /*-------------------------------------------------------------*/
  2564. { }
  2565. };
  2566. MODULE_DEVICE_TABLE(usb, id_table);
  2567. static struct usb_driver usbtest_driver = {
  2568. .name = "usbtest",
  2569. .id_table = id_table,
  2570. .probe = usbtest_probe,
  2571. .unlocked_ioctl = usbtest_ioctl,
  2572. .disconnect = usbtest_disconnect,
  2573. .suspend = usbtest_suspend,
  2574. .resume = usbtest_resume,
  2575. };
/*-------------------------------------------------------------------------*/

static int __init usbtest_init(void)
{
#ifdef GENERIC
	if (vendor)
		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
#endif
	return usb_register(&usbtest_driver);
}
module_init(usbtest_init);

static void __exit usbtest_exit(void)
{
	usb_deregister(&usbtest_driver);
}
module_exit(usbtest_exit);

MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
MODULE_LICENSE("GPL");