f_fs.c

  1. /*
  2. * f_fs.c -- user mode file system API for USB composite function controllers
  3. *
  4. * Copyright (C) 2010 Samsung Electronics
  5. * Author: Michal Nazarewicz <mina86@mina86.com>
  6. *
  7. * Based on inode.c (GadgetFS) which was:
  8. * Copyright (C) 2003-2004 David Brownell
  9. * Copyright (C) 2003 Agilent Technologies
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License as published by
  13. * the Free Software Foundation; either version 2 of the License, or
  14. * (at your option) any later version.
  15. */
  16. /* #define DEBUG */
  17. /* #define VERBOSE_DEBUG */
  18. #include <linux/blkdev.h>
  19. #include <linux/pagemap.h>
  20. #include <linux/export.h>
  21. #include <linux/hid.h>
  22. #include <linux/module.h>
  23. #include <asm/unaligned.h>
  24. #include <linux/usb/composite.h>
  25. #include <linux/usb/functionfs.h>
  26. #include <linux/aio.h>
  27. #include <linux/mmu_context.h>
  28. #include <linux/poll.h>
  29. #include "u_fs.h"
  30. #include "u_f.h"
  31. #include "configfs.h"
  32. #define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by an honest dice roll ;) */
  33. /* Reference counter handling */
  34. static void ffs_data_get(struct ffs_data *ffs);
  35. static void ffs_data_put(struct ffs_data *ffs);
  36. /* Creates new ffs_data object. */
  37. static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
  38. /* Opened counter handling. */
  39. static void ffs_data_opened(struct ffs_data *ffs);
  40. static void ffs_data_closed(struct ffs_data *ffs);
  41. /* Called with ffs->mutex held; take over ownership of data. */
  42. static int __must_check
  43. __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
  44. static int __must_check
  45. __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
  46. /* The function structure ***************************************************/
  47. struct ffs_ep;
  48. struct ffs_function {
  49. struct usb_configuration *conf;
  50. struct usb_gadget *gadget;
  51. struct ffs_data *ffs;
  52. struct ffs_ep *eps;
  53. u8 eps_revmap[16];
  54. short *interfaces_nums;
  55. struct usb_function function;
  56. };
  57. static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
  58. {
  59. return container_of(f, struct ffs_function, function);
  60. }
  61. static inline enum ffs_setup_state
  62. ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
  63. {
  64. return (enum ffs_setup_state)
  65. cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
  66. }
  67. static void ffs_func_eps_disable(struct ffs_function *func);
  68. static int __must_check ffs_func_eps_enable(struct ffs_function *func);
  69. static int ffs_func_bind(struct usb_configuration *,
  70. struct usb_function *);
  71. static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
  72. static void ffs_func_disable(struct usb_function *);
  73. static int ffs_func_setup(struct usb_function *,
  74. const struct usb_ctrlrequest *);
  75. static void ffs_func_suspend(struct usb_function *);
  76. static void ffs_func_resume(struct usb_function *);
  77. static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
  78. static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
  79. /* The endpoints structures *************************************************/
  80. struct ffs_ep {
  81. struct usb_ep *ep; /* P: ffs->eps_lock */
  82. struct usb_request *req; /* P: epfile->mutex */
  83. /* [0]: full speed, [1]: high speed, [2]: super speed */
  84. struct usb_endpoint_descriptor *descs[3];
  85. u8 num;
  86. int status; /* P: epfile->mutex */
  87. };
  88. struct ffs_epfile {
  89. /* Protects ep->ep and ep->req. */
  90. struct mutex mutex;
  91. wait_queue_head_t wait;
  92. struct ffs_data *ffs;
  93. struct ffs_ep *ep; /* P: ffs->eps_lock */
  94. struct dentry *dentry;
  95. char name[5];
  96. unsigned char in; /* P: ffs->eps_lock */
  97. unsigned char isoc; /* P: ffs->eps_lock */
  98. unsigned char _pad;
  99. };
  100. /* ffs_io_data structure ***************************************************/
  101. struct ffs_io_data {
  102. bool aio;
  103. bool read;
  104. struct kiocb *kiocb;
  105. const struct iovec *iovec;
  106. unsigned long nr_segs;
  107. char __user *buf;
  108. size_t len;
  109. struct mm_struct *mm;
  110. struct work_struct work;
  111. struct usb_ep *ep;
  112. struct usb_request *req;
  113. };
  114. static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
  115. static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
  116. static struct inode *__must_check
  117. ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
  118. const struct file_operations *fops,
  119. struct dentry **dentry_p);
  120. /* Devices management *******************************************************/
  121. DEFINE_MUTEX(ffs_lock);
  122. EXPORT_SYMBOL_GPL(ffs_lock);
  123. static struct ffs_dev *_ffs_find_dev(const char *name);
  124. static struct ffs_dev *_ffs_alloc_dev(void);
  125. static int _ffs_name_dev(struct ffs_dev *dev, const char *name);
  126. static void _ffs_free_dev(struct ffs_dev *dev);
  127. static void *ffs_acquire_dev(const char *dev_name);
  128. static void ffs_release_dev(struct ffs_data *ffs_data);
  129. static int ffs_ready(struct ffs_data *ffs);
  130. static void ffs_closed(struct ffs_data *ffs);
  131. /* Misc helper functions ****************************************************/
  132. static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
  133. __attribute__((warn_unused_result, nonnull));
  134. static char *ffs_prepare_buffer(const char __user *buf, size_t len)
  135. __attribute__((warn_unused_result, nonnull));
  136. /* Control file aka ep0 *****************************************************/
  137. static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
  138. {
  139. struct ffs_data *ffs = req->context;
  140. complete_all(&ffs->ep0req_completion);
  141. }
  142. static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
  143. {
  144. struct usb_request *req = ffs->ep0req;
  145. int ret;
  146. req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
  147. spin_unlock_irq(&ffs->ev.waitq.lock);
  148. req->buf = data;
  149. req->length = len;
  150. /*
  151. * The UDC layer requires a buffer to be provided even for a ZLP, but it
  152. * should not use it at all. Provide a poisoned pointer to catch any
  153. * possible bug in the driver.
  154. */
  155. if (req->buf == NULL)
  156. req->buf = (void *)0xDEADBABE;
  157. reinit_completion(&ffs->ep0req_completion);
  158. ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
  159. if (unlikely(ret < 0))
  160. return ret;
  161. ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
  162. if (unlikely(ret)) {
  163. usb_ep_dequeue(ffs->gadget->ep0, req);
  164. return -EINTR;
  165. }
  166. ffs->setup_state = FFS_NO_SETUP;
  167. return req->status ? req->status : req->actual;
  168. }
  169. static int __ffs_ep0_stall(struct ffs_data *ffs)
  170. {
  171. if (ffs->ev.can_stall) {
  172. pr_vdebug("ep0 stall\n");
  173. usb_ep_set_halt(ffs->gadget->ep0);
  174. ffs->setup_state = FFS_NO_SETUP;
  175. return -EL2HLT;
  176. } else {
  177. pr_debug("bogus ep0 stall!\n");
  178. return -ESRCH;
  179. }
  180. }
  181. static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
  182. size_t len, loff_t *ptr)
  183. {
  184. struct ffs_data *ffs = file->private_data;
  185. ssize_t ret;
  186. char *data;
  187. ENTER();
  188. /* Fast check if setup was canceled */
  189. if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
  190. return -EIDRM;
  191. /* Acquire mutex */
  192. ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
  193. if (unlikely(ret < 0))
  194. return ret;
  195. /* Check state */
  196. switch (ffs->state) {
  197. case FFS_READ_DESCRIPTORS:
  198. case FFS_READ_STRINGS:
  199. /* Copy data */
  200. if (unlikely(len < 16)) {
  201. ret = -EINVAL;
  202. break;
  203. }
  204. data = ffs_prepare_buffer(buf, len);
  205. if (IS_ERR(data)) {
  206. ret = PTR_ERR(data);
  207. break;
  208. }
  209. /* Handle data */
  210. if (ffs->state == FFS_READ_DESCRIPTORS) {
  211. pr_info("read descriptors\n");
  212. ret = __ffs_data_got_descs(ffs, data, len);
  213. if (unlikely(ret < 0))
  214. break;
  215. ffs->state = FFS_READ_STRINGS;
  216. ret = len;
  217. } else {
  218. pr_info("read strings\n");
  219. ret = __ffs_data_got_strings(ffs, data, len);
  220. if (unlikely(ret < 0))
  221. break;
  222. ret = ffs_epfiles_create(ffs);
  223. if (unlikely(ret)) {
  224. ffs->state = FFS_CLOSING;
  225. break;
  226. }
  227. ffs->state = FFS_ACTIVE;
  228. mutex_unlock(&ffs->mutex);
  229. ret = ffs_ready(ffs);
  230. if (unlikely(ret < 0)) {
  231. ffs->state = FFS_CLOSING;
  232. return ret;
  233. }
  234. set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
  235. return len;
  236. }
  237. break;
  238. case FFS_ACTIVE:
  239. data = NULL;
  240. /*
  241. * We're called from user space, we can use _irq
  242. * rather than _irqsave
  243. */
  244. spin_lock_irq(&ffs->ev.waitq.lock);
  245. switch (ffs_setup_state_clear_cancelled(ffs)) {
  246. case FFS_SETUP_CANCELLED:
  247. ret = -EIDRM;
  248. goto done_spin;
  249. case FFS_NO_SETUP:
  250. ret = -ESRCH;
  251. goto done_spin;
  252. case FFS_SETUP_PENDING:
  253. break;
  254. }
  255. /* FFS_SETUP_PENDING */
  256. if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
  257. spin_unlock_irq(&ffs->ev.waitq.lock);
  258. ret = __ffs_ep0_stall(ffs);
  259. break;
  260. }
  261. /* FFS_SETUP_PENDING and not stall */
  262. len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
  263. spin_unlock_irq(&ffs->ev.waitq.lock);
  264. data = ffs_prepare_buffer(buf, len);
  265. if (IS_ERR(data)) {
  266. ret = PTR_ERR(data);
  267. break;
  268. }
  269. spin_lock_irq(&ffs->ev.waitq.lock);
  270. /*
  271. * We are guaranteed to be still in FFS_ACTIVE state
  272. * but the state of setup could have changed from
  273. * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
  274. * to check for that. If that happened we copied data
  275. * from user space in vain but it's unlikely.
  276. *
  277. * For sure we are not in FFS_NO_SETUP since this is
  278. * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
  279. * transition can be performed and it's protected by
  280. * mutex.
  281. */
  282. if (ffs_setup_state_clear_cancelled(ffs) ==
  283. FFS_SETUP_CANCELLED) {
  284. ret = -EIDRM;
  285. done_spin:
  286. spin_unlock_irq(&ffs->ev.waitq.lock);
  287. } else {
  288. /* unlocks spinlock */
  289. ret = __ffs_ep0_queue_wait(ffs, data, len);
  290. }
  291. kfree(data);
  292. break;
  293. default:
  294. ret = -EBADFD;
  295. break;
  296. }
  297. mutex_unlock(&ffs->mutex);
  298. return ret;
  299. }
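/*
 * Illustrative user-space sketch, not part of this driver: the two blobs
 * consumed by the FFS_READ_DESCRIPTORS and FFS_READ_STRINGS states above
 * are simply write()n to ep0, descriptors first, strings second.  The
 * mount point "/dev/usb-ffs/sample" and the single-interface, two-bulk-
 * endpoint layout are assumptions made for this example; the headers are
 * the legacy (v1) layout from <linux/usb/functionfs.h>, which matches the
 * FUNCTIONFS_DESCRIPTORS_MAGIC case handled by __ffs_data_got_descs()
 * further below.  Error handling is kept to a minimum.
 */
#include <endian.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/usb/functionfs.h>

static const struct {
	struct usb_functionfs_descs_head header;
	struct {
		struct usb_interface_descriptor intf;
		struct usb_endpoint_descriptor_no_audio sink;
		struct usb_endpoint_descriptor_no_audio source;
	} __attribute__((packed)) fs_descs;
} __attribute__((packed)) descriptors = {
	.header = {
		.magic = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC),
		.length = htole32(sizeof(descriptors)),
		.fs_count = htole32(3),
		.hs_count = htole32(0),
	},
	.fs_descs = {
		.intf = {
			.bLength = sizeof(descriptors.fs_descs.intf),
			.bDescriptorType = USB_DT_INTERFACE,
			.bNumEndpoints = 2,
			.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
			.iInterface = 1,
		},
		.sink = {
			.bLength = sizeof(descriptors.fs_descs.sink),
			.bDescriptorType = USB_DT_ENDPOINT,
			.bEndpointAddress = 1 | USB_DIR_IN,
			.bmAttributes = USB_ENDPOINT_XFER_BULK,
			/* wMaxPacketSize left 0: the gadget fills it in */
		},
		.source = {
			.bLength = sizeof(descriptors.fs_descs.source),
			.bDescriptorType = USB_DT_ENDPOINT,
			.bEndpointAddress = 2 | USB_DIR_OUT,
			.bmAttributes = USB_ENDPOINT_XFER_BULK,
		},
	},
};

static const struct {
	struct usb_functionfs_strings_head header;
	struct {
		__le16 code;
		const char str[sizeof("Sample")];
	} __attribute__((packed)) lang0;
} __attribute__((packed)) strings = {
	.header = {
		.magic = htole32(FUNCTIONFS_STRINGS_MAGIC),
		.length = htole32(sizeof(strings)),
		.str_count = htole32(1),
		.lang_count = htole32(1),
	},
	.lang0 = {
		.code = htole16(0x0409),	/* en-US */
		.str = "Sample",
	},
};

static int ffs_upload(void)
{
	int ep0 = open("/dev/usb-ffs/sample/ep0", O_RDWR);

	if (ep0 < 0)
		return -1;
	/* FFS_READ_DESCRIPTORS: hand over the descriptors blob. */
	if (write(ep0, &descriptors, sizeof(descriptors)) < 0)
		return -1;
	/* FFS_READ_STRINGS: hand over the strings; the epN files appear now. */
	if (write(ep0, &strings, sizeof(strings)) < 0)
		return -1;
	return ep0;	/* keep it open: events are read from this fd */
}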
  300. static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
  301. size_t n)
  302. {
  303. /*
  304. * We are holding ffs->ev.waitq.lock and ffs->mutex and we need
  305. * to release them.
  306. */
  307. struct usb_functionfs_event events[n];
  308. unsigned i = 0;
  309. memset(events, 0, sizeof events);
  310. do {
  311. events[i].type = ffs->ev.types[i];
  312. if (events[i].type == FUNCTIONFS_SETUP) {
  313. events[i].u.setup = ffs->ev.setup;
  314. ffs->setup_state = FFS_SETUP_PENDING;
  315. }
  316. } while (++i < n);
  317. if (n < ffs->ev.count) {
  318. ffs->ev.count -= n;
  319. memmove(ffs->ev.types, ffs->ev.types + n,
  320. ffs->ev.count * sizeof *ffs->ev.types);
  321. } else {
  322. ffs->ev.count = 0;
  323. }
  324. spin_unlock_irq(&ffs->ev.waitq.lock);
  325. mutex_unlock(&ffs->mutex);
  326. return unlikely(__copy_to_user(buf, events, sizeof events))
  327. ? -EFAULT : sizeof events;
  328. }
  329. static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
  330. size_t len, loff_t *ptr)
  331. {
  332. struct ffs_data *ffs = file->private_data;
  333. char *data = NULL;
  334. size_t n;
  335. int ret;
  336. ENTER();
  337. /* Fast check if setup was canceled */
  338. if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
  339. return -EIDRM;
  340. /* Acquire mutex */
  341. ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
  342. if (unlikely(ret < 0))
  343. return ret;
  344. /* Check state */
  345. if (ffs->state != FFS_ACTIVE) {
  346. ret = -EBADFD;
  347. goto done_mutex;
  348. }
  349. /*
  350. * We're called from user space, we can use _irq rather than
  351. * _irqsave
  352. */
  353. spin_lock_irq(&ffs->ev.waitq.lock);
  354. switch (ffs_setup_state_clear_cancelled(ffs)) {
  355. case FFS_SETUP_CANCELLED:
  356. ret = -EIDRM;
  357. break;
  358. case FFS_NO_SETUP:
  359. n = len / sizeof(struct usb_functionfs_event);
  360. if (unlikely(!n)) {
  361. ret = -EINVAL;
  362. break;
  363. }
  364. if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
  365. ret = -EAGAIN;
  366. break;
  367. }
  368. if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
  369. ffs->ev.count)) {
  370. ret = -EINTR;
  371. break;
  372. }
  373. return __ffs_ep0_read_events(ffs, buf,
  374. min(n, (size_t)ffs->ev.count));
  375. case FFS_SETUP_PENDING:
  376. if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
  377. spin_unlock_irq(&ffs->ev.waitq.lock);
  378. ret = __ffs_ep0_stall(ffs);
  379. goto done_mutex;
  380. }
  381. len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
  382. spin_unlock_irq(&ffs->ev.waitq.lock);
  383. if (likely(len)) {
  384. data = kmalloc(len, GFP_KERNEL);
  385. if (unlikely(!data)) {
  386. ret = -ENOMEM;
  387. goto done_mutex;
  388. }
  389. }
  390. spin_lock_irq(&ffs->ev.waitq.lock);
  391. /* See ffs_ep0_write() */
  392. if (ffs_setup_state_clear_cancelled(ffs) ==
  393. FFS_SETUP_CANCELLED) {
  394. ret = -EIDRM;
  395. break;
  396. }
  397. /* unlocks spinlock */
  398. ret = __ffs_ep0_queue_wait(ffs, data, len);
  399. if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len)))
  400. ret = -EFAULT;
  401. goto done_mutex;
  402. default:
  403. ret = -EBADFD;
  404. break;
  405. }
  406. spin_unlock_irq(&ffs->ev.waitq.lock);
  407. done_mutex:
  408. mutex_unlock(&ffs->mutex);
  409. kfree(data);
  410. return ret;
  411. }
  412. static int ffs_ep0_open(struct inode *inode, struct file *file)
  413. {
  414. struct ffs_data *ffs = inode->i_private;
  415. ENTER();
  416. if (unlikely(ffs->state == FFS_CLOSING))
  417. return -EBUSY;
  418. file->private_data = ffs;
  419. ffs_data_opened(ffs);
  420. return 0;
  421. }
  422. static int ffs_ep0_release(struct inode *inode, struct file *file)
  423. {
  424. struct ffs_data *ffs = file->private_data;
  425. ENTER();
  426. ffs_data_closed(ffs);
  427. return 0;
  428. }
  429. static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
  430. {
  431. struct ffs_data *ffs = file->private_data;
  432. struct usb_gadget *gadget = ffs->gadget;
  433. long ret;
  434. ENTER();
  435. if (code == FUNCTIONFS_INTERFACE_REVMAP) {
  436. struct ffs_function *func = ffs->func;
  437. ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
  438. } else if (gadget && gadget->ops->ioctl) {
  439. ret = gadget->ops->ioctl(gadget, code, value);
  440. } else {
  441. ret = -ENOTTY;
  442. }
  443. return ret;
  444. }
  445. static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
  446. {
  447. struct ffs_data *ffs = file->private_data;
  448. unsigned int mask = POLLWRNORM;
  449. int ret;
  450. poll_wait(file, &ffs->ev.waitq, wait);
  451. ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
  452. if (unlikely(ret < 0))
  453. return mask;
  454. switch (ffs->state) {
  455. case FFS_READ_DESCRIPTORS:
  456. case FFS_READ_STRINGS:
  457. mask |= POLLOUT;
  458. break;
  459. case FFS_ACTIVE:
  460. switch (ffs->setup_state) {
  461. case FFS_NO_SETUP:
  462. if (ffs->ev.count)
  463. mask |= POLLIN;
  464. break;
  465. case FFS_SETUP_PENDING:
  466. case FFS_SETUP_CANCELLED:
  467. mask |= (POLLIN | POLLOUT);
  468. break;
  469. }
  470. case FFS_CLOSING:
  471. break;
  472. }
  473. mutex_unlock(&ffs->mutex);
  474. return mask;
  475. }
  476. static const struct file_operations ffs_ep0_operations = {
  477. .llseek = no_llseek,
  478. .open = ffs_ep0_open,
  479. .write = ffs_ep0_write,
  480. .read = ffs_ep0_read,
  481. .release = ffs_ep0_release,
  482. .unlocked_ioctl = ffs_ep0_ioctl,
  483. .poll = ffs_ep0_poll,
  484. };
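/*
 * Illustrative user-space sketch, not part of this driver: once the
 * descriptors and strings have been accepted, the same ep0 file descriptor
 * is used for event monitoring.  read() returns whole struct
 * usb_functionfs_event records (see ffs_ep0_read() above) and poll()
 * reports POLLIN once events are queued.  The handling here is
 * deliberately minimal.
 */
#include <stdio.h>
#include <unistd.h>
#include <linux/usb/functionfs.h>

static void ep0_event_loop(int ep0)
{
	struct usb_functionfs_event event;

	for (;;) {
		if (read(ep0, &event, sizeof(event)) != sizeof(event))
			break;

		switch (event.type) {
		case FUNCTIONFS_ENABLE:
			/* Host selected a configuration: epN I/O may start. */
			break;
		case FUNCTIONFS_DISABLE:
		case FUNCTIONFS_UNBIND:
			break;
		case FUNCTIONFS_SETUP:
			/*
			 * Class/vendor control request routed to user space.
			 * Reply with read()/write() on ep0 for the data
			 * stage; issuing the opposite direction asks the
			 * driver to stall (see __ffs_ep0_stall() above).
			 */
			fprintf(stderr, "setup bRequest=%u\n",
				event.u.setup.bRequest);
			break;
		default:
			break;
		}
	}
}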
  485. /* "Normal" endpoints operations ********************************************/
  486. static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
  487. {
  488. ENTER();
  489. if (likely(req->context)) {
  490. struct ffs_ep *ep = _ep->driver_data;
  491. ep->status = req->status ? req->status : req->actual;
  492. complete(req->context);
  493. }
  494. }
  495. static void ffs_user_copy_worker(struct work_struct *work)
  496. {
  497. struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
  498. work);
  499. int ret = io_data->req->status ? io_data->req->status :
  500. io_data->req->actual;
  501. if (io_data->read && ret > 0) {
  502. int i;
  503. size_t pos = 0;
  504. use_mm(io_data->mm);
  505. for (i = 0; i < io_data->nr_segs; i++) {
  506. if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
  507. &io_data->buf[pos],
  508. io_data->iovec[i].iov_len))) {
  509. ret = -EFAULT;
  510. break;
  511. }
  512. pos += io_data->iovec[i].iov_len;
  513. }
  514. unuse_mm(io_data->mm);
  515. }
  516. aio_complete(io_data->kiocb, ret, ret);
  517. usb_ep_free_request(io_data->ep, io_data->req);
  518. io_data->kiocb->private = NULL;
  519. if (io_data->read)
  520. kfree(io_data->iovec);
  521. kfree(io_data->buf);
  522. kfree(io_data);
  523. }
  524. static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
  525. struct usb_request *req)
  526. {
  527. struct ffs_io_data *io_data = req->context;
  528. ENTER();
  529. INIT_WORK(&io_data->work, ffs_user_copy_worker);
  530. schedule_work(&io_data->work);
  531. }
  532. static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
  533. {
  534. struct ffs_epfile *epfile = file->private_data;
  535. struct ffs_ep *ep;
  536. char *data = NULL;
  537. ssize_t ret, data_len;
  538. int halt;
  539. /* Are we still active? */
  540. if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
  541. ret = -ENODEV;
  542. goto error;
  543. }
  544. /* Wait for endpoint to be enabled */
  545. ep = epfile->ep;
  546. if (!ep) {
  547. if (file->f_flags & O_NONBLOCK) {
  548. ret = -EAGAIN;
  549. goto error;
  550. }
  551. ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
  552. if (ret) {
  553. ret = -EINTR;
  554. goto error;
  555. }
  556. }
  557. /* Do we halt? */
  558. halt = (!io_data->read == !epfile->in);
  559. if (halt && epfile->isoc) {
  560. ret = -EINVAL;
  561. goto error;
  562. }
  563. /* Allocate & copy */
  564. if (!halt) {
  565. /*
  566. * if we _do_ wait above, the epfile->ffs->gadget might be NULL
  567. * before the waiting completes, so do not assign to 'gadget' earlier
  568. */
  569. struct usb_gadget *gadget = epfile->ffs->gadget;
  570. spin_lock_irq(&epfile->ffs->eps_lock);
  571. /* In the meantime, endpoint got disabled or changed. */
  572. if (epfile->ep != ep) {
  573. spin_unlock_irq(&epfile->ffs->eps_lock);
  574. return -ESHUTDOWN;
  575. }
  576. /*
  577. * Controller may require buffer size to be aligned to
  578. * maxpacketsize of an out endpoint.
  579. */
  580. data_len = io_data->read ?
  581. usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
  582. io_data->len;
  583. spin_unlock_irq(&epfile->ffs->eps_lock);
  584. data = kmalloc(data_len, GFP_KERNEL);
  585. if (unlikely(!data))
  586. return -ENOMEM;
  587. if (io_data->aio && !io_data->read) {
  588. int i;
  589. size_t pos = 0;
  590. for (i = 0; i < io_data->nr_segs; i++) {
  591. if (unlikely(copy_from_user(&data[pos],
  592. io_data->iovec[i].iov_base,
  593. io_data->iovec[i].iov_len))) {
  594. ret = -EFAULT;
  595. goto error;
  596. }
  597. pos += io_data->iovec[i].iov_len;
  598. }
  599. } else {
  600. if (!io_data->read &&
  601. unlikely(__copy_from_user(data, io_data->buf,
  602. io_data->len))) {
  603. ret = -EFAULT;
  604. goto error;
  605. }
  606. }
  607. }
  608. /* We will be using request */
  609. ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
  610. if (unlikely(ret))
  611. goto error;
  612. spin_lock_irq(&epfile->ffs->eps_lock);
  613. if (epfile->ep != ep) {
  614. /* In the meantime, endpoint got disabled or changed. */
  615. ret = -ESHUTDOWN;
  616. spin_unlock_irq(&epfile->ffs->eps_lock);
  617. } else if (halt) {
  618. /* Halt */
  619. if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
  620. usb_ep_set_halt(ep->ep);
  621. spin_unlock_irq(&epfile->ffs->eps_lock);
  622. ret = -EBADMSG;
  623. } else {
  624. /* Fire the request */
  625. struct usb_request *req;
  626. if (io_data->aio) {
  627. req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
  628. if (unlikely(!req))
  629. goto error_lock;
  630. req->buf = data;
  631. req->length = io_data->len;
  632. io_data->buf = data;
  633. io_data->ep = ep->ep;
  634. io_data->req = req;
  635. req->context = io_data;
  636. req->complete = ffs_epfile_async_io_complete;
  637. ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
  638. if (unlikely(ret)) {
  639. usb_ep_free_request(ep->ep, req);
  640. goto error_lock;
  641. }
  642. ret = -EIOCBQUEUED;
  643. spin_unlock_irq(&epfile->ffs->eps_lock);
  644. } else {
  645. DECLARE_COMPLETION_ONSTACK(done);
  646. req = ep->req;
  647. req->buf = data;
  648. req->length = io_data->len;
  649. req->context = &done;
  650. req->complete = ffs_epfile_io_complete;
  651. ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
  652. spin_unlock_irq(&epfile->ffs->eps_lock);
  653. if (unlikely(ret < 0)) {
  654. /* nop */
  655. } else if (unlikely(
  656. wait_for_completion_interruptible(&done))) {
  657. ret = -EINTR;
  658. usb_ep_dequeue(ep->ep, req);
  659. } else {
  660. /*
  661. * XXX We may end up silently dropping data
  662. * here. Since data_len (i.e. req->length) may
  663. * be bigger than len (after being rounded up
  664. * to maxpacketsize), we may end up with more
  665. * data than user space has space for.
  666. */
  667. ret = ep->status;
  668. if (io_data->read && ret > 0) {
  669. ret = min_t(size_t, ret, io_data->len);
  670. if (unlikely(copy_to_user(io_data->buf,
  671. data, ret)))
  672. ret = -EFAULT;
  673. }
  674. }
  675. kfree(data);
  676. }
  677. }
  678. mutex_unlock(&epfile->mutex);
  679. return ret;
  680. error_lock:
  681. spin_unlock_irq(&epfile->ffs->eps_lock);
  682. mutex_unlock(&epfile->mutex);
  683. error:
  684. kfree(data);
  685. return ret;
  686. }
  687. static ssize_t
  688. ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
  689. loff_t *ptr)
  690. {
  691. struct ffs_io_data io_data;
  692. ENTER();
  693. io_data.aio = false;
  694. io_data.read = false;
  695. io_data.buf = (char __user *)buf;
  696. io_data.len = len;
  697. return ffs_epfile_io(file, &io_data);
  698. }
  699. static ssize_t
  700. ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
  701. {
  702. struct ffs_io_data io_data;
  703. ENTER();
  704. io_data.aio = false;
  705. io_data.read = true;
  706. io_data.buf = buf;
  707. io_data.len = len;
  708. return ffs_epfile_io(file, &io_data);
  709. }
  710. static int
  711. ffs_epfile_open(struct inode *inode, struct file *file)
  712. {
  713. struct ffs_epfile *epfile = inode->i_private;
  714. ENTER();
  715. if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
  716. return -ENODEV;
  717. file->private_data = epfile;
  718. ffs_data_opened(epfile->ffs);
  719. return 0;
  720. }
  721. static int ffs_aio_cancel(struct kiocb *kiocb)
  722. {
  723. struct ffs_io_data *io_data = kiocb->private;
  724. struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
  725. int value;
  726. ENTER();
  727. spin_lock_irq(&epfile->ffs->eps_lock);
  728. if (likely(io_data && io_data->ep && io_data->req))
  729. value = usb_ep_dequeue(io_data->ep, io_data->req);
  730. else
  731. value = -EINVAL;
  732. spin_unlock_irq(&epfile->ffs->eps_lock);
  733. return value;
  734. }
  735. static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb,
  736. const struct iovec *iovec,
  737. unsigned long nr_segs, loff_t loff)
  738. {
  739. struct ffs_io_data *io_data;
  740. ENTER();
  741. io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
  742. if (unlikely(!io_data))
  743. return -ENOMEM;
  744. io_data->aio = true;
  745. io_data->read = false;
  746. io_data->kiocb = kiocb;
  747. io_data->iovec = iovec;
  748. io_data->nr_segs = nr_segs;
  749. io_data->len = kiocb->ki_nbytes;
  750. io_data->mm = current->mm;
  751. kiocb->private = io_data;
  752. kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
  753. return ffs_epfile_io(kiocb->ki_filp, io_data);
  754. }
  755. static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb,
  756. const struct iovec *iovec,
  757. unsigned long nr_segs, loff_t loff)
  758. {
  759. struct ffs_io_data *io_data;
  760. struct iovec *iovec_copy;
  761. ENTER();
  762. iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL);
  763. if (unlikely(!iovec_copy))
  764. return -ENOMEM;
  765. memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs);
  766. io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
  767. if (unlikely(!io_data)) {
  768. kfree(iovec_copy);
  769. return -ENOMEM;
  770. }
  771. io_data->aio = true;
  772. io_data->read = true;
  773. io_data->kiocb = kiocb;
  774. io_data->iovec = iovec_copy;
  775. io_data->nr_segs = nr_segs;
  776. io_data->len = kiocb->ki_nbytes;
  777. io_data->mm = current->mm;
  778. kiocb->private = io_data;
  779. kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
  780. return ffs_epfile_io(kiocb->ki_filp, io_data);
  781. }
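/*
 * Illustrative user-space sketch, not part of this driver: the
 * .aio_read/.aio_write handlers above are reached through the Linux AIO
 * syscalls (io_submit(2) and friends).  Raw syscall wrappers are used so
 * the sketch stays self-contained; a real application would more likely
 * use libaio.  The endpoint file descriptor is assumed to be already open.
 */
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

static long io_setup(unsigned nr, aio_context_t *ctx)
{
	return syscall(__NR_io_setup, nr, ctx);
}

static long io_submit(aio_context_t ctx, long n, struct iocb **iocbs)
{
	return syscall(__NR_io_submit, ctx, n, iocbs);
}

static long io_getevents(aio_context_t ctx, long min_nr, long nr,
			 struct io_event *events, struct timespec *timeout)
{
	return syscall(__NR_io_getevents, ctx, min_nr, nr, events, timeout);
}

/* Queue a single asynchronous read on an already-open OUT endpoint file. */
static long ep_aio_read_once(int ep_out_fd, void *buf, size_t len)
{
	aio_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;

	if (io_setup(1, &ctx) < 0)
		return -1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = ep_out_fd;
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_buf = (uint64_t)(uintptr_t)buf;
	cb.aio_nbytes = len;

	if (io_submit(ctx, 1, cbs) != 1)
		return -1;

	/* ffs_user_copy_worker() completes the iocb from a workqueue. */
	if (io_getevents(ctx, 1, 1, &ev, NULL) != 1)
		return -1;

	return ev.res;	/* bytes transferred, or a negative errno value */
}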
  782. static int
  783. ffs_epfile_release(struct inode *inode, struct file *file)
  784. {
  785. struct ffs_epfile *epfile = inode->i_private;
  786. ENTER();
  787. ffs_data_closed(epfile->ffs);
  788. return 0;
  789. }
  790. static long ffs_epfile_ioctl(struct file *file, unsigned code,
  791. unsigned long value)
  792. {
  793. struct ffs_epfile *epfile = file->private_data;
  794. int ret;
  795. ENTER();
  796. if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
  797. return -ENODEV;
  798. spin_lock_irq(&epfile->ffs->eps_lock);
  799. if (likely(epfile->ep)) {
  800. switch (code) {
  801. case FUNCTIONFS_FIFO_STATUS:
  802. ret = usb_ep_fifo_status(epfile->ep->ep);
  803. break;
  804. case FUNCTIONFS_FIFO_FLUSH:
  805. usb_ep_fifo_flush(epfile->ep->ep);
  806. ret = 0;
  807. break;
  808. case FUNCTIONFS_CLEAR_HALT:
  809. ret = usb_ep_clear_halt(epfile->ep->ep);
  810. break;
  811. case FUNCTIONFS_ENDPOINT_REVMAP:
  812. ret = epfile->ep->num;
  813. break;
  814. default:
  815. ret = -ENOTTY;
  816. }
  817. } else {
  818. ret = -ENODEV;
  819. }
  820. spin_unlock_irq(&epfile->ffs->eps_lock);
  821. return ret;
  822. }
  823. static const struct file_operations ffs_epfile_operations = {
  824. .llseek = no_llseek,
  825. .open = ffs_epfile_open,
  826. .write = ffs_epfile_write,
  827. .read = ffs_epfile_read,
  828. .aio_write = ffs_epfile_aio_write,
  829. .aio_read = ffs_epfile_aio_read,
  830. .release = ffs_epfile_release,
  831. .unlocked_ioctl = ffs_epfile_ioctl,
  832. };
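/*
 * Illustrative user-space sketch, not part of this driver: synchronous
 * read()/write() on the epN files ends up in ffs_epfile_io() above.  The
 * file names and direction assignment (ep1 = bulk IN, ep2 = bulk OUT)
 * follow the example descriptors shown earlier and are assumptions, not
 * something the driver mandates.
 */
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static int ep_echo_once(void)
{
	char buf[512];
	ssize_t n;
	int in = open("/dev/usb-ffs/sample/ep1", O_WRONLY);
	int out = open("/dev/usb-ffs/sample/ep2", O_RDONLY);

	if (in < 0 || out < 0)
		return -1;

	/*
	 * Without O_NONBLOCK these calls sleep until the host enables the
	 * endpoints; with O_NONBLOCK they fail with EAGAIN instead (see the
	 * "Wait for endpoint to be enabled" branch of ffs_epfile_io()).
	 */
	n = read(out, buf, sizeof(buf));	/* one host OUT transfer */
	if (n < 0)
		return -errno;
	if (write(in, buf, n) != n)		/* echo it back on the IN ep */
		return -errno;

	close(in);
	close(out);
	return 0;
}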
  833. /* File system and super block operations ***********************************/
  834. /*
  835. * Mounting the file system creates a controller file, used first for
  836. * function configuration then later for event monitoring.
  837. */
  838. static struct inode *__must_check
  839. ffs_sb_make_inode(struct super_block *sb, void *data,
  840. const struct file_operations *fops,
  841. const struct inode_operations *iops,
  842. struct ffs_file_perms *perms)
  843. {
  844. struct inode *inode;
  845. ENTER();
  846. inode = new_inode(sb);
  847. if (likely(inode)) {
  848. struct timespec current_time = CURRENT_TIME;
  849. inode->i_ino = get_next_ino();
  850. inode->i_mode = perms->mode;
  851. inode->i_uid = perms->uid;
  852. inode->i_gid = perms->gid;
  853. inode->i_atime = current_time;
  854. inode->i_mtime = current_time;
  855. inode->i_ctime = current_time;
  856. inode->i_private = data;
  857. if (fops)
  858. inode->i_fop = fops;
  859. if (iops)
  860. inode->i_op = iops;
  861. }
  862. return inode;
  863. }
  864. /* Create "regular" file */
  865. static struct inode *ffs_sb_create_file(struct super_block *sb,
  866. const char *name, void *data,
  867. const struct file_operations *fops,
  868. struct dentry **dentry_p)
  869. {
  870. struct ffs_data *ffs = sb->s_fs_info;
  871. struct dentry *dentry;
  872. struct inode *inode;
  873. ENTER();
  874. dentry = d_alloc_name(sb->s_root, name);
  875. if (unlikely(!dentry))
  876. return NULL;
  877. inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
  878. if (unlikely(!inode)) {
  879. dput(dentry);
  880. return NULL;
  881. }
  882. d_add(dentry, inode);
  883. if (dentry_p)
  884. *dentry_p = dentry;
  885. return inode;
  886. }
  887. /* Super block */
  888. static const struct super_operations ffs_sb_operations = {
  889. .statfs = simple_statfs,
  890. .drop_inode = generic_delete_inode,
  891. };
  892. struct ffs_sb_fill_data {
  893. struct ffs_file_perms perms;
  894. umode_t root_mode;
  895. const char *dev_name;
  896. struct ffs_data *ffs_data;
  897. };
  898. static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
  899. {
  900. struct ffs_sb_fill_data *data = _data;
  901. struct inode *inode;
  902. struct ffs_data *ffs = data->ffs_data;
  903. ENTER();
  904. ffs->sb = sb;
  905. data->ffs_data = NULL;
  906. sb->s_fs_info = ffs;
  907. sb->s_blocksize = PAGE_CACHE_SIZE;
  908. sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
  909. sb->s_magic = FUNCTIONFS_MAGIC;
  910. sb->s_op = &ffs_sb_operations;
  911. sb->s_time_gran = 1;
  912. /* Root inode */
  913. data->perms.mode = data->root_mode;
  914. inode = ffs_sb_make_inode(sb, NULL,
  915. &simple_dir_operations,
  916. &simple_dir_inode_operations,
  917. &data->perms);
  918. sb->s_root = d_make_root(inode);
  919. if (unlikely(!sb->s_root))
  920. return -ENOMEM;
  921. /* EP0 file */
  922. if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
  923. &ffs_ep0_operations, NULL)))
  924. return -ENOMEM;
  925. return 0;
  926. }
  927. static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
  928. {
  929. ENTER();
  930. if (!opts || !*opts)
  931. return 0;
  932. for (;;) {
  933. unsigned long value;
  934. char *eq, *comma;
  935. /* Option limit */
  936. comma = strchr(opts, ',');
  937. if (comma)
  938. *comma = 0;
  939. /* Value limit */
  940. eq = strchr(opts, '=');
  941. if (unlikely(!eq)) {
  942. pr_err("'=' missing in %s\n", opts);
  943. return -EINVAL;
  944. }
  945. *eq = 0;
  946. /* Parse value */
  947. if (kstrtoul(eq + 1, 0, &value)) {
  948. pr_err("%s: invalid value: %s\n", opts, eq + 1);
  949. return -EINVAL;
  950. }
  951. /* Interpret option */
  952. switch (eq - opts) {
  953. case 5:
  954. if (!memcmp(opts, "rmode", 5))
  955. data->root_mode = (value & 0555) | S_IFDIR;
  956. else if (!memcmp(opts, "fmode", 5))
  957. data->perms.mode = (value & 0666) | S_IFREG;
  958. else
  959. goto invalid;
  960. break;
  961. case 4:
  962. if (!memcmp(opts, "mode", 4)) {
  963. data->root_mode = (value & 0555) | S_IFDIR;
  964. data->perms.mode = (value & 0666) | S_IFREG;
  965. } else {
  966. goto invalid;
  967. }
  968. break;
  969. case 3:
  970. if (!memcmp(opts, "uid", 3)) {
  971. data->perms.uid = make_kuid(current_user_ns(), value);
  972. if (!uid_valid(data->perms.uid)) {
  973. pr_err("%s: unmapped value: %lu\n", opts, value);
  974. return -EINVAL;
  975. }
  976. } else if (!memcmp(opts, "gid", 3)) {
  977. data->perms.gid = make_kgid(current_user_ns(), value);
  978. if (!gid_valid(data->perms.gid)) {
  979. pr_err("%s: unmapped value: %lu\n", opts, value);
  980. return -EINVAL;
  981. }
  982. } else {
  983. goto invalid;
  984. }
  985. break;
  986. default:
  987. invalid:
  988. pr_err("%s: invalid option\n", opts);
  989. return -EINVAL;
  990. }
  991. /* Next iteration */
  992. if (!comma)
  993. break;
  994. opts = comma + 1;
  995. }
  996. return 0;
  997. }
  998. /* "mount -t functionfs dev_name /dev/function" ends up here */
  999. static struct dentry *
  1000. ffs_fs_mount(struct file_system_type *t, int flags,
  1001. const char *dev_name, void *opts)
  1002. {
  1003. struct ffs_sb_fill_data data = {
  1004. .perms = {
  1005. .mode = S_IFREG | 0600,
  1006. .uid = GLOBAL_ROOT_UID,
  1007. .gid = GLOBAL_ROOT_GID,
  1008. },
  1009. .root_mode = S_IFDIR | 0500,
  1010. };
  1011. struct dentry *rv;
  1012. int ret;
  1013. void *ffs_dev;
  1014. struct ffs_data *ffs;
  1015. ENTER();
  1016. ret = ffs_fs_parse_opts(&data, opts);
  1017. if (unlikely(ret < 0))
  1018. return ERR_PTR(ret);
  1019. ffs = ffs_data_new();
  1020. if (unlikely(!ffs))
  1021. return ERR_PTR(-ENOMEM);
  1022. ffs->file_perms = data.perms;
  1023. ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
  1024. if (unlikely(!ffs->dev_name)) {
  1025. ffs_data_put(ffs);
  1026. return ERR_PTR(-ENOMEM);
  1027. }
  1028. ffs_dev = ffs_acquire_dev(dev_name);
  1029. if (IS_ERR(ffs_dev)) {
  1030. ffs_data_put(ffs);
  1031. return ERR_CAST(ffs_dev);
  1032. }
  1033. ffs->private_data = ffs_dev;
  1034. data.ffs_data = ffs;
  1035. rv = mount_nodev(t, flags, &data, ffs_sb_fill);
  1036. if (IS_ERR(rv) && data.ffs_data) {
  1037. ffs_release_dev(data.ffs_data);
  1038. ffs_data_put(data.ffs_data);
  1039. }
  1040. return rv;
  1041. }
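/*
 * Illustrative sketch, not part of this driver: ffs_fs_mount() above is
 * reached through mount(2).  The option string is parsed by
 * ffs_fs_parse_opts(): uid/gid take numeric IDs, rmode/fmode (or mode)
 * take octal permission bits.  The device name ("sample") must match the
 * name the function was registered under; the target directory is an
 * assumption.  Shell equivalent:
 *
 *	mount -t functionfs -o uid=1000,gid=1000,rmode=0770,fmode=0660 \
 *		sample /dev/usb-ffs/sample
 */
#include <sys/mount.h>

static int mount_ffs(void)
{
	return mount("sample", "/dev/usb-ffs/sample", "functionfs", 0,
		     "uid=1000,gid=1000,rmode=0770,fmode=0660");
}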
  1042. static void
  1043. ffs_fs_kill_sb(struct super_block *sb)
  1044. {
  1045. ENTER();
  1046. kill_litter_super(sb);
  1047. if (sb->s_fs_info) {
  1048. ffs_release_dev(sb->s_fs_info);
  1049. ffs_data_put(sb->s_fs_info);
  1050. }
  1051. }
  1052. static struct file_system_type ffs_fs_type = {
  1053. .owner = THIS_MODULE,
  1054. .name = "functionfs",
  1055. .mount = ffs_fs_mount,
  1056. .kill_sb = ffs_fs_kill_sb,
  1057. };
  1058. MODULE_ALIAS_FS("functionfs");
  1059. /* Driver's main init/cleanup functions *************************************/
  1060. static int functionfs_init(void)
  1061. {
  1062. int ret;
  1063. ENTER();
  1064. ret = register_filesystem(&ffs_fs_type);
  1065. if (likely(!ret))
  1066. pr_info("file system registered\n");
  1067. else
  1068. pr_err("failed registering file system (%d)\n", ret);
  1069. return ret;
  1070. }
  1071. static void functionfs_cleanup(void)
  1072. {
  1073. ENTER();
  1074. pr_info("unloading\n");
  1075. unregister_filesystem(&ffs_fs_type);
  1076. }
  1077. /* ffs_data and ffs_function construction and destruction code **************/
  1078. static void ffs_data_clear(struct ffs_data *ffs);
  1079. static void ffs_data_reset(struct ffs_data *ffs);
  1080. static void ffs_data_get(struct ffs_data *ffs)
  1081. {
  1082. ENTER();
  1083. atomic_inc(&ffs->ref);
  1084. }
  1085. static void ffs_data_opened(struct ffs_data *ffs)
  1086. {
  1087. ENTER();
  1088. atomic_inc(&ffs->ref);
  1089. atomic_inc(&ffs->opened);
  1090. }
  1091. static void ffs_data_put(struct ffs_data *ffs)
  1092. {
  1093. ENTER();
  1094. if (unlikely(atomic_dec_and_test(&ffs->ref))) {
  1095. pr_info("%s(): freeing\n", __func__);
  1096. ffs_data_clear(ffs);
  1097. BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
  1098. waitqueue_active(&ffs->ep0req_completion.wait));
  1099. kfree(ffs->dev_name);
  1100. kfree(ffs);
  1101. }
  1102. }
  1103. static void ffs_data_closed(struct ffs_data *ffs)
  1104. {
  1105. ENTER();
  1106. if (atomic_dec_and_test(&ffs->opened)) {
  1107. ffs->state = FFS_CLOSING;
  1108. ffs_data_reset(ffs);
  1109. }
  1110. ffs_data_put(ffs);
  1111. }
  1112. static struct ffs_data *ffs_data_new(void)
  1113. {
  1114. struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
  1115. if (unlikely(!ffs))
  1116. return NULL;
  1117. ENTER();
  1118. atomic_set(&ffs->ref, 1);
  1119. atomic_set(&ffs->opened, 0);
  1120. ffs->state = FFS_READ_DESCRIPTORS;
  1121. mutex_init(&ffs->mutex);
  1122. spin_lock_init(&ffs->eps_lock);
  1123. init_waitqueue_head(&ffs->ev.waitq);
  1124. init_completion(&ffs->ep0req_completion);
  1125. /* XXX REVISIT need to update it in some places, or do we? */
  1126. ffs->ev.can_stall = 1;
  1127. return ffs;
  1128. }
  1129. static void ffs_data_clear(struct ffs_data *ffs)
  1130. {
  1131. ENTER();
  1132. if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
  1133. ffs_closed(ffs);
  1134. BUG_ON(ffs->gadget);
  1135. if (ffs->epfiles)
  1136. ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
  1137. kfree(ffs->raw_descs_data);
  1138. kfree(ffs->raw_strings);
  1139. kfree(ffs->stringtabs);
  1140. }
  1141. static void ffs_data_reset(struct ffs_data *ffs)
  1142. {
  1143. ENTER();
  1144. ffs_data_clear(ffs);
  1145. ffs->epfiles = NULL;
  1146. ffs->raw_descs_data = NULL;
  1147. ffs->raw_descs = NULL;
  1148. ffs->raw_strings = NULL;
  1149. ffs->stringtabs = NULL;
  1150. ffs->raw_descs_length = 0;
  1151. ffs->fs_descs_count = 0;
  1152. ffs->hs_descs_count = 0;
  1153. ffs->ss_descs_count = 0;
  1154. ffs->strings_count = 0;
  1155. ffs->interfaces_count = 0;
  1156. ffs->eps_count = 0;
  1157. ffs->ev.count = 0;
  1158. ffs->state = FFS_READ_DESCRIPTORS;
  1159. ffs->setup_state = FFS_NO_SETUP;
  1160. ffs->flags = 0;
  1161. }
  1162. static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
  1163. {
  1164. struct usb_gadget_strings **lang;
  1165. int first_id;
  1166. ENTER();
  1167. if (WARN_ON(ffs->state != FFS_ACTIVE
  1168. || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
  1169. return -EBADFD;
  1170. first_id = usb_string_ids_n(cdev, ffs->strings_count);
  1171. if (unlikely(first_id < 0))
  1172. return first_id;
  1173. ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
  1174. if (unlikely(!ffs->ep0req))
  1175. return -ENOMEM;
  1176. ffs->ep0req->complete = ffs_ep0_complete;
  1177. ffs->ep0req->context = ffs;
  1178. lang = ffs->stringtabs;
  1179. if (lang) {
  1180. for (; *lang; ++lang) {
  1181. struct usb_string *str = (*lang)->strings;
  1182. int id = first_id;
  1183. for (; str->s; ++id, ++str)
  1184. str->id = id;
  1185. }
  1186. }
  1187. ffs->gadget = cdev->gadget;
  1188. ffs_data_get(ffs);
  1189. return 0;
  1190. }
  1191. static void functionfs_unbind(struct ffs_data *ffs)
  1192. {
  1193. ENTER();
  1194. if (!WARN_ON(!ffs->gadget)) {
  1195. usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
  1196. ffs->ep0req = NULL;
  1197. ffs->gadget = NULL;
  1198. clear_bit(FFS_FL_BOUND, &ffs->flags);
  1199. ffs_data_put(ffs);
  1200. }
  1201. }
  1202. static int ffs_epfiles_create(struct ffs_data *ffs)
  1203. {
  1204. struct ffs_epfile *epfile, *epfiles;
  1205. unsigned i, count;
  1206. ENTER();
  1207. count = ffs->eps_count;
  1208. epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
  1209. if (!epfiles)
  1210. return -ENOMEM;
  1211. epfile = epfiles;
  1212. for (i = 1; i <= count; ++i, ++epfile) {
  1213. epfile->ffs = ffs;
  1214. mutex_init(&epfile->mutex);
  1215. init_waitqueue_head(&epfile->wait);
  216. sprintf(epfile->name, "ep%u", i);
  217. if (unlikely(!ffs_sb_create_file(ffs->sb, epfile->name, epfile,
  1218. &ffs_epfile_operations,
  1219. &epfile->dentry))) {
  1220. ffs_epfiles_destroy(epfiles, i - 1);
  1221. return -ENOMEM;
  1222. }
  1223. }
  1224. ffs->epfiles = epfiles;
  1225. return 0;
  1226. }
  1227. static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
  1228. {
  1229. struct ffs_epfile *epfile = epfiles;
  1230. ENTER();
  1231. for (; count; --count, ++epfile) {
  1232. BUG_ON(mutex_is_locked(&epfile->mutex) ||
  1233. waitqueue_active(&epfile->wait));
  1234. if (epfile->dentry) {
  1235. d_delete(epfile->dentry);
  1236. dput(epfile->dentry);
  1237. epfile->dentry = NULL;
  1238. }
  1239. }
  1240. kfree(epfiles);
  1241. }
  1242. static void ffs_func_eps_disable(struct ffs_function *func)
  1243. {
  1244. struct ffs_ep *ep = func->eps;
  1245. struct ffs_epfile *epfile = func->ffs->epfiles;
  1246. unsigned count = func->ffs->eps_count;
  1247. unsigned long flags;
  1248. spin_lock_irqsave(&func->ffs->eps_lock, flags);
  1249. do {
  1250. /* pending requests get nuked */
  1251. if (likely(ep->ep))
  1252. usb_ep_disable(ep->ep);
  1253. epfile->ep = NULL;
  1254. ++ep;
  1255. ++epfile;
  1256. } while (--count);
  1257. spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
  1258. }
  1259. static int ffs_func_eps_enable(struct ffs_function *func)
  1260. {
  1261. struct ffs_data *ffs = func->ffs;
  1262. struct ffs_ep *ep = func->eps;
  1263. struct ffs_epfile *epfile = ffs->epfiles;
  1264. unsigned count = ffs->eps_count;
  1265. unsigned long flags;
  1266. int ret = 0;
  1267. spin_lock_irqsave(&func->ffs->eps_lock, flags);
  1268. do {
  1269. struct usb_endpoint_descriptor *ds;
  1270. int desc_idx;
  1271. if (ffs->gadget->speed == USB_SPEED_SUPER)
  1272. desc_idx = 2;
  1273. else if (ffs->gadget->speed == USB_SPEED_HIGH)
  1274. desc_idx = 1;
  1275. else
  1276. desc_idx = 0;
  1277. /* fall-back to lower speed if desc missing for current speed */
  1278. do {
  1279. ds = ep->descs[desc_idx];
  1280. } while (!ds && --desc_idx >= 0);
  1281. if (!ds) {
  1282. ret = -EINVAL;
  1283. break;
  1284. }
  1285. ep->ep->driver_data = ep;
  1286. ep->ep->desc = ds;
  1287. ret = usb_ep_enable(ep->ep);
  1288. if (likely(!ret)) {
  1289. epfile->ep = ep;
  1290. epfile->in = usb_endpoint_dir_in(ds);
  1291. epfile->isoc = usb_endpoint_xfer_isoc(ds);
  1292. } else {
  1293. break;
  1294. }
  1295. wake_up(&epfile->wait);
  1296. ++ep;
  1297. ++epfile;
  1298. } while (--count);
  1299. spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
  1300. return ret;
  1301. }
  1302. /* Parsing and building descriptors and strings *****************************/
  1303. /*
  304. * This validates that the data pointed to by data is a valid USB
  305. * descriptor and records how many interfaces, endpoints and strings
  306. * are required by the given configuration. Returns the length of the
  307. * descriptor on success or a negative error code if the data is invalid.
  1308. */
  1309. enum ffs_entity_type {
  1310. FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
  1311. };
  1312. typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
  1313. u8 *valuep,
  1314. struct usb_descriptor_header *desc,
  1315. void *priv);
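/*
 * Illustrative sketch, not part of this driver: a minimal entity callback
 * showing the protocol that ffs_do_descs()/ffs_do_desc() below drive (and
 * that __ffs_data_do_entity() implements for real).  FFS_DESCRIPTOR is
 * reported once per descriptor with the running index cast into valuep;
 * the remaining entity types carry the interface number, string index or
 * endpoint address of the descriptor being parsed.  Returning a negative
 * value aborts the walk.
 */
struct ffs_count_priv {
	unsigned interfaces;
	unsigned strings;
	unsigned endpoints;
};

static int ffs_count_entity(enum ffs_entity_type type, u8 *valuep,
			    struct usb_descriptor_header *desc, void *priv)
{
	struct ffs_count_priv *c = priv;

	switch (type) {
	case FFS_DESCRIPTOR:
		break;				/* one call per descriptor */
	case FFS_INTERFACE:
		++c->interfaces;
		break;
	case FFS_STRING:
		++c->strings;
		break;
	case FFS_ENDPOINT:
		++c->endpoints;
		break;
	}
	return 0;
}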
  1316. static int __must_check ffs_do_desc(char *data, unsigned len,
  1317. ffs_entity_callback entity, void *priv)
  1318. {
  1319. struct usb_descriptor_header *_ds = (void *)data;
  1320. u8 length;
  1321. int ret;
  1322. ENTER();
  1323. /* At least two bytes are required: length and type */
  1324. if (len < 2) {
  1325. pr_vdebug("descriptor too short\n");
  1326. return -EINVAL;
  1327. }
  328. /* Do we have at least as many bytes as the descriptor takes? */
  1329. length = _ds->bLength;
  1330. if (len < length) {
  331. pr_vdebug("descriptor longer than available data\n");
  1332. return -EINVAL;
  1333. }
  1334. #define __entity_check_INTERFACE(val) 1
  1335. #define __entity_check_STRING(val) (val)
  1336. #define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK)
  1337. #define __entity(type, val) do { \
  1338. pr_vdebug("entity " #type "(%02x)\n", (val)); \
  1339. if (unlikely(!__entity_check_ ##type(val))) { \
  1340. pr_vdebug("invalid entity's value\n"); \
  1341. return -EINVAL; \
  1342. } \
  1343. ret = entity(FFS_ ##type, &val, _ds, priv); \
  1344. if (unlikely(ret < 0)) { \
  1345. pr_debug("entity " #type "(%02x); ret = %d\n", \
  1346. (val), ret); \
  1347. return ret; \
  1348. } \
  1349. } while (0)
  1350. /* Parse descriptor depending on type. */
  1351. switch (_ds->bDescriptorType) {
  1352. case USB_DT_DEVICE:
  1353. case USB_DT_CONFIG:
  1354. case USB_DT_STRING:
  1355. case USB_DT_DEVICE_QUALIFIER:
  1356. /* function can't have any of those */
  1357. pr_vdebug("descriptor reserved for gadget: %d\n",
  1358. _ds->bDescriptorType);
  1359. return -EINVAL;
  1360. case USB_DT_INTERFACE: {
  1361. struct usb_interface_descriptor *ds = (void *)_ds;
  1362. pr_vdebug("interface descriptor\n");
  1363. if (length != sizeof *ds)
  1364. goto inv_length;
  1365. __entity(INTERFACE, ds->bInterfaceNumber);
  1366. if (ds->iInterface)
  1367. __entity(STRING, ds->iInterface);
  1368. }
  1369. break;
  1370. case USB_DT_ENDPOINT: {
  1371. struct usb_endpoint_descriptor *ds = (void *)_ds;
  1372. pr_vdebug("endpoint descriptor\n");
  1373. if (length != USB_DT_ENDPOINT_SIZE &&
  1374. length != USB_DT_ENDPOINT_AUDIO_SIZE)
  1375. goto inv_length;
  1376. __entity(ENDPOINT, ds->bEndpointAddress);
  1377. }
  1378. break;
  1379. case HID_DT_HID:
  1380. pr_vdebug("hid descriptor\n");
  1381. if (length != sizeof(struct hid_descriptor))
  1382. goto inv_length;
  1383. break;
  1384. case USB_DT_OTG:
  1385. if (length != sizeof(struct usb_otg_descriptor))
  1386. goto inv_length;
  1387. break;
  1388. case USB_DT_INTERFACE_ASSOCIATION: {
  1389. struct usb_interface_assoc_descriptor *ds = (void *)_ds;
  1390. pr_vdebug("interface association descriptor\n");
  1391. if (length != sizeof *ds)
  1392. goto inv_length;
  1393. if (ds->iFunction)
  1394. __entity(STRING, ds->iFunction);
  1395. }
  1396. break;
  1397. case USB_DT_SS_ENDPOINT_COMP:
  1398. pr_vdebug("EP SS companion descriptor\n");
  1399. if (length != sizeof(struct usb_ss_ep_comp_descriptor))
  1400. goto inv_length;
  1401. break;
  1402. case USB_DT_OTHER_SPEED_CONFIG:
  1403. case USB_DT_INTERFACE_POWER:
  1404. case USB_DT_DEBUG:
  1405. case USB_DT_SECURITY:
  1406. case USB_DT_CS_RADIO_CONTROL:
  1407. /* TODO */
  1408. pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
  1409. return -EINVAL;
  1410. default:
1411. /* Unknown descriptor type supplied by user space */
  1412. pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
  1413. return -EINVAL;
  1414. inv_length:
  1415. pr_vdebug("invalid length: %d (descriptor %d)\n",
  1416. _ds->bLength, _ds->bDescriptorType);
  1417. return -EINVAL;
  1418. }
  1419. #undef __entity
  1420. #undef __entity_check_DESCRIPTOR
  1421. #undef __entity_check_INTERFACE
  1422. #undef __entity_check_STRING
  1423. #undef __entity_check_ENDPOINT
  1424. return length;
  1425. }
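/*
 * Walks `count' consecutive descriptors stored in `data' (at most `len'
 * bytes), reporting each one through ffs_do_desc() and the entity
 * callback, plus one final FFS_DESCRIPTOR call with desc == NULL as a
 * terminator.  Returns the number of bytes consumed or a negative
 * error code.
 */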
  1426. static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
  1427. ffs_entity_callback entity, void *priv)
  1428. {
  1429. const unsigned _len = len;
  1430. unsigned long num = 0;
  1431. ENTER();
  1432. for (;;) {
  1433. int ret;
  1434. if (num == count)
  1435. data = NULL;
  1436. /* Record "descriptor" entity */
  1437. ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
  1438. if (unlikely(ret < 0)) {
  1439. pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
  1440. num, ret);
  1441. return ret;
  1442. }
  1443. if (!data)
  1444. return _len - len;
  1445. ret = ffs_do_desc(data, len, entity, priv);
  1446. if (unlikely(ret < 0)) {
  1447. pr_debug("%s returns %d\n", __func__, ret);
  1448. return ret;
  1449. }
  1450. len -= ret;
  1451. data += ret;
  1452. ++num;
  1453. }
  1454. }
  1455. static int __ffs_data_do_entity(enum ffs_entity_type type,
  1456. u8 *valuep, struct usb_descriptor_header *desc,
  1457. void *priv)
  1458. {
  1459. struct ffs_data *ffs = priv;
  1460. ENTER();
  1461. switch (type) {
  1462. case FFS_DESCRIPTOR:
  1463. break;
  1464. case FFS_INTERFACE:
  1465. /*
1466. * Interfaces are indexed from zero, so if we
1467. * encounter interface "n" then there are at least
1468. * "n+1" interfaces.
  1469. */
  1470. if (*valuep >= ffs->interfaces_count)
  1471. ffs->interfaces_count = *valuep + 1;
  1472. break;
  1473. case FFS_STRING:
  1474. /*
1475. * Strings are indexed from 1 (index 0 is reserved for
1476. * the list of language IDs).
  1477. */
  1478. if (*valuep > ffs->strings_count)
  1479. ffs->strings_count = *valuep;
  1480. break;
  1481. case FFS_ENDPOINT:
  1482. /* Endpoints are indexed from 1 as well. */
  1483. if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count)
  1484. ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK);
  1485. break;
  1486. }
  1487. return 0;
  1488. }
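/*
 * Layout of the descriptors blob that user space writes to ep0, as
 * parsed by __ffs_data_got_descs() below (see also
 * include/uapi/linux/usb/functionfs.h):
 *
 *   le32 magic;      FUNCTIONFS_DESCRIPTORS_MAGIC or _MAGIC_V2
 *   le32 length;     total length of this blob, including the header
 *   le32 flags;      V2 only: FUNCTIONFS_HAS_{FS,HS,SS}_DESC bits
 *   le32 fs_count;   present iff the FS flag is set (always for V1)
 *   le32 hs_count;   present iff the HS flag is set (always for V1)
 *   le32 ss_count;   present iff the SS flag is set (V2 only)
 *   full-, high- and super-speed descriptors, back to back
 */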
  1489. static int __ffs_data_got_descs(struct ffs_data *ffs,
  1490. char *const _data, size_t len)
  1491. {
  1492. char *data = _data, *raw_descs;
  1493. unsigned counts[3], flags;
  1494. int ret = -EINVAL, i;
  1495. ENTER();
  1496. if (get_unaligned_le32(data + 4) != len)
  1497. goto error;
  1498. switch (get_unaligned_le32(data)) {
  1499. case FUNCTIONFS_DESCRIPTORS_MAGIC:
  1500. flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
  1501. data += 8;
  1502. len -= 8;
  1503. break;
  1504. case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
  1505. flags = get_unaligned_le32(data + 8);
  1506. if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
  1507. FUNCTIONFS_HAS_HS_DESC |
  1508. FUNCTIONFS_HAS_SS_DESC)) {
  1509. ret = -ENOSYS;
  1510. goto error;
  1511. }
  1512. data += 12;
  1513. len -= 12;
  1514. break;
  1515. default:
  1516. goto error;
  1517. }
  1518. /* Read fs_count, hs_count and ss_count (if present) */
  1519. for (i = 0; i < 3; ++i) {
  1520. if (!(flags & (1 << i))) {
  1521. counts[i] = 0;
  1522. } else if (len < 4) {
  1523. goto error;
  1524. } else {
  1525. counts[i] = get_unaligned_le32(data);
  1526. data += 4;
  1527. len -= 4;
  1528. }
  1529. }
  1530. /* Read descriptors */
  1531. raw_descs = data;
  1532. for (i = 0; i < 3; ++i) {
  1533. if (!counts[i])
  1534. continue;
  1535. ret = ffs_do_descs(counts[i], data, len,
  1536. __ffs_data_do_entity, ffs);
  1537. if (ret < 0)
  1538. goto error;
  1539. data += ret;
  1540. len -= ret;
  1541. }
  1542. if (raw_descs == data || len) {
  1543. ret = -EINVAL;
  1544. goto error;
  1545. }
  1546. ffs->raw_descs_data = _data;
  1547. ffs->raw_descs = raw_descs;
  1548. ffs->raw_descs_length = data - raw_descs;
  1549. ffs->fs_descs_count = counts[0];
  1550. ffs->hs_descs_count = counts[1];
  1551. ffs->ss_descs_count = counts[2];
  1552. return 0;
  1553. error:
  1554. kfree(_data);
  1555. return ret;
  1556. }
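/*
 * Layout of the strings blob that user space writes to ep0, as parsed
 * by __ffs_data_got_strings() below:
 *
 *   le32 magic;        FUNCTIONFS_STRINGS_MAGIC
 *   le32 length;       total length of this blob
 *   le32 str_count;    number of strings per language
 *   le32 lang_count;   number of languages
 *   then, for each language: a le16 language code followed by
 *   str_count NUL-terminated strings; no trailing garbage is accepted.
 */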
  1557. static int __ffs_data_got_strings(struct ffs_data *ffs,
  1558. char *const _data, size_t len)
  1559. {
  1560. u32 str_count, needed_count, lang_count;
  1561. struct usb_gadget_strings **stringtabs, *t;
  1562. struct usb_string *strings, *s;
  1563. const char *data = _data;
  1564. ENTER();
  1565. if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
  1566. get_unaligned_le32(data + 4) != len))
  1567. goto error;
  1568. str_count = get_unaligned_le32(data + 8);
  1569. lang_count = get_unaligned_le32(data + 12);
  1570. /* if one is zero the other must be zero */
  1571. if (unlikely(!str_count != !lang_count))
  1572. goto error;
  1573. /* Do we have at least as many strings as descriptors need? */
  1574. needed_count = ffs->strings_count;
  1575. if (unlikely(str_count < needed_count))
  1576. goto error;
  1577. /*
  1578. * If we don't need any strings just return and free all
  1579. * memory.
  1580. */
  1581. if (!needed_count) {
  1582. kfree(_data);
  1583. return 0;
  1584. }
  1585. /* Allocate everything in one chunk so there's less maintenance. */
  1586. {
  1587. unsigned i = 0;
  1588. vla_group(d);
  1589. vla_item(d, struct usb_gadget_strings *, stringtabs,
  1590. lang_count + 1);
  1591. vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
  1592. vla_item(d, struct usb_string, strings,
  1593. lang_count*(needed_count+1));
  1594. char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
  1595. if (unlikely(!vlabuf)) {
  1596. kfree(_data);
  1597. return -ENOMEM;
  1598. }
  1599. /* Initialize the VLA pointers */
  1600. stringtabs = vla_ptr(vlabuf, d, stringtabs);
  1601. t = vla_ptr(vlabuf, d, stringtab);
  1602. i = lang_count;
  1603. do {
  1604. *stringtabs++ = t++;
  1605. } while (--i);
  1606. *stringtabs = NULL;
  1607. /* stringtabs = vlabuf = d_stringtabs for later kfree */
  1608. stringtabs = vla_ptr(vlabuf, d, stringtabs);
  1609. t = vla_ptr(vlabuf, d, stringtab);
  1610. s = vla_ptr(vlabuf, d, strings);
  1611. strings = s;
  1612. }
  1613. /* For each language */
  1614. data += 16;
  1615. len -= 16;
  1616. do { /* lang_count > 0 so we can use do-while */
  1617. unsigned needed = needed_count;
  1618. if (unlikely(len < 3))
  1619. goto error_free;
  1620. t->language = get_unaligned_le16(data);
  1621. t->strings = s;
  1622. ++t;
  1623. data += 2;
  1624. len -= 2;
  1625. /* For each string */
  1626. do { /* str_count > 0 so we can use do-while */
  1627. size_t length = strnlen(data, len);
  1628. if (unlikely(length == len))
  1629. goto error_free;
  1630. /*
1631. * The user may provide more strings than we need;
1632. * if that's the case we simply ignore the
1633. * rest.
  1634. */
  1635. if (likely(needed)) {
  1636. /*
  1637. * s->id will be set while adding
  1638. * function to configuration so for
  1639. * now just leave garbage here.
  1640. */
  1641. s->s = data;
  1642. --needed;
  1643. ++s;
  1644. }
  1645. data += length + 1;
  1646. len -= length + 1;
  1647. } while (--str_count);
  1648. s->id = 0; /* terminator */
  1649. s->s = NULL;
  1650. ++s;
  1651. } while (--lang_count);
  1652. /* Some garbage left? */
  1653. if (unlikely(len))
  1654. goto error_free;
  1655. /* Done! */
  1656. ffs->stringtabs = stringtabs;
  1657. ffs->raw_strings = _data;
  1658. return 0;
  1659. error_free:
  1660. kfree(stringtabs);
  1661. error:
  1662. kfree(_data);
  1663. return -EINVAL;
  1664. }
  1665. /* Events handling and management *******************************************/
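/*
 * The event queue is kept small by coalescing: SETUP and SUSPEND
 * replace any queued event of the same kind (and RESUME also drops a
 * pending SUSPEND), while BIND, UNBIND, DISABLE and ENABLE purge
 * everything except power-management events before being queued.  A
 * pending setup request is cancelled whenever a new event arrives.
 */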
  1666. static void __ffs_event_add(struct ffs_data *ffs,
  1667. enum usb_functionfs_event_type type)
  1668. {
  1669. enum usb_functionfs_event_type rem_type1, rem_type2 = type;
  1670. int neg = 0;
  1671. /*
  1672. * Abort any unhandled setup
  1673. *
  1674. * We do not need to worry about some cmpxchg() changing value
  1675. * of ffs->setup_state without holding the lock because when
  1676. * state is FFS_SETUP_PENDING cmpxchg() in several places in
  1677. * the source does nothing.
  1678. */
  1679. if (ffs->setup_state == FFS_SETUP_PENDING)
  1680. ffs->setup_state = FFS_SETUP_CANCELLED;
  1681. switch (type) {
  1682. case FUNCTIONFS_RESUME:
  1683. rem_type2 = FUNCTIONFS_SUSPEND;
  1684. /* FALL THROUGH */
  1685. case FUNCTIONFS_SUSPEND:
  1686. case FUNCTIONFS_SETUP:
  1687. rem_type1 = type;
  1688. /* Discard all similar events */
  1689. break;
  1690. case FUNCTIONFS_BIND:
  1691. case FUNCTIONFS_UNBIND:
  1692. case FUNCTIONFS_DISABLE:
  1693. case FUNCTIONFS_ENABLE:
1694. /* Discard everything other than power management. */
  1695. rem_type1 = FUNCTIONFS_SUSPEND;
  1696. rem_type2 = FUNCTIONFS_RESUME;
  1697. neg = 1;
  1698. break;
  1699. default:
  1700. BUG();
  1701. }
  1702. {
  1703. u8 *ev = ffs->ev.types, *out = ev;
  1704. unsigned n = ffs->ev.count;
  1705. for (; n; --n, ++ev)
  1706. if ((*ev == rem_type1 || *ev == rem_type2) == neg)
  1707. *out++ = *ev;
  1708. else
  1709. pr_vdebug("purging event %d\n", *ev);
  1710. ffs->ev.count = out - ffs->ev.types;
  1711. }
  1712. pr_vdebug("adding event %d\n", type);
  1713. ffs->ev.types[ffs->ev.count++] = type;
  1714. wake_up_locked(&ffs->ev.waitq);
  1715. }
  1716. static void ffs_event_add(struct ffs_data *ffs,
  1717. enum usb_functionfs_event_type type)
  1718. {
  1719. unsigned long flags;
  1720. spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
  1721. __ffs_event_add(ffs, type);
  1722. spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
  1723. }
  1724. /* Bind/unbind USB function hooks *******************************************/
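/*
 * Binding happens in two passes over the copied raw descriptors:
 * __ffs_func_bind_do_descs() collects descriptor pointers per speed
 * and autoconfigures the endpoints, then __ffs_func_bind_do_nums()
 * rewrites interface numbers, string IDs and endpoint addresses to
 * the values actually assigned by the composite framework and the UDC.
 */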
  1725. static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
  1726. struct usb_descriptor_header *desc,
  1727. void *priv)
  1728. {
  1729. struct usb_endpoint_descriptor *ds = (void *)desc;
  1730. struct ffs_function *func = priv;
  1731. struct ffs_ep *ffs_ep;
  1732. unsigned ep_desc_id, idx;
  1733. static const char *speed_names[] = { "full", "high", "super" };
  1734. if (type != FFS_DESCRIPTOR)
  1735. return 0;
  1736. /*
  1737. * If ss_descriptors is not NULL, we are reading super speed
  1738. * descriptors; if hs_descriptors is not NULL, we are reading high
  1739. * speed descriptors; otherwise, we are reading full speed
  1740. * descriptors.
  1741. */
  1742. if (func->function.ss_descriptors) {
  1743. ep_desc_id = 2;
  1744. func->function.ss_descriptors[(long)valuep] = desc;
  1745. } else if (func->function.hs_descriptors) {
  1746. ep_desc_id = 1;
  1747. func->function.hs_descriptors[(long)valuep] = desc;
  1748. } else {
  1749. ep_desc_id = 0;
  1750. func->function.fs_descriptors[(long)valuep] = desc;
  1751. }
  1752. if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
  1753. return 0;
  1754. idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1;
  1755. ffs_ep = func->eps + idx;
  1756. if (unlikely(ffs_ep->descs[ep_desc_id])) {
  1757. pr_err("two %sspeed descriptors for EP %d\n",
  1758. speed_names[ep_desc_id],
  1759. ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
  1760. return -EINVAL;
  1761. }
  1762. ffs_ep->descs[ep_desc_id] = ds;
  1763. ffs_dump_mem(": Original ep desc", ds, ds->bLength);
  1764. if (ffs_ep->ep) {
  1765. ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
  1766. if (!ds->wMaxPacketSize)
  1767. ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
  1768. } else {
  1769. struct usb_request *req;
  1770. struct usb_ep *ep;
  1771. pr_vdebug("autoconfig\n");
  1772. ep = usb_ep_autoconfig(func->gadget, ds);
  1773. if (unlikely(!ep))
  1774. return -ENOTSUPP;
  1775. ep->driver_data = func->eps + idx;
  1776. req = usb_ep_alloc_request(ep, GFP_KERNEL);
  1777. if (unlikely(!req))
  1778. return -ENOMEM;
  1779. ffs_ep->ep = ep;
  1780. ffs_ep->req = req;
  1781. func->eps_revmap[ds->bEndpointAddress &
  1782. USB_ENDPOINT_NUMBER_MASK] = idx + 1;
  1783. }
  1784. ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
  1785. return 0;
  1786. }
  1787. static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
  1788. struct usb_descriptor_header *desc,
  1789. void *priv)
  1790. {
  1791. struct ffs_function *func = priv;
  1792. unsigned idx;
  1793. u8 newValue;
  1794. switch (type) {
  1795. default:
  1796. case FFS_DESCRIPTOR:
  1797. /* Handled in previous pass by __ffs_func_bind_do_descs() */
  1798. return 0;
  1799. case FFS_INTERFACE:
  1800. idx = *valuep;
  1801. if (func->interfaces_nums[idx] < 0) {
  1802. int id = usb_interface_id(func->conf, &func->function);
  1803. if (unlikely(id < 0))
  1804. return id;
  1805. func->interfaces_nums[idx] = id;
  1806. }
  1807. newValue = func->interfaces_nums[idx];
  1808. break;
  1809. case FFS_STRING:
1810. /* String IDs are allocated when ffs_data is bound to cdev */
  1811. newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
  1812. break;
  1813. case FFS_ENDPOINT:
  1814. /*
  1815. * USB_DT_ENDPOINT are handled in
  1816. * __ffs_func_bind_do_descs().
  1817. */
  1818. if (desc->bDescriptorType == USB_DT_ENDPOINT)
  1819. return 0;
  1820. idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
  1821. if (unlikely(!func->eps[idx].ep))
  1822. return -EINVAL;
  1823. {
  1824. struct usb_endpoint_descriptor **descs;
  1825. descs = func->eps[idx].descs;
  1826. newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
  1827. }
  1828. break;
  1829. }
  1830. pr_vdebug("%02x -> %02x\n", *valuep, newValue);
  1831. *valuep = newValue;
  1832. return 0;
  1833. }
  1834. static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
  1835. struct usb_configuration *c)
  1836. {
  1837. struct ffs_function *func = ffs_func_from_usb(f);
  1838. struct f_fs_opts *ffs_opts =
  1839. container_of(f->fi, struct f_fs_opts, func_inst);
  1840. int ret;
  1841. ENTER();
  1842. /*
  1843. * Legacy gadget triggers binding in functionfs_ready_callback,
  1844. * which already uses locking; taking the same lock here would
  1845. * cause a deadlock.
  1846. *
  1847. * Configfs-enabled gadgets however do need ffs_dev_lock.
  1848. */
  1849. if (!ffs_opts->no_configfs)
  1850. ffs_dev_lock();
  1851. ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
  1852. func->ffs = ffs_opts->dev->ffs_data;
  1853. if (!ffs_opts->no_configfs)
  1854. ffs_dev_unlock();
  1855. if (ret)
  1856. return ERR_PTR(ret);
  1857. func->conf = c;
  1858. func->gadget = c->cdev->gadget;
  1859. ffs_data_get(func->ffs);
  1860. /*
1861. * In drivers/usb/gadget/configfs.c:configfs_composite_bind()
1862. * configurations are bound in sequence with list_for_each_entry,
1863. * and in each configuration its functions are bound in sequence
1864. * with list_for_each_entry, so we assume no race condition
1865. * with regard to ffs_opts->refcnt access.
  1866. */
  1867. if (!ffs_opts->refcnt) {
  1868. ret = functionfs_bind(func->ffs, c->cdev);
  1869. if (ret)
  1870. return ERR_PTR(ret);
  1871. }
  1872. ffs_opts->refcnt++;
  1873. func->function.strings = func->ffs->stringtabs;
  1874. return ffs_opts;
  1875. }
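/*
 * _ffs_func_bind() below packs everything it needs into one kmalloc'ed
 * chunk laid out with the vla_* helpers: the ffs_ep array, the
 * per-speed usb_descriptor_header pointer tables (each with room for a
 * NULL terminator), the interface-number map and a private copy of the
 * raw descriptors.  func->eps points at the start of the chunk, so a
 * single kfree(func->eps) in ffs_func_unbind() releases all of it.
 */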
  1876. static int _ffs_func_bind(struct usb_configuration *c,
  1877. struct usb_function *f)
  1878. {
  1879. struct ffs_function *func = ffs_func_from_usb(f);
  1880. struct ffs_data *ffs = func->ffs;
  1881. const int full = !!func->ffs->fs_descs_count;
  1882. const int high = gadget_is_dualspeed(func->gadget) &&
  1883. func->ffs->hs_descs_count;
  1884. const int super = gadget_is_superspeed(func->gadget) &&
  1885. func->ffs->ss_descs_count;
  1886. int fs_len, hs_len, ret;
  1887. /* Make it a single chunk, less management later on */
  1888. vla_group(d);
  1889. vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
  1890. vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
  1891. full ? ffs->fs_descs_count + 1 : 0);
  1892. vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
  1893. high ? ffs->hs_descs_count + 1 : 0);
  1894. vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
  1895. super ? ffs->ss_descs_count + 1 : 0);
  1896. vla_item_with_sz(d, short, inums, ffs->interfaces_count);
  1897. vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
  1898. char *vlabuf;
  1899. ENTER();
1900. /* Has descriptors only for speeds the gadget does not support */
  1901. if (unlikely(!(full | high | super)))
  1902. return -ENOTSUPP;
  1903. /* Allocate a single chunk, less management later on */
  1904. vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
  1905. if (unlikely(!vlabuf))
  1906. return -ENOMEM;
  1907. /* Zero */
  1908. memset(vla_ptr(vlabuf, d, eps), 0, d_eps__sz);
  1909. /* Copy descriptors */
  1910. memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
  1911. ffs->raw_descs_length);
  1912. memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
1913. for (ret = 0; ret < ffs->eps_count; ++ret) { /* 0-based so slot 0 is covered and we stay in bounds */
1914. struct ffs_ep *ptr;
1915. ptr = vla_ptr(vlabuf, d, eps);
1916. ptr[ret].num = -1;
1917. }
1918. /* Save pointers.
1919. * d_eps == vlabuf, so func->eps is used to kfree vlabuf later.
1920. */
  1921. func->eps = vla_ptr(vlabuf, d, eps);
  1922. func->interfaces_nums = vla_ptr(vlabuf, d, inums);
  1923. /*
1924. * Go through all the endpoint descriptors and allocate
1925. * endpoints first, so that later we can rewrite the endpoint
1926. * numbers without worrying that an endpoint may still be described later on.
  1927. */
  1928. if (likely(full)) {
  1929. func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
  1930. fs_len = ffs_do_descs(ffs->fs_descs_count,
  1931. vla_ptr(vlabuf, d, raw_descs),
  1932. d_raw_descs__sz,
  1933. __ffs_func_bind_do_descs, func);
  1934. if (unlikely(fs_len < 0)) {
  1935. ret = fs_len;
  1936. goto error;
  1937. }
  1938. } else {
  1939. fs_len = 0;
  1940. }
  1941. if (likely(high)) {
  1942. func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
  1943. hs_len = ffs_do_descs(ffs->hs_descs_count,
  1944. vla_ptr(vlabuf, d, raw_descs) + fs_len,
  1945. d_raw_descs__sz - fs_len,
  1946. __ffs_func_bind_do_descs, func);
  1947. if (unlikely(hs_len < 0)) {
  1948. ret = hs_len;
  1949. goto error;
  1950. }
  1951. } else {
  1952. hs_len = 0;
  1953. }
  1954. if (likely(super)) {
  1955. func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
  1956. ret = ffs_do_descs(ffs->ss_descs_count,
  1957. vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
  1958. d_raw_descs__sz - fs_len - hs_len,
  1959. __ffs_func_bind_do_descs, func);
  1960. if (unlikely(ret < 0))
  1961. goto error;
  1962. }
  1963. /*
  1964. * Now handle interface numbers allocation and interface and
  1965. * endpoint numbers rewriting. We can do that in one go
  1966. * now.
  1967. */
  1968. ret = ffs_do_descs(ffs->fs_descs_count +
  1969. (high ? ffs->hs_descs_count : 0) +
  1970. (super ? ffs->ss_descs_count : 0),
  1971. vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
  1972. __ffs_func_bind_do_nums, func);
  1973. if (unlikely(ret < 0))
  1974. goto error;
  1975. /* And we're done */
  1976. ffs_event_add(ffs, FUNCTIONFS_BIND);
  1977. return 0;
  1978. error:
  1979. /* XXX Do we need to release all claimed endpoints here? */
  1980. return ret;
  1981. }
  1982. static int ffs_func_bind(struct usb_configuration *c,
  1983. struct usb_function *f)
  1984. {
  1985. struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
  1986. if (IS_ERR(ffs_opts))
  1987. return PTR_ERR(ffs_opts);
  1988. return _ffs_func_bind(c, f);
  1989. }
  1990. /* Other USB function hooks *************************************************/
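/*
 * ffs_func_set_alt() doubles as the disable path: ffs_func_disable()
 * calls it with alt == (unsigned)-1, in which case the endpoints are
 * torn down and a FUNCTIONFS_DISABLE event is queued instead of
 * FUNCTIONFS_ENABLE.
 */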
  1991. static int ffs_func_set_alt(struct usb_function *f,
  1992. unsigned interface, unsigned alt)
  1993. {
  1994. struct ffs_function *func = ffs_func_from_usb(f);
  1995. struct ffs_data *ffs = func->ffs;
  1996. int ret = 0, intf;
  1997. if (alt != (unsigned)-1) {
  1998. intf = ffs_func_revmap_intf(func, interface);
  1999. if (unlikely(intf < 0))
  2000. return intf;
  2001. }
  2002. if (ffs->func)
  2003. ffs_func_eps_disable(ffs->func);
  2004. if (ffs->state != FFS_ACTIVE)
  2005. return -ENODEV;
  2006. if (alt == (unsigned)-1) {
  2007. ffs->func = NULL;
  2008. ffs_event_add(ffs, FUNCTIONFS_DISABLE);
  2009. return 0;
  2010. }
  2011. ffs->func = func;
  2012. ret = ffs_func_eps_enable(func);
  2013. if (likely(ret >= 0))
  2014. ffs_event_add(ffs, FUNCTIONFS_ENABLE);
  2015. return ret;
  2016. }
  2017. static void ffs_func_disable(struct usb_function *f)
  2018. {
  2019. ffs_func_set_alt(f, 0, (unsigned)-1);
  2020. }
  2021. static int ffs_func_setup(struct usb_function *f,
  2022. const struct usb_ctrlrequest *creq)
  2023. {
  2024. struct ffs_function *func = ffs_func_from_usb(f);
  2025. struct ffs_data *ffs = func->ffs;
  2026. unsigned long flags;
  2027. int ret;
  2028. ENTER();
  2029. pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
  2030. pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
  2031. pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
  2032. pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex));
  2033. pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength));
  2034. /*
2035. * Most requests directed to an interface go through here
2036. * (notable exceptions are set/get interface), so we need to
2037. * handle them. All others are either handled by composite or
2038. * passed to usb_configuration->setup() (if one is set). We
2039. * also handle requests directed to an endpoint here (as it's
2040. * straightforward), but what to do with any
2041. * other request?
  2042. */
  2043. if (ffs->state != FFS_ACTIVE)
  2044. return -ENODEV;
  2045. switch (creq->bRequestType & USB_RECIP_MASK) {
  2046. case USB_RECIP_INTERFACE:
  2047. ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
  2048. if (unlikely(ret < 0))
  2049. return ret;
  2050. break;
  2051. case USB_RECIP_ENDPOINT:
  2052. ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
  2053. if (unlikely(ret < 0))
  2054. return ret;
  2055. break;
  2056. default:
  2057. return -EOPNOTSUPP;
  2058. }
  2059. spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
  2060. ffs->ev.setup = *creq;
  2061. ffs->ev.setup.wIndex = cpu_to_le16(ret);
  2062. __ffs_event_add(ffs, FUNCTIONFS_SETUP);
  2063. spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
  2064. return 0;
  2065. }
  2066. static void ffs_func_suspend(struct usb_function *f)
  2067. {
  2068. ENTER();
  2069. ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
  2070. }
  2071. static void ffs_func_resume(struct usb_function *f)
  2072. {
  2073. ENTER();
  2074. ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
  2075. }
  2076. /* Endpoint and interface numbers reverse mapping ***************************/
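/*
 * eps_revmap[] maps an endpoint address (low bits only) back to the
 * 1-based index of the corresponding ffs_ep, with 0 meaning "not
 * mapped"; interfaces_nums[] maps the interface number visible in the
 * descriptors to the id allocated by usb_interface_id().  Both lookups
 * below return -EDOM when no mapping exists.
 */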
  2077. static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
  2078. {
  2079. num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
  2080. return num ? num : -EDOM;
  2081. }
  2082. static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
  2083. {
  2084. short *nums = func->interfaces_nums;
  2085. unsigned count = func->ffs->interfaces_count;
  2086. for (; count; --count, ++nums) {
  2087. if (*nums >= 0 && *nums == intf)
  2088. return nums - func->interfaces_nums;
  2089. }
  2090. return -EDOM;
  2091. }
  2092. /* Devices management *******************************************************/
  2093. static LIST_HEAD(ffs_devices);
  2094. static struct ffs_dev *_ffs_do_find_dev(const char *name)
  2095. {
  2096. struct ffs_dev *dev;
  2097. list_for_each_entry(dev, &ffs_devices, entry) {
  2098. if (!dev->name || !name)
  2099. continue;
  2100. if (strcmp(dev->name, name) == 0)
  2101. return dev;
  2102. }
  2103. return NULL;
  2104. }
  2105. /*
  2106. * ffs_lock must be taken by the caller of this function
  2107. */
  2108. static struct ffs_dev *_ffs_get_single_dev(void)
  2109. {
  2110. struct ffs_dev *dev;
  2111. if (list_is_singular(&ffs_devices)) {
  2112. dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
  2113. if (dev->single)
  2114. return dev;
  2115. }
  2116. return NULL;
  2117. }
  2118. /*
  2119. * ffs_lock must be taken by the caller of this function
  2120. */
  2121. static struct ffs_dev *_ffs_find_dev(const char *name)
  2122. {
  2123. struct ffs_dev *dev;
  2124. dev = _ffs_get_single_dev();
  2125. if (dev)
  2126. return dev;
  2127. return _ffs_do_find_dev(name);
  2128. }
  2129. /* Configfs support *********************************************************/
  2130. static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
  2131. {
  2132. return container_of(to_config_group(item), struct f_fs_opts,
  2133. func_inst.group);
  2134. }
  2135. static void ffs_attr_release(struct config_item *item)
  2136. {
  2137. struct f_fs_opts *opts = to_ffs_opts(item);
  2138. usb_put_function_instance(&opts->func_inst);
  2139. }
  2140. static struct configfs_item_operations ffs_item_ops = {
  2141. .release = ffs_attr_release,
  2142. };
  2143. static struct config_item_type ffs_func_type = {
  2144. .ct_item_ops = &ffs_item_ops,
  2145. .ct_owner = THIS_MODULE,
  2146. };
  2147. /* Function registration interface ******************************************/
  2148. static void ffs_free_inst(struct usb_function_instance *f)
  2149. {
  2150. struct f_fs_opts *opts;
  2151. opts = to_f_fs_opts(f);
  2152. ffs_dev_lock();
  2153. _ffs_free_dev(opts->dev);
  2154. ffs_dev_unlock();
  2155. kfree(opts);
  2156. }
  2157. #define MAX_INST_NAME_LEN 40
  2158. static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
  2159. {
  2160. struct f_fs_opts *opts;
  2161. char *ptr;
  2162. const char *tmp;
  2163. int name_len, ret;
  2164. name_len = strlen(name) + 1;
  2165. if (name_len > MAX_INST_NAME_LEN)
  2166. return -ENAMETOOLONG;
  2167. ptr = kstrndup(name, name_len, GFP_KERNEL);
  2168. if (!ptr)
  2169. return -ENOMEM;
  2170. opts = to_f_fs_opts(fi);
  2171. tmp = NULL;
  2172. ffs_dev_lock();
  2173. tmp = opts->dev->name_allocated ? opts->dev->name : NULL;
  2174. ret = _ffs_name_dev(opts->dev, ptr);
  2175. if (ret) {
  2176. kfree(ptr);
  2177. ffs_dev_unlock();
  2178. return ret;
  2179. }
  2180. opts->dev->name_allocated = true;
  2181. ffs_dev_unlock();
  2182. kfree(tmp);
  2183. return 0;
  2184. }
  2185. static struct usb_function_instance *ffs_alloc_inst(void)
  2186. {
  2187. struct f_fs_opts *opts;
  2188. struct ffs_dev *dev;
  2189. opts = kzalloc(sizeof(*opts), GFP_KERNEL);
  2190. if (!opts)
  2191. return ERR_PTR(-ENOMEM);
  2192. opts->func_inst.set_inst_name = ffs_set_inst_name;
  2193. opts->func_inst.free_func_inst = ffs_free_inst;
  2194. ffs_dev_lock();
  2195. dev = _ffs_alloc_dev();
  2196. ffs_dev_unlock();
  2197. if (IS_ERR(dev)) {
  2198. kfree(opts);
  2199. return ERR_CAST(dev);
  2200. }
  2201. opts->dev = dev;
  2202. dev->opts = opts;
  2203. config_group_init_type_name(&opts->func_inst.group, "",
  2204. &ffs_func_type);
  2205. return &opts->func_inst;
  2206. }
  2207. static void ffs_free(struct usb_function *f)
  2208. {
  2209. kfree(ffs_func_from_usb(f));
  2210. }
  2211. static void ffs_func_unbind(struct usb_configuration *c,
  2212. struct usb_function *f)
  2213. {
  2214. struct ffs_function *func = ffs_func_from_usb(f);
  2215. struct ffs_data *ffs = func->ffs;
  2216. struct f_fs_opts *opts =
  2217. container_of(f->fi, struct f_fs_opts, func_inst);
  2218. struct ffs_ep *ep = func->eps;
  2219. unsigned count = ffs->eps_count;
  2220. unsigned long flags;
  2221. ENTER();
  2222. if (ffs->func == func) {
  2223. ffs_func_eps_disable(func);
  2224. ffs->func = NULL;
  2225. }
  2226. if (!--opts->refcnt)
  2227. functionfs_unbind(ffs);
  2228. /* cleanup after autoconfig */
  2229. spin_lock_irqsave(&func->ffs->eps_lock, flags);
  2230. do {
  2231. if (ep->ep && ep->req)
  2232. usb_ep_free_request(ep->ep, ep->req);
  2233. ep->req = NULL;
  2234. ++ep;
  2235. } while (--count);
  2236. spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
  2237. kfree(func->eps);
  2238. func->eps = NULL;
  2239. /*
  2240. * eps, descriptors and interfaces_nums are allocated in the
  2241. * same chunk so only one free is required.
  2242. */
  2243. func->function.fs_descriptors = NULL;
  2244. func->function.hs_descriptors = NULL;
  2245. func->function.ss_descriptors = NULL;
  2246. func->interfaces_nums = NULL;
  2247. ffs_event_add(ffs, FUNCTIONFS_UNBIND);
  2248. }
  2249. static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
  2250. {
  2251. struct ffs_function *func;
  2252. ENTER();
  2253. func = kzalloc(sizeof(*func), GFP_KERNEL);
  2254. if (unlikely(!func))
  2255. return ERR_PTR(-ENOMEM);
  2256. func->function.name = "Function FS Gadget";
  2257. func->function.bind = ffs_func_bind;
  2258. func->function.unbind = ffs_func_unbind;
  2259. func->function.set_alt = ffs_func_set_alt;
  2260. func->function.disable = ffs_func_disable;
  2261. func->function.setup = ffs_func_setup;
  2262. func->function.suspend = ffs_func_suspend;
  2263. func->function.resume = ffs_func_resume;
  2264. func->function.free_func = ffs_free;
  2265. return &func->function;
  2266. }
  2267. /*
  2268. * ffs_lock must be taken by the caller of this function
  2269. */
  2270. static struct ffs_dev *_ffs_alloc_dev(void)
  2271. {
  2272. struct ffs_dev *dev;
  2273. int ret;
  2274. if (_ffs_get_single_dev())
  2275. return ERR_PTR(-EBUSY);
  2276. dev = kzalloc(sizeof(*dev), GFP_KERNEL);
  2277. if (!dev)
  2278. return ERR_PTR(-ENOMEM);
  2279. if (list_empty(&ffs_devices)) {
  2280. ret = functionfs_init();
  2281. if (ret) {
  2282. kfree(dev);
  2283. return ERR_PTR(ret);
  2284. }
  2285. }
  2286. list_add(&dev->entry, &ffs_devices);
  2287. return dev;
  2288. }
  2289. /*
  2290. * ffs_lock must be taken by the caller of this function
  2291. * The caller is responsible for "name" being available whenever f_fs needs it
  2292. */
  2293. static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
  2294. {
  2295. struct ffs_dev *existing;
  2296. existing = _ffs_do_find_dev(name);
  2297. if (existing)
  2298. return -EBUSY;
  2299. dev->name = name;
  2300. return 0;
  2301. }
  2302. /*
  2303. * The caller is responsible for "name" being available whenever f_fs needs it
  2304. */
  2305. int ffs_name_dev(struct ffs_dev *dev, const char *name)
  2306. {
  2307. int ret;
  2308. ffs_dev_lock();
  2309. ret = _ffs_name_dev(dev, name);
  2310. ffs_dev_unlock();
  2311. return ret;
  2312. }
  2313. EXPORT_SYMBOL_GPL(ffs_name_dev);
  2314. int ffs_single_dev(struct ffs_dev *dev)
  2315. {
  2316. int ret;
  2317. ret = 0;
  2318. ffs_dev_lock();
  2319. if (!list_is_singular(&ffs_devices))
  2320. ret = -EBUSY;
  2321. else
  2322. dev->single = true;
  2323. ffs_dev_unlock();
  2324. return ret;
  2325. }
  2326. EXPORT_SYMBOL_GPL(ffs_single_dev);
  2327. /*
  2328. * ffs_lock must be taken by the caller of this function
  2329. */
  2330. static void _ffs_free_dev(struct ffs_dev *dev)
  2331. {
  2332. list_del(&dev->entry);
  2333. if (dev->name_allocated)
  2334. kfree(dev->name);
  2335. kfree(dev);
  2336. if (list_empty(&ffs_devices))
  2337. functionfs_cleanup();
  2338. }
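/*
 * ffs_acquire_dev() is called when a FunctionFS instance is mounted:
 * it looks up the named ffs_dev (or the single anonymous one), marks
 * it mounted and gives the registered callback a chance to veto.
 * ffs_release_dev() reverses this when the instance is released.
 */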
  2339. static void *ffs_acquire_dev(const char *dev_name)
  2340. {
  2341. struct ffs_dev *ffs_dev;
  2342. ENTER();
  2343. ffs_dev_lock();
  2344. ffs_dev = _ffs_find_dev(dev_name);
  2345. if (!ffs_dev)
  2346. ffs_dev = ERR_PTR(-ENODEV);
  2347. else if (ffs_dev->mounted)
  2348. ffs_dev = ERR_PTR(-EBUSY);
  2349. else if (ffs_dev->ffs_acquire_dev_callback &&
  2350. ffs_dev->ffs_acquire_dev_callback(ffs_dev))
  2351. ffs_dev = ERR_PTR(-ENODEV);
  2352. else
  2353. ffs_dev->mounted = true;
  2354. ffs_dev_unlock();
  2355. return ffs_dev;
  2356. }
  2357. static void ffs_release_dev(struct ffs_data *ffs_data)
  2358. {
  2359. struct ffs_dev *ffs_dev;
  2360. ENTER();
  2361. ffs_dev_lock();
  2362. ffs_dev = ffs_data->private_data;
  2363. if (ffs_dev) {
  2364. ffs_dev->mounted = false;
  2365. if (ffs_dev->ffs_release_dev_callback)
  2366. ffs_dev->ffs_release_dev_callback(ffs_dev);
  2367. }
  2368. ffs_dev_unlock();
  2369. }
  2370. static int ffs_ready(struct ffs_data *ffs)
  2371. {
  2372. struct ffs_dev *ffs_obj;
  2373. int ret = 0;
  2374. ENTER();
  2375. ffs_dev_lock();
  2376. ffs_obj = ffs->private_data;
  2377. if (!ffs_obj) {
  2378. ret = -EINVAL;
  2379. goto done;
  2380. }
  2381. if (WARN_ON(ffs_obj->desc_ready)) {
  2382. ret = -EBUSY;
  2383. goto done;
  2384. }
  2385. ffs_obj->desc_ready = true;
  2386. ffs_obj->ffs_data = ffs;
  2387. if (ffs_obj->ffs_ready_callback)
  2388. ret = ffs_obj->ffs_ready_callback(ffs);
  2389. done:
  2390. ffs_dev_unlock();
  2391. return ret;
  2392. }
  2393. static void ffs_closed(struct ffs_data *ffs)
  2394. {
  2395. struct ffs_dev *ffs_obj;
  2396. ENTER();
  2397. ffs_dev_lock();
  2398. ffs_obj = ffs->private_data;
  2399. if (!ffs_obj)
  2400. goto done;
  2401. ffs_obj->desc_ready = false;
  2402. if (ffs_obj->ffs_closed_callback)
  2403. ffs_obj->ffs_closed_callback(ffs);
  2404. if (!ffs_obj->opts || ffs_obj->opts->no_configfs
  2405. || !ffs_obj->opts->func_inst.group.cg_item.ci_parent)
  2406. goto done;
  2407. unregister_gadget_item(ffs_obj->opts->
  2408. func_inst.group.cg_item.ci_parent->ci_parent);
  2409. done:
  2410. ffs_dev_unlock();
  2411. }
  2412. /* Misc helper functions ****************************************************/
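/*
 * ffs_mutex_lock() honours O_NONBLOCK semantics: with nonblock set it
 * only trylocks and reports -EAGAIN on contention, otherwise it sleeps
 * interruptibly.  ffs_prepare_buffer() duplicates a user buffer into
 * kernel memory and returns an ERR_PTR() on failure (NULL for a
 * zero-length request).
 */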
  2413. static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
  2414. {
  2415. return nonblock
  2416. ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
  2417. : mutex_lock_interruptible(mutex);
  2418. }
  2419. static char *ffs_prepare_buffer(const char __user *buf, size_t len)
  2420. {
  2421. char *data;
  2422. if (unlikely(!len))
  2423. return NULL;
  2424. data = kmalloc(len, GFP_KERNEL);
  2425. if (unlikely(!data))
  2426. return ERR_PTR(-ENOMEM);
  2427. if (unlikely(__copy_from_user(data, buf, len))) {
  2428. kfree(data);
  2429. return ERR_PTR(-EFAULT);
  2430. }
  2431. pr_vdebug("Buffer from user space:\n");
  2432. ffs_dump_mem("", data, len);
  2433. return data;
  2434. }
  2435. DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
  2436. MODULE_LICENSE("GPL");
  2437. MODULE_AUTHOR("Michal Nazarewicz");