smb2pdu.c

  1. /*
  2. * fs/cifs/smb2pdu.c
  3. *
  4. * Copyright (C) International Business Machines Corp., 2009, 2013
  5. * Etersoft, 2012
  6. * Author(s): Steve French (sfrench@us.ibm.com)
  7. * Pavel Shilovsky (pshilovsky@samba.org) 2012
  8. *
  9. * Contains the routines for constructing the SMB2 PDUs themselves
  10. *
  11. * This library is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU Lesser General Public License as published
  13. * by the Free Software Foundation; either version 2.1 of the License, or
  14. * (at your option) any later version.
  15. *
  16. * This library is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
  19. * the GNU Lesser General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU Lesser General Public License
  22. * along with this library; if not, write to the Free Software
  23. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  24. */
  25. /* SMB2 PDU handling routines here - except for leftovers (e.g. session setup) */
  26. /* Note that there are handle based routines which must be */
  27. /* treated slightly differently for reconnection purposes since we never */
  28. /* want to reuse a stale file handle and only the caller knows the file info */
  29. #include <linux/fs.h>
  30. #include <linux/kernel.h>
  31. #include <linux/vfs.h>
  32. #include <linux/task_io_accounting_ops.h>
  33. #include <linux/uaccess.h>
  34. #include <linux/pagemap.h>
  35. #include <linux/xattr.h>
  36. #include "smb2pdu.h"
  37. #include "cifsglob.h"
  38. #include "cifsacl.h"
  39. #include "cifsproto.h"
  40. #include "smb2proto.h"
  41. #include "cifs_unicode.h"
  42. #include "cifs_debug.h"
  43. #include "ntlmssp.h"
  44. #include "smb2status.h"
  45. #include "smb2glob.h"
  46. #include "cifspdu.h"
  47. #include "cifs_spnego.h"
  48. /*
  49. * The following table defines the expected "StructureSize" of SMB2 requests
  50. * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
  51. *
  52. * Note that commands are defined in smb2pdu.h in le16 but the array below is
  53. * indexed by command in host byte order.
  54. */
  55. static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
  56. /* SMB2_NEGOTIATE */ 36,
  57. /* SMB2_SESSION_SETUP */ 25,
  58. /* SMB2_LOGOFF */ 4,
  59. /* SMB2_TREE_CONNECT */ 9,
  60. /* SMB2_TREE_DISCONNECT */ 4,
  61. /* SMB2_CREATE */ 57,
  62. /* SMB2_CLOSE */ 24,
  63. /* SMB2_FLUSH */ 24,
  64. /* SMB2_READ */ 49,
  65. /* SMB2_WRITE */ 49,
  66. /* SMB2_LOCK */ 48,
  67. /* SMB2_IOCTL */ 57,
  68. /* SMB2_CANCEL */ 4,
  69. /* SMB2_ECHO */ 4,
  70. /* SMB2_QUERY_DIRECTORY */ 33,
  71. /* SMB2_CHANGE_NOTIFY */ 32,
  72. /* SMB2_QUERY_INFO */ 41,
  73. /* SMB2_SET_INFO */ 33,
  74. /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
  75. };
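/*
 * Fill in the fixed SMB2 header for the given command: StructureSize,
 * Command, credit request/charge, process and tree/session ids, and the
 * signing flag, using the connection state hanging off the tcon when one
 * is available.
 */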
  76. static void
  77. smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
  78. const struct cifs_tcon *tcon)
  79. {
  80. struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
  81. char *temp = (char *)hdr;
  82. /* look up the word count (i.e. StructureSize) from the table */
  83. __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)];
  84. /*
  85. * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
  86. * largest operations (Create)
  87. */
  88. memset(temp, 0, 256);
  89. /* Note this is only network field converted to big endian */
  90. hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
  91. - 4 /* RFC 1001 length field itself not counted */);
  92. hdr->ProtocolId = SMB2_PROTO_NUMBER;
  93. hdr->StructureSize = cpu_to_le16(64);
  94. hdr->Command = smb2_cmd;
  95. if (tcon && tcon->ses && tcon->ses->server) {
  96. struct TCP_Server_Info *server = tcon->ses->server;
  97. spin_lock(&server->req_lock);
  98. /* Request up to 2 credits but don't go over the limit. */
  99. if (server->credits >= server->max_credits)
  100. hdr->CreditRequest = cpu_to_le16(0);
  101. else
  102. hdr->CreditRequest = cpu_to_le16(
  103. min_t(int, server->max_credits -
  104. server->credits, 2));
  105. spin_unlock(&server->req_lock);
  106. } else {
  107. hdr->CreditRequest = cpu_to_le16(2);
  108. }
  109. hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
  110. if (!tcon)
  111. goto out;
  112. /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
  113. /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
  114. if ((tcon->ses) && (tcon->ses->server) &&
  115. (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
  116. hdr->CreditCharge = cpu_to_le16(1);
  117. /* else CreditCharge MBZ */
  118. hdr->TreeId = tcon->tid;
  119. /* Uid is not converted */
  120. if (tcon->ses)
  121. hdr->SessionId = tcon->ses->Suid;
  122. /*
  123. * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
  124. * to pass the path on the Open SMB prefixed by \\server\share.
  125. * Not sure when we would need to do the augmented path (if ever) and
  126. * setting this flag breaks the SMB2 open operation since it is
  127. * illegal to send an empty path name (without \\server\share prefix)
  128. * when the DFS flag is set in the SMB open header. We could
  129. * consider setting the flag on all operations other than open
  130. but it is safer to not set it for now.
  131. */
  132. /* if (tcon->share_flags & SHI1005_FLAGS_DFS)
  133. hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
  134. if (tcon->ses && tcon->ses->server && tcon->ses->server->sign)
  135. hdr->Flags |= SMB2_FLAGS_SIGNED;
  136. out:
  137. pdu->StructureSize2 = cpu_to_le16(parmsize);
  138. return;
  139. }
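/*
 * Called before building most SMB2 requests: if the transport or session
 * has dropped, wait for the demultiplex thread to reconnect, redo the
 * negotiate/session setup/tree connect as needed, and return -EAGAIN for
 * handle-based commands so the caller can reopen and retry.
 */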
  140. static int
  141. smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
  142. {
  143. int rc = 0;
  144. struct nls_table *nls_codepage;
  145. struct cifs_ses *ses;
  146. struct TCP_Server_Info *server;
  147. /*
  148. * SMB2's NegProt, SessSetup and Logoff do not have a tcon yet, so
  149. * the tcp and smb session status checks are done differently
  150. * for those three - in the calling routine.
  151. */
  152. if (tcon == NULL)
  153. return rc;
  154. if (smb2_command == SMB2_TREE_CONNECT)
  155. return rc;
  156. if (tcon->tidStatus == CifsExiting) {
  157. /*
  158. * only tree disconnect, open, and write
  159. * (and ulogoff, which does not have a tcon)
  160. * are allowed as we start a forced umount.
  161. */
  162. if ((smb2_command != SMB2_WRITE) &&
  163. (smb2_command != SMB2_CREATE) &&
  164. (smb2_command != SMB2_TREE_DISCONNECT)) {
  165. cifs_dbg(FYI, "can not send cmd %d while umounting\n",
  166. smb2_command);
  167. return -ENODEV;
  168. }
  169. }
  170. if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
  171. (!tcon->ses->server))
  172. return -EIO;
  173. ses = tcon->ses;
  174. server = ses->server;
  175. /*
  176. * Give demultiplex thread up to 10 seconds to reconnect, should be
  177. * greater than cifs socket timeout which is 7 seconds
  178. */
  179. while (server->tcpStatus == CifsNeedReconnect) {
  180. /*
  181. * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
  182. * here since they are implicitly done when session drops.
  183. */
  184. switch (smb2_command) {
  185. /*
  186. * BB Should we keep oplock break and add flush to exceptions?
  187. */
  188. case SMB2_TREE_DISCONNECT:
  189. case SMB2_CANCEL:
  190. case SMB2_CLOSE:
  191. case SMB2_OPLOCK_BREAK:
  192. return -EAGAIN;
  193. }
  194. wait_event_interruptible_timeout(server->response_q,
  195. (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
  196. /* are we still trying to reconnect? */
  197. if (server->tcpStatus != CifsNeedReconnect)
  198. break;
  199. /*
  200. * on "soft" mounts we wait once. Hard mounts keep
  201. * retrying until process is killed or server comes
  202. * back on-line
  203. */
  204. if (!tcon->retry) {
  205. cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
  206. return -EHOSTDOWN;
  207. }
  208. }
  209. if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
  210. return rc;
  211. nls_codepage = load_nls_default();
  212. /*
  213. * need to prevent multiple threads trying to simultaneously reconnect
  214. * the same SMB session
  215. */
  216. mutex_lock(&tcon->ses->session_mutex);
  217. rc = cifs_negotiate_protocol(0, tcon->ses);
  218. if (!rc && tcon->ses->need_reconnect)
  219. rc = cifs_setup_session(0, tcon->ses, nls_codepage);
  220. if (rc || !tcon->need_reconnect) {
  221. mutex_unlock(&tcon->ses->session_mutex);
  222. goto out;
  223. }
  224. cifs_mark_open_files_invalid(tcon);
  225. if (tcon->use_persistent)
  226. tcon->need_reopen_files = true;
  227. rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
  228. mutex_unlock(&tcon->ses->session_mutex);
  229. cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
  230. if (rc)
  231. goto out;
  232. if (smb2_command != SMB2_INTERNAL_CMD)
  233. queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
  234. atomic_inc(&tconInfoReconnectCount);
  235. out:
  236. /*
  237. * Check if this is a handle-based operation so we know whether we can
  238. * continue or not without returning to the caller to reset the file handle.
  239. */
  240. /*
  241. * BB Is flush done by server on drop of tcp session? Should we special
  242. * case it and skip above?
  243. */
  244. switch (smb2_command) {
  245. case SMB2_FLUSH:
  246. case SMB2_READ:
  247. case SMB2_WRITE:
  248. case SMB2_LOCK:
  249. case SMB2_IOCTL:
  250. case SMB2_QUERY_DIRECTORY:
  251. case SMB2_CHANGE_NOTIFY:
  252. case SMB2_QUERY_INFO:
  253. case SMB2_SET_INFO:
  254. rc = -EAGAIN;
  255. }
  256. unload_nls(nls_codepage);
  257. return rc;
  258. }
  259. /*
  260. * Allocate and return a pointer to an SMB request header, and set basic
  261. * SMB information in the header. If the return code is zero, this
  262. * function must have filled in the request_buf pointer.
  263. */
  264. static int
  265. small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
  266. void **request_buf)
  267. {
  268. int rc = 0;
  269. rc = smb2_reconnect(smb2_command, tcon);
  270. if (rc)
  271. return rc;
  272. /* BB eventually switch this to SMB2 specific small buf size */
  273. *request_buf = cifs_small_buf_get();
  274. if (*request_buf == NULL) {
  275. /* BB should we add a retry in here if not a writepage? */
  276. return -ENOMEM;
  277. }
  278. smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);
  279. if (tcon != NULL) {
  280. #ifdef CONFIG_CIFS_STATS2
  281. uint16_t com_code = le16_to_cpu(smb2_command);
  282. cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
  283. #endif
  284. cifs_stats_inc(&tcon->num_smbs_sent);
  285. }
  286. return rc;
  287. }
  288. #ifdef CONFIG_CIFS_SMB311
  289. /* offset is sizeof smb2_negotiate_req - 4 but rounded up to 8 bytes */
  290. #define OFFSET_OF_NEG_CONTEXT 0x68 /* sizeof(struct smb2_negotiate_req) - 4 */
  291. #define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1)
  292. #define SMB2_ENCRYPTION_CAPABILITIES cpu_to_le16(2)
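/*
 * The two helpers below build the SMB3.1.1 negotiate contexts appended to
 * the NEGOTIATE request: a preauth integrity context advertising SHA-512
 * with a random salt, and an encryption context offering AES-128-GCM and
 * AES-128-CCM.
 */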
  293. static void
  294. build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
  295. {
  296. pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
  297. pneg_ctxt->DataLength = cpu_to_le16(38);
  298. pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
  299. pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
  300. get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
  301. pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
  302. }
  303. static void
  304. build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
  305. {
  306. pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
  307. pneg_ctxt->DataLength = cpu_to_le16(6);
  308. pneg_ctxt->CipherCount = cpu_to_le16(2);
  309. pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
  310. pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
  311. }
  312. static void
  313. assemble_neg_contexts(struct smb2_negotiate_req *req)
  314. {
  315. /* +4 is to account for the RFC1001 len field */
  316. char *pneg_ctxt = (char *)req + OFFSET_OF_NEG_CONTEXT + 4;
  317. build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
  318. /* Add 2 to size to round to 8 byte boundary */
  319. pneg_ctxt += 2 + sizeof(struct smb2_preauth_neg_context);
  320. build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
  321. req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
  322. req->NegotiateContextCount = cpu_to_le16(2);
  323. inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2
  324. + sizeof(struct smb2_encryption_neg_context)); /* calculate hash */
  325. }
  326. #else
  327. static void assemble_neg_contexts(struct smb2_negotiate_req *req)
  328. {
  329. return;
  330. }
  331. #endif /* SMB311 */
  332. /*
  333. *
  334. * SMB2 Worker functions follow:
  335. *
  336. * The general structure of the worker functions is:
  337. * 1) Call smb2_init (assembles SMB2 header)
  338. * 2) Initialize SMB2 command specific fields in fixed length area of SMB
  339. * 3) Call smb_sendrcv2 (sends request on socket and waits for response)
  340. * 4) Decode SMB2 command specific fields in the fixed length area
  341. * 5) Decode variable length data area (if any for this SMB2 command type)
  342. * 6) Call free smb buffer
  343. * 7) return
  344. *
  345. */
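/*
 * Send the SMB2 NEGOTIATE request for the single dialect configured on this
 * mount and cache the server's advertised limits (max buffer, read and write
 * sizes), security mode and capabilities from the response.
 */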
  346. int
  347. SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
  348. {
  349. struct smb2_negotiate_req *req;
  350. struct smb2_negotiate_rsp *rsp;
  351. struct kvec iov[1];
  352. int rc = 0;
  353. int resp_buftype;
  354. struct TCP_Server_Info *server = ses->server;
  355. int blob_offset, blob_length;
  356. char *security_blob;
  357. int flags = CIFS_NEG_OP;
  358. cifs_dbg(FYI, "Negotiate protocol\n");
  359. if (!server) {
  360. WARN(1, "%s: server is NULL!\n", __func__);
  361. return -EIO;
  362. }
  363. rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
  364. if (rc)
  365. return rc;
  366. req->hdr.SessionId = 0;
  367. req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
  368. req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
  369. inc_rfc1001_len(req, 2);
  370. /* only one of SMB2 signing flags may be set in SMB2 request */
  371. if (ses->sign)
  372. req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
  373. else if (global_secflags & CIFSSEC_MAY_SIGN)
  374. req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
  375. else
  376. req->SecurityMode = 0;
  377. req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);
  378. /* ClientGUID must be zero for SMB2.02 dialect */
  379. if (ses->server->vals->protocol_id == SMB20_PROT_ID)
  380. memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
  381. else {
  382. memcpy(req->ClientGUID, server->client_guid,
  383. SMB2_CLIENT_GUID_SIZE);
  384. if (ses->server->vals->protocol_id == SMB311_PROT_ID)
  385. assemble_neg_contexts(req);
  386. }
  387. iov[0].iov_base = (char *)req;
  388. /* 4 for rfc1002 length field */
  389. iov[0].iov_len = get_rfc1002_length(req) + 4;
  390. rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);
  391. rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base;
  392. /*
  393. * No tcon so can't do
  394. * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
  395. */
  396. if (rc != 0)
  397. goto neg_exit;
  398. cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
  399. /* BB we may eventually want to match the negotiated vs. requested
  400. dialect, even though we are only requesting one at a time */
  401. if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
  402. cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
  403. else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
  404. cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
  405. else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
  406. cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
  407. else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
  408. cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
  409. #ifdef CONFIG_CIFS_SMB311
  410. else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
  411. cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
  412. #endif /* SMB311 */
  413. else {
  414. cifs_dbg(VFS, "Illegal dialect returned by server 0x%x\n",
  415. le16_to_cpu(rsp->DialectRevision));
  416. rc = -EIO;
  417. goto neg_exit;
  418. }
  419. server->dialect = le16_to_cpu(rsp->DialectRevision);
  420. /* SMB2 only has an extended negflavor */
  421. server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
  422. /* set it to the maximum buffer size value we can send with 1 credit */
  423. server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
  424. SMB2_MAX_BUFFER_SIZE);
  425. server->max_read = le32_to_cpu(rsp->MaxReadSize);
  426. server->max_write = le32_to_cpu(rsp->MaxWriteSize);
  427. /* BB Do we need to validate the SecurityMode? */
  428. server->sec_mode = le16_to_cpu(rsp->SecurityMode);
  429. server->capabilities = le32_to_cpu(rsp->Capabilities);
  430. /* Internal types */
  431. server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
  432. security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
  433. &rsp->hdr);
  434. /*
  435. * See MS-SMB2 section 2.2.4: if no blob, client picks default which
  436. * for us will be
  437. * ses->sectype = RawNTLMSSP;
  438. * but for the time being this is our only auth choice so it doesn't matter.
  439. * We just found a server which sets blob length to zero expecting raw.
  440. */
  441. if (blob_length == 0)
  442. cifs_dbg(FYI, "missing security blob on negprot\n");
  443. rc = cifs_enable_signing(server, ses->sign);
  444. if (rc)
  445. goto neg_exit;
  446. if (blob_length) {
  447. rc = decode_negTokenInit(security_blob, blob_length, server);
  448. if (rc == 1)
  449. rc = 0;
  450. else if (rc == 0)
  451. rc = -EIO;
  452. }
  453. neg_exit:
  454. free_rsp_buf(resp_buftype, rsp);
  455. return rc;
  456. }
  457. int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
  458. {
  459. int rc = 0;
  460. struct validate_negotiate_info_req vneg_inbuf;
  461. struct validate_negotiate_info_rsp *pneg_rsp;
  462. u32 rsplen;
  463. cifs_dbg(FYI, "validate negotiate\n");
  464. /*
  465. * validation ioctl must be signed, so no point sending this if we
  466. * cannot sign it. We could eventually change this to selectively
  467. * sign just this, the first and only signed request on a connection.
  468. * This is good enough for now since a user who wants better security
  469. * would also enable signing on the mount. Having validation of
  470. * negotiate info for signed connections helps reduce attack vectors
  471. */
  472. if (tcon->ses->server->sign == false)
  473. return 0; /* validation requires signing */
  474. vneg_inbuf.Capabilities =
  475. cpu_to_le32(tcon->ses->server->vals->req_capabilities);
  476. memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
  477. SMB2_CLIENT_GUID_SIZE);
  478. if (tcon->ses->sign)
  479. vneg_inbuf.SecurityMode =
  480. cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
  481. else if (global_secflags & CIFSSEC_MAY_SIGN)
  482. vneg_inbuf.SecurityMode =
  483. cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
  484. else
  485. vneg_inbuf.SecurityMode = 0;
  486. vneg_inbuf.DialectCount = cpu_to_le16(1);
  487. vneg_inbuf.Dialects[0] =
  488. cpu_to_le16(tcon->ses->server->vals->protocol_id);
  489. rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
  490. FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
  491. (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
  492. (char **)&pneg_rsp, &rsplen);
  493. if (rc != 0) {
  494. cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
  495. return -EIO;
  496. }
  497. if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
  498. cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
  499. return -EIO;
  500. }
  501. /* check validate negotiate info response matches what we got earlier */
  502. if (pneg_rsp->Dialect !=
  503. cpu_to_le16(tcon->ses->server->vals->protocol_id))
  504. goto vneg_out;
  505. if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
  506. goto vneg_out;
  507. /* do not validate server guid because not saved at negprot time yet */
  508. if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
  509. SMB2_LARGE_FILES) != tcon->ses->server->capabilities)
  510. goto vneg_out;
  511. /* validate negotiate successful */
  512. cifs_dbg(FYI, "validate negotiate info successful\n");
  513. return 0;
  514. vneg_out:
  515. cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
  516. return -EIO;
  517. }
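/*
 * State carried between the stages of the session setup state machine
 * below: ->func points at the next stage to run, ->result collects the
 * final return code, and iov[0]/buf0_type track the request buffer.
 */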
  518. struct SMB2_sess_data {
  519. unsigned int xid;
  520. struct cifs_ses *ses;
  521. struct nls_table *nls_cp;
  522. void (*func)(struct SMB2_sess_data *);
  523. int result;
  524. u64 previous_session;
  525. /* we will send the SMB in three pieces:
  526. * a fixed length beginning part, an optional
  527. * SPNEGO blob (which can be zero length), and a
  528. * last part which will include the strings
  529. * and rest of bcc area. This allows us to avoid
  530. * a large 17K buffer allocation
  531. */
  532. int buf0_type;
  533. struct kvec iov[2];
  534. };
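/*
 * Allocate the fixed-length part of the SESSION_SETUP request and fill in
 * the security mode, credit request and previous session id; the security
 * blob itself is supplied later as the second iovec.
 */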
  535. static int
  536. SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
  537. {
  538. int rc;
  539. struct cifs_ses *ses = sess_data->ses;
  540. struct smb2_sess_setup_req *req;
  541. struct TCP_Server_Info *server = ses->server;
  542. rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
  543. if (rc)
  544. return rc;
  545. req->hdr.SessionId = 0; /* First session, not a reauthenticate */
  546. /* if reconnect, we need to send previous sess id, otherwise it is 0 */
  547. req->PreviousSessionId = sess_data->previous_session;
  548. req->Flags = 0; /* MBZ */
  549. /* to enable echos and oplocks */
  550. req->hdr.CreditRequest = cpu_to_le16(3);
  551. /* only one of SMB2 signing flags may be set in SMB2 request */
  552. if (server->sign)
  553. req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
  554. else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
  555. req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
  556. else
  557. req->SecurityMode = 0;
  558. req->Capabilities = 0;
  559. req->Channel = 0; /* MBZ */
  560. sess_data->iov[0].iov_base = (char *)req;
  561. /* 4 for rfc1002 length field and 1 for pad */
  562. sess_data->iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
  563. /*
  564. * This variable will be used to clear the buffer
  565. * allocated above in case of any error in the calling function.
  566. */
  567. sess_data->buf0_type = CIFS_SMALL_BUFFER;
  568. return 0;
  569. }
  570. static void
  571. SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
  572. {
  573. free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base);
  574. sess_data->buf0_type = CIFS_NO_BUFFER;
  575. }
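/*
 * Point the request at the security blob in iov[1], fix up the RFC1001
 * length and send the SESSION_SETUP request.
 */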
  576. static int
  577. SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
  578. {
  579. int rc;
  580. struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
  581. /* Testing shows that buffer offset must be at location of Buffer[0] */
  582. req->SecurityBufferOffset =
  583. cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
  584. 1 /* pad */ - 4 /* rfc1001 len */);
  585. req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
  586. inc_rfc1001_len(req, sess_data->iov[1].iov_len - 1 /* pad */);
  587. /* BB add code to build os and lm fields */
  588. rc = SendReceive2(sess_data->xid, sess_data->ses,
  589. sess_data->iov, 2,
  590. &sess_data->buf0_type,
  591. CIFS_LOG_ERROR | CIFS_NEG_OP);
  592. return rc;
  593. }
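/*
 * Final stage of session setup: generate the signing key if signing is
 * enabled, then mark the session established and good.
 */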
  594. static int
  595. SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
  596. {
  597. int rc = 0;
  598. struct cifs_ses *ses = sess_data->ses;
  599. mutex_lock(&ses->server->srv_mutex);
  600. if (ses->server->sign && ses->server->ops->generate_signingkey) {
  601. rc = ses->server->ops->generate_signingkey(ses);
  602. kfree(ses->auth_key.response);
  603. ses->auth_key.response = NULL;
  604. if (rc) {
  605. cifs_dbg(FYI,
  606. "SMB3 session key generation failed\n");
  607. mutex_unlock(&ses->server->srv_mutex);
  608. goto keygen_exit;
  609. }
  610. }
  611. if (!ses->server->session_estab) {
  612. ses->server->sequence_number = 0x2;
  613. ses->server->session_estab = true;
  614. }
  615. mutex_unlock(&ses->server->srv_mutex);
  616. cifs_dbg(FYI, "SMB2/3 session established successfully\n");
  617. spin_lock(&GlobalMid_Lock);
  618. ses->status = CifsGood;
  619. ses->need_reconnect = false;
  620. spin_unlock(&GlobalMid_Lock);
  621. keygen_exit:
  622. if (!ses->server->sign) {
  623. kfree(ses->auth_key.response);
  624. ses->auth_key.response = NULL;
  625. }
  626. return rc;
  627. }
  628. #ifdef CONFIG_CIFS_UPCALL
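/*
 * Kerberos session setup: fetch the SPNEGO blob and session key from the
 * cifs.upcall key, send the blob to the server and establish the session.
 */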
  629. static void
  630. SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
  631. {
  632. int rc;
  633. struct cifs_ses *ses = sess_data->ses;
  634. struct cifs_spnego_msg *msg;
  635. struct key *spnego_key = NULL;
  636. struct smb2_sess_setup_rsp *rsp = NULL;
  637. rc = SMB2_sess_alloc_buffer(sess_data);
  638. if (rc)
  639. goto out;
  640. spnego_key = cifs_get_spnego_key(ses);
  641. if (IS_ERR(spnego_key)) {
  642. rc = PTR_ERR(spnego_key);
  643. spnego_key = NULL;
  644. goto out;
  645. }
  646. msg = spnego_key->payload.data[0];
  647. /*
  648. * check version field to make sure that cifs.upcall is
  649. * sending us a response in an expected form
  650. */
  651. if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
  652. cifs_dbg(VFS,
  653. "bad cifs.upcall version. Expected %d got %d",
  654. CIFS_SPNEGO_UPCALL_VERSION, msg->version);
  655. rc = -EKEYREJECTED;
  656. goto out_put_spnego_key;
  657. }
  658. ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
  659. GFP_KERNEL);
  660. if (!ses->auth_key.response) {
  661. cifs_dbg(VFS,
  662. "Kerberos can't allocate (%u bytes) memory",
  663. msg->sesskey_len);
  664. rc = -ENOMEM;
  665. goto out_put_spnego_key;
  666. }
  667. ses->auth_key.len = msg->sesskey_len;
  668. sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
  669. sess_data->iov[1].iov_len = msg->secblob_len;
  670. rc = SMB2_sess_sendreceive(sess_data);
  671. if (rc)
  672. goto out_put_spnego_key;
  673. rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
  674. ses->Suid = rsp->hdr.SessionId;
  675. ses->session_flags = le16_to_cpu(rsp->SessionFlags);
  676. if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
  677. cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
  678. rc = SMB2_sess_establish_session(sess_data);
  679. out_put_spnego_key:
  680. key_invalidate(spnego_key);
  681. key_put(spnego_key);
  682. out:
  683. sess_data->result = rc;
  684. sess_data->func = NULL;
  685. SMB2_sess_free_buffer(sess_data);
  686. }
  687. #else
  688. static void
  689. SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
  690. {
  691. cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
  692. sess_data->result = -EOPNOTSUPP;
  693. sess_data->func = NULL;
  694. }
  695. #endif
  696. static void
  697. SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
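/*
 * First round trip of raw NTLMSSP session setup: send the NEGOTIATE message,
 * decode the server's challenge, and queue the authenticate stage as the
 * next function to run.
 */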
  698. static void
  699. SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
  700. {
  701. int rc;
  702. struct cifs_ses *ses = sess_data->ses;
  703. struct smb2_sess_setup_rsp *rsp = NULL;
  704. char *ntlmssp_blob = NULL;
  705. bool use_spnego = false; /* else use raw ntlmssp */
  706. u16 blob_length = 0;
  707. /*
  708. * If the memory allocation is successful, the caller of this function
  709. * frees it.
  710. */
  711. ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
  712. if (!ses->ntlmssp) {
  713. rc = -ENOMEM;
  714. goto out_err;
  715. }
  716. ses->ntlmssp->sesskey_per_smbsess = true;
  717. rc = SMB2_sess_alloc_buffer(sess_data);
  718. if (rc)
  719. goto out_err;
  720. ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
  721. GFP_KERNEL);
  722. if (ntlmssp_blob == NULL) {
  723. rc = -ENOMEM;
  724. goto out;
  725. }
  726. build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
  727. if (use_spnego) {
  728. /* BB eventually need to add this */
  729. cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
  730. rc = -EOPNOTSUPP;
  731. goto out;
  732. } else {
  733. blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
  734. /* with raw NTLMSSP we don't encapsulate in SPNEGO */
  735. }
  736. sess_data->iov[1].iov_base = ntlmssp_blob;
  737. sess_data->iov[1].iov_len = blob_length;
  738. rc = SMB2_sess_sendreceive(sess_data);
  739. rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
  740. /* If true, rc here is expected and not an error */
  741. if (sess_data->buf0_type != CIFS_NO_BUFFER &&
  742. rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
  743. rc = 0;
  744. if (rc)
  745. goto out;
  746. if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
  747. le16_to_cpu(rsp->SecurityBufferOffset)) {
  748. cifs_dbg(VFS, "Invalid security buffer offset %d\n",
  749. le16_to_cpu(rsp->SecurityBufferOffset));
  750. rc = -EIO;
  751. goto out;
  752. }
  753. rc = decode_ntlmssp_challenge(rsp->Buffer,
  754. le16_to_cpu(rsp->SecurityBufferLength), ses);
  755. if (rc)
  756. goto out;
  757. cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
  758. ses->Suid = rsp->hdr.SessionId;
  759. ses->session_flags = le16_to_cpu(rsp->SessionFlags);
  760. if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
  761. cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
  762. out:
  763. kfree(ntlmssp_blob);
  764. SMB2_sess_free_buffer(sess_data);
  765. if (!rc) {
  766. sess_data->result = 0;
  767. sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
  768. return;
  769. }
  770. out_err:
  771. kfree(ses->ntlmssp);
  772. ses->ntlmssp = NULL;
  773. sess_data->result = rc;
  774. sess_data->func = NULL;
  775. }
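/*
 * Second round trip of raw NTLMSSP session setup: send the AUTHENTICATE blob
 * for the session id returned with the challenge and finish establishing
 * the session.
 */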
  776. static void
  777. SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
  778. {
  779. int rc;
  780. struct cifs_ses *ses = sess_data->ses;
  781. struct smb2_sess_setup_req *req;
  782. struct smb2_sess_setup_rsp *rsp = NULL;
  783. unsigned char *ntlmssp_blob = NULL;
  784. bool use_spnego = false; /* else use raw ntlmssp */
  785. u16 blob_length = 0;
  786. rc = SMB2_sess_alloc_buffer(sess_data);
  787. if (rc)
  788. goto out;
  789. req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
  790. req->hdr.SessionId = ses->Suid;
  791. rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
  792. sess_data->nls_cp);
  793. if (rc) {
  794. cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
  795. goto out;
  796. }
  797. if (use_spnego) {
  798. /* BB eventually need to add this */
  799. cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
  800. rc = -EOPNOTSUPP;
  801. goto out;
  802. }
  803. sess_data->iov[1].iov_base = ntlmssp_blob;
  804. sess_data->iov[1].iov_len = blob_length;
  805. rc = SMB2_sess_sendreceive(sess_data);
  806. if (rc)
  807. goto out;
  808. rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
  809. ses->Suid = rsp->hdr.SessionId;
  810. ses->session_flags = le16_to_cpu(rsp->SessionFlags);
  811. if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
  812. cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
  813. rc = SMB2_sess_establish_session(sess_data);
  814. out:
  815. kfree(ntlmssp_blob);
  816. SMB2_sess_free_buffer(sess_data);
  817. kfree(ses->ntlmssp);
  818. ses->ntlmssp = NULL;
  819. sess_data->result = rc;
  820. sess_data->func = NULL;
  821. }
  822. static int
  823. SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
  824. {
  825. if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
  826. ses->sectype = RawNTLMSSP;
  827. switch (ses->sectype) {
  828. case Kerberos:
  829. sess_data->func = SMB2_auth_kerberos;
  830. break;
  831. case RawNTLMSSP:
  832. sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
  833. break;
  834. default:
  835. cifs_dbg(VFS, "secType %d not supported!\n", ses->sectype);
  836. return -EOPNOTSUPP;
  837. }
  838. return 0;
  839. }
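/*
 * Entry point for SMB2/SMB3 session setup: pick the authentication mechanism
 * and run the state machine stages until none is left.
 */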
  840. int
  841. SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
  842. const struct nls_table *nls_cp)
  843. {
  844. int rc = 0;
  845. struct TCP_Server_Info *server = ses->server;
  846. struct SMB2_sess_data *sess_data;
  847. cifs_dbg(FYI, "Session Setup\n");
  848. if (!server) {
  849. WARN(1, "%s: server is NULL!\n", __func__);
  850. return -EIO;
  851. }
  852. sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
  853. if (!sess_data)
  854. return -ENOMEM;
  855. rc = SMB2_select_sec(ses, sess_data);
  856. if (rc)
  857. goto out;
  858. sess_data->xid = xid;
  859. sess_data->ses = ses;
  860. sess_data->buf0_type = CIFS_NO_BUFFER;
  861. sess_data->nls_cp = (struct nls_table *) nls_cp;
  862. while (sess_data->func)
  863. sess_data->func(sess_data);
  864. rc = sess_data->result;
  865. out:
  866. kfree(sess_data);
  867. return rc;
  868. }
  869. int
  870. SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
  871. {
  872. struct smb2_logoff_req *req; /* response is also trivial struct */
  873. int rc = 0;
  874. struct TCP_Server_Info *server;
  875. cifs_dbg(FYI, "disconnect session %p\n", ses);
  876. if (ses && (ses->server))
  877. server = ses->server;
  878. else
  879. return -EIO;
  880. /* no need to send SMB logoff if uid already closed due to reconnect */
  881. if (ses->need_reconnect)
  882. goto smb2_session_already_dead;
  883. rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
  884. if (rc)
  885. return rc;
  886. /* since no tcon, smb2_init can not do this, so do here */
  887. req->hdr.SessionId = ses->Suid;
  888. if (server->sign)
  889. req->hdr.Flags |= SMB2_FLAGS_SIGNED;
  890. rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0);
  891. /*
  892. * No tcon so can't do
  893. * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
  894. */
  895. smb2_session_already_dead:
  896. return rc;
  897. }
  898. static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
  899. {
  900. cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
  901. }
  902. #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
  903. /* These are similar values to what Windows uses */
  904. static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
  905. {
  906. tcon->max_chunks = 256;
  907. tcon->max_bytes_chunk = 1048576;
  908. tcon->max_bytes_copy = 16777216;
  909. }
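/*
 * Send a TREE_CONNECT for the given UNC path and record the share type,
 * flags, capabilities and tree id in the tcon (or the IPC tid in the
 * session when called without a tcon).
 */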
  910. int
  911. SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
  912. struct cifs_tcon *tcon, const struct nls_table *cp)
  913. {
  914. struct smb2_tree_connect_req *req;
  915. struct smb2_tree_connect_rsp *rsp = NULL;
  916. struct kvec iov[2];
  917. int rc = 0;
  918. int resp_buftype;
  919. int unc_path_len;
  920. struct TCP_Server_Info *server;
  921. __le16 *unc_path = NULL;
  922. cifs_dbg(FYI, "TCON\n");
  923. if ((ses->server) && tree)
  924. server = ses->server;
  925. else
  926. return -EIO;
  927. if (tcon && tcon->bad_network_name)
  928. return -ENOENT;
  929. if ((tcon && tcon->seal) &&
  930. ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
  931. cifs_dbg(VFS, "encryption requested but no server support");
  932. return -EOPNOTSUPP;
  933. }
  934. unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
  935. if (unc_path == NULL)
  936. return -ENOMEM;
  937. unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
  938. unc_path_len *= 2;
  939. if (unc_path_len < 2) {
  940. kfree(unc_path);
  941. return -EINVAL;
  942. }
  943. rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
  944. if (rc) {
  945. kfree(unc_path);
  946. return rc;
  947. }
  948. if (tcon == NULL) {
  949. /* since no tcon, smb2_init can not do this, so do here */
  950. req->hdr.SessionId = ses->Suid;
  951. /* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
  952. req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
  953. }
  954. iov[0].iov_base = (char *)req;
  955. /* 4 for rfc1002 length field and 1 for pad */
  956. iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
  957. /* Testing shows that buffer offset must be at location of Buffer[0] */
  958. req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
  959. - 1 /* pad */ - 4 /* do not count rfc1001 len field */);
  960. req->PathLength = cpu_to_le16(unc_path_len - 2);
  961. iov[1].iov_base = unc_path;
  962. iov[1].iov_len = unc_path_len;
  963. inc_rfc1001_len(req, unc_path_len - 1 /* pad */);
  964. rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
  965. rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;
  966. if (rc != 0) {
  967. if (tcon) {
  968. cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
  969. tcon->need_reconnect = true;
  970. }
  971. goto tcon_error_exit;
  972. }
  973. if (tcon == NULL) {
  974. ses->ipc_tid = rsp->hdr.TreeId;
  975. goto tcon_exit;
  976. }
  977. if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
  978. cifs_dbg(FYI, "connection to disk share\n");
  979. else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
  980. tcon->ipc = true;
  981. cifs_dbg(FYI, "connection to pipe share\n");
  982. } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
  983. tcon->print = true;
  984. cifs_dbg(FYI, "connection to printer\n");
  985. } else {
  986. cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
  987. rc = -EOPNOTSUPP;
  988. goto tcon_error_exit;
  989. }
  990. tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
  991. tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
  992. tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
  993. tcon->tidStatus = CifsGood;
  994. tcon->need_reconnect = false;
  995. tcon->tid = rsp->hdr.TreeId;
  996. strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
  997. if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
  998. ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
  999. cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
  1000. init_copy_chunk_defaults(tcon);
  1001. if (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA)
  1002. cifs_dbg(VFS, "Encrypted shares not supported");
  1003. if (tcon->ses->server->ops->validate_negotiate)
  1004. rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
  1005. tcon_exit:
  1006. free_rsp_buf(resp_buftype, rsp);
  1007. kfree(unc_path);
  1008. return rc;
  1009. tcon_error_exit:
  1010. if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
  1011. cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
  1012. if (tcon)
  1013. tcon->bad_network_name = true;
  1014. }
  1015. goto tcon_exit;
  1016. }
  1017. int
  1018. SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
  1019. {
  1020. struct smb2_tree_disconnect_req *req; /* response is trivial */
  1021. int rc = 0;
  1022. struct TCP_Server_Info *server;
  1023. struct cifs_ses *ses = tcon->ses;
  1024. cifs_dbg(FYI, "Tree Disconnect\n");
  1025. if (ses && (ses->server))
  1026. server = ses->server;
  1027. else
  1028. return -EIO;
  1029. if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
  1030. return 0;
  1031. rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
  1032. if (rc)
  1033. return rc;
  1034. rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0);
  1035. if (rc)
  1036. cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
  1037. return rc;
  1038. }
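/*
 * The helpers below build the create contexts ("DHnQ"/"DHnC" and their v2
 * "DH2Q"/"DH2C" variants) used to request or reconnect durable and
 * persistent handles on SMB2_open.
 */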
  1039. static struct create_durable *
  1040. create_durable_buf(void)
  1041. {
  1042. struct create_durable *buf;
  1043. buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
  1044. if (!buf)
  1045. return NULL;
  1046. buf->ccontext.DataOffset = cpu_to_le16(offsetof
  1047. (struct create_durable, Data));
  1048. buf->ccontext.DataLength = cpu_to_le32(16);
  1049. buf->ccontext.NameOffset = cpu_to_le16(offsetof
  1050. (struct create_durable, Name));
  1051. buf->ccontext.NameLength = cpu_to_le16(4);
  1052. /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
  1053. buf->Name[0] = 'D';
  1054. buf->Name[1] = 'H';
  1055. buf->Name[2] = 'n';
  1056. buf->Name[3] = 'Q';
  1057. return buf;
  1058. }
  1059. static struct create_durable *
  1060. create_reconnect_durable_buf(struct cifs_fid *fid)
  1061. {
  1062. struct create_durable *buf;
  1063. buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
  1064. if (!buf)
  1065. return NULL;
  1066. buf->ccontext.DataOffset = cpu_to_le16(offsetof
  1067. (struct create_durable, Data));
  1068. buf->ccontext.DataLength = cpu_to_le32(16);
  1069. buf->ccontext.NameOffset = cpu_to_le16(offsetof
  1070. (struct create_durable, Name));
  1071. buf->ccontext.NameLength = cpu_to_le16(4);
  1072. buf->Data.Fid.PersistentFileId = fid->persistent_fid;
  1073. buf->Data.Fid.VolatileFileId = fid->volatile_fid;
  1074. /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
  1075. buf->Name[0] = 'D';
  1076. buf->Name[1] = 'H';
  1077. buf->Name[2] = 'n';
  1078. buf->Name[3] = 'C';
  1079. return buf;
  1080. }
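/*
 * Walk the create contexts returned in a create response looking for the
 * lease context ("RqLs") and let the dialect-specific handler parse it.
 */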
  1081. static __u8
  1082. parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
  1083. unsigned int *epoch)
  1084. {
  1085. char *data_offset;
  1086. struct create_context *cc;
  1087. unsigned int next;
  1088. unsigned int remaining;
  1089. char *name;
  1090. data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
  1091. remaining = le32_to_cpu(rsp->CreateContextsLength);
  1092. cc = (struct create_context *)data_offset;
  1093. while (remaining >= sizeof(struct create_context)) {
  1094. name = le16_to_cpu(cc->NameOffset) + (char *)cc;
  1095. if (le16_to_cpu(cc->NameLength) == 4 &&
  1096. strncmp(name, "RqLs", 4) == 0)
  1097. return server->ops->parse_lease_buf(cc, epoch);
  1098. next = le32_to_cpu(cc->Next);
  1099. if (!next)
  1100. break;
  1101. remaining -= next;
  1102. cc = (struct create_context *)((char *)cc + next);
  1103. }
  1104. return 0;
  1105. }
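/*
 * Append a lease create context to the create request and grow the context
 * length and RFC1001 length accordingly.
 */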
  1106. static int
  1107. add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
  1108. unsigned int *num_iovec, __u8 *oplock)
  1109. {
  1110. struct smb2_create_req *req = iov[0].iov_base;
  1111. unsigned int num = *num_iovec;
  1112. iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock);
  1113. if (iov[num].iov_base == NULL)
  1114. return -ENOMEM;
  1115. iov[num].iov_len = server->vals->create_lease_size;
  1116. req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
  1117. if (!req->CreateContextsOffset)
  1118. req->CreateContextsOffset = cpu_to_le32(
  1119. sizeof(struct smb2_create_req) - 4 +
  1120. iov[num - 1].iov_len);
  1121. le32_add_cpu(&req->CreateContextsLength,
  1122. server->vals->create_lease_size);
  1123. inc_rfc1001_len(&req->hdr, server->vals->create_lease_size);
  1124. *num_iovec = num + 1;
  1125. return 0;
  1126. }
  1127. static struct create_durable_v2 *
  1128. create_durable_v2_buf(struct cifs_fid *pfid)
  1129. {
  1130. struct create_durable_v2 *buf;
  1131. buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
  1132. if (!buf)
  1133. return NULL;
  1134. buf->ccontext.DataOffset = cpu_to_le16(offsetof
  1135. (struct create_durable_v2, dcontext));
  1136. buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2));
  1137. buf->ccontext.NameOffset = cpu_to_le16(offsetof
  1138. (struct create_durable_v2, Name));
  1139. buf->ccontext.NameLength = cpu_to_le16(4);
  1140. buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
  1141. buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
  1142. generate_random_uuid(buf->dcontext.CreateGuid);
  1143. memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
  1144. /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
  1145. buf->Name[0] = 'D';
  1146. buf->Name[1] = 'H';
  1147. buf->Name[2] = '2';
  1148. buf->Name[3] = 'Q';
  1149. return buf;
  1150. }
static struct create_durable_handle_reconnect_v2 *
create_reconnect_durable_v2_buf(struct cifs_fid *fid)
{
	struct create_durable_handle_reconnect_v2 *buf;

	buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
			GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->ccontext.DataOffset =
		cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
				     dcontext));
	buf->ccontext.DataLength =
		cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
	buf->ccontext.NameOffset =
		cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
				     Name));
	buf->ccontext.NameLength = cpu_to_le16(4);

	buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
	buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
	buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
	memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);

	/* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
	buf->Name[0] = 'D';
	buf->Name[1] = 'H';
	buf->Name[2] = '2';
	buf->Name[3] = 'C';
	return buf;
}

static int
add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
		       struct cifs_open_parms *oparms)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	iov[num].iov_base = create_durable_v2_buf(oparms->fid);
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_durable_v2);
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset =
			cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
								iov[1].iov_len);
	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2));
	inc_rfc1001_len(&req->hdr, sizeof(struct create_durable_v2));
	*num_iovec = num + 1;
	return 0;
}

static int
add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
				 struct cifs_open_parms *oparms)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	/* indicate that we don't need to relock the file */
	oparms->reconnect = false;

	iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset =
			cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
								iov[1].iov_len);
	le32_add_cpu(&req->CreateContextsLength,
			sizeof(struct create_durable_handle_reconnect_v2));
	inc_rfc1001_len(&req->hdr,
			sizeof(struct create_durable_handle_reconnect_v2));
	*num_iovec = num + 1;
	return 0;
}

static int
add_durable_context(struct kvec *iov, unsigned int *num_iovec,
		    struct cifs_open_parms *oparms, bool use_persistent)
{
	struct smb2_create_req *req = iov[0].iov_base;
	unsigned int num = *num_iovec;

	if (use_persistent) {
		if (oparms->reconnect)
			return add_durable_reconnect_v2_context(iov, num_iovec,
								oparms);
		else
			return add_durable_v2_context(iov, num_iovec, oparms);
	}

	if (oparms->reconnect) {
		iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
		/* indicate that we don't need to relock the file */
		oparms->reconnect = false;
	} else
		iov[num].iov_base = create_durable_buf();
	if (iov[num].iov_base == NULL)
		return -ENOMEM;
	iov[num].iov_len = sizeof(struct create_durable);
	if (!req->CreateContextsOffset)
		req->CreateContextsOffset =
			cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
								iov[1].iov_len);
	le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
	inc_rfc1001_len(&req->hdr, sizeof(struct create_durable));
	*num_iovec = num + 1;
	return 0;
}

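/*
 * SMB2 create/open. The request is assembled as an array of kvecs:
 * iov[0] holds the fixed smb2_create_req, iov[1] the UTF-16 path (padded to
 * a multiple of 8 bytes), and iov[2]/iov[3] optionally hold a lease and a
 * durable-handle create context added by the helpers above.
 */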
int
SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
	  __u8 *oplock, struct smb2_file_all_info *buf,
	  struct smb2_err_rsp **err_buf)
{
	struct smb2_create_req *req;
	struct smb2_create_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon = oparms->tcon;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[4];
	int resp_buftype;
	int uni_path_len;
	__le16 *copy_path = NULL;
	int copy_size;
	int rc = 0;
	unsigned int num_iovecs = 2;
	__u32 file_attributes = 0;
	char *dhc_buf = NULL, *lc_buf = NULL;

	cifs_dbg(FYI, "create/open\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
	if (rc)
		return rc;

	if (oparms->create_options & CREATE_OPTION_READONLY)
		file_attributes |= ATTR_READONLY;
	if (oparms->create_options & CREATE_OPTION_SPECIAL)
		file_attributes |= ATTR_SYSTEM;

	req->ImpersonationLevel = IL_IMPERSONATION;
	req->DesiredAccess = cpu_to_le32(oparms->desired_access);
	/* File attributes ignored on open (used in create though) */
	req->FileAttributes = cpu_to_le32(file_attributes);
	req->ShareAccess = FILE_SHARE_ALL_LE;
	req->CreateDisposition = cpu_to_le32(oparms->disposition);
	req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
	uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
	/* do not count rfc1001 len field */
	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	/* MUST set path len (NameLength) to 0 opening root of share */
	req->NameLength = cpu_to_le16(uni_path_len - 2);
	/* -1 since last byte is buf[0] which is sent below (path) */
	iov[0].iov_len--;
	if (uni_path_len % 8 != 0) {
		copy_size = uni_path_len / 8 * 8;
		if (copy_size < uni_path_len)
			copy_size += 8;

		copy_path = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_path)
			return -ENOMEM;
		memcpy((char *)copy_path, (const char *)path,
			uni_path_len);
		uni_path_len = copy_size;
		path = copy_path;
	}

	iov[1].iov_len = uni_path_len;
	iov[1].iov_base = path;
	/* -1 since last byte is buf[0] which was counted in smb2_buf_len */
	inc_rfc1001_len(req, uni_path_len - 1);

	if (!server->oplocks)
		*oplock = SMB2_OPLOCK_LEVEL_NONE;

	if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
	    *oplock == SMB2_OPLOCK_LEVEL_NONE)
		req->RequestedOplockLevel = *oplock;
	else {
		rc = add_lease_context(server, iov, &num_iovecs, oplock);
		if (rc) {
			cifs_small_buf_release(req);
			kfree(copy_path);
			return rc;
		}
		lc_buf = iov[num_iovecs-1].iov_base;
	}

	if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
		/* need to set Next field of lease context if we request it */
		if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
			struct create_context *ccontext =
			    (struct create_context *)iov[num_iovecs-1].iov_base;
			ccontext->Next =
				cpu_to_le32(server->vals->create_lease_size);
		}

		rc = add_durable_context(iov, &num_iovecs, oparms,
					 tcon->use_persistent);
		if (rc) {
			cifs_small_buf_release(req);
			kfree(copy_path);
			kfree(lc_buf);
			return rc;
		}
		dhc_buf = iov[num_iovecs-1].iov_base;
	}

	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
	rsp = (struct smb2_create_rsp *)iov[0].iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
		if (err_buf)
			*err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4,
					   GFP_KERNEL);
		goto creat_exit;
	}

	oparms->fid->persistent_fid = rsp->PersistentFileId;
	oparms->fid->volatile_fid = rsp->VolatileFileId;

	if (buf) {
		memcpy(buf, &rsp->CreationTime, 32);
		buf->AllocationSize = rsp->AllocationSize;
		buf->EndOfFile = rsp->EndofFile;
		buf->Attributes = rsp->FileAttributes;
		buf->NumberOfLinks = cpu_to_le32(1);
		buf->DeletePending = 0;
	}

	if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
		*oplock = parse_lease_state(server, rsp, &oparms->fid->epoch);
	else
		*oplock = rsp->OplockLevel;
creat_exit:
	kfree(copy_path);
	kfree(lc_buf);
	kfree(dhc_buf);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

/*
 *	SMB2 IOCTL is used for both IOCTLs and FSCTLs
 */
int
SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	   u64 volatile_fid, u32 opcode, bool is_fsctl, char *in_data,
	   u32 indatalen, char **out_data, u32 *plen /* returned data len */)
{
	struct smb2_ioctl_req *req;
	struct smb2_ioctl_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct kvec iov[2];
	int resp_buftype;
	int num_iovecs;
	int rc = 0;

	cifs_dbg(FYI, "SMB2 IOCTL\n");

	if (out_data != NULL)
		*out_data = NULL;

	/* zero out returned data len, in case of error */
	if (plen)
		*plen = 0;

	if (tcon)
		ses = tcon->ses;
	else
		return -EIO;

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_IOCTL, tcon, (void **) &req);
	if (rc)
		return rc;

	req->CtlCode = cpu_to_le32(opcode);
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	if (indatalen) {
		req->InputCount = cpu_to_le32(indatalen);
		/* do not set InputOffset if no input data */
		req->InputOffset =
		       cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4);
		iov[1].iov_base = in_data;
		iov[1].iov_len = indatalen;
		num_iovecs = 2;
	} else
		num_iovecs = 1;

	req->OutputOffset = 0;
	req->OutputCount = 0; /* MBZ */

	/*
	 * Could increase MaxOutputResponse, but that would require more
	 * than one credit. Windows typically sets this smaller, but for some
	 * ioctls it may be useful to allow server to send more. No point
	 * limiting what the server can send as long as fits in one credit
	 */
	req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */

	if (is_fsctl)
		req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
	else
		req->Flags = 0;

	iov[0].iov_base = (char *)req;

	/*
	 * If no input data, the size of ioctl struct in
	 * protocol spec still includes a 1 byte data buffer,
	 * but if input data passed to ioctl, we do not
	 * want to double count this, so we do not send
	 * the dummy one byte of data in iovec[0] if sending
	 * input data (in iovec[1]). We also must add 4 bytes
	 * in first iovec to allow for rfc1002 length field.
	 */

	if (indatalen) {
		iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
		inc_rfc1001_len(req, indatalen - 1);
	} else
		iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
	rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;

	if ((rc != 0) && (rc != -EINVAL)) {
		cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
		goto ioctl_exit;
	} else if (rc == -EINVAL) {
		if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
		    (opcode != FSCTL_SRV_COPYCHUNK)) {
			cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
			goto ioctl_exit;
		}
	}

	/* check if caller wants to look at return data or just return rc */
	if ((plen == NULL) || (out_data == NULL))
		goto ioctl_exit;

	*plen = le32_to_cpu(rsp->OutputCount);

	/* We check for obvious errors in the output buffer length and offset */
	if (*plen == 0)
		goto ioctl_exit; /* server returned no data */
	else if (*plen > 0xFF00) {
		cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
		*plen = 0;
		rc = -EIO;
		goto ioctl_exit;
	}

	if (get_rfc1002_length(rsp) < le32_to_cpu(rsp->OutputOffset) + *plen) {
		cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
			le32_to_cpu(rsp->OutputOffset));
		*plen = 0;
		rc = -EIO;
		goto ioctl_exit;
	}

	*out_data = kmalloc(*plen, GFP_KERNEL);
	if (*out_data == NULL) {
		rc = -ENOMEM;
		goto ioctl_exit;
	}

	memcpy(*out_data,
	       (char *)&rsp->hdr.ProtocolId + le32_to_cpu(rsp->OutputOffset),
	       *plen);
ioctl_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

/*
 *   Individual callers to ioctl worker function follow
 */
int
SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid)
{
	int rc;
	struct compress_ioctl fsctl_input;
	char *ret_data = NULL;

	fsctl_input.CompressionState =
			cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SET_COMPRESSION, true /* is_fsctl */,
			(char *)&fsctl_input /* data input */,
			2 /* in data len */, &ret_data /* out data */, NULL);

	cifs_dbg(FYI, "set compression rc %d\n", rc);

	return rc;
}

int
SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid)
{
	struct smb2_close_req *req;
	struct smb2_close_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	int resp_buftype;
	int rc = 0;

	cifs_dbg(FYI, "Close\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
	if (rc)
		return rc;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
	rsp = (struct smb2_close_rsp *)iov[0].iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
		goto close_exit;
	}

	/* BB FIXME - decode close response, update inode for caching */

close_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

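/*
 * Sanity check a variable-length field returned in an SMB2 response: the
 * buffer described by (offset, buffer_length) must be at least min_buf_size
 * bytes and must lie entirely within the received frame.
 */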
static int
validate_buf(unsigned int offset, unsigned int buffer_length,
	     struct smb2_hdr *hdr, unsigned int min_buf_size)
{
	unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length);
	char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr;
	char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
	char *end_of_buf = begin_of_buf + buffer_length;

	if (buffer_length < min_buf_size) {
		cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
			 buffer_length, min_buf_size);
		return -EINVAL;
	}

	/* check if beyond RFC1001 maximum length */
	if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
		cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
			 buffer_length, smb_len);
		return -EINVAL;
	}

	if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
		cifs_dbg(VFS, "illegal server response, bad offset to data\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
 * Caller must free buffer.
 */
static int
validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
		      struct smb2_hdr *hdr, unsigned int minbufsize,
		      char *data)
{
	char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
	int rc;

	if (!data)
		return -EINVAL;

	rc = validate_buf(offset, buffer_length, hdr, minbufsize);
	if (rc)
		return rc;

	memcpy(data, begin_of_buf, buffer_length);

	return 0;
}

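/*
 * Common worker for SMB2 QUERY_INFO on an open file: sends the request for
 * the given info class and copies the validated response payload (at least
 * min_len bytes) into the caller's buffer. Used by SMB2_query_info and
 * SMB2_get_srv_num below.
 */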
static int
query_info(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid, u8 info_class,
	   size_t output_len, size_t min_len, void *data)
{
	struct smb2_query_info_req *req;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	cifs_dbg(FYI, "Query Info\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILE;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 4 for rfc1002 length field and 1 for Buffer */
	req->InputBufferOffset =
		cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
	req->OutputBufferLength = cpu_to_le32(output_len);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
	rsp = (struct smb2_query_info_rsp *)iov[0].iov_base;

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qinf_exit;
	}

	rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
				   le32_to_cpu(rsp->OutputBufferLength),
				   &rsp->hdr, min_len, data);

qinf_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

int
SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
		u64 persistent_fid, u64 volatile_fid,
		struct smb2_file_all_info *data)
{
	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  FILE_ALL_INFORMATION,
			  sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
			  sizeof(struct smb2_file_all_info), data);
}

int
SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
		 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
{
	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  FILE_INTERNAL_INFORMATION,
			  sizeof(struct smb2_file_internal_info),
			  sizeof(struct smb2_file_internal_info), uniqueid);
}

/*
 * This is a no-op for now. We're not really interested in the reply, but
 * rather in the fact that the server sent one and that server->lstrp
 * gets updated.
 *
 * FIXME: maybe we should consider checking that the reply matches request?
 */
static void
smb2_echo_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->callback_data;
	struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf;
	unsigned int credits_received = 1;

	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		credits_received = le16_to_cpu(smb2->hdr.CreditRequest);

	mutex_lock(&server->srv_mutex);
	DeleteMidQEntry(mid);
	mutex_unlock(&server->srv_mutex);
	add_credits(server, credits_received, CIFS_ECHO_OP);
}

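/*
 * Delayed work that re-establishes tree connections after a reconnect:
 * collect every tcon on this server that needs to be reconnected or has
 * files to reopen, then reconnect each one and reopen its persistent
 * handles outside of cifs_tcp_ses_lock.
 */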
void smb2_reconnect_server(struct work_struct *work)
{
	struct TCP_Server_Info *server = container_of(work,
					struct TCP_Server_Info, reconnect.work);
	struct cifs_ses *ses;
	struct cifs_tcon *tcon, *tcon2;
	struct list_head tmp_list;
	int tcon_exist = false;

	/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
	mutex_lock(&server->reconnect_mutex);

	INIT_LIST_HEAD(&tmp_list);
	cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (tcon->need_reconnect || tcon->need_reopen_files) {
				tcon->tc_count++;
				list_add_tail(&tcon->rlist, &tmp_list);
				tcon_exist = true;
			}
		}
	}
	/*
	 * Get the reference to server struct to be sure that the last call of
	 * cifs_put_tcon() in the loop below won't release the server pointer.
	 */
	if (tcon_exist)
		server->srv_count++;

	spin_unlock(&cifs_tcp_ses_lock);

	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
		if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon))
			cifs_reopen_persistent_handles(tcon);
		list_del_init(&tcon->rlist);
		cifs_put_tcon(tcon);
	}

	cifs_dbg(FYI, "Reconnecting tcons finished\n");
	mutex_unlock(&server->reconnect_mutex);

	/* now we can safely release srv struct */
	if (tcon_exist)
		cifs_put_tcp_session(server, 1);
}

int
SMB2_echo(struct TCP_Server_Info *server)
{
	struct smb2_echo_req *req;
	int rc = 0;
	struct kvec iov;
	struct smb_rqst rqst = { .rq_iov = &iov,
				 .rq_nvec = 1 };

	cifs_dbg(FYI, "In echo request\n");

	if (server->tcpStatus == CifsNeedNegotiate) {
		/* No need to send echo on newly established connections */
		queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
		return rc;
	}

	rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
	if (rc)
		return rc;

	req->hdr.CreditRequest = cpu_to_le16(1);

	iov.iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov.iov_len = get_rfc1002_length(req) + 4;

	rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
			     CIFS_ECHO_OP);
	if (rc)
		cifs_dbg(FYI, "Echo request failed: %d\n", rc);

	cifs_small_buf_release(req);
	return rc;
}

int
SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	   u64 volatile_fid)
{
	struct smb2_flush_req *req;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	int resp_buftype;
	int rc = 0;

	cifs_dbg(FYI, "Flush\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
	if (rc)
		return rc;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);

	if (rc != 0)
		cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);

	free_rsp_buf(resp_buftype, iov[0].iov_base);
	return rc;
}

/*
 * To form a chain of read requests, any read requests after the first should
 * have the end_of_chain boolean set to true.
 */
static int
smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms,
		  unsigned int remaining_bytes, int request_type)
{
	int rc = -EACCES;
	struct smb2_read_req *req = NULL;

	rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req);
	if (rc)
		return rc;
	if (io_parms->tcon->ses->server == NULL)
		return -ECONNABORTED;

	req->hdr.ProcessId = cpu_to_le32(io_parms->pid);

	req->PersistentFileId = io_parms->persistent_fid;
	req->VolatileFileId = io_parms->volatile_fid;
	req->ReadChannelInfoOffset = 0; /* reserved */
	req->ReadChannelInfoLength = 0; /* reserved */
	req->Channel = 0; /* reserved */
	req->MinimumCount = 0;
	req->Length = cpu_to_le32(io_parms->length);
	req->Offset = cpu_to_le64(io_parms->offset);

	if (request_type & CHAINED_REQUEST) {
		if (!(request_type & END_OF_CHAIN)) {
			/* 4 for rfc1002 length field */
			req->hdr.NextCommand =
				cpu_to_le32(get_rfc1002_length(req) + 4);
		} else /* END_OF_CHAIN */
			req->hdr.NextCommand = 0;
		if (request_type & RELATED_REQUEST) {
			req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
			/*
			 * Related requests use info from previous read request
			 * in chain.
			 */
			req->hdr.SessionId = 0xFFFFFFFF;
			req->hdr.TreeId = 0xFFFFFFFF;
			req->PersistentFileId = 0xFFFFFFFF;
			req->VolatileFileId = 0xFFFFFFFF;
		}
	}
	if (remaining_bytes > io_parms->length)
		req->RemainingBytes = cpu_to_le32(remaining_bytes);
	else
		req->RemainingBytes = 0;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;
	return rc;
}

static void
smb2_readv_callback(struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct smb2_hdr *buf = (struct smb2_hdr *)rdata->iov.iov_base;
	unsigned int credits_received = 1;
	struct smb_rqst rqst = { .rq_iov = &rdata->iov,
				 .rq_nvec = 1,
				 .rq_pages = rdata->pages,
				 .rq_npages = rdata->nr_pages,
				 .rq_pagesz = rdata->pagesz,
				 .rq_tailsz = rdata->tailsz };

	cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
		 __func__, mid->mid, mid->mid_state, rdata->result,
		 rdata->bytes);

	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		credits_received = le16_to_cpu(buf->CreditRequest);
		/* result already set, check signature */
		if (server->sign) {
			int rc;

			rc = smb2_verify_signature(&rqst, server);
			if (rc)
				cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
					 rc);
		}
		/* FIXME: should this be counted toward the initiating task? */
		task_io_account_read(rdata->got_bytes);
		cifs_stats_bytes_read(tcon, rdata->got_bytes);
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		rdata->result = -EAGAIN;
		if (server->sign && rdata->got_bytes)
			/* reset bytes number since we can not check a sign */
			rdata->got_bytes = 0;
		/* FIXME: should this be counted toward the initiating task? */
		task_io_account_read(rdata->got_bytes);
		cifs_stats_bytes_read(tcon, rdata->got_bytes);
		break;
	default:
		if (rdata->result != -ENODATA)
			rdata->result = -EIO;
	}

	if (rdata->result)
		cifs_stats_fail_inc(tcon, SMB2_READ_HE);

	queue_work(cifsiod_wq, &rdata->work);
	mutex_lock(&server->srv_mutex);
	DeleteMidQEntry(mid);
	mutex_unlock(&server->srv_mutex);
	add_credits(server, credits_received, 0);
}

/* smb2_async_readv - send an async read, and set up mid to handle result */
int
smb2_async_readv(struct cifs_readdata *rdata)
{
	int rc, flags = 0;
	struct smb2_hdr *buf;
	struct cifs_io_parms io_parms;
	struct smb_rqst rqst = { .rq_iov = &rdata->iov,
				 .rq_nvec = 1 };
	struct TCP_Server_Info *server;

	cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
		 __func__, rdata->offset, rdata->bytes);

	io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
	io_parms.offset = rdata->offset;
	io_parms.length = rdata->bytes;
	io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
	io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
	io_parms.pid = rdata->pid;

	server = io_parms.tcon->ses->server;

	rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0);
	if (rc) {
		if (rc == -EAGAIN && rdata->credits) {
			/* credits was reset by reconnect */
			rdata->credits = 0;
			/* reduce in_flight value since we won't send the req */
			spin_lock(&server->req_lock);
			server->in_flight--;
			spin_unlock(&server->req_lock);
		}
		return rc;
	}

	buf = (struct smb2_hdr *)rdata->iov.iov_base;
	/* 4 for rfc1002 length field */
	rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4;

	if (rdata->credits) {
		buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
						SMB2_MAX_BUFFER_SIZE));
		buf->CreditRequest = buf->CreditCharge;
		spin_lock(&server->req_lock);
		server->credits += rdata->credits -
						le16_to_cpu(buf->CreditCharge);
		spin_unlock(&server->req_lock);
		wake_up(&server->request_q);
		flags = CIFS_HAS_CREDITS;
	}

	kref_get(&rdata->refcount);
	rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
			     cifs_readv_receive, smb2_readv_callback,
			     rdata, flags);
	if (rc) {
		kref_put(&rdata->refcount, cifs_readdata_release);
		cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
	}

	cifs_small_buf_release(buf);
	return rc;
}

int
SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
	  unsigned int *nbytes, char **buf, int *buf_type)
{
	int resp_buftype, rc = -EACCES;
	struct smb2_read_rsp *rsp = NULL;
	struct kvec iov[1];

	*nbytes = 0;
	rc = smb2_new_read_req(iov, io_parms, 0, 0);
	if (rc)
		return rc;

	rc = SendReceive2(xid, io_parms->tcon->ses, iov, 1,
			  &resp_buftype, CIFS_LOG_ERROR);

	rsp = (struct smb2_read_rsp *)iov[0].iov_base;

	if (rsp->hdr.Status == STATUS_END_OF_FILE) {
		free_rsp_buf(resp_buftype, iov[0].iov_base);
		return 0;
	}

	if (rc) {
		cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
		cifs_dbg(VFS, "Send error in read = %d\n", rc);
	} else {
		*nbytes = le32_to_cpu(rsp->DataLength);
		if ((*nbytes > CIFS_MAX_MSGSIZE) ||
		    (*nbytes > io_parms->length)) {
			cifs_dbg(FYI, "bad length %d for count %d\n",
				 *nbytes, io_parms->length);
			rc = -EIO;
			*nbytes = 0;
		}
	}

	if (*buf) {
		memcpy(*buf, (char *)&rsp->hdr.ProtocolId + rsp->DataOffset,
		       *nbytes);
		free_rsp_buf(resp_buftype, iov[0].iov_base);
	} else if (resp_buftype != CIFS_NO_BUFFER) {
		*buf = iov[0].iov_base;
		if (resp_buftype == CIFS_SMALL_BUFFER)
			*buf_type = CIFS_SMALL_BUFFER;
		else if (resp_buftype == CIFS_LARGE_BUFFER)
			*buf_type = CIFS_LARGE_BUFFER;
	}
	return rc;
}

/*
 * Check the mid_state and signature on received buffer (if any), and queue the
 * workqueue completion task.
 */
static void
smb2_writev_callback(struct mid_q_entry *mid)
{
	struct cifs_writedata *wdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int written;
	struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
	unsigned int credits_received = 1;

	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		credits_received = le16_to_cpu(rsp->hdr.CreditRequest);
		wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
		if (wdata->result != 0)
			break;

		written = le32_to_cpu(rsp->DataLength);
		/*
		 * Mask off high 16 bits when bytes written as returned
		 * by the server is greater than bytes requested by the
		 * client. OS/2 servers are known to set incorrect
		 * CountHigh values.
		 */
		if (written > wdata->bytes)
			written &= 0xFFFF;

		if (written < wdata->bytes)
			wdata->result = -ENOSPC;
		else
			wdata->bytes = written;
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		wdata->result = -EAGAIN;
		break;
	default:
		wdata->result = -EIO;
		break;
	}

	if (wdata->result)
		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);

	queue_work(cifsiod_wq, &wdata->work);
	mutex_lock(&server->srv_mutex);
	DeleteMidQEntry(mid);
	mutex_unlock(&server->srv_mutex);
	add_credits(tcon->ses->server, credits_received, 0);
}

/* smb2_async_writev - send an async write, and set up mid to handle result */
int
smb2_async_writev(struct cifs_writedata *wdata,
		  void (*release)(struct kref *kref))
{
	int rc = -EACCES, flags = 0;
	struct smb2_write_req *req = NULL;
	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct kvec iov;
	struct smb_rqst rqst;

	rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
	if (rc) {
		if (rc == -EAGAIN && wdata->credits) {
			/* credits was reset by reconnect */
			wdata->credits = 0;
			/* reduce in_flight value since we won't send the req */
			spin_lock(&server->req_lock);
			server->in_flight--;
			spin_unlock(&server->req_lock);
		}
		goto async_writev_out;
	}

	req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);

	req->PersistentFileId = wdata->cfile->fid.persistent_fid;
	req->VolatileFileId = wdata->cfile->fid.volatile_fid;
	req->WriteChannelInfoOffset = 0;
	req->WriteChannelInfoLength = 0;
	req->Channel = 0;
	req->Offset = cpu_to_le64(wdata->offset);
	/* 4 for rfc1002 length field */
	req->DataOffset = cpu_to_le16(
				offsetof(struct smb2_write_req, Buffer) - 4);
	req->RemainingBytes = 0;

	/* 4 for rfc1002 length field and 1 for Buffer */
	iov.iov_len = get_rfc1002_length(req) + 4 - 1;
	iov.iov_base = req;

	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;
	rqst.rq_pages = wdata->pages;
	rqst.rq_npages = wdata->nr_pages;
	rqst.rq_pagesz = wdata->pagesz;
	rqst.rq_tailsz = wdata->tailsz;

	cifs_dbg(FYI, "async write at %llu %u bytes\n",
		 wdata->offset, wdata->bytes);

	req->Length = cpu_to_le32(wdata->bytes);

	inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);

	if (wdata->credits) {
		req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
						    SMB2_MAX_BUFFER_SIZE));
		req->hdr.CreditRequest = req->hdr.CreditCharge;
		spin_lock(&server->req_lock);
		server->credits += wdata->credits -
					le16_to_cpu(req->hdr.CreditCharge);
		spin_unlock(&server->req_lock);
		wake_up(&server->request_q);
		flags = CIFS_HAS_CREDITS;
	}

	kref_get(&wdata->refcount);
	rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, wdata,
			     flags);

	if (rc) {
		kref_put(&wdata->refcount, release);
		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
	}

async_writev_out:
	cifs_small_buf_release(req);
	return rc;
}

/*
 * SMB2_write function gets iov pointer to kvec array with n_vec as a length.
 * n_vec must be at least 1 and gives the number of elements with data to
 * write; those elements begin at position 1 in the iov array. The total
 * length of the data is given by io_parms->length.
 */
int
SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
	   unsigned int *nbytes, struct kvec *iov, int n_vec)
{
	int rc = 0;
	struct smb2_write_req *req = NULL;
	struct smb2_write_rsp *rsp = NULL;
	int resp_buftype;
	*nbytes = 0;

	if (n_vec < 1)
		return rc;

	rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
	if (rc)
		return rc;

	if (io_parms->tcon->ses->server == NULL)
		return -ECONNABORTED;

	req->hdr.ProcessId = cpu_to_le32(io_parms->pid);

	req->PersistentFileId = io_parms->persistent_fid;
	req->VolatileFileId = io_parms->volatile_fid;
	req->WriteChannelInfoOffset = 0;
	req->WriteChannelInfoLength = 0;
	req->Channel = 0;
	req->Length = cpu_to_le32(io_parms->length);
	req->Offset = cpu_to_le64(io_parms->offset);
	/* 4 for rfc1002 length field */
	req->DataOffset = cpu_to_le16(
				offsetof(struct smb2_write_req, Buffer) - 4);
	req->RemainingBytes = 0;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for Buffer */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

	/* length of entire message including data to be written */
	inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);

	rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
			  &resp_buftype, 0);
	rsp = (struct smb2_write_rsp *)iov[0].iov_base;

	if (rc) {
		cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
		cifs_dbg(VFS, "Send error in write = %d\n", rc);
	} else
		*nbytes = le32_to_cpu(rsp->DataLength);

	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

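/*
 * Count the directory entries in a query_directory response buffer, following
 * NextEntryOffset links and stopping before any entry that would run past
 * end_of_buf; *lastentry is left pointing at the last complete entry.
 */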
static unsigned int
num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
{
	int len;
	unsigned int entrycount = 0;
	unsigned int next_offset = 0;
	FILE_DIRECTORY_INFO *entryptr;

	if (bufstart == NULL)
		return 0;

	entryptr = (FILE_DIRECTORY_INFO *)bufstart;

	while (1) {
		entryptr = (FILE_DIRECTORY_INFO *)
					((char *)entryptr + next_offset);

		if ((char *)entryptr + size > end_of_buf) {
			cifs_dbg(VFS, "malformed search entry would overflow\n");
			break;
		}

		len = le32_to_cpu(entryptr->FileNameLength);
		if ((char *)entryptr + len + size > end_of_buf) {
			cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
				 end_of_buf);
			break;
		}

		*lastentry = (char *)entryptr;
		entrycount++;

		next_offset = le32_to_cpu(entryptr->NextEntryOffset);
		if (!next_offset)
			break;
	}

	return entrycount;
}

/*
 * Readdir/FindFirst
 */
int
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid, int index,
		     struct cifs_search_info *srch_inf)
{
	struct smb2_query_directory_req *req;
	struct smb2_query_directory_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int len;
	int resp_buftype = CIFS_NO_BUFFER;
	unsigned char *bufptr;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	__le16 asteriks = cpu_to_le16('*');
	char *end_of_smb;
	unsigned int output_size = CIFSMaxBufSize;
	size_t info_buf_size;

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
	if (rc)
		return rc;

	switch (srch_inf->info_level) {
	case SMB_FIND_FILE_DIRECTORY_INFO:
		req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
		info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
		break;
	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
		req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
		info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
		break;
	default:
		cifs_dbg(VFS, "info level %u isn't supported\n",
			 srch_inf->info_level);
		rc = -EINVAL;
		goto qdir_exit;
	}

	req->FileIndex = cpu_to_le32(index);
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	len = 0x2;
	bufptr = req->Buffer;
	memcpy(bufptr, &asteriks, len);

	req->FileNameOffset =
		cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
	req->FileNameLength = cpu_to_le16(len);
	/*
	 * BB could be 30 bytes or so longer if we used SMB2 specific
	 * buffer lengths, but this is safe and close enough.
	 */
	output_size = min_t(unsigned int, output_size, server->maxBuf);
	output_size = min_t(unsigned int, output_size, 2 << 15);
	req->OutputBufferLength = cpu_to_le32(output_size);

	iov[0].iov_base = (char *)req;
	/* 4 for RFC1001 length and 1 for Buffer */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

	iov[1].iov_base = (char *)(req->Buffer);
	iov[1].iov_len = len;

	inc_rfc1001_len(req, len - 1 /* Buffer */);

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
	rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;

	if (rc) {
		if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
			srch_inf->endOfSearch = true;
			rc = 0;
		}
		cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
		goto qdir_exit;
	}

	rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
			  le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
			  info_buf_size);
	if (rc)
		goto qdir_exit;

	srch_inf->unicode = true;

	if (srch_inf->ntwrk_buf_start) {
		if (srch_inf->smallBuf)
			cifs_small_buf_release(srch_inf->ntwrk_buf_start);
		else
			cifs_buf_release(srch_inf->ntwrk_buf_start);
	}
	srch_inf->ntwrk_buf_start = (char *)rsp;
	srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
		(char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
	/* 4 for rfc1002 length field */
	end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
	srch_inf->entries_in_buffer =
			num_entries(srch_inf->srch_entries_start, end_of_smb,
				    &srch_inf->last_entry, info_buf_size);
	srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
	cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
		 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
		 srch_inf->srch_entries_start, srch_inf->last_entry);
	if (resp_buftype == CIFS_LARGE_BUFFER)
		srch_inf->smallBuf = false;
	else if (resp_buftype == CIFS_SMALL_BUFFER)
		srch_inf->smallBuf = true;
	else
		cifs_dbg(VFS, "illegal search buffer type\n");

	return rc;

qdir_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

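/*
 * Common worker for SMB2 SET_INFO on an open file. The first data buffer is
 * copied into the request itself; any additional buffers (e.g. the target
 * name for rename or hardlink) are sent as extra iovecs appended to the
 * request. Callers follow below.
 */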
static int
send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class,
	      unsigned int num, void **data, unsigned int *size)
{
	struct smb2_set_info_req *req;
	struct smb2_set_info_rsp *rsp = NULL;
	struct kvec *iov;
	int rc = 0;
	int resp_buftype;
	unsigned int i;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	if (!num)
		return -EINVAL;

	iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL);
	if (!iov)
		return -ENOMEM;

	rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
	if (rc) {
		kfree(iov);
		return rc;
	}

	req->hdr.ProcessId = cpu_to_le32(pid);

	req->InfoType = SMB2_O_INFO_FILE;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	/* 4 for RFC1001 length and 1 for Buffer */
	req->BufferOffset =
			cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
	req->BufferLength = cpu_to_le32(*size);

	inc_rfc1001_len(req, *size - 1 /* Buffer */);

	memcpy(req->Buffer, *data, *size);

	iov[0].iov_base = (char *)req;
	/* 4 for RFC1001 length */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	for (i = 1; i < num; i++) {
		inc_rfc1001_len(req, size[i]);
		le32_add_cpu(&req->BufferLength, size[i]);
		iov[i].iov_base = (char *)data[i];
		iov[i].iov_len = size[i];
	}

	rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
	rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;

	if (rc != 0)
		cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);

	free_rsp_buf(resp_buftype, rsp);
	kfree(iov);
	return rc;
}

int
SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
	    u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
	struct smb2_file_rename_info info;
	void **data;
	unsigned int size[2];
	int rc;
	int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));

	data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
			      /* 0 = fail if target already exists */
	info.RootDirectory = 0;  /* MBZ for network ops (why does spec say?) */
	info.FileNameLength = cpu_to_le32(len);

	data[0] = &info;
	size[0] = sizeof(struct smb2_file_rename_info);

	data[1] = target_file;
	size[1] = len + 2 /* null */;

	rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
			   current->tgid, FILE_RENAME_INFORMATION, 2, data,
			   size);
	kfree(data);
	return rc;
}

int
SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid)
{
	__u8 delete_pending = 1;
	void *data;
	unsigned int size;

	data = &delete_pending;
	size = 1; /* sizeof __u8 */

	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data,
			&size);
}

int
SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
		  u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
	struct smb2_file_link_info info;
	void **data;
	unsigned int size[2];
	int rc;
	int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));

	data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
			      /* 0 = fail if link already exists */
	info.RootDirectory = 0;  /* MBZ for network ops (why does spec say?) */
	info.FileNameLength = cpu_to_le32(len);

	data[0] = &info;
	size[0] = sizeof(struct smb2_file_link_info);

	data[1] = target_file;
	size[1] = len + 2 /* null */;

	rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
			   current->tgid, FILE_LINK_INFORMATION, 2, data, size);
	kfree(data);
	return rc;
}

int
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	     u64 volatile_fid, u32 pid, __le64 *eof, bool is_falloc)
{
	struct smb2_file_eof_info info;
	void *data;
	unsigned int size;

	info.EndOfFile = *eof;

	data = &info;
	size = sizeof(struct smb2_file_eof_info);

	if (is_falloc)
		return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			pid, FILE_ALLOCATION_INFORMATION, 1, &data, &size);
	else
		return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			pid, FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
}

int
SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
{
	unsigned int size;

	size = sizeof(FILE_BASIC_INFO);
	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			     current->tgid, FILE_BASIC_INFORMATION, 1,
			     (void **)&buf, &size);
}

int
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
		  const u64 persistent_fid, const u64 volatile_fid,
		  __u8 oplock_level)
{
	int rc;
	struct smb2_oplock_break *req = NULL;

	cifs_dbg(FYI, "SMB2_oplock_break\n");
	rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
	if (rc)
		return rc;

	req->VolatileFid = volatile_fid;
	req->PersistentFid = persistent_fid;
	req->OplockLevel = oplock_level;
	req->hdr.CreditRequest = cpu_to_le16(1);

	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
	/* SMB2 buffer freed by function above */

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
	}

	return rc;
}

static void
copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
			struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
			  le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
	kst->f_bfree  = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
	kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
	return;
}

static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
		   int outbuf_len, u64 persistent_fid, u64 volatile_fid)
{
	int rc;
	struct smb2_query_info_req *req;

	cifs_dbg(FYI, "Query FSInfo level %d\n", level);

	if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
		return -EIO;

	rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILESYSTEM;
	req->FileInfoClass = level;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 4 for rfc1002 length field and 1 for pad */
	req->InputBufferOffset =
			cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
	req->OutputBufferLength = cpu_to_le32(
		outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);

	iov->iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov->iov_len = get_rfc1002_length(req) + 4;
	return 0;
}

int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct smb2_fs_full_size_info *info = NULL;

	rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
				sizeof(struct smb2_fs_full_size_info),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)iov.iov_base;

	info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ +
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr);
	rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
			  le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
			  sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		copy_fs_info_to_kstatfs(info, fsdata);

qfsinf_exit:
	free_rsp_buf(resp_buftype, iov.iov_base);
	return rc;
}

int
SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, int level)
{
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	int rc = 0;
	int resp_buftype, max_len, min_len;
	struct cifs_ses *ses = tcon->ses;
	unsigned int rsp_len, offset;

	if (level == FS_DEVICE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
		min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
	} else if (level == FS_ATTRIBUTE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
		min_len = MIN_FS_ATTR_INFO_SIZE;
	} else if (level == FS_SECTOR_SIZE_INFORMATION) {
		max_len = sizeof(struct smb3_fs_ss_info);
		min_len = sizeof(struct smb3_fs_ss_info);
	} else {
		cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
		return -EINVAL;
	}

	rc = build_qfs_info_req(&iov, tcon, level, max_len,
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsattr_exit;
	}
	rsp = (struct smb2_query_info_rsp *)iov.iov_base;

	rsp_len = le32_to_cpu(rsp->OutputBufferLength);
	offset = le16_to_cpu(rsp->OutputBufferOffset);
	rc = validate_buf(offset, rsp_len, &rsp->hdr, min_len);
	if (rc)
		goto qfsattr_exit;

	if (level == FS_ATTRIBUTE_INFORMATION)
		memcpy(&tcon->fsAttrInfo, 4 /* RFC1001 len */ + offset
			+ (char *)&rsp->hdr, min_t(unsigned int,
			rsp_len, max_len));
	else if (level == FS_DEVICE_INFORMATION)
		memcpy(&tcon->fsDevInfo, 4 /* RFC1001 len */ + offset
			+ (char *)&rsp->hdr, sizeof(FILE_SYSTEM_DEVICE_INFO));
	else if (level == FS_SECTOR_SIZE_INFORMATION) {
		struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
			(4 /* RFC1001 len */ + offset + (char *)&rsp->hdr);
		tcon->ss_flags = le32_to_cpu(ss_info->Flags);
		tcon->perf_sector_size =
			le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
	}

qfsattr_exit:
	free_rsp_buf(resp_buftype, iov.iov_base);
	return rc;
}

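/*
 * Send an SMB2 LOCK request carrying num_lock smb2_lock_element structures:
 * iov[0] holds the request up to (but not including) the lock array and
 * iov[1] holds the caller-supplied array of lock elements.
 */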
int
smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
	   const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	   const __u32 num_lock, struct smb2_lock_element *buf)
{
	int rc = 0;
	struct smb2_lock_req *req = NULL;
	struct kvec iov[2];
	int resp_buf_type;
	unsigned int count;

	cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);

	rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
	if (rc)
		return rc;

	req->hdr.ProcessId = cpu_to_le32(pid);
	req->LockCount = cpu_to_le16(num_lock);

	req->PersistentFileId = persist_fid;
	req->VolatileFileId = volatile_fid;

	count = num_lock * sizeof(struct smb2_lock_element);
	inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and count for all locks */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
	iov[1].iov_base = (char *)buf;
	iov[1].iov_len = count;

	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
	rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
	if (rc) {
		cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
		cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
	}

	return rc;
}

int
SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
	  const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	  const __u64 length, const __u64 offset, const __u32 lock_flags,
	  const bool wait)
{
	struct smb2_lock_element lock;

	lock.Offset = cpu_to_le64(offset);
	lock.Length = cpu_to_le64(length);
	lock.Flags = cpu_to_le32(lock_flags);
	if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
		lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);

	return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
}

int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
		 __u8 *lease_key, const __le32 lease_state)
{
	int rc;
	struct smb2_lease_ack *req = NULL;

	cifs_dbg(FYI, "SMB2_lease_break\n");
	rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
	if (rc)
		return rc;

	req->hdr.CreditRequest = cpu_to_le16(1);
	req->StructureSize = cpu_to_le16(36);
	inc_rfc1001_len(req, 12);

	memcpy(req->LeaseKey, lease_key, 16);
	req->LeaseState = lease_state;

	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
	/* SMB2 buffer freed by function above */

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
	}

	return rc;
}