smb2pdu.c

  1. /*
  2. * fs/cifs/smb2pdu.c
  3. *
  4. * Copyright (C) International Business Machines Corp., 2009, 2013
  5. * Etersoft, 2012
  6. * Author(s): Steve French (sfrench@us.ibm.com)
  7. * Pavel Shilovsky (pshilovsky@samba.org) 2012
  8. *
  9. * Contains the routines for constructing the SMB2 PDUs themselves
  10. *
  11. * This library is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU Lesser General Public License as published
  13. * by the Free Software Foundation; either version 2.1 of the License, or
  14. * (at your option) any later version.
  15. *
  16. * This library is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
  19. * the GNU Lesser General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU Lesser General Public License
  22. * along with this library; if not, write to the Free Software
  23. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  24. */
  25. /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
  26. /* Note that there are handle based routines which must be */
  27. /* treated slightly differently for reconnection purposes since we never */
  28. /* want to reuse a stale file handle and only the caller knows the file info */
  29. #include <linux/fs.h>
  30. #include <linux/kernel.h>
  31. #include <linux/vfs.h>
  32. #include <linux/task_io_accounting_ops.h>
  33. #include <linux/uaccess.h>
  34. #include <linux/pagemap.h>
  35. #include <linux/xattr.h>
  36. #include "smb2pdu.h"
  37. #include "cifsglob.h"
  38. #include "cifsacl.h"
  39. #include "cifsproto.h"
  40. #include "smb2proto.h"
  41. #include "cifs_unicode.h"
  42. #include "cifs_debug.h"
  43. #include "ntlmssp.h"
  44. #include "smb2status.h"
  45. #include "smb2glob.h"
  46. #include "cifspdu.h"
  47. #include "cifs_spnego.h"
  48. /*
  49. * The following table defines the expected "StructureSize" of SMB2 requests
  50. * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
  51. *
  52. * Note that commands are defined in smb2pdu.h in le16 but the array below is
  53. * indexed by command in host byte order.
  54. */
  55. static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
  56. /* SMB2_NEGOTIATE */ 36,
  57. /* SMB2_SESSION_SETUP */ 25,
  58. /* SMB2_LOGOFF */ 4,
  59. /* SMB2_TREE_CONNECT */ 9,
  60. /* SMB2_TREE_DISCONNECT */ 4,
  61. /* SMB2_CREATE */ 57,
  62. /* SMB2_CLOSE */ 24,
  63. /* SMB2_FLUSH */ 24,
  64. /* SMB2_READ */ 49,
  65. /* SMB2_WRITE */ 49,
  66. /* SMB2_LOCK */ 48,
  67. /* SMB2_IOCTL */ 57,
  68. /* SMB2_CANCEL */ 4,
  69. /* SMB2_ECHO */ 4,
  70. /* SMB2_QUERY_DIRECTORY */ 33,
  71. /* SMB2_CHANGE_NOTIFY */ 32,
  72. /* SMB2_QUERY_INFO */ 41,
  73. /* SMB2_SET_INFO */ 33,
  74. /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
  75. };
  76. static void
  77. smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
  78. const struct cifs_tcon *tcon)
  79. {
  80. struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
  81. char *temp = (char *)hdr;
  82. /* lookup word count ie StructureSize from table */
  83. __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)];
  84. /*
  85. * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
  86. * largest operations (Create)
  87. */
  88. memset(temp, 0, 256);
  89. /* Note this is only network field converted to big endian */
  90. hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
  91. - 4 /* RFC 1001 length field itself not counted */);
  92. hdr->ProtocolId = SMB2_PROTO_NUMBER;
  93. hdr->StructureSize = cpu_to_le16(64);
  94. hdr->Command = smb2_cmd;
  95. hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
  96. hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
  97. if (!tcon)
  98. goto out;
  99. /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
  100. /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
  101. if ((tcon->ses) && (tcon->ses->server) &&
  102. (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
  103. hdr->CreditCharge = cpu_to_le16(1);
  104. /* else CreditCharge MBZ */
  105. hdr->TreeId = tcon->tid;
  106. /* Uid is not converted */
  107. if (tcon->ses)
  108. hdr->SessionId = tcon->ses->Suid;
  109. /*
  110. * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
  111. * to pass the path on the Open SMB prefixed by \\server\share.
  112. * Not sure when we would need to do the augmented path (if ever) and
  113. * setting this flag breaks the SMB2 open operation since it is
  114. * illegal to send an empty path name (without \\server\share prefix)
  115. * when the DFS flag is set in the SMB open header. We could
  116. * consider setting the flag on all operations other than open
  117. * but it is safer to not set it for now.
  118. */
  119. /* if (tcon->share_flags & SHI1005_FLAGS_DFS)
  120. hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
  121. if (tcon->ses && tcon->ses->server && tcon->ses->server->sign)
  122. hdr->Flags |= SMB2_FLAGS_SIGNED;
  123. out:
  124. pdu->StructureSize2 = cpu_to_le16(parmsize);
  125. return;
  126. }
  127. static int
  128. smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
  129. {
  130. int rc = 0;
  131. struct nls_table *nls_codepage;
  132. struct cifs_ses *ses;
  133. struct TCP_Server_Info *server;
  134. /*
  135. * SMB2's NegProt, SessSetup and Logoff do not have a tcon yet, so the
  136. * tcp and smb session status checks for those three are done
  137. * differently - in the calling routine.
  138. */
  139. if (tcon == NULL)
  140. return rc;
  141. if (smb2_command == SMB2_TREE_CONNECT)
  142. return rc;
  143. if (tcon->tidStatus == CifsExiting) {
  144. /*
  145. * only tree disconnect, open, and write,
  146. * (and ulogoff which does not have tcon)
  147. * are allowed as we start a forced umount.
  148. */
  149. if ((smb2_command != SMB2_WRITE) &&
  150. (smb2_command != SMB2_CREATE) &&
  151. (smb2_command != SMB2_TREE_DISCONNECT)) {
  152. cifs_dbg(FYI, "can not send cmd %d while umounting\n",
  153. smb2_command);
  154. return -ENODEV;
  155. }
  156. }
  157. if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
  158. (!tcon->ses->server))
  159. return -EIO;
  160. ses = tcon->ses;
  161. server = ses->server;
  162. /*
  163. * Give demultiplex thread up to 10 seconds to reconnect, should be
  164. * greater than cifs socket timeout which is 7 seconds
  165. */
  166. while (server->tcpStatus == CifsNeedReconnect) {
  167. /*
  168. * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
  169. * here since they are implicitly done when session drops.
  170. */
  171. switch (smb2_command) {
  172. /*
  173. * BB Should we keep oplock break and add flush to exceptions?
  174. */
  175. case SMB2_TREE_DISCONNECT:
  176. case SMB2_CANCEL:
  177. case SMB2_CLOSE:
  178. case SMB2_OPLOCK_BREAK:
  179. return -EAGAIN;
  180. }
  181. wait_event_interruptible_timeout(server->response_q,
  182. (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
  183. /* are we still trying to reconnect? */
  184. if (server->tcpStatus != CifsNeedReconnect)
  185. break;
  186. /*
  187. * on "soft" mounts we wait once. Hard mounts keep
  188. * retrying until process is killed or server comes
  189. * back on-line
  190. */
  191. if (!tcon->retry) {
  192. cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
  193. return -EHOSTDOWN;
  194. }
  195. }
  196. if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
  197. return rc;
  198. nls_codepage = load_nls_default();
  199. /*
  200. * need to prevent multiple threads trying to simultaneously reconnect
  201. * the same SMB session
  202. */
  203. mutex_lock(&tcon->ses->session_mutex);
  204. rc = cifs_negotiate_protocol(0, tcon->ses);
  205. if (!rc && tcon->ses->need_reconnect)
  206. rc = cifs_setup_session(0, tcon->ses, nls_codepage);
  207. if (rc || !tcon->need_reconnect) {
  208. mutex_unlock(&tcon->ses->session_mutex);
  209. goto out;
  210. }
  211. cifs_mark_open_files_invalid(tcon);
  212. rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
  213. mutex_unlock(&tcon->ses->session_mutex);
  214. cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
  215. if (rc)
  216. goto out;
  217. atomic_inc(&tconInfoReconnectCount);
  218. out:
  219. /*
  220. * Check if handle based operation so we know whether we can continue
  221. * or not without returning to caller to reset file handle.
  222. */
  223. /*
  224. * BB Is flush done by server on drop of tcp session? Should we special
  225. * case it and skip above?
  226. */
  227. switch (smb2_command) {
  228. case SMB2_FLUSH:
  229. case SMB2_READ:
  230. case SMB2_WRITE:
  231. case SMB2_LOCK:
  232. case SMB2_IOCTL:
  233. case SMB2_QUERY_DIRECTORY:
  234. case SMB2_CHANGE_NOTIFY:
  235. case SMB2_QUERY_INFO:
  236. case SMB2_SET_INFO:
  237. return -EAGAIN;
  238. }
  239. unload_nls(nls_codepage);
  240. return rc;
  241. }
  242. /*
  243. * Allocate and return pointer to an SMB request hdr, and set basic
  244. * SMB information in the SMB header. If the return code is zero, this
  245. * function must have filled in request_buf pointer.
  246. */
  247. static int
  248. small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
  249. void **request_buf)
  250. {
  251. int rc = 0;
  252. rc = smb2_reconnect(smb2_command, tcon);
  253. if (rc)
  254. return rc;
  255. /* BB eventually switch this to SMB2 specific small buf size */
  256. *request_buf = cifs_small_buf_get();
  257. if (*request_buf == NULL) {
  258. /* BB should we add a retry in here if not a writepage? */
  259. return -ENOMEM;
  260. }
  261. smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);
  262. if (tcon != NULL) {
  263. #ifdef CONFIG_CIFS_STATS2
  264. uint16_t com_code = le16_to_cpu(smb2_command);
  265. cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
  266. #endif
  267. cifs_stats_inc(&tcon->num_smbs_sent);
  268. }
  269. return rc;
  270. }
  271. #ifdef CONFIG_CIFS_SMB311
  272. /* offset is sizeof smb2_negotiate_req - 4 but rounded up to 8 bytes */
  273. #define OFFSET_OF_NEG_CONTEXT 0x68 /* sizeof(struct smb2_negotiate_req) - 4 */
  274. #define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1)
  275. #define SMB2_ENCRYPTION_CAPABILITIES cpu_to_le16(2)
  276. static void
  277. build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
  278. {
  279. pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
  280. pneg_ctxt->DataLength = cpu_to_le16(38);
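/* DataLength 38 = HashAlgorithmCount (2) + SaltLength (2) + one 2-byte hash algorithm ID + SMB311_SALT_SIZE (32) bytes of salt */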
  281. pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
  282. pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
  283. get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
  284. pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
  285. }
  286. static void
  287. build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
  288. {
  289. pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
  290. pneg_ctxt->DataLength = cpu_to_le16(6);
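/* DataLength 6 = CipherCount (2) + two 2-byte cipher IDs */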
  291. pneg_ctxt->CipherCount = cpu_to_le16(2);
  292. pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
  293. pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
  294. }
  295. static void
  296. assemble_neg_contexts(struct smb2_negotiate_req *req)
  297. {
  298. /* +4 is to account for the RFC1001 len field */
  299. char *pneg_ctxt = (char *)req + OFFSET_OF_NEG_CONTEXT + 4;
  300. build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
  301. /* Add 2 to size to round to 8 byte boundary */
  302. pneg_ctxt += 2 + sizeof(struct smb2_preauth_neg_context);
  303. build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
  304. req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
  305. req->NegotiateContextCount = cpu_to_le16(2);
  306. inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2
  307. + sizeof(struct smb2_encryption_neg_context)); /* calculate hash */
  308. }
  309. #else
  310. static void assemble_neg_contexts(struct smb2_negotiate_req *req)
  311. {
  312. return;
  313. }
  314. #endif /* SMB311 */
  315. /*
  316. *
  317. * SMB2 Worker functions follow:
  318. *
  319. * The general structure of the worker functions is:
  320. * 1) Call smb2_init (assembles SMB2 header)
  321. * 2) Initialize SMB2 command specific fields in fixed length area of SMB
  322. * 3) Call smb_sendrcv2 (sends request on socket and waits for response)
  323. * 4) Decode SMB2 command specific fields in the fixed length area
  324. * 5) Decode variable length data area (if any for this SMB2 command type)
  325. * 6) Call free smb buffer
  326. * 7) return
  327. *
  328. */
  329. int
  330. SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
  331. {
  332. struct smb2_negotiate_req *req;
  333. struct smb2_negotiate_rsp *rsp;
  334. struct kvec iov[1];
  335. int rc = 0;
  336. int resp_buftype;
  337. struct TCP_Server_Info *server = ses->server;
  338. int blob_offset, blob_length;
  339. char *security_blob;
  340. int flags = CIFS_NEG_OP;
  341. cifs_dbg(FYI, "Negotiate protocol\n");
  342. if (!server) {
  343. WARN(1, "%s: server is NULL!\n", __func__);
  344. return -EIO;
  345. }
  346. rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
  347. if (rc)
  348. return rc;
  349. req->hdr.SessionId = 0;
  350. req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
  351. req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
  352. inc_rfc1001_len(req, 2);
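/* account for the single 2-byte entry just placed in the Dialects[] array */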
  353. /* only one of SMB2 signing flags may be set in SMB2 request */
  354. if (ses->sign)
  355. req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
  356. else if (global_secflags & CIFSSEC_MAY_SIGN)
  357. req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
  358. else
  359. req->SecurityMode = 0;
  360. req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);
  361. /* ClientGUID must be zero for SMB2.02 dialect */
  362. if (ses->server->vals->protocol_id == SMB20_PROT_ID)
  363. memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
  364. else {
  365. memcpy(req->ClientGUID, server->client_guid,
  366. SMB2_CLIENT_GUID_SIZE);
  367. if (ses->server->vals->protocol_id == SMB311_PROT_ID)
  368. assemble_neg_contexts(req);
  369. }
  370. iov[0].iov_base = (char *)req;
  371. /* 4 for rfc1002 length field */
  372. iov[0].iov_len = get_rfc1002_length(req) + 4;
  373. rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);
  374. rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base;
  375. /*
  376. * No tcon so can't do
  377. * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
  378. */
  379. if (rc != 0)
  380. goto neg_exit;
  381. cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
  382. /* BB we may eventually want to match the negotiated vs. requested
  383. dialect, even though we are only requesting one at a time */
  384. if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
  385. cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
  386. else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
  387. cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
  388. else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
  389. cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
  390. else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
  391. cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
  392. #ifdef CONFIG_CIFS_SMB311
  393. else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
  394. cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
  395. #endif /* SMB311 */
  396. else {
  397. cifs_dbg(VFS, "Illegal dialect returned by server 0x%x\n",
  398. le16_to_cpu(rsp->DialectRevision));
  399. rc = -EIO;
  400. goto neg_exit;
  401. }
  402. server->dialect = le16_to_cpu(rsp->DialectRevision);
  403. /* SMB2 only has an extended negflavor */
  404. server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
  405. /* set it to the maximum buffer size value we can send with 1 credit */
  406. server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
  407. SMB2_MAX_BUFFER_SIZE);
  408. server->max_read = le32_to_cpu(rsp->MaxReadSize);
  409. server->max_write = le32_to_cpu(rsp->MaxWriteSize);
  410. /* BB Do we need to validate the SecurityMode? */
  411. server->sec_mode = le16_to_cpu(rsp->SecurityMode);
  412. server->capabilities = le32_to_cpu(rsp->Capabilities);
  413. /* Internal types */
  414. server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
  415. security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
  416. &rsp->hdr);
  417. /*
  418. * See MS-SMB2 section 2.2.4: if no blob, client picks default which
  419. * for us will be
  420. * ses->sectype = RawNTLMSSP;
  421. * but for the time being this is our only auth choice so it doesn't matter.
  422. * We just found a server which sets blob length to zero expecting raw.
  423. */
  424. if (blob_length == 0)
  425. cifs_dbg(FYI, "missing security blob on negprot\n");
  426. rc = cifs_enable_signing(server, ses->sign);
  427. if (rc)
  428. goto neg_exit;
  429. if (blob_length) {
  430. rc = decode_negTokenInit(security_blob, blob_length, server);
  431. if (rc == 1)
  432. rc = 0;
  433. else if (rc == 0)
  434. rc = -EIO;
  435. }
  436. neg_exit:
  437. free_rsp_buf(resp_buftype, rsp);
  438. return rc;
  439. }
  440. int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
  441. {
  442. int rc = 0;
  443. struct validate_negotiate_info_req vneg_inbuf;
  444. struct validate_negotiate_info_rsp *pneg_rsp;
  445. u32 rsplen;
  446. cifs_dbg(FYI, "validate negotiate\n");
  447. /*
  448. * validation ioctl must be signed, so no point sending this if we
  449. * can not sign it. We could eventually change this to selectively
  450. * sign just this, the first and only signed request on a connection.
  451. * This is good enough for now since a user who wants better security
  452. * would also enable signing on the mount. Having validation of
  453. * negotiate info for signed connections helps reduce attack vectors
  454. */
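/*
 * FSCTL_VALIDATE_NEGOTIATE_INFO lets the client confirm, over a signed
 * channel, that the dialect, capabilities and security mode agreed during
 * the (unsigned) negotiate exchange were not tampered with in transit.
 */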
  455. if (tcon->ses->server->sign == false)
  456. return 0; /* validation requires signing */
  457. vneg_inbuf.Capabilities =
  458. cpu_to_le32(tcon->ses->server->vals->req_capabilities);
  459. memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
  460. SMB2_CLIENT_GUID_SIZE);
  461. if (tcon->ses->sign)
  462. vneg_inbuf.SecurityMode =
  463. cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
  464. else if (global_secflags & CIFSSEC_MAY_SIGN)
  465. vneg_inbuf.SecurityMode =
  466. cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
  467. else
  468. vneg_inbuf.SecurityMode = 0;
  469. vneg_inbuf.DialectCount = cpu_to_le16(1);
  470. vneg_inbuf.Dialects[0] =
  471. cpu_to_le16(tcon->ses->server->vals->protocol_id);
  472. rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
  473. FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
  474. (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
  475. (char **)&pneg_rsp, &rsplen);
  476. if (rc != 0) {
  477. cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
  478. return -EIO;
  479. }
  480. if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
  481. cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
  482. return -EIO;
  483. }
  484. /* check validate negotiate info response matches what we got earlier */
  485. if (pneg_rsp->Dialect !=
  486. cpu_to_le16(tcon->ses->server->vals->protocol_id))
  487. goto vneg_out;
  488. if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
  489. goto vneg_out;
  490. /* do not validate server guid because not saved at negprot time yet */
  491. if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
  492. SMB2_LARGE_FILES) != tcon->ses->server->capabilities)
  493. goto vneg_out;
  494. /* validate negotiate successful */
  495. cifs_dbg(FYI, "validate negotiate info successful\n");
  496. return 0;
  497. vneg_out:
  498. cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
  499. return -EIO;
  500. }
  501. int
  502. SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
  503. const struct nls_table *nls_cp)
  504. {
  505. struct smb2_sess_setup_req *req;
  506. struct smb2_sess_setup_rsp *rsp = NULL;
  507. struct kvec iov[2];
  508. int rc = 0;
  509. int resp_buftype = CIFS_NO_BUFFER;
  510. __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
  511. struct TCP_Server_Info *server = ses->server;
  512. u16 blob_length = 0;
  513. struct key *spnego_key = NULL;
  514. char *security_blob = NULL;
  515. unsigned char *ntlmssp_blob = NULL;
  516. bool use_spnego = false; /* else use raw ntlmssp */
  517. cifs_dbg(FYI, "Session Setup\n");
  518. if (!server) {
  519. WARN(1, "%s: server is NULL!\n", __func__);
  520. return -EIO;
  521. }
  522. /*
  523. * If we are here due to reconnect, free per-smb session key
  524. * in case signing was required.
  525. */
  526. kfree(ses->auth_key.response);
  527. ses->auth_key.response = NULL;
  528. /*
  529. * If memory allocation is successful, caller of this function
  530. * frees it.
  531. */
  532. ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
  533. if (!ses->ntlmssp)
  534. return -ENOMEM;
  535. ses->ntlmssp->sesskey_per_smbsess = true;
  536. /* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
  537. if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
  538. ses->sectype = RawNTLMSSP;
  539. ssetup_ntlmssp_authenticate:
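/*
 * Raw NTLMSSP takes two SESSION_SETUP round trips: the first carries the
 * NEGOTIATE blob and the server answers STATUS_MORE_PROCESSING_REQUIRED
 * with a CHALLENGE blob; we then jump back to this label to send the
 * AUTHENTICATE blob in a second SESSION_SETUP.
 */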
  540. if (phase == NtLmChallenge)
  541. phase = NtLmAuthenticate; /* if ntlmssp, now final phase */
  542. rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
  543. if (rc)
  544. return rc;
  545. req->hdr.SessionId = 0; /* First session, not a reauthenticate */
  546. req->Flags = 0; /* MBZ */
  547. /* to enable echos and oplocks */
  548. req->hdr.CreditRequest = cpu_to_le16(3);
  549. /* only one of SMB2 signing flags may be set in SMB2 request */
  550. if (server->sign)
  551. req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
  552. else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
  553. req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
  554. else
  555. req->SecurityMode = 0;
  556. req->Capabilities = 0;
  557. req->Channel = 0; /* MBZ */
  558. iov[0].iov_base = (char *)req;
  559. /* 4 for rfc1002 length field and 1 for pad */
  560. iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
  561. if (ses->sectype == Kerberos) {
  562. #ifdef CONFIG_CIFS_UPCALL
  563. struct cifs_spnego_msg *msg;
  564. spnego_key = cifs_get_spnego_key(ses);
  565. if (IS_ERR(spnego_key)) {
  566. rc = PTR_ERR(spnego_key);
  567. spnego_key = NULL;
  568. goto ssetup_exit;
  569. }
  570. msg = spnego_key->payload.data[0];
  571. /*
  572. * check version field to make sure that cifs.upcall is
  573. * sending us a response in an expected form
  574. */
  575. if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
  576. cifs_dbg(VFS,
  577. "bad cifs.upcall version. Expected %d got %d",
  578. CIFS_SPNEGO_UPCALL_VERSION, msg->version);
  579. rc = -EKEYREJECTED;
  580. goto ssetup_exit;
  581. }
  582. ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
  583. GFP_KERNEL);
  584. if (!ses->auth_key.response) {
  585. cifs_dbg(VFS,
  586. "Kerberos can't allocate (%u bytes) memory",
  587. msg->sesskey_len);
  588. rc = -ENOMEM;
  589. goto ssetup_exit;
  590. }
  591. ses->auth_key.len = msg->sesskey_len;
  592. blob_length = msg->secblob_len;
  593. iov[1].iov_base = msg->data + msg->sesskey_len;
  594. iov[1].iov_len = blob_length;
  595. #else
  596. rc = -EOPNOTSUPP;
  597. goto ssetup_exit;
  598. #endif /* CONFIG_CIFS_UPCALL */
  599. } else if (phase == NtLmNegotiate) { /* if not krb5 must be ntlmssp */
  600. ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
  601. GFP_KERNEL);
  602. if (ntlmssp_blob == NULL) {
  603. rc = -ENOMEM;
  604. goto ssetup_exit;
  605. }
  606. build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
  607. if (use_spnego) {
  608. /* blob_length = build_spnego_ntlmssp_blob(
  609. &security_blob,
  610. sizeof(struct _NEGOTIATE_MESSAGE),
  611. ntlmssp_blob); */
  612. /* BB eventually need to add this */
  613. cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
  614. rc = -EOPNOTSUPP;
  615. kfree(ntlmssp_blob);
  616. goto ssetup_exit;
  617. } else {
  618. blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
  619. /* with raw NTLMSSP we don't encapsulate in SPNEGO */
  620. security_blob = ntlmssp_blob;
  621. }
  622. iov[1].iov_base = security_blob;
  623. iov[1].iov_len = blob_length;
  624. } else if (phase == NtLmAuthenticate) {
  625. req->hdr.SessionId = ses->Suid;
  626. rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
  627. nls_cp);
  628. if (rc) {
  629. cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
  630. rc);
  631. goto ssetup_exit; /* BB double check error handling */
  632. }
  633. if (use_spnego) {
  634. /* blob_length = build_spnego_ntlmssp_blob(
  635. &security_blob,
  636. blob_length,
  637. ntlmssp_blob); */
  638. cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
  639. rc = -EOPNOTSUPP;
  640. kfree(ntlmssp_blob);
  641. goto ssetup_exit;
  642. } else {
  643. security_blob = ntlmssp_blob;
  644. }
  645. iov[1].iov_base = security_blob;
  646. iov[1].iov_len = blob_length;
  647. } else {
  648. cifs_dbg(VFS, "illegal ntlmssp phase\n");
  649. rc = -EIO;
  650. goto ssetup_exit;
  651. }
  652. /* Testing shows that buffer offset must be at location of Buffer[0] */
  653. req->SecurityBufferOffset =
  654. cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
  655. 1 /* pad */ - 4 /* rfc1001 len */);
  656. req->SecurityBufferLength = cpu_to_le16(blob_length);
  657. inc_rfc1001_len(req, blob_length - 1 /* pad */);
  658. /* BB add code to build os and lm fields */
  659. rc = SendReceive2(xid, ses, iov, 2, &resp_buftype,
  660. CIFS_LOG_ERROR | CIFS_NEG_OP);
  661. kfree(security_blob);
  662. rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
  663. ses->Suid = rsp->hdr.SessionId;
  664. if (resp_buftype != CIFS_NO_BUFFER &&
  665. rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
  666. if (phase != NtLmNegotiate) {
  667. cifs_dbg(VFS, "Unexpected more processing error\n");
  668. goto ssetup_exit;
  669. }
  670. if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
  671. le16_to_cpu(rsp->SecurityBufferOffset)) {
  672. cifs_dbg(VFS, "Invalid security buffer offset %d\n",
  673. le16_to_cpu(rsp->SecurityBufferOffset));
  674. rc = -EIO;
  675. goto ssetup_exit;
  676. }
  677. /* NTLMSSP Negotiate sent now processing challenge (response) */
  678. phase = NtLmChallenge; /* process ntlmssp challenge */
  679. rc = 0; /* MORE_PROCESSING is not an error here but expected */
  680. rc = decode_ntlmssp_challenge(rsp->Buffer,
  681. le16_to_cpu(rsp->SecurityBufferLength), ses);
  682. }
  683. /*
  684. * BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
  685. * but at least the raw NTLMSSP case works.
  686. */
  687. /*
  688. * No tcon so can't do
  689. * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
  690. */
  691. if (rc != 0)
  692. goto ssetup_exit;
  693. ses->session_flags = le16_to_cpu(rsp->SessionFlags);
  694. if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
  695. cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
  696. ssetup_exit:
  697. free_rsp_buf(resp_buftype, rsp);
  698. /* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
  699. if ((phase == NtLmChallenge) && (rc == 0))
  700. goto ssetup_ntlmssp_authenticate;
  701. if (!rc) {
  702. mutex_lock(&server->srv_mutex);
  703. if (server->sign && server->ops->generate_signingkey) {
  704. rc = server->ops->generate_signingkey(ses);
  705. kfree(ses->auth_key.response);
  706. ses->auth_key.response = NULL;
  707. if (rc) {
  708. cifs_dbg(FYI,
  709. "SMB3 session key generation failed\n");
  710. mutex_unlock(&server->srv_mutex);
  711. goto keygen_exit;
  712. }
  713. }
  714. if (!server->session_estab) {
  715. server->sequence_number = 0x2;
  716. server->session_estab = true;
  717. }
  718. mutex_unlock(&server->srv_mutex);
  719. cifs_dbg(FYI, "SMB2/3 session established successfully\n");
  720. spin_lock(&GlobalMid_Lock);
  721. ses->status = CifsGood;
  722. ses->need_reconnect = false;
  723. spin_unlock(&GlobalMid_Lock);
  724. }
  725. keygen_exit:
  726. if (!server->sign) {
  727. kfree(ses->auth_key.response);
  728. ses->auth_key.response = NULL;
  729. }
  730. if (spnego_key) {
  731. key_invalidate(spnego_key);
  732. key_put(spnego_key);
  733. }
  734. kfree(ses->ntlmssp);
  735. return rc;
  736. }
  737. int
  738. SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
  739. {
  740. struct smb2_logoff_req *req; /* response is also trivial struct */
  741. int rc = 0;
  742. struct TCP_Server_Info *server;
  743. cifs_dbg(FYI, "disconnect session %p\n", ses);
  744. if (ses && (ses->server))
  745. server = ses->server;
  746. else
  747. return -EIO;
  748. /* no need to send SMB logoff if uid already closed due to reconnect */
  749. if (ses->need_reconnect)
  750. goto smb2_session_already_dead;
  751. rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
  752. if (rc)
  753. return rc;
  754. /* since no tcon, smb2_init can not do this, so do here */
  755. req->hdr.SessionId = ses->Suid;
  756. if (server->sign)
  757. req->hdr.Flags |= SMB2_FLAGS_SIGNED;
  758. rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0);
  759. /*
  760. * No tcon so can't do
  761. * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
  762. */
  763. smb2_session_already_dead:
  764. return rc;
  765. }
  766. static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
  767. {
  768. cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
  769. }
  770. #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
  771. /* These are similar values to what Windows uses */
  772. static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
  773. {
  774. tcon->max_chunks = 256;
  775. tcon->max_bytes_chunk = 1048576;
  776. tcon->max_bytes_copy = 16777216;
  777. }
  778. int
  779. SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
  780. struct cifs_tcon *tcon, const struct nls_table *cp)
  781. {
  782. struct smb2_tree_connect_req *req;
  783. struct smb2_tree_connect_rsp *rsp = NULL;
  784. struct kvec iov[2];
  785. int rc = 0;
  786. int resp_buftype;
  787. int unc_path_len;
  788. struct TCP_Server_Info *server;
  789. __le16 *unc_path = NULL;
  790. cifs_dbg(FYI, "TCON\n");
  791. if ((ses->server) && tree)
  792. server = ses->server;
  793. else
  794. return -EIO;
  795. if (tcon && tcon->bad_network_name)
  796. return -ENOENT;
  797. if ((tcon && tcon->seal) &&
  798. ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
  799. cifs_dbg(VFS, "encryption requested but no server support");
  800. return -EOPNOTSUPP;
  801. }
  802. unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
  803. if (unc_path == NULL)
  804. return -ENOMEM;
  805. unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
  806. unc_path_len *= 2;
  807. if (unc_path_len < 2) {
  808. kfree(unc_path);
  809. return -EINVAL;
  810. }
  811. rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
  812. if (rc) {
  813. kfree(unc_path);
  814. return rc;
  815. }
  816. if (tcon == NULL) {
  817. /* since no tcon, smb2_init can not do this, so do here */
  818. req->hdr.SessionId = ses->Suid;
  819. /* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
  820. req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
  821. }
  822. iov[0].iov_base = (char *)req;
  823. /* 4 for rfc1002 length field and 1 for pad */
  824. iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
  825. /* Testing shows that buffer offset must be at location of Buffer[0] */
  826. req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
  827. - 1 /* pad */ - 4 /* do not count rfc1001 len field */);
  828. req->PathLength = cpu_to_le16(unc_path_len - 2);
  829. iov[1].iov_base = unc_path;
  830. iov[1].iov_len = unc_path_len;
  831. inc_rfc1001_len(req, unc_path_len - 1 /* pad */);
  832. rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
  833. rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;
  834. if (rc != 0) {
  835. if (tcon) {
  836. cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
  837. tcon->need_reconnect = true;
  838. }
  839. goto tcon_error_exit;
  840. }
  841. if (tcon == NULL) {
  842. ses->ipc_tid = rsp->hdr.TreeId;
  843. goto tcon_exit;
  844. }
  845. if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
  846. cifs_dbg(FYI, "connection to disk share\n");
  847. else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
  848. tcon->ipc = true;
  849. cifs_dbg(FYI, "connection to pipe share\n");
  850. } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
  851. tcon->print = true;
  852. cifs_dbg(FYI, "connection to printer\n");
  853. } else {
  854. cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
  855. rc = -EOPNOTSUPP;
  856. goto tcon_error_exit;
  857. }
  858. tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
  859. tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
  860. tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
  861. tcon->tidStatus = CifsGood;
  862. tcon->need_reconnect = false;
  863. tcon->tid = rsp->hdr.TreeId;
  864. strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
  865. if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
  866. ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
  867. cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
  868. init_copy_chunk_defaults(tcon);
  869. if (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA)
  870. cifs_dbg(VFS, "Encrypted shares not supported");
  871. if (tcon->ses->server->ops->validate_negotiate)
  872. rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
  873. tcon_exit:
  874. free_rsp_buf(resp_buftype, rsp);
  875. kfree(unc_path);
  876. return rc;
  877. tcon_error_exit:
  878. if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
  879. cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
  880. if (tcon)
  881. tcon->bad_network_name = true;
  882. }
  883. goto tcon_exit;
  884. }
  885. int
  886. SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
  887. {
  888. struct smb2_tree_disconnect_req *req; /* response is trivial */
  889. int rc = 0;
  890. struct TCP_Server_Info *server;
  891. struct cifs_ses *ses = tcon->ses;
  892. cifs_dbg(FYI, "Tree Disconnect\n");
  893. if (ses && (ses->server))
  894. server = ses->server;
  895. else
  896. return -EIO;
  897. if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
  898. return 0;
  899. rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
  900. if (rc)
  901. return rc;
  902. rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0);
  903. if (rc)
  904. cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
  905. return rc;
  906. }
  907. static struct create_durable *
  908. create_durable_buf(void)
  909. {
  910. struct create_durable *buf;
  911. buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
  912. if (!buf)
  913. return NULL;
  914. buf->ccontext.DataOffset = cpu_to_le16(offsetof
  915. (struct create_durable, Data));
  916. buf->ccontext.DataLength = cpu_to_le32(16);
  917. buf->ccontext.NameOffset = cpu_to_le16(offsetof
  918. (struct create_durable, Name));
  919. buf->ccontext.NameLength = cpu_to_le16(4);
  920. /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
  921. buf->Name[0] = 'D';
  922. buf->Name[1] = 'H';
  923. buf->Name[2] = 'n';
  924. buf->Name[3] = 'Q';
  925. return buf;
  926. }
  927. static struct create_durable *
  928. create_reconnect_durable_buf(struct cifs_fid *fid)
  929. {
  930. struct create_durable *buf;
  931. buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
  932. if (!buf)
  933. return NULL;
  934. buf->ccontext.DataOffset = cpu_to_le16(offsetof
  935. (struct create_durable, Data));
  936. buf->ccontext.DataLength = cpu_to_le32(16);
  937. buf->ccontext.NameOffset = cpu_to_le16(offsetof
  938. (struct create_durable, Name));
  939. buf->ccontext.NameLength = cpu_to_le16(4);
  940. buf->Data.Fid.PersistentFileId = fid->persistent_fid;
  941. buf->Data.Fid.VolatileFileId = fid->volatile_fid;
  942. /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
  943. buf->Name[0] = 'D';
  944. buf->Name[1] = 'H';
  945. buf->Name[2] = 'n';
  946. buf->Name[3] = 'C';
  947. return buf;
  948. }
  949. static __u8
  950. parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
  951. unsigned int *epoch)
  952. {
  953. char *data_offset;
  954. struct create_context *cc;
  955. unsigned int next;
  956. unsigned int remaining;
  957. char *name;
  958. data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
  959. remaining = le32_to_cpu(rsp->CreateContextsLength);
  960. cc = (struct create_context *)data_offset;
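/*
 * Walk the chain of create contexts in the response; each entry's Next field
 * is the offset of the following context (0 terminates the chain). We only
 * care about the lease response context, named "RqLs".
 */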
  961. while (remaining >= sizeof(struct create_context)) {
  962. name = le16_to_cpu(cc->NameOffset) + (char *)cc;
  963. if (le16_to_cpu(cc->NameLength) == 4 &&
  964. strncmp(name, "RqLs", 4) == 0)
  965. return server->ops->parse_lease_buf(cc, epoch);
  966. next = le32_to_cpu(cc->Next);
  967. if (!next)
  968. break;
  969. remaining -= next;
  970. cc = (struct create_context *)((char *)cc + next);
  971. }
  972. return 0;
  973. }
  974. static int
  975. add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
  976. unsigned int *num_iovec, __u8 *oplock)
  977. {
  978. struct smb2_create_req *req = iov[0].iov_base;
  979. unsigned int num = *num_iovec;
  980. iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock);
  981. if (iov[num].iov_base == NULL)
  982. return -ENOMEM;
  983. iov[num].iov_len = server->vals->create_lease_size;
  984. req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
  985. if (!req->CreateContextsOffset)
  986. req->CreateContextsOffset = cpu_to_le32(
  987. sizeof(struct smb2_create_req) - 4 +
  988. iov[num - 1].iov_len);
  989. le32_add_cpu(&req->CreateContextsLength,
  990. server->vals->create_lease_size);
  991. inc_rfc1001_len(&req->hdr, server->vals->create_lease_size);
  992. *num_iovec = num + 1;
  993. return 0;
  994. }
  995. static struct create_durable_v2 *
  996. create_durable_v2_buf(struct cifs_fid *pfid)
  997. {
  998. struct create_durable_v2 *buf;
  999. buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
  1000. if (!buf)
  1001. return NULL;
  1002. buf->ccontext.DataOffset = cpu_to_le16(offsetof
  1003. (struct create_durable_v2, dcontext));
  1004. buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2));
  1005. buf->ccontext.NameOffset = cpu_to_le16(offsetof
  1006. (struct create_durable_v2, Name));
  1007. buf->ccontext.NameLength = cpu_to_le16(4);
  1008. buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
  1009. buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
  1010. get_random_bytes(buf->dcontext.CreateGuid, 16);
  1011. memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
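/* save the CreateGuid in the fid so a durable handle reconnect ("DH2C" context) can echo it back to the server */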
  1012. /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
  1013. buf->Name[0] = 'D';
  1014. buf->Name[1] = 'H';
  1015. buf->Name[2] = '2';
  1016. buf->Name[3] = 'Q';
  1017. return buf;
  1018. }
  1019. static struct create_durable_handle_reconnect_v2 *
  1020. create_reconnect_durable_v2_buf(struct cifs_fid *fid)
  1021. {
  1022. struct create_durable_handle_reconnect_v2 *buf;
  1023. buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
  1024. GFP_KERNEL);
  1025. if (!buf)
  1026. return NULL;
  1027. buf->ccontext.DataOffset =
  1028. cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
  1029. dcontext));
  1030. buf->ccontext.DataLength =
  1031. cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
  1032. buf->ccontext.NameOffset =
  1033. cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
  1034. Name));
  1035. buf->ccontext.NameLength = cpu_to_le16(4);
  1036. buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
  1037. buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
  1038. buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
  1039. memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);
  1040. /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
  1041. buf->Name[0] = 'D';
  1042. buf->Name[1] = 'H';
  1043. buf->Name[2] = '2';
  1044. buf->Name[3] = 'C';
  1045. return buf;
  1046. }
  1047. static int
  1048. add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
  1049. struct cifs_open_parms *oparms)
  1050. {
  1051. struct smb2_create_req *req = iov[0].iov_base;
  1052. unsigned int num = *num_iovec;
  1053. iov[num].iov_base = create_durable_v2_buf(oparms->fid);
  1054. if (iov[num].iov_base == NULL)
  1055. return -ENOMEM;
  1056. iov[num].iov_len = sizeof(struct create_durable_v2);
  1057. if (!req->CreateContextsOffset)
  1058. req->CreateContextsOffset =
  1059. cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
  1060. iov[1].iov_len);
  1061. le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2));
  1062. inc_rfc1001_len(&req->hdr, sizeof(struct create_durable_v2));
  1063. *num_iovec = num + 1;
  1064. return 0;
  1065. }
  1066. static int
  1067. add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
  1068. struct cifs_open_parms *oparms)
  1069. {
  1070. struct smb2_create_req *req = iov[0].iov_base;
  1071. unsigned int num = *num_iovec;
  1072. /* indicate that we don't need to relock the file */
  1073. oparms->reconnect = false;
  1074. iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
  1075. if (iov[num].iov_base == NULL)
  1076. return -ENOMEM;
  1077. iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
  1078. if (!req->CreateContextsOffset)
  1079. req->CreateContextsOffset =
  1080. cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
  1081. iov[1].iov_len);
  1082. le32_add_cpu(&req->CreateContextsLength,
  1083. sizeof(struct create_durable_handle_reconnect_v2));
  1084. inc_rfc1001_len(&req->hdr,
  1085. sizeof(struct create_durable_handle_reconnect_v2));
  1086. *num_iovec = num + 1;
  1087. return 0;
  1088. }
  1089. static int
  1090. add_durable_context(struct kvec *iov, unsigned int *num_iovec,
  1091. struct cifs_open_parms *oparms, bool use_persistent)
  1092. {
  1093. struct smb2_create_req *req = iov[0].iov_base;
  1094. unsigned int num = *num_iovec;
  1095. if (use_persistent) {
  1096. if (oparms->reconnect)
  1097. return add_durable_reconnect_v2_context(iov, num_iovec,
  1098. oparms);
  1099. else
  1100. return add_durable_v2_context(iov, num_iovec, oparms);
  1101. }
  1102. if (oparms->reconnect) {
  1103. iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
  1104. /* indicate that we don't need to relock the file */
  1105. oparms->reconnect = false;
  1106. } else
  1107. iov[num].iov_base = create_durable_buf();
  1108. if (iov[num].iov_base == NULL)
  1109. return -ENOMEM;
  1110. iov[num].iov_len = sizeof(struct create_durable);
  1111. if (!req->CreateContextsOffset)
  1112. req->CreateContextsOffset =
  1113. cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
  1114. iov[1].iov_len);
  1115. le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
  1116. inc_rfc1001_len(&req->hdr, sizeof(struct create_durable));
  1117. *num_iovec = num + 1;
  1118. return 0;
  1119. }
  1120. int
  1121. SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
  1122. __u8 *oplock, struct smb2_file_all_info *buf,
  1123. struct smb2_err_rsp **err_buf)
  1124. {
  1125. struct smb2_create_req *req;
  1126. struct smb2_create_rsp *rsp;
  1127. struct TCP_Server_Info *server;
  1128. struct cifs_tcon *tcon = oparms->tcon;
  1129. struct cifs_ses *ses = tcon->ses;
  1130. struct kvec iov[4];
  1131. int resp_buftype;
  1132. int uni_path_len;
  1133. __le16 *copy_path = NULL;
  1134. int copy_size;
  1135. int rc = 0;
  1136. unsigned int num_iovecs = 2;
  1137. __u32 file_attributes = 0;
  1138. char *dhc_buf = NULL, *lc_buf = NULL;
  1139. cifs_dbg(FYI, "create/open\n");
  1140. if (ses && (ses->server))
  1141. server = ses->server;
  1142. else
  1143. return -EIO;
  1144. rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
  1145. if (rc)
  1146. return rc;
  1147. if (oparms->create_options & CREATE_OPTION_READONLY)
  1148. file_attributes |= ATTR_READONLY;
  1149. if (oparms->create_options & CREATE_OPTION_SPECIAL)
  1150. file_attributes |= ATTR_SYSTEM;
  1151. req->ImpersonationLevel = IL_IMPERSONATION;
  1152. req->DesiredAccess = cpu_to_le32(oparms->desired_access);
  1153. /* File attributes ignored on open (used in create though) */
  1154. req->FileAttributes = cpu_to_le32(file_attributes);
  1155. req->ShareAccess = FILE_SHARE_ALL_LE;
  1156. req->CreateDisposition = cpu_to_le32(oparms->disposition);
  1157. req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
  1158. uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
  1159. /* do not count rfc1001 len field */
  1160. req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4);
  1161. iov[0].iov_base = (char *)req;
  1162. /* 4 for rfc1002 length field */
  1163. iov[0].iov_len = get_rfc1002_length(req) + 4;
  1164. /* MUST set path len (NameLength) to 0 when opening the root of a share */
  1165. req->NameLength = cpu_to_le16(uni_path_len - 2);
  1166. /* -1 since last byte is buf[0] which is sent below (path) */
  1167. iov[0].iov_len--;
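/* round the name up to an 8-byte multiple so any create contexts appended after it stay 8-byte aligned on the wire */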
  1168. if (uni_path_len % 8 != 0) {
  1169. copy_size = uni_path_len / 8 * 8;
  1170. if (copy_size < uni_path_len)
  1171. copy_size += 8;
  1172. copy_path = kzalloc(copy_size, GFP_KERNEL);
  1173. if (!copy_path)
  1174. return -ENOMEM;
  1175. memcpy((char *)copy_path, (const char *)path,
  1176. uni_path_len);
  1177. uni_path_len = copy_size;
  1178. path = copy_path;
  1179. }
  1180. iov[1].iov_len = uni_path_len;
  1181. iov[1].iov_base = path;
  1182. /* -1 since last byte is buf[0] which was counted in smb2_buf_len */
  1183. inc_rfc1001_len(req, uni_path_len - 1);
  1184. if (!server->oplocks)
  1185. *oplock = SMB2_OPLOCK_LEVEL_NONE;
  1186. if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
  1187. *oplock == SMB2_OPLOCK_LEVEL_NONE)
  1188. req->RequestedOplockLevel = *oplock;
  1189. else {
  1190. rc = add_lease_context(server, iov, &num_iovecs, oplock);
  1191. if (rc) {
  1192. cifs_small_buf_release(req);
  1193. kfree(copy_path);
  1194. return rc;
  1195. }
  1196. lc_buf = iov[num_iovecs-1].iov_base;
  1197. }
  1198. if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
  1199. /* need to set Next field of lease context if we request it */
  1200. if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
  1201. struct create_context *ccontext =
  1202. (struct create_context *)iov[num_iovecs-1].iov_base;
  1203. ccontext->Next =
  1204. cpu_to_le32(server->vals->create_lease_size);
  1205. }
  1206. rc = add_durable_context(iov, &num_iovecs, oparms,
  1207. tcon->use_persistent);
  1208. if (rc) {
  1209. cifs_small_buf_release(req);
  1210. kfree(copy_path);
  1211. kfree(lc_buf);
  1212. return rc;
  1213. }
  1214. dhc_buf = iov[num_iovecs-1].iov_base;
  1215. }
  1216. rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
  1217. rsp = (struct smb2_create_rsp *)iov[0].iov_base;
  1218. if (rc != 0) {
  1219. cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
  1220. if (err_buf)
  1221. *err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4,
  1222. GFP_KERNEL);
  1223. goto creat_exit;
  1224. }
  1225. oparms->fid->persistent_fid = rsp->PersistentFileId;
  1226. oparms->fid->volatile_fid = rsp->VolatileFileId;
  1227. if (buf) {
  1228. memcpy(buf, &rsp->CreationTime, 32);
  1229. buf->AllocationSize = rsp->AllocationSize;
  1230. buf->EndOfFile = rsp->EndofFile;
  1231. buf->Attributes = rsp->FileAttributes;
  1232. buf->NumberOfLinks = cpu_to_le32(1);
  1233. buf->DeletePending = 0;
  1234. }
  1235. if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
  1236. *oplock = parse_lease_state(server, rsp, &oparms->fid->epoch);
  1237. else
  1238. *oplock = rsp->OplockLevel;
  1239. creat_exit:
  1240. kfree(copy_path);
  1241. kfree(lc_buf);
  1242. kfree(dhc_buf);
  1243. free_rsp_buf(resp_buftype, rsp);
  1244. return rc;
  1245. }

/*
 * SMB2 IOCTL is used for both IOCTLs and FSCTLs
 */
int
SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	   u64 volatile_fid, u32 opcode, bool is_fsctl, char *in_data,
	   u32 indatalen, char **out_data, u32 *plen /* returned data len */)
{
	struct smb2_ioctl_req *req;
	struct smb2_ioctl_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct kvec iov[2];
	int resp_buftype;
	int num_iovecs;
	int rc = 0;

	cifs_dbg(FYI, "SMB2 IOCTL\n");

	if (out_data != NULL)
		*out_data = NULL;

	/* zero out returned data len, in case of error */
	if (plen)
		*plen = 0;

	if (tcon)
		ses = tcon->ses;
	else
		return -EIO;

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_IOCTL, tcon, (void **) &req);
	if (rc)
		return rc;

	req->CtlCode = cpu_to_le32(opcode);
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	if (indatalen) {
		req->InputCount = cpu_to_le32(indatalen);
		/* do not set InputOffset if no input data */
		req->InputOffset =
		       cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4);
		iov[1].iov_base = in_data;
		iov[1].iov_len = indatalen;
		num_iovecs = 2;
	} else
		num_iovecs = 1;

	req->OutputOffset = 0;
	req->OutputCount = 0; /* MBZ */

	/*
	 * Could increase MaxOutputResponse, but that would require more
	 * than one credit. Windows typically sets this smaller, but for some
	 * ioctls it may be useful to allow server to send more. No point
	 * limiting what the server can send as long as fits in one credit
	 */
	req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */

	if (is_fsctl)
		req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
	else
		req->Flags = 0;

	iov[0].iov_base = (char *)req;

	/*
	 * If no input data, the size of ioctl struct in
	 * protocol spec still includes a 1 byte data buffer,
	 * but if input data passed to ioctl, we do not
	 * want to double count this, so we do not send
	 * the dummy one byte of data in iovec[0] if sending
	 * input data (in iovec[1]). We also must add 4 bytes
	 * in first iovec to allow for rfc1002 length field.
	 */
	if (indatalen) {
		iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
		inc_rfc1001_len(req, indatalen - 1);
	} else
		iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
	rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;

	if ((rc != 0) && (rc != -EINVAL)) {
		cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
		goto ioctl_exit;
	} else if (rc == -EINVAL) {
		if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
		    (opcode != FSCTL_SRV_COPYCHUNK)) {
			cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
			goto ioctl_exit;
		}
	}

	/* check if caller wants to look at return data or just return rc */
	if ((plen == NULL) || (out_data == NULL))
		goto ioctl_exit;

	*plen = le32_to_cpu(rsp->OutputCount);

	/* We check for obvious errors in the output buffer length and offset */
	if (*plen == 0)
		goto ioctl_exit; /* server returned no data */
	else if (*plen > 0xFF00) {
		cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
		*plen = 0;
		rc = -EIO;
		goto ioctl_exit;
	}

	if (get_rfc1002_length(rsp) < le32_to_cpu(rsp->OutputOffset) + *plen) {
		cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
			 le32_to_cpu(rsp->OutputOffset));
		*plen = 0;
		rc = -EIO;
		goto ioctl_exit;
	}

	*out_data = kmalloc(*plen, GFP_KERNEL);
	if (*out_data == NULL) {
		rc = -ENOMEM;
		goto ioctl_exit;
	}

	memcpy(*out_data,
	       (char *)&rsp->hdr.ProtocolId + le32_to_cpu(rsp->OutputOffset),
	       *plen);
ioctl_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

/*
 * Individual callers to ioctl worker function follow
 */
int
SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid)
{
	int rc;
	struct compress_ioctl fsctl_input;
	char *ret_data = NULL;

	fsctl_input.CompressionState =
			cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SET_COMPRESSION, true /* is_fsctl */,
			(char *)&fsctl_input /* data input */,
			2 /* in data len */, &ret_data /* out data */, NULL);

	cifs_dbg(FYI, "set compression rc %d\n", rc);

	return rc;
}
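
/*
 * SMB2_close - send an SMB2 CLOSE for the given persistent/volatile file id
 * pair, releasing the handle on the server.
 */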
int
SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid)
{
	struct smb2_close_req *req;
	struct smb2_close_rsp *rsp;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	int resp_buftype;
	int rc = 0;

	cifs_dbg(FYI, "Close\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
	if (rc)
		return rc;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
	rsp = (struct smb2_close_rsp *)iov[0].iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
		goto close_exit;
	}

	/* BB FIXME - decode close response, update inode for caching */

close_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}
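
/*
 * validate_buf - sanity check an offset/length pair returned by the server
 * against the size of the SMB2 response frame and a caller-supplied minimum.
 */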
static int
validate_buf(unsigned int offset, unsigned int buffer_length,
	     struct smb2_hdr *hdr, unsigned int min_buf_size)
{
	unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length);
	char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr;
	char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
	char *end_of_buf = begin_of_buf + buffer_length;

	if (buffer_length < min_buf_size) {
		cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
			 buffer_length, min_buf_size);
		return -EINVAL;
	}

	/* check if beyond RFC1001 maximum length */
	if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
		cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
			 buffer_length, smb_len);
		return -EINVAL;
	}

	if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
		cifs_dbg(VFS, "illegal server response, bad offset to data\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
 * Caller must free buffer.
 */
static int
validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
		      struct smb2_hdr *hdr, unsigned int minbufsize,
		      char *data)
{
	char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
	int rc;

	if (!data)
		return -EINVAL;

	rc = validate_buf(offset, buffer_length, hdr, minbufsize);
	if (rc)
		return rc;

	memcpy(data, begin_of_buf, buffer_length);

	return 0;
}
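
/*
 * query_info - common worker for SMB2 QUERY_INFO requests on an open handle;
 * validates the response and copies the returned data into the caller's
 * buffer.
 */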
static int
query_info(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid, u8 info_class,
	   size_t output_len, size_t min_len, void *data)
{
	struct smb2_query_info_req *req;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int resp_buftype;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	cifs_dbg(FYI, "Query Info\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILE;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 4 for rfc1002 length field and 1 for Buffer */
	req->InputBufferOffset =
		cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
	req->OutputBufferLength = cpu_to_le32(output_len);

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
	rsp = (struct smb2_query_info_rsp *)iov[0].iov_base;

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qinf_exit;
	}

	rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
				   le32_to_cpu(rsp->OutputBufferLength),
				   &rsp->hdr, min_len, data);

qinf_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

int
SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
		u64 persistent_fid, u64 volatile_fid,
		struct smb2_file_all_info *data)
{
	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  FILE_ALL_INFORMATION,
			  sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
			  sizeof(struct smb2_file_all_info), data);
}

int
SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
		 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
{
	return query_info(xid, tcon, persistent_fid, volatile_fid,
			  FILE_INTERNAL_INFORMATION,
			  sizeof(struct smb2_file_internal_info),
			  sizeof(struct smb2_file_internal_info), uniqueid);
}

/*
 * This is a no-op for now. We're not really interested in the reply, but
 * rather in the fact that the server sent one and that server->lstrp
 * gets updated.
 *
 * FIXME: maybe we should consider checking that the reply matches request?
 */
static void
smb2_echo_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->callback_data;
	struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf;
	unsigned int credits_received = 1;

	if (mid->mid_state == MID_RESPONSE_RECEIVED)
		credits_received = le16_to_cpu(smb2->hdr.CreditRequest);

	mutex_lock(&server->srv_mutex);
	DeleteMidQEntry(mid);
	mutex_unlock(&server->srv_mutex);
	add_credits(server, credits_received, CIFS_ECHO_OP);
}

int
SMB2_echo(struct TCP_Server_Info *server)
{
	struct smb2_echo_req *req;
	int rc = 0;
	struct kvec iov;
	struct smb_rqst rqst = { .rq_iov = &iov,
				 .rq_nvec = 1 };

	cifs_dbg(FYI, "In echo request\n");

	if (server->tcpStatus == CifsNeedNegotiate) {
		struct list_head *tmp, *tmp2;
		struct cifs_ses *ses;
		struct cifs_tcon *tcon;

		cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
		spin_lock(&cifs_tcp_ses_lock);
		list_for_each(tmp, &server->smb_ses_list) {
			ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
			list_for_each(tmp2, &ses->tcon_list) {
				tcon = list_entry(tmp2, struct cifs_tcon,
						  tcon_list);
				/* add check for persistent handle reconnect */
				if (tcon && tcon->need_reconnect) {
					spin_unlock(&cifs_tcp_ses_lock);
					rc = smb2_reconnect(SMB2_ECHO, tcon);
					spin_lock(&cifs_tcp_ses_lock);
				}
			}
		}
		spin_unlock(&cifs_tcp_ses_lock);
	}

	/* if no session, renegotiate failed above */
	if (server->tcpStatus == CifsNeedNegotiate)
		return -EIO;

	rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
	if (rc)
		return rc;

	req->hdr.CreditRequest = cpu_to_le16(1);

	iov.iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov.iov_len = get_rfc1002_length(req) + 4;

	rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
			     CIFS_ECHO_OP);
	if (rc)
		cifs_dbg(FYI, "Echo request failed: %d\n", rc);

	cifs_small_buf_release(req);
	return rc;
}
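
/*
 * SMB2_flush - ask the server to flush cached data for the given file handle
 * to stable storage.
 */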
int
SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	   u64 volatile_fid)
{
	struct smb2_flush_req *req;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	int resp_buftype;
	int rc = 0;

	cifs_dbg(FYI, "Flush\n");

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
	if (rc)
		return rc;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);

	if (rc != 0)
		cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);

	free_rsp_buf(resp_buftype, iov[0].iov_base);
	return rc;
}

/*
 * To form a chain of read requests, any read requests after the first should
 * have the end_of_chain boolean set to true.
 */
static int
smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms,
		  unsigned int remaining_bytes, int request_type)
{
	int rc = -EACCES;
	struct smb2_read_req *req = NULL;

	rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req);
	if (rc)
		return rc;
	if (io_parms->tcon->ses->server == NULL)
		return -ECONNABORTED;

	req->hdr.ProcessId = cpu_to_le32(io_parms->pid);

	req->PersistentFileId = io_parms->persistent_fid;
	req->VolatileFileId = io_parms->volatile_fid;
	req->ReadChannelInfoOffset = 0; /* reserved */
	req->ReadChannelInfoLength = 0; /* reserved */
	req->Channel = 0; /* reserved */
	req->MinimumCount = 0;
	req->Length = cpu_to_le32(io_parms->length);
	req->Offset = cpu_to_le64(io_parms->offset);

	if (request_type & CHAINED_REQUEST) {
		if (!(request_type & END_OF_CHAIN)) {
			/* 4 for rfc1002 length field */
			req->hdr.NextCommand =
				cpu_to_le32(get_rfc1002_length(req) + 4);
		} else /* END_OF_CHAIN */
			req->hdr.NextCommand = 0;
		if (request_type & RELATED_REQUEST) {
			req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
			/*
			 * Related requests use info from previous read request
			 * in chain.
			 */
			req->hdr.SessionId = 0xFFFFFFFF;
			req->hdr.TreeId = 0xFFFFFFFF;
			req->PersistentFileId = 0xFFFFFFFF;
			req->VolatileFileId = 0xFFFFFFFF;
		}
	}
	if (remaining_bytes > io_parms->length)
		req->RemainingBytes = cpu_to_le32(remaining_bytes);
	else
		req->RemainingBytes = 0;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov[0].iov_len = get_rfc1002_length(req) + 4;
	return rc;
}
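
/*
 * smb2_readv_callback - completion handler for async reads: verify the
 * signature when signing is enabled, account the bytes read, queue the
 * readdata work item and return credits to the server.
 */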
static void
smb2_readv_callback(struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct smb2_hdr *buf = (struct smb2_hdr *)rdata->iov.iov_base;
	unsigned int credits_received = 1;
	struct smb_rqst rqst = { .rq_iov = &rdata->iov,
				 .rq_nvec = 1,
				 .rq_pages = rdata->pages,
				 .rq_npages = rdata->nr_pages,
				 .rq_pagesz = rdata->pagesz,
				 .rq_tailsz = rdata->tailsz };

	cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
		 __func__, mid->mid, mid->mid_state, rdata->result,
		 rdata->bytes);

	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		credits_received = le16_to_cpu(buf->CreditRequest);
		/* result already set, check signature */
		if (server->sign) {
			int rc;

			rc = smb2_verify_signature(&rqst, server);
			if (rc)
				cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
					 rc);
		}
		/* FIXME: should this be counted toward the initiating task? */
		task_io_account_read(rdata->got_bytes);
		cifs_stats_bytes_read(tcon, rdata->got_bytes);
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		rdata->result = -EAGAIN;
		if (server->sign && rdata->got_bytes)
			/* reset bytes number since we can not check a sign */
			rdata->got_bytes = 0;
		/* FIXME: should this be counted toward the initiating task? */
		task_io_account_read(rdata->got_bytes);
		cifs_stats_bytes_read(tcon, rdata->got_bytes);
		break;
	default:
		if (rdata->result != -ENODATA)
			rdata->result = -EIO;
	}

	if (rdata->result)
		cifs_stats_fail_inc(tcon, SMB2_READ_HE);

	queue_work(cifsiod_wq, &rdata->work);
	mutex_lock(&server->srv_mutex);
	DeleteMidQEntry(mid);
	mutex_unlock(&server->srv_mutex);
	add_credits(server, credits_received, 0);
}

/* smb2_async_readv - send an async read, and set up mid to handle result */
int
smb2_async_readv(struct cifs_readdata *rdata)
{
	int rc, flags = 0;
	struct smb2_hdr *buf;
	struct cifs_io_parms io_parms;
	struct smb_rqst rqst = { .rq_iov = &rdata->iov,
				 .rq_nvec = 1 };
	struct TCP_Server_Info *server;

	cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
		 __func__, rdata->offset, rdata->bytes);

	io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
	io_parms.offset = rdata->offset;
	io_parms.length = rdata->bytes;
	io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
	io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
	io_parms.pid = rdata->pid;

	server = io_parms.tcon->ses->server;

	rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0);
	if (rc) {
		if (rc == -EAGAIN && rdata->credits) {
			/* credits was reset by reconnect */
			rdata->credits = 0;
			/* reduce in_flight value since we won't send the req */
			spin_lock(&server->req_lock);
			server->in_flight--;
			spin_unlock(&server->req_lock);
		}
		return rc;
	}

	buf = (struct smb2_hdr *)rdata->iov.iov_base;
	/* 4 for rfc1002 length field */
	rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4;

	if (rdata->credits) {
		buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
						SMB2_MAX_BUFFER_SIZE));
		spin_lock(&server->req_lock);
		server->credits += rdata->credits -
						le16_to_cpu(buf->CreditCharge);
		spin_unlock(&server->req_lock);
		wake_up(&server->request_q);
		flags = CIFS_HAS_CREDITS;
	}

	kref_get(&rdata->refcount);
	rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
			     cifs_readv_receive, smb2_readv_callback,
			     rdata, flags);
	if (rc) {
		kref_put(&rdata->refcount, cifs_readdata_release);
		cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
	}

	cifs_small_buf_release(buf);
	return rc;
}
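
/*
 * SMB2_read - synchronous read: on success *nbytes is set to the number of
 * bytes returned and, if *buf is NULL, the response buffer is handed back
 * to the caller via *buf and *buf_type instead of being copied.
 */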
int
SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
	  unsigned int *nbytes, char **buf, int *buf_type)
{
	int resp_buftype, rc = -EACCES;
	struct smb2_read_rsp *rsp = NULL;
	struct kvec iov[1];

	*nbytes = 0;
	rc = smb2_new_read_req(iov, io_parms, 0, 0);
	if (rc)
		return rc;

	rc = SendReceive2(xid, io_parms->tcon->ses, iov, 1,
			  &resp_buftype, CIFS_LOG_ERROR);

	rsp = (struct smb2_read_rsp *)iov[0].iov_base;

	if (rsp->hdr.Status == STATUS_END_OF_FILE) {
		free_rsp_buf(resp_buftype, iov[0].iov_base);
		return 0;
	}

	if (rc) {
		cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
		cifs_dbg(VFS, "Send error in read = %d\n", rc);
	} else {
		*nbytes = le32_to_cpu(rsp->DataLength);
		if ((*nbytes > CIFS_MAX_MSGSIZE) ||
		    (*nbytes > io_parms->length)) {
			cifs_dbg(FYI, "bad length %d for count %d\n",
				 *nbytes, io_parms->length);
			rc = -EIO;
			*nbytes = 0;
		}
	}

	if (*buf) {
		memcpy(*buf, (char *)&rsp->hdr.ProtocolId + rsp->DataOffset,
		       *nbytes);
		free_rsp_buf(resp_buftype, iov[0].iov_base);
	} else if (resp_buftype != CIFS_NO_BUFFER) {
		*buf = iov[0].iov_base;
		if (resp_buftype == CIFS_SMALL_BUFFER)
			*buf_type = CIFS_SMALL_BUFFER;
		else if (resp_buftype == CIFS_LARGE_BUFFER)
			*buf_type = CIFS_LARGE_BUFFER;
	}
	return rc;
}

/*
 * Check the mid_state and signature on received buffer (if any), and queue the
 * workqueue completion task.
 */
static void
smb2_writev_callback(struct mid_q_entry *mid)
{
	struct cifs_writedata *wdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int written;
	struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
	unsigned int credits_received = 1;

	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		credits_received = le16_to_cpu(rsp->hdr.CreditRequest);
		wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
		if (wdata->result != 0)
			break;

		written = le32_to_cpu(rsp->DataLength);
		/*
		 * Mask off high 16 bits when bytes written as returned
		 * by the server is greater than bytes requested by the
		 * client. OS/2 servers are known to set incorrect
		 * CountHigh values.
		 */
		if (written > wdata->bytes)
			written &= 0xFFFF;

		if (written < wdata->bytes)
			wdata->result = -ENOSPC;
		else
			wdata->bytes = written;
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		wdata->result = -EAGAIN;
		break;
	default:
		wdata->result = -EIO;
		break;
	}

	if (wdata->result)
		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);

	queue_work(cifsiod_wq, &wdata->work);
	mutex_lock(&server->srv_mutex);
	DeleteMidQEntry(mid);
	mutex_unlock(&server->srv_mutex);
	add_credits(tcon->ses->server, credits_received, 0);
}

/* smb2_async_writev - send an async write, and set up mid to handle result */
int
smb2_async_writev(struct cifs_writedata *wdata,
		  void (*release)(struct kref *kref))
{
	int rc = -EACCES, flags = 0;
	struct smb2_write_req *req = NULL;
	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct kvec iov;
	struct smb_rqst rqst;

	rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
	if (rc) {
		if (rc == -EAGAIN && wdata->credits) {
			/* credits was reset by reconnect */
			wdata->credits = 0;
			/* reduce in_flight value since we won't send the req */
			spin_lock(&server->req_lock);
			server->in_flight--;
			spin_unlock(&server->req_lock);
		}
		goto async_writev_out;
	}

	req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);

	req->PersistentFileId = wdata->cfile->fid.persistent_fid;
	req->VolatileFileId = wdata->cfile->fid.volatile_fid;
	req->WriteChannelInfoOffset = 0;
	req->WriteChannelInfoLength = 0;
	req->Channel = 0;
	req->Offset = cpu_to_le64(wdata->offset);
	/* 4 for rfc1002 length field */
	req->DataOffset = cpu_to_le16(
				offsetof(struct smb2_write_req, Buffer) - 4);
	req->RemainingBytes = 0;

	/* 4 for rfc1002 length field and 1 for Buffer */
	iov.iov_len = get_rfc1002_length(req) + 4 - 1;
	iov.iov_base = req;

	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;
	rqst.rq_pages = wdata->pages;
	rqst.rq_npages = wdata->nr_pages;
	rqst.rq_pagesz = wdata->pagesz;
	rqst.rq_tailsz = wdata->tailsz;

	cifs_dbg(FYI, "async write at %llu %u bytes\n",
		 wdata->offset, wdata->bytes);

	req->Length = cpu_to_le32(wdata->bytes);

	inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);

	if (wdata->credits) {
		req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
						    SMB2_MAX_BUFFER_SIZE));
		spin_lock(&server->req_lock);
		server->credits += wdata->credits -
					le16_to_cpu(req->hdr.CreditCharge);
		spin_unlock(&server->req_lock);
		wake_up(&server->request_q);
		flags = CIFS_HAS_CREDITS;
	}

	kref_get(&wdata->refcount);
	rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, wdata,
			     flags);

	if (rc) {
		kref_put(&wdata->refcount, release);
		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
	}

async_writev_out:
	cifs_small_buf_release(req);
	return rc;
}

/*
 * SMB2_write function gets iov pointer to kvec array with n_vec as a length.
 * The length field from io_parms must be at least 1 and indicates a number of
 * elements with data to write that begins with position 1 in iov array. All
 * data length is specified by count.
 */
int
SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
	   unsigned int *nbytes, struct kvec *iov, int n_vec)
{
	int rc = 0;
	struct smb2_write_req *req = NULL;
	struct smb2_write_rsp *rsp = NULL;
	int resp_buftype;

	*nbytes = 0;

	if (n_vec < 1)
		return rc;

	rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
	if (rc)
		return rc;

	if (io_parms->tcon->ses->server == NULL)
		return -ECONNABORTED;

	req->hdr.ProcessId = cpu_to_le32(io_parms->pid);

	req->PersistentFileId = io_parms->persistent_fid;
	req->VolatileFileId = io_parms->volatile_fid;
	req->WriteChannelInfoOffset = 0;
	req->WriteChannelInfoLength = 0;
	req->Channel = 0;
	req->Length = cpu_to_le32(io_parms->length);
	req->Offset = cpu_to_le64(io_parms->offset);
	/* 4 for rfc1002 length field */
	req->DataOffset = cpu_to_le16(
				offsetof(struct smb2_write_req, Buffer) - 4);
	req->RemainingBytes = 0;

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and 1 for Buffer */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

	/* length of entire message including data to be written */
	inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);

	rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
			  &resp_buftype, 0);
	rsp = (struct smb2_write_rsp *)iov[0].iov_base;

	if (rc) {
		cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
		cifs_dbg(VFS, "Send error in write = %d\n", rc);
	} else
		*nbytes = le32_to_cpu(rsp->DataLength);

	free_rsp_buf(resp_buftype, rsp);
	return rc;
}
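
/*
 * num_entries - count the directory entries in a query directory response,
 * stopping at the end of the buffer and remembering the last valid entry.
 */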
static unsigned int
num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
{
	int len;
	unsigned int entrycount = 0;
	unsigned int next_offset = 0;
	FILE_DIRECTORY_INFO *entryptr;

	if (bufstart == NULL)
		return 0;

	entryptr = (FILE_DIRECTORY_INFO *)bufstart;

	while (1) {
		entryptr = (FILE_DIRECTORY_INFO *)
					((char *)entryptr + next_offset);

		if ((char *)entryptr + size > end_of_buf) {
			cifs_dbg(VFS, "malformed search entry would overflow\n");
			break;
		}

		len = le32_to_cpu(entryptr->FileNameLength);
		if ((char *)entryptr + len + size > end_of_buf) {
			cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
				 end_of_buf);
			break;
		}

		*lastentry = (char *)entryptr;
		entrycount++;

		next_offset = le32_to_cpu(entryptr->NextEntryOffset);
		if (!next_offset)
			break;
	}

	return entrycount;
}

/*
 * Readdir/FindFirst
 */
int
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid, int index,
		     struct cifs_search_info *srch_inf)
{
	struct smb2_query_directory_req *req;
	struct smb2_query_directory_rsp *rsp = NULL;
	struct kvec iov[2];
	int rc = 0;
	int len;
	int resp_buftype = CIFS_NO_BUFFER;
	unsigned char *bufptr;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	__le16 asteriks = cpu_to_le16('*');
	char *end_of_smb;
	unsigned int output_size = CIFSMaxBufSize;
	size_t info_buf_size;

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
	if (rc)
		return rc;

	switch (srch_inf->info_level) {
	case SMB_FIND_FILE_DIRECTORY_INFO:
		req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
		info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
		break;
	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
		req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
		info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
		break;
	default:
		cifs_dbg(VFS, "info level %u isn't supported\n",
			 srch_inf->info_level);
		rc = -EINVAL;
		goto qdir_exit;
	}

	req->FileIndex = cpu_to_le32(index);
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	len = 0x2;
	bufptr = req->Buffer;
	memcpy(bufptr, &asteriks, len);

	req->FileNameOffset =
		cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
	req->FileNameLength = cpu_to_le16(len);
	/*
	 * BB could be 30 bytes or so longer if we used SMB2 specific
	 * buffer lengths, but this is safe and close enough.
	 */
	output_size = min_t(unsigned int, output_size, server->maxBuf);
	output_size = min_t(unsigned int, output_size, 2 << 15);
	req->OutputBufferLength = cpu_to_le32(output_size);

	iov[0].iov_base = (char *)req;
	/* 4 for RFC1001 length and 1 for Buffer */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;

	iov[1].iov_base = (char *)(req->Buffer);
	iov[1].iov_len = len;

	inc_rfc1001_len(req, len - 1 /* Buffer */);

	rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
	rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;

	if (rc) {
		if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
			srch_inf->endOfSearch = true;
			rc = 0;
		}
		cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
		goto qdir_exit;
	}

	rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
			  le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
			  info_buf_size);
	if (rc)
		goto qdir_exit;

	srch_inf->unicode = true;

	if (srch_inf->ntwrk_buf_start) {
		if (srch_inf->smallBuf)
			cifs_small_buf_release(srch_inf->ntwrk_buf_start);
		else
			cifs_buf_release(srch_inf->ntwrk_buf_start);
	}
	srch_inf->ntwrk_buf_start = (char *)rsp;
	srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
		(char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
	/* 4 for rfc1002 length field */
	end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
	srch_inf->entries_in_buffer =
			num_entries(srch_inf->srch_entries_start, end_of_smb,
				    &srch_inf->last_entry, info_buf_size);
	srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
	cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
		 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
		 srch_inf->srch_entries_start, srch_inf->last_entry);
	if (resp_buftype == CIFS_LARGE_BUFFER)
		srch_inf->smallBuf = false;
	else if (resp_buftype == CIFS_SMALL_BUFFER)
		srch_inf->smallBuf = true;
	else
		cifs_dbg(VFS, "illegal search buffer type\n");

	return rc;

qdir_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}
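
/*
 * send_set_info - common worker for SMB2 SET_INFO requests: copies the first
 * data buffer into the request and chains any additional buffers as extra
 * iovecs before sending.
 */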
static int
send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class,
	      unsigned int num, void **data, unsigned int *size)
{
	struct smb2_set_info_req *req;
	struct smb2_set_info_rsp *rsp = NULL;
	struct kvec *iov;
	int rc = 0;
	int resp_buftype;
	unsigned int i;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	if (!num)
		return -EINVAL;

	iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL);
	if (!iov)
		return -ENOMEM;

	rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
	if (rc) {
		kfree(iov);
		return rc;
	}

	req->hdr.ProcessId = cpu_to_le32(pid);

	req->InfoType = SMB2_O_INFO_FILE;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	/* 4 for RFC1001 length and 1 for Buffer */
	req->BufferOffset =
			cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
	req->BufferLength = cpu_to_le32(*size);

	inc_rfc1001_len(req, *size - 1 /* Buffer */);

	memcpy(req->Buffer, *data, *size);

	iov[0].iov_base = (char *)req;
	/* 4 for RFC1001 length */
	iov[0].iov_len = get_rfc1002_length(req) + 4;

	for (i = 1; i < num; i++) {
		inc_rfc1001_len(req, size[i]);
		le32_add_cpu(&req->BufferLength, size[i]);
		iov[i].iov_base = (char *)data[i];
		iov[i].iov_len = size[i];
	}

	rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
	rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;

	if (rc != 0)
		cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);

	free_rsp_buf(resp_buftype, rsp);
	kfree(iov);
	return rc;
}

int
SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
	    u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
	struct smb2_file_rename_info info;
	void **data;
	unsigned int size[2];
	int rc;
	int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));

	data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
				  /* 0 = fail if target already exists */
	info.RootDirectory = 0; /* MBZ for network ops (why does spec say?) */
	info.FileNameLength = cpu_to_le32(len);

	data[0] = &info;
	size[0] = sizeof(struct smb2_file_rename_info);

	data[1] = target_file;
	size[1] = len + 2 /* null */;

	rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
			   current->tgid, FILE_RENAME_INFORMATION, 2, data,
			   size);
	kfree(data);
	return rc;
}

int
SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid)
{
	__u8 delete_pending = 1;
	void *data;
	unsigned int size;

	data = &delete_pending;
	size = 1; /* sizeof __u8 */

	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			     current->tgid, FILE_DISPOSITION_INFORMATION, 1,
			     &data, &size);
}

int
SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
		  u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
	struct smb2_file_link_info info;
	void **data;
	unsigned int size[2];
	int rc;
	int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));

	data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
				  /* 0 = fail if link already exists */
	info.RootDirectory = 0; /* MBZ for network ops (why does spec say?) */
	info.FileNameLength = cpu_to_le32(len);

	data[0] = &info;
	size[0] = sizeof(struct smb2_file_link_info);

	data[1] = target_file;
	size[1] = len + 2 /* null */;

	rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
			   current->tgid, FILE_LINK_INFORMATION, 2, data, size);
	kfree(data);
	return rc;
}

int
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	     u64 volatile_fid, u32 pid, __le64 *eof, bool is_falloc)
{
	struct smb2_file_eof_info info;
	void *data;
	unsigned int size;

	info.EndOfFile = *eof;

	data = &info;
	size = sizeof(struct smb2_file_eof_info);

	if (is_falloc)
		return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			pid, FILE_ALLOCATION_INFORMATION, 1, &data, &size);
	else
		return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			pid, FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
}

int
SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
{
	unsigned int size;
	size = sizeof(FILE_BASIC_INFO);
	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			     current->tgid, FILE_BASIC_INFORMATION, 1,
			     (void **)&buf, &size);
}
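
/*
 * SMB2_oplock_break - acknowledge an oplock break from the server by sending
 * the new oplock level for the given file handle.
 */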
int
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
		  const u64 persistent_fid, const u64 volatile_fid,
		  __u8 oplock_level)
{
	int rc;
	struct smb2_oplock_break *req = NULL;

	cifs_dbg(FYI, "SMB2_oplock_break\n");
	rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
	if (rc)
		return rc;

	req->VolatileFid = volatile_fid;
	req->PersistentFid = persistent_fid;
	req->OplockLevel = oplock_level;
	req->hdr.CreditRequest = cpu_to_le16(1);

	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
	/* SMB2 buffer freed by function above */

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
	}

	return rc;
}

static void
copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
			struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
			  le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
	kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
	kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
	return;
}
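
/*
 * build_qfs_info_req - build an SMB2 QUERY_INFO request for filesystem
 * information at the given level and set up the iovec for sending.
 */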
static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
		   int outbuf_len, u64 persistent_fid, u64 volatile_fid)
{
	int rc;
	struct smb2_query_info_req *req;

	cifs_dbg(FYI, "Query FSInfo level %d\n", level);

	if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
		return -EIO;

	rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILESYSTEM;
	req->FileInfoClass = level;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 4 for rfc1002 length field and 1 for pad */
	req->InputBufferOffset =
			cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
	req->OutputBufferLength = cpu_to_le32(
		outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);

	iov->iov_base = (char *)req;
	/* 4 for rfc1002 length field */
	iov->iov_len = get_rfc1002_length(req) + 4;
	return 0;
}

int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct smb2_fs_full_size_info *info = NULL;

	rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
				sizeof(struct smb2_fs_full_size_info),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)iov.iov_base;

	info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ +
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr);
	rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
			  le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
			  sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		copy_fs_info_to_kstatfs(info, fsdata);

qfsinf_exit:
	free_rsp_buf(resp_buftype, iov.iov_base);
	return rc;
}

int
SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, int level)
{
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	int rc = 0;
	int resp_buftype, max_len, min_len;
	struct cifs_ses *ses = tcon->ses;
	unsigned int rsp_len, offset;

	if (level == FS_DEVICE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
		min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
	} else if (level == FS_ATTRIBUTE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
		min_len = MIN_FS_ATTR_INFO_SIZE;
	} else if (level == FS_SECTOR_SIZE_INFORMATION) {
		max_len = sizeof(struct smb3_fs_ss_info);
		min_len = sizeof(struct smb3_fs_ss_info);
	} else {
		cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
		return -EINVAL;
	}

	rc = build_qfs_info_req(&iov, tcon, level, max_len,
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsattr_exit;
	}
	rsp = (struct smb2_query_info_rsp *)iov.iov_base;

	rsp_len = le32_to_cpu(rsp->OutputBufferLength);
	offset = le16_to_cpu(rsp->OutputBufferOffset);
	rc = validate_buf(offset, rsp_len, &rsp->hdr, min_len);
	if (rc)
		goto qfsattr_exit;

	if (level == FS_ATTRIBUTE_INFORMATION)
		memcpy(&tcon->fsAttrInfo, 4 /* RFC1001 len */ + offset
			+ (char *)&rsp->hdr, min_t(unsigned int,
			rsp_len, max_len));
	else if (level == FS_DEVICE_INFORMATION)
		memcpy(&tcon->fsDevInfo, 4 /* RFC1001 len */ + offset
			+ (char *)&rsp->hdr, sizeof(FILE_SYSTEM_DEVICE_INFO));
	else if (level == FS_SECTOR_SIZE_INFORMATION) {
		struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
			(4 /* RFC1001 len */ + offset + (char *)&rsp->hdr);
		tcon->ss_flags = le32_to_cpu(ss_info->Flags);
		tcon->perf_sector_size =
			le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
	}

qfsattr_exit:
	free_rsp_buf(resp_buftype, iov.iov_base);
	return rc;
}
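
/*
 * smb2_lockv - send an SMB2 LOCK request carrying num_lock lock elements for
 * the given file handle on behalf of pid.
 */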
int
smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
	   const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	   const __u32 num_lock, struct smb2_lock_element *buf)
{
	int rc = 0;
	struct smb2_lock_req *req = NULL;
	struct kvec iov[2];
	int resp_buf_type;
	unsigned int count;

	cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);

	rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
	if (rc)
		return rc;

	req->hdr.ProcessId = cpu_to_le32(pid);
	req->LockCount = cpu_to_le16(num_lock);

	req->PersistentFileId = persist_fid;
	req->VolatileFileId = volatile_fid;

	count = num_lock * sizeof(struct smb2_lock_element);
	inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));

	iov[0].iov_base = (char *)req;
	/* 4 for rfc1002 length field and count for all locks */
	iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
	iov[1].iov_base = (char *)buf;
	iov[1].iov_len = count;

	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
	rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
	if (rc) {
		cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
		cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
	}

	return rc;
}

int
SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
	  const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	  const __u64 length, const __u64 offset, const __u32 lock_flags,
	  const bool wait)
{
	struct smb2_lock_element lock;

	lock.Offset = cpu_to_le64(offset);
	lock.Length = cpu_to_le64(length);
	lock.Flags = cpu_to_le32(lock_flags);
	if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
		lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);

	return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
}
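
/*
 * SMB2_lease_break - acknowledge a lease break by sending the lease key and
 * the new lease state back to the server.
 */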
int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
		 __u8 *lease_key, const __le32 lease_state)
{
	int rc;
	struct smb2_lease_ack *req = NULL;

	cifs_dbg(FYI, "SMB2_lease_break\n");
	rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
	if (rc)
		return rc;

	req->hdr.CreditRequest = cpu_to_le16(1);
	req->StructureSize = cpu_to_le16(36);
	inc_rfc1001_len(req, 12);

	memcpy(req->LeaseKey, lease_key, 16);
	req->LeaseState = lease_state;

	rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
	/* SMB2 buffer freed by function above */

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
	}

	return rc;
}