xprtsock.c

  1. /*
  2. * linux/net/sunrpc/xprtsock.c
  3. *
  4. * Client-side transport implementation for sockets.
  5. *
  6. * TCP callback races fixes (C) 1998 Red Hat
  7. * TCP send fixes (C) 1998 Red Hat
  8. * TCP NFS related read + write fixes
  9. * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
  10. *
  11. * Rewrite of large parts of the code in order to stabilize TCP stuff.
  12. * Fix behaviour when socket buffer is full.
  13. * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
  14. *
  15. * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
  16. *
  17. * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
  18. * <gilles.quillard@bull.net>
  19. */
  20. #include <linux/types.h>
  21. #include <linux/string.h>
  22. #include <linux/slab.h>
  23. #include <linux/module.h>
  24. #include <linux/capability.h>
  25. #include <linux/pagemap.h>
  26. #include <linux/errno.h>
  27. #include <linux/socket.h>
  28. #include <linux/in.h>
  29. #include <linux/net.h>
  30. #include <linux/mm.h>
  31. #include <linux/un.h>
  32. #include <linux/udp.h>
  33. #include <linux/tcp.h>
  34. #include <linux/sunrpc/clnt.h>
  35. #include <linux/sunrpc/addr.h>
  36. #include <linux/sunrpc/sched.h>
  37. #include <linux/sunrpc/svcsock.h>
  38. #include <linux/sunrpc/xprtsock.h>
  39. #include <linux/file.h>
  40. #ifdef CONFIG_SUNRPC_BACKCHANNEL
  41. #include <linux/sunrpc/bc_xprt.h>
  42. #endif
  43. #include <net/sock.h>
  44. #include <net/checksum.h>
  45. #include <net/udp.h>
  46. #include <net/tcp.h>
  47. #include <trace/events/sunrpc.h>
  48. #include "sunrpc.h"
  49. static void xs_close(struct rpc_xprt *xprt);
  50. /*
  51. * xprtsock tunables
  52. */
  53. static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
  54. static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
  55. static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
  56. static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
  57. static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
  58. #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
  59. #define XS_TCP_LINGER_TO (15U * HZ)
  60. static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
  61. /*
  62. * We can register our own files under /proc/sys/sunrpc by
  63. * calling register_sysctl_table() again. The files in that
  64. * directory become the union of all files registered there.
  65. *
  66. * We simply need to make sure that we don't collide with
  67. * someone else's file names!
  68. */
  69. static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
  70. static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
  71. static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
  72. static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
  73. static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
  74. static struct ctl_table_header *sunrpc_table_header;
  75. /*
  76. * FIXME: changing the UDP slot table size should also resize the UDP
  77. * socket buffers for existing UDP transports
  78. */
  79. static struct ctl_table xs_tunables_table[] = {
  80. {
  81. .procname = "udp_slot_table_entries",
  82. .data = &xprt_udp_slot_table_entries,
  83. .maxlen = sizeof(unsigned int),
  84. .mode = 0644,
  85. .proc_handler = proc_dointvec_minmax,
  86. .extra1 = &min_slot_table_size,
  87. .extra2 = &max_slot_table_size
  88. },
  89. {
  90. .procname = "tcp_slot_table_entries",
  91. .data = &xprt_tcp_slot_table_entries,
  92. .maxlen = sizeof(unsigned int),
  93. .mode = 0644,
  94. .proc_handler = proc_dointvec_minmax,
  95. .extra1 = &min_slot_table_size,
  96. .extra2 = &max_slot_table_size
  97. },
  98. {
  99. .procname = "tcp_max_slot_table_entries",
  100. .data = &xprt_max_tcp_slot_table_entries,
  101. .maxlen = sizeof(unsigned int),
  102. .mode = 0644,
  103. .proc_handler = proc_dointvec_minmax,
  104. .extra1 = &min_slot_table_size,
  105. .extra2 = &max_tcp_slot_table_limit
  106. },
  107. {
  108. .procname = "min_resvport",
  109. .data = &xprt_min_resvport,
  110. .maxlen = sizeof(unsigned int),
  111. .mode = 0644,
  112. .proc_handler = proc_dointvec_minmax,
  113. .extra1 = &xprt_min_resvport_limit,
  114. .extra2 = &xprt_max_resvport_limit
  115. },
  116. {
  117. .procname = "max_resvport",
  118. .data = &xprt_max_resvport,
  119. .maxlen = sizeof(unsigned int),
  120. .mode = 0644,
  121. .proc_handler = proc_dointvec_minmax,
  122. .extra1 = &xprt_min_resvport_limit,
  123. .extra2 = &xprt_max_resvport_limit
  124. },
  125. {
  126. .procname = "tcp_fin_timeout",
  127. .data = &xs_tcp_fin_timeout,
  128. .maxlen = sizeof(xs_tcp_fin_timeout),
  129. .mode = 0644,
  130. .proc_handler = proc_dointvec_jiffies,
  131. },
  132. { },
  133. };
  134. static struct ctl_table sunrpc_table[] = {
  135. {
  136. .procname = "sunrpc",
  137. .mode = 0555,
  138. .child = xs_tunables_table
  139. },
  140. { },
  141. };
  142. #endif
  143. /*
  144. * Wait duration for a reply from the RPC portmapper.
  145. */
  146. #define XS_BIND_TO (60U * HZ)
  147. /*
  148. * Delay if a UDP socket connect error occurs. This is most likely some
  149. * kind of resource problem on the local host.
  150. */
  151. #define XS_UDP_REEST_TO (2U * HZ)
  152. /*
  153. * The reestablish timeout allows clients to delay for a bit before attempting
  154. * to reconnect to a server that just dropped our connection.
  155. *
  156. * We implement an exponential backoff when trying to reestablish a TCP
  157. * transport connection with the server. Some servers like to drop a TCP
  158. * connection when they are overworked, so we start with a short timeout and
  159. * increase over time if the server is down or not responding.
  160. */
  161. #define XS_TCP_INIT_REEST_TO (3U * HZ)
  162. #define XS_TCP_MAX_REEST_TO (5U * 60 * HZ)
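/*
 * Each reconnect attempt on an existing socket doubles
 * xprt->reestablish_timeout, so the effective backoff runs
 * 3s, 6s, 12s, ... and is capped at XS_TCP_MAX_REEST_TO.
 */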
  163. /*
  164. * TCP idle timeout; client drops the transport socket if it is idle
  165. * for this long. Note that we also timeout UDP sockets to prevent
  166. * holding port numbers when there is no RPC traffic.
  167. */
  168. #define XS_IDLE_DISC_TO (5U * 60 * HZ)
  169. #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
  170. # undef RPC_DEBUG_DATA
  171. # define RPCDBG_FACILITY RPCDBG_TRANS
  172. #endif
  173. #ifdef RPC_DEBUG_DATA
  174. static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
  175. {
  176. u8 *buf = (u8 *) packet;
  177. int j;
  178. dprintk("RPC: %s\n", msg);
  179. for (j = 0; j < count && j < 128; j += 4) {
  180. if (!(j & 31)) {
  181. if (j)
  182. dprintk("\n");
  183. dprintk("0x%04x ", j);
  184. }
  185. dprintk("%02x%02x%02x%02x ",
  186. buf[j], buf[j+1], buf[j+2], buf[j+3]);
  187. }
  188. dprintk("\n");
  189. }
  190. #else
  191. static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
  192. {
  193. /* NOP */
  194. }
  195. #endif
  196. static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
  197. {
  198. return (struct rpc_xprt *) sk->sk_user_data;
  199. }
  200. static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
  201. {
  202. return (struct sockaddr *) &xprt->addr;
  203. }
  204. static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
  205. {
  206. return (struct sockaddr_un *) &xprt->addr;
  207. }
  208. static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
  209. {
  210. return (struct sockaddr_in *) &xprt->addr;
  211. }
  212. static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
  213. {
  214. return (struct sockaddr_in6 *) &xprt->addr;
  215. }
  216. static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
  217. {
  218. struct sockaddr *sap = xs_addr(xprt);
  219. struct sockaddr_in6 *sin6;
  220. struct sockaddr_in *sin;
  221. struct sockaddr_un *sun;
  222. char buf[128];
  223. switch (sap->sa_family) {
  224. case AF_LOCAL:
  225. sun = xs_addr_un(xprt);
  226. strlcpy(buf, sun->sun_path, sizeof(buf));
  227. xprt->address_strings[RPC_DISPLAY_ADDR] =
  228. kstrdup(buf, GFP_KERNEL);
  229. break;
  230. case AF_INET:
  231. (void)rpc_ntop(sap, buf, sizeof(buf));
  232. xprt->address_strings[RPC_DISPLAY_ADDR] =
  233. kstrdup(buf, GFP_KERNEL);
  234. sin = xs_addr_in(xprt);
  235. snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
  236. break;
  237. case AF_INET6:
  238. (void)rpc_ntop(sap, buf, sizeof(buf));
  239. xprt->address_strings[RPC_DISPLAY_ADDR] =
  240. kstrdup(buf, GFP_KERNEL);
  241. sin6 = xs_addr_in6(xprt);
  242. snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
  243. break;
  244. default:
  245. BUG();
  246. }
  247. xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
  248. }
  249. static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
  250. {
  251. struct sockaddr *sap = xs_addr(xprt);
  252. char buf[128];
  253. snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
  254. xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
  255. snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
  256. xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
  257. }
  258. static void xs_format_peer_addresses(struct rpc_xprt *xprt,
  259. const char *protocol,
  260. const char *netid)
  261. {
  262. xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
  263. xprt->address_strings[RPC_DISPLAY_NETID] = netid;
  264. xs_format_common_peer_addresses(xprt);
  265. xs_format_common_peer_ports(xprt);
  266. }
  267. static void xs_update_peer_port(struct rpc_xprt *xprt)
  268. {
  269. kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
  270. kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
  271. xs_format_common_peer_ports(xprt);
  272. }
  273. static void xs_free_peer_addresses(struct rpc_xprt *xprt)
  274. {
  275. unsigned int i;
  276. for (i = 0; i < RPC_DISPLAY_MAX; i++)
  277. switch (i) {
  278. case RPC_DISPLAY_PROTO:
  279. case RPC_DISPLAY_NETID:
  280. continue;
  281. default:
  282. kfree(xprt->address_strings[i]);
  283. }
  284. }
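/*
 * All socket sends are non-blocking (MSG_DONTWAIT) and must not raise
 * SIGPIPE if the peer has already closed the connection (MSG_NOSIGNAL).
 */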
  285. #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
  286. static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
  287. {
  288. struct msghdr msg = {
  289. .msg_name = addr,
  290. .msg_namelen = addrlen,
  291. .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
  292. };
  293. struct kvec iov = {
  294. .iov_base = vec->iov_base + base,
  295. .iov_len = vec->iov_len - base,
  296. };
  297. if (iov.iov_len != 0)
  298. return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
  299. return kernel_sendmsg(sock, &msg, NULL, 0, 0);
  300. }
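/*
 * Send the page-array portion of an xdr_buf. When @zerocopy is safe we
 * use the socket's sendpage() op to avoid copying; otherwise we fall
 * back to sock_no_sendpage(), which copies the data. Bytes successfully
 * queued are accumulated in @sent_p.
 */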
  301. static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p)
  302. {
  303. ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
  304. int offset, size_t size, int flags);
  305. struct page **ppage;
  306. unsigned int remainder;
  307. int err;
  308. remainder = xdr->page_len - base;
  309. base += xdr->page_base;
  310. ppage = xdr->pages + (base >> PAGE_SHIFT);
  311. base &= ~PAGE_MASK;
  312. do_sendpage = sock->ops->sendpage;
  313. if (!zerocopy)
  314. do_sendpage = sock_no_sendpage;
  315. for(;;) {
  316. unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
  317. int flags = XS_SENDMSG_FLAGS;
  318. remainder -= len;
  319. if (remainder != 0 || more)
  320. flags |= MSG_MORE;
  321. err = do_sendpage(sock, *ppage, base, len, flags);
  322. if (remainder == 0 || err != len)
  323. break;
  324. *sent_p += err;
  325. ppage++;
  326. base = 0;
  327. }
  328. if (err > 0) {
  329. *sent_p += err;
  330. err = 0;
  331. }
  332. return err;
  333. }
  334. /**
  335. * xs_sendpages - write pages directly to a socket
  336. * @sock: socket to send on
  337. * @addr: UDP only -- address of destination
  338. * @addrlen: UDP only -- length of destination address
  339. * @xdr: buffer containing this request
  340. * @base: starting position in the buffer
  341. * @zerocopy: true if it is safe to use sendpage()
  342. * @sent_p: return the total number of bytes successfully queued for sending
  343. *
  344. */
  345. static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p)
  346. {
  347. unsigned int remainder = xdr->len - base;
  348. int err = 0;
  349. int sent = 0;
  350. if (unlikely(!sock))
  351. return -ENOTSOCK;
  352. clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
  353. if (base != 0) {
  354. addr = NULL;
  355. addrlen = 0;
  356. }
  357. if (base < xdr->head[0].iov_len || addr != NULL) {
  358. unsigned int len = xdr->head[0].iov_len - base;
  359. remainder -= len;
  360. err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
  361. if (remainder == 0 || err != len)
  362. goto out;
  363. *sent_p += err;
  364. base = 0;
  365. } else
  366. base -= xdr->head[0].iov_len;
  367. if (base < xdr->page_len) {
  368. unsigned int len = xdr->page_len - base;
  369. remainder -= len;
  370. err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent);
  371. *sent_p += sent;
  372. if (remainder == 0 || sent != len)
  373. goto out;
  374. base = 0;
  375. } else
  376. base -= xdr->page_len;
  377. if (base >= xdr->tail[0].iov_len)
  378. return 0;
  379. err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
  380. out:
  381. if (err > 0) {
  382. *sent_p += err;
  383. err = 0;
  384. }
  385. return err;
  386. }
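/*
 * rpc_action run when a task that slept in xs_nospace() is woken up:
 * undo the write-pending accounting and clear the async NOSPACE flag.
 */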
  387. static void xs_nospace_callback(struct rpc_task *task)
  388. {
  389. struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
  390. transport->inet->sk_write_pending--;
  391. clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
  392. }
  393. /**
  394. * xs_nospace - place task on wait queue if transmit was incomplete
  395. * @task: task to put to sleep
  396. *
  397. */
  398. static int xs_nospace(struct rpc_task *task)
  399. {
  400. struct rpc_rqst *req = task->tk_rqstp;
  401. struct rpc_xprt *xprt = req->rq_xprt;
  402. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  403. struct sock *sk = transport->inet;
  404. int ret = -EAGAIN;
  405. dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
  406. task->tk_pid, req->rq_slen - req->rq_bytes_sent,
  407. req->rq_slen);
  408. /* Protect against races with write_space */
  409. spin_lock_bh(&xprt->transport_lock);
  410. /* Don't race with disconnect */
  411. if (xprt_connected(xprt)) {
  412. if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
  413. /*
  414. * Notify TCP that we're limited by the application
  415. * window size
  416. */
  417. set_bit(SOCK_NOSPACE, &transport->sock->flags);
  418. sk->sk_write_pending++;
  419. /* ...and wait for more buffer space */
  420. xprt_wait_for_buffer_space(task, xs_nospace_callback);
  421. }
  422. } else {
  423. clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
  424. ret = -ENOTCONN;
  425. }
  426. spin_unlock_bh(&xprt->transport_lock);
  427. /* Race breaker in case memory is freed before above code is called */
  428. sk->sk_write_space(sk);
  429. return ret;
  430. }
  431. /*
  432. * Construct a stream transport record marker in @buf.
  433. */
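/*
 * The marker occupies the first four bytes of buf->head[0]: the top bit
 * flags the final fragment and the low 31 bits carry the fragment
 * length, per the RPC record marking scheme used over stream transports.
 */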
  434. static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
  435. {
  436. u32 reclen = buf->len - sizeof(rpc_fraghdr);
  437. rpc_fraghdr *base = buf->head[0].iov_base;
  438. *base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
  439. }
  440. /**
  441. * xs_local_send_request - write an RPC request to an AF_LOCAL socket
  442. * @task: RPC task that manages the state of an RPC request
  443. *
  444. * Return values:
  445. * 0: The request has been sent
  446. * EAGAIN: The socket was blocked, please call again later to
  447. * complete the request
  448. * ENOTCONN: Caller needs to invoke connect logic then call again
  449. * other: Some other error occurred, the request was not sent
  450. */
  451. static int xs_local_send_request(struct rpc_task *task)
  452. {
  453. struct rpc_rqst *req = task->tk_rqstp;
  454. struct rpc_xprt *xprt = req->rq_xprt;
  455. struct sock_xprt *transport =
  456. container_of(xprt, struct sock_xprt, xprt);
  457. struct xdr_buf *xdr = &req->rq_snd_buf;
  458. int status;
  459. int sent = 0;
  460. xs_encode_stream_record_marker(&req->rq_snd_buf);
  461. xs_pktdump("packet data:",
  462. req->rq_svec->iov_base, req->rq_svec->iov_len);
  463. status = xs_sendpages(transport->sock, NULL, 0, xdr, req->rq_bytes_sent,
  464. true, &sent);
  465. dprintk("RPC: %s(%u) = %d\n",
  466. __func__, xdr->len - req->rq_bytes_sent, status);
  467. if (status == -EAGAIN && sock_writeable(transport->inet))
  468. status = -ENOBUFS;
  469. if (likely(sent > 0) || status == 0) {
  470. req->rq_bytes_sent += sent;
  471. req->rq_xmit_bytes_sent += sent;
  472. if (likely(req->rq_bytes_sent >= req->rq_slen)) {
  473. req->rq_bytes_sent = 0;
  474. return 0;
  475. }
  476. status = -EAGAIN;
  477. }
  478. switch (status) {
  479. case -ENOBUFS:
  480. break;
  481. case -EAGAIN:
  482. status = xs_nospace(task);
  483. break;
  484. default:
  485. dprintk("RPC: sendmsg returned unrecognized error %d\n",
  486. -status);
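/* fall through */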
  487. case -EPIPE:
  488. xs_close(xprt);
  489. status = -ENOTCONN;
  490. }
  491. return status;
  492. }
  493. /**
  494. * xs_udp_send_request - write an RPC request to a UDP socket
  495. * @task: address of RPC task that manages the state of an RPC request
  496. *
  497. * Return values:
  498. * 0: The request has been sent
  499. * EAGAIN: The socket was blocked, please call again later to
  500. * complete the request
  501. * ENOTCONN: Caller needs to invoke connect logic then call again
  502. * other: Some other error occurred, the request was not sent
  503. */
  504. static int xs_udp_send_request(struct rpc_task *task)
  505. {
  506. struct rpc_rqst *req = task->tk_rqstp;
  507. struct rpc_xprt *xprt = req->rq_xprt;
  508. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  509. struct xdr_buf *xdr = &req->rq_snd_buf;
  510. int sent = 0;
  511. int status;
  512. xs_pktdump("packet data:",
  513. req->rq_svec->iov_base,
  514. req->rq_svec->iov_len);
  515. if (!xprt_bound(xprt))
  516. return -ENOTCONN;
  517. status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
  518. xdr, req->rq_bytes_sent, true, &sent);
  519. dprintk("RPC: xs_udp_send_request(%u) = %d\n",
  520. xdr->len - req->rq_bytes_sent, status);
  521. /* firewall is blocking us, don't return -EAGAIN or we end up looping */
  522. if (status == -EPERM)
  523. goto process_status;
  524. if (status == -EAGAIN && sock_writeable(transport->inet))
  525. status = -ENOBUFS;
  526. if (sent > 0 || status == 0) {
  527. req->rq_xmit_bytes_sent += sent;
  528. if (sent >= req->rq_slen)
  529. return 0;
  530. /* Still some bytes left; set up for a retry later. */
  531. status = -EAGAIN;
  532. }
  533. process_status:
  534. switch (status) {
  535. case -ENOTSOCK:
  536. status = -ENOTCONN;
  537. /* Should we call xs_close() here? */
  538. break;
  539. case -EAGAIN:
  540. status = xs_nospace(task);
  541. break;
  542. default:
  543. dprintk("RPC: sendmsg returned unrecognized error %d\n",
  544. -status);
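/* fall through */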
  545. case -ENETUNREACH:
  546. case -ENOBUFS:
  547. case -EPIPE:
  548. case -ECONNREFUSED:
  549. case -EPERM:
  550. /* When the server has died, an ICMP port unreachable message
  551. * prompts ECONNREFUSED. */
  552. clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
  553. }
  554. return status;
  555. }
  556. /**
  557. * xs_tcp_send_request - write an RPC request to a TCP socket
  558. * @task: address of RPC task that manages the state of an RPC request
  559. *
  560. * Return values:
  561. * 0: The request has been sent
  562. * EAGAIN: The socket was blocked, please call again later to
  563. * complete the request
  564. * ENOTCONN: Caller needs to invoke connect logic then call again
  565. * other: Some other error occurred, the request was not sent
  566. *
  567. * XXX: In the case of soft timeouts, should we eventually give up
  568. * if sendmsg is not able to make progress?
  569. */
  570. static int xs_tcp_send_request(struct rpc_task *task)
  571. {
  572. struct rpc_rqst *req = task->tk_rqstp;
  573. struct rpc_xprt *xprt = req->rq_xprt;
  574. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  575. struct xdr_buf *xdr = &req->rq_snd_buf;
  576. bool zerocopy = true;
  577. int status;
  578. int sent;
  579. xs_encode_stream_record_marker(&req->rq_snd_buf);
  580. xs_pktdump("packet data:",
  581. req->rq_svec->iov_base,
  582. req->rq_svec->iov_len);
  583. /* Don't use zero copy if this is a resend. If the RPC call
  584. * completes while the socket holds a reference to the pages,
  585. * then we may end up resending corrupted data.
  586. */
  587. if (task->tk_flags & RPC_TASK_SENT)
  588. zerocopy = false;
  589. /* Continue transmitting the packet/record. We must be careful
  590. * to cope with writespace callbacks arriving _after_ we have
  591. * called sendmsg(). */
  592. while (1) {
  593. sent = 0;
  594. status = xs_sendpages(transport->sock, NULL, 0, xdr,
  595. req->rq_bytes_sent, zerocopy, &sent);
  596. dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
  597. xdr->len - req->rq_bytes_sent, status);
  598. /* If we've sent the entire packet, immediately
  599. * reset the count of bytes sent. */
  600. req->rq_bytes_sent += sent;
  601. req->rq_xmit_bytes_sent += sent;
  602. if (likely(req->rq_bytes_sent >= req->rq_slen)) {
  603. req->rq_bytes_sent = 0;
  604. return 0;
  605. }
  606. if (status < 0)
  607. break;
  608. if (sent == 0) {
  609. status = -EAGAIN;
  610. break;
  611. }
  612. }
  613. if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
  614. status = -ENOBUFS;
  615. switch (status) {
  616. case -ENOTSOCK:
  617. status = -ENOTCONN;
  618. /* Should we call xs_close() here? */
  619. break;
  620. case -EAGAIN:
  621. status = xs_nospace(task);
  622. break;
  623. default:
  624. dprintk("RPC: sendmsg returned unrecognized error %d\n",
  625. -status);
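/* fall through */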
  626. case -ECONNRESET:
  627. case -ECONNREFUSED:
  628. case -ENOTCONN:
  629. case -EADDRINUSE:
  630. case -ENOBUFS:
  631. case -EPIPE:
  632. clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
  633. }
  634. return status;
  635. }
  636. /**
  637. * xs_tcp_release_xprt - clean up after a tcp transmission
  638. * @xprt: transport
  639. * @task: rpc task
  640. *
  641. * This cleans up if an error causes us to abort the transmission of a request.
  642. * In this case, the socket may need to be reset in order to avoid confusing
  643. * the server.
  644. */
  645. static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
  646. {
  647. struct rpc_rqst *req;
  648. if (task != xprt->snd_task)
  649. return;
  650. if (task == NULL)
  651. goto out_release;
  652. req = task->tk_rqstp;
  653. if (req == NULL)
  654. goto out_release;
  655. if (req->rq_bytes_sent == 0)
  656. goto out_release;
  657. if (req->rq_bytes_sent == req->rq_snd_buf.len)
  658. goto out_release;
  659. set_bit(XPRT_CLOSE_WAIT, &xprt->state);
  660. out_release:
  661. xprt_release_xprt(xprt, task);
  662. }
  663. static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
  664. {
  665. transport->old_data_ready = sk->sk_data_ready;
  666. transport->old_state_change = sk->sk_state_change;
  667. transport->old_write_space = sk->sk_write_space;
  668. transport->old_error_report = sk->sk_error_report;
  669. }
  670. static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
  671. {
  672. sk->sk_data_ready = transport->old_data_ready;
  673. sk->sk_state_change = transport->old_state_change;
  674. sk->sk_write_space = transport->old_write_space;
  675. sk->sk_error_report = transport->old_error_report;
  676. }
  677. static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
  678. {
  679. smp_mb__before_atomic();
  680. clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
  681. clear_bit(XPRT_CLOSING, &xprt->state);
  682. smp_mb__after_atomic();
  683. }
  684. static void xs_sock_mark_closed(struct rpc_xprt *xprt)
  685. {
  686. xs_sock_reset_connection_flags(xprt);
  687. /* Mark transport as closed and wake up all pending tasks */
  688. xprt_disconnect_done(xprt);
  689. }
  690. /**
  691. * xs_error_report - callback to handle TCP socket state errors
  692. * @sk: socket
  693. *
  694. * Note: we don't call sock_error() since there may be an rpc_task
  695. * using the socket, and so we don't want to clear sk->sk_err.
  696. */
  697. static void xs_error_report(struct sock *sk)
  698. {
  699. struct rpc_xprt *xprt;
  700. int err;
  701. read_lock_bh(&sk->sk_callback_lock);
  702. if (!(xprt = xprt_from_sock(sk)))
  703. goto out;
  704. err = -sk->sk_err;
  705. if (err == 0)
  706. goto out;
  707. /* Is this a reset event? */
  708. if (sk->sk_state == TCP_CLOSE)
  709. xs_sock_mark_closed(xprt);
  710. dprintk("RPC: xs_error_report client %p, error=%d...\n",
  711. xprt, -err);
  712. trace_rpc_socket_error(xprt, sk->sk_socket, err);
  713. xprt_wake_pending_tasks(xprt, err);
  714. out:
  715. read_unlock_bh(&sk->sk_callback_lock);
  716. }
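/*
 * Detach the transport from its socket: shut the socket down, then,
 * under sk_callback_lock, break the sk <-> xprt linkage and restore the
 * saved callbacks before releasing the socket.
 */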
  717. static void xs_reset_transport(struct sock_xprt *transport)
  718. {
  719. struct socket *sock = transport->sock;
  720. struct sock *sk = transport->inet;
  721. struct rpc_xprt *xprt = &transport->xprt;
  722. if (sk == NULL)
  723. return;
  724. if (atomic_read(&transport->xprt.swapper))
  725. sk_clear_memalloc(sk);
  726. kernel_sock_shutdown(sock, SHUT_RDWR);
  727. write_lock_bh(&sk->sk_callback_lock);
  728. transport->inet = NULL;
  729. transport->sock = NULL;
  730. sk->sk_user_data = NULL;
  731. xs_restore_old_callbacks(transport, sk);
  732. xprt_clear_connected(xprt);
  733. write_unlock_bh(&sk->sk_callback_lock);
  734. xs_sock_reset_connection_flags(xprt);
  735. trace_rpc_socket_close(xprt, sock);
  736. sock_release(sock);
  737. }
  738. /**
  739. * xs_close - close a socket
  740. * @xprt: transport
  741. *
  742. * This is used when all requests are complete; i.e., no DRC state that
  743. * we want to save remains on the server.
  744. *
  745. * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
  746. * xs_reset_transport() zeroing the socket from underneath a writer.
  747. */
  748. static void xs_close(struct rpc_xprt *xprt)
  749. {
  750. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  751. dprintk("RPC: xs_close xprt %p\n", xprt);
  752. xs_reset_transport(transport);
  753. xprt->reestablish_timeout = 0;
  754. xprt_disconnect_done(xprt);
  755. }
  756. static void xs_inject_disconnect(struct rpc_xprt *xprt)
  757. {
  758. dprintk("RPC: injecting transport disconnect on xprt=%p\n",
  759. xprt);
  760. xprt_disconnect_done(xprt);
  761. }
  762. static void xs_xprt_free(struct rpc_xprt *xprt)
  763. {
  764. xs_free_peer_addresses(xprt);
  765. xprt_free(xprt);
  766. }
  767. /**
  768. * xs_destroy - prepare to shutdown a transport
  769. * @xprt: doomed transport
  770. *
  771. */
  772. static void xs_destroy(struct rpc_xprt *xprt)
  773. {
  774. struct sock_xprt *transport = container_of(xprt,
  775. struct sock_xprt, xprt);
  776. dprintk("RPC: xs_destroy xprt %p\n", xprt);
  777. cancel_delayed_work_sync(&transport->connect_worker);
  778. xs_close(xprt);
  779. xs_xprt_free(xprt);
  780. module_put(THIS_MODULE);
  781. }
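/*
 * Copy an AF_LOCAL reply out of the skb into the xdr buffer, skipping
 * the 4-byte record marker; if any bytes are left over the reply did
 * not fit and is treated as an error.
 */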
  782. static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
  783. {
  784. struct xdr_skb_reader desc = {
  785. .skb = skb,
  786. .offset = sizeof(rpc_fraghdr),
  787. .count = skb->len - sizeof(rpc_fraghdr),
  788. };
  789. if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
  790. return -1;
  791. if (desc.count)
  792. return -1;
  793. return 0;
  794. }
  795. /**
  796. * xs_local_data_ready - "data ready" callback for AF_LOCAL sockets
  797. * @sk: socket with data to read
  798. *
  799. * Currently this assumes we can read the whole reply in a single gulp.
  800. */
  801. static void xs_local_data_ready(struct sock *sk)
  802. {
  803. struct rpc_task *task;
  804. struct rpc_xprt *xprt;
  805. struct rpc_rqst *rovr;
  806. struct sk_buff *skb;
  807. int err, repsize, copied;
  808. u32 _xid;
  809. __be32 *xp;
  810. read_lock_bh(&sk->sk_callback_lock);
  811. dprintk("RPC: %s...\n", __func__);
  812. xprt = xprt_from_sock(sk);
  813. if (xprt == NULL)
  814. goto out;
  815. skb = skb_recv_datagram(sk, 0, 1, &err);
  816. if (skb == NULL)
  817. goto out;
  818. repsize = skb->len - sizeof(rpc_fraghdr);
  819. if (repsize < 4) {
  820. dprintk("RPC: impossible RPC reply size %d\n", repsize);
  821. goto dropit;
  822. }
  823. /* Copy the XID from the skb... */
  824. xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
  825. if (xp == NULL)
  826. goto dropit;
  827. /* Look up and lock the request corresponding to the given XID */
  828. spin_lock(&xprt->transport_lock);
  829. rovr = xprt_lookup_rqst(xprt, *xp);
  830. if (!rovr)
  831. goto out_unlock;
  832. task = rovr->rq_task;
  833. copied = rovr->rq_private_buf.buflen;
  834. if (copied > repsize)
  835. copied = repsize;
  836. if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
  837. dprintk("RPC: sk_buff copy failed\n");
  838. goto out_unlock;
  839. }
  840. xprt_complete_rqst(task, copied);
  841. out_unlock:
  842. spin_unlock(&xprt->transport_lock);
  843. dropit:
  844. skb_free_datagram(sk, skb);
  845. out:
  846. read_unlock_bh(&sk->sk_callback_lock);
  847. }
  848. /**
  849. * xs_udp_data_ready - "data ready" callback for UDP sockets
  850. * @sk: socket with data to read
  851. *
  852. */
  853. static void xs_udp_data_ready(struct sock *sk)
  854. {
  855. struct rpc_task *task;
  856. struct rpc_xprt *xprt;
  857. struct rpc_rqst *rovr;
  858. struct sk_buff *skb;
  859. int err, repsize, copied;
  860. u32 _xid;
  861. __be32 *xp;
  862. read_lock_bh(&sk->sk_callback_lock);
  863. dprintk("RPC: xs_udp_data_ready...\n");
  864. if (!(xprt = xprt_from_sock(sk)))
  865. goto out;
  866. if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
  867. goto out;
  868. repsize = skb->len - sizeof(struct udphdr);
  869. if (repsize < 4) {
  870. dprintk("RPC: impossible RPC reply size %d!\n", repsize);
  871. goto dropit;
  872. }
  873. /* Copy the XID from the skb... */
  874. xp = skb_header_pointer(skb, sizeof(struct udphdr),
  875. sizeof(_xid), &_xid);
  876. if (xp == NULL)
  877. goto dropit;
  878. /* Look up and lock the request corresponding to the given XID */
  879. spin_lock(&xprt->transport_lock);
  880. rovr = xprt_lookup_rqst(xprt, *xp);
  881. if (!rovr)
  882. goto out_unlock;
  883. task = rovr->rq_task;
  884. if ((copied = rovr->rq_private_buf.buflen) > repsize)
  885. copied = repsize;
  886. /* Suck it into the iovec, verify checksum if not done by hw. */
  887. if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
  888. UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
  889. goto out_unlock;
  890. }
  891. UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
  892. xprt_adjust_cwnd(xprt, task, copied);
  893. xprt_complete_rqst(task, copied);
  894. out_unlock:
  895. spin_unlock(&xprt->transport_lock);
  896. dropit:
  897. skb_free_datagram(sk, skb);
  898. out:
  899. read_unlock_bh(&sk->sk_callback_lock);
  900. }
  901. /*
  902. * Helper function to force a TCP close if the server is sending
  903. * junk and/or it has put us in CLOSE_WAIT
  904. */
  905. static void xs_tcp_force_close(struct rpc_xprt *xprt)
  906. {
  907. xprt_force_disconnect(xprt);
  908. }
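/*
 * The TCP receive path below is a small state machine driven by
 * transport->tcp_flags: read the record fragment header, then the XID,
 * then the call direction, then the payload, and finally discard any
 * trailing bytes of the fragment.
 */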
  909. static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
  910. {
  911. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  912. size_t len, used;
  913. char *p;
  914. p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
  915. len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
  916. used = xdr_skb_read_bits(desc, p, len);
  917. transport->tcp_offset += used;
  918. if (used != len)
  919. return;
  920. transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
  921. if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
  922. transport->tcp_flags |= TCP_RCV_LAST_FRAG;
  923. else
  924. transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
  925. transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
  926. transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
  927. transport->tcp_offset = 0;
  928. /* Sanity check of the record length */
  929. if (unlikely(transport->tcp_reclen < 8)) {
  930. dprintk("RPC: invalid TCP record fragment length\n");
  931. xs_tcp_force_close(xprt);
  932. return;
  933. }
  934. dprintk("RPC: reading TCP record fragment of length %d\n",
  935. transport->tcp_reclen);
  936. }
  937. static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
  938. {
  939. if (transport->tcp_offset == transport->tcp_reclen) {
  940. transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
  941. transport->tcp_offset = 0;
  942. if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
  943. transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
  944. transport->tcp_flags |= TCP_RCV_COPY_XID;
  945. transport->tcp_copied = 0;
  946. }
  947. }
  948. }
  949. static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
  950. {
  951. size_t len, used;
  952. char *p;
  953. len = sizeof(transport->tcp_xid) - transport->tcp_offset;
  954. dprintk("RPC: reading XID (%Zu bytes)\n", len);
  955. p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
  956. used = xdr_skb_read_bits(desc, p, len);
  957. transport->tcp_offset += used;
  958. if (used != len)
  959. return;
  960. transport->tcp_flags &= ~TCP_RCV_COPY_XID;
  961. transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
  962. transport->tcp_copied = 4;
  963. dprintk("RPC: reading %s XID %08x\n",
  964. (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
  965. : "request with",
  966. ntohl(transport->tcp_xid));
  967. xs_tcp_check_fraghdr(transport);
  968. }
  969. static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
  970. struct xdr_skb_reader *desc)
  971. {
  972. size_t len, used;
  973. u32 offset;
  974. char *p;
  975. /*
  976. * We want transport->tcp_offset to be 8 at the end of this routine
  977. * (4 bytes for the xid and 4 bytes for the call/reply flag).
  978. * When this function is called for the first time,
  979. * transport->tcp_offset is 4 (after having already read the xid).
  980. */
  981. offset = transport->tcp_offset - sizeof(transport->tcp_xid);
  982. len = sizeof(transport->tcp_calldir) - offset;
  983. dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
  984. p = ((char *) &transport->tcp_calldir) + offset;
  985. used = xdr_skb_read_bits(desc, p, len);
  986. transport->tcp_offset += used;
  987. if (used != len)
  988. return;
  989. transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
  990. /*
  991. * We don't yet have the XDR buffer, so we will write the calldir
  992. * out after we get the buffer from the 'struct rpc_rqst'
  993. */
  994. switch (ntohl(transport->tcp_calldir)) {
  995. case RPC_REPLY:
  996. transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
  997. transport->tcp_flags |= TCP_RCV_COPY_DATA;
  998. transport->tcp_flags |= TCP_RPC_REPLY;
  999. break;
  1000. case RPC_CALL:
  1001. transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
  1002. transport->tcp_flags |= TCP_RCV_COPY_DATA;
  1003. transport->tcp_flags &= ~TCP_RPC_REPLY;
  1004. break;
  1005. default:
  1006. dprintk("RPC: invalid request message type\n");
  1007. xs_tcp_force_close(&transport->xprt);
  1008. }
  1009. xs_tcp_check_fraghdr(transport);
  1010. }
  1011. static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
  1012. struct xdr_skb_reader *desc,
  1013. struct rpc_rqst *req)
  1014. {
  1015. struct sock_xprt *transport =
  1016. container_of(xprt, struct sock_xprt, xprt);
  1017. struct xdr_buf *rcvbuf;
  1018. size_t len;
  1019. ssize_t r;
  1020. rcvbuf = &req->rq_private_buf;
  1021. if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
  1022. /*
  1023. * Save the RPC direction in the XDR buffer
  1024. */
  1025. memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
  1026. &transport->tcp_calldir,
  1027. sizeof(transport->tcp_calldir));
  1028. transport->tcp_copied += sizeof(transport->tcp_calldir);
  1029. transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
  1030. }
  1031. len = desc->count;
  1032. if (len > transport->tcp_reclen - transport->tcp_offset) {
  1033. struct xdr_skb_reader my_desc;
  1034. len = transport->tcp_reclen - transport->tcp_offset;
  1035. memcpy(&my_desc, desc, sizeof(my_desc));
  1036. my_desc.count = len;
  1037. r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
  1038. &my_desc, xdr_skb_read_bits);
  1039. desc->count -= r;
  1040. desc->offset += r;
  1041. } else
  1042. r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
  1043. desc, xdr_skb_read_bits);
  1044. if (r > 0) {
  1045. transport->tcp_copied += r;
  1046. transport->tcp_offset += r;
  1047. }
  1048. if (r != len) {
  1049. /* Error when copying to the receive buffer,
  1050. * usually because we weren't able to allocate
  1051. * additional buffer pages. All we can do now
  1052. * is turn off TCP_RCV_COPY_DATA, so the request
  1053. * will not receive any additional updates,
  1054. * and time out.
  1055. * Any remaining data from this record will
  1056. * be discarded.
  1057. */
  1058. transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
  1059. dprintk("RPC: XID %08x truncated request\n",
  1060. ntohl(transport->tcp_xid));
  1061. dprintk("RPC: xprt = %p, tcp_copied = %lu, "
  1062. "tcp_offset = %u, tcp_reclen = %u\n",
  1063. xprt, transport->tcp_copied,
  1064. transport->tcp_offset, transport->tcp_reclen);
  1065. return;
  1066. }
  1067. dprintk("RPC: XID %08x read %Zd bytes\n",
  1068. ntohl(transport->tcp_xid), r);
  1069. dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
  1070. "tcp_reclen = %u\n", xprt, transport->tcp_copied,
  1071. transport->tcp_offset, transport->tcp_reclen);
  1072. if (transport->tcp_copied == req->rq_private_buf.buflen)
  1073. transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
  1074. else if (transport->tcp_offset == transport->tcp_reclen) {
  1075. if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
  1076. transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
  1077. }
  1078. }
  1079. /*
  1080. * Finds the request corresponding to the RPC xid and invokes the common
  1081. * tcp read code to read the data.
  1082. */
  1083. static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
  1084. struct xdr_skb_reader *desc)
  1085. {
  1086. struct sock_xprt *transport =
  1087. container_of(xprt, struct sock_xprt, xprt);
  1088. struct rpc_rqst *req;
  1089. dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
  1090. /* Find and lock the request corresponding to this xid */
  1091. spin_lock(&xprt->transport_lock);
  1092. req = xprt_lookup_rqst(xprt, transport->tcp_xid);
  1093. if (!req) {
  1094. dprintk("RPC: XID %08x request not found!\n",
  1095. ntohl(transport->tcp_xid));
  1096. spin_unlock(&xprt->transport_lock);
  1097. return -1;
  1098. }
  1099. xs_tcp_read_common(xprt, desc, req);
  1100. if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
  1101. xprt_complete_rqst(req->rq_task, transport->tcp_copied);
  1102. spin_unlock(&xprt->transport_lock);
  1103. return 0;
  1104. }
  1105. #if defined(CONFIG_SUNRPC_BACKCHANNEL)
  1106. /*
  1107. * Obtains an rpc_rqst previously allocated and invokes the common
  1108. * tcp read code to read the data. The result is placed in the callback
  1109. * queue.
  1110. * If we're unable to obtain the rpc_rqst we schedule the closing of the
  1111. * connection and return -1.
  1112. */
  1113. static int xs_tcp_read_callback(struct rpc_xprt *xprt,
  1114. struct xdr_skb_reader *desc)
  1115. {
  1116. struct sock_xprt *transport =
  1117. container_of(xprt, struct sock_xprt, xprt);
  1118. struct rpc_rqst *req;
  1119. /* Look up and lock the request corresponding to the given XID */
  1120. spin_lock(&xprt->transport_lock);
  1121. req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
  1122. if (req == NULL) {
  1123. spin_unlock(&xprt->transport_lock);
  1124. printk(KERN_WARNING "Callback slot table overflowed\n");
  1125. xprt_force_disconnect(xprt);
  1126. return -1;
  1127. }
  1128. dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
  1129. xs_tcp_read_common(xprt, desc, req);
  1130. if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
  1131. xprt_complete_bc_request(req, transport->tcp_copied);
  1132. spin_unlock(&xprt->transport_lock);
  1133. return 0;
  1134. }
  1135. static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
  1136. struct xdr_skb_reader *desc)
  1137. {
  1138. struct sock_xprt *transport =
  1139. container_of(xprt, struct sock_xprt, xprt);
  1140. return (transport->tcp_flags & TCP_RPC_REPLY) ?
  1141. xs_tcp_read_reply(xprt, desc) :
  1142. xs_tcp_read_callback(xprt, desc);
  1143. }
  1144. static int xs_tcp_bc_up(struct svc_serv *serv, struct net *net)
  1145. {
  1146. int ret;
  1147. ret = svc_create_xprt(serv, "tcp-bc", net, PF_INET, 0,
  1148. SVC_SOCK_ANONYMOUS);
  1149. if (ret < 0)
  1150. return ret;
  1151. return 0;
  1152. }
  1153. #else
  1154. static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
  1155. struct xdr_skb_reader *desc)
  1156. {
  1157. return xs_tcp_read_reply(xprt, desc);
  1158. }
  1159. #endif /* CONFIG_SUNRPC_BACKCHANNEL */
  1160. /*
  1161. * Read data off the transport. This can be either an RPC_CALL or an
  1162. * RPC_REPLY. Relay the processing to helper functions.
  1163. */
  1164. static void xs_tcp_read_data(struct rpc_xprt *xprt,
  1165. struct xdr_skb_reader *desc)
  1166. {
  1167. struct sock_xprt *transport =
  1168. container_of(xprt, struct sock_xprt, xprt);
  1169. if (_xs_tcp_read_data(xprt, desc) == 0)
  1170. xs_tcp_check_fraghdr(transport);
  1171. else {
  1172. /*
  1173. * The transport_lock protects the request handling.
  1174. * There's no need to hold it to update the tcp_flags.
  1175. */
  1176. transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
  1177. }
  1178. }
  1179. static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
  1180. {
  1181. size_t len;
  1182. len = transport->tcp_reclen - transport->tcp_offset;
  1183. if (len > desc->count)
  1184. len = desc->count;
  1185. desc->count -= len;
  1186. desc->offset += len;
  1187. transport->tcp_offset += len;
  1188. dprintk("RPC: discarded %Zu bytes\n", len);
  1189. xs_tcp_check_fraghdr(transport);
  1190. }
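/*
 * recv_actor callback for tcp_read_sock(): feed as much of @skb as the
 * state machine can consume and return the number of bytes used.
 */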
  1191. static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
  1192. {
  1193. struct rpc_xprt *xprt = rd_desc->arg.data;
  1194. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  1195. struct xdr_skb_reader desc = {
  1196. .skb = skb,
  1197. .offset = offset,
  1198. .count = len,
  1199. };
  1200. dprintk("RPC: xs_tcp_data_recv started\n");
  1201. do {
  1202. trace_xs_tcp_data_recv(transport);
  1203. /* Read in a new fragment marker if necessary */
  1204. /* Can we ever really expect to get completely empty fragments? */
  1205. if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
  1206. xs_tcp_read_fraghdr(xprt, &desc);
  1207. continue;
  1208. }
  1209. /* Read in the xid if necessary */
  1210. if (transport->tcp_flags & TCP_RCV_COPY_XID) {
  1211. xs_tcp_read_xid(transport, &desc);
  1212. continue;
  1213. }
  1214. /* Read in the call/reply flag */
  1215. if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
  1216. xs_tcp_read_calldir(transport, &desc);
  1217. continue;
  1218. }
  1219. /* Read in the request data */
  1220. if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
  1221. xs_tcp_read_data(xprt, &desc);
  1222. continue;
  1223. }
  1224. /* Skip over any trailing bytes on short reads */
  1225. xs_tcp_read_discard(transport, &desc);
  1226. } while (desc.count);
  1227. trace_xs_tcp_data_recv(transport);
  1228. dprintk("RPC: xs_tcp_data_recv done\n");
  1229. return len - desc.count;
  1230. }
  1231. /**
  1232. * xs_tcp_data_ready - "data ready" callback for TCP sockets
  1233. * @sk: socket with data to read
  1234. *
  1235. */
  1236. static void xs_tcp_data_ready(struct sock *sk)
  1237. {
  1238. struct rpc_xprt *xprt;
  1239. read_descriptor_t rd_desc;
  1240. int read;
  1241. unsigned long total = 0;
  1242. dprintk("RPC: xs_tcp_data_ready...\n");
  1243. read_lock_bh(&sk->sk_callback_lock);
  1244. if (!(xprt = xprt_from_sock(sk))) {
  1245. read = 0;
  1246. goto out;
  1247. }
  1248. /* Any data means we had a useful conversation, so
  1249. * we don't need to delay the next reconnect
  1250. */
  1251. if (xprt->reestablish_timeout)
  1252. xprt->reestablish_timeout = 0;
  1253. /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
  1254. rd_desc.arg.data = xprt;
  1255. do {
  1256. rd_desc.count = 65536;
  1257. read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
  1258. if (read > 0)
  1259. total += read;
  1260. } while (read > 0);
  1261. out:
  1262. trace_xs_tcp_data_ready(xprt, read, total);
  1263. read_unlock_bh(&sk->sk_callback_lock);
  1264. }
  1265. /**
  1266. * xs_tcp_state_change - callback to handle TCP socket state changes
  1267. * @sk: socket whose state has changed
  1268. *
  1269. */
  1270. static void xs_tcp_state_change(struct sock *sk)
  1271. {
  1272. struct rpc_xprt *xprt;
  1273. struct sock_xprt *transport;
  1274. read_lock_bh(&sk->sk_callback_lock);
  1275. if (!(xprt = xprt_from_sock(sk)))
  1276. goto out;
  1277. dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
  1278. dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
  1279. sk->sk_state, xprt_connected(xprt),
  1280. sock_flag(sk, SOCK_DEAD),
  1281. sock_flag(sk, SOCK_ZAPPED),
  1282. sk->sk_shutdown);
  1283. transport = container_of(xprt, struct sock_xprt, xprt);
  1284. trace_rpc_socket_state_change(xprt, sk->sk_socket);
  1285. switch (sk->sk_state) {
  1286. case TCP_ESTABLISHED:
  1287. spin_lock(&xprt->transport_lock);
  1288. if (!xprt_test_and_set_connected(xprt)) {
  1289. /* Reset TCP record info */
  1290. transport->tcp_offset = 0;
  1291. transport->tcp_reclen = 0;
  1292. transport->tcp_copied = 0;
  1293. transport->tcp_flags =
  1294. TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
  1295. xprt->connect_cookie++;
  1296. clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
  1297. xprt_clear_connecting(xprt);
  1298. xprt_wake_pending_tasks(xprt, -EAGAIN);
  1299. }
  1300. spin_unlock(&xprt->transport_lock);
  1301. break;
  1302. case TCP_FIN_WAIT1:
  1303. /* The client initiated a shutdown of the socket */
  1304. xprt->connect_cookie++;
  1305. xprt->reestablish_timeout = 0;
  1306. set_bit(XPRT_CLOSING, &xprt->state);
  1307. smp_mb__before_atomic();
  1308. clear_bit(XPRT_CONNECTED, &xprt->state);
  1309. clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
  1310. smp_mb__after_atomic();
  1311. break;
  1312. case TCP_CLOSE_WAIT:
  1313. /* The server initiated a shutdown of the socket */
  1314. xprt->connect_cookie++;
  1315. clear_bit(XPRT_CONNECTED, &xprt->state);
1316. xs_tcp_force_close(xprt);
/* fall through */
  1317. case TCP_CLOSING:
  1318. /*
  1319. * If the server closed down the connection, make sure that
  1320. * we back off before reconnecting
  1321. */
  1322. if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
  1323. xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
  1324. break;
  1325. case TCP_LAST_ACK:
  1326. set_bit(XPRT_CLOSING, &xprt->state);
  1327. smp_mb__before_atomic();
  1328. clear_bit(XPRT_CONNECTED, &xprt->state);
  1329. smp_mb__after_atomic();
  1330. break;
  1331. case TCP_CLOSE:
  1332. if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
  1333. &transport->sock_state))
  1334. xprt_clear_connecting(xprt);
  1335. xs_sock_mark_closed(xprt);
  1336. }
  1337. out:
  1338. read_unlock_bh(&sk->sk_callback_lock);
  1339. }
  1340. static void xs_write_space(struct sock *sk)
  1341. {
  1342. struct socket *sock;
  1343. struct rpc_xprt *xprt;
  1344. if (unlikely(!(sock = sk->sk_socket)))
  1345. return;
  1346. clear_bit(SOCK_NOSPACE, &sock->flags);
  1347. if (unlikely(!(xprt = xprt_from_sock(sk))))
  1348. return;
  1349. if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
  1350. return;
  1351. xprt_write_space(xprt);
  1352. }
  1353. /**
  1354. * xs_udp_write_space - callback invoked when socket buffer space
  1355. * becomes available
  1356. * @sk: socket whose state has changed
  1357. *
  1358. * Called when more output buffer space is available for this socket.
  1359. * We try not to wake our writers until they can make "significant"
  1360. * progress, otherwise we'll waste resources thrashing kernel_sendmsg
  1361. * with a bunch of small requests.
  1362. */
  1363. static void xs_udp_write_space(struct sock *sk)
  1364. {
  1365. read_lock_bh(&sk->sk_callback_lock);
  1366. /* from net/core/sock.c:sock_def_write_space */
  1367. if (sock_writeable(sk))
  1368. xs_write_space(sk);
  1369. read_unlock_bh(&sk->sk_callback_lock);
  1370. }
  1371. /**
  1372. * xs_tcp_write_space - callback invoked when socket buffer space
  1373. * becomes available
  1374. * @sk: socket whose state has changed
  1375. *
  1376. * Called when more output buffer space is available for this socket.
  1377. * We try not to wake our writers until they can make "significant"
  1378. * progress, otherwise we'll waste resources thrashing kernel_sendmsg
  1379. * with a bunch of small requests.
  1380. */
  1381. static void xs_tcp_write_space(struct sock *sk)
  1382. {
  1383. read_lock_bh(&sk->sk_callback_lock);
  1384. /* from net/core/stream.c:sk_stream_write_space */
  1385. if (sk_stream_is_writeable(sk))
  1386. xs_write_space(sk);
  1387. read_unlock_bh(&sk->sk_callback_lock);
  1388. }
  1389. static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
  1390. {
  1391. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  1392. struct sock *sk = transport->inet;
  1393. if (transport->rcvsize) {
  1394. sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
  1395. sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
  1396. }
  1397. if (transport->sndsize) {
  1398. sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
  1399. sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
  1400. sk->sk_write_space(sk);
  1401. }
  1402. }
  1403. /**
  1404. * xs_udp_set_buffer_size - set send and receive limits
  1405. * @xprt: generic transport
  1406. * @sndsize: requested size of send buffer, in bytes
  1407. * @rcvsize: requested size of receive buffer, in bytes
  1408. *
  1409. * Set socket send and receive buffer size limits.
  1410. */
  1411. static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
  1412. {
  1413. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  1414. transport->sndsize = 0;
  1415. if (sndsize)
  1416. transport->sndsize = sndsize + 1024;
  1417. transport->rcvsize = 0;
  1418. if (rcvsize)
  1419. transport->rcvsize = rcvsize + 1024;
  1420. xs_udp_do_set_buffer_size(xprt);
  1421. }
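/*
 * Worked example (editorial note): with the sizing rules above, a caller
 * requesting sndsize = rcvsize = 4096 on a transport with xprt->max_reqs = 16
 * slots ends up with
 *
 * transport->sndsize = 4096 + 1024 = 5120
 * sk->sk_sndbuf      = 5120 * 16 * 2 = 163840 bytes
 *
 * and likewise for the receive side. The extra 1KB leaves room for RPC and
 * transport headers, and the "* max_reqs * 2" factor sizes the socket buffer
 * so every slot can have a request in flight with some slack.
 */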
  1422. /**
  1423. * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1424. * @xprt: transport whose request timed out
* @task: task that timed out
  1425. *
  1426. * Adjust the congestion window after a retransmit timeout has occurred.
  1427. */
  1428. static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
  1429. {
  1430. xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
  1431. }
  1432. static unsigned short xs_get_random_port(void)
  1433. {
  1434. unsigned short range = xprt_max_resvport - xprt_min_resvport;
  1435. unsigned short rand = (unsigned short) prandom_u32() % range;
  1436. return rand + xprt_min_resvport;
  1437. }
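/*
 * Worked example (editorial note): with the usual reserved-port window of
 * xprt_min_resvport = 665 and xprt_max_resvport = 1023 (tunable via the
 * module parameters at the end of this file), range = 358 and the function
 * returns a port in [665, 1022]. As written, the modulo never selects
 * xprt_max_resvport itself.
 */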
  1438. /**
1439. * xs_sock_set_reuseport - set the socket's port and address reuse options
  1440. * @sock: socket
  1441. *
  1442. * Note that this function has to be called on all sockets that share the
  1443. * same port, and it must be called before binding.
  1444. */
  1445. static void xs_sock_set_reuseport(struct socket *sock)
  1446. {
  1447. int opt = 1;
  1448. kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEPORT,
  1449. (char *)&opt, sizeof(opt));
  1450. }
  1451. static unsigned short xs_sock_getport(struct socket *sock)
  1452. {
  1453. struct sockaddr_storage buf;
  1454. int buflen;
  1455. unsigned short port = 0;
  1456. if (kernel_getsockname(sock, (struct sockaddr *)&buf, &buflen) < 0)
  1457. goto out;
  1458. switch (buf.ss_family) {
  1459. case AF_INET6:
  1460. port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
  1461. break;
  1462. case AF_INET:
  1463. port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
  1464. }
  1465. out:
  1466. return port;
  1467. }
  1468. /**
  1469. * xs_set_port - reset the port number in the remote endpoint address
  1470. * @xprt: generic transport
  1471. * @port: new port number
  1472. *
  1473. */
  1474. static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
  1475. {
  1476. dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
  1477. rpc_set_port(xs_addr(xprt), port);
  1478. xs_update_peer_port(xprt);
  1479. }
  1480. static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
  1481. {
  1482. if (transport->srcport == 0)
  1483. transport->srcport = xs_sock_getport(sock);
  1484. }
  1485. static unsigned short xs_get_srcport(struct sock_xprt *transport)
  1486. {
  1487. unsigned short port = transport->srcport;
  1488. if (port == 0 && transport->xprt.resvport)
  1489. port = xs_get_random_port();
  1490. return port;
  1491. }
  1492. static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
  1493. {
  1494. if (transport->srcport != 0)
  1495. transport->srcport = 0;
  1496. if (!transport->xprt.resvport)
  1497. return 0;
  1498. if (port <= xprt_min_resvport || port > xprt_max_resvport)
  1499. return xprt_max_resvport;
  1500. return --port;
  1501. }
  1502. static int xs_bind(struct sock_xprt *transport, struct socket *sock)
  1503. {
  1504. struct sockaddr_storage myaddr;
  1505. int err, nloop = 0;
  1506. unsigned short port = xs_get_srcport(transport);
  1507. unsigned short last;
  1508. /*
  1509. * If we are asking for any ephemeral port (i.e. port == 0 &&
  1510. * transport->xprt.resvport == 0), don't bind. Let the local
  1511. * port selection happen implicitly when the socket is used
  1512. * (for example at connect time).
  1513. *
  1514. * This ensures that we can continue to establish TCP
  1515. * connections even when all local ephemeral ports are already
  1516. * a part of some TCP connection. This makes no difference
1517. * for UDP sockets, but also doesn't harm them.
  1518. *
  1519. * If we're asking for any reserved port (i.e. port == 0 &&
  1520. * transport->xprt.resvport == 1) xs_get_srcport above will
  1521. * ensure that port is non-zero and we will bind as needed.
  1522. */
  1523. if (port == 0)
  1524. return 0;
  1525. memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
  1526. do {
  1527. rpc_set_port((struct sockaddr *)&myaddr, port);
  1528. err = kernel_bind(sock, (struct sockaddr *)&myaddr,
  1529. transport->xprt.addrlen);
  1530. if (err == 0) {
  1531. transport->srcport = port;
  1532. break;
  1533. }
  1534. last = port;
  1535. port = xs_next_srcport(transport, port);
  1536. if (port > last)
  1537. nloop++;
  1538. } while (err == -EADDRINUSE && nloop != 2);
  1539. if (myaddr.ss_family == AF_INET)
  1540. dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
  1541. &((struct sockaddr_in *)&myaddr)->sin_addr,
  1542. port, err ? "failed" : "ok", err);
  1543. else
  1544. dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
  1545. &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
  1546. port, err ? "failed" : "ok", err);
  1547. return err;
  1548. }
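/*
 * Illustrative trace (editorial note) of the retry loop above when a
 * reserved source port is wanted: if xs_get_srcport() picks 800 and
 * kernel_bind() keeps failing with -EADDRINUSE, xs_next_srcport() walks
 * downward (799, 798, ...) to xprt_min_resvport, then wraps back to
 * xprt_max_resvport, which bumps nloop. After the second wrap (nloop == 2)
 * the loop gives up and xs_bind() returns -EADDRINUSE. When no source port
 * is wanted at all (port == 0) the bind is skipped entirely, as described
 * in the comment above.
 */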
  1549. /*
  1550. * We don't support autobind on AF_LOCAL sockets
  1551. */
  1552. static void xs_local_rpcbind(struct rpc_task *task)
  1553. {
  1554. rcu_read_lock();
  1555. xprt_set_bound(rcu_dereference(task->tk_client->cl_xprt));
  1556. rcu_read_unlock();
  1557. }
  1558. static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
  1559. {
  1560. }
  1561. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  1562. static struct lock_class_key xs_key[2];
  1563. static struct lock_class_key xs_slock_key[2];
  1564. static inline void xs_reclassify_socketu(struct socket *sock)
  1565. {
  1566. struct sock *sk = sock->sk;
  1567. sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
  1568. &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
  1569. }
  1570. static inline void xs_reclassify_socket4(struct socket *sock)
  1571. {
  1572. struct sock *sk = sock->sk;
  1573. sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
  1574. &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
  1575. }
  1576. static inline void xs_reclassify_socket6(struct socket *sock)
  1577. {
  1578. struct sock *sk = sock->sk;
  1579. sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
  1580. &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
  1581. }
  1582. static inline void xs_reclassify_socket(int family, struct socket *sock)
  1583. {
  1584. WARN_ON_ONCE(sock_owned_by_user(sock->sk));
  1585. if (sock_owned_by_user(sock->sk))
  1586. return;
  1587. switch (family) {
  1588. case AF_LOCAL:
  1589. xs_reclassify_socketu(sock);
  1590. break;
  1591. case AF_INET:
  1592. xs_reclassify_socket4(sock);
  1593. break;
  1594. case AF_INET6:
  1595. xs_reclassify_socket6(sock);
  1596. break;
  1597. }
  1598. }
  1599. #else
  1600. static inline void xs_reclassify_socketu(struct socket *sock)
  1601. {
  1602. }
  1603. static inline void xs_reclassify_socket4(struct socket *sock)
  1604. {
  1605. }
  1606. static inline void xs_reclassify_socket6(struct socket *sock)
  1607. {
  1608. }
  1609. static inline void xs_reclassify_socket(int family, struct socket *sock)
  1610. {
  1611. }
  1612. #endif
  1613. static void xs_dummy_setup_socket(struct work_struct *work)
  1614. {
  1615. }
  1616. static struct socket *xs_create_sock(struct rpc_xprt *xprt,
  1617. struct sock_xprt *transport, int family, int type,
  1618. int protocol, bool reuseport)
  1619. {
  1620. struct socket *sock;
  1621. int err;
  1622. err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
  1623. if (err < 0) {
  1624. dprintk("RPC: can't create %d transport socket (%d).\n",
  1625. protocol, -err);
  1626. goto out;
  1627. }
  1628. xs_reclassify_socket(family, sock);
  1629. if (reuseport)
  1630. xs_sock_set_reuseport(sock);
  1631. err = xs_bind(transport, sock);
  1632. if (err) {
  1633. sock_release(sock);
  1634. goto out;
  1635. }
  1636. return sock;
  1637. out:
  1638. return ERR_PTR(err);
  1639. }
  1640. static int xs_local_finish_connecting(struct rpc_xprt *xprt,
  1641. struct socket *sock)
  1642. {
  1643. struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
  1644. xprt);
  1645. if (!transport->inet) {
  1646. struct sock *sk = sock->sk;
  1647. write_lock_bh(&sk->sk_callback_lock);
  1648. xs_save_old_callbacks(transport, sk);
  1649. sk->sk_user_data = xprt;
  1650. sk->sk_data_ready = xs_local_data_ready;
  1651. sk->sk_write_space = xs_udp_write_space;
  1652. sk->sk_error_report = xs_error_report;
  1653. sk->sk_allocation = GFP_NOIO;
  1654. xprt_clear_connected(xprt);
  1655. /* Reset to new socket */
  1656. transport->sock = sock;
  1657. transport->inet = sk;
  1658. write_unlock_bh(&sk->sk_callback_lock);
  1659. }
  1660. /* Tell the socket layer to start connecting... */
  1661. xprt->stat.connect_count++;
  1662. xprt->stat.connect_start = jiffies;
  1663. return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
  1664. }
  1665. /**
  1666. * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
  1667. * @transport: socket transport to connect
  1668. */
  1669. static int xs_local_setup_socket(struct sock_xprt *transport)
  1670. {
  1671. struct rpc_xprt *xprt = &transport->xprt;
  1672. struct socket *sock;
  1673. int status = -EIO;
  1674. status = __sock_create(xprt->xprt_net, AF_LOCAL,
  1675. SOCK_STREAM, 0, &sock, 1);
  1676. if (status < 0) {
  1677. dprintk("RPC: can't create AF_LOCAL "
  1678. "transport socket (%d).\n", -status);
  1679. goto out;
  1680. }
  1681. xs_reclassify_socketu(sock);
  1682. dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
  1683. xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
  1684. status = xs_local_finish_connecting(xprt, sock);
  1685. trace_rpc_socket_connect(xprt, sock, status);
  1686. switch (status) {
  1687. case 0:
  1688. dprintk("RPC: xprt %p connected to %s\n",
  1689. xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1690. xprt_set_connected(xprt);
/* fall through */
  1691. case -ENOBUFS:
  1692. break;
  1693. case -ENOENT:
  1694. dprintk("RPC: xprt %p: socket %s does not exist\n",
  1695. xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
  1696. break;
  1697. case -ECONNREFUSED:
  1698. dprintk("RPC: xprt %p: connection refused for %s\n",
  1699. xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
  1700. break;
  1701. default:
  1702. printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
  1703. __func__, -status,
  1704. xprt->address_strings[RPC_DISPLAY_ADDR]);
  1705. }
  1706. out:
  1707. xprt_clear_connecting(xprt);
  1708. xprt_wake_pending_tasks(xprt, status);
  1709. return status;
  1710. }
  1711. static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
  1712. {
  1713. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  1714. int ret;
  1715. if (RPC_IS_ASYNC(task)) {
  1716. /*
  1717. * We want the AF_LOCAL connect to be resolved in the
  1718. * filesystem namespace of the process making the rpc
  1719. * call. Thus we connect synchronously.
  1720. *
  1721. * If we want to support asynchronous AF_LOCAL calls,
  1722. * we'll need to figure out how to pass a namespace to
  1723. * connect.
  1724. */
  1725. rpc_exit(task, -ENOTCONN);
  1726. return;
  1727. }
  1728. ret = xs_local_setup_socket(transport);
  1729. if (ret && !RPC_IS_SOFTCONN(task))
  1730. msleep_interruptible(15000);
  1731. }
  1732. #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
  1733. /*
  1734. * Note that this should be called with XPRT_LOCKED held (or when we otherwise
  1735. * know that we have exclusive access to the socket), to guard against
  1736. * races with xs_reset_transport.
  1737. */
  1738. static void xs_set_memalloc(struct rpc_xprt *xprt)
  1739. {
  1740. struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
  1741. xprt);
  1742. /*
  1743. * If there's no sock, then we have nothing to set. The
  1744. * reconnecting process will get it for us.
  1745. */
  1746. if (!transport->inet)
  1747. return;
  1748. if (atomic_read(&xprt->swapper))
  1749. sk_set_memalloc(transport->inet);
  1750. }
  1751. /**
  1752. * xs_enable_swap - Tag this transport as being used for swap.
  1753. * @xprt: transport to tag
  1754. *
  1755. * Take a reference to this transport on behalf of the rpc_clnt, and
  1756. * optionally mark it for swapping if it wasn't already.
  1757. */
  1758. static int
  1759. xs_enable_swap(struct rpc_xprt *xprt)
  1760. {
  1761. struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
  1762. if (atomic_inc_return(&xprt->swapper) != 1)
  1763. return 0;
  1764. if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
  1765. return -ERESTARTSYS;
  1766. if (xs->inet)
  1767. sk_set_memalloc(xs->inet);
  1768. xprt_release_xprt(xprt, NULL);
  1769. return 0;
  1770. }
  1771. /**
  1772. * xs_disable_swap - Untag this transport as being used for swap.
  1773. * @xprt: transport to tag
  1774. *
  1775. * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
  1776. * swapper refcount goes to 0, untag the socket as a memalloc socket.
  1777. */
  1778. static void
  1779. xs_disable_swap(struct rpc_xprt *xprt)
  1780. {
  1781. struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
  1782. if (!atomic_dec_and_test(&xprt->swapper))
  1783. return;
  1784. if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
  1785. return;
  1786. if (xs->inet)
  1787. sk_clear_memalloc(xs->inet);
  1788. xprt_release_xprt(xprt, NULL);
  1789. }
  1790. #else
  1791. static void xs_set_memalloc(struct rpc_xprt *xprt)
  1792. {
  1793. }
  1794. static int
  1795. xs_enable_swap(struct rpc_xprt *xprt)
  1796. {
  1797. return -EINVAL;
  1798. }
  1799. static void
  1800. xs_disable_swap(struct rpc_xprt *xprt)
  1801. {
  1802. }
  1803. #endif
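/*
 * Minimal caller sketch (editorial note, hypothetical code): how the
 * swapper reference pair above is intended to be used through the transport
 * ops. enable_swap() takes a swapper reference and tags the socket as
 * memalloc; disable_swap() drops the reference and clears the tag once the
 * count reaches zero.
 */
#if 0
static int example_use_xprt_for_swap(struct rpc_xprt *xprt)
{
int err;

err = xprt->ops->enable_swap(xprt); /* takes a swapper reference */
if (err)
return err;

/* ... perform swap-over-NFS style I/O on this transport ... */

xprt->ops->disable_swap(xprt); /* drops the swapper reference */
return 0;
}
#endif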
  1804. static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
  1805. {
  1806. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  1807. if (!transport->inet) {
  1808. struct sock *sk = sock->sk;
  1809. write_lock_bh(&sk->sk_callback_lock);
  1810. xs_save_old_callbacks(transport, sk);
  1811. sk->sk_user_data = xprt;
  1812. sk->sk_data_ready = xs_udp_data_ready;
  1813. sk->sk_write_space = xs_udp_write_space;
  1814. sk->sk_allocation = GFP_NOIO;
  1815. xprt_set_connected(xprt);
  1816. /* Reset to new socket */
  1817. transport->sock = sock;
  1818. transport->inet = sk;
  1819. xs_set_memalloc(xprt);
  1820. write_unlock_bh(&sk->sk_callback_lock);
  1821. }
  1822. xs_udp_do_set_buffer_size(xprt);
  1823. }
  1824. static void xs_udp_setup_socket(struct work_struct *work)
  1825. {
  1826. struct sock_xprt *transport =
  1827. container_of(work, struct sock_xprt, connect_worker.work);
  1828. struct rpc_xprt *xprt = &transport->xprt;
  1829. struct socket *sock = transport->sock;
  1830. int status = -EIO;
  1831. sock = xs_create_sock(xprt, transport,
  1832. xs_addr(xprt)->sa_family, SOCK_DGRAM,
  1833. IPPROTO_UDP, false);
  1834. if (IS_ERR(sock))
  1835. goto out;
  1836. dprintk("RPC: worker connecting xprt %p via %s to "
  1837. "%s (port %s)\n", xprt,
  1838. xprt->address_strings[RPC_DISPLAY_PROTO],
  1839. xprt->address_strings[RPC_DISPLAY_ADDR],
  1840. xprt->address_strings[RPC_DISPLAY_PORT]);
  1841. xs_udp_finish_connecting(xprt, sock);
  1842. trace_rpc_socket_connect(xprt, sock, 0);
  1843. status = 0;
  1844. out:
  1845. xprt_unlock_connect(xprt, transport);
  1846. xprt_clear_connecting(xprt);
  1847. xprt_wake_pending_tasks(xprt, status);
  1848. }
  1849. /**
  1850. * xs_tcp_shutdown - gracefully shut down a TCP socket
  1851. * @xprt: transport
  1852. *
  1853. * Initiates a graceful shutdown of the TCP socket by calling the
  1854. * equivalent of shutdown(SHUT_RDWR);
  1855. */
  1856. static void xs_tcp_shutdown(struct rpc_xprt *xprt)
  1857. {
  1858. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  1859. struct socket *sock = transport->sock;
  1860. if (sock == NULL)
  1861. return;
  1862. if (xprt_connected(xprt)) {
  1863. kernel_sock_shutdown(sock, SHUT_RDWR);
  1864. trace_rpc_socket_shutdown(xprt, sock);
  1865. } else
  1866. xs_reset_transport(transport);
  1867. }
  1868. static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
  1869. {
  1870. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  1871. int ret = -ENOTCONN;
  1872. if (!transport->inet) {
  1873. struct sock *sk = sock->sk;
  1874. unsigned int keepidle = xprt->timeout->to_initval / HZ;
  1875. unsigned int keepcnt = xprt->timeout->to_retries + 1;
  1876. unsigned int opt_on = 1;
  1877. unsigned int timeo;
  1878. /* TCP Keepalive options */
  1879. kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
  1880. (char *)&opt_on, sizeof(opt_on));
  1881. kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
  1882. (char *)&keepidle, sizeof(keepidle));
  1883. kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
  1884. (char *)&keepidle, sizeof(keepidle));
  1885. kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
  1886. (char *)&keepcnt, sizeof(keepcnt));
  1887. /* TCP user timeout (see RFC5482) */
  1888. timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
  1889. (xprt->timeout->to_retries + 1);
  1890. kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
  1891. (char *)&timeo, sizeof(timeo));
  1892. write_lock_bh(&sk->sk_callback_lock);
  1893. xs_save_old_callbacks(transport, sk);
  1894. sk->sk_user_data = xprt;
  1895. sk->sk_data_ready = xs_tcp_data_ready;
  1896. sk->sk_state_change = xs_tcp_state_change;
  1897. sk->sk_write_space = xs_tcp_write_space;
  1898. sk->sk_error_report = xs_error_report;
  1899. sk->sk_allocation = GFP_NOIO;
  1900. /* socket options */
  1901. sock_reset_flag(sk, SOCK_LINGER);
  1902. tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
  1903. xprt_clear_connected(xprt);
  1904. /* Reset to new socket */
  1905. transport->sock = sock;
  1906. transport->inet = sk;
  1907. write_unlock_bh(&sk->sk_callback_lock);
  1908. }
  1909. if (!xprt_bound(xprt))
  1910. goto out;
  1911. xs_set_memalloc(xprt);
  1912. /* Tell the socket layer to start connecting... */
  1913. xprt->stat.connect_count++;
  1914. xprt->stat.connect_start = jiffies;
  1915. set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
  1916. ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
  1917. switch (ret) {
  1918. case 0:
1919. xs_set_srcport(transport, sock);
/* fall through */
  1920. case -EINPROGRESS:
  1921. /* SYN_SENT! */
  1922. if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
  1923. xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
  1924. }
  1925. out:
  1926. return ret;
  1927. }
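/*
 * Worked example (editorial note): with xs_tcp_default_timeout below
 * (to_initval = 60 * HZ, to_retries = 2) the socket options set above are
 *
 * TCP_KEEPIDLE = TCP_KEEPINTVL = 60 s  (keepidle is reused for both)
 * TCP_KEEPCNT = 3 probes               (to_retries + 1)
 * TCP_USER_TIMEOUT = 60000 ms * 3 = 180000 ms
 *
 * so a dead peer is detected after roughly three minutes, either by the
 * keepalive probes or by the RFC 5482 user timeout on unacknowledged data.
 */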
  1928. /**
  1929. * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
1930. * @work: work item embedded in the sock_xprt being connected
1931. *
 * Invoked from the rpciod workqueue.
  1932. */
  1933. static void xs_tcp_setup_socket(struct work_struct *work)
  1934. {
  1935. struct sock_xprt *transport =
  1936. container_of(work, struct sock_xprt, connect_worker.work);
  1937. struct socket *sock = transport->sock;
  1938. struct rpc_xprt *xprt = &transport->xprt;
  1939. int status = -EIO;
  1940. if (!sock) {
  1941. sock = xs_create_sock(xprt, transport,
  1942. xs_addr(xprt)->sa_family, SOCK_STREAM,
  1943. IPPROTO_TCP, true);
  1944. if (IS_ERR(sock)) {
  1945. status = PTR_ERR(sock);
  1946. goto out;
  1947. }
  1948. }
  1949. dprintk("RPC: worker connecting xprt %p via %s to "
  1950. "%s (port %s)\n", xprt,
  1951. xprt->address_strings[RPC_DISPLAY_PROTO],
  1952. xprt->address_strings[RPC_DISPLAY_ADDR],
  1953. xprt->address_strings[RPC_DISPLAY_PORT]);
  1954. status = xs_tcp_finish_connecting(xprt, sock);
  1955. trace_rpc_socket_connect(xprt, sock, status);
  1956. dprintk("RPC: %p connect status %d connected %d sock state %d\n",
  1957. xprt, -status, xprt_connected(xprt),
  1958. sock->sk->sk_state);
  1959. switch (status) {
  1960. default:
  1961. printk("%s: connect returned unhandled error %d\n",
  1962. __func__, status);
  1963. case -EADDRNOTAVAIL:
  1964. /* We're probably in TIME_WAIT. Get rid of existing socket,
  1965. * and retry
  1966. */
  1967. xs_tcp_force_close(xprt);
  1968. break;
  1969. case 0:
  1970. case -EINPROGRESS:
  1971. case -EALREADY:
  1972. xprt_unlock_connect(xprt, transport);
  1973. return;
  1974. case -EINVAL:
  1975. /* Happens, for instance, if the user specified a link
  1976. * local IPv6 address without a scope-id.
  1977. */
  1978. case -ECONNREFUSED:
  1979. case -ECONNRESET:
  1980. case -ENETUNREACH:
  1981. case -EADDRINUSE:
  1982. case -ENOBUFS:
  1983. /* retry with existing socket, after a delay */
  1984. xs_tcp_force_close(xprt);
  1985. goto out;
  1986. }
  1987. status = -EAGAIN;
  1988. out:
  1989. xprt_unlock_connect(xprt, transport);
  1990. xprt_clear_connecting(xprt);
  1991. xprt_wake_pending_tasks(xprt, status);
  1992. }
  1993. /**
  1994. * xs_connect - connect a socket to a remote endpoint
  1995. * @xprt: pointer to transport structure
  1996. * @task: address of RPC task that manages state of connect request
  1997. *
  1998. * TCP: If the remote end dropped the connection, delay reconnecting.
  1999. *
  2000. * UDP socket connects are synchronous, but we use a work queue anyway
  2001. * to guarantee that even unprivileged user processes can set up a
  2002. * socket on a privileged port.
  2003. *
  2004. * If a UDP socket connect fails, the delay behavior here prevents
  2005. * retry floods (hard mounts).
  2006. */
  2007. static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
  2008. {
  2009. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  2010. WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
  2011. if (transport->sock != NULL) {
  2012. dprintk("RPC: xs_connect delayed xprt %p for %lu "
  2013. "seconds\n",
  2014. xprt, xprt->reestablish_timeout / HZ);
  2015. /* Start by resetting any existing state */
  2016. xs_reset_transport(transport);
  2017. queue_delayed_work(rpciod_workqueue,
  2018. &transport->connect_worker,
  2019. xprt->reestablish_timeout);
  2020. xprt->reestablish_timeout <<= 1;
  2021. if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
  2022. xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
  2023. if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
  2024. xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
  2025. } else {
  2026. dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
  2027. queue_delayed_work(rpciod_workqueue,
  2028. &transport->connect_worker, 0);
  2029. }
  2030. }
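/*
 * Worked example (editorial note): the delayed path above doubles
 * xprt->reestablish_timeout on every attempt and clamps it to the
 * [XS_TCP_INIT_REEST_TO, XS_TCP_MAX_REEST_TO] window, so with the usual
 * 3 second / 5 minute constants a flapping server sees reconnect delays of
 * roughly 3 s, 6 s, 12 s, ... capped at 300 s. The delay is reset to zero
 * as soon as useful data arrives (see xs_tcp_data_ready() above).
 */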
  2031. /**
2032. * xs_local_print_stats - display AF_LOCAL socket-specific stats
  2033. * @xprt: rpc_xprt struct containing statistics
  2034. * @seq: output file
  2035. *
  2036. */
  2037. static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
  2038. {
  2039. long idle_time = 0;
  2040. if (xprt_connected(xprt))
  2041. idle_time = (long)(jiffies - xprt->last_used) / HZ;
  2042. seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
  2043. "%llu %llu %lu %llu %llu\n",
  2044. xprt->stat.bind_count,
  2045. xprt->stat.connect_count,
  2046. xprt->stat.connect_time,
  2047. idle_time,
  2048. xprt->stat.sends,
  2049. xprt->stat.recvs,
  2050. xprt->stat.bad_xids,
  2051. xprt->stat.req_u,
  2052. xprt->stat.bklog_u,
  2053. xprt->stat.max_slots,
  2054. xprt->stat.sending_u,
  2055. xprt->stat.pending_u);
  2056. }
  2057. /**
2058. * xs_udp_print_stats - display UDP socket-specific stats
  2059. * @xprt: rpc_xprt struct containing statistics
  2060. * @seq: output file
  2061. *
  2062. */
  2063. static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
  2064. {
  2065. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  2066. seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
  2067. "%lu %llu %llu\n",
  2068. transport->srcport,
  2069. xprt->stat.bind_count,
  2070. xprt->stat.sends,
  2071. xprt->stat.recvs,
  2072. xprt->stat.bad_xids,
  2073. xprt->stat.req_u,
  2074. xprt->stat.bklog_u,
  2075. xprt->stat.max_slots,
  2076. xprt->stat.sending_u,
  2077. xprt->stat.pending_u);
  2078. }
  2079. /**
2080. * xs_tcp_print_stats - display TCP socket-specific stats
  2081. * @xprt: rpc_xprt struct containing statistics
  2082. * @seq: output file
  2083. *
  2084. */
  2085. static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
  2086. {
  2087. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  2088. long idle_time = 0;
  2089. if (xprt_connected(xprt))
  2090. idle_time = (long)(jiffies - xprt->last_used) / HZ;
  2091. seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
  2092. "%llu %llu %lu %llu %llu\n",
  2093. transport->srcport,
  2094. xprt->stat.bind_count,
  2095. xprt->stat.connect_count,
  2096. xprt->stat.connect_time,
  2097. idle_time,
  2098. xprt->stat.sends,
  2099. xprt->stat.recvs,
  2100. xprt->stat.bad_xids,
  2101. xprt->stat.req_u,
  2102. xprt->stat.bklog_u,
  2103. xprt->stat.max_slots,
  2104. xprt->stat.sending_u,
  2105. xprt->stat.pending_u);
  2106. }
  2107. /*
2108. * Allocate a page as a scratch buffer for the rpc code. We use a whole page
2109. * rather than a kmalloc like rpc_malloc does because we want to be able to
2110. * use the server-side send routines.
  2111. */
  2112. static void *bc_malloc(struct rpc_task *task, size_t size)
  2113. {
  2114. struct page *page;
  2115. struct rpc_buffer *buf;
  2116. WARN_ON_ONCE(size > PAGE_SIZE - sizeof(struct rpc_buffer));
  2117. if (size > PAGE_SIZE - sizeof(struct rpc_buffer))
  2118. return NULL;
  2119. page = alloc_page(GFP_KERNEL);
  2120. if (!page)
  2121. return NULL;
  2122. buf = page_address(page);
  2123. buf->len = PAGE_SIZE;
  2124. return buf->data;
  2125. }
  2126. /*
  2127. * Free the space allocated in the bc_alloc routine
  2128. */
  2129. static void bc_free(void *buffer)
  2130. {
  2131. struct rpc_buffer *buf;
  2132. if (!buffer)
  2133. return;
  2134. buf = container_of(buffer, struct rpc_buffer, data);
  2135. free_page((unsigned long)buf);
  2136. }
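/*
 * Layout sketch (editorial note) for the page handed out by bc_malloc():
 *
 * page start -> struct rpc_buffer { size_t len; char data[]; }
 *                                               ^-- pointer returned to the
 *                                                   caller (buf->data)
 *
 * buf->len is set to the whole PAGE_SIZE, requests larger than
 * PAGE_SIZE - sizeof(struct rpc_buffer) are refused, and bc_free() recovers
 * the enclosing struct (and therefore the page) with container_of() before
 * calling free_page().
 */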
  2137. /*
2138. * Use the svc_sock to send the callback. Must be called with the
2139. * backchannel xprt's xpt_mutex held. Borrows heavily from svc_tcp_sendto
 * and xs_tcp_send_request.
  2140. */
  2141. static int bc_sendto(struct rpc_rqst *req)
  2142. {
  2143. int len;
  2144. struct xdr_buf *xbufp = &req->rq_snd_buf;
  2145. struct rpc_xprt *xprt = req->rq_xprt;
  2146. struct sock_xprt *transport =
  2147. container_of(xprt, struct sock_xprt, xprt);
  2148. struct socket *sock = transport->sock;
  2149. unsigned long headoff;
  2150. unsigned long tailoff;
  2151. xs_encode_stream_record_marker(xbufp);
  2152. tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
  2153. headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
  2154. len = svc_send_common(sock, xbufp,
  2155. virt_to_page(xbufp->head[0].iov_base), headoff,
  2156. xbufp->tail[0].iov_base, tailoff);
  2157. if (len != xbufp->len) {
  2158. printk(KERN_NOTICE "Error sending entire callback!\n");
  2159. len = -EAGAIN;
  2160. }
  2161. return len;
  2162. }
  2163. /*
  2164. * The send routine. Borrows from svc_send
  2165. */
  2166. static int bc_send_request(struct rpc_task *task)
  2167. {
  2168. struct rpc_rqst *req = task->tk_rqstp;
  2169. struct svc_xprt *xprt;
  2170. u32 len;
  2171. dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
  2172. /*
  2173. * Get the server socket associated with this callback xprt
  2174. */
  2175. xprt = req->rq_xprt->bc_xprt;
  2176. /*
  2177. * Grab the mutex to serialize data as the connection is shared
  2178. * with the fore channel
  2179. */
  2180. if (!mutex_trylock(&xprt->xpt_mutex)) {
  2181. rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
  2182. if (!mutex_trylock(&xprt->xpt_mutex))
  2183. return -EAGAIN;
  2184. rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
  2185. }
  2186. if (test_bit(XPT_DEAD, &xprt->xpt_flags))
  2187. len = -ENOTCONN;
  2188. else
  2189. len = bc_sendto(req);
  2190. mutex_unlock(&xprt->xpt_mutex);
  2191. if (len > 0)
  2192. len = 0;
  2193. return len;
  2194. }
  2195. /*
  2196. * The close routine. Since this is client initiated, we do nothing
  2197. */
  2198. static void bc_close(struct rpc_xprt *xprt)
  2199. {
  2200. }
  2201. /*
  2202. * The xprt destroy routine. Again, because this connection is client
  2203. * initiated, we do nothing
  2204. */
  2205. static void bc_destroy(struct rpc_xprt *xprt)
  2206. {
  2207. dprintk("RPC: bc_destroy xprt %p\n", xprt);
  2208. xs_xprt_free(xprt);
  2209. module_put(THIS_MODULE);
  2210. }
  2211. static struct rpc_xprt_ops xs_local_ops = {
  2212. .reserve_xprt = xprt_reserve_xprt,
  2213. .release_xprt = xs_tcp_release_xprt,
  2214. .alloc_slot = xprt_alloc_slot,
  2215. .rpcbind = xs_local_rpcbind,
  2216. .set_port = xs_local_set_port,
  2217. .connect = xs_local_connect,
  2218. .buf_alloc = rpc_malloc,
  2219. .buf_free = rpc_free,
  2220. .send_request = xs_local_send_request,
  2221. .set_retrans_timeout = xprt_set_retrans_timeout_def,
  2222. .close = xs_close,
  2223. .destroy = xs_destroy,
  2224. .print_stats = xs_local_print_stats,
  2225. .enable_swap = xs_enable_swap,
  2226. .disable_swap = xs_disable_swap,
  2227. };
  2228. static struct rpc_xprt_ops xs_udp_ops = {
  2229. .set_buffer_size = xs_udp_set_buffer_size,
  2230. .reserve_xprt = xprt_reserve_xprt_cong,
  2231. .release_xprt = xprt_release_xprt_cong,
  2232. .alloc_slot = xprt_alloc_slot,
  2233. .rpcbind = rpcb_getport_async,
  2234. .set_port = xs_set_port,
  2235. .connect = xs_connect,
  2236. .buf_alloc = rpc_malloc,
  2237. .buf_free = rpc_free,
  2238. .send_request = xs_udp_send_request,
  2239. .set_retrans_timeout = xprt_set_retrans_timeout_rtt,
  2240. .timer = xs_udp_timer,
  2241. .release_request = xprt_release_rqst_cong,
  2242. .close = xs_close,
  2243. .destroy = xs_destroy,
  2244. .print_stats = xs_udp_print_stats,
  2245. .enable_swap = xs_enable_swap,
  2246. .disable_swap = xs_disable_swap,
  2247. .inject_disconnect = xs_inject_disconnect,
  2248. };
  2249. static struct rpc_xprt_ops xs_tcp_ops = {
  2250. .reserve_xprt = xprt_reserve_xprt,
  2251. .release_xprt = xs_tcp_release_xprt,
  2252. .alloc_slot = xprt_lock_and_alloc_slot,
  2253. .rpcbind = rpcb_getport_async,
  2254. .set_port = xs_set_port,
  2255. .connect = xs_connect,
  2256. .buf_alloc = rpc_malloc,
  2257. .buf_free = rpc_free,
  2258. .send_request = xs_tcp_send_request,
  2259. .set_retrans_timeout = xprt_set_retrans_timeout_def,
  2260. .close = xs_tcp_shutdown,
  2261. .destroy = xs_destroy,
  2262. .print_stats = xs_tcp_print_stats,
  2263. .enable_swap = xs_enable_swap,
  2264. .disable_swap = xs_disable_swap,
  2265. .inject_disconnect = xs_inject_disconnect,
  2266. #ifdef CONFIG_SUNRPC_BACKCHANNEL
  2267. .bc_setup = xprt_setup_bc,
  2268. .bc_up = xs_tcp_bc_up,
  2269. .bc_free_rqst = xprt_free_bc_rqst,
  2270. .bc_destroy = xprt_destroy_bc,
  2271. #endif
  2272. };
  2273. /*
  2274. * The rpc_xprt_ops for the server backchannel
  2275. */
  2276. static struct rpc_xprt_ops bc_tcp_ops = {
  2277. .reserve_xprt = xprt_reserve_xprt,
  2278. .release_xprt = xprt_release_xprt,
  2279. .alloc_slot = xprt_alloc_slot,
  2280. .buf_alloc = bc_malloc,
  2281. .buf_free = bc_free,
  2282. .send_request = bc_send_request,
  2283. .set_retrans_timeout = xprt_set_retrans_timeout_def,
  2284. .close = bc_close,
  2285. .destroy = bc_destroy,
  2286. .print_stats = xs_tcp_print_stats,
  2287. .enable_swap = xs_enable_swap,
  2288. .disable_swap = xs_disable_swap,
  2289. .inject_disconnect = xs_inject_disconnect,
  2290. };
  2291. static int xs_init_anyaddr(const int family, struct sockaddr *sap)
  2292. {
  2293. static const struct sockaddr_in sin = {
  2294. .sin_family = AF_INET,
  2295. .sin_addr.s_addr = htonl(INADDR_ANY),
  2296. };
  2297. static const struct sockaddr_in6 sin6 = {
  2298. .sin6_family = AF_INET6,
  2299. .sin6_addr = IN6ADDR_ANY_INIT,
  2300. };
  2301. switch (family) {
  2302. case AF_LOCAL:
  2303. break;
  2304. case AF_INET:
  2305. memcpy(sap, &sin, sizeof(sin));
  2306. break;
  2307. case AF_INET6:
  2308. memcpy(sap, &sin6, sizeof(sin6));
  2309. break;
  2310. default:
  2311. dprintk("RPC: %s: Bad address family\n", __func__);
  2312. return -EAFNOSUPPORT;
  2313. }
  2314. return 0;
  2315. }
  2316. static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
  2317. unsigned int slot_table_size,
  2318. unsigned int max_slot_table_size)
  2319. {
  2320. struct rpc_xprt *xprt;
  2321. struct sock_xprt *new;
  2322. if (args->addrlen > sizeof(xprt->addr)) {
  2323. dprintk("RPC: xs_setup_xprt: address too large\n");
  2324. return ERR_PTR(-EBADF);
  2325. }
  2326. xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
  2327. max_slot_table_size);
  2328. if (xprt == NULL) {
  2329. dprintk("RPC: xs_setup_xprt: couldn't allocate "
  2330. "rpc_xprt\n");
  2331. return ERR_PTR(-ENOMEM);
  2332. }
  2333. new = container_of(xprt, struct sock_xprt, xprt);
  2334. memcpy(&xprt->addr, args->dstaddr, args->addrlen);
  2335. xprt->addrlen = args->addrlen;
  2336. if (args->srcaddr)
  2337. memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
  2338. else {
  2339. int err;
  2340. err = xs_init_anyaddr(args->dstaddr->sa_family,
  2341. (struct sockaddr *)&new->srcaddr);
  2342. if (err != 0) {
  2343. xprt_free(xprt);
  2344. return ERR_PTR(err);
  2345. }
  2346. }
  2347. return xprt;
  2348. }
  2349. static const struct rpc_timeout xs_local_default_timeout = {
  2350. .to_initval = 10 * HZ,
  2351. .to_maxval = 10 * HZ,
  2352. .to_retries = 2,
  2353. };
  2354. /**
  2355. * xs_setup_local - Set up transport to use an AF_LOCAL socket
  2356. * @args: rpc transport creation arguments
  2357. *
  2358. * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
  2359. */
  2360. static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
  2361. {
  2362. struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
  2363. struct sock_xprt *transport;
  2364. struct rpc_xprt *xprt;
  2365. struct rpc_xprt *ret;
  2366. xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
  2367. xprt_max_tcp_slot_table_entries);
  2368. if (IS_ERR(xprt))
  2369. return xprt;
  2370. transport = container_of(xprt, struct sock_xprt, xprt);
  2371. xprt->prot = 0;
  2372. xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
  2373. xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
  2374. xprt->bind_timeout = XS_BIND_TO;
  2375. xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
  2376. xprt->idle_timeout = XS_IDLE_DISC_TO;
  2377. xprt->ops = &xs_local_ops;
  2378. xprt->timeout = &xs_local_default_timeout;
  2379. INIT_DELAYED_WORK(&transport->connect_worker,
  2380. xs_dummy_setup_socket);
  2381. switch (sun->sun_family) {
  2382. case AF_LOCAL:
  2383. if (sun->sun_path[0] != '/') {
  2384. dprintk("RPC: bad AF_LOCAL address: %s\n",
  2385. sun->sun_path);
  2386. ret = ERR_PTR(-EINVAL);
  2387. goto out_err;
  2388. }
  2389. xprt_set_bound(xprt);
  2390. xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
  2391. ret = ERR_PTR(xs_local_setup_socket(transport));
  2392. if (ret)
  2393. goto out_err;
  2394. break;
  2395. default:
  2396. ret = ERR_PTR(-EAFNOSUPPORT);
  2397. goto out_err;
  2398. }
  2399. dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
  2400. xprt->address_strings[RPC_DISPLAY_ADDR]);
  2401. if (try_module_get(THIS_MODULE))
  2402. return xprt;
  2403. ret = ERR_PTR(-EINVAL);
  2404. out_err:
  2405. xs_xprt_free(xprt);
  2406. return ret;
  2407. }
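/*
 * Minimal caller sketch (editorial note, hypothetical code): the AF_LOCAL
 * transport above is selected with the XPRT_TRANSPORT_LOCAL ident (see
 * xs_local_transport below) and only accepts an absolute filesystem path;
 * sun_path values that do not begin with '/' (including abstract socket
 * names) are rejected with -EINVAL by the check in xs_setup_local().
 */
#if 0
struct sockaddr_un example_rpc_local_addr = {
.sun_family = AF_LOCAL,
.sun_path = "/var/run/example-rpc.sock", /* must begin with '/' */
};
#endif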
  2408. static const struct rpc_timeout xs_udp_default_timeout = {
  2409. .to_initval = 5 * HZ,
  2410. .to_maxval = 30 * HZ,
  2411. .to_increment = 5 * HZ,
  2412. .to_retries = 5,
  2413. };
  2414. /**
  2415. * xs_setup_udp - Set up transport to use a UDP socket
  2416. * @args: rpc transport creation arguments
  2417. *
  2418. */
  2419. static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
  2420. {
  2421. struct sockaddr *addr = args->dstaddr;
  2422. struct rpc_xprt *xprt;
  2423. struct sock_xprt *transport;
  2424. struct rpc_xprt *ret;
  2425. xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
  2426. xprt_udp_slot_table_entries);
  2427. if (IS_ERR(xprt))
  2428. return xprt;
  2429. transport = container_of(xprt, struct sock_xprt, xprt);
  2430. xprt->prot = IPPROTO_UDP;
  2431. xprt->tsh_size = 0;
  2432. /* XXX: header size can vary due to auth type, IPv6, etc. */
  2433. xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
  2434. xprt->bind_timeout = XS_BIND_TO;
  2435. xprt->reestablish_timeout = XS_UDP_REEST_TO;
  2436. xprt->idle_timeout = XS_IDLE_DISC_TO;
  2437. xprt->ops = &xs_udp_ops;
  2438. xprt->timeout = &xs_udp_default_timeout;
  2439. switch (addr->sa_family) {
  2440. case AF_INET:
  2441. if (((struct sockaddr_in *)addr)->sin_port != htons(0))
  2442. xprt_set_bound(xprt);
  2443. INIT_DELAYED_WORK(&transport->connect_worker,
  2444. xs_udp_setup_socket);
  2445. xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
  2446. break;
  2447. case AF_INET6:
  2448. if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
  2449. xprt_set_bound(xprt);
  2450. INIT_DELAYED_WORK(&transport->connect_worker,
  2451. xs_udp_setup_socket);
  2452. xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
  2453. break;
  2454. default:
  2455. ret = ERR_PTR(-EAFNOSUPPORT);
  2456. goto out_err;
  2457. }
  2458. if (xprt_bound(xprt))
  2459. dprintk("RPC: set up xprt to %s (port %s) via %s\n",
  2460. xprt->address_strings[RPC_DISPLAY_ADDR],
  2461. xprt->address_strings[RPC_DISPLAY_PORT],
  2462. xprt->address_strings[RPC_DISPLAY_PROTO]);
  2463. else
  2464. dprintk("RPC: set up xprt to %s (autobind) via %s\n",
  2465. xprt->address_strings[RPC_DISPLAY_ADDR],
  2466. xprt->address_strings[RPC_DISPLAY_PROTO]);
  2467. if (try_module_get(THIS_MODULE))
  2468. return xprt;
  2469. ret = ERR_PTR(-EINVAL);
  2470. out_err:
  2471. xs_xprt_free(xprt);
  2472. return ret;
  2473. }
  2474. static const struct rpc_timeout xs_tcp_default_timeout = {
  2475. .to_initval = 60 * HZ,
  2476. .to_maxval = 60 * HZ,
  2477. .to_retries = 2,
  2478. };
  2479. /**
  2480. * xs_setup_tcp - Set up transport to use a TCP socket
  2481. * @args: rpc transport creation arguments
  2482. *
  2483. */
  2484. static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
  2485. {
  2486. struct sockaddr *addr = args->dstaddr;
  2487. struct rpc_xprt *xprt;
  2488. struct sock_xprt *transport;
  2489. struct rpc_xprt *ret;
  2490. unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
  2491. if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
  2492. max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
  2493. xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
  2494. max_slot_table_size);
  2495. if (IS_ERR(xprt))
  2496. return xprt;
  2497. transport = container_of(xprt, struct sock_xprt, xprt);
  2498. xprt->prot = IPPROTO_TCP;
  2499. xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
  2500. xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
  2501. xprt->bind_timeout = XS_BIND_TO;
  2502. xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
  2503. xprt->idle_timeout = XS_IDLE_DISC_TO;
  2504. xprt->ops = &xs_tcp_ops;
  2505. xprt->timeout = &xs_tcp_default_timeout;
  2506. switch (addr->sa_family) {
  2507. case AF_INET:
  2508. if (((struct sockaddr_in *)addr)->sin_port != htons(0))
  2509. xprt_set_bound(xprt);
  2510. INIT_DELAYED_WORK(&transport->connect_worker,
  2511. xs_tcp_setup_socket);
  2512. xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
  2513. break;
  2514. case AF_INET6:
  2515. if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
  2516. xprt_set_bound(xprt);
  2517. INIT_DELAYED_WORK(&transport->connect_worker,
  2518. xs_tcp_setup_socket);
  2519. xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
  2520. break;
  2521. default:
  2522. ret = ERR_PTR(-EAFNOSUPPORT);
  2523. goto out_err;
  2524. }
  2525. if (xprt_bound(xprt))
  2526. dprintk("RPC: set up xprt to %s (port %s) via %s\n",
  2527. xprt->address_strings[RPC_DISPLAY_ADDR],
  2528. xprt->address_strings[RPC_DISPLAY_PORT],
  2529. xprt->address_strings[RPC_DISPLAY_PROTO]);
  2530. else
  2531. dprintk("RPC: set up xprt to %s (autobind) via %s\n",
  2532. xprt->address_strings[RPC_DISPLAY_ADDR],
  2533. xprt->address_strings[RPC_DISPLAY_PROTO]);
  2534. if (try_module_get(THIS_MODULE))
  2535. return xprt;
  2536. ret = ERR_PTR(-EINVAL);
  2537. out_err:
  2538. xs_xprt_free(xprt);
  2539. return ret;
  2540. }
  2541. /**
  2542. * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
  2543. * @args: rpc transport creation arguments
  2544. *
  2545. */
  2546. static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
  2547. {
  2548. struct sockaddr *addr = args->dstaddr;
  2549. struct rpc_xprt *xprt;
  2550. struct sock_xprt *transport;
  2551. struct svc_sock *bc_sock;
  2552. struct rpc_xprt *ret;
  2553. xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
  2554. xprt_tcp_slot_table_entries);
  2555. if (IS_ERR(xprt))
  2556. return xprt;
  2557. transport = container_of(xprt, struct sock_xprt, xprt);
  2558. xprt->prot = IPPROTO_TCP;
  2559. xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
  2560. xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
  2561. xprt->timeout = &xs_tcp_default_timeout;
  2562. /* backchannel */
  2563. xprt_set_bound(xprt);
  2564. xprt->bind_timeout = 0;
  2565. xprt->reestablish_timeout = 0;
  2566. xprt->idle_timeout = 0;
  2567. xprt->ops = &bc_tcp_ops;
  2568. switch (addr->sa_family) {
  2569. case AF_INET:
  2570. xs_format_peer_addresses(xprt, "tcp",
  2571. RPCBIND_NETID_TCP);
  2572. break;
  2573. case AF_INET6:
  2574. xs_format_peer_addresses(xprt, "tcp",
  2575. RPCBIND_NETID_TCP6);
  2576. break;
  2577. default:
  2578. ret = ERR_PTR(-EAFNOSUPPORT);
  2579. goto out_err;
  2580. }
  2581. dprintk("RPC: set up xprt to %s (port %s) via %s\n",
  2582. xprt->address_strings[RPC_DISPLAY_ADDR],
  2583. xprt->address_strings[RPC_DISPLAY_PORT],
  2584. xprt->address_strings[RPC_DISPLAY_PROTO]);
  2585. /*
  2586. * Once we've associated a backchannel xprt with a connection,
  2587. * we want to keep it around as long as the connection lasts,
  2588. * in case we need to start using it for a backchannel again;
  2589. * this reference won't be dropped until bc_xprt is destroyed.
  2590. */
  2591. xprt_get(xprt);
  2592. args->bc_xprt->xpt_bc_xprt = xprt;
  2593. xprt->bc_xprt = args->bc_xprt;
  2594. bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
  2595. transport->sock = bc_sock->sk_sock;
  2596. transport->inet = bc_sock->sk_sk;
  2597. /*
  2598. * Since we don't want connections for the backchannel, we set
  2599. * the xprt status to connected
  2600. */
  2601. xprt_set_connected(xprt);
  2602. if (try_module_get(THIS_MODULE))
  2603. return xprt;
  2604. args->bc_xprt->xpt_bc_xprt = NULL;
  2605. xprt_put(xprt);
  2606. ret = ERR_PTR(-EINVAL);
  2607. out_err:
  2608. xs_xprt_free(xprt);
  2609. return ret;
  2610. }
  2611. static struct xprt_class xs_local_transport = {
  2612. .list = LIST_HEAD_INIT(xs_local_transport.list),
  2613. .name = "named UNIX socket",
  2614. .owner = THIS_MODULE,
  2615. .ident = XPRT_TRANSPORT_LOCAL,
  2616. .setup = xs_setup_local,
  2617. };
  2618. static struct xprt_class xs_udp_transport = {
  2619. .list = LIST_HEAD_INIT(xs_udp_transport.list),
  2620. .name = "udp",
  2621. .owner = THIS_MODULE,
  2622. .ident = XPRT_TRANSPORT_UDP,
  2623. .setup = xs_setup_udp,
  2624. };
  2625. static struct xprt_class xs_tcp_transport = {
  2626. .list = LIST_HEAD_INIT(xs_tcp_transport.list),
  2627. .name = "tcp",
  2628. .owner = THIS_MODULE,
  2629. .ident = XPRT_TRANSPORT_TCP,
  2630. .setup = xs_setup_tcp,
  2631. };
  2632. static struct xprt_class xs_bc_tcp_transport = {
  2633. .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list),
  2634. .name = "tcp NFSv4.1 backchannel",
  2635. .owner = THIS_MODULE,
  2636. .ident = XPRT_TRANSPORT_BC_TCP,
  2637. .setup = xs_setup_bc_tcp,
  2638. };
  2639. /**
  2640. * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
  2641. *
  2642. */
  2643. int init_socket_xprt(void)
  2644. {
  2645. #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
  2646. if (!sunrpc_table_header)
  2647. sunrpc_table_header = register_sysctl_table(sunrpc_table);
  2648. #endif
  2649. xprt_register_transport(&xs_local_transport);
  2650. xprt_register_transport(&xs_udp_transport);
  2651. xprt_register_transport(&xs_tcp_transport);
  2652. xprt_register_transport(&xs_bc_tcp_transport);
  2653. return 0;
  2654. }
  2655. /**
  2656. * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
  2657. *
  2658. */
  2659. void cleanup_socket_xprt(void)
  2660. {
  2661. #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
  2662. if (sunrpc_table_header) {
  2663. unregister_sysctl_table(sunrpc_table_header);
  2664. sunrpc_table_header = NULL;
  2665. }
  2666. #endif
  2667. xprt_unregister_transport(&xs_local_transport);
  2668. xprt_unregister_transport(&xs_udp_transport);
  2669. xprt_unregister_transport(&xs_tcp_transport);
  2670. xprt_unregister_transport(&xs_bc_tcp_transport);
  2671. }
  2672. static int param_set_uint_minmax(const char *val,
  2673. const struct kernel_param *kp,
  2674. unsigned int min, unsigned int max)
  2675. {
  2676. unsigned int num;
  2677. int ret;
  2678. if (!val)
  2679. return -EINVAL;
  2680. ret = kstrtouint(val, 0, &num);
2681. if (ret || num < min || num > max)
  2682. return -EINVAL;
  2683. *((unsigned int *)kp->arg) = num;
  2684. return 0;
  2685. }
  2686. static int param_set_portnr(const char *val, const struct kernel_param *kp)
  2687. {
  2688. return param_set_uint_minmax(val, kp,
  2689. RPC_MIN_RESVPORT,
  2690. RPC_MAX_RESVPORT);
  2691. }
  2692. static const struct kernel_param_ops param_ops_portnr = {
  2693. .set = param_set_portnr,
  2694. .get = param_get_uint,
  2695. };
  2696. #define param_check_portnr(name, p) \
  2697. __param_check(name, p, unsigned int);
  2698. module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
  2699. module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
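/*
 * Usage note (editorial): because these are 0644 module parameters of the
 * sunrpc module, the reserved-port window can be set at load time or
 * adjusted later through sysfs, e.g.
 *
 * modprobe sunrpc min_resvport=800 max_resvport=1010
 * echo 900 > /sys/module/sunrpc/parameters/min_resvport
 *
 * param_set_portnr() above rejects values outside the
 * RPC_MIN_RESVPORT..RPC_MAX_RESVPORT range.
 */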
  2700. static int param_set_slot_table_size(const char *val,
  2701. const struct kernel_param *kp)
  2702. {
  2703. return param_set_uint_minmax(val, kp,
  2704. RPC_MIN_SLOT_TABLE,
  2705. RPC_MAX_SLOT_TABLE);
  2706. }
  2707. static const struct kernel_param_ops param_ops_slot_table_size = {
  2708. .set = param_set_slot_table_size,
  2709. .get = param_get_uint,
  2710. };
  2711. #define param_check_slot_table_size(name, p) \
  2712. __param_check(name, p, unsigned int);
  2713. static int param_set_max_slot_table_size(const char *val,
  2714. const struct kernel_param *kp)
  2715. {
  2716. return param_set_uint_minmax(val, kp,
  2717. RPC_MIN_SLOT_TABLE,
  2718. RPC_MAX_SLOT_TABLE_LIMIT);
  2719. }
  2720. static const struct kernel_param_ops param_ops_max_slot_table_size = {
  2721. .set = param_set_max_slot_table_size,
  2722. .get = param_get_uint,
  2723. };
  2724. #define param_check_max_slot_table_size(name, p) \
  2725. __param_check(name, p, unsigned int);
  2726. module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
  2727. slot_table_size, 0644);
  2728. module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
  2729. max_slot_table_size, 0644);
  2730. module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
  2731. slot_table_size, 0644);