/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A BPF sock_map is used to store sock objects. This is primarily used
 * for doing socket redirect with BPF helper routines.
 *
 * A sock map may have BPF programs attached to it; currently two are
 * supported: a program used to parse packets and a program to provide
 * a verdict and redirect decision on the packet. Any programs attached
 * to a sock map are inherited by sock objects when they are added to
 * the map. If no BPF programs are attached the sock object may only be
 * used for sock redirect.
 *
 * A sock object may be in multiple maps, but can only inherit a single
 * parse or verdict program. If adding a sock object to a map would result
 * in having multiple parsing programs the update will return an EBUSY error.
 *
 * For reference, this map is similar to devmap, used in the XDP context;
 * reviewing the two together may be useful. For an example please review
 * ./samples/bpf/sockmap/.
 */
#include <linux/bpf.h>
#include <net/sock.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <net/strparser.h>
#include <net/tcp.h>
#include <linux/ptr_ring.h>
#include <net/inet_common.h>
#include <linux/sched/signal.h>

#define SOCK_CREATE_FLAG_MASK \
        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct bpf_sock_progs {
        struct bpf_prog *bpf_tx_msg;
        struct bpf_prog *bpf_parse;
        struct bpf_prog *bpf_verdict;
};

struct bpf_stab {
        struct bpf_map map;
        struct sock **sock_map;
        struct bpf_sock_progs progs;
        raw_spinlock_t lock;
};

struct bucket {
        struct hlist_head head;
        raw_spinlock_t lock;
};

struct bpf_htab {
        struct bpf_map map;
        struct bucket *buckets;
        atomic_t count;
        u32 n_buckets;
        u32 elem_size;
        struct bpf_sock_progs progs;
        struct rcu_head rcu;
};

struct htab_elem {
        struct rcu_head rcu;
        struct hlist_node hash_node;
        u32 hash;
        struct sock *sk;
        char key[0];
};

enum smap_psock_state {
        SMAP_TX_RUNNING,
};

struct smap_psock_map_entry {
        struct list_head list;
        struct bpf_map *map;
        struct sock **entry;
        struct htab_elem __rcu *hash_link;
};

struct smap_psock {
        struct rcu_head rcu;
        refcount_t refcnt;

        /* datapath variables */
        struct sk_buff_head rxqueue;
        bool strp_enabled;

        /* datapath error path cache across tx work invocations */
        int save_rem;
        int save_off;
        struct sk_buff *save_skb;

        /* datapath variables for tx_msg ULP */
        struct sock *sk_redir;
        int apply_bytes;
        int cork_bytes;
        int sg_size;
        int eval;
        struct sk_msg_buff *cork;
        struct list_head ingress;

        struct strparser strp;
        struct bpf_prog *bpf_tx_msg;
        struct bpf_prog *bpf_parse;
        struct bpf_prog *bpf_verdict;
        struct list_head maps;
        spinlock_t maps_lock;

        /* Back reference used when sock callbacks trigger sockmap operations */
        struct sock *sock;
        unsigned long state;

        struct work_struct tx_work;
        struct work_struct gc_work;

        struct proto *sk_proto;
        void (*save_close)(struct sock *sk, long timeout);
        void (*save_data_ready)(struct sock *sk);
        void (*save_write_space)(struct sock *sk);
};

static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                           int nonblock, int flags, int *addr_len);
static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
                            int offset, size_t size, int flags);
static void bpf_tcp_close(struct sock *sk, long timeout);

static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
{
        return rcu_dereference_sk_user_data(sk);
}

static bool bpf_tcp_stream_read(const struct sock *sk)
{
        struct smap_psock *psock;
        bool empty = true;

        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (unlikely(!psock))
                goto out;
        empty = list_empty(&psock->ingress);
out:
        rcu_read_unlock();
        return !empty;
}
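
/* sockmap swaps in one of four static proto variants for an attached
 * socket, indexed first by address family (IPv4 vs IPv6) and then by
 * configuration (base vs TX, depending on whether an SK_MSG program is
 * attached). The variants override close/recvmsg and, for TX,
 * sendmsg/sendpage.
 */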
enum {
        SOCKMAP_IPV4,
        SOCKMAP_IPV6,
        SOCKMAP_NUM_PROTS,
};

enum {
        SOCKMAP_BASE,
        SOCKMAP_TX,
        SOCKMAP_NUM_CONFIGS,
};

static struct proto *saved_tcpv6_prot __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];

static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
                         struct proto *base)
{
        prot[SOCKMAP_BASE] = *base;
        prot[SOCKMAP_BASE].close = bpf_tcp_close;
        prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg;
        prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read;

        prot[SOCKMAP_TX] = prot[SOCKMAP_BASE];
        prot[SOCKMAP_TX].sendmsg = bpf_tcp_sendmsg;
        prot[SOCKMAP_TX].sendpage = bpf_tcp_sendpage;
}

static void update_sk_prot(struct sock *sk, struct smap_psock *psock)
{
        int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4;
        int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE;

        sk->sk_prot = &bpf_tcp_prots[family][conf];
}

static int bpf_tcp_init(struct sock *sk)
{
        struct smap_psock *psock;

        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (unlikely(!psock)) {
                rcu_read_unlock();
                return -EINVAL;
        }

        if (unlikely(psock->sk_proto)) {
                rcu_read_unlock();
                return -EBUSY;
        }

        psock->save_close = sk->sk_prot->close;
        psock->sk_proto = sk->sk_prot;

        /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */
        if (sk->sk_family == AF_INET6 &&
            unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
                spin_lock_bh(&tcpv6_prot_lock);
                if (likely(sk->sk_prot != saved_tcpv6_prot)) {
                        build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot);
                        smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
                }
                spin_unlock_bh(&tcpv6_prot_lock);
        }
        update_sk_prot(sk, psock);
        rcu_read_unlock();
        return 0;
}

static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
static int free_start_sg(struct sock *sk, struct sk_msg_buff *md);

static void bpf_tcp_release(struct sock *sk)
{
        struct smap_psock *psock;

        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (unlikely(!psock))
                goto out;

        if (psock->cork) {
                free_start_sg(psock->sock, psock->cork);
                kfree(psock->cork);
                psock->cork = NULL;
        }

        if (psock->sk_proto) {
                sk->sk_prot = psock->sk_proto;
                psock->sk_proto = NULL;
        }
out:
        rcu_read_unlock();
}

static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
                                         u32 hash, void *key, u32 key_size)
{
        struct htab_elem *l;

        hlist_for_each_entry_rcu(l, head, hash_node) {
                if (l->hash == hash && !memcmp(&l->key, key, key_size))
                        return l;
        }

        return NULL;
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
        return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
        return &__select_bucket(htab, hash)->head;
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
        atomic_dec(&htab->count);
        kfree_rcu(l, rcu);
}

static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
                                                  struct smap_psock *psock)
{
        struct smap_psock_map_entry *e;

        spin_lock_bh(&psock->maps_lock);
        e = list_first_entry_or_null(&psock->maps,
                                     struct smap_psock_map_entry,
                                     list);
        if (e)
                list_del(&e->list);
        spin_unlock_bh(&psock->maps_lock);
        return e;
}
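
/* bpf_tcp_close() runs in place of the socket's original close(). It
 * frees any corked or queued ingress data, then walks psock->maps and
 * unlinks this sock from every sockmap/sockhash entry that still points
 * at it, before handing off to the saved close handler.
 */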
static void bpf_tcp_close(struct sock *sk, long timeout)
{
        void (*close_fun)(struct sock *sk, long timeout);
        struct smap_psock_map_entry *e;
        struct sk_msg_buff *md, *mtmp;
        struct smap_psock *psock;
        struct sock *osk;

        lock_sock(sk);
        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (unlikely(!psock)) {
                rcu_read_unlock();
                release_sock(sk);
                return sk->sk_prot->close(sk, timeout);
        }

        /* The psock may be destroyed anytime after exiting the RCU critical
         * section so by the time we use close_fun the psock may no longer
         * be valid. However, bpf_tcp_close is called with the sock lock
         * held so the close hook and sk are still valid.
         */
        close_fun = psock->save_close;

        if (psock->cork) {
                free_start_sg(psock->sock, psock->cork);
                kfree(psock->cork);
                psock->cork = NULL;
        }

        list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
                list_del(&md->list);
                free_start_sg(psock->sock, md);
                kfree(md);
        }

        e = psock_map_pop(sk, psock);
        while (e) {
                if (e->entry) {
                        struct bpf_stab *stab = container_of(e->map, struct bpf_stab, map);

                        raw_spin_lock_bh(&stab->lock);
                        osk = *e->entry;
                        if (osk == sk) {
                                *e->entry = NULL;
                                smap_release_sock(psock, sk);
                        }
                        raw_spin_unlock_bh(&stab->lock);
                } else {
                        struct htab_elem *link = rcu_dereference(e->hash_link);
                        struct bpf_htab *htab = container_of(e->map, struct bpf_htab, map);
                        struct hlist_head *head;
                        struct htab_elem *l;
                        struct bucket *b;

                        b = __select_bucket(htab, link->hash);
                        head = &b->head;
                        raw_spin_lock_bh(&b->lock);
                        l = lookup_elem_raw(head,
                                            link->hash, link->key,
                                            htab->map.key_size);
                        /* If another thread deleted this object skip deletion.
                         * The refcnt on psock may or may not be zero.
                         */
                        if (l) {
                                hlist_del_rcu(&link->hash_node);
                                smap_release_sock(psock, link->sk);
                                free_htab_elem(htab, link);
                        }
                        raw_spin_unlock_bh(&b->lock);
                }
                kfree(e);
                e = psock_map_pop(sk, psock);
        }
        rcu_read_unlock();
        release_sock(sk);
        close_fun(sk, timeout);
}

enum __sk_action {
        __SK_DROP = 0,
        __SK_PASS,
        __SK_REDIRECT,
        __SK_NONE,
};

static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
        .name           = "bpf_tcp",
        .uid            = TCP_ULP_BPF,
        .user_visible   = false,
        .owner          = NULL,
        .init           = bpf_tcp_init,
        .release        = bpf_tcp_release,
};

static int memcopy_from_iter(struct sock *sk,
                             struct sk_msg_buff *md,
                             struct iov_iter *from, int bytes)
{
        struct scatterlist *sg = md->sg_data;
        int i = md->sg_curr, rc = -ENOSPC;

        do {
                int copy;
                char *to;

                if (md->sg_copybreak >= sg[i].length) {
                        md->sg_copybreak = 0;

                        if (++i == MAX_SKB_FRAGS)
                                i = 0;

                        if (i == md->sg_end)
                                break;
                }

                copy = sg[i].length - md->sg_copybreak;
                to = sg_virt(&sg[i]) + md->sg_copybreak;
                md->sg_copybreak += copy;

                if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
                        rc = copy_from_iter_nocache(to, copy, from);
                else
                        rc = copy_from_iter(to, copy, from);

                if (rc != copy) {
                        rc = -EFAULT;
                        goto out;
                }

                bytes -= copy;
                if (!bytes)
                        break;

                md->sg_copybreak = 0;
                if (++i == MAX_SKB_FRAGS)
                        i = 0;
        } while (i != md->sg_end);
out:
        md->sg_curr = i;
        return rc;
}
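
/* bpf_tcp_push() walks the scatterlist from sg_start and feeds each
 * page to do_tcp_sendpages(), handling partial sends by advancing the
 * sg entry in place. When apply_bytes is set, at most that many bytes
 * are sent before returning to the caller.
 */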
static int bpf_tcp_push(struct sock *sk, int apply_bytes,
                        struct sk_msg_buff *md,
                        int flags, bool uncharge)
{
        bool apply = apply_bytes;
        struct scatterlist *sg;
        int offset, ret = 0;
        struct page *p;
        size_t size;

        while (1) {
                sg = md->sg_data + md->sg_start;
                size = (apply && apply_bytes < sg->length) ?
                        apply_bytes : sg->length;
                offset = sg->offset;

                tcp_rate_check_app_limited(sk);
                p = sg_page(sg);
retry:
                ret = do_tcp_sendpages(sk, p, offset, size, flags);
                if (ret != size) {
                        if (ret > 0) {
                                if (apply)
                                        apply_bytes -= ret;

                                sg->offset += ret;
                                sg->length -= ret;
                                size -= ret;
                                offset += ret;
                                if (uncharge)
                                        sk_mem_uncharge(sk, ret);
                                goto retry;
                        }

                        return ret;
                }

                if (apply)
                        apply_bytes -= ret;
                sg->offset += ret;
                sg->length -= ret;
                if (uncharge)
                        sk_mem_uncharge(sk, ret);

                if (!sg->length) {
                        put_page(p);
                        md->sg_start++;
                        if (md->sg_start == MAX_SKB_FRAGS)
                                md->sg_start = 0;
                        sg_init_table(sg, 1);

                        if (md->sg_start == md->sg_end)
                                break;
                }

                if (apply && !apply_bytes)
                        break;
        }
        return 0;
}

static inline void bpf_compute_data_pointers_sg(struct sk_msg_buff *md)
{
        struct scatterlist *sg = md->sg_data + md->sg_start;

        if (md->sg_copy[md->sg_start]) {
                md->data = md->data_end = 0;
        } else {
                md->data = sg_virt(sg);
                md->data_end = md->data + sg->length;
        }
}

static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
{
        struct scatterlist *sg = md->sg_data;
        int i = md->sg_start;

        do {
                int uncharge = (bytes < sg[i].length) ? bytes : sg[i].length;

                sk_mem_uncharge(sk, uncharge);
                bytes -= uncharge;
                if (!bytes)
                        break;
                i++;
                if (i == MAX_SKB_FRAGS)
                        i = 0;
        } while (i != md->sg_end);
}

static void free_bytes_sg(struct sock *sk, int bytes,
                          struct sk_msg_buff *md, bool charge)
{
        struct scatterlist *sg = md->sg_data;
        int i = md->sg_start, free;

        while (bytes && sg[i].length) {
                free = sg[i].length;
                if (bytes < free) {
                        sg[i].length -= bytes;
                        sg[i].offset += bytes;
                        if (charge)
                                sk_mem_uncharge(sk, bytes);
                        break;
                }

                if (charge)
                        sk_mem_uncharge(sk, sg[i].length);
                put_page(sg_page(&sg[i]));
                bytes -= sg[i].length;
                sg[i].length = 0;
                sg[i].page_link = 0;
                sg[i].offset = 0;
                i++;

                if (i == MAX_SKB_FRAGS)
                        i = 0;
        }
        md->sg_start = i;
}

static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
{
        struct scatterlist *sg = md->sg_data;
        int i = start, free = 0;

        while (sg[i].length) {
                free += sg[i].length;
                sk_mem_uncharge(sk, sg[i].length);
                if (!md->skb)
                        put_page(sg_page(&sg[i]));
                sg[i].length = 0;
                sg[i].page_link = 0;
                sg[i].offset = 0;
                i++;

                if (i == MAX_SKB_FRAGS)
                        i = 0;
        }
        if (md->skb)
                consume_skb(md->skb);

        return free;
}

static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
{
        int free = free_sg(sk, md->sg_start, md);

        md->sg_start = md->sg_end;
        return free;
}

static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
{
        return free_sg(sk, md->sg_curr, md);
}
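
/* Run the attached SK_MSG program over the message and translate its
 * UAPI return code (SK_PASS/SK_DROP) into the internal __SK_* action.
 * On redirect, the target sock is looked up and pinned here because
 * the map entry could disappear once the RCU read lock is dropped.
 */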
static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
{
        return ((_rc == SK_PASS) ?
               (md->sk_redir ? __SK_REDIRECT : __SK_PASS) :
               __SK_DROP);
}

static unsigned int smap_do_tx_msg(struct sock *sk,
                                   struct smap_psock *psock,
                                   struct sk_msg_buff *md)
{
        struct bpf_prog *prog;
        unsigned int rc, _rc;

        preempt_disable();
        rcu_read_lock();

        /* If the policy was removed mid-send then default to 'accept' */
        prog = READ_ONCE(psock->bpf_tx_msg);
        if (unlikely(!prog)) {
                _rc = SK_PASS;
                goto verdict;
        }

        bpf_compute_data_pointers_sg(md);
        md->sk = sk;
        rc = (*prog->bpf_func)(md, prog->insnsi);
        psock->apply_bytes = md->apply_bytes;

        /* Moving return codes from UAPI namespace into internal namespace */
        _rc = bpf_map_msg_verdict(rc, md);

        /* The psock has a refcount on the sock but not on the map and because
         * we need to drop the rcu read lock here it's possible the map could be
         * removed between here and when we need it to execute the sock
         * redirect. So do the map lookup now for future use.
         */
        if (_rc == __SK_REDIRECT) {
                if (psock->sk_redir)
                        sock_put(psock->sk_redir);
                psock->sk_redir = do_msg_redirect_map(md);
                if (!psock->sk_redir) {
                        _rc = __SK_DROP;
                        goto verdict;
                }
                sock_hold(psock->sk_redir);
        }
verdict:
        rcu_read_unlock();
        preempt_enable();
        return _rc;
}
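
/* Redirect message data to the ingress queue of another psock. The
 * relevant sg entries are copied into a fresh sk_msg_buff, the memory
 * is charged to the receiving socket, and its sk_data_ready callback
 * is invoked so a blocked reader can make progress.
 */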
static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
                           struct smap_psock *psock,
                           struct sk_msg_buff *md, int flags)
{
        bool apply = apply_bytes;
        size_t size, copied = 0;
        struct sk_msg_buff *r;
        int err = 0, i;

        r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_KERNEL);
        if (unlikely(!r))
                return -ENOMEM;

        lock_sock(sk);
        r->sg_start = md->sg_start;
        i = md->sg_start;

        do {
                size = (apply && apply_bytes < md->sg_data[i].length) ?
                        apply_bytes : md->sg_data[i].length;

                if (!sk_wmem_schedule(sk, size)) {
                        if (!copied)
                                err = -ENOMEM;
                        break;
                }

                sk_mem_charge(sk, size);
                r->sg_data[i] = md->sg_data[i];
                r->sg_data[i].length = size;
                md->sg_data[i].length -= size;
                md->sg_data[i].offset += size;
                copied += size;

                if (md->sg_data[i].length) {
                        get_page(sg_page(&r->sg_data[i]));
                        r->sg_end = (i + 1) == MAX_SKB_FRAGS ? 0 : i + 1;
                } else {
                        i++;
                        if (i == MAX_SKB_FRAGS)
                                i = 0;
                        r->sg_end = i;
                }

                if (apply) {
                        apply_bytes -= size;
                        if (!apply_bytes)
                                break;
                }
        } while (i != md->sg_end);

        md->sg_start = i;

        if (!err) {
                list_add_tail(&r->list, &psock->ingress);
                sk->sk_data_ready(sk);
        } else {
                free_start_sg(sk, r);
                kfree(r);
        }

        release_sock(sk);
        return err;
}

static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
                                       struct sk_msg_buff *md,
                                       int flags)
{
        bool ingress = !!(md->flags & BPF_F_INGRESS);
        struct smap_psock *psock;
        int err = 0;

        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (unlikely(!psock))
                goto out_rcu;

        if (!refcount_inc_not_zero(&psock->refcnt))
                goto out_rcu;

        rcu_read_unlock();

        if (ingress) {
                err = bpf_tcp_ingress(sk, send, psock, md, flags);
        } else {
                lock_sock(sk);
                err = bpf_tcp_push(sk, send, md, flags, false);
                release_sock(sk);
        }
        smap_release_sock(psock, sk);
        if (unlikely(err))
                goto out;
        return 0;
out_rcu:
        rcu_read_unlock();
out:
        free_bytes_sg(NULL, send, md, false);
        return err;
}

static inline void bpf_md_init(struct smap_psock *psock)
{
        if (!psock->apply_bytes) {
                psock->eval = __SK_NONE;
                if (psock->sk_redir) {
                        sock_put(psock->sk_redir);
                        psock->sk_redir = NULL;
                }
        }
}

static void apply_bytes_dec(struct smap_psock *psock, int i)
{
        if (psock->apply_bytes) {
                if (psock->apply_bytes < i)
                        psock->apply_bytes = 0;
                else
                        psock->apply_bytes -= i;
        }
}
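
/* Core TX state machine: run (or re-run) the verdict program over the
 * pending sg data and act on the result. __SK_PASS pushes the data to
 * the local TCP stack, __SK_REDIRECT hands it to the redirect target
 * (dropping the sock lock across the send), and __SK_DROP frees it.
 * Corking short-circuits the verdict until cork_bytes are accumulated.
 */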
static int bpf_exec_tx_verdict(struct smap_psock *psock,
                               struct sk_msg_buff *m,
                               struct sock *sk,
                               int *copied, int flags)
{
        bool cork = false, enospc = (m->sg_start == m->sg_end);
        struct sock *redir;
        int err = 0;
        int send;

more_data:
        if (psock->eval == __SK_NONE)
                psock->eval = smap_do_tx_msg(sk, psock, m);

        if (m->cork_bytes &&
            m->cork_bytes > psock->sg_size && !enospc) {
                psock->cork_bytes = m->cork_bytes - psock->sg_size;
                if (!psock->cork) {
                        psock->cork = kcalloc(1,
                                        sizeof(struct sk_msg_buff),
                                        GFP_ATOMIC | __GFP_NOWARN);

                        if (!psock->cork) {
                                err = -ENOMEM;
                                goto out_err;
                        }
                }
                memcpy(psock->cork, m, sizeof(*m));
                goto out_err;
        }

        send = psock->sg_size;
        if (psock->apply_bytes && psock->apply_bytes < send)
                send = psock->apply_bytes;

        switch (psock->eval) {
        case __SK_PASS:
                err = bpf_tcp_push(sk, send, m, flags, true);
                if (unlikely(err)) {
                        *copied -= free_start_sg(sk, m);
                        break;
                }

                apply_bytes_dec(psock, send);
                psock->sg_size -= send;
                break;
        case __SK_REDIRECT:
                redir = psock->sk_redir;
                apply_bytes_dec(psock, send);

                if (psock->cork) {
                        cork = true;
                        psock->cork = NULL;
                }

                return_mem_sg(sk, send, m);
                release_sock(sk);

                err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags);
                lock_sock(sk);

                if (unlikely(err < 0)) {
                        free_start_sg(sk, m);
                        psock->sg_size = 0;
                        if (!cork)
                                *copied -= send;
                } else {
                        psock->sg_size -= send;
                }

                if (cork) {
                        free_start_sg(sk, m);
                        psock->sg_size = 0;
                        kfree(m);
                        m = NULL;
                        err = 0;
                }
                break;
        case __SK_DROP:
        default:
                free_bytes_sg(sk, send, m, true);
                apply_bytes_dec(psock, send);
                *copied -= send;
                psock->sg_size -= send;
                err = -EACCES;
                break;
        }

        if (likely(!err)) {
                bpf_md_init(psock);
                if (m &&
                    m->sg_data[m->sg_start].page_link &&
                    m->sg_data[m->sg_start].length)
                        goto more_data;
        }

out_err:
        return err;
}

static int bpf_wait_data(struct sock *sk,
                         struct smap_psock *psk, int flags,
                         long timeo, int *err)
{
        int rc;

        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(sk_sleep(sk), &wait);
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        rc = sk_wait_event(sk, &timeo,
                           !list_empty(&psk->ingress) ||
                           !skb_queue_empty(&sk->sk_receive_queue),
                           &wait);
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        remove_wait_queue(sk_sleep(sk), &wait);

        return rc;
}
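
/* recvmsg replacement: drain sk_msg buffers queued on psock->ingress
 * by BPF_F_INGRESS redirects first; anything that arrived through the
 * regular TCP receive queue is handed back to tcp_recvmsg(). Blocking
 * readers wait in bpf_wait_data() for either source to fill.
 */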
static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                           int nonblock, int flags, int *addr_len)
{
        struct iov_iter *iter = &msg->msg_iter;
        struct smap_psock *psock;
        int copied = 0;

        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);

        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (unlikely(!psock))
                goto out;

        if (unlikely(!refcount_inc_not_zero(&psock->refcnt)))
                goto out;
        rcu_read_unlock();

        if (!skb_queue_empty(&sk->sk_receive_queue))
                return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);

        lock_sock(sk);
bytes_ready:
        while (copied != len) {
                struct scatterlist *sg;
                struct sk_msg_buff *md;
                int i;

                md = list_first_entry_or_null(&psock->ingress,
                                              struct sk_msg_buff, list);
                if (unlikely(!md))
                        break;
                i = md->sg_start;
                do {
                        struct page *page;
                        int n, copy;

                        sg = &md->sg_data[i];
                        copy = sg->length;
                        page = sg_page(sg);

                        if (copied + copy > len)
                                copy = len - copied;

                        n = copy_page_to_iter(page, sg->offset, copy, iter);
                        if (n != copy) {
                                md->sg_start = i;
                                release_sock(sk);
                                smap_release_sock(psock, sk);
                                return -EFAULT;
                        }

                        copied += copy;
                        sg->offset += copy;
                        sg->length -= copy;
                        sk_mem_uncharge(sk, copy);

                        if (!sg->length) {
                                i++;
                                if (i == MAX_SKB_FRAGS)
                                        i = 0;
                                if (!md->skb)
                                        put_page(page);
                        }
                        if (copied == len)
                                break;
                } while (i != md->sg_end);
                md->sg_start = i;

                if (!sg->length && md->sg_start == md->sg_end) {
                        list_del(&md->list);
                        if (md->skb)
                                consume_skb(md->skb);
                        kfree(md);
                }
        }

        if (!copied) {
                long timeo;
                int data;
                int err = 0;

                timeo = sock_rcvtimeo(sk, nonblock);
                data = bpf_wait_data(sk, psock, flags, timeo, &err);

                if (data) {
                        if (!skb_queue_empty(&sk->sk_receive_queue)) {
                                release_sock(sk);
                                smap_release_sock(psock, sk);
                                copied = tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
                                return copied;
                        }
                        goto bytes_ready;
                }

                if (err)
                        copied = err;
        }

        release_sock(sk);
        smap_release_sock(psock, sk);
        return copied;
out:
        rcu_read_unlock();
        return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
}

static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
        int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
        struct sk_msg_buff md = {0};
        unsigned int sg_copy = 0;
        struct smap_psock *psock;
        int copied = 0, err = 0;
        struct scatterlist *sg;
        long timeo;

        /* It's possible a sock event or user removed the psock _but_ the ops
         * have not been reprogrammed yet so we get here. In this case fallback
         * to tcp_sendmsg. Note this only works because we _only_ ever allow
         * a single ULP; there is no hierarchy here.
         */
        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (unlikely(!psock)) {
                rcu_read_unlock();
                return tcp_sendmsg(sk, msg, size);
        }

        /* Increment the psock refcnt to ensure it's not released while sending
         * a message. Required because sk lookup and bpf programs are used in
         * separate rcu critical sections. It's OK if we lose the map entry
         * but we can't lose the sock reference.
         */
        if (!refcount_inc_not_zero(&psock->refcnt)) {
                rcu_read_unlock();
                return tcp_sendmsg(sk, msg, size);
        }

        sg = md.sg_data;
        sg_init_marker(sg, MAX_SKB_FRAGS);
        rcu_read_unlock();

        lock_sock(sk);
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

        while (msg_data_left(msg)) {
                struct sk_msg_buff *m = NULL;
                bool enospc = false;
                int copy;

                if (sk->sk_err) {
                        err = -sk->sk_err;
                        goto out_err;
                }

                copy = msg_data_left(msg);
                if (!sk_stream_memory_free(sk))
                        goto wait_for_sndbuf;

                m = psock->cork_bytes ? psock->cork : &md;
                m->sg_curr = m->sg_copybreak ? m->sg_curr : m->sg_end;
                err = sk_alloc_sg(sk, copy, m->sg_data,
                                  m->sg_start, &m->sg_end, &sg_copy,
                                  m->sg_end - 1);
                if (err) {
                        if (err != -ENOSPC)
                                goto wait_for_memory;
                        enospc = true;
                        copy = sg_copy;
                }

                err = memcopy_from_iter(sk, m, &msg->msg_iter, copy);
                if (err < 0) {
                        free_curr_sg(sk, m);
                        goto out_err;
                }

                psock->sg_size += copy;
                copied += copy;
                sg_copy = 0;

                /* When bytes are being corked skip running BPF program and
                 * applying verdict unless there is no more buffer space. In
                 * the ENOSPC case simply run BPF program with currently
                 * accumulated data. We don't have much choice at this point;
                 * we could try extending the page frags or chaining complex
                 * frags but even in these cases _eventually_ we will hit an
                 * OOM scenario. More complex recovery schemes may be
                 * implemented in the future, but BPF programs must handle
                 * the case where apply_cork requests are not honored. The
                 * canonical method to verify this is to check data length.
                 */
                if (psock->cork_bytes) {
                        if (copy > psock->cork_bytes)
                                psock->cork_bytes = 0;
                        else
                                psock->cork_bytes -= copy;

                        if (psock->cork_bytes && !enospc)
                                goto out_cork;

                        /* All cork bytes are accounted for; re-run the filter */
                        psock->eval = __SK_NONE;
                        psock->cork_bytes = 0;
                }

                err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
                if (unlikely(err < 0))
                        goto out_err;
                continue;
wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                err = sk_stream_wait_memory(sk, &timeo);
                if (err) {
                        if (m && m != psock->cork)
                                free_start_sg(sk, m);
                        goto out_err;
                }
        }
out_err:
        if (err < 0)
                err = sk_stream_error(sk, msg->msg_flags, err);
out_cork:
        release_sock(sk);
        smap_release_sock(psock, sk);
        return copied ? copied : err;
}
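
/* sendpage replacement: append the caller's page as one sg entry on the
 * current (possibly corked) sk_msg_buff, then run the same TX verdict
 * path as sendmsg. Sockets without a psock fall back to tcp_sendpage().
 */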
static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
                            int offset, size_t size, int flags)
{
        struct sk_msg_buff md = {0}, *m = NULL;
        int err = 0, copied = 0;
        struct smap_psock *psock;
        struct scatterlist *sg;
        bool enospc = false;

        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (unlikely(!psock))
                goto accept;

        if (!refcount_inc_not_zero(&psock->refcnt))
                goto accept;
        rcu_read_unlock();

        lock_sock(sk);

        if (psock->cork_bytes) {
                m = psock->cork;
                sg = &m->sg_data[m->sg_end];
        } else {
                m = &md;
                sg = m->sg_data;
                sg_init_marker(sg, MAX_SKB_FRAGS);
        }

        /* Catch case where ring is full and sendpage is stalled. */
        if (unlikely(m->sg_end == m->sg_start &&
            m->sg_data[m->sg_end].length))
                goto out_err;

        psock->sg_size += size;
        sg_set_page(sg, page, size, offset);
        get_page(page);
        m->sg_copy[m->sg_end] = true;
        sk_mem_charge(sk, size);
        m->sg_end++;
        copied = size;

        if (m->sg_end == MAX_SKB_FRAGS)
                m->sg_end = 0;

        if (m->sg_end == m->sg_start)
                enospc = true;

        if (psock->cork_bytes) {
                if (size > psock->cork_bytes)
                        psock->cork_bytes = 0;
                else
                        psock->cork_bytes -= size;

                if (psock->cork_bytes && !enospc)
                        goto out_err;

                /* All cork bytes are accounted for; re-run the filter */
                psock->eval = __SK_NONE;
                psock->cork_bytes = 0;
        }

        err = bpf_exec_tx_verdict(psock, m, sk, &copied, flags);
out_err:
        release_sock(sk);
        smap_release_sock(psock, sk);
        return copied ? copied : err;
accept:
        rcu_read_unlock();
        return tcp_sendpage(sk, page, offset, size, flags);
}

static void bpf_tcp_msg_add(struct smap_psock *psock,
                            struct sock *sk,
                            struct bpf_prog *tx_msg)
{
        struct bpf_prog *orig_tx_msg;

        orig_tx_msg = xchg(&psock->bpf_tx_msg, tx_msg);
        if (orig_tx_msg)
                bpf_prog_put(orig_tx_msg);
}

static int bpf_tcp_ulp_register(void)
{
        build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
        /* Once BPF TX ULP is registered it is never unregistered. It
         * will be in the ULP list for the lifetime of the system. Doing
         * duplicate registers is not a problem.
         */
        return tcp_register_ulp(&bpf_tcp_ulp_ops);
}
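
/* Receive-path verdict: run the attached verdict program over an skb
 * delivered by strparser and map its UAPI return code into the internal
 * __SK_* action, with the redirect target carried in the skb's BPF
 * metadata.
 */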
static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
        struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
        int rc;

        if (unlikely(!prog))
                return __SK_DROP;

        skb_orphan(skb);
        /* We need to ensure that BPF metadata for maps is also cleared
         * when we orphan the skb so that we don't have the possibility
         * to reference a stale map.
         */
        TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
        skb->sk = psock->sock;
        bpf_compute_data_end_sk_skb(skb);
        preempt_disable();
        rc = (*prog->bpf_func)(skb, prog->insnsi);
        preempt_enable();
        skb->sk = NULL;

        /* Moving return codes from UAPI namespace into internal namespace */
        return rc == SK_PASS ?
                (TCP_SKB_CB(skb)->bpf.sk_redir ? __SK_REDIRECT : __SK_PASS) :
                __SK_DROP;
}
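
/* Convert an skb into an sk_msg_buff scatterlist and queue it on the
 * psock's ingress list so it can be consumed by bpf_tcp_recvmsg(),
 * charging the receive memory to the target socket.
 */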
static int smap_do_ingress(struct smap_psock *psock, struct sk_buff *skb)
{
        struct sock *sk = psock->sock;
        int copied = 0, num_sg;
        struct sk_msg_buff *r;

        r = kzalloc(sizeof(struct sk_msg_buff), __GFP_NOWARN | GFP_ATOMIC);
        if (unlikely(!r))
                return -EAGAIN;

        if (!sk_rmem_schedule(sk, skb, skb->len)) {
                kfree(r);
                return -EAGAIN;
        }

        sg_init_table(r->sg_data, MAX_SKB_FRAGS);
        num_sg = skb_to_sgvec(skb, r->sg_data, 0, skb->len);
        if (unlikely(num_sg < 0)) {
                kfree(r);
                return num_sg;
        }
        sk_mem_charge(sk, skb->len);
        copied = skb->len;
        r->sg_start = 0;
        r->sg_end = num_sg == MAX_SKB_FRAGS ? 0 : num_sg;
        r->skb = skb;
        list_add_tail(&r->list, &psock->ingress);
        sk->sk_data_ready(sk);
        return copied;
}

static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
        struct smap_psock *peer;
        struct sock *sk;
        __u32 in;
        int rc;

        rc = smap_verdict_func(psock, skb);
        switch (rc) {
        case __SK_REDIRECT:
                sk = do_sk_redirect_map(skb);
                if (!sk) {
                        kfree_skb(skb);
                        break;
                }

                peer = smap_psock_sk(sk);
                in = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;

                if (unlikely(!peer || sock_flag(sk, SOCK_DEAD) ||
                             !test_bit(SMAP_TX_RUNNING, &peer->state))) {
                        kfree_skb(skb);
                        break;
                }

                if (!in && sock_writeable(sk)) {
                        skb_set_owner_w(skb, sk);
                        skb_queue_tail(&peer->rxqueue, skb);
                        schedule_work(&peer->tx_work);
                        break;
                } else if (in &&
                           atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
                        skb_queue_tail(&peer->rxqueue, skb);
                        schedule_work(&peer->tx_work);
                        break;
                }
        /* Fall through and free skb otherwise */
        case __SK_DROP:
        default:
                kfree_skb(skb);
        }
}

static void smap_report_sk_error(struct smap_psock *psock, int err)
{
        struct sock *sk = psock->sock;

        sk->sk_err = err;
        sk->sk_error_report(sk);
}

static void smap_read_sock_strparser(struct strparser *strp,
                                     struct sk_buff *skb)
{
        struct smap_psock *psock;

        rcu_read_lock();
        psock = container_of(strp, struct smap_psock, strp);
        smap_do_verdict(psock, skb);
        rcu_read_unlock();
}

/* Called with lock held on socket */
static void smap_data_ready(struct sock *sk)
{
        struct smap_psock *psock;

        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (likely(psock)) {
                write_lock_bh(&sk->sk_callback_lock);
                strp_data_ready(&psock->strp);
                write_unlock_bh(&sk->sk_callback_lock);
        }
        rcu_read_unlock();
}
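
/* Deferred transmit worker: drain skbs queued on psock->rxqueue by the
 * verdict path, either queuing them as ingress data or sending them to
 * the redirected socket. A partial send is cached in save_skb/save_rem/
 * save_off so an -EAGAIN can be resumed when write space returns.
 */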
static void smap_tx_work(struct work_struct *w)
{
        struct smap_psock *psock;
        struct sk_buff *skb;
        int rem, off, n;

        psock = container_of(w, struct smap_psock, tx_work);

        /* lock sock to avoid losing sk_socket at some point during loop */
        lock_sock(psock->sock);
        if (psock->save_skb) {
                skb = psock->save_skb;
                rem = psock->save_rem;
                off = psock->save_off;
                psock->save_skb = NULL;
                goto start;
        }

        while ((skb = skb_dequeue(&psock->rxqueue))) {
                __u32 flags;

                rem = skb->len;
                off = 0;
start:
                flags = (TCP_SKB_CB(skb)->bpf.flags) & BPF_F_INGRESS;
                do {
                        if (likely(psock->sock->sk_socket)) {
                                if (flags)
                                        n = smap_do_ingress(psock, skb);
                                else
                                        n = skb_send_sock_locked(psock->sock,
                                                                 skb, off, rem);
                        } else {
                                n = -EINVAL;
                        }

                        if (n <= 0) {
                                if (n == -EAGAIN) {
                                        /* Retry when space is available */
                                        psock->save_skb = skb;
                                        psock->save_rem = rem;
                                        psock->save_off = off;
                                        goto out;
                                }
                                /* Hard errors break pipe and stop xmit */
                                smap_report_sk_error(psock, n ? -n : EPIPE);
                                clear_bit(SMAP_TX_RUNNING, &psock->state);
                                kfree_skb(skb);
                                goto out;
                        }
                        rem -= n;
                        off += n;
                } while (rem);

                if (!flags)
                        kfree_skb(skb);
        }
out:
        release_sock(psock->sock);
}

static void smap_write_space(struct sock *sk)
{
        struct smap_psock *psock;

        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
                schedule_work(&psock->tx_work);
        rcu_read_unlock();
}

static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
{
        if (!psock->strp_enabled)
                return;
        sk->sk_data_ready = psock->save_data_ready;
        sk->sk_write_space = psock->save_write_space;
        psock->save_data_ready = NULL;
        psock->save_write_space = NULL;
        strp_stop(&psock->strp);
        psock->strp_enabled = false;
}

static void smap_destroy_psock(struct rcu_head *rcu)
{
        struct smap_psock *psock = container_of(rcu,
                                                struct smap_psock, rcu);

        /* Now that a grace period has passed there is no longer
         * any reference to this sock in the sockmap so we can
         * destroy the psock, strparser, and bpf programs. But,
         * because we use workqueue sync operations we can not
         * do it in rcu context
         */
        schedule_work(&psock->gc_work);
}

static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
{
        if (refcount_dec_and_test(&psock->refcnt)) {
                tcp_cleanup_ulp(sock);
                write_lock_bh(&sock->sk_callback_lock);
                smap_stop_sock(psock, sock);
                write_unlock_bh(&sock->sk_callback_lock);
                clear_bit(SMAP_TX_RUNNING, &psock->state);
                rcu_assign_sk_user_data(sock, NULL);
                call_rcu_sched(&psock->rcu, smap_destroy_psock);
        }
}

static int smap_parse_func_strparser(struct strparser *strp,
                                     struct sk_buff *skb)
{
        struct smap_psock *psock;
        struct bpf_prog *prog;
        int rc;

        rcu_read_lock();
        psock = container_of(strp, struct smap_psock, strp);
        prog = READ_ONCE(psock->bpf_parse);

        if (unlikely(!prog)) {
                rcu_read_unlock();
                return skb->len;
        }

        /* Attach the socket for the bpf program to use if needed. We can
         * do this because strparser clones the skb before handing it to an
         * upper layer, meaning skb_orphan has been called. We NULL sk on the
         * way out to ensure we don't trigger a BUG_ON() in skb/sk operations
         * later and because we are not charging the memory of this skb to
         * any socket yet.
         */
        skb->sk = psock->sock;
        bpf_compute_data_end_sk_skb(skb);
        rc = (*prog->bpf_func)(skb, prog->insnsi);
        skb->sk = NULL;
        rcu_read_unlock();
        return rc;
}

static int smap_read_sock_done(struct strparser *strp, int err)
{
        return err;
}

static int smap_init_sock(struct smap_psock *psock,
                          struct sock *sk)
{
        static const struct strp_callbacks cb = {
                .rcv_msg        = smap_read_sock_strparser,
                .parse_msg      = smap_parse_func_strparser,
                .read_sock_done = smap_read_sock_done,
        };

        return strp_init(&psock->strp, sk, &cb);
}

static void smap_init_progs(struct smap_psock *psock,
                            struct bpf_prog *verdict,
                            struct bpf_prog *parse)
{
        struct bpf_prog *orig_parse, *orig_verdict;

        orig_parse = xchg(&psock->bpf_parse, parse);
        orig_verdict = xchg(&psock->bpf_verdict, verdict);

        if (orig_verdict)
                bpf_prog_put(orig_verdict);
        if (orig_parse)
                bpf_prog_put(orig_parse);
}

static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
{
        if (sk->sk_data_ready == smap_data_ready)
                return;
        psock->save_data_ready = sk->sk_data_ready;
        psock->save_write_space = sk->sk_write_space;
        sk->sk_data_ready = smap_data_ready;
        sk->sk_write_space = smap_write_space;
        psock->strp_enabled = true;
}

static void sock_map_remove_complete(struct bpf_stab *stab)
{
        bpf_map_area_free(stab->sock_map);
        kfree(stab);
}

static void smap_gc_work(struct work_struct *w)
{
        struct smap_psock_map_entry *e, *tmp;
        struct sk_msg_buff *md, *mtmp;
        struct smap_psock *psock;

        psock = container_of(w, struct smap_psock, gc_work);

        /* no callback lock needed because we already detached sockmap ops */
        if (psock->strp_enabled)
                strp_done(&psock->strp);

        cancel_work_sync(&psock->tx_work);
        __skb_queue_purge(&psock->rxqueue);

        /* At this point all strparser and xmit work must be complete */
        if (psock->bpf_parse)
                bpf_prog_put(psock->bpf_parse);
        if (psock->bpf_verdict)
                bpf_prog_put(psock->bpf_verdict);
        if (psock->bpf_tx_msg)
                bpf_prog_put(psock->bpf_tx_msg);

        if (psock->cork) {
                free_start_sg(psock->sock, psock->cork);
                kfree(psock->cork);
        }

        list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
                list_del(&md->list);
                free_start_sg(psock->sock, md);
                kfree(md);
        }

        list_for_each_entry_safe(e, tmp, &psock->maps, list) {
                list_del(&e->list);
                kfree(e);
        }

        if (psock->sk_redir)
                sock_put(psock->sk_redir);

        sock_put(psock->sock);
        kfree(psock);
}
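
/* Allocate and attach a psock to a socket. The psock is published via
 * sk_user_data and starts with a refcount of one; a reference on the
 * underlying sock is held for the psock's lifetime.
 */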
static struct smap_psock *smap_init_psock(struct sock *sock, int node)
{
        struct smap_psock *psock;

        psock = kzalloc_node(sizeof(struct smap_psock),
                             GFP_ATOMIC | __GFP_NOWARN,
                             node);
        if (!psock)
                return ERR_PTR(-ENOMEM);

        psock->eval = __SK_NONE;
        psock->sock = sock;
        skb_queue_head_init(&psock->rxqueue);
        INIT_WORK(&psock->tx_work, smap_tx_work);
        INIT_WORK(&psock->gc_work, smap_gc_work);
        INIT_LIST_HEAD(&psock->maps);
        INIT_LIST_HEAD(&psock->ingress);
        refcount_set(&psock->refcnt, 1);
        spin_lock_init(&psock->maps_lock);

        rcu_assign_sk_user_data(sock, psock);
        sock_hold(sock);
        return psock;
}

static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
        struct bpf_stab *stab;
        u64 cost;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return ERR_PTR(-EPERM);

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
                return ERR_PTR(-EINVAL);

        err = bpf_tcp_ulp_register();
        if (err && err != -EEXIST)
                return ERR_PTR(err);

        stab = kzalloc(sizeof(*stab), GFP_USER);
        if (!stab)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&stab->map, attr);
        raw_spin_lock_init(&stab->lock);

        /* make sure page count doesn't overflow */
        cost = (u64) stab->map.max_entries * sizeof(struct sock *);
        err = -EINVAL;
        if (cost >= U32_MAX - PAGE_SIZE)
                goto free_stab;

        stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

        /* if map size is larger than memlock limit, reject it early */
        err = bpf_map_precharge_memlock(stab->map.pages);
        if (err)
                goto free_stab;

        err = -ENOMEM;
        stab->sock_map = bpf_map_area_alloc(stab->map.max_entries *
                                            sizeof(struct sock *),
                                            stab->map.numa_node);
        if (!stab->sock_map)
                goto free_stab;

        return &stab->map;
free_stab:
        kfree(stab);
        return ERR_PTR(err);
}

static void smap_list_map_remove(struct smap_psock *psock,
                                 struct sock **entry)
{
        struct smap_psock_map_entry *e, *tmp;

        spin_lock_bh(&psock->maps_lock);
        list_for_each_entry_safe(e, tmp, &psock->maps, list) {
                if (e->entry == entry) {
                        list_del(&e->list);
                        kfree(e);
                }
        }
        spin_unlock_bh(&psock->maps_lock);
}

static void smap_list_hash_remove(struct smap_psock *psock,
                                  struct htab_elem *hash_link)
{
        struct smap_psock_map_entry *e, *tmp;

        spin_lock_bh(&psock->maps_lock);
        list_for_each_entry_safe(e, tmp, &psock->maps, list) {
                struct htab_elem *c = rcu_dereference(e->hash_link);

                if (c == hash_link) {
                        list_del(&e->list);
                        kfree(e);
                }
        }
        spin_unlock_bh(&psock->maps_lock);
}
static void sock_map_free(struct bpf_map *map)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	int i;

	synchronize_rcu();

	/* At this point no update, lookup or delete operations can happen.
	 * However, be aware we can still get socket state event updates and
	 * data-ready callbacks that reference the psock from sk_user_data.
	 * Also, psock worker threads are still in flight, so smap_release_sock
	 * will only free the psock after cancel_sync on the worker threads
	 * and a grace period expires, ensuring the psock is really safe to
	 * remove.
	 */
	rcu_read_lock();
	raw_spin_lock_bh(&stab->lock);
	for (i = 0; i < stab->map.max_entries; i++) {
		struct smap_psock *psock;
		struct sock *sock;

		sock = stab->sock_map[i];
		if (!sock)
			continue;
		stab->sock_map[i] = NULL;
		psock = smap_psock_sk(sock);
		/* This check handles a racing sock event that can get the
		 * sk_callback_lock before this case but after xchg happens
		 * causing the refcnt to hit zero and sock user data (psock)
		 * to be null and queued for garbage collection.
		 */
		if (likely(psock)) {
			smap_list_map_remove(psock, &stab->sock_map[i]);
			smap_release_sock(psock, sock);
		}
	}
	raw_spin_unlock_bh(&stab->lock);
	rcu_read_unlock();

	sock_map_remove_complete(stab);
}

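/* Iteration restarts at slot 0 when no key is given or the previous key is
 * out of range; -ENOENT tells the caller the last slot has been reached.
 */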
static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	u32 i = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (i >= stab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (i == stab->map.max_entries - 1)
		return -ENOENT;

	*next = i + 1;
	return 0;
}

struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

	if (key >= map->max_entries)
		return NULL;

	return READ_ONCE(stab->sock_map[key]);
}

static int sock_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct smap_psock *psock;
	int k = *(u32 *)key;
	struct sock *sock;

	if (k >= map->max_entries)
		return -EINVAL;

	raw_spin_lock_bh(&stab->lock);
	sock = stab->sock_map[k];
	stab->sock_map[k] = NULL;
	raw_spin_unlock_bh(&stab->lock);
	if (!sock)
		return -EINVAL;

	psock = smap_psock_sk(sock);
	if (!psock)
		return 0;
	if (psock->bpf_parse) {
		write_lock_bh(&sock->sk_callback_lock);
		smap_stop_sock(psock, sock);
		write_unlock_bh(&sock->sk_callback_lock);
	}
	smap_list_map_remove(psock, &stab->sock_map[k]);
	smap_release_sock(psock, sock);
	return 0;
}

/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are
 * done inside RCU critical sections. This ensures on updates that the psock
 * will not be released via smap_release_sock() until concurrent
 * updates/deletes complete. Map slots are written under the map's raw
 * spinlock (stab->lock, or the bucket lock for sockhash), and any lockless
 * reads of the map must use READ_ONCE() so we never act on a stale
 * reference.
 *
 * A psock is destroyed via call_rcu, and only after any worker threads have
 * been cancelled and synced, so we are certain all references from the
 * update/lookup/delete operations as well as references in the data path
 * are no longer in use.
 *
 * Psocks may exist in multiple maps, but only a single set of parse/verdict
 * programs may be inherited from the maps they belong to. A reference count
 * is kept with the total number of references to the psock from all maps.
 * The psock will not be released until this reaches zero. The psock and
 * sock user data use the sk_callback_lock to protect critical data
 * structures from concurrent access. This allows us to avoid two updates
 * modifying the user data in sock at the same time, and the lock is
 * required anyway for modifying callbacks; we simply increase its scope
 * slightly.
 *
 * Rules to follow:
 * - psock must always be read inside an RCU critical section
 * - sk_user_data must only be modified inside sk_callback_lock and read
 *   inside an RCU critical section
 * - psock->maps list must only be read & modified under psock->maps_lock
 * - sock_map must use READ_ONCE() for lockless reads and hold the map lock
 *   for writes
 * - BPF verdict/parse programs must use READ_ONCE and xchg operations
 */
static int __sock_map_ctx_update_elem(struct bpf_map *map,
				      struct bpf_sock_progs *progs,
				      struct sock *sock,
				      void *key)
{
	struct bpf_prog *verdict, *parse, *tx_msg;
	struct smap_psock *psock;
	bool new = false;
	int err = 0;

	/* 1. If the sock map has BPF programs, those will be inherited by the
	 * sock being added. If the sock is already attached to BPF programs,
	 * this results in an error.
	 */
	verdict = READ_ONCE(progs->bpf_verdict);
	parse = READ_ONCE(progs->bpf_parse);
	tx_msg = READ_ONCE(progs->bpf_tx_msg);

	if (parse && verdict) {
		/* The bpf prog refcnt may be zero if a concurrent attach
		 * operation removes the program after the above READ_ONCE()
		 * but before we increment the refcnt. If this is the case,
		 * abort with an error.
		 */
		verdict = bpf_prog_inc_not_zero(verdict);
		if (IS_ERR(verdict))
			return PTR_ERR(verdict);

		parse = bpf_prog_inc_not_zero(parse);
		if (IS_ERR(parse)) {
			bpf_prog_put(verdict);
			return PTR_ERR(parse);
		}
	}

	if (tx_msg) {
		tx_msg = bpf_prog_inc_not_zero(tx_msg);
		if (IS_ERR(tx_msg)) {
			if (parse && verdict) {
				bpf_prog_put(parse);
				bpf_prog_put(verdict);
			}
			return PTR_ERR(tx_msg);
		}
	}

	psock = smap_psock_sk(sock);

	/* 2. Do not allow inheriting programs if a psock exists and has
	 * already inherited programs. This would create confusion about
	 * which parser/verdict program is running. If no psock exists,
	 * create one inside sk_callback_lock to ensure a concurrent create
	 * doesn't update the user data.
	 */
	if (psock) {
		if (READ_ONCE(psock->bpf_parse) && parse) {
			err = -EBUSY;
			goto out_progs;
		}
		if (READ_ONCE(psock->bpf_tx_msg) && tx_msg) {
			err = -EBUSY;
			goto out_progs;
		}
		if (!refcount_inc_not_zero(&psock->refcnt)) {
			err = -EAGAIN;
			goto out_progs;
		}
	} else {
		psock = smap_init_psock(sock, map->numa_node);
		if (IS_ERR(psock)) {
			err = PTR_ERR(psock);
			goto out_progs;
		}

		set_bit(SMAP_TX_RUNNING, &psock->state);
		new = true;
	}

	/* 3. At this point we have a reference to a valid psock that is
	 * running. Attach any BPF programs needed.
	 */
	if (tx_msg)
		bpf_tcp_msg_add(psock, sock, tx_msg);
	if (new) {
		err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
		if (err)
			goto out_free;
	}

	if (parse && verdict && !psock->strp_enabled) {
		err = smap_init_sock(psock, sock);
		if (err)
			goto out_free;
		smap_init_progs(psock, verdict, parse);
		write_lock_bh(&sock->sk_callback_lock);
		smap_start_sock(psock, sock);
		write_unlock_bh(&sock->sk_callback_lock);
	}

	return err;
out_free:
	smap_release_sock(psock, sock);
out_progs:
	if (parse && verdict) {
		bpf_prog_put(parse);
		bpf_prog_put(verdict);
	}
	if (tx_msg)
		bpf_prog_put(tx_msg);
	return err;
}

static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
				    struct bpf_map *map,
				    void *key, u64 flags)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_sock_progs *progs = &stab->progs;
	struct sock *osock, *sock = skops->sk;
	struct smap_psock_map_entry *e;
	struct smap_psock *psock;
	u32 i = *(u32 *)key;
	int err;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= stab->map.max_entries))
		return -E2BIG;

	e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
	if (!e)
		return -ENOMEM;

	err = __sock_map_ctx_update_elem(map, progs, sock, key);
	if (err)
		goto out;

	/* psock guaranteed to be present. */
	psock = smap_psock_sk(sock);
	raw_spin_lock_bh(&stab->lock);
	osock = stab->sock_map[i];
	if (osock && flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto out_unlock;
	}
	if (!osock && flags == BPF_EXIST) {
		err = -ENOENT;
		goto out_unlock;
	}

	e->entry = &stab->sock_map[i];
	e->map = map;
	spin_lock_bh(&psock->maps_lock);
	list_add_tail(&e->list, &psock->maps);
	spin_unlock_bh(&psock->maps_lock);

	stab->sock_map[i] = sock;
	if (osock) {
		psock = smap_psock_sk(osock);
		smap_list_map_remove(psock, &stab->sock_map[i]);
		smap_release_sock(psock, osock);
	}
	raw_spin_unlock_bh(&stab->lock);
	return 0;
out_unlock:
	smap_release_sock(psock, sock);
	raw_spin_unlock_bh(&stab->lock);
out:
	kfree(e);
	return err;
}

int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
{
	struct bpf_sock_progs *progs;
	struct bpf_prog *orig;

	if (map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

		progs = &stab->progs;
	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH) {
		struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

		progs = &htab->progs;
	} else {
		return -EINVAL;
	}

	switch (type) {
	case BPF_SK_MSG_VERDICT:
		orig = xchg(&progs->bpf_tx_msg, prog);
		break;
	case BPF_SK_SKB_STREAM_PARSER:
		orig = xchg(&progs->bpf_parse, prog);
		break;
	case BPF_SK_SKB_STREAM_VERDICT:
		orig = xchg(&progs->bpf_verdict, prog);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (orig)
		bpf_prog_put(orig);

	return 0;
}

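/* Example (a sketch, not part of this file): attaching a stream verdict
 * program to a sockmap from user space. The map fd goes in target_fd and
 * the program fd in attach_bpf_fd; error handling is elided.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int attach_verdict(int map_fd, int prog_fd)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.target_fd = map_fd;
 *		attr.attach_bpf_fd = prog_fd;
 *		attr.attach_type = BPF_SK_SKB_STREAM_VERDICT;
 *
 *		return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *	}
 */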
int sockmap_get_from_fd(const union bpf_attr *attr, int type,
			struct bpf_prog *prog)
{
	int ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int err;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = sock_map_prog(map, prog, attr->attach_type);
	fdput(f);
	return err;
}

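/* Reading sock pointers back out via the map_lookup_elem() syscall path is
 * not supported, so lookups from user space always return NULL.
 */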
static void *sock_map_lookup(struct bpf_map *map, void *key)
{
	return NULL;
}

static int sock_map_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_sock_ops_kern skops;
	u32 fd = *(u32 *)value;
	struct socket *socket;
	int err;

	socket = sockfd_lookup(fd, &err);
	if (!socket)
		return err;

	skops.sk = socket->sk;
	if (!skops.sk) {
		fput(socket->file);
		return -EINVAL;
	}

	if (skops.sk->sk_type != SOCK_STREAM ||
	    skops.sk->sk_protocol != IPPROTO_TCP) {
		fput(socket->file);
		return -EOPNOTSUPP;
	}

	lock_sock(skops.sk);
	preempt_disable();
	rcu_read_lock();
	err = sock_map_ctx_update_elem(&skops, map, key, flags);
	rcu_read_unlock();
	preempt_enable();
	release_sock(skops.sk);
	fput(socket->file);
	return err;
}

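/* Example (a sketch, not part of this file): inserting a connected TCP
 * socket into slot 0 of a sockmap from user space. key and value are passed
 * as pointers through the bpf_attr u64 fields; error handling is elided.
 *
 *	#include <linux/bpf.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int sockmap_add(int map_fd, int sock_fd)
 *	{
 *		union bpf_attr attr;
 *		uint32_t key = 0;
 *		uint32_t value = sock_fd;	// the value is the socket fd
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_fd = map_fd;
 *		attr.key = (uint64_t)(unsigned long)&key;
 *		attr.value = (uint64_t)(unsigned long)&value;
 *		attr.flags = BPF_ANY;
 *
 *		return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 *	}
 */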
static void sock_map_release(struct bpf_map *map)
{
	struct bpf_sock_progs *progs;
	struct bpf_prog *orig;

	if (map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		struct bpf_stab *stab = container_of(map, struct bpf_stab, map);

		progs = &stab->progs;
	} else {
		struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

		progs = &htab->progs;
	}

	orig = xchg(&progs->bpf_parse, NULL);
	if (orig)
		bpf_prog_put(orig);
	orig = xchg(&progs->bpf_verdict, NULL);
	if (orig)
		bpf_prog_put(orig);
	orig = xchg(&progs->bpf_tx_msg, NULL);
	if (orig)
		bpf_prog_put(orig);
}

static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
{
	struct bpf_htab *htab;
	int i, err;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->value_size != 4 ||
	    attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	if (attr->key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		return ERR_PTR(-E2BIG);

	err = bpf_tcp_ulp_register();
	if (err && err != -EEXIST)
		return ERR_PTR(err);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&htab->map, attr);

	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	err = -EINVAL;
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(
				htab->n_buckets * sizeof(struct bucket),
				htab->map.numa_node);
	if (!htab->buckets)
		goto free_htab;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	return &htab->map;
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

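/* Worked example of the sizing above (a sketch; exact numbers depend on the
 * struct sizes): with max_entries = 1000 and key_size = 4,
 * n_buckets = roundup_pow_of_two(1000) = 1024, so the charged cost is
 * 1024 * sizeof(struct bucket) + 1000 * elem_size, rounded up to whole
 * pages for the memlock accounting.
 */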
static void __bpf_htab_free(struct rcu_head *rcu)
{
	struct bpf_htab *htab;

	htab = container_of(rcu, struct bpf_htab, rcu);
	bpf_map_area_free(htab->buckets);
	kfree(htab);
}

static void sock_hash_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int i;

	synchronize_rcu();

	/* At this point no update, lookup or delete operations can happen.
	 * However, be aware we can still get socket state event updates and
	 * data-ready callbacks that reference the psock from sk_user_data.
	 * Also, psock worker threads are still in flight, so smap_release_sock
	 * will only free the psock after cancel_sync on the worker threads
	 * and a grace period expires, ensuring the psock is really safe to
	 * remove.
	 */
	rcu_read_lock();
	for (i = 0; i < htab->n_buckets; i++) {
		struct bucket *b = __select_bucket(htab, i);
		struct hlist_head *head;
		struct hlist_node *n;
		struct htab_elem *l;

		raw_spin_lock_bh(&b->lock);
		head = &b->head;
		hlist_for_each_entry_safe(l, n, head, hash_node) {
			struct sock *sock = l->sk;
			struct smap_psock *psock;

			hlist_del_rcu(&l->hash_node);
			psock = smap_psock_sk(sock);
			/* This check handles a racing sock event that can get
			 * the sk_callback_lock before this case but after xchg
			 * causing the refcnt to hit zero and sock user data
			 * (psock) to be null and queued for garbage collection.
			 */
			if (likely(psock)) {
				smap_list_hash_remove(psock, l);
				smap_release_sock(psock, sock);
			}
			free_htab_elem(htab, l);
		}
		raw_spin_unlock_bh(&b->lock);
	}
	rcu_read_unlock();
	call_rcu(&htab->rcu, __bpf_htab_free);
}

static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
					      void *key, u32 key_size, u32 hash,
					      struct sock *sk,
					      struct htab_elem *old_elem)
{
	struct htab_elem *l_new;

	if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
		if (!old_elem) {
			atomic_dec(&htab->count);
			return ERR_PTR(-E2BIG);
		}
	}
	l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
			     htab->map.numa_node);
	if (!l_new)
		return ERR_PTR(-ENOMEM);

	memcpy(l_new->key, key, key_size);
	l_new->sk = sk;
	l_new->hash = hash;
	return l_new;
}

static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

static int sock_hash_get_next_key(struct bpf_map *map,
				  void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l, *next_l;
	struct hlist_head *h;
	u32 hash, key_size;
	int i = 0;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;
	if (!key)
		goto find_first_elem;
	hash = htab_map_hash(key, key_size);
	h = select_bucket(htab, hash);

	l = lookup_elem_raw(h, hash, key, key_size);
	if (!l)
		goto find_first_elem;

	next_l = hlist_entry_safe(
		     rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
		     struct htab_elem, hash_node);
	if (next_l) {
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		h = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_entry_safe(
			     rcu_dereference_raw(hlist_first_rcu(h)),
			     struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
				     struct bpf_map *map,
				     void *key, u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct bpf_sock_progs *progs = &htab->progs;
	struct htab_elem *l_new = NULL, *l_old;
	struct smap_psock_map_entry *e = NULL;
	struct hlist_head *head;
	struct smap_psock *psock;
	u32 key_size, hash;
	struct sock *sock;
	struct bucket *b;
	int err;

	sock = skops->sk;

	if (sock->sk_type != SOCK_STREAM ||
	    sock->sk_protocol != IPPROTO_TCP)
		return -EOPNOTSUPP;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;

	e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
	if (!e)
		return -ENOMEM;

	WARN_ON_ONCE(!rcu_read_lock_held());
	key_size = map->key_size;
	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	err = __sock_map_ctx_update_elem(map, progs, sock, key);
	if (err)
		goto err;

	/* psock is valid here because otherwise __sock_map_ctx_update_elem
	 * above would have returned an error. It is safe to skip the error
	 * check.
	 */
	psock = smap_psock_sk(sock);
	raw_spin_lock_bh(&b->lock);
	l_old = lookup_elem_raw(head, hash, key, key_size);
	if (l_old && map_flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto bucket_err;
	}
	if (!l_old && map_flags == BPF_EXIST) {
		err = -ENOENT;
		goto bucket_err;
	}

	l_new = alloc_sock_hash_elem(htab, key, key_size, hash, sock, l_old);
	if (IS_ERR(l_new)) {
		err = PTR_ERR(l_new);
		goto bucket_err;
	}

	rcu_assign_pointer(e->hash_link, l_new);
	e->map = map;
	spin_lock_bh(&psock->maps_lock);
	list_add_tail(&e->list, &psock->maps);
	spin_unlock_bh(&psock->maps_lock);

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		psock = smap_psock_sk(l_old->sk);

		hlist_del_rcu(&l_old->hash_node);
		smap_list_hash_remove(psock, l_old);
		smap_release_sock(psock, l_old->sk);
		free_htab_elem(htab, l_old);
	}
	raw_spin_unlock_bh(&b->lock);
	return 0;
bucket_err:
	smap_release_sock(psock, sock);
	raw_spin_unlock_bh(&b->lock);
err:
	kfree(e);
	return err;
}

static int sock_hash_update_elem(struct bpf_map *map,
				 void *key, void *value, u64 flags)
{
	struct bpf_sock_ops_kern skops;
	u32 fd = *(u32 *)value;
	struct socket *socket;
	int err;

	socket = sockfd_lookup(fd, &err);
	if (!socket)
		return err;

	skops.sk = socket->sk;
	if (!skops.sk) {
		fput(socket->file);
		return -EINVAL;
	}

	lock_sock(skops.sk);
	preempt_disable();
	rcu_read_lock();
	err = sock_hash_ctx_update_elem(&skops, map, key, flags);
	rcu_read_unlock();
	preempt_enable();
	release_sock(skops.sk);
	fput(socket->file);
	return err;
}

static int sock_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct bucket *b;
	struct htab_elem *l;
	u32 hash, key_size;
	int ret = -ENOENT;

	key_size = map->key_size;
	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_bh(&b->lock);
	l = lookup_elem_raw(head, hash, key, key_size);
	if (l) {
		struct sock *sock = l->sk;
		struct smap_psock *psock;

		hlist_del_rcu(&l->hash_node);
		psock = smap_psock_sk(sock);

		/* This check handles a racing sock event that can get the
		 * sk_callback_lock before this case but after xchg happens
		 * causing the refcnt to hit zero and sock user data (psock)
		 * to be null and queued for garbage collection.
		 */
		if (likely(psock)) {
			smap_list_hash_remove(psock, l);
			smap_release_sock(psock, sock);
		}
		free_htab_elem(htab, l);
		ret = 0;
	}
	raw_spin_unlock_bh(&b->lock);
	return ret;
}

struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l;
	u32 key_size, hash;
	struct bucket *b;
	struct sock *sk;

	key_size = map->key_size;
	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	l = lookup_elem_raw(head, hash, key, key_size);
	sk = l ? l->sk : NULL;
	return sk;
}

const struct bpf_map_ops sock_map_ops = {
	.map_alloc = sock_map_alloc,
	.map_free = sock_map_free,
	.map_lookup_elem = sock_map_lookup,
	.map_get_next_key = sock_map_get_next_key,
	.map_update_elem = sock_map_update_elem,
	.map_delete_elem = sock_map_delete_elem,
	.map_release_uref = sock_map_release,
	.map_check_btf = map_check_no_btf,
};

const struct bpf_map_ops sock_hash_ops = {
	.map_alloc = sock_hash_alloc,
	.map_free = sock_hash_free,
	.map_lookup_elem = sock_map_lookup,
	.map_get_next_key = sock_hash_get_next_key,
	.map_update_elem = sock_hash_update_elem,
	.map_delete_elem = sock_hash_delete_elem,
	.map_release_uref = sock_map_release,
	.map_check_btf = map_check_no_btf,
};

BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
}

const struct bpf_func_proto bpf_sock_map_update_proto = {
	.func = bpf_sock_map_update,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_PTR_TO_MAP_KEY,
	.arg4_type = ARG_ANYTHING,
};

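/* Example (a sketch, not part of this file): a sockops program that adds
 * established TCP connections to a sockmap with this helper. The map and
 * section names are illustrative; assumes libbpf's bpf_helpers.h of this
 * era for SEC() and struct bpf_map_def.
 *
 *	#include <linux/bpf.h>
 *	#include "bpf_helpers.h"
 *
 *	struct bpf_map_def SEC("maps") sock_map = {
 *		.type = BPF_MAP_TYPE_SOCKMAP,
 *		.key_size = sizeof(__u32),
 *		.value_size = sizeof(__u32),
 *		.max_entries = 2,
 *	};
 *
 *	SEC("sockops")
 *	int bpf_sockops(struct bpf_sock_ops *skops)
 *	{
 *		__u32 key = 0;
 *
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_sock_map_update(skops, &sock_map, &key, BPF_NOEXIST);
 *		return 0;
 *	}
 */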
BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return sock_hash_ctx_update_elem(bpf_sock, map, key, flags);
}

const struct bpf_func_proto bpf_sock_hash_update_proto = {
	.func = bpf_sock_hash_update,
	.gpl_only = false,
	.pkt_access = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_PTR_TO_MAP_KEY,
	.arg4_type = ARG_ANYTHING,
};
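
/* bpf_sock_hash_update() is used the same way from a sockops program, only
 * against a BPF_MAP_TYPE_SOCKHASH map whose key can be any stack-initialized
 * struct up to MAX_BPF_STACK bytes, e.g. a connection 4-tuple. A minimal
 * sketch, assuming the same headers as the sockmap example above and a
 * sock_hash map defined analogously; the key layout is illustrative and
 * port byte-order handling is elided:
 *
 *	struct sock_key {
 *		__u32 dip, sip;
 *		__u32 dport, sport;
 *	};
 *
 *	SEC("sockops")
 *	int bpf_sockops_hash(struct bpf_sock_ops *skops)
 *	{
 *		struct sock_key key = {
 *			.dip = skops->remote_ip4,
 *			.sip = skops->local_ip4,
 *			.dport = skops->remote_port,
 *			.sport = skops->local_port,
 *		};
 *
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_sock_hash_update(skops, &sock_hash, &key, BPF_ANY);
 *		return 0;
 *	}
 */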