xfrm_policy.c

  1. /*
  2. * xfrm_policy.c
  3. *
  4. * Changes:
  5. * Mitsuru KANDA @USAGI
  6. * Kazunori MIYAZAWA @USAGI
  7. * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
  8. * IPv6 support
  9. * Kazunori MIYAZAWA @USAGI
  10. * YOSHIFUJI Hideaki
  11. * Split up af-specific portion
  12. * Derek Atkins <derek@ihtfp.com> Add the post_input processor
  13. *
  14. */
  15. #include <linux/err.h>
  16. #include <linux/slab.h>
  17. #include <linux/kmod.h>
  18. #include <linux/list.h>
  19. #include <linux/spinlock.h>
  20. #include <linux/workqueue.h>
  21. #include <linux/notifier.h>
  22. #include <linux/netdevice.h>
  23. #include <linux/netfilter.h>
  24. #include <linux/module.h>
  25. #include <linux/cache.h>
  26. #include <linux/audit.h>
  27. #include <net/dst.h>
  28. #include <net/flow.h>
  29. #include <net/xfrm.h>
  30. #include <net/ip.h>
  31. #ifdef CONFIG_XFRM_STATISTICS
  32. #include <net/snmp.h>
  33. #endif
  34. #include "xfrm_hash.h"
  35. #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
  36. #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
  37. #define XFRM_MAX_QUEUE_LEN 100
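/* These constants bound the per-policy hold queue used while a bundle is
 * being resolved: the hold timer is first armed at XFRM_QUEUE_TMO_MIN and is
 * capped at XFRM_QUEUE_TMO_MAX, and at most XFRM_MAX_QUEUE_LEN packets are
 * queued per policy. */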
  38. static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
  39. static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
  40. __read_mostly;
  41. static struct kmem_cache *xfrm_dst_cache __read_mostly;
  42. static void xfrm_init_pmtu(struct dst_entry *dst);
  43. static int stale_bundle(struct dst_entry *dst);
  44. static int xfrm_bundle_ok(struct xfrm_dst *xdst);
  45. static void xfrm_policy_queue_process(unsigned long arg);
  46. static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  47. int dir);
  48. static inline bool
  49. __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
  50. {
  51. const struct flowi4 *fl4 = &fl->u.ip4;
  52. return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
  53. addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
  54. !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
  55. !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
  56. (fl4->flowi4_proto == sel->proto || !sel->proto) &&
  57. (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
  58. }
  59. static inline bool
  60. __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
  61. {
  62. const struct flowi6 *fl6 = &fl->u.ip6;
  63. return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
  64. addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
  65. !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
  66. !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
  67. (fl6->flowi6_proto == sel->proto || !sel->proto) &&
  68. (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
  69. }
  70. bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
  71. unsigned short family)
  72. {
  73. switch (family) {
  74. case AF_INET:
  75. return __xfrm4_selector_match(sel, fl);
  76. case AF_INET6:
  77. return __xfrm6_selector_match(sel, fl);
  78. }
  79. return false;
  80. }
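/* A selector matches a flow when both addresses fall inside the selector's
 * prefixes, the ports match under the port masks, and the protocol and
 * ifindex either match or are left as 0 wildcards. Illustrative example
 * (not taken from this file): a selector with daddr.a4 = 10.0.0.0,
 * prefixlen_d = 8, prefixlen_s = 0, proto = IPPROTO_TCP and
 * dport_mask = sport_mask = 0 matches any TCP flow towards 10.0.0.0/8,
 * from any source address and on any port. */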
  81. static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
  82. {
  83. struct xfrm_policy_afinfo *afinfo;
  84. if (unlikely(family >= NPROTO))
  85. return NULL;
  86. rcu_read_lock();
  87. afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
  88. if (unlikely(!afinfo))
  89. rcu_read_unlock();
  90. return afinfo;
  91. }
  92. static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
  93. {
  94. rcu_read_unlock();
  95. }
  96. static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
  97. const xfrm_address_t *saddr,
  98. const xfrm_address_t *daddr,
  99. int family)
  100. {
  101. struct xfrm_policy_afinfo *afinfo;
  102. struct dst_entry *dst;
  103. afinfo = xfrm_policy_get_afinfo(family);
  104. if (unlikely(afinfo == NULL))
  105. return ERR_PTR(-EAFNOSUPPORT);
  106. dst = afinfo->dst_lookup(net, tos, saddr, daddr);
  107. xfrm_policy_put_afinfo(afinfo);
  108. return dst;
  109. }
  110. static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
  111. xfrm_address_t *prev_saddr,
  112. xfrm_address_t *prev_daddr,
  113. int family)
  114. {
  115. struct net *net = xs_net(x);
  116. xfrm_address_t *saddr = &x->props.saddr;
  117. xfrm_address_t *daddr = &x->id.daddr;
  118. struct dst_entry *dst;
  119. if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
  120. saddr = x->coaddr;
  121. daddr = prev_daddr;
  122. }
  123. if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
  124. saddr = prev_saddr;
  125. daddr = x->coaddr;
  126. }
  127. dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
  128. if (!IS_ERR(dst)) {
  129. if (prev_saddr != saddr)
  130. memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
  131. if (prev_daddr != daddr)
  132. memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
  133. }
  134. return dst;
  135. }
  136. static inline unsigned long make_jiffies(long secs)
  137. {
  138. if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
  139. return MAX_SCHEDULE_TIMEOUT-1;
  140. else
  141. return secs*HZ;
  142. }
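/* Per-policy lifetime timer: a soft limit only warns the key managers via
 * km_policy_expired() and rearms after XFRM_KM_TIMEOUT, while a hard limit
 * deletes the policy and reports it as expired. */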
  143. static void xfrm_policy_timer(unsigned long data)
  144. {
  145. struct xfrm_policy *xp = (struct xfrm_policy *)data;
  146. unsigned long now = get_seconds();
  147. long next = LONG_MAX;
  148. int warn = 0;
  149. int dir;
  150. read_lock(&xp->lock);
  151. if (unlikely(xp->walk.dead))
  152. goto out;
  153. dir = xfrm_policy_id2dir(xp->index);
  154. if (xp->lft.hard_add_expires_seconds) {
  155. long tmo = xp->lft.hard_add_expires_seconds +
  156. xp->curlft.add_time - now;
  157. if (tmo <= 0)
  158. goto expired;
  159. if (tmo < next)
  160. next = tmo;
  161. }
  162. if (xp->lft.hard_use_expires_seconds) {
  163. long tmo = xp->lft.hard_use_expires_seconds +
  164. (xp->curlft.use_time ? : xp->curlft.add_time) - now;
  165. if (tmo <= 0)
  166. goto expired;
  167. if (tmo < next)
  168. next = tmo;
  169. }
  170. if (xp->lft.soft_add_expires_seconds) {
  171. long tmo = xp->lft.soft_add_expires_seconds +
  172. xp->curlft.add_time - now;
  173. if (tmo <= 0) {
  174. warn = 1;
  175. tmo = XFRM_KM_TIMEOUT;
  176. }
  177. if (tmo < next)
  178. next = tmo;
  179. }
  180. if (xp->lft.soft_use_expires_seconds) {
  181. long tmo = xp->lft.soft_use_expires_seconds +
  182. (xp->curlft.use_time ? : xp->curlft.add_time) - now;
  183. if (tmo <= 0) {
  184. warn = 1;
  185. tmo = XFRM_KM_TIMEOUT;
  186. }
  187. if (tmo < next)
  188. next = tmo;
  189. }
  190. if (warn)
  191. km_policy_expired(xp, dir, 0, 0);
  192. if (next != LONG_MAX &&
  193. !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
  194. xfrm_pol_hold(xp);
  195. out:
  196. read_unlock(&xp->lock);
  197. xfrm_pol_put(xp);
  198. return;
  199. expired:
  200. read_unlock(&xp->lock);
  201. if (!xfrm_policy_delete(xp, dir))
  202. km_policy_expired(xp, dir, 1, 0);
  203. xfrm_pol_put(xp);
  204. }
  205. static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
  206. {
  207. struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
  208. if (unlikely(pol->walk.dead))
  209. flo = NULL;
  210. else
  211. xfrm_pol_hold(pol);
  212. return flo;
  213. }
  214. static int xfrm_policy_flo_check(struct flow_cache_object *flo)
  215. {
  216. struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
  217. return !pol->walk.dead;
  218. }
  219. static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
  220. {
  221. xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
  222. }
  223. static const struct flow_cache_ops xfrm_policy_fc_ops = {
  224. .get = xfrm_policy_flo_get,
  225. .check = xfrm_policy_flo_check,
  226. .delete = xfrm_policy_flo_delete,
  227. };
  228. /* Allocate an xfrm_policy. Not used internally; it is meant to be used
  229. * by pfkeyv2 SPD calls.
  230. */
  231. struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
  232. {
  233. struct xfrm_policy *policy;
  234. policy = kzalloc(sizeof(struct xfrm_policy), gfp);
  235. if (policy) {
  236. write_pnet(&policy->xp_net, net);
  237. INIT_LIST_HEAD(&policy->walk.all);
  238. INIT_HLIST_NODE(&policy->bydst);
  239. INIT_HLIST_NODE(&policy->byidx);
  240. rwlock_init(&policy->lock);
  241. atomic_set(&policy->refcnt, 1);
  242. skb_queue_head_init(&policy->polq.hold_queue);
  243. setup_timer(&policy->timer, xfrm_policy_timer,
  244. (unsigned long)policy);
  245. setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
  246. (unsigned long)policy);
  247. policy->flo.ops = &xfrm_policy_fc_ops;
  248. }
  249. return policy;
  250. }
  251. EXPORT_SYMBOL(xfrm_policy_alloc);
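/* Rough usage sketch (illustrative only; the real callers are the pfkeyv2
 * and netlink SPD paths, which fill in many more fields, and "sel" below is
 * a hypothetical caller-built selector):
 *
 *	pol = xfrm_policy_alloc(net, GFP_KERNEL);
 *	if (!pol)
 *		return -ENOMEM;
 *	pol->selector = sel;
 *	pol->family = AF_INET;
 *	pol->action = XFRM_POLICY_ALLOW;
 *	err = xfrm_policy_insert(XFRM_POLICY_OUT, pol, 0);
 */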
  252. /* Destroy an xfrm_policy: all descendant resources must have been released by this point. */
  253. void xfrm_policy_destroy(struct xfrm_policy *policy)
  254. {
  255. BUG_ON(!policy->walk.dead);
  256. if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
  257. BUG();
  258. security_xfrm_policy_free(policy->security);
  259. kfree(policy);
  260. }
  261. EXPORT_SYMBOL(xfrm_policy_destroy);
  262. static void xfrm_queue_purge(struct sk_buff_head *list)
  263. {
  264. struct sk_buff *skb;
  265. while ((skb = skb_dequeue(list)) != NULL)
  266. kfree_skb(skb);
  267. }
  268. /* Rule must be locked. Release descendant resources and announce the
  269. * entry dead. The rule must already have been unlinked from all lists.
  270. */
  271. static void xfrm_policy_kill(struct xfrm_policy *policy)
  272. {
  273. policy->walk.dead = 1;
  274. atomic_inc(&policy->genid);
  275. if (del_timer(&policy->polq.hold_timer))
  276. xfrm_pol_put(policy);
  277. xfrm_queue_purge(&policy->polq.hold_queue);
  278. if (del_timer(&policy->timer))
  279. xfrm_pol_put(policy);
  280. xfrm_pol_put(policy);
  281. }
  282. static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
  283. static inline unsigned int idx_hash(struct net *net, u32 index)
  284. {
  285. return __idx_hash(index, net->xfrm.policy_idx_hmask);
  286. }
  287. static struct hlist_head *policy_hash_bysel(struct net *net,
  288. const struct xfrm_selector *sel,
  289. unsigned short family, int dir)
  290. {
  291. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  292. unsigned int hash = __sel_hash(sel, family, hmask);
  293. return (hash == hmask + 1 ?
  294. &net->xfrm.policy_inexact[dir] :
  295. net->xfrm.policy_bydst[dir].table + hash);
  296. }
  297. static struct hlist_head *policy_hash_direct(struct net *net,
  298. const xfrm_address_t *daddr,
  299. const xfrm_address_t *saddr,
  300. unsigned short family, int dir)
  301. {
  302. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  303. unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
  304. return net->xfrm.policy_bydst[dir].table + hash;
  305. }
  306. static void xfrm_dst_hash_transfer(struct hlist_head *list,
  307. struct hlist_head *ndsttable,
  308. unsigned int nhashmask)
  309. {
  310. struct hlist_node *tmp, *entry0 = NULL;
  311. struct xfrm_policy *pol;
  312. unsigned int h0 = 0;
  313. redo:
  314. hlist_for_each_entry_safe(pol, tmp, list, bydst) {
  315. unsigned int h;
  316. h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
  317. pol->family, nhashmask);
  318. if (!entry0) {
  319. hlist_del(&pol->bydst);
  320. hlist_add_head(&pol->bydst, ndsttable+h);
  321. h0 = h;
  322. } else {
  323. if (h != h0)
  324. continue;
  325. hlist_del(&pol->bydst);
  326. hlist_add_after(entry0, &pol->bydst);
  327. }
  328. entry0 = &pol->bydst;
  329. }
  330. if (!hlist_empty(list)) {
  331. entry0 = NULL;
  332. goto redo;
  333. }
  334. }
  335. static void xfrm_idx_hash_transfer(struct hlist_head *list,
  336. struct hlist_head *nidxtable,
  337. unsigned int nhashmask)
  338. {
  339. struct hlist_node *tmp;
  340. struct xfrm_policy *pol;
  341. hlist_for_each_entry_safe(pol, tmp, list, byidx) {
  342. unsigned int h;
  343. h = __idx_hash(pol->index, nhashmask);
  344. hlist_add_head(&pol->byidx, nidxtable+h);
  345. }
  346. }
  347. static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
  348. {
  349. return ((old_hmask + 1) << 1) - 1;
  350. }
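/* Each resize doubles the table: e.g. an old hmask of 15 (16 buckets)
 * becomes 31 (32 buckets). */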
  351. static void xfrm_bydst_resize(struct net *net, int dir)
  352. {
  353. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  354. unsigned int nhashmask = xfrm_new_hash_mask(hmask);
  355. unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
  356. struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
  357. struct hlist_head *ndst = xfrm_hash_alloc(nsize);
  358. int i;
  359. if (!ndst)
  360. return;
  361. write_lock_bh(&net->xfrm.xfrm_policy_lock);
  362. for (i = hmask; i >= 0; i--)
  363. xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
  364. net->xfrm.policy_bydst[dir].table = ndst;
  365. net->xfrm.policy_bydst[dir].hmask = nhashmask;
  366. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  367. xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
  368. }
  369. static void xfrm_byidx_resize(struct net *net, int total)
  370. {
  371. unsigned int hmask = net->xfrm.policy_idx_hmask;
  372. unsigned int nhashmask = xfrm_new_hash_mask(hmask);
  373. unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
  374. struct hlist_head *oidx = net->xfrm.policy_byidx;
  375. struct hlist_head *nidx = xfrm_hash_alloc(nsize);
  376. int i;
  377. if (!nidx)
  378. return;
  379. write_lock_bh(&net->xfrm.xfrm_policy_lock);
  380. for (i = hmask; i >= 0; i--)
  381. xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
  382. net->xfrm.policy_byidx = nidx;
  383. net->xfrm.policy_idx_hmask = nhashmask;
  384. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  385. xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
  386. }
  387. static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
  388. {
  389. unsigned int cnt = net->xfrm.policy_count[dir];
  390. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  391. if (total)
  392. *total += cnt;
  393. if ((hmask + 1) < xfrm_policy_hashmax &&
  394. cnt > hmask)
  395. return 1;
  396. return 0;
  397. }
  398. static inline int xfrm_byidx_should_resize(struct net *net, int total)
  399. {
  400. unsigned int hmask = net->xfrm.policy_idx_hmask;
  401. if ((hmask + 1) < xfrm_policy_hashmax &&
  402. total > hmask)
  403. return 1;
  404. return 0;
  405. }
  406. void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
  407. {
  408. read_lock_bh(&net->xfrm.xfrm_policy_lock);
  409. si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
  410. si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
  411. si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
  412. si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
  413. si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
  414. si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
  415. si->spdhcnt = net->xfrm.policy_idx_hmask;
  416. si->spdhmcnt = xfrm_policy_hashmax;
  417. read_unlock_bh(&net->xfrm.xfrm_policy_lock);
  418. }
  419. EXPORT_SYMBOL(xfrm_spd_getinfo);
  420. static DEFINE_MUTEX(hash_resize_mutex);
  421. static void xfrm_hash_resize(struct work_struct *work)
  422. {
  423. struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
  424. int dir, total;
  425. mutex_lock(&hash_resize_mutex);
  426. total = 0;
  427. for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
  428. if (xfrm_bydst_should_resize(net, dir, &total))
  429. xfrm_bydst_resize(net, dir);
  430. }
  431. if (xfrm_byidx_should_resize(net, total))
  432. xfrm_byidx_resize(net, total);
  433. mutex_unlock(&hash_resize_mutex);
  434. }
  435. /* Generate a new index... KAME seems to generate them ordered by cost,
  436. * at the price of completely unpredictable rule ordering. That will not do here. */
  437. static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
  438. {
  439. static u32 idx_generator;
  440. for (;;) {
  441. struct hlist_head *list;
  442. struct xfrm_policy *p;
  443. u32 idx;
  444. int found;
  445. if (!index) {
  446. idx = (idx_generator | dir);
  447. idx_generator += 8;
  448. } else {
  449. idx = index;
  450. index = 0;
  451. }
  452. if (idx == 0)
  453. idx = 8;
  454. list = net->xfrm.policy_byidx + idx_hash(net, idx);
  455. found = 0;
  456. hlist_for_each_entry(p, list, byidx) {
  457. if (p->index == idx) {
  458. found = 1;
  459. break;
  460. }
  461. }
  462. if (!found)
  463. return idx;
  464. }
  465. }
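/* The low three bits of a generated index encode the direction, so that
 * xfrm_policy_id2dir() can recover it; the generator advances in steps of
 * eight to keep those bits free. */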
  466. static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
  467. {
  468. u32 *p1 = (u32 *) s1;
  469. u32 *p2 = (u32 *) s2;
  470. int len = sizeof(struct xfrm_selector) / sizeof(u32);
  471. int i;
  472. for (i = 0; i < len; i++) {
  473. if (p1[i] != p2[i])
  474. return 1;
  475. }
  476. return 0;
  477. }
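/* When a policy is replaced, any packets sitting on the old policy's hold
 * queue are spliced onto the new policy's queue and its hold timer is fired
 * immediately, so they get another chance to be resolved. */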
  478. static void xfrm_policy_requeue(struct xfrm_policy *old,
  479. struct xfrm_policy *new)
  480. {
  481. struct xfrm_policy_queue *pq = &old->polq;
  482. struct sk_buff_head list;
  483. __skb_queue_head_init(&list);
  484. spin_lock_bh(&pq->hold_queue.lock);
  485. skb_queue_splice_init(&pq->hold_queue, &list);
  486. if (del_timer(&pq->hold_timer))
  487. xfrm_pol_put(old);
  488. spin_unlock_bh(&pq->hold_queue.lock);
  489. if (skb_queue_empty(&list))
  490. return;
  491. pq = &new->polq;
  492. spin_lock_bh(&pq->hold_queue.lock);
  493. skb_queue_splice(&list, &pq->hold_queue);
  494. pq->timeout = XFRM_QUEUE_TMO_MIN;
  495. if (!mod_timer(&pq->hold_timer, jiffies))
  496. xfrm_pol_hold(new);
  497. spin_unlock_bh(&pq->hold_queue.lock);
  498. }
  499. static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
  500. struct xfrm_policy *pol)
  501. {
  502. u32 mark = policy->mark.v & policy->mark.m;
  503. if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
  504. return true;
  505. if ((mark & pol->mark.m) == pol->mark.v &&
  506. policy->priority == pol->priority)
  507. return true;
  508. return false;
  509. }
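/* Insert a policy into the SPD. Chains are kept ordered by priority; an
 * existing entry with the same selector, mark and security context is
 * replaced (and its queued packets moved over), unless excl is set, in
 * which case -EEXIST is returned. */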
  510. int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
  511. {
  512. struct net *net = xp_net(policy);
  513. struct xfrm_policy *pol;
  514. struct xfrm_policy *delpol;
  515. struct hlist_head *chain;
  516. struct hlist_node *newpos;
  517. write_lock_bh(&net->xfrm.xfrm_policy_lock);
  518. chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
  519. delpol = NULL;
  520. newpos = NULL;
  521. hlist_for_each_entry(pol, chain, bydst) {
  522. if (pol->type == policy->type &&
  523. !selector_cmp(&pol->selector, &policy->selector) &&
  524. xfrm_policy_mark_match(policy, pol) &&
  525. xfrm_sec_ctx_match(pol->security, policy->security) &&
  526. !WARN_ON(delpol)) {
  527. if (excl) {
  528. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  529. return -EEXIST;
  530. }
  531. delpol = pol;
  532. if (policy->priority > pol->priority)
  533. continue;
  534. } else if (policy->priority >= pol->priority) {
  535. newpos = &pol->bydst;
  536. continue;
  537. }
  538. if (delpol)
  539. break;
  540. }
  541. if (newpos)
  542. hlist_add_after(newpos, &policy->bydst);
  543. else
  544. hlist_add_head(&policy->bydst, chain);
  545. xfrm_pol_hold(policy);
  546. net->xfrm.policy_count[dir]++;
  547. atomic_inc(&net->xfrm.flow_cache_genid);
  548. /* After previous checking, family can either be AF_INET or AF_INET6 */
  549. if (policy->family == AF_INET)
  550. rt_genid_bump_ipv4(net);
  551. else
  552. rt_genid_bump_ipv6(net);
  553. if (delpol) {
  554. xfrm_policy_requeue(delpol, policy);
  555. __xfrm_policy_unlink(delpol, dir);
  556. }
  557. policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
  558. hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
  559. policy->curlft.add_time = get_seconds();
  560. policy->curlft.use_time = 0;
  561. if (!mod_timer(&policy->timer, jiffies + HZ))
  562. xfrm_pol_hold(policy);
  563. list_add(&policy->walk.all, &net->xfrm.policy_all);
  564. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  565. if (delpol)
  566. xfrm_policy_kill(delpol);
  567. else if (xfrm_bydst_should_resize(net, dir, NULL))
  568. schedule_work(&net->xfrm.policy_hash_work);
  569. return 0;
  570. }
  571. EXPORT_SYMBOL(xfrm_policy_insert);
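/* Look up a policy by selector and security context (or by index, below).
 * With delete set, the matching policy is unlinked and killed once the
 * security hook approves its deletion. */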
  572. struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
  573. int dir, struct xfrm_selector *sel,
  574. struct xfrm_sec_ctx *ctx, int delete,
  575. int *err)
  576. {
  577. struct xfrm_policy *pol, *ret;
  578. struct hlist_head *chain;
  579. *err = 0;
  580. write_lock_bh(&net->xfrm.xfrm_policy_lock);
  581. chain = policy_hash_bysel(net, sel, sel->family, dir);
  582. ret = NULL;
  583. hlist_for_each_entry(pol, chain, bydst) {
  584. if (pol->type == type &&
  585. (mark & pol->mark.m) == pol->mark.v &&
  586. !selector_cmp(sel, &pol->selector) &&
  587. xfrm_sec_ctx_match(ctx, pol->security)) {
  588. xfrm_pol_hold(pol);
  589. if (delete) {
  590. *err = security_xfrm_policy_delete(
  591. pol->security);
  592. if (*err) {
  593. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  594. return pol;
  595. }
  596. __xfrm_policy_unlink(pol, dir);
  597. }
  598. ret = pol;
  599. break;
  600. }
  601. }
  602. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  603. if (ret && delete)
  604. xfrm_policy_kill(ret);
  605. return ret;
  606. }
  607. EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
  608. struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
  609. int dir, u32 id, int delete, int *err)
  610. {
  611. struct xfrm_policy *pol, *ret;
  612. struct hlist_head *chain;
  613. *err = -ENOENT;
  614. if (xfrm_policy_id2dir(id) != dir)
  615. return NULL;
  616. *err = 0;
  617. write_lock_bh(&net->xfrm.xfrm_policy_lock);
  618. chain = net->xfrm.policy_byidx + idx_hash(net, id);
  619. ret = NULL;
  620. hlist_for_each_entry(pol, chain, byidx) {
  621. if (pol->type == type && pol->index == id &&
  622. (mark & pol->mark.m) == pol->mark.v) {
  623. xfrm_pol_hold(pol);
  624. if (delete) {
  625. *err = security_xfrm_policy_delete(
  626. pol->security);
  627. if (*err) {
  628. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  629. return pol;
  630. }
  631. __xfrm_policy_unlink(pol, dir);
  632. }
  633. ret = pol;
  634. break;
  635. }
  636. }
  637. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  638. if (ret && delete)
  639. xfrm_policy_kill(ret);
  640. return ret;
  641. }
  642. EXPORT_SYMBOL(xfrm_policy_byid);
  643. #ifdef CONFIG_SECURITY_NETWORK_XFRM
  644. static inline int
  645. xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
  646. {
  647. int dir, err = 0;
  648. for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
  649. struct xfrm_policy *pol;
  650. int i;
  651. hlist_for_each_entry(pol,
  652. &net->xfrm.policy_inexact[dir], bydst) {
  653. if (pol->type != type)
  654. continue;
  655. err = security_xfrm_policy_delete(pol->security);
  656. if (err) {
  657. xfrm_audit_policy_delete(pol, 0, task_valid);
  658. return err;
  659. }
  660. }
  661. for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
  662. hlist_for_each_entry(pol,
  663. net->xfrm.policy_bydst[dir].table + i,
  664. bydst) {
  665. if (pol->type != type)
  666. continue;
  667. err = security_xfrm_policy_delete(
  668. pol->security);
  669. if (err) {
  670. xfrm_audit_policy_delete(pol, 0,
  671. task_valid);
  672. return err;
  673. }
  674. }
  675. }
  676. }
  677. return err;
  678. }
  679. #else
  680. static inline int
  681. xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
  682. {
  683. return 0;
  684. }
  685. #endif
  686. int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
  687. {
  688. int dir, err = 0, cnt = 0;
  689. write_lock_bh(&net->xfrm.xfrm_policy_lock);
  690. err = xfrm_policy_flush_secctx_check(net, type, task_valid);
  691. if (err)
  692. goto out;
  693. for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
  694. struct xfrm_policy *pol;
  695. int i;
  696. again1:
  697. hlist_for_each_entry(pol,
  698. &net->xfrm.policy_inexact[dir], bydst) {
  699. if (pol->type != type)
  700. continue;
  701. __xfrm_policy_unlink(pol, dir);
  702. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  703. cnt++;
  704. xfrm_audit_policy_delete(pol, 1, task_valid);
  705. xfrm_policy_kill(pol);
  706. write_lock_bh(&net->xfrm.xfrm_policy_lock);
  707. goto again1;
  708. }
  709. for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
  710. again2:
  711. hlist_for_each_entry(pol,
  712. net->xfrm.policy_bydst[dir].table + i,
  713. bydst) {
  714. if (pol->type != type)
  715. continue;
  716. __xfrm_policy_unlink(pol, dir);
  717. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  718. cnt++;
  719. xfrm_audit_policy_delete(pol, 1, task_valid);
  720. xfrm_policy_kill(pol);
  721. write_lock_bh(&net->xfrm.xfrm_policy_lock);
  722. goto again2;
  723. }
  724. }
  725. }
  726. if (!cnt)
  727. err = -ESRCH;
  728. out:
  729. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  730. return err;
  731. }
  732. EXPORT_SYMBOL(xfrm_policy_flush);
  733. int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
  734. int (*func)(struct xfrm_policy *, int, int, void*),
  735. void *data)
  736. {
  737. struct xfrm_policy *pol;
  738. struct xfrm_policy_walk_entry *x;
  739. int error = 0;
  740. if (walk->type >= XFRM_POLICY_TYPE_MAX &&
  741. walk->type != XFRM_POLICY_TYPE_ANY)
  742. return -EINVAL;
  743. if (list_empty(&walk->walk.all) && walk->seq != 0)
  744. return 0;
  745. write_lock_bh(&net->xfrm.xfrm_policy_lock);
  746. if (list_empty(&walk->walk.all))
  747. x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
  748. else
  749. x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
  750. list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
  751. if (x->dead)
  752. continue;
  753. pol = container_of(x, struct xfrm_policy, walk);
  754. if (walk->type != XFRM_POLICY_TYPE_ANY &&
  755. walk->type != pol->type)
  756. continue;
  757. error = func(pol, xfrm_policy_id2dir(pol->index),
  758. walk->seq, data);
  759. if (error) {
  760. list_move_tail(&walk->walk.all, &x->all);
  761. goto out;
  762. }
  763. walk->seq++;
  764. }
  765. if (walk->seq == 0) {
  766. error = -ENOENT;
  767. goto out;
  768. }
  769. list_del_init(&walk->walk.all);
  770. out:
  771. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  772. return error;
  773. }
  774. EXPORT_SYMBOL(xfrm_policy_walk);
  775. void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
  776. {
  777. INIT_LIST_HEAD(&walk->walk.all);
  778. walk->walk.dead = 1;
  779. walk->type = type;
  780. walk->seq = 0;
  781. }
  782. EXPORT_SYMBOL(xfrm_policy_walk_init);
  783. void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
  784. {
  785. if (list_empty(&walk->walk.all))
  786. return;
  787. write_lock_bh(&net->xfrm.xfrm_policy_lock);
  788. list_del(&walk->walk.all);
  789. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  790. }
  791. EXPORT_SYMBOL(xfrm_policy_walk_done);
  792. /*
  793. * Find policy to apply to this flow.
  794. *
  795. * Returns 0 if policy found, else an -errno.
  796. */
  797. static int xfrm_policy_match(const struct xfrm_policy *pol,
  798. const struct flowi *fl,
  799. u8 type, u16 family, int dir)
  800. {
  801. const struct xfrm_selector *sel = &pol->selector;
  802. int ret = -ESRCH;
  803. bool match;
  804. if (pol->family != family ||
  805. (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
  806. pol->type != type)
  807. return ret;
  808. match = xfrm_selector_match(sel, fl, family);
  809. if (match)
  810. ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
  811. dir);
  812. return ret;
  813. }
  814. static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
  815. const struct flowi *fl,
  816. u16 family, u8 dir)
  817. {
  818. int err;
  819. struct xfrm_policy *pol, *ret;
  820. const xfrm_address_t *daddr, *saddr;
  821. struct hlist_head *chain;
  822. u32 priority = ~0U;
  823. daddr = xfrm_flowi_daddr(fl, family);
  824. saddr = xfrm_flowi_saddr(fl, family);
  825. if (unlikely(!daddr || !saddr))
  826. return NULL;
  827. read_lock_bh(&net->xfrm.xfrm_policy_lock);
  828. chain = policy_hash_direct(net, daddr, saddr, family, dir);
  829. ret = NULL;
  830. hlist_for_each_entry(pol, chain, bydst) {
  831. err = xfrm_policy_match(pol, fl, type, family, dir);
  832. if (err) {
  833. if (err == -ESRCH)
  834. continue;
  835. else {
  836. ret = ERR_PTR(err);
  837. goto fail;
  838. }
  839. } else {
  840. ret = pol;
  841. priority = ret->priority;
  842. break;
  843. }
  844. }
  845. chain = &net->xfrm.policy_inexact[dir];
  846. hlist_for_each_entry(pol, chain, bydst) {
  847. err = xfrm_policy_match(pol, fl, type, family, dir);
  848. if (err) {
  849. if (err == -ESRCH)
  850. continue;
  851. else {
  852. ret = ERR_PTR(err);
  853. goto fail;
  854. }
  855. } else if (pol->priority < priority) {
  856. ret = pol;
  857. break;
  858. }
  859. }
  860. if (ret)
  861. xfrm_pol_hold(ret);
  862. fail:
  863. read_unlock_bh(&net->xfrm.xfrm_policy_lock);
  864. return ret;
  865. }
  866. static struct xfrm_policy *
  867. __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
  868. {
  869. #ifdef CONFIG_XFRM_SUB_POLICY
  870. struct xfrm_policy *pol;
  871. pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
  872. if (pol != NULL)
  873. return pol;
  874. #endif
  875. return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
  876. }
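/* The FLOW_DIR_* and XFRM_POLICY_* direction constants currently have the
 * same values, so this helper and policy_to_flow_dir() below normally reduce
 * to identity maps; the switch is only a fallback. */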
  877. static int flow_to_policy_dir(int dir)
  878. {
  879. if (XFRM_POLICY_IN == FLOW_DIR_IN &&
  880. XFRM_POLICY_OUT == FLOW_DIR_OUT &&
  881. XFRM_POLICY_FWD == FLOW_DIR_FWD)
  882. return dir;
  883. switch (dir) {
  884. default:
  885. case FLOW_DIR_IN:
  886. return XFRM_POLICY_IN;
  887. case FLOW_DIR_OUT:
  888. return XFRM_POLICY_OUT;
  889. case FLOW_DIR_FWD:
  890. return XFRM_POLICY_FWD;
  891. }
  892. }
  893. static struct flow_cache_object *
  894. xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
  895. u8 dir, struct flow_cache_object *old_obj, void *ctx)
  896. {
  897. struct xfrm_policy *pol;
  898. if (old_obj)
  899. xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
  900. pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
  901. if (IS_ERR_OR_NULL(pol))
  902. return ERR_CAST(pol);
  903. /* Resolver returns two references:
  904. * one for cache and one for caller of flow_cache_lookup() */
  905. xfrm_pol_hold(pol);
  906. return &pol->flo;
  907. }
  908. static inline int policy_to_flow_dir(int dir)
  909. {
  910. if (XFRM_POLICY_IN == FLOW_DIR_IN &&
  911. XFRM_POLICY_OUT == FLOW_DIR_OUT &&
  912. XFRM_POLICY_FWD == FLOW_DIR_FWD)
  913. return dir;
  914. switch (dir) {
  915. default:
  916. case XFRM_POLICY_IN:
  917. return FLOW_DIR_IN;
  918. case XFRM_POLICY_OUT:
  919. return FLOW_DIR_OUT;
  920. case XFRM_POLICY_FWD:
  921. return FLOW_DIR_FWD;
  922. }
  923. }
  924. static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
  925. const struct flowi *fl)
  926. {
  927. struct xfrm_policy *pol;
  928. struct net *net = sock_net(sk);
  929. read_lock_bh(&net->xfrm.xfrm_policy_lock);
  930. if ((pol = sk->sk_policy[dir]) != NULL) {
  931. bool match = xfrm_selector_match(&pol->selector, fl,
  932. sk->sk_family);
  933. int err = 0;
  934. if (match) {
  935. if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
  936. pol = NULL;
  937. goto out;
  938. }
  939. err = security_xfrm_policy_lookup(pol->security,
  940. fl->flowi_secid,
  941. policy_to_flow_dir(dir));
  942. if (!err)
  943. xfrm_pol_hold(pol);
  944. else if (err == -ESRCH)
  945. pol = NULL;
  946. else
  947. pol = ERR_PTR(err);
  948. } else
  949. pol = NULL;
  950. }
  951. out:
  952. read_unlock_bh(&net->xfrm.xfrm_policy_lock);
  953. return pol;
  954. }
  955. static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
  956. {
  957. struct net *net = xp_net(pol);
  958. struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
  959. pol->family, dir);
  960. list_add(&pol->walk.all, &net->xfrm.policy_all);
  961. hlist_add_head(&pol->bydst, chain);
  962. hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
  963. net->xfrm.policy_count[dir]++;
  964. xfrm_pol_hold(pol);
  965. if (xfrm_bydst_should_resize(net, dir, NULL))
  966. schedule_work(&net->xfrm.policy_hash_work);
  967. }
  968. static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  969. int dir)
  970. {
  971. struct net *net = xp_net(pol);
  972. if (hlist_unhashed(&pol->bydst))
  973. return NULL;
  974. hlist_del_init(&pol->bydst);
  975. hlist_del(&pol->byidx);
  976. list_del(&pol->walk.all);
  977. net->xfrm.policy_count[dir]--;
  978. return pol;
  979. }
  980. int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
  981. {
  982. struct net *net = xp_net(pol);
  983. write_lock_bh(&net->xfrm.xfrm_policy_lock);
  984. pol = __xfrm_policy_unlink(pol, dir);
  985. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  986. if (pol) {
  987. xfrm_policy_kill(pol);
  988. return 0;
  989. }
  990. return -ENOENT;
  991. }
  992. EXPORT_SYMBOL(xfrm_policy_delete);
  993. int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
  994. {
  995. struct net *net = xp_net(pol);
  996. struct xfrm_policy *old_pol;
  997. #ifdef CONFIG_XFRM_SUB_POLICY
  998. if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
  999. return -EINVAL;
  1000. #endif
  1001. write_lock_bh(&net->xfrm.xfrm_policy_lock);
  1002. old_pol = sk->sk_policy[dir];
  1003. sk->sk_policy[dir] = pol;
  1004. if (pol) {
  1005. pol->curlft.add_time = get_seconds();
  1006. pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
  1007. __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
  1008. }
  1009. if (old_pol) {
  1010. if (pol)
  1011. xfrm_policy_requeue(old_pol, pol);
  1012. /* Unlinking always succeeds. This is the only function
  1013. * allowed to delete or replace a per-socket policy.
  1014. */
  1015. __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
  1016. }
  1017. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1018. if (old_pol) {
  1019. xfrm_policy_kill(old_pol);
  1020. }
  1021. return 0;
  1022. }
  1023. static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
  1024. {
  1025. struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
  1026. struct net *net = xp_net(old);
  1027. if (newp) {
  1028. newp->selector = old->selector;
  1029. if (security_xfrm_policy_clone(old->security,
  1030. &newp->security)) {
  1031. kfree(newp);
  1032. return NULL; /* ENOMEM */
  1033. }
  1034. newp->lft = old->lft;
  1035. newp->curlft = old->curlft;
  1036. newp->mark = old->mark;
  1037. newp->action = old->action;
  1038. newp->flags = old->flags;
  1039. newp->xfrm_nr = old->xfrm_nr;
  1040. newp->index = old->index;
  1041. newp->type = old->type;
  1042. memcpy(newp->xfrm_vec, old->xfrm_vec,
  1043. newp->xfrm_nr*sizeof(struct xfrm_tmpl));
  1044. write_lock_bh(&net->xfrm.xfrm_policy_lock);
  1045. __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
  1046. write_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1047. xfrm_pol_put(newp);
  1048. }
  1049. return newp;
  1050. }
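/* Duplicate both per-socket policies for a newly cloned socket (typically
 * when a connection is accepted); returns -ENOMEM if either clone fails. */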
  1051. int __xfrm_sk_clone_policy(struct sock *sk)
  1052. {
  1053. struct xfrm_policy *p0 = sk->sk_policy[0],
  1054. *p1 = sk->sk_policy[1];
  1055. sk->sk_policy[0] = sk->sk_policy[1] = NULL;
  1056. if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
  1057. return -ENOMEM;
  1058. if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
  1059. return -ENOMEM;
  1060. return 0;
  1061. }
  1062. static int
  1063. xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
  1064. unsigned short family)
  1065. {
  1066. int err;
  1067. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1068. if (unlikely(afinfo == NULL))
  1069. return -EINVAL;
  1070. err = afinfo->get_saddr(net, local, remote);
  1071. xfrm_policy_put_afinfo(afinfo);
  1072. return err;
  1073. }
  1074. /* Resolve list of templates for the flow, given policy. */
  1075. static int
  1076. xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
  1077. struct xfrm_state **xfrm, unsigned short family)
  1078. {
  1079. struct net *net = xp_net(policy);
  1080. int nx;
  1081. int i, error;
  1082. xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
  1083. xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
  1084. xfrm_address_t tmp;
  1085. for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
  1086. struct xfrm_state *x;
  1087. xfrm_address_t *remote = daddr;
  1088. xfrm_address_t *local = saddr;
  1089. struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
  1090. if (tmpl->mode == XFRM_MODE_TUNNEL ||
  1091. tmpl->mode == XFRM_MODE_BEET) {
  1092. remote = &tmpl->id.daddr;
  1093. local = &tmpl->saddr;
  1094. if (xfrm_addr_any(local, tmpl->encap_family)) {
  1095. error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
  1096. if (error)
  1097. goto fail;
  1098. local = &tmp;
  1099. }
  1100. }
  1101. x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
  1102. if (x && x->km.state == XFRM_STATE_VALID) {
  1103. xfrm[nx++] = x;
  1104. daddr = remote;
  1105. saddr = local;
  1106. continue;
  1107. }
  1108. if (x) {
  1109. error = (x->km.state == XFRM_STATE_ERROR ?
  1110. -EINVAL : -EAGAIN);
  1111. xfrm_state_put(x);
  1112. } else if (error == -ESRCH) {
  1113. error = -EAGAIN;
  1114. }
  1115. if (!tmpl->optional)
  1116. goto fail;
  1117. }
  1118. return nx;
  1119. fail:
  1120. for (nx--; nx >= 0; nx--)
  1121. xfrm_state_put(xfrm[nx]);
  1122. return error;
  1123. }
  1124. static int
  1125. xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
  1126. struct xfrm_state **xfrm, unsigned short family)
  1127. {
  1128. struct xfrm_state *tp[XFRM_MAX_DEPTH];
  1129. struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
  1130. int cnx = 0;
  1131. int error;
  1132. int ret;
  1133. int i;
  1134. for (i = 0; i < npols; i++) {
  1135. if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
  1136. error = -ENOBUFS;
  1137. goto fail;
  1138. }
  1139. ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
  1140. if (ret < 0) {
  1141. error = ret;
  1142. goto fail;
  1143. } else
  1144. cnx += ret;
  1145. }
  1146. /* found states are sorted for outbound processing */
  1147. if (npols > 1)
  1148. xfrm_state_sort(xfrm, tpp, cnx, family);
  1149. return cnx;
  1150. fail:
  1151. for (cnx--; cnx >= 0; cnx--)
  1152. xfrm_state_put(tpp[cnx]);
  1153. return error;
  1154. }
  1155. /* Check that the bundle accepts the flow and its components are
  1156. * still valid.
  1157. */
  1158. static inline int xfrm_get_tos(const struct flowi *fl, int family)
  1159. {
  1160. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1161. int tos;
  1162. if (!afinfo)
  1163. return -EINVAL;
  1164. tos = afinfo->get_tos(fl);
  1165. xfrm_policy_put_afinfo(afinfo);
  1166. return tos;
  1167. }
  1168. static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
  1169. {
  1170. struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
  1171. struct dst_entry *dst = &xdst->u.dst;
  1172. if (xdst->route == NULL) {
  1173. /* Dummy bundle - if it has xfrms, we could not build
  1174. * the bundle because template resolution failed, which
  1175. * means we need to retry resolution. */
  1176. if (xdst->num_xfrms > 0)
  1177. return NULL;
  1178. } else if (dst->flags & DST_XFRM_QUEUE) {
  1179. return NULL;
  1180. } else {
  1181. /* Real bundle */
  1182. if (stale_bundle(dst))
  1183. return NULL;
  1184. }
  1185. dst_hold(dst);
  1186. return flo;
  1187. }
  1188. static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
  1189. {
  1190. struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
  1191. struct dst_entry *dst = &xdst->u.dst;
  1192. if (!xdst->route)
  1193. return 0;
  1194. if (stale_bundle(dst))
  1195. return 0;
  1196. return 1;
  1197. }
  1198. static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
  1199. {
  1200. struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
  1201. struct dst_entry *dst = &xdst->u.dst;
  1202. dst_free(dst);
  1203. }
  1204. static const struct flow_cache_ops xfrm_bundle_fc_ops = {
  1205. .get = xfrm_bundle_flo_get,
  1206. .check = xfrm_bundle_flo_check,
  1207. .delete = xfrm_bundle_flo_delete,
  1208. };
  1209. static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
  1210. {
  1211. struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1212. struct dst_ops *dst_ops;
  1213. struct xfrm_dst *xdst;
  1214. if (!afinfo)
  1215. return ERR_PTR(-EINVAL);
  1216. switch (family) {
  1217. case AF_INET:
  1218. dst_ops = &net->xfrm.xfrm4_dst_ops;
  1219. break;
  1220. #if IS_ENABLED(CONFIG_IPV6)
  1221. case AF_INET6:
  1222. dst_ops = &net->xfrm.xfrm6_dst_ops;
  1223. break;
  1224. #endif
  1225. default:
  1226. BUG();
  1227. }
  1228. xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
  1229. if (likely(xdst)) {
  1230. struct dst_entry *dst = &xdst->u.dst;
  1231. memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
  1232. xdst->flo.ops = &xfrm_bundle_fc_ops;
  1233. if (afinfo->init_dst)
  1234. afinfo->init_dst(net, xdst);
  1235. } else
  1236. xdst = ERR_PTR(-ENOBUFS);
  1237. xfrm_policy_put_afinfo(afinfo);
  1238. return xdst;
  1239. }
  1240. static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
  1241. int nfheader_len)
  1242. {
  1243. struct xfrm_policy_afinfo *afinfo =
  1244. xfrm_policy_get_afinfo(dst->ops->family);
  1245. int err;
  1246. if (!afinfo)
  1247. return -EINVAL;
  1248. err = afinfo->init_path(path, dst, nfheader_len);
  1249. xfrm_policy_put_afinfo(afinfo);
  1250. return err;
  1251. }
  1252. static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
  1253. const struct flowi *fl)
  1254. {
  1255. struct xfrm_policy_afinfo *afinfo =
  1256. xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
  1257. int err;
  1258. if (!afinfo)
  1259. return -EINVAL;
  1260. err = afinfo->fill_dst(xdst, dev, fl);
  1261. xfrm_policy_put_afinfo(afinfo);
  1262. return err;
  1263. }
  1264. /* Allocate a chain of dst_entry's, attach the known xfrm's, calculate
  1265. * all the metrics... In short, build a bundle.
  1266. */
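/* On success the returned dst heads a chain of xfrm_dst entries linked via
 * ->child, with the original route at the bottom and ->path pointing to it;
 * the header_len/trailer_len stored at each level account for that transform
 * and the ones beneath it. */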
  1267. static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
  1268. struct xfrm_state **xfrm, int nx,
  1269. const struct flowi *fl,
  1270. struct dst_entry *dst)
  1271. {
  1272. struct net *net = xp_net(policy);
  1273. unsigned long now = jiffies;
  1274. struct net_device *dev;
  1275. struct xfrm_mode *inner_mode;
  1276. struct dst_entry *dst_prev = NULL;
  1277. struct dst_entry *dst0 = NULL;
  1278. int i = 0;
  1279. int err;
  1280. int header_len = 0;
  1281. int nfheader_len = 0;
  1282. int trailer_len = 0;
  1283. int tos;
  1284. int family = policy->selector.family;
  1285. xfrm_address_t saddr, daddr;
  1286. xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
  1287. tos = xfrm_get_tos(fl, family);
  1288. err = tos;
  1289. if (tos < 0)
  1290. goto put_states;
  1291. dst_hold(dst);
  1292. for (; i < nx; i++) {
  1293. struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
  1294. struct dst_entry *dst1 = &xdst->u.dst;
  1295. err = PTR_ERR(xdst);
  1296. if (IS_ERR(xdst)) {
  1297. dst_release(dst);
  1298. goto put_states;
  1299. }
  1300. if (xfrm[i]->sel.family == AF_UNSPEC) {
  1301. inner_mode = xfrm_ip2inner_mode(xfrm[i],
  1302. xfrm_af2proto(family));
  1303. if (!inner_mode) {
  1304. err = -EAFNOSUPPORT;
  1305. dst_release(dst);
  1306. goto put_states;
  1307. }
  1308. } else
  1309. inner_mode = xfrm[i]->inner_mode;
  1310. if (!dst_prev)
  1311. dst0 = dst1;
  1312. else {
  1313. dst_prev->child = dst_clone(dst1);
  1314. dst1->flags |= DST_NOHASH;
  1315. }
  1316. xdst->route = dst;
  1317. dst_copy_metrics(dst1, dst);
  1318. if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
  1319. family = xfrm[i]->props.family;
  1320. dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
  1321. family);
  1322. err = PTR_ERR(dst);
  1323. if (IS_ERR(dst))
  1324. goto put_states;
  1325. } else
  1326. dst_hold(dst);
  1327. dst1->xfrm = xfrm[i];
  1328. xdst->xfrm_genid = xfrm[i]->genid;
  1329. dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
  1330. dst1->flags |= DST_HOST;
  1331. dst1->lastuse = now;
  1332. dst1->input = dst_discard;
  1333. dst1->output = inner_mode->afinfo->output;
  1334. dst1->next = dst_prev;
  1335. dst_prev = dst1;
  1336. header_len += xfrm[i]->props.header_len;
  1337. if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
  1338. nfheader_len += xfrm[i]->props.header_len;
  1339. trailer_len += xfrm[i]->props.trailer_len;
  1340. }
  1341. dst_prev->child = dst;
  1342. dst0->path = dst;
  1343. err = -ENODEV;
  1344. dev = dst->dev;
  1345. if (!dev)
  1346. goto free_dst;
  1347. xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
  1348. xfrm_init_pmtu(dst_prev);
  1349. for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
  1350. struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
  1351. err = xfrm_fill_dst(xdst, dev, fl);
  1352. if (err)
  1353. goto free_dst;
  1354. dst_prev->header_len = header_len;
  1355. dst_prev->trailer_len = trailer_len;
  1356. header_len -= xdst->u.dst.xfrm->props.header_len;
  1357. trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
  1358. }
  1359. out:
  1360. return dst0;
  1361. put_states:
  1362. for (; i < nx; i++)
  1363. xfrm_state_put(xfrm[i]);
  1364. free_dst:
  1365. if (dst0)
  1366. dst_free(dst0);
  1367. dst0 = ERR_PTR(err);
  1368. goto out;
  1369. }
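
/* Illustrative note (not from the original sources): for nx == 2 the
 * bundle built above is a chain of xfrm_dst's terminated by the plain
 * route, roughly
 *
 *	dst0 (xfrm[0]) --child--> dst1 (xfrm[1]) --child--> dst (route)
 *
 * with dst0->path pointing at the terminating route, each xdst->route
 * holding the route used at that hop, and header/trailer lengths
 * accumulated from outermost to innermost entry.
 */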

#ifdef CONFIG_XFRM_SUB_POLICY
static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}

	memcpy(*target, src, size);
	return 0;
}
#endif

static int xfrm_dst_update_parent(struct dst_entry *dst,
				  const struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static int xfrm_dst_update_origin(struct dst_entry *dst,
				  const struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}

static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0]))
		return PTR_ERR(pols[0]);

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
						    XFRM_POLICY_OUT);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
				return PTR_ERR(pols[1]);
			}
			(*num_pols)++;
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;
}
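
/* Illustrative note (not from the original sources): on success the
 * caller gets the number of resolved policies in *num_pols and the total
 * template count in *num_xfrms; *num_xfrms is set to -1 as soon as any
 * policy other than ALLOW is seen, which callers treat as "block".
 */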

static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst;
	struct xfrm_dst *xdst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err != 0 && err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	if (num_pols > 1)
		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
	else
		err = xfrm_dst_update_origin(dst, fl);
	if (unlikely(err)) {
		dst_free(dst);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
		return ERR_PTR(err);
	}

	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	return xdst;
}
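
/* Illustrative note (not from the original sources): a positive err from
 * xfrm_tmpl_resolve() is the number of states placed in xfrm[]; when it
 * is 0, ERR_PTR(0) == NULL is returned and callers interpret a NULL
 * bundle as "no transformation required".
 */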

static void xfrm_policy_queue_process(unsigned long arg)
{
	int err = 0;
	struct sk_buff *skb;
	struct sock *sk;
	struct dst_entry *dst;
	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
	struct xfrm_policy_queue *pq = &pol->polq;
	struct flowi fl;
	struct sk_buff_head list;

	spin_lock(&pq->hold_queue.lock);
	skb = skb_peek(&pq->hold_queue);
	if (!skb) {
		spin_unlock(&pq->hold_queue.lock);
		goto out;
	}
	dst = skb_dst(skb);
	sk = skb->sk;
	xfrm_decode_session(skb, &fl, dst->ops->family);
	spin_unlock(&pq->hold_queue.lock);

	dst_hold(dst->path);
	dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
			  sk, 0);
	if (IS_ERR(dst))
		goto purge_queue;

	if (dst->flags & DST_XFRM_QUEUE) {
		dst_release(dst);

		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
			goto purge_queue;

		pq->timeout = pq->timeout << 1;
		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
			xfrm_pol_hold(pol);
		goto out;
	}

	dst_release(dst);

	__skb_queue_head_init(&list);

	spin_lock(&pq->hold_queue.lock);
	pq->timeout = 0;
	skb_queue_splice_init(&pq->hold_queue, &list);
	spin_unlock(&pq->hold_queue.lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);

		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
		dst_hold(skb_dst(skb)->path);
		dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
				  &fl, skb->sk, 0);
		if (IS_ERR(dst)) {
			kfree_skb(skb);
			continue;
		}

		nf_reset(skb);
		skb_dst_drop(skb);
		skb_dst_set(skb, dst);

		err = dst_output(skb);
	}

out:
	xfrm_pol_put(pol);
	return;

purge_queue:
	pq->timeout = 0;
	xfrm_queue_purge(&pq->hold_queue);
	xfrm_pol_put(pol);
}
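
/* Illustrative note (not from the original sources): while the looked-up
 * dst is still a DST_XFRM_QUEUE dummy bundle, the hold timer is re-armed
 * with a doubled pq->timeout, so queued packets are retried with
 * exponential backoff until XFRM_QUEUE_TMO_MAX is reached and the queue
 * is purged.
 */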

static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
{
	unsigned long sched_next;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
	struct xfrm_policy *pol = xdst->pols[0];
	struct xfrm_policy_queue *pq = &pol->polq;
	const struct sk_buff *fclone = skb + 1;

	if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
		     fclone->fclone == SKB_FCLONE_CLONE)) {
		kfree_skb(skb);
		return 0;
	}

	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
		kfree_skb(skb);
		return -EAGAIN;
	}

	skb_dst_force(skb);

	spin_lock_bh(&pq->hold_queue.lock);

	if (!pq->timeout)
		pq->timeout = XFRM_QUEUE_TMO_MIN;

	sched_next = jiffies + pq->timeout;

	if (del_timer(&pq->hold_timer)) {
		if (time_before(pq->hold_timer.expires, sched_next))
			sched_next = pq->hold_timer.expires;
		xfrm_pol_put(pol);
	}

	__skb_queue_tail(&pq->hold_queue, skb);
	if (!mod_timer(&pq->hold_timer, sched_next))
		xfrm_pol_hold(pol);

	spin_unlock_bh(&pq->hold_queue.lock);

	return 0;
}

static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
						 struct dst_entry *dst,
						 const struct flowi *fl,
						 int num_xfrms,
						 u16 family)
{
	int err;
	struct net_device *dev;
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;

	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst))
		return xdst;

	if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0)
		return xdst;

	dst1 = &xdst->u.dst;
	dst_hold(dst);
	xdst->route = dst;

	dst_copy_metrics(dst1, dst);

	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
	dst1->lastuse = jiffies;

	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;

	dst_hold(dst);
	dst1->child = dst;
	dst1->path = dst;

	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	err = xfrm_fill_dst(xdst, dev, fl);
	if (err)
		goto free_dst;

out:
	return xdst;

free_dst:
	dst_release(dst1);
	xdst = ERR_PTR(err);
	goto out;
}

static struct flow_cache_object *
xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
		   struct flow_cache_object *oldflo, void *ctx)
{
	struct dst_entry *dst_orig = (struct dst_entry *)ctx;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst, *new_xdst;
	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;

	/* Check if the policies from old bundle are usable */
	xdst = NULL;
	if (oldflo) {
		xdst = container_of(oldflo, struct xfrm_dst, flo);
		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		pol_dead = 0;
		for (i = 0; i < num_pols; i++) {
			pols[i] = xdst->pols[i];
			pol_dead |= pols[i]->walk.dead;
		}
		if (pol_dead) {
			dst_free(&xdst->u.dst);
			xdst = NULL;
			num_pols = 0;
			num_xfrms = 0;
			oldflo = NULL;
		}
	}

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	if (xdst == NULL) {
		num_pols = 1;
		pols[0] = __xfrm_policy_lookup(net, fl, family,
					       flow_to_policy_dir(dir));
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto inc_error;
		if (num_pols == 0)
			return NULL;
		if (num_xfrms <= 0)
			goto make_dummy_bundle;
	}

	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
	if (IS_ERR(new_xdst)) {
		err = PTR_ERR(new_xdst);
		if (err != -EAGAIN)
			goto error;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		dst_hold(&xdst->u.dst);
		return oldflo;
	} else if (new_xdst == NULL) {
		num_xfrms = 0;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		xdst->num_xfrms = 0;
		dst_hold(&xdst->u.dst);
		return oldflo;
	}

	/* Kill the previous bundle */
	if (xdst) {
		/* The policies were stolen for newly generated bundle */
		xdst->num_pols = 0;
		dst_free(&xdst->u.dst);
	}

	/* Flow cache does not have reference, it dst_free()'s,
	 * but we do need to return one reference for original caller */
	dst_hold(&new_xdst->u.dst);
	return &new_xdst->flo;

make_dummy_bundle:
	/* We found policies, but there are no bundles to instantiate:
	 * either because the policy blocks, has no transformations, or
	 * we could not build a template (no xfrm_states). */
	xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	dst_hold(&xdst->u.dst);
	return &xdst->flo;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	if (xdst != NULL)
		dst_free(&xdst->u.dst);
	else
		xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}

static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
		return ERR_PTR(-EINVAL);
	} else {
		ret = afinfo->blackhole_route(net, dst_orig);
	}
	xfrm_policy_put_afinfo(afinfo);

	return ret;
}

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl,
			      struct sock *sk, int flags)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct flow_cache_object *flo;
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

	dst = NULL;
	xdst = NULL;
	route = NULL;
	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		num_pols = 1;
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto dropdst;

		if (num_pols) {
			if (num_xfrms <= 0) {
				drop_pols = num_pols;
				goto no_transform;
			}

			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);
			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
				goto dropdst;
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
			}

			route = xdst->route;
		}
	}

	if (xdst == NULL) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		flo = flow_cache_lookup(net, fl, family, dir,
					xfrm_bundle_lookup, dst_orig);
		if (flo == NULL)
			goto nopol;
		if (IS_ERR(flo)) {
			err = PTR_ERR(flo);
			goto dropdst;
		}
		xdst = container_of(flo, struct xfrm_dst, flo);

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a
		 * bundle with null route, is when the template could
		 * not be resolved. It means policies are there, but
		 * bundle could not be created, since we don't yet
		 * have the xfrm_state's. We need to wait for KM to
		 * negotiate new SA's or bail out with error. */
		if (net->xfrm.sysctl_larval_drop) {
			dst_release(dst);
			xfrm_pols_put(pols, drop_pols);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);

			return make_blackhole(net, family, dst_orig);
		}

		err = -EAGAIN;

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
		goto error;
	}

no_transform:
	if (num_pols == 0)
		goto nopol;

	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
		err = -ENOENT;
		goto error;
	}

	for (i = 0; i < num_pols; i++)
		pols[i]->curlft.use_time = get_seconds();

	if (num_xfrms < 0) {
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
	} else {
		/* Flow passes untransformed */
		dst_release(dst);
		dst = dst_orig;
	}
ok:
	xfrm_pols_put(pols, drop_pols);
	if (dst && dst->xfrm &&
	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
		dst->flags |= DST_XFRM_TUNNEL;
	return dst;

nopol:
	if (!(flags & XFRM_LOOKUP_ICMP)) {
		dst = dst_orig;
		goto ok;
	}
	err = -ENOENT;
error:
	dst_release(dst);
dropdst:
	dst_release(dst_orig);
	xfrm_pols_put(pols, drop_pols);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup);
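
/* Usage sketch (illustration only, not from the original sources): a
 * typical output-path caller hands its routing dst to xfrm_lookup() and
 * continues with whatever comes back, roughly
 *
 *	dst = xfrm_lookup(net, &rt->dst, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);	/- dst_orig already released -/
 *	skb_dst_set(skb, dst);
 *
 * On success the returned entry is either the original route (flow
 * passes untransformed) or the head of an xfrm bundle.
 */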

static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * 0 or more than 0 is returned when validation succeeds (either because
 * of an optional transport mode, or because it is the next index of the
 * matched secpath state with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}
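
/* Worked example (illustration only, not from the original sources):
 * with sp->xvec = { AH (transport), ESP (tunnel) } and a non-optional
 * ESP-tunnel template, a call with start == 0 skips the non-matching
 * transport-mode state, matches ESP at index 1 and returns 2; the next
 * template would then be searched from index 2 onwards.
 */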

int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);

static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}

int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	u8 fl_dir;
	int xerr_idx = -1;

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;
	fl_dir = policy_to_flow_dir(dir);

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i = skb->sp->len-1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol) {
		struct flow_cache_object *flo;

		flo = flow_cache_lookup(net, &fl, family, fl_dir,
					xfrm_policy_lookup, NULL);
		if (IS_ERR_OR_NULL(flo))
			pol = ERR_CAST(flo);
		else
			pol = container_of(flo, struct xfrm_policy, flo);
	}

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = get_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res = 1;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	skb_dst_force(skb);

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
	if (IS_ERR(dst)) {
		res = 0;
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use.  We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them.  It
	 * is just too much work.  Instead we make the checks here on
	 * every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to
	 * DST_OBSOLETE_DEAD.  If an XFRM dst has been pruned like
	 * this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the point of failure. */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

void xfrm_garbage_collect(struct net *net)
{
	flow_cache_flush(net);
}
EXPORT_SYMBOL(xfrm_garbage_collect);

static void xfrm_garbage_collect_deferred(struct net *net)
{
	flow_cache_flush_deferred(net);
}

static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	} while ((dst = dst->next));
}
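
/* Illustrative note (not from the original sources): at each level the
 * MTU is the child MTU reduced by that state's overhead via
 * xfrm_state_mtu(), capped by the MTU of the route used at that hop, so
 * the bundle ends up advertising the tightest constraint along the
 * chain.
 */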

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	if (dst->flags & DST_XFRM_QUEUE)
		return 1;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}

static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(dst->path);
}

static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(dst->path);
}

static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	return dst->path->ops->neigh_lookup(dst, skb, daddr);
}

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	struct net *net;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
		rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	rtnl_lock();
	for_each_net(net) {
		struct dst_ops *xfrm_dst_ops;

		switch (afinfo->family) {
		case AF_INET:
			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
			break;
#endif
		default:
			BUG();
		}
		*xfrm_dst_ops = *afinfo->dst_ops;
	}
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
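
/* Registration sketch (illustration only, not from the original sources):
 * a per-family module such as xfrm4_policy.c typically fills in a
 * struct xfrm_policy_afinfo and registers it at init time, roughly
 *
 *	static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.family		= AF_INET,
 *		.dst_ops	= &xfrm4_dst_ops,
 *		.dst_lookup	= xfrm4_dst_lookup,
 *		...
 *	};
 *
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
 *
 * Field names here are indicative; see the per-family files for the
 * exact hook set.
 */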

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
					 NULL);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);
	if (!err) {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		synchronize_rcu();

		dst_ops->kmem_cachep = NULL;
		dst_ops->check = NULL;
		dst_ops->negative_advice = NULL;
		dst_ops->link_failure = NULL;
		afinfo->garbage_collect = NULL;
	}
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static void __net_init xfrm_dst_ops_init(struct net *net)
{
	struct xfrm_policy_afinfo *afinfo;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
	if (afinfo)
		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
#if IS_ENABLED(CONFIG_IPV6)
	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
	if (afinfo)
		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
#endif
	rcu_read_unlock();
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_DOWN:
		xfrm_garbage_collect(dev_net(dev));
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
	if (!net->mib.xfrm_statistics)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		free_percpu(net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	free_percpu(net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif

static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
	}

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}

static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	xfrm_dst_ops_init(net);
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;
	rv = flow_cache_init(net);
	if (rv < 0)
		goto out;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	rwlock_init(&net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);

	return 0;

out:
	xfrm_sysctl_fini(net);
out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	flow_cache_fini(net);
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_input_init();
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}

static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME*/
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
				    m[i].old_family) &&
		    xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
				    m[i].old_family))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}

int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;

out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif