/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static HLIST_HEAD(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

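/* Selector matching: a flow matches a selector when its addresses fall
 * inside the selector's prefixes, its ports match under the port masks,
 * and protocol and output interface either match or are wildcarded
 * (zero) in the selector.
 */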
static inline int
__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

static inline int
__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
			unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return 0;
}

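/* Route lookup through the per-family afinfo hook.  xfrm_dst_lookup()
 * additionally substitutes care-of addresses (coaddr, used by Mobile
 * IPv6 state types) when the state's type asks for them, and copies the
 * addresses actually used back to the caller.
 */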
static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
						  xfrm_address_t *saddr,
						  xfrm_address_t *daddr,
						  int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, saddr, daddr);

	xfrm_policy_put_afinfo(afinfo);

	return dst;
}

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

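/* Per-policy lifetime timer.  Soft expiry only warns the key manager
 * via km_policy_expired() and rearms after XFRM_KM_TIMEOUT; hard expiry
 * deletes the policy outright.
 */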
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy *)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->walk.dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		setup_timer(&policy->timer, xfrm_policy_timer,
			    (unsigned long)policy);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must already be released
 * by this point.
 */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);
	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

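/* Deferred destruction: dead policies are queued on xfrm_policy_gc_list
 * and reaped from a workqueue, where their cached bundles are freed and
 * the flow cache is flushed if references remain.
 */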
static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(struct work_struct *work)
{
	struct xfrm_policy *policy;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_policy_gc_lock);
	gc_list.first = xfrm_policy_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
		xfrm_policy_gc_kill(policy);
}
static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task);

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from lists at this
 * point.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->walk.dead;
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&xfrm_policy_gc_lock);
	hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}

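/* Policy hash tables: exact policies hash by destination/source address
 * per direction, with a separate byidx table keyed on the policy index.
 * Selectors too wide to hash usefully land on the per-direction
 * policy_inexact list (__sel_hash() signals this by returning
 * hmask + 1).
 */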
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct net *net, struct xfrm_selector *sel, unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&net->xfrm.policy_inexact[dir] :
		net->xfrm.policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(struct net *net, xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return net->xfrm.policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;

redo:
	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		if (!entry0) {
			hlist_del(entry);
			hlist_add_head(&pol->bydst, ndsttable+h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del(entry);
			hlist_add_after(entry0, &pol->bydst);
		}
		entry0 = entry;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

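/* Resizing doubles the table: xfrm_new_hash_mask() turns an old mask of
 * 2^n - 1 into 2^(n+1) - 1 (e.g. 15 -> 31), and every chain is rehashed
 * into the new table under the policy lock before the old table is
 * freed.
 */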
static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	net->xfrm.policy_bydst[dir].table = ndst;
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

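/* Deferred resize worker, scheduled whenever an insert notices a table
 * has more entries than its hash mask (see xfrm_bydst_should_resize()).
 */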
static DEFINE_MUTEX(hash_resize_mutex);

static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

/* Generate a new index.  KAME seems to generate indices ordered by cost
 * at the price of making the ordering of rules completely unpredictable.
 * That will not do here.
 */
static u32 xfrm_gen_index(struct net *net, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

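/* Insert a policy into its bydst chain, ordered by priority.  A policy
 * with identical type, selector, mark and security context replaces the
 * old one (delpol) and inherits its index.  Afterwards, cached bundles
 * of the lower-precedence policies further down the chain are collected
 * and freed, since the new entry may shadow them.
 */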
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos;
	struct dst_entry *gc_list;
	u32 mark = policy->mark.v & policy->mark.m;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	net->xfrm.policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol)
		__xfrm_policy_unlink(delpol, dir);
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	list_add(&policy->walk.all, &net->xfrm.policy_all);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	entry = &policy->bydst;
	hlist_for_each_entry_continue(policy, entry, bydst) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;

			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

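/* Look up a policy by selector plus security context, or by index below,
 * optionally unlinking and killing it.  On delete, *err carries any veto
 * from the security module; in that case the policy is returned still
 * linked, with a reference held.
 */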
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
					  int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
				     int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
								pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif

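/* Flush every policy of the given type.  The policy lock is dropped
 * around each xfrm_policy_kill(), so both walks restart from the top of
 * their chain (again1/again2) after reacquiring it.
 */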
int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0, cnt = 0;
	struct xfrm_policy *dp;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

	again1:
		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			dp = __xfrm_policy_unlink(pol, dir);
			write_unlock_bh(&xfrm_policy_lock);
			if (dp)
				cnt++;

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->sessionid,
						 audit_info->secid);

			xfrm_policy_kill(pol);

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				dp = __xfrm_policy_unlink(pol, dir);
				write_unlock_bh(&xfrm_policy_lock);
				if (dp)
					cnt++;

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				xfrm_policy_kill(pol);

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}
	}
	if (!cnt)
		err = -ESRCH;
	atomic_inc(&flow_cache_genid);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

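/* Iterate over all policies via net->xfrm.policy_all.  The walk state
 * embeds a dummy entry (walk->walk.dead == 1) that is parked in the list
 * with list_move_tail() whenever func aborts the walk, so a dump can be
 * resumed from the same position on the next call.
 */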
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	write_lock_bh(&xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
{
	if (list_empty(&walk->walk.all))
		return;

	write_lock_bh(&xfrm_policy_lock);
	list_del(&walk->walk.all);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	struct xfrm_selector *sel = &pol->selector;
	int match, ret = -ESRCH;

	if (pol->family != family ||
	    (fl->mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->secid,
						  dir);

	return ret;
}

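/* Search the exact (hashed) chain first, then the inexact list; an
 * inexact match only wins if its priority is strictly better (lower)
 * than the best exact match found.
 */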
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(net, daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
			      u8 dir, void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;
	int err = 0;

#ifdef CONFIG_XFRM_SUB_POLICY
	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
	if (pol || err)
		goto end;
#endif
	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
#ifdef CONFIG_XFRM_SUB_POLICY
end:
#endif
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
	return err;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

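/* Per-socket policies (sk->sk_policy[]) take precedence over the SPD.
 * The socket's mark and security context are checked against the policy
 * before it is returned with a reference held.
 */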
static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match) {
			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
				pol = NULL;
				goto out;
			}
			err = security_xfrm_policy_lookup(pol->security,
						      fl->secid,
						      policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
out:
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);
	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
						     pol->family, dir);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	list_del(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

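/* Socket policies are linked into the SPD tables at offset
 * XFRM_POLICY_MAX + dir, and are cloned, including their security
 * context and template vector, when a socket itself is cloned.
 */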
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = xp_net(pol);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;	/* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

static int
xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
		      struct xfrm_state **xfrm,
		      unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			family = tmpl->encap_family;
			if (xfrm_addr_any(local, family)) {
				error = xfrm_get_saddr(net, &tmp, remote, family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}
		else if (error == -ESRCH)
			error = -EAGAIN;

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx >= 0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

static inline int xfrm_get_tos(struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}

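/* Bundle construction helpers: xfrm_alloc_dst() picks the per-family
 * dst_ops out of struct netns_xfrm, while xfrm_init_path() and
 * xfrm_fill_dst() delegate to the afinfo callbacks.
 */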
static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
	xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... In short, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    struct flowi *fl,
					    struct dst_entry *dst)
{
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}

		xdst->route = dst;
		memcpy(&dst1->metrics, &dst->metrics, sizeof(dst->metrics));

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
					      family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->genid = xfrm[i]->genid;

		dst1->obsolete = -1;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = xfrm[i]->outer_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	/* Copy neighbour for reachability confirmation */
	dst0->neighbour = neigh_clone(dst->neighbour);

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}

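/* Sub-policy bookkeeping: record the partner policy's selector and the
 * originating flowi in the topmost xfrm_dst so the bundle can be
 * revalidated later.  Both helpers collapse to no-ops when
 * CONFIG_XFRM_SUB_POLICY is not set.
 */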
static inline int
xfrm_dst_alloc_copy(void **target, void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}

static inline int
xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static inline int
xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}

static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
		  struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols;
	int pol_dead;
	int xfrm_nr;
	int pi;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);

restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
		pols[pi] = NULL;
	npols = 0;
	pol_dead = 0;
	xfrm_nr = 0;

	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = PTR_ERR(policy);
		if (IS_ERR(policy)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
			goto dropdst;
		}
	}

	if (!policy) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		policy = flow_cache_lookup(net, fl, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);
		err = PTR_ERR(policy);
		if (IS_ERR(policy)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
			goto dropdst;
		}
	}

	if (!policy)
		goto nopol;

	family = dst_orig->ops->family;
	pols[0] = policy;
	npols++;
	xfrm_nr += pols[0]->xfrm_nr;

	err = -ENOENT;
	if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP))
		goto error;

	policy->curlft.use_time = get_seconds();

	switch (policy->action) {
	default:
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
#ifndef CONFIG_XFRM_SUB_POLICY
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}
#endif

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

#ifdef CONFIG_XFRM_SUB_POLICY
		if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
			pols[1] = xfrm_policy_lookup_bytype(net,
							    XFRM_POLICY_TYPE_MAIN,
							    fl, family,
							    XFRM_POLICY_OUT);
			if (pols[1]) {
				if (IS_ERR(pols[1])) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
					err = PTR_ERR(pols[1]);
					goto error;
				}
				if (pols[1]->action == XFRM_POLICY_BLOCK) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
					err = -EPERM;
					goto error;
				}
				npols++;
				xfrm_nr += pols[1]->xfrm_nr;
			}
		}
  1422. /*
  1423. * Because neither flowi nor bundle information knows about
  1424. * transformation template size. On more than one policy usage
  1425. * we can realize whether all of them is bypass or not after
  1426. * they are searched. See above not-transformed bypass
  1427. * is surrounded by non-sub policy configuration, too.
  1428. */
  1429. if (xfrm_nr == 0) {
  1430. /* Flow passes not transformed. */
  1431. xfrm_pols_put(pols, npols);
  1432. return 0;
  1433. }
  1434. #endif
		nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

		if (unlikely(nx < 0)) {
			err = nx;
			if (err == -EAGAIN && net->xfrm.sysctl_larval_drop) {
				/* EREMOTE tells the caller to generate
				 * a one-shot blackhole route.  Drop every
				 * policy reference we hold, not just the
				 * first one.
				 */
				XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
				xfrm_pols_put(pols, npols);
				return -EREMOTE;
			}
			if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&net->xfrm.km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&net->xfrm.km_waitq, &wait);

				nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pols_put(pols, npols);
					goto restart;
				}
				err = nx;
			}
			if (err < 0) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
				goto error;
			}
		}
		if (nx == 0) {
			/* Flow passes untransformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

		dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
		err = PTR_ERR(dst);
		if (IS_ERR(dst)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
			goto error;
		}

		for (pi = 0; pi < npols; pi++) {
			read_lock_bh(&pols[pi]->lock);
			pol_dead |= pols[pi]->walk.dead;
			read_unlock_bh(&pols[pi]->lock);
		}

		write_lock_bh(&policy->lock);
		if (unlikely(pol_dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist a new bundle to a dead
			 * object. We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);
			dst_free(dst);

			if (pol_dead)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLDEAD);
			else
				XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			err = -EHOSTUNREACH;
			goto error;
		}

		if (npols > 1)
			err = xfrm_dst_update_parent(dst, &pols[1]->selector);
		else
			err = xfrm_dst_update_origin(dst, fl);
		if (unlikely(err)) {
			write_unlock_bh(&policy->lock);
			dst_free(dst);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			goto error;
		}

		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	return 0;

error:
	xfrm_pols_put(pols, npols);
dropdst:
	dst_release(dst_orig);
	*dst_p = NULL;
	return err;

nopol:
	err = -ENOENT;
	if (flags & XFRM_LOOKUP_ICMP)
		goto dropdst;
	return 0;
}
EXPORT_SYMBOL(__xfrm_lookup);

int xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	int err = __xfrm_lookup(net, dst_p, fl, sk, flags);

	if (err == -EREMOTE) {
		dst_release(*dst_p);
		*dst_p = NULL;
		err = -EAGAIN;
	}

	return err;
}
EXPORT_SYMBOL(xfrm_lookup);
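
/* Give the state's type a chance to signal the policy failure back to
 * the peer; only types that implement a reject hook do anything here.
 */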
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}
/* When an skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in the most
 * naive way possible. Of course, connected sockets must have the
 * policy cached at them.
 */
static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}
/*
 * Returns 0 or a positive value when validation succeeds (either a
 * bypass because of an optional transport-mode template, or the next
 * index past the secpath state matched with the template).
 * Returns -1 when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
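/* Worked example (illustrative): with sp->len == 2 and a required
 * tunnel-mode template matching sp->xvec[0], the call returns 1, so the
 * caller resumes scanning at index 1 for the next template.  With an
 * optional transport-mode template it returns `start' untouched, i.e. a
 * bypass.
 */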
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}
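
/* Decode the packet into a flow key.  With `reverse' set, source and
 * destination are swapped while decoding, so input-path callers can
 * build the flow as the sender would have seen it.
 */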
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);
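
/* Scan the secpath from index k onwards for a non-transport-mode state;
 * on a hit, report its index through *idxp so the caller can reject the
 * offending entry.
 */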
static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
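
/* Validate an inbound packet against the SPD: returns 1 when the packet
 * is permitted and 0 when it must be dropped.  Per-family wrappers are
 * the usual entry point; an illustrative sketch of such a caller:
 *
 *	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 *		goto drop;
 */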
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	u8 fl_dir;
	int xerr_idx = -1;

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;
	fl_dir = policy_to_flow_dir(dir);

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check the SAs that were used against their selectors. */
	if (skb->sp) {
		int i;

		for (i = skb->sp->len - 1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}
	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol)
		pol = flow_cache_lookup(net, &fl, family, fl_dir,
					xfrm_policy_lookup);

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = get_seconds();
			npols++;
		}
	}
#endif
	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find the corresponding xfrm.
		 * Order is _important_. Later we will implement some
		 * barriers, but at the moment barriers are implied
		 * between every two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
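
/* Forwarding-path helper: re-resolve the route through the SPD so that
 * forwarded traffic is transformed when policy demands it.  Returns 1
 * on success, 0 when the packet should be dropped.
 */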
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	dst = skb_dst(skb);

	res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use.  We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them.  It is just too much work.
	 * Instead we make the checks here on every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to a
	 * positive non-zero integer.  If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}
static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
}
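
/* A device is going away: repoint every xfrm child dst that still
 * references it at the per-namespace loopback device so the bundle can
 * be torn down safely later.
 */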
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such a dst must be popped before it reaches the
	 * point of failure. */
	return;
}
static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
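
/* Walk one policy's bundle list, unlink every bundle for which func()
 * returns true, and chain the victims onto *gc_list_p for the caller to
 * free outside the policy lock.
 */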
static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
{
	struct dst_entry *dst, **dstp;

	write_lock(&pol->lock);
	dstp = &pol->bundles;
	while ((dst = *dstp) != NULL) {
		if (func(dst)) {
			*dstp = dst->next;
			dst->next = *gc_list_p;
			*gc_list_p = dst;
		} else {
			dstp = &dst->next;
		}
	}
	write_unlock(&pol->lock);
}

static void xfrm_prune_bundles(struct net *net, int (*func)(struct dst_entry *))
{
	struct dst_entry *gc_list = NULL;
	int dir;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		struct hlist_head *table;
		int i;

		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst)
			prune_one_bundle(pol, func, &gc_list);

		table = net->xfrm.policy_bydst[dir].table;
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry, table + i, bydst)
				prune_one_bundle(pol, func, &gc_list);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}
}

static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(struct net *net)
{
	xfrm_prune_bundles(net, unused_bundle);
}

static int xfrm_flush_bundles(struct net *net)
{
	xfrm_prune_bundles(net, stale_bundle);
	return 0;
}
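
/* Seed the cached MTUs of a freshly built bundle.  At each level the
 * child's MTU is reduced by the xfrm state's overhead and capped by the
 * attached route's MTU; e.g. an ESP tunnel over a 1500-byte route ends
 * up somewhat below 1500, the exact gap depending on the algorithms in
 * use (rough illustration).
 */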
static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
		   struct flowi *fl, int family, int strict)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (fl) {
		if (first->origin && !flow_cache_uli_match(first->origin, fl))
			return 0;
		if (first->partner &&
		    !xfrm_selector_match(first->partner, fl, family))
			return 0;
	}
#endif

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (fl && pol &&
		    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->genid != dst->xfrm->genid)
			return 0;

		if (strict && fl &&
		    !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}
EXPORT_SYMBOL(xfrm_bundle_ok);
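
/* Register the per-family policy operations.  Any dst_ops hook the
 * family leaves NULL is filled in with the generic xfrm implementation,
 * and every namespace's cached dst_ops template is refreshed.
 */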
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	struct net *net;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);

	rtnl_lock();
	for_each_net(net) {
		struct dst_ops *xfrm_dst_ops;

		switch (afinfo->family) {
		case AF_INET:
			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
			break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		case AF_INET6:
			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
			break;
#endif
		default:
			BUG();
		}
		*xfrm_dst_ops = *afinfo->dst_ops;
	}
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;

			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
static void __net_init xfrm_dst_ops_init(struct net *net)
{
	struct xfrm_policy_afinfo *afinfo;

	read_lock_bh(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[AF_INET];
	if (afinfo)
		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	afinfo = xfrm_policy_afinfo[AF_INET6];
	if (afinfo)
		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
#endif
	read_unlock_bh(&xfrm_policy_afinfo_lock);
}

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles(dev_net(dev));
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

	if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
			  sizeof(struct linux_xfrm_mib)) < 0)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
	}

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_audit audit_info;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	audit_info.loginuid = -1;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
#endif
	audit_info.loginuid = -1;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
	flush_work(&xfrm_policy_gc_work);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	xfrm_dst_ops_init(net);
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;
	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_input_init();
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
#ifdef CONFIG_XFRM_MIGRATE
static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
				       struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return 1;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return 1;
		}
	}
	return 0;
}
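
/* Find the policy to migrate: first the exact-selector hash chain, then
 * the inexact list, accepting only entries of the requested type (and,
 * on the inexact pass, only entries of higher priority than the hashed
 * match).
 */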
static struct xfrm_policy *xfrm_migrate_policy_find(struct xfrm_selector *sel,
						    u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &init_net.xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* In transport mode the template stores no IP
			 * addresses, so comparing mode and protocol is
			 * enough. */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}
/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	struct dst_entry *dst;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			while ((dst = pol->bundles) != NULL) {
				pol->bundles = dst->next;
				dst_free(dst);
			}
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
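
/* Orchestrate a MOBIKE-style address migration in five stages: sanity-
 * check the request, find the policy, clone the affected states onto
 * the new addresses, rewrite the policy templates, then delete the old
 * states and notify the key managers.
 */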
int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;

out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif