/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */
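
/*
 * A sketch of a fully populated z3fold page (not to scale). The first buddy
 * starts right after the header, the last buddy is packed against the end of
 * the page, and the middle buddy sits at start_middle with free chunks on
 * either side:
 *
 *	+--------+-------------+------+--------------+------+------------+
 *	| header | first buddy | free | middle buddy | free | last buddy |
 *	+--------+-------------+------+--------------+------+------------+
 *	chunk 0   ZHDR_CHUNKS          start_middle           TOTAL_CHUNKS
 */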
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX
};

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @pool:		pointer to the pool which this page belongs to
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the location of the middle buddy, in chunks
 * @first_num:		the starting number (for the first handle)
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
};

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), which
 * is the maximum number of free chunks in a z3fold page; there will likewise
 * be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
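
/*
 * A worked example of the chunk arithmetic above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12) and a z3fold_header that fits in a single chunk:
 *
 *	CHUNK_SHIFT	  = 12 - 6 = 6
 *	CHUNK_SIZE	  = 64 bytes
 *	ZHDR_SIZE_ALIGNED = round_up(sizeof(struct z3fold_header), 64) = 64
 *	ZHDR_CHUNKS	  = 1
 *	TOTAL_CHUNKS	  = 4096 >> 6 = 64
 *	NCHUNKS		  = (4096 - 64) >> 6 = 63
 *
 * If the header needs a second chunk (e.g. with CONFIG_DEBUG_SPINLOCK=y),
 * ZHDR_CHUNKS becomes 2 and NCHUNKS becomes 62.
 */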

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain at
 *		most two buddies; the list each z3fold page is added to
 *		depends on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver this pool is attached to
 * @zpool_ops:	zpool operations structure, provides the evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	UNDER_RECLAIM
};

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
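
/*
 * For example, with 64-byte chunks a 100-byte allocation rounds up to
 * size_to_chunks(100) == (100 + 63) >> 6 == 2 chunks (128 bytes).
 */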

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool)
{
	struct z3fold_header *zhdr = page_address(page);

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(UNDER_RECLAIM, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	handle = (unsigned long)zhdr;
	if (bud != HEADLESS)
		handle += (bud + zhdr->first_num) & BUDDY_MASK;
	return handle;
}

/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
	return (struct z3fold_header *)(handle & PAGE_MASK);
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
	return (handle - zhdr->first_num) & BUDDY_MASK;
}
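
/*
 * A worked example of the handle encoding, assuming first_num == 1 for the
 * page in question: encode_handle(zhdr, MIDDLE) stores (2 + 1) & 0x3 == 3 in
 * the low bits, and handle_to_buddy() recovers (3 - 1) & 0x3 == 2 == MIDDLE.
 * The page address survives in the high bits because the header is
 * page-aligned and the buddy offset never exceeds BUDDY_MASK.
 */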

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr->pool;

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	spin_lock(&zhdr->pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&zhdr->pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;

	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
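
/*
 * For example, with TOTAL_CHUNKS == 64 and ZHDR_CHUNKS == 1, a page holding
 * only a 5-chunk middle buddy at start_middle == 10 has 10 - 1 == 9 free
 * chunks before it and 64 - (10 + 5) == 49 after it, so num_free_chunks()
 * reports 49, the larger of the two contiguous regions.
 */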

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
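
/*
 * Note on the first_num increment in z3fold_compact_page() above: when a
 * lone middle buddy is moved to the head of the page it becomes the first
 * buddy, but outstanding handles for it still carry the old MIDDLE offset.
 * Since handle_to_buddy() computes (handle - first_num) & BUDDY_MASK,
 * bumping first_num by one makes those handles decode as
 * (MIDDLE - 1) == FIRST, so they keep resolving to the right object without
 * being rewritten.
 */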

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr->pool;
	struct page *page;
	struct list_head *unbuddied;
	int fchunks;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	z3fold_compact_page(zhdr);
	unbuddied = get_cpu_ptr(pool->unbuddied);
	fchunks = num_free_chunks(zhdr);
	if (fchunks < NCHUNKS &&
	    (!zhdr->first_chunks || !zhdr->middle_chunks ||
			!zhdr->last_chunks)) {
		/* the page's not completely free and it's unbuddied */
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[fchunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
	}
	put_cpu_ptr(pool->unbuddied);

	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	free_percpu(pool->unbuddied);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the size exceeds PAGE_SIZE, or
 * -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = 0, i, freechunks;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
		struct list_head *unbuddied;
		chunks = size_to_chunks(size);

lookup:
		/* First, try to find an unbuddied z3fold page. */
		unbuddied = get_cpu_ptr(pool->unbuddied);
		for_each_unbuddied_list(i, chunks) {
			struct list_head *l = &unbuddied[i];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr)
				continue;

			/* Re-check under lock. */
			spin_lock(&pool->lock);
			l = &unbuddied[i];
			if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
					struct z3fold_header, buddy)) ||
			    !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				put_cpu_ptr(pool->unbuddied);
				goto lookup;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				put_cpu_ptr(pool->unbuddied);
				if (can_sleep)
					cond_resched();
				goto lookup;
			}

			/*
			 * this page could not be removed from its unbuddied
			 * list while pool lock was held, and then we've taken
			 * page lock so kref_put could not be called before
			 * we got here, so it's safe to just call kref_get()
			 */
			kref_get(&zhdr->refcount);
			break;
		}
		put_cpu_ptr(pool->unbuddied);

		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto lookup;
			}
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	atomic64_inc(&pool->pages_nr);
	zhdr = init_z3fold_page(page, pool);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
	    zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
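
/*
 * Buddy selection above in brief: an empty slot is picked in FIRST, LAST,
 * MIDDLE order, except that a request too big to fit below an existing
 * middle buddy (chunks >= start_middle) goes to LAST instead of FIRST. For
 * instance, a 2-chunk request against a page whose only tenant is a middle
 * buddy at start_middle == 20 lands in FIRST, while a 25-chunk request
 * would not fit below chunk 20 and lands in LAST.
 */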

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the UNDER_RECLAIM flag being set on the page,
 * this function only sets the first|last_chunks to 0. The page is actually
 * freed once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* HEADLESS page stored */
		bud = HEADLESS;
	} else {
		z3fold_page_lock(zhdr);
		bud = handle_to_buddy(handle);

		switch (bud) {
		case FIRST:
			zhdr->first_chunks = 0;
			break;
		case MIDDLE:
			zhdr->middle_chunks = 0;
			zhdr->start_middle = 0;
			break;
		case LAST:
			zhdr->last_chunks = 0;
			break;
		default:
			pr_err("%s: unknown bud %d\n", __func__, bud);
			WARN_ON(1);
			z3fold_page_unlock(zhdr);
			return;
		}
	}

	if (bud == HEADLESS) {
		spin_lock(&pool->lock);
		list_del(&page->lru);
		spin_unlock(&pool->lock);
		free_z3fold_page(page);
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(UNDER_RECLAIM, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the UNDER_RECLAIM flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
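/*
 * A minimal sketch of an eviction handler following the protocol above.
 * writeback_object() stands in for whatever moves the compressed object to
 * its backing store (a hypothetical helper, not part of this file); on
 * success the handler must free the handle and return 0, on failure it
 * returns nonzero and z3fold_reclaim_page() re-adds the page and moves on:
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		if (writeback_object(pool, handle))
 *			return -EAGAIN;
 *		z3fold_free(pool, handle);
 *		return 0;
 *	}
 */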
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);
			if (test_bit(PAGE_HEADLESS, &page->private))
				/* candidate found */
				break;

			zhdr = page_address(page);
			if (!z3fold_page_trylock(zhdr))
				continue; /* can't evict at this point */
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			set_bit(UNDER_RECLAIM, &page->private);
			break;
		}

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(UNDER_RECLAIM, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked, so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	z3fold_page_unlock(zhdr);
out:
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
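
/*
 * A sketch of how a zpool client might use this driver through the generic
 * zpool API, where my_zpool_ops is the client's own struct zpool_ops (exact
 * zpool call signatures vary between kernel versions):
 *
 *	unsigned long handle;
 *	struct zpool *zp = zpool_create_pool("z3fold", "mypool", GFP_KERNEL,
 *					     &my_zpool_ops);
 *	if (zp && !zpool_malloc(zp, len, GFP_KERNEL, &handle)) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *		memcpy(dst, src, len);
 *		zpool_unmap_handle(zp, handle);
 *	}
 */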

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");