@@ -149,6 +149,13 @@ static int alloc_swap_slot_cache(unsigned int cpu)
 	cache->nr = 0;
 	cache->cur = 0;
 	cache->n_ret = 0;
+	/*
+	 * We initialized alloc_lock and free_lock earlier. We use
+	 * !cache->slots or !cache->slots_ret to know if it is safe to acquire
+	 * the corresponding lock and use the cache. Memory barrier below
+	 * ensures the assumption.
+	 */
+	mb();
 	cache->slots = slots;
 	slots = NULL;
 	cache->slots_ret = slots_ret;
@@ -275,7 +282,7 @@ int free_swap_slot(swp_entry_t entry)
 	struct swap_slots_cache *cache;
 
 	cache = raw_cpu_ptr(&swp_slots);
-	if (use_swap_slot_cache && cache->slots_ret) {
+	if (likely(use_swap_slot_cache && cache->slots_ret)) {
 		spin_lock_irq(&cache->free_lock);
 		/* Swap slots cache may be deactivated before acquiring lock */
 		if (!use_swap_slot_cache || !cache->slots_ret) {
@@ -326,7 +333,7 @@ swp_entry_t get_swap_page(struct page *page)
 	 */
 	cache = raw_cpu_ptr(&swp_slots);
 
-	if (check_cache_active()) {
+	if (likely(check_cache_active() && cache->slots)) {
 		mutex_lock(&cache->alloc_lock);
 		if (cache->slots) {
 repeat:
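
For context, the ordering the new comment relies on is a publish/check pattern: the writer initializes alloc_lock and free_lock, issues a full barrier, and only then makes the slots pointers non-NULL; readers test the pointer locklessly, take the corresponding lock only when it is non-NULL, and re-check under the lock. Below is a minimal user-space sketch of that pattern, not the swap_slots.c code itself: the names are illustrative, and a C11 release store / acquire load stands in for the kernel's mb() plus plain store.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

/* Illustrative structure, not the real swap_slots_cache. */
struct demo_cache {
	pthread_mutex_t lock;	/* stands in for alloc_lock / free_lock */
	int *_Atomic slots;	/* NULL means "cache not usable" */
};

static struct demo_cache demo;	/* static zero-init: slots starts out NULL */

/*
 * Writer: initialize the lock, then publish the slots pointer.
 * The release store plays the role of the mb() + plain store in
 * alloc_swap_slot_cache(): any reader that observes a non-NULL
 * slots pointer also observes the initialized lock.
 */
static void demo_cache_activate(int *slots)
{
	pthread_mutex_init(&demo.lock, NULL);
	atomic_store_explicit(&demo.slots, slots, memory_order_release);
}

/*
 * Reader: lockless check, take the lock, re-check under the lock,
 * mirroring the tests in free_swap_slot() and get_swap_page().
 */
static int demo_cache_use(void)
{
	if (atomic_load_explicit(&demo.slots, memory_order_acquire) == NULL)
		return 0;	/* fall back to the slow path */

	pthread_mutex_lock(&demo.lock);	/* lock is known to be initialized */
	if (atomic_load_explicit(&demo.slots, memory_order_relaxed) == NULL) {
		/* Cache was deactivated between the check and the lock. */
		pthread_mutex_unlock(&demo.lock);
		return 0;
	}
	/* ... consume a slot while holding the lock ... */
	pthread_mutex_unlock(&demo.lock);
	return 1;
}

Note that likely() is only a branch-prediction hint; the functional part of the last hunk is the added lockless cache->slots test, which is what depends on the init-before-publish ordering sketched above.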