@@ -357,7 +357,7 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot)
 static void *get_unlocked_mapping_entry(struct address_space *mapping,
                                         pgoff_t index, void ***slotp)
 {
-        void *ret, **slot;
+        void *entry, **slot;
         struct wait_exceptional_entry_queue ewait;
         wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);
 
@@ -367,13 +367,13 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
         ewait.key.index = index;
 
         for (;;) {
-                ret = __radix_tree_lookup(&mapping->page_tree, index, NULL,
+                entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
                                           &slot);
-                if (!ret || !radix_tree_exceptional_entry(ret) ||
+                if (!entry || !radix_tree_exceptional_entry(entry) ||
                     !slot_locked(mapping, slot)) {
                         if (slotp)
                                 *slotp = slot;
-                        return ret;
+                        return entry;
                 }
                 prepare_to_wait_exclusive(wq, &ewait.wait,
                                           TASK_UNINTERRUPTIBLE);
@@ -396,13 +396,13 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
  */
 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
 {
-        void *ret, **slot;
+        void *entry, **slot;
 
 restart:
         spin_lock_irq(&mapping->tree_lock);
-        ret = get_unlocked_mapping_entry(mapping, index, &slot);
+        entry = get_unlocked_mapping_entry(mapping, index, &slot);
         /* No entry for given index? Make sure radix tree is big enough. */
-        if (!ret) {
+        if (!entry) {
                 int err;
 
                 spin_unlock_irq(&mapping->tree_lock);
@@ -410,10 +410,10 @@ restart:
                                 mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
                 if (err)
                         return ERR_PTR(err);
-                ret = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
+                entry = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
                                RADIX_DAX_ENTRY_LOCK);
                 spin_lock_irq(&mapping->tree_lock);
-                err = radix_tree_insert(&mapping->page_tree, index, ret);
+                err = radix_tree_insert(&mapping->page_tree, index, entry);
                 radix_tree_preload_end();
                 if (err) {
                         spin_unlock_irq(&mapping->tree_lock);
@@ -425,11 +425,11 @@ restart:
                 /* Good, we have inserted empty locked entry into the tree. */
                 mapping->nrexceptional++;
                 spin_unlock_irq(&mapping->tree_lock);
-                return ret;
+                return entry;
         }
         /* Normal page in radix tree? */
-        if (!radix_tree_exceptional_entry(ret)) {
-                struct page *page = ret;
+        if (!radix_tree_exceptional_entry(entry)) {
+                struct page *page = entry;
 
                 get_page(page);
                 spin_unlock_irq(&mapping->tree_lock);
@@ -442,9 +442,9 @@ restart:
                 }
                 return page;
         }
-        ret = lock_slot(mapping, slot);
+        entry = lock_slot(mapping, slot);
         spin_unlock_irq(&mapping->tree_lock);
-        return ret;
+        return entry;
 }
 
 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
@@ -469,11 +469,11 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
 
 void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
 {
-        void *ret, **slot;
+        void *entry, **slot;
 
         spin_lock_irq(&mapping->tree_lock);
-        ret = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
-        if (WARN_ON_ONCE(!ret || !radix_tree_exceptional_entry(ret) ||
+        entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
+        if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
                          !slot_locked(mapping, slot))) {
                 spin_unlock_irq(&mapping->tree_lock);
                 return;