@@ -2143,11 +2143,10 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
 
 	if (S_ISBLK(inode->i_mode)) {
 		p->bdev = bdgrab(I_BDEV(inode));
 		error = blkdev_get(p->bdev,
-				   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
-				   sys_swapon);
+				   FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
 		if (error < 0) {
 			p->bdev = NULL;
-			return -EINVAL;
+			return error;
 		}
 		p->old_block_size = block_size(p->bdev);
 		error = set_blocksize(p->bdev, PAGE_SIZE);
@@ -2348,7 +2347,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 	struct filename *name;
 	struct file *swap_file = NULL;
 	struct address_space *mapping;
-	int i;
 	int prio;
 	int error;
 	union swap_header *swap_header;
@@ -2388,19 +2386,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 
 	p->swap_file = swap_file;
 	mapping = swap_file->f_mapping;
-
-	for (i = 0; i < nr_swapfiles; i++) {
-		struct swap_info_struct *q = swap_info[i];
-
-		if (q == p || !q->swap_file)
-			continue;
-		if (mapping == q->swap_file->f_mapping) {
-			error = -EBUSY;
-			goto bad_swap;
-		}
-	}
-
 	inode = mapping->host;
+
 	/* If S_ISREG(inode->i_mode) will do mutex_lock(&inode->i_mutex); */
 	error = claim_swapfile(p, inode);
 	if (unlikely(error))
@@ -2433,6 +2420,8 @@
 		goto bad_swap;
 	}
 	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
+		int cpu;
+
 		p->flags |= SWP_SOLIDSTATE;
 		/*
 		 * select a random position to start with to help wear leveling
@@ -2451,9 +2440,9 @@
 		error = -ENOMEM;
 		goto bad_swap;
 	}
-	for_each_possible_cpu(i) {
+	for_each_possible_cpu(cpu) {
 		struct percpu_cluster *cluster;
-		cluster = per_cpu_ptr(p->percpu_cluster, i);
+		cluster = per_cpu_ptr(p->percpu_cluster, cpu);
 		cluster_set_null(&cluster->index);
 	}
 }