@@ -871,84 +871,59 @@ int get_compat_sigevent(struct sigevent *event,
 long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
 		       unsigned long bitmap_size)
 {
-	int i, j;
-	unsigned long m;
-	compat_ulong_t um;
 	unsigned long nr_compat_longs;
 
 	/* align bitmap up to nearest compat_long_t boundary */
 	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
+	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
 
 	if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
 		return -EFAULT;
 
-	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
-
-	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
-		m = 0;
-
-		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
-			/*
-			 * We dont want to read past the end of the userspace
-			 * bitmap. We must however ensure the end of the
-			 * kernel bitmap is zeroed.
-			 */
-			if (nr_compat_longs) {
-				nr_compat_longs--;
-				if (__get_user(um, umask))
-					return -EFAULT;
-			} else {
-				um = 0;
-			}
-
-			umask++;
-			m |= (long)um << (j * BITS_PER_COMPAT_LONG);
-		}
-		*mask++ = m;
+	user_access_begin();
+	while (nr_compat_longs > 1) {
+		compat_ulong_t l1, l2;
+		unsafe_get_user(l1, umask++, Efault);
+		unsafe_get_user(l2, umask++, Efault);
+		*mask++ = ((unsigned long)l2 << BITS_PER_COMPAT_LONG) | l1;
+		nr_compat_longs -= 2;
 	}
-
+	if (nr_compat_longs)
+		unsafe_get_user(*mask, umask++, Efault);
+	user_access_end();
 	return 0;
+
+Efault:
+	user_access_end();
+	return -EFAULT;
 }
 
 long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
 		       unsigned long bitmap_size)
 {
-	int i, j;
-	unsigned long m;
-	compat_ulong_t um;
 	unsigned long nr_compat_longs;
 
 	/* align bitmap up to nearest compat_long_t boundary */
 	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
+	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
 
 	if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
 		return -EFAULT;
 
-	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
-
-	for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
-		m = *mask++;
-
-		for (j = 0; j < sizeof(m)/sizeof(um); j++) {
-			um = m;
-
-			/*
-			 * We dont want to write past the end of the userspace
-			 * bitmap.
-			 */
-			if (nr_compat_longs) {
-				nr_compat_longs--;
-				if (__put_user(um, umask))
-					return -EFAULT;
-			}
-
-			umask++;
-			m >>= 4*sizeof(um);
-			m >>= 4*sizeof(um);
-		}
-	}
-
+	user_access_begin();
+	while (nr_compat_longs > 1) {
+		unsigned long m = *mask++;
+		unsafe_put_user((compat_ulong_t)m, umask++, Efault);
+		unsafe_put_user(m >> BITS_PER_COMPAT_LONG, umask++, Efault);
+		nr_compat_longs -= 2;
 	}
-
+	if (nr_compat_longs)
+		unsafe_put_user((compat_ulong_t)*mask, umask++, Efault);
+	user_access_end();
 	return 0;
+Efault:
+	user_access_end();
+	return -EFAULT;
 }
 
 void
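
A note on the conversion above, for readers of the patch rather than part of it: on a 64-bit kernel each native unsigned long holds two 32-bit compat longs, so both functions now move a pair of compat words per iteration inside a single user_access_begin()/user_access_end() section, and a faulting access branches to the local Efault label instead of returning from deep inside nested loops. The word-packing arithmetic itself can be checked with the userspace sketch below. It is illustrative only: it assumes a 64-bit host where unsigned long is 64 bits wide, stands in uint32_t for compat_ulong_t, and replaces the unsafe_get_user()/unsafe_put_user() accessors with plain dereferences, since kernel fault handling cannot be reproduced here; the array names ubits/kbits/out are invented for the example.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Userspace stand-ins for the kernel's compat types. On a 64-bit
 * kernel a compat long is 32 bits and a native long is 64 bits,
 * so this model assumes a 64-bit host.
 */
typedef uint32_t compat_ulong_t;
#define BITS_PER_COMPAT_LONG 32

int main(void)
{
	/* Three compat longs: an odd count, so the pairwise while loop
	 * leaves one word over for the trailing if, as in the patch. */
	compat_ulong_t ubits[3] = { 0xdeadbeef, 0x00c0ffee, 0x12345678 };
	unsigned long kbits[2] = { ~0UL, ~0UL };	/* dirty on purpose */
	compat_ulong_t out[3] = { 0, 0, 0 };
	unsigned long nr_compat_longs = 3;
	const compat_ulong_t *up = ubits;
	unsigned long *kp = kbits;
	compat_ulong_t *op = out;

	/* "get" direction: combine two compat longs per native long,
	 * low word first, as in the new compat_get_bitmap() loop. */
	while (nr_compat_longs > 1) {
		compat_ulong_t l1 = *up++, l2 = *up++;
		*kp++ = ((unsigned long)l2 << BITS_PER_COMPAT_LONG) | l1;
		nr_compat_longs -= 2;
	}
	/* The odd trailing word zero-extends into a full native long,
	 * preserving the old "um = 0" guarantee that the tail of the
	 * kernel-side bitmap is zeroed. */
	if (nr_compat_longs)
		*kp = *up;

	assert(kbits[0] == 0x00c0ffeedeadbeefUL);
	assert(kbits[1] == 0x0000000012345678UL);

	/* "put" direction: split each native long back into two compat
	 * longs, mirroring the new compat_put_bitmap() loop. */
	nr_compat_longs = 3;
	kp = kbits;
	while (nr_compat_longs > 1) {
		unsigned long m = *kp++;
		*op++ = (compat_ulong_t)m;
		*op++ = m >> BITS_PER_COMPAT_LONG;
		nr_compat_longs -= 2;
	}
	if (nr_compat_longs)
		*op = (compat_ulong_t)*kp;

	assert(out[0] == ubits[0] && out[1] == ubits[1] && out[2] == ubits[2]);
	printf("round trip OK\n");
	return 0;
}

Because bitmap_size is aligned up to BITS_PER_COMPAT_LONG rather than to a native long, nr_compat_longs can still be odd on a 64-bit kernel (a 32-bit bitmap, say), which is what the trailing if in each function handles.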