@@ -85,9 +85,9 @@ static int rseq_update_cpu_id(struct task_struct *t)
 {
	u32 cpu_id = raw_smp_processor_id();

-	if (__put_user(cpu_id, &t->rseq->cpu_id_start))
+	if (put_user(cpu_id, &t->rseq->cpu_id_start))
		return -EFAULT;
-	if (__put_user(cpu_id, &t->rseq->cpu_id))
+	if (put_user(cpu_id, &t->rseq->cpu_id))
		return -EFAULT;
	trace_rseq_update(t);
	return 0;
@@ -100,14 +100,14 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t)
	/*
	 * Reset cpu_id_start to its initial state (0).
	 */
-	if (__put_user(cpu_id_start, &t->rseq->cpu_id_start))
+	if (put_user(cpu_id_start, &t->rseq->cpu_id_start))
		return -EFAULT;
	/*
	 * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming
	 * in after unregistration can figure out that rseq needs to be
	 * registered again.
	 */
-	if (__put_user(cpu_id, &t->rseq->cpu_id))
+	if (put_user(cpu_id, &t->rseq->cpu_id))
		return -EFAULT;
	return 0;
 }
@@ -120,7 +120,7 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
	u32 sig;
	int ret;

-	ret = __get_user(ptr, &t->rseq->rseq_cs);
+	ret = get_user(ptr, &t->rseq->rseq_cs);
	if (ret)
		return ret;
	if (!ptr) {
@@ -163,7 +163,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
	int ret;

	/* Get thread flags. */
-	ret = __get_user(flags, &t->rseq->flags);
+	ret = get_user(flags, &t->rseq->flags);
	if (ret)
		return ret;

@@ -203,7 +203,7 @@ static int clear_rseq_cs(struct task_struct *t)
	 *
	 * Set rseq_cs to NULL with single-copy atomicity.
	 */
-	return __put_user(0UL, &t->rseq->rseq_cs);
+	return put_user(0UL, &t->rseq->rseq_cs);
 }

 /*
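
For context (not part of the patch itself): put_user() and get_user() from <linux/uaccess.h> perform the access_ok() range check on the user pointer themselves, whereas the double-underscore variants __put_user()/__get_user() assume the caller has already validated the address. A minimal sketch of the difference, where example_store() is a hypothetical helper standing in for the rseq accessors above:

/*
 * Sketch only. example_store() is hypothetical; put_user(),
 * __put_user() and access_ok() are the real uaccess helpers.
 */
#include <linux/uaccess.h>

static int example_store(u32 __user *uptr, u32 val)
{
	/* put_user() validates the pointer with access_ok() internally. */
	if (put_user(val, uptr))
		return -EFAULT;

	/*
	 * __put_user() skips that check, so it is only safe after an
	 * explicit access_ok() on the same range by the caller.
	 */
	if (!access_ok(uptr, sizeof(*uptr)))
		return -EFAULT;
	if (__put_user(val, uptr))
		return -EFAULT;

	return 0;
}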