@@ -408,6 +408,11 @@ struct uprobe_xol_ops {
 	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
 };
 
+static inline int sizeof_long(void)
+{
+	return is_ia32_task() ? 4 : 8;
+}
+
 static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
 	pre_xol_rip_insn(auprobe, regs, &current->utask->autask);
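The helper above exists because the width of a saved return address on the
user stack depends on the probed task's bitness: 4 bytes for a 32-bit (ia32)
process, 8 bytes for a 64-bit one, and the same is_ia32_task() test was
previously open-coded at the three call sites below. A minimal user-space
sketch of the idea (not kernel code; the task_is_ia32 flag stands in for
is_ia32_task()):

    #include <stdio.h>

    static int task_is_ia32;  /* stand-in for the kernel's is_ia32_task() */

    static inline int sizeof_long(void)
    {
        /* a long (and a return address) is 4 bytes for ia32, 8 for 64-bit */
        return task_is_ia32 ? 4 : 8;
    }

    int main(void)
    {
        task_is_ia32 = 1;
        printf("ia32 task:   %d-byte return address\n", sizeof_long());
        task_is_ia32 = 0;
        printf("64-bit task: %d-byte return address\n", sizeof_long());
        return 0;
    }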
@@ -419,21 +424,14 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
  */
 static int adjust_ret_addr(unsigned long sp, long correction)
 {
-	int rasize, ncopied;
-	long ra = 0;
-
-	if (is_ia32_task())
-		rasize = 4;
-	else
-		rasize = 8;
+	int rasize = sizeof_long();
+	long ra;
 
-	ncopied = copy_from_user(&ra, (void __user *)sp, rasize);
-	if (unlikely(ncopied))
+	if (copy_from_user(&ra, (void __user *)sp, rasize))
 		return -EFAULT;
 
 	ra += correction;
-	ncopied = copy_to_user((void __user *)sp, &ra, rasize);
-	if (unlikely(ncopied))
+	if (copy_to_user((void __user *)sp, &ra, rasize))
 		return -EFAULT;
 
 	return 0;
@@ -450,10 +448,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
 
 	if (auprobe->fixups & UPROBE_FIX_CALL) {
 		if (adjust_ret_addr(regs->sp, correction)) {
-			if (is_ia32_task())
-				regs->sp += 4;
-			else
-				regs->sp += 8;
+			regs->sp += sizeof_long();
 			return -ERESTART;
 		}
 	}
@@ -714,23 +709,21 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
 unsigned long
 arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
 {
-	int rasize, ncopied;
+	int rasize = sizeof_long(), nleft;
 	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
 
-	rasize = is_ia32_task() ? 4 : 8;
-	ncopied = copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize);
-	if (unlikely(ncopied))
+	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
 		return -1;
 
 	/* check whether address has been already hijacked */
 	if (orig_ret_vaddr == trampoline_vaddr)
 		return orig_ret_vaddr;
 
-	ncopied = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
-	if (likely(!ncopied))
+	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
+	if (likely(!nleft))
 		return orig_ret_vaddr;
 
-	if (ncopied != rasize) {
+	if (nleft != rasize) {
 		pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
 		       "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
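A subtlety behind the ncopied -> nleft rename in this hunk: copy_from_user()
and copy_to_user() return the number of bytes that could *not* be copied, so
0 means complete success and a return value equal to the request size means
nothing was transferred at all. The old name read backwards; "nleft" matches
what the value actually is, and "nleft != rasize" detects the dangerous
partial copy that leaves a half-written return address on the stack. A small
user-space sketch of that convention (fake_copy_to_user() is a hypothetical
stand-in, not the real kernel API):

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for copy_to_user(): returns bytes left uncopied, 0 on success. */
    static unsigned long fake_copy_to_user(void *dst, const void *src,
                                           unsigned long n)
    {
        memcpy(dst, src, n);    /* pretend the whole copy went through */
        return 0;
    }

    int main(void)
    {
        unsigned long stack_slot = 0;
        unsigned long trampoline_vaddr = 0x7f0000001000UL;  /* made-up address */
        unsigned long nleft;

        nleft = fake_copy_to_user(&stack_slot, &trampoline_vaddr,
                                  sizeof(stack_slot));
        if (!nleft)                             /* every byte was written */
            printf("hijacked: slot=%#lx\n", stack_slot);
        else if (nleft != sizeof(stack_slot))   /* partial copy: slot clobbered */
            printf("return address clobbered\n");
        return 0;
    }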