
Merge tag 'pstore-v4.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull pstore updates from Kees Cook:
 "Improvements and fixes to pstore subsystem:

   - add additional checks for bad platform data

   - remove bounce buffer in console writer

   - protect read/unlink race with a mutex

   - correctly give up during dump locking failures

   - increase ftrace bandwidth by splitting ftrace buffers per CPU"

* tag 'pstore-v4.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  ramoops: add pdata NULL check to ramoops_probe
  pstore: Convert console write to use ->write_buf
  pstore: Protect unlink with read_mutex
  pstore: Use global ftrace filters for function trace filtering
  ftrace: Provide API to use global filtering for ftrace ops
  pstore: Clarify context field przs as dprzs
  pstore: improve error report for failed setup
  pstore: Merge per-CPU ftrace records into one
  pstore: Add ftrace timestamp counter
  ramoops: Split ftrace buffer space into per-CPU zones
  pstore: Make ramoops_init_przs generic for other prz arrays
  pstore: Allow prz to control need for locking
  pstore: Warn on PSTORE_TYPE_PMSG using deprecated function
  pstore: Make spinlock per zone instead of global
  pstore: Actually give up during locking failure
Linus Torvalds 9 years ago
parent
commit
52281b38bc

+ 3 - 0
Documentation/devicetree/bindings/reserved-memory/ramoops.txt

@@ -46,3 +46,6 @@ Optional properties:
   (defaults to buffered mappings)
 
 - no-dump-oops: if present, only dump panics (defaults to panics and oops)
+
+- flags: if present, pass ramoops behavioral flags (defaults to 0,
+  see include/linux/pstore_ram.h RAMOOPS_FLAG_* for flag values).
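
Note: the new "flags" property maps straight onto the flags field of struct ramoops_platform_data, so boards that do not use devicetree can pass the same behavioral flags from board code. A minimal sketch, not part of this patch; the addresses and sizes below are placeholders:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pstore_ram.h>

static struct ramoops_platform_data my_ramoops_pdata = {
	.mem_address	= 0x8f000000,	/* hypothetical reserved RAM region */
	.mem_size	= 0x100000,	/* 1 MiB total */
	.record_size	= 0x20000,
	.console_size	= 0x20000,
	.ftrace_size	= 0x20000,
	.pmsg_size	= 0x20000,
	.dump_oops	= 1,
	.flags		= RAMOOPS_FLAG_FTRACE_PER_CPU,	/* split ftrace per CPU */
};

static int __init my_board_ramoops_init(void)
{
	struct platform_device *pdev;

	/* "ramoops" matches the platform driver registered by fs/pstore/ram.c */
	pdev = platform_device_register_data(NULL, "ramoops", -1,
					     &my_ramoops_pdata,
					     sizeof(my_ramoops_pdata));
	return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(my_board_ramoops_init);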

+ 9 - 2
fs/pstore/ftrace.c

@@ -27,6 +27,9 @@
 #include <asm/barrier.h>
 #include "internal.h"
 
+/* This doesn't need to be atomic: speed is chosen over correctness here. */
+static u64 pstore_ftrace_stamp;
+
 static void notrace pstore_ftrace_call(unsigned long ip,
 				       unsigned long parent_ip,
 				       struct ftrace_ops *op,
@@ -42,6 +45,7 @@ static void notrace pstore_ftrace_call(unsigned long ip,
 
 	rec.ip = ip;
 	rec.parent_ip = parent_ip;
+	pstore_ftrace_write_timestamp(&rec, pstore_ftrace_stamp++);
 	pstore_ftrace_encode_cpu(&rec, raw_smp_processor_id());
 	psinfo->write_buf(PSTORE_TYPE_FTRACE, 0, NULL, 0, (void *)&rec,
 			  0, sizeof(rec), psinfo);
@@ -71,10 +75,13 @@ static ssize_t pstore_ftrace_knob_write(struct file *f, const char __user *buf,
 	if (!on ^ pstore_ftrace_enabled)
 		goto out;
 
-	if (on)
+	if (on) {
+		ftrace_ops_set_global_filter(&pstore_ftrace_ops);
 		ret = register_ftrace_function(&pstore_ftrace_ops);
-	else
+	} else {
 		ret = unregister_ftrace_function(&pstore_ftrace_ops);
+	}
+
 	if (ret) {
 		pr_err("%s: unable to %sregister ftrace ops: %zd\n",
 		       __func__, on ? "" : "un", ret);

+ 10 - 5
fs/pstore/inode.c

@@ -107,9 +107,11 @@ static int pstore_ftrace_seq_show(struct seq_file *s, void *v)
 	struct pstore_ftrace_seq_data *data = v;
 	struct pstore_ftrace_record *rec = (void *)(ps->data + data->off);
 
-	seq_printf(s, "%d %08lx  %08lx  %pf <- %pF\n",
-		pstore_ftrace_decode_cpu(rec), rec->ip, rec->parent_ip,
-		(void *)rec->ip, (void *)rec->parent_ip);
+	seq_printf(s, "CPU:%d ts:%llu %08lx  %08lx  %pf <- %pF\n",
+		   pstore_ftrace_decode_cpu(rec),
+		   pstore_ftrace_read_timestamp(rec),
+		   rec->ip, rec->parent_ip, (void *)rec->ip,
+		   (void *)rec->parent_ip);
 
 	return 0;
 }
@@ -197,11 +199,14 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
 	if (err)
 		return err;
 
-	if (p->psi->erase)
+	if (p->psi->erase) {
+		mutex_lock(&p->psi->read_mutex);
 		p->psi->erase(p->type, p->id, p->count,
 			      d_inode(dentry)->i_ctime, p->psi);
-	else
+		mutex_unlock(&p->psi->read_mutex);
+	} else {
 		return -EPERM;
+	}
 
 	return simple_unlink(dir, dentry);
 }

+ 0 - 34
fs/pstore/internal.h

@@ -5,40 +5,6 @@
 #include <linux/time.h>
 #include <linux/pstore.h>
 
-#if NR_CPUS <= 2 && defined(CONFIG_ARM_THUMB)
-#define PSTORE_CPU_IN_IP 0x1
-#elif NR_CPUS <= 4 && defined(CONFIG_ARM)
-#define PSTORE_CPU_IN_IP 0x3
-#endif
-
-struct pstore_ftrace_record {
-	unsigned long ip;
-	unsigned long parent_ip;
-#ifndef PSTORE_CPU_IN_IP
-	unsigned int cpu;
-#endif
-};
-
-static inline void
-pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu)
-{
-#ifndef PSTORE_CPU_IN_IP
-	rec->cpu = cpu;
-#else
-	rec->ip |= cpu;
-#endif
-}
-
-static inline unsigned int
-pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec)
-{
-#ifndef PSTORE_CPU_IN_IP
-	return rec->cpu;
-#else
-	return rec->ip & PSTORE_CPU_IN_IP;
-#endif
-}
-
 #ifdef CONFIG_PSTORE_FTRACE
 extern void pstore_register_ftrace(void);
 extern void pstore_unregister_ftrace(void);

+ 3 - 2
fs/pstore/platform.c

@@ -493,6 +493,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
 		if (!is_locked) {
 			pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
 				       , in_nmi() ? "NMI" : why);
+			return;
 		}
 	} else {
 		spin_lock_irqsave(&psinfo->buf_lock, flags);
@@ -584,8 +585,8 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c)
 		} else {
 			spin_lock_irqsave(&psinfo->buf_lock, flags);
 		}
-		memcpy(psinfo->buf, s, c);
-		psinfo->write(PSTORE_TYPE_CONSOLE, 0, &id, 0, 0, 0, c, psinfo);
+		psinfo->write_buf(PSTORE_TYPE_CONSOLE, 0, &id, 0,
+				  s, 0, c, psinfo);
 		spin_unlock_irqrestore(&psinfo->buf_lock, flags);
 		s += c;
 		c = e - s;

+ 255 - 72
fs/pstore/ram.c

@@ -85,10 +85,10 @@ MODULE_PARM_DESC(ramoops_ecc,
 		"bytes ECC)");
 
 struct ramoops_context {
-	struct persistent_ram_zone **przs;
-	struct persistent_ram_zone *cprz;
-	struct persistent_ram_zone *fprz;
-	struct persistent_ram_zone *mprz;
+	struct persistent_ram_zone **dprzs;	/* Oops dump zones */
+	struct persistent_ram_zone *cprz;	/* Console zone */
+	struct persistent_ram_zone **fprzs;	/* Ftrace zones */
+	struct persistent_ram_zone *mprz;	/* PMSG zone */
 	phys_addr_t phys_addr;
 	unsigned long size;
 	unsigned int memtype;
@@ -97,12 +97,14 @@ struct ramoops_context {
 	size_t ftrace_size;
 	size_t pmsg_size;
 	int dump_oops;
+	u32 flags;
 	struct persistent_ram_ecc_info ecc_info;
 	unsigned int max_dump_cnt;
 	unsigned int dump_write_cnt;
 	/* _read_cnt need clear on ramoops_pstore_open */
 	unsigned int dump_read_cnt;
 	unsigned int console_read_cnt;
+	unsigned int max_ftrace_cnt;
 	unsigned int ftrace_read_cnt;
 	unsigned int pmsg_read_cnt;
 	struct pstore_info pstore;
@@ -180,16 +182,69 @@ static bool prz_ok(struct persistent_ram_zone *prz)
 			   persistent_ram_ecc_string(prz, NULL, 0));
 }
 
+static ssize_t ftrace_log_combine(struct persistent_ram_zone *dest,
+				  struct persistent_ram_zone *src)
+{
+	size_t dest_size, src_size, total, dest_off, src_off;
+	size_t dest_idx = 0, src_idx = 0, merged_idx = 0;
+	void *merged_buf;
+	struct pstore_ftrace_record *drec, *srec, *mrec;
+	size_t record_size = sizeof(struct pstore_ftrace_record);
+
+	dest_off = dest->old_log_size % record_size;
+	dest_size = dest->old_log_size - dest_off;
+
+	src_off = src->old_log_size % record_size;
+	src_size = src->old_log_size - src_off;
+
+	total = dest_size + src_size;
+	merged_buf = kmalloc(total, GFP_KERNEL);
+	if (!merged_buf)
+		return -ENOMEM;
+
+	drec = (struct pstore_ftrace_record *)(dest->old_log + dest_off);
+	srec = (struct pstore_ftrace_record *)(src->old_log + src_off);
+	mrec = (struct pstore_ftrace_record *)(merged_buf);
+
+	while (dest_size > 0 && src_size > 0) {
+		if (pstore_ftrace_read_timestamp(&drec[dest_idx]) <
+		    pstore_ftrace_read_timestamp(&srec[src_idx])) {
+			mrec[merged_idx++] = drec[dest_idx++];
+			dest_size -= record_size;
+		} else {
+			mrec[merged_idx++] = srec[src_idx++];
+			src_size -= record_size;
+		}
+	}
+
+	while (dest_size > 0) {
+		mrec[merged_idx++] = drec[dest_idx++];
+		dest_size -= record_size;
+	}
+
+	while (src_size > 0) {
+		mrec[merged_idx++] = srec[src_idx++];
+		src_size -= record_size;
+	}
+
+	kfree(dest->old_log);
+	dest->old_log = merged_buf;
+	dest->old_log_size = total;
+
+	return 0;
+}
+
 static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
 				   int *count, struct timespec *time,
 				   char **buf, bool *compressed,
 				   ssize_t *ecc_notice_size,
 				   struct pstore_info *psi)
 {
-	ssize_t size;
+	ssize_t size = 0;
 	struct ramoops_context *cxt = psi->data;
 	struct persistent_ram_zone *prz = NULL;
 	int header_length = 0;
+	bool free_prz = false;
 
 	/* Ramoops headers provide time stamps for PSTORE_TYPE_DMESG, but
 	 * PSTORE_TYPE_CONSOLE and PSTORE_TYPE_FTRACE don't currently have
@@ -201,7 +256,7 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
 
 	/* Find the next valid persistent_ram_zone for DMESG */
 	while (cxt->dump_read_cnt < cxt->max_dump_cnt && !prz) {
-		prz = ramoops_get_next_prz(cxt->przs, &cxt->dump_read_cnt,
+		prz = ramoops_get_next_prz(cxt->dprzs, &cxt->dump_read_cnt,
 					   cxt->max_dump_cnt, id, type,
 					   PSTORE_TYPE_DMESG, 1);
 		if (!prz_ok(prz))
@@ -219,14 +274,56 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
 	if (!prz_ok(prz))
 		prz = ramoops_get_next_prz(&cxt->cprz, &cxt->console_read_cnt,
 					   1, id, type, PSTORE_TYPE_CONSOLE, 0);
-	if (!prz_ok(prz))
-		prz = ramoops_get_next_prz(&cxt->fprz, &cxt->ftrace_read_cnt,
-					   1, id, type, PSTORE_TYPE_FTRACE, 0);
+
 	if (!prz_ok(prz))
 		prz = ramoops_get_next_prz(&cxt->mprz, &cxt->pmsg_read_cnt,
 					   1, id, type, PSTORE_TYPE_PMSG, 0);
-	if (!prz_ok(prz))
-		return 0;
+
+	/* ftrace is last since it may want to dynamically allocate memory. */
+	if (!prz_ok(prz)) {
+		if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)) {
+			prz = ramoops_get_next_prz(cxt->fprzs,
+					&cxt->ftrace_read_cnt, 1, id, type,
+					PSTORE_TYPE_FTRACE, 0);
+		} else {
+			/*
+			 * Build a new dummy record which combines all the
+			 * per-cpu records including metadata and ecc info.
+			 */
+			struct persistent_ram_zone *tmp_prz, *prz_next;
+
+			tmp_prz = kzalloc(sizeof(struct persistent_ram_zone),
+					  GFP_KERNEL);
+			if (!tmp_prz)
+				return -ENOMEM;
+			free_prz = true;
+
+			while (cxt->ftrace_read_cnt < cxt->max_ftrace_cnt) {
+				prz_next = ramoops_get_next_prz(cxt->fprzs,
+						&cxt->ftrace_read_cnt,
+						cxt->max_ftrace_cnt, id,
+						type, PSTORE_TYPE_FTRACE, 0);
+
+				if (!prz_ok(prz_next))
+					continue;
+
+				tmp_prz->ecc_info = prz_next->ecc_info;
+				tmp_prz->corrected_bytes +=
+						prz_next->corrected_bytes;
+				tmp_prz->bad_blocks += prz_next->bad_blocks;
+				size = ftrace_log_combine(tmp_prz, prz_next);
+				if (size)
+					goto out;
+			}
+			*id = 0;
+			prz = tmp_prz;
+		}
+	}
+
+	if (!prz_ok(prz)) {
+		size = 0;
+		goto out;
+	}
 
 	size = persistent_ram_old_size(prz) - header_length;
 
@@ -234,12 +331,21 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
 	*ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0);
 
 	*buf = kmalloc(size + *ecc_notice_size + 1, GFP_KERNEL);
-	if (*buf == NULL)
-		return -ENOMEM;
+	if (*buf == NULL) {
+		size = -ENOMEM;
+		goto out;
+	}
 
 	memcpy(*buf, (char *)persistent_ram_old(prz) + header_length, size);
+
 	persistent_ram_ecc_string(prz, *buf + size, *ecc_notice_size + 1);
 
+out:
+	if (free_prz) {
+		kfree(prz->old_log);
+		kfree(prz);
+	}
+
 	return size;
 }
 
@@ -283,15 +389,23 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
 		persistent_ram_write(cxt->cprz, buf, size);
 		return 0;
 	} else if (type == PSTORE_TYPE_FTRACE) {
-		if (!cxt->fprz)
+		int zonenum;
+
+		if (!cxt->fprzs)
 			return -ENOMEM;
-		persistent_ram_write(cxt->fprz, buf, size);
+		/*
+		 * Choose zone by if we're using per-cpu buffers.
+		 */
+		if (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
+			zonenum = smp_processor_id();
+		else
+			zonenum = 0;
+
+		persistent_ram_write(cxt->fprzs[zonenum], buf, size);
 		return 0;
 	} else if (type == PSTORE_TYPE_PMSG) {
-		if (!cxt->mprz)
-			return -ENOMEM;
-		persistent_ram_write(cxt->mprz, buf, size);
-		return 0;
+		pr_warn_ratelimited("PMSG shouldn't call %s\n", __func__);
+		return -EINVAL;
 	}
 
 	if (type != PSTORE_TYPE_DMESG)
@@ -316,10 +430,10 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
 	if (part != 1)
 		return -ENOSPC;
 
-	if (!cxt->przs)
+	if (!cxt->dprzs)
 		return -ENOSPC;
 
-	prz = cxt->przs[cxt->dump_write_cnt];
+	prz = cxt->dprzs[cxt->dump_write_cnt];
 
 	hlen = ramoops_write_kmsg_hdr(prz, compressed);
 	if (size + hlen > prz->buffer_size)
@@ -359,13 +473,15 @@ static int ramoops_pstore_erase(enum pstore_type_id type, u64 id, int count,
 	case PSTORE_TYPE_DMESG:
 		if (id >= cxt->max_dump_cnt)
 			return -EINVAL;
-		prz = cxt->przs[id];
+		prz = cxt->dprzs[id];
 		break;
 	case PSTORE_TYPE_CONSOLE:
 		prz = cxt->cprz;
 		break;
 	case PSTORE_TYPE_FTRACE:
-		prz = cxt->fprz;
+		if (id >= cxt->max_ftrace_cnt)
+			return -EINVAL;
+		prz = cxt->fprzs[id];
 		break;
 	case PSTORE_TYPE_PMSG:
 		prz = cxt->mprz;
@@ -396,68 +512,113 @@ static void ramoops_free_przs(struct ramoops_context *cxt)
 {
 	int i;
 
-	if (!cxt->przs)
-		return;
+	/* Free dump PRZs */
+	if (cxt->dprzs) {
+		for (i = 0; i < cxt->max_dump_cnt; i++)
+			persistent_ram_free(cxt->dprzs[i]);
 
-	for (i = 0; i < cxt->max_dump_cnt; i++)
-		persistent_ram_free(cxt->przs[i]);
+		kfree(cxt->dprzs);
+		cxt->max_dump_cnt = 0;
+	}
 
-	kfree(cxt->przs);
-	cxt->max_dump_cnt = 0;
+	/* Free ftrace PRZs */
+	if (cxt->fprzs) {
+		for (i = 0; i < cxt->max_ftrace_cnt; i++)
+			persistent_ram_free(cxt->fprzs[i]);
+		kfree(cxt->fprzs);
+		cxt->max_ftrace_cnt = 0;
+	}
 }
 
-static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
-			     phys_addr_t *paddr, size_t dump_mem_sz)
+static int ramoops_init_przs(const char *name,
+			     struct device *dev, struct ramoops_context *cxt,
+			     struct persistent_ram_zone ***przs,
+			     phys_addr_t *paddr, size_t mem_sz,
+			     ssize_t record_size,
+			     unsigned int *cnt, u32 sig, u32 flags)
 {
 	int err = -ENOMEM;
 	int i;
+	size_t zone_sz;
+	struct persistent_ram_zone **prz_ar;
 
-	if (!cxt->record_size)
+	/* Allocate nothing for 0 mem_sz or 0 record_size. */
+	if (mem_sz == 0 || record_size == 0) {
+		*cnt = 0;
 		return 0;
+	}
 
-	if (*paddr + dump_mem_sz - cxt->phys_addr > cxt->size) {
-		dev_err(dev, "no room for dumps\n");
-		return -ENOMEM;
+	/*
+	 * If we have a negative record size, calculate it based on
+	 * mem_sz / *cnt. If we have a positive record size, calculate
+	 * cnt from mem_sz / record_size.
+	 */
+	if (record_size < 0) {
+		if (*cnt == 0)
+			return 0;
+		record_size = mem_sz / *cnt;
+		if (record_size == 0) {
+			dev_err(dev, "%s record size == 0 (%zu / %u)\n",
+				name, mem_sz, *cnt);
+			goto fail;
+		}
+	} else {
+		*cnt = mem_sz / record_size;
+		if (*cnt == 0) {
+			dev_err(dev, "%s record count == 0 (%zu / %zu)\n",
+				name, mem_sz, record_size);
+			goto fail;
+		}
 	}
 
-	cxt->max_dump_cnt = dump_mem_sz / cxt->record_size;
-	if (!cxt->max_dump_cnt)
-		return -ENOMEM;
+	if (*paddr + mem_sz - cxt->phys_addr > cxt->size) {
+		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
+			name,
+			mem_sz, (unsigned long long)*paddr,
+			cxt->size, (unsigned long long)cxt->phys_addr);
+		goto fail;
+	}
 
-	cxt->przs = kzalloc(sizeof(*cxt->przs) * cxt->max_dump_cnt,
-			     GFP_KERNEL);
-	if (!cxt->przs) {
-		dev_err(dev, "failed to initialize a prz array for dumps\n");
-		goto fail_mem;
+	zone_sz = mem_sz / *cnt;
+	if (!zone_sz) {
+		dev_err(dev, "%s zone size == 0\n", name);
+		goto fail;
 	}
 
-	for (i = 0; i < cxt->max_dump_cnt; i++) {
-		cxt->przs[i] = persistent_ram_new(*paddr, cxt->record_size, 0,
+	prz_ar = kcalloc(*cnt, sizeof(**przs), GFP_KERNEL);
+	if (!prz_ar)
+		goto fail;
+
+	for (i = 0; i < *cnt; i++) {
+		prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
 						  &cxt->ecc_info,
-						  cxt->memtype);
-		if (IS_ERR(cxt->przs[i])) {
-			err = PTR_ERR(cxt->przs[i]);
-			dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
-				cxt->record_size, (unsigned long long)*paddr, err);
+						  cxt->memtype, flags);
+		if (IS_ERR(prz_ar[i])) {
+			err = PTR_ERR(prz_ar[i]);
+			dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
+				name, record_size,
+						  (unsigned long long)*paddr, err);
 
 			while (i > 0) {
 				i--;
-				persistent_ram_free(cxt->przs[i]);
+				persistent_ram_free(prz_ar[i]);
 			}
-			goto fail_prz;
+			kfree(prz_ar);
+			goto fail;
 		}
-		*paddr += cxt->record_size;
+		*paddr += zone_sz;
 	}
 
+	*przs = prz_ar;
 	return 0;
-fail_prz:
-	kfree(cxt->przs);
-fail_mem:
-	cxt->max_dump_cnt = 0;
+
+fail:
+	*cnt = 0;
 	return err;
 }
 
-static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
+static int ramoops_init_prz(const char *name,
+			    struct device *dev, struct ramoops_context *cxt,
 			    struct persistent_ram_zone **prz,
 			    phys_addr_t *paddr, size_t sz, u32 sig)
 {
@@ -465,18 +626,19 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
 		return 0;
 
 	if (*paddr + sz - cxt->phys_addr > cxt->size) {
-		dev_err(dev, "no room for mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
-			sz, (unsigned long long)*paddr,
+		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
+			name, sz, (unsigned long long)*paddr,
 			cxt->size, (unsigned long long)cxt->phys_addr);
 		return -ENOMEM;
 	}
 
-	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
+	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
+				  cxt->memtype, 0);
 	if (IS_ERR(*prz)) {
 		int err = PTR_ERR(*prz);
 
-		dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
-			sz, (unsigned long long)*paddr, err);
+		dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
+			name, sz, (unsigned long long)*paddr, err);
 		return err;
 	}
 
@@ -543,6 +705,7 @@ static int ramoops_parse_dt(struct platform_device *pdev,
 	parse_size("ftrace-size", pdata->ftrace_size);
 	parse_size("pmsg-size", pdata->pmsg_size);
 	parse_size("ecc-size", pdata->ecc_info.ecc_size);
+	parse_size("flags", pdata->flags);
 
 #undef parse_size
 
@@ -561,6 +724,7 @@ static int ramoops_probe(struct platform_device *pdev)
 	if (dev_of_node(dev) && !pdata) {
 		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 		if (!pdata) {
+			pr_err("cannot allocate platform data buffer\n");
 			err = -ENOMEM;
 			goto fail_out;
 		}
@@ -570,11 +734,20 @@ static int ramoops_probe(struct platform_device *pdev)
 			goto fail_out;
 	}
 
-	/* Only a single ramoops area allowed at a time, so fail extra
+	/*
+	 * Only a single ramoops area allowed at a time, so fail extra
 	 * probes.
 	 */
-	if (cxt->max_dump_cnt)
+	if (cxt->max_dump_cnt) {
+		pr_err("already initialized\n");
 		goto fail_out;
+	}
+
+	/* Make sure we didn't get bogus platform data pointer. */
+	if (!pdata) {
+		pr_err("NULL platform data\n");
+		goto fail_out;
+	}
 
 	if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size &&
 			!pdata->ftrace_size && !pdata->pmsg_size)) {
@@ -600,27 +773,37 @@ static int ramoops_probe(struct platform_device *pdev)
 	cxt->ftrace_size = pdata->ftrace_size;
 	cxt->pmsg_size = pdata->pmsg_size;
 	cxt->dump_oops = pdata->dump_oops;
+	cxt->flags = pdata->flags;
 	cxt->ecc_info = pdata->ecc_info;
 
 	paddr = cxt->phys_addr;
 
 	dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
 			- cxt->pmsg_size;
-	err = ramoops_init_przs(dev, cxt, &paddr, dump_mem_sz);
+	err = ramoops_init_przs("dump", dev, cxt, &cxt->dprzs, &paddr,
+				dump_mem_sz, cxt->record_size,
+				&cxt->max_dump_cnt, 0, 0);
 	if (err)
 		goto fail_out;
 
-	err = ramoops_init_prz(dev, cxt, &cxt->cprz, &paddr,
+	err = ramoops_init_prz("console", dev, cxt, &cxt->cprz, &paddr,
 			       cxt->console_size, 0);
 	if (err)
 		goto fail_init_cprz;
 
-	err = ramoops_init_prz(dev, cxt, &cxt->fprz, &paddr, cxt->ftrace_size,
-			       LINUX_VERSION_CODE);
+	cxt->max_ftrace_cnt = (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
+				? nr_cpu_ids
+				: 1;
+	err = ramoops_init_przs("ftrace", dev, cxt, &cxt->fprzs, &paddr,
+				cxt->ftrace_size, -1,
+				&cxt->max_ftrace_cnt, LINUX_VERSION_CODE,
+				(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
+					? PRZ_FLAG_NO_LOCK : 0);
 	if (err)
 		goto fail_init_fprz;
 
-	err = ramoops_init_prz(dev, cxt, &cxt->mprz, &paddr, cxt->pmsg_size, 0);
+	err = ramoops_init_prz("pmsg", dev, cxt, &cxt->mprz, &paddr,
+				cxt->pmsg_size, 0);
 	if (err)
 		goto fail_init_mprz;
 
@@ -680,7 +863,6 @@ fail_clear:
 	cxt->pstore.bufsize = 0;
 	persistent_ram_free(cxt->mprz);
 fail_init_mprz:
-	persistent_ram_free(cxt->fprz);
 fail_init_fprz:
 	persistent_ram_free(cxt->cprz);
 fail_init_cprz:
@@ -699,7 +881,6 @@ static int ramoops_remove(struct platform_device *pdev)
 	cxt->pstore.bufsize = 0;
 
 	persistent_ram_free(cxt->mprz);
-	persistent_ram_free(cxt->fprz);
 	persistent_ram_free(cxt->cprz);
 	ramoops_free_przs(cxt);
 
@@ -741,6 +922,8 @@ static void ramoops_register_dummy(void)
 	dummy_data->ftrace_size = ramoops_ftrace_size;
 	dummy_data->pmsg_size = ramoops_pmsg_size;
 	dummy_data->dump_oops = dump_oops;
+	dummy_data->flags = RAMOOPS_FLAG_FTRACE_PER_CPU;
+
 	/*
 	 * For backwards compatibility ramoops.ecc=1 means 16 bytes ECC
 	 * (using 1 byte for ECC isn't much of use anyway).
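
Note on the now-generic ramoops_init_przs() above: a negative record_size means "split mem_sz evenly into *cnt zones" (used for the per-CPU ftrace zones, where *cnt comes from nr_cpu_ids), while a positive record_size means "derive *cnt from mem_sz / record_size" (used for the dump zones). A standalone sketch of that arithmetic with hypothetical numbers, not taken from the patch:

#include <stdio.h>

int main(void)
{
	/* ftrace: 128 KiB region split across 4 CPUs -> four 32 KiB zones */
	size_t ftrace_mem = 128 * 1024;
	unsigned int ftrace_cnt = 4;		/* stands in for nr_cpu_ids */
	size_t ftrace_zone_sz = ftrace_mem / ftrace_cnt;

	/* dump: 256 KiB region with 64 KiB records -> four dump zones */
	size_t dump_mem = 256 * 1024;
	size_t record_size = 64 * 1024;
	unsigned int dump_cnt = dump_mem / record_size;

	printf("ftrace: %u zones of %zu bytes\n", ftrace_cnt, ftrace_zone_sz);
	printf("dump:   %u zones of %zu bytes\n", dump_cnt, record_size);
	return 0;
}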

+ 16 - 11
fs/pstore/ram_core.c

@@ -48,16 +48,15 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
 	return atomic_read(&prz->buffer->start);
 }
 
-static DEFINE_RAW_SPINLOCK(buffer_lock);
-
 /* increase and wrap the start pointer, returning the old value */
 static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
 {
 	int old;
 	int new;
-	unsigned long flags;
+	unsigned long flags = 0;
 
-	raw_spin_lock_irqsave(&buffer_lock, flags);
+	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+		raw_spin_lock_irqsave(&prz->buffer_lock, flags);
 
 	old = atomic_read(&prz->buffer->start);
 	new = old + a;
@@ -65,7 +64,8 @@ static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
 		new -= prz->buffer_size;
 	atomic_set(&prz->buffer->start, new);
 
-	raw_spin_unlock_irqrestore(&buffer_lock, flags);
+	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
 
 	return old;
 }
@@ -75,9 +75,10 @@ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
 {
 	size_t old;
 	size_t new;
-	unsigned long flags;
+	unsigned long flags = 0;
 
-	raw_spin_lock_irqsave(&buffer_lock, flags);
+	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+		raw_spin_lock_irqsave(&prz->buffer_lock, flags);
 
 	old = atomic_read(&prz->buffer->size);
 	if (old == prz->buffer_size)
@@ -89,7 +90,8 @@ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
 	atomic_set(&prz->buffer->size, new);
 
 exit:
-	raw_spin_unlock_irqrestore(&buffer_lock, flags);
+	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
 }
 
 static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
@@ -465,7 +467,8 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
 }
 
 static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
-				    struct persistent_ram_ecc_info *ecc_info)
+				    struct persistent_ram_ecc_info *ecc_info,
+				    unsigned long flags)
 {
 	int ret;
 
@@ -493,6 +496,8 @@
 
 	prz->buffer->sig = sig;
 	persistent_ram_zap(prz);
+	prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
+	prz->flags = flags;
 
 	return 0;
 }
@@ -517,7 +522,7 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 			u32 sig, struct persistent_ram_ecc_info *ecc_info,
-			unsigned int memtype)
+			unsigned int memtype, u32 flags)
 {
 	struct persistent_ram_zone *prz;
 	int ret = -ENOMEM;
@@ -532,7 +537,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 	if (ret)
 		goto err;
 
-	ret = persistent_ram_post_init(prz, sig, ecc_info);
+	ret = persistent_ram_post_init(prz, sig, ecc_info, flags);
 	if (ret)
 		goto err;
 

+ 2 - 0
include/linux/ftrace.h

@@ -398,6 +398,7 @@ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
 void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
 void ftrace_free_filter(struct ftrace_ops *ops);
+void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
 
 int register_ftrace_command(struct ftrace_func_command *cmd);
 int unregister_ftrace_command(struct ftrace_func_command *cmd);
@@ -645,6 +646,7 @@ static inline unsigned long ftrace_location(unsigned long ip)
 #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
 #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
 #define ftrace_free_filter(ops) do { } while (0)
+#define ftrace_ops_set_global_filter(ops) do { } while (0)
 
 static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
 			    size_t cnt, loff_t *ppos) { return -ENODEV; }

+ 76 - 0
include/linux/pstore.h

@@ -89,4 +89,80 @@ extern int pstore_register(struct pstore_info *);
 extern void pstore_unregister(struct pstore_info *);
 extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
 
+struct pstore_ftrace_record {
+	unsigned long ip;
+	unsigned long parent_ip;
+	u64 ts;
+};
+
+/*
+ * ftrace related stuff: Both backends and frontends need these so expose
+ * them here.
+ */
+
+#if NR_CPUS <= 2 && defined(CONFIG_ARM_THUMB)
+#define PSTORE_CPU_IN_IP 0x1
+#elif NR_CPUS <= 4 && defined(CONFIG_ARM)
+#define PSTORE_CPU_IN_IP 0x3
+#endif
+
+#define TS_CPU_SHIFT 8
+#define TS_CPU_MASK (BIT(TS_CPU_SHIFT) - 1)
+
+/*
+ * If CPU number can be stored in IP, store it there, otherwise store it in
+ * the time stamp. This means more timestamp resolution is available when
+ * the CPU can be stored in the IP.
+ */
+#ifdef PSTORE_CPU_IN_IP
+static inline void
+pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu)
+{
+	rec->ip |= cpu;
+}
+
+static inline unsigned int
+pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec)
+{
+	return rec->ip & PSTORE_CPU_IN_IP;
+}
+
+static inline u64
+pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec)
+{
+	return rec->ts;
+}
+
+static inline void
+pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val)
+{
+	rec->ts = val;
+}
+#else
+static inline void
+pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu)
+{
+	rec->ts &= ~(TS_CPU_MASK);
+	rec->ts |= cpu;
+}
+
+static inline unsigned int
+pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec)
+{
+	return rec->ts & TS_CPU_MASK;
+}
+
+static inline u64
+pstore_ftrace_read_timestamp(struct pstore_ftrace_record *rec)
+{
+	return rec->ts >> TS_CPU_SHIFT;
+}
+
+static inline void
+pstore_ftrace_write_timestamp(struct pstore_ftrace_record *rec, u64 val)
+{
+	rec->ts = (rec->ts & TS_CPU_MASK) | (val << TS_CPU_SHIFT);
+}
+#endif
+
 #endif /*_LINUX_PSTORE_H*/
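
Note on the helpers added above: when the CPU number cannot be packed into the ip field (the non-ARM case), it shares the 64-bit ts field with the timestamp counter, with the low TS_CPU_SHIFT (8) bits holding the CPU and the remaining bits holding the counter. A standalone round-trip sketch of that packing, using user-space types and hypothetical values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TS_CPU_SHIFT 8
#define TS_CPU_MASK ((1ULL << TS_CPU_SHIFT) - 1)

int main(void)
{
	uint64_t ts = 0;
	unsigned int cpu = 3;		/* hypothetical CPU number */
	uint64_t counter = 1000;	/* hypothetical pstore_ftrace_stamp value */

	/* pstore_ftrace_encode_cpu + pstore_ftrace_write_timestamp */
	ts = (ts & ~TS_CPU_MASK) | cpu;
	ts = (ts & TS_CPU_MASK) | (counter << TS_CPU_SHIFT);

	/* pstore_ftrace_decode_cpu + pstore_ftrace_read_timestamp */
	assert((ts & TS_CPU_MASK) == cpu);
	assert((ts >> TS_CPU_SHIFT) == counter);

	printf("ts=0x%llx cpu=%llu counter=%llu\n",
	       (unsigned long long)ts,
	       (unsigned long long)(ts & TS_CPU_MASK),
	       (unsigned long long)(ts >> TS_CPU_SHIFT));
	return 0;
}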

+ 13 - 1
include/linux/pstore_ram.h

@@ -24,6 +24,13 @@
 #include <linux/list.h>
 #include <linux/types.h>
 
+/*
+ * Choose whether access to the RAM zone requires locking or not.  If a zone
+ * can be written to from different CPUs like with ftrace for example, then
+ * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
+ */
+#define PRZ_FLAG_NO_LOCK	BIT(0)
+
 struct persistent_ram_buffer;
 struct rs_control;
 
@@ -40,6 +47,8 @@ struct persistent_ram_zone {
 	void *vaddr;
 	struct persistent_ram_buffer *buffer;
 	size_t buffer_size;
+	u32 flags;
+	raw_spinlock_t buffer_lock;
 
 	/* ECC correction */
 	char *par_buffer;
@@ -55,7 +64,7 @@
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 			u32 sig, struct persistent_ram_ecc_info *ecc_info,
-			unsigned int memtype);
+			unsigned int memtype, u32 flags);
 void persistent_ram_free(struct persistent_ram_zone *prz);
 void persistent_ram_zap(struct persistent_ram_zone *prz);
 
@@ -77,6 +86,8 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
  * @mem_address	physical memory address to contain ramoops
  */
 
+#define RAMOOPS_FLAG_FTRACE_PER_CPU	BIT(0)
+
 struct ramoops_platform_data {
 	unsigned long	mem_size;
 	phys_addr_t	mem_address;
@@ -86,6 +97,7 @@ struct ramoops_platform_data {
 	unsigned long	ftrace_size;
 	unsigned long	pmsg_size;
 	int		dump_oops;
+	u32		flags;
 	struct persistent_ram_ecc_info ecc_info;
 };
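
Note on the new flags argument to persistent_ram_new(): per-CPU ftrace zones are only ever written from their own CPU, so PRZ_FLAG_NO_LOCK skips the per-zone spinlock for them, while shared zones (console, pmsg, dump) keep it. A hedged sketch of a backend creating one of each; the addresses, sizes, and function name are placeholders, not from this patch:

#include <linux/err.h>
#include <linux/pstore_ram.h>

static struct persistent_ram_ecc_info my_ecc_info;	/* no ECC configured */

static int my_backend_init_zones(void)
{
	struct persistent_ram_zone *console_prz, *cpu0_ftrace_prz;

	/* Shared console zone: written from any CPU, so keep locking. */
	console_prz = persistent_ram_new(0x8f000000, 0x20000, 0,
					 &my_ecc_info, 0 /* buffered */, 0);
	if (IS_ERR(console_prz))
		return PTR_ERR(console_prz);

	/* Per-CPU ftrace zone: single writer per zone, skip the lock. */
	cpu0_ftrace_prz = persistent_ram_new(0x8f020000, 0x8000, 0,
					     &my_ecc_info, 0,
					     PRZ_FLAG_NO_LOCK);
	if (IS_ERR(cpu0_ftrace_prz)) {
		persistent_ram_free(console_prz);
		return PTR_ERR(cpu0_ftrace_prz);
	}

	return 0;
}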
 
 

+ 17 - 0
kernel/trace/ftrace.c

@@ -4258,6 +4258,23 @@ int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
 
+/**
+ * ftrace_ops_set_global_filter - setup ops to use global filters
+ * @ops - the ops which will use the global filters
+ *
+ * ftrace users who need global function trace filtering should call this.
+ * It can set the global filter only if ops were not initialized before.
+ */
+void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
+{
+	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
+		return;
+
+	ftrace_ops_init(ops);
+	ops->func_hash = &global_ops.local_hash;
+}
+EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
+
 static int
 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 		 int reset, int enable)
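
Usage sketch for the new helper, mirroring the fs/pstore/ftrace.c change in this pull: an ftrace user that wants to honor the global set_ftrace_filter / set_ftrace_notrace settings calls ftrace_ops_set_global_filter() on its ops before registering them. The callback body and names below are illustrative only:

#include <linux/ftrace.h>

static void notrace my_trace_callback(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op,
				      struct pt_regs *regs)
{
	/* record or count the call; must not itself be traced */
}

static struct ftrace_ops my_trace_ops = {
	.func = my_trace_callback,
};

static int my_tracer_enable(void)
{
	/* Share the global filter hash; only works before first use of ops. */
	ftrace_ops_set_global_filter(&my_trace_ops);
	return register_ftrace_function(&my_trace_ops);
}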
 		 int reset, int enable)