
Merge branch 'for-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu

Pull percpu updates from Tejun Heo:
 "Nothing too exciting.  percpu_ref is going through some interface
  changes and getting new features with more changes in the pipeline but
  given its young age and few users, it's very low impact"

* 'for-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu-refcount: implement percpu_ref_tryget()
  percpu-refcount: rename percpu_ref_tryget() to percpu_ref_tryget_live()
  percpu: Replace __get_cpu_var with this_cpu_ptr
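Caller-visible effect of the tryget split: percpu_ref_tryget() now succeeds whenever the count has not yet reached zero, even after percpu_ref_kill(), while percpu_ref_tryget_live() fails once the ref has been killed. A minimal sketch of the difference, with my_obj as a hypothetical wrapper (not part of this commit):

	#include <linux/percpu-refcount.h>

	/* Hypothetical wrapper struct, for illustration only. */
	struct my_obj {
		struct percpu_ref ref;
	};

	static bool my_obj_tryget_any(struct my_obj *obj)
	{
		/* succeeds while the count is non-zero, even after percpu_ref_kill() */
		return percpu_ref_tryget(&obj->ref);
	}

	static bool my_obj_tryget_live(struct my_obj *obj)
	{
		/* fails once percpu_ref_kill() has been called */
		return percpu_ref_tryget_live(&obj->ref);
	}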
Linus Torvalds 11 years ago
parent
commit
68a29ef2e3

+ 1 - 1
include/linux/cgroup.h

@@ -101,7 +101,7 @@ static inline bool css_tryget(struct cgroup_subsys_state *css)
 {
 	if (css->flags & CSS_ROOT)
 		return true;
-	return percpu_ref_tryget(&css->refcnt);
+	return percpu_ref_tryget_live(&css->refcnt);
 }

 /**

+ 33 - 1
include/linux/percpu-refcount.h

@@ -121,6 +121,36 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  * percpu_ref_tryget - try to increment a percpu refcount
  * @ref: percpu_ref to try-get
  *
+ * Increment a percpu refcount unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * The caller is responsible for ensuring that @ref stays accessible.
+ */
+static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+{
+	unsigned __percpu *pcpu_count;
+	int ret = false;
+
+	rcu_read_lock_sched();
+
+	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+
+	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+		this_cpu_inc(*pcpu_count);
+		ret = true;
+	} else {
+		ret = atomic_inc_not_zero(&ref->count);
+	}
+
+	rcu_read_unlock_sched();
+
+	return ret;
+}
+
+/**
+ * percpu_ref_tryget_live - try to increment a live percpu refcount
+ * @ref: percpu_ref to try-get
+ *
  * Increment a percpu refcount unless it has already been killed.  Returns
  * %true on success; %false on failure.
  *
@@ -128,8 +158,10 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  * will fail.  For such guarantee, percpu_ref_kill_and_confirm() should be
  * used.  After the confirm_kill callback is invoked, it's guaranteed that
  * no new reference will be given out by percpu_ref_tryget().
+ *
+ * The caller is responsible for ensuring that @ref stays accessible.
  */
-static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
 	unsigned __percpu *pcpu_count;
 	int ret = false;

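The new kernel-doc note that the caller must keep @ref accessible matters in the usual lookup pattern: the object holding the ref is found under rcu_read_lock() (or some other guarantee that it cannot be freed out from under the caller), the tryget is attempted, and the reference is later dropped with percpu_ref_put(). A hedged sketch, where my_obj and my_obj_lookup() are hypothetical names, not part of this commit:

	#include <linux/percpu-refcount.h>
	#include <linux/rcupdate.h>

	/* Hypothetical object and RCU-protected lookup helper. */
	struct my_obj {
		struct percpu_ref ref;
	};

	struct my_obj *my_obj_lookup(int id);	/* assumed RCU-protected */

	static struct my_obj *my_obj_get(int id)
	{
		struct my_obj *obj;

		rcu_read_lock();
		obj = my_obj_lookup(id);
		/* rcu_read_lock() keeps @obj, and thus @obj->ref, accessible here */
		if (obj && !percpu_ref_tryget_live(&obj->ref))
			obj = NULL;
		rcu_read_unlock();

		return obj;	/* caller eventually drops it with percpu_ref_put(&obj->ref) */
	}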
+ 1 - 1
include/linux/percpu.h

@@ -29,7 +29,7 @@
  */
 #define get_cpu_var(var) (*({				\
 	preempt_disable();				\
-	&__get_cpu_var(var); }))
+	this_cpu_ptr(&var); }))

 /*
  * The weird & is necessary because sparse considers (void)(var) to be
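The macro change is behavior-preserving: &__get_cpu_var(var) and this_cpu_ptr(&var) both yield a pointer to the current CPU's copy of a per-CPU variable, so get_cpu_var() still disables preemption and then dereferences that pointer. A small sketch of how the macro pair is used, with my_counter as a hypothetical per-CPU variable, not part of this commit:

	#include <linux/percpu.h>

	/* Hypothetical per-CPU counter, for illustration only. */
	static DEFINE_PER_CPU(int, my_counter);

	static void bump_my_counter(void)
	{
		/* get_cpu_var() disables preemption, then expands to *this_cpu_ptr(&my_counter) */
		get_cpu_var(my_counter)++;
		put_cpu_var(my_counter);	/* re-enables preemption */

		/* with preemption already disabled, the pointer form can be used directly */
		preempt_disable();
		(*this_cpu_ptr(&my_counter))++;
		preempt_enable();
	}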