Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/compiler.h    |  4
-rw-r--r--  include/linux/rcuclassic.h  | 37
-rw-r--r--  include/linux/rculist.h     | 14
-rw-r--r--  include/linux/rcupdate.h    | 20
-rw-r--r--  include/linux/rcupreempt.h  | 11
5 files changed, 58 insertions(+), 28 deletions(-)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c8bd2daf95ec..8322141ee480 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -190,7 +190,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
* ACCESS_ONCE() in different C statements.
*
* This macro does absolutely -nothing- to prevent the CPU from reordering,
- * merging, or refetching absolutely anything at any time.
+ * merging, or refetching absolutely anything at any time. Its main intended
+ * use is to mediate communication between process-level code and irq/NMI
+ * handlers, all running on the same CPU.
*/
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
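As a rough sketch of that same-CPU usage (the flag and both helpers are invented for illustration, not part of this patch):

static int wakeup_pending;			/* assumed shared flag */

static void set_from_irq(void)			/* irq/NMI-handler side */
{
	ACCESS_ONCE(wakeup_pending) = 1;	/* exactly one store */
}

static void wait_in_process_context(void)	/* process-level side */
{
	/*
	 * The volatile cast forces a fresh load on every iteration,
	 * so the compiler cannot hoist the test out of the loop.
	 */
	while (!ACCESS_ONCE(wakeup_pending))
		cpu_relax();
	ACCESS_ONCE(wakeup_pending) = 0;
}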
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 4ab843622727..5f89b62e6983 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -40,12 +40,21 @@
#include <linux/cpumask.h>
#include <linux/seqlock.h>
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+#define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
long cur; /* Current batch number. */
long completed; /* Number of the last completed batch */
- int next_pending; /* Is the next batch already waiting? */
+ long pending; /* Number of the last pending batch */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+ unsigned long gp_start; /* Time at which GP started in jiffies. */
+ unsigned long jiffies_stall;
+ /* Time at which to check for CPU stalls. */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
int signaled;
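A rough sketch of how gp_start and jiffies_stall could drive the stall check; both helper names below are illustrative, not declared by this header:

static void record_gp_start(struct rcu_ctrlblk *rcp)	/* sketch */
{
	rcp->gp_start = jiffies;
	rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
}

static void check_cpu_stall(struct rcu_ctrlblk *rcp)	/* sketch */
{
	if (time_after(jiffies, rcp->jiffies_stall)) {
		printk(KERN_ERR "RCU grace period stalled for %lu jiffies\n",
		       jiffies - rcp->gp_start);
		/* Back off before complaining about the same stall again. */
		rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
	}
}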
@@ -66,11 +75,7 @@ static inline int rcu_batch_after(long a, long b)
return (a - b) > 0;
}
-/*
- * Per-CPU data for Read-Copy UPdate.
- * nxtlist - new callbacks are added here
- * curlist - current batch for which quiescent cycle started if any
- */
+/* Per-CPU data for Read-Copy Update. */
struct rcu_data {
/* 1) quiescent state handling : */
long quiescbatch; /* Batch # for grace period */
@@ -78,12 +83,24 @@ struct rcu_data {
int qs_pending; /* core waits for quiesc state */
/* 2) batch handling */
- long batch; /* Batch # for current RCU batch */
+ /*
+ * if nxtlist is not NULL, then:
+ * batch:
+ * The batch # for the last entry of nxtlist
+ * [*nxttail[1], NULL = *nxttail[2]):
+ * Entries whose batch # <= batch
+ * [*nxttail[0], *nxttail[1]):
+ * Entries whose batch # <= batch - 1
+ * [nxtlist, *nxttail[0]):
+ * Entries whose batch # <= batch - 2
+ * The grace period for these entries has completed, and
+ * the other grace-period-completed entries may be moved
+ * here temporarily in rcu_process_callbacks().
+ */
+ long batch;
struct rcu_head *nxtlist;
- struct rcu_head **nxttail;
+ struct rcu_head **nxttail[3];
long qlen; /* # of queued callbacks */
- struct rcu_head *curlist;
- struct rcu_head **curtail;
struct rcu_head *donelist;
struct rcu_head **donetail;
long blimit; /* Upper limit on a processed batch */
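Under this layout a new callback is simply appended at the newest segment's tail, *nxttail[2]; moving entries between segments as batches complete is left to rcu_process_callbacks(). A sketch of the enqueue step (the function name is invented; the real queuing logic lives in kernel/rcuclassic.c):

static void enqueue_callback(struct rcu_data *rdp, struct rcu_head *head)
{
	head->next = NULL;
	*rdp->nxttail[2] = head;	/* append after the last entry */
	rdp->nxttail[2] = &head->next;	/* newest segment's tail advances */
	rdp->qlen++;
}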
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index eb4443c7e05b..e649bd3f2c97 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -198,20 +198,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
at->prev = last;
}
-/**
- * list_for_each_rcu - iterate over an rcu-protected list
- * @pos: the &struct list_head to use as a loop cursor.
- * @head: the head for your list.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_rcu(pos, head) \
- for (pos = rcu_dereference((head)->next); \
- prefetch(pos->next), pos != (head); \
- pos = rcu_dereference(pos->next))
-
#define __list_for_each_rcu(pos, head) \
for (pos = rcu_dereference((head)->next); \
pos != (head); \
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e8b4039cfb2f..86f1f5e43e33 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -133,6 +133,26 @@ struct rcu_head {
#define rcu_read_unlock_bh() __rcu_read_unlock_bh()
/**
+ * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section
+ *
+ * Should be used with either
+ * - synchronize_sched()
+ * or
+ * - call_rcu_sched() and rcu_barrier_sched()
+ * on the write-side to ensure proper synchronization.
+ */
+#define rcu_read_lock_sched() preempt_disable()
+
+/**
+ * rcu_read_unlock_sched - mark the end of an RCU-classic critical section
+ *
+ * See rcu_read_lock_sched for more information.
+ */
+#define rcu_read_unlock_sched() preempt_enable()
+
+/**
* rcu_dereference - fetch an RCU-protected pointer in an
* RCU read-side critical section. This pointer may later
* be safely dereferenced.
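A minimal sketch of pairing the new primitives with synchronize_sched() on the update side; struct foo, gp, and do_something() are assumptions for the example, not part of this patch:

struct foo;
extern void do_something(struct foo *p);
static struct foo *gp;			/* RCU-sched-protected pointer */

static void reader(void)
{
	struct foo *p;

	rcu_read_lock_sched();		/* expands to preempt_disable() */
	p = rcu_dereference(gp);
	if (p)
		do_something(p);
	rcu_read_unlock_sched();	/* expands to preempt_enable() */
}

static void update(struct foo *new_fp)
{
	struct foo *old = gp;

	rcu_assign_pointer(gp, new_fp);
	synchronize_sched();	/* waits for all such readers to finish */
	kfree(old);
}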
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 0967f03b0705..3e05c09b54a2 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -57,7 +57,13 @@ static inline void rcu_qsctr_inc(int cpu)
rdssp->sched_qs++;
}
#define rcu_bh_qsctr_inc(cpu)
-#define call_rcu_bh(head, rcu) call_rcu(head, rcu)
+
+/*
+ * Someone might want to pass call_rcu_bh as a function pointer.
+ * So this needs to just be a rename and not a macro function.
+ * (no parentheses)
+ */
+#define call_rcu_bh call_rcu
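To see why the parentheses matter: with the plain rename, the bare name call_rcu_bh is a valid expression and can initialize a function pointer, whereas a function-like macro only expands when followed by an argument list, so the initializer below would not have compiled. (queue_cb is invented for illustration.)

static void (*queue_cb)(struct rcu_head *head,
			void (*func)(struct rcu_head *head)) = call_rcu_bh;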
/**
* call_rcu_sched - Queue RCU callback for invocation after sched grace period.
@@ -111,7 +117,6 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
struct softirq_action;
#ifdef CONFIG_NO_HZ
-DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
static inline void rcu_enter_nohz(void)
{
@@ -126,8 +131,8 @@ static inline void rcu_exit_nohz(void)
{
static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
- smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
__get_cpu_var(rcu_dyntick_sched).dynticks++;
+ smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
&rs);
}
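The barrier moves to after the increment because a remote CPU may sample dynticks to decide whether this CPU can be ignored for the current grace period: the new (odd) value must be visible before the memory accesses of any later read-side critical section. A sketch of such an observer (cpu_safe_to_ignore() is invented for illustration):

static int cpu_safe_to_ignore(int cpu)
{
	int curr = per_cpu(rcu_dyntick_sched, cpu).dynticks;

	smp_mb();		/* pairs with the smp_mb() in rcu_exit_nohz() */
	return !(curr & 0x1);	/* even => in nohz idle, no readers */
}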