/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
5 * Reader/writer consistent mechanism without starving writers. This type of
Robert P. J. Dayd08df602007-02-17 19:07:33 +01006 * lock for data where the reader wants a consistent set of information
Waiman Long1370e972013-09-12 10:55:34 -04007 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but they may have to retry
 *    if a writer is in progress by detecting change in sequence number.
 *    Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * updating starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;
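
/*
 * Example sketch (illustrative only; foo_lock, foo_seq, foo_a and foo_b are
 * hypothetical names): a seqcount_t whose updates are serialized by the
 * caller's own lock, as described above.
 *
 *	static DEFINE_SPINLOCK(foo_lock);
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *	static u64 foo_a, foo_b;
 *
 *	void foo_update(u64 a, u64 b)
 *	{
 *		spin_lock(&foo_lock);		<- writer-side serialization
 *		write_seqcount_begin(&foo_seq);
 *		foo_a = a;
 *		foo_b = b;
 *		write_seqcount_end(&foo_seq);
 *		spin_unlock(&foo_lock);
 *	}
 *
 *	u64 foo_read_sum(void)
 *	{
 *		unsigned seq;
 *		u64 a, b;
 *
 *		do {
 *			seq = read_seqcount_begin(&foo_seq);
 *			a = foo_a;
 *			b = foo_b;
 *		} while (read_seqcount_retry(&foo_seq, seq));
 *
 *		return a + b;
 *	}
 */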

static inline void __seqcount_init(seqcount_t *s, const char *name,
					  struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
		.dep_map = { .name = #lockname } \

# define seqcount_init(s)				\
	do {						\
		static struct lock_class_key __key;	\
		__seqcount_init((s), #s, &__key);	\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}


/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = READ_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}
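
/*
 * Example sketch (illustrative only; foo_seq and the retry label are
 * hypothetical): __read_seqcount_begin() leaves the read barrier to the
 * caller, so the data loads must be preceded by an explicit smp_rmb();
 * raw_read_seqcount_begin() below simply packages up the same pair.
 *
 *	retry:
 *		seq = __read_seqcount_begin(&foo_seq);
 *		smp_rmb();	<- order the counter load before the data loads
 *		... load the protected data ...
 *		if (read_seqcount_retry(&foo_seq, seq))
 *			goto retry;
 */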

/**
 * raw_read_seqcount - Read the raw seqcount
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount without any lockdep checking and without checking or
 * masking the LSB. Calling code is responsible for handling that.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret;
}

/**
 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount_begin opens a read critical section of the given
 * seqcount, but without any lockdep checking. Validity of the critical
 * section is tested by calling read_seqcount_retry().
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	seqcount_lockdep_reader_access(s);
	return raw_read_seqcount_begin(s);
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret & ~1;
}
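
/*
 * Example sketch (illustrative only; foo_seq, foo_value and the slow path
 * are hypothetical): because raw_seqcount_begin() never spins, a reader can
 * attempt one lockless snapshot and fall back to a slower, locked path when
 * the retry check reports a concurrent writer.
 *
 *	seq = raw_seqcount_begin(&foo_seq);
 *	val = foo_value;
 *	if (read_seqcount_retry(&foo_seq, seq)) {
 *		<- a writer was active; take the locked slow path instead
 *		...
 *	}
 */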

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}


static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}

/**
 * raw_write_seqcount_barrier - do a seq write barrier
 * @s: pointer to seqcount_t
 *
 * This can be used to provide an ordering guarantee instead of the
 * usual consistency guarantee. It is one wmb cheaper, because we can
 * collapse the two back-to-back wmb()s.
 *
 *      seqcount_t seq;
 *      bool X = true, Y = false;
 *
 *      void read(void)
 *      {
 *              bool x, y;
 *
 *              do {
 *                      int s = read_seqcount_begin(&seq);
 *
 *                      x = X; y = Y;
 *
 *              } while (read_seqcount_retry(&seq, s));
 *
 *              BUG_ON(!x && !y);
 *      }
 *
 *      void write(void)
 *      {
 *              Y = true;
 *
 *              raw_write_seqcount_barrier(&seq);
 *
 *              X = false;
 *      }
 */
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
	s->sequence++;
}

static inline int raw_read_seqcount_latch(seqcount_t *s)
{
	/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
	int seq = READ_ONCE(s->sequence); /* ^^^ */
	return seq;
}

/**
 * raw_write_seqcount_latch - redirect readers to even/odd copy
 * @s: pointer to seqcount_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state the
 * latch allows the same for non-atomic updates. The trade-off is doubling the
 * cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like:
 *
 * struct latch_struct {
 *	seqcount_t		seq;
 *	struct data_struct	data[2];
 * };
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following:
 *
 * void latch_modify(struct latch_struct *latch, ...)
 * {
 *	smp_wmb();	<- Ensure that the last data[1] update is visible
 *	latch->seq++;
 *	smp_wmb();	<- Ensure that the seqcount update is visible
 *
 *	modify(latch->data[0], ...);
 *
 *	smp_wmb();	<- Ensure that the data[0] update is visible
 *	latch->seq++;
 *	smp_wmb();	<- Ensure that the seqcount update is visible
 *
 *	modify(latch->data[1], ...);
 * }
 *
 * The query will have a form like:
 *
 * struct entry *latch_query(struct latch_struct *latch, ...)
 * {
 *	struct entry *entry;
 *	unsigned seq, idx;
 *
 *	do {
 *		seq = raw_read_seqcount_latch(&latch->seq);
 *
 *		idx = seq & 0x01;
 *		entry = data_query(latch->data[idx], ...);
 *
 *		smp_rmb();
 *	} while (seq != latch->seq);
 *
 *	return entry;
 * }
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE: The non-requirement for atomic modifications does _NOT_ include
 *       the publishing of new entries in the case where data is a dynamic
 *       data structure.
 *
 *       An iteration might start in data[0] and get suspended long enough
 *       to miss an entire modification sequence; once it resumes it might
 *       observe the new entry.
 *
 * NOTE: When data is a dynamic data structure, one should use regular RCU
 *       patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}

/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, _RET_IP_);
	raw_write_seqcount_end(s);
}

/**
 * write_seqcount_invalidate - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_invalidate, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}
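
/*
 * Example sketch (illustrative only; foo_lock, foo_seq and the data are
 * hypothetical): a writer that is retiring the protected data makes the
 * change and then invalidates, so any lockless reader that sampled the
 * counter beforehand is forced to retry and cannot return the stale view.
 *
 *	spin_lock(&foo_lock);			<- writer-side serialization
 *	... unhash / retire the protected data ...
 *	write_seqcount_invalidate(&foo_seq);
 *	spin_unlock(&foo_lock);
 */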

typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO(lockname),	\
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}
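
/*
 * Example sketch (illustrative only; foo_seqlock, foo_time and the helpers
 * are hypothetical): a complete seqlock_t pairing, with the writer using
 * write_seqlock()/write_sequnlock() and the reader using the retry loop
 * shown at the top of this file.
 *
 *	static DEFINE_SEQLOCK(foo_seqlock);
 *	static struct timespec64 foo_time;
 *
 *	void foo_set_time(const struct timespec64 *ts)
 *	{
 *		write_seqlock(&foo_seqlock);	<- lock taken, sequence bumped
 *		foo_time = *ts;
 *		write_sequnlock(&foo_seqlock);
 *	}
 *
 *	struct timespec64 foo_get_time(void)
 *	{
 *		struct timespec64 ts;
 *		unsigned seq;
 *
 *		do {
 *			seq = read_seqbegin(&foo_seqlock);
 *			ts = foo_time;
 *		} while (read_seqretry(&foo_seqlock, seq));
 *
 *		return ts;
 *	}
 */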

static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)			\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}

/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}

/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq: sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a sequence reader (even) or a locking reader (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
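
/*
 * Example sketch (illustrative only; foo_seqlock and the walk are
 * hypothetical): the optimistic-then-locking pattern built from
 * read_seqbegin_or_lock(), need_seqretry() and done_seqretry().  The first
 * pass runs locklessly; if it raced with a writer, seq is forced odd so the
 * second pass becomes an exclusive locking reader.
 *
 *	int seq = 0;		<- even: start with the lockless pass
 *
 *	again:
 *		read_seqbegin_or_lock(&foo_seqlock, &seq);
 *		... walk the protected data ...
 *		if (need_seqretry(&foo_seqlock, seq)) {
 *			seq = 1;	<- odd: redo the walk with the lock held
 *			goto again;
 *		}
 *		done_seqretry(&foo_seqlock, seq);
 */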

static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)			\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
#endif /* __LINUX_SEQLOCK_H */