/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but they may have to retry
 *    if a writer is in progress by detecting change in sequence number.
 *    Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * update, starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;
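
/*
 * For illustration only, a minimal sketch of pairing a seqcount_t with
 * the caller's own writer serialization. The structure, lock and field
 * names below are hypothetical, not part of this API;
 * write_seqcount_begin()/write_seqcount_end() are defined further down
 * in this file.
 *
 *	struct foo_data {
 *		spinlock_t lock;	(serializes writers)
 *		seqcount_t seq;
 *		int a, b;
 *	};
 *
 *	void foo_write(struct foo_data *d, int a, int b)
 *	{
 *		spin_lock(&d->lock);
 *		write_seqcount_begin(&d->seq);
 *		d->a = a;
 *		d->b = b;
 *		write_seqcount_end(&d->seq);
 *		spin_unlock(&d->lock);
 *	}
 */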

static inline void __seqcount_init(seqcount_t *s, const char *name,
					  struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
		.dep_map = { .name = #lockname } \

# define seqcount_init(s)				\
	do {						\
		static struct lock_class_key __key;	\
		__seqcount_init((s), #s, &__key);	\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}


/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = READ_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}
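
/*
 * For illustration only: a caller that provides the required read
 * barrier itself. With an explicit smp_rmb() this is equivalent to
 * raw_read_seqcount_begin() below; the seqcount and field names are
 * hypothetical.
 *
 *	seq = __read_seqcount_begin(&d->seq);
 *	smp_rmb();	(order the counter load before the data loads)
 *	a = d->a;
 */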

/**
 * raw_read_seqcount - Read the raw seqcount
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount without any lockdep checking and without checking or
 * masking the LSB. Calling code is responsible for handling that.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret;
}

/**
 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount_begin opens a read critical section of the given
 * seqcount, but without any lockdep checking. Validity of the critical
 * section is tested by calling read_seqcount_retry().
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling
 * read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	seqcount_lockdep_reader_access(s);
	return raw_read_seqcount_begin(s);
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling
 * read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret & ~1;
}
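
/*
 * For illustration only (hypothetical names): a reader that must never
 * spin waiting for a writer can use raw_seqcount_begin() and rely on
 * the final retry check failing instead:
 *
 *	seq = raw_seqcount_begin(&d->seq);
 *	a = d->a;
 *	if (read_seqcount_retry(&d->seq, seq))
 *		return -EAGAIN;	(an update was in flight, bail out)
 */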

/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}
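
/*
 * For illustration only, a complete lockless reader pairing
 * read_seqcount_begin() with read_seqcount_retry(); struct foo_data and
 * its fields are the same hypothetical names used in the writer sketch
 * near the top of this file:
 *
 *	int foo_read(struct foo_data *d)
 *	{
 *		unsigned seq;
 *		int a, b;
 *
 *		do {
 *			seq = read_seqcount_begin(&d->seq);
 *			a = d->a;
 *			b = d->b;
 *		} while (read_seqcount_retry(&d->seq, seq));
 *
 *		return a + b;
 *	}
 */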

static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}

/**
 * raw_write_seqcount_barrier - do a seq write barrier
 * @s: pointer to seqcount_t
 *
 * This can be used to provide an ordering guarantee instead of the
 * usual consistency guarantee. It is one wmb cheaper, because we can
 * collapse the two back-to-back wmb()s.
 *
 *	seqcount_t seq;
 *	bool X = true, Y = false;
 *
 *	void read(void)
 *	{
 *		bool x, y;
 *
 *		do {
 *			unsigned s = read_seqcount_begin(&seq);
 *
 *			x = X; y = Y;
 *
 *		} while (read_seqcount_retry(&seq, s));
 *
 *		BUG_ON(!x && !y);
 *	}
 *
 *	void write(void)
 *	{
 *		Y = true;
 *
 *		raw_write_seqcount_barrier(&seq);
 *
 *		X = false;
 *	}
 */
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
	s->sequence++;
}

static inline int raw_read_seqcount_latch(seqcount_t *s)
{
	/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
	int seq = READ_ONCE(s->sequence); /* ^^^ */
	return seq;
}

/**
 * raw_write_seqcount_latch - redirect readers to even/odd copy
 * @s: pointer to seqcount_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state,
 * the latch allows the same for non-atomic updates. The trade-off is doubling
 * the cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like:
 *
 *	struct latch_struct {
 *		seqcount_t		seq;
 *		struct data_struct	data[2];
 *	};
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following:
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		smp_wmb();	<- Ensure that the last data[1] update is visible
 *		latch->seq++;
 *		smp_wmb();	<- Ensure that the seqcount update is visible
 *
 *		modify(latch->data[0], ...);
 *
 *		smp_wmb();	<- Ensure that the data[0] update is visible
 *		latch->seq++;
 *		smp_wmb();	<- Ensure that the seqcount update is visible
 *
 *		modify(latch->data[1], ...);
 *	}
 *
 * The query will have a form like:
 *
 *	struct entry *latch_query(struct latch_struct *latch, ...)
 *	{
 *		struct entry *entry;
 *		unsigned seq, idx;
 *
 *		do {
 *			seq = raw_read_seqcount_latch(&latch->seq);
 *
 *			idx = seq & 0x01;
 *			entry = data_query(latch->data[idx], ...);
 *
 *			smp_rmb();
 *		} while (seq != latch->seq);
 *
 *		return entry;
 *	}
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE: The non-requirement for atomic modifications does _NOT_ include
 *       the publishing of new entries in the case where data is a dynamic
 *       data structure.
 *
 *       An iteration might start in data[0] and get suspended long enough
 *       to miss an entire modification sequence, once it resumes it might
 *       observe the new entry.
 *
 * NOTE: When data is a dynamic data structure, one should use regular RCU
 *       patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}
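
/*
 * For illustration only: with raw_write_seqcount_latch(), the
 * latch_modify() example above collapses to the sketch below. This is
 * the same technique restated, not a new interface; latch_struct and
 * modify() are the hypothetical names from the comment above.
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(latch->data[0], ...);
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(latch->data[1], ...);
 *	}
 */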

/*
 * The sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, _RET_IP_);
	raw_write_seqcount_end(s);
}

/**
 * write_seqcount_invalidate - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_invalidate, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}
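
/*
 * For illustration only (hypothetical names), a writer retiring stale
 * data: the smp_wmb() in write_seqcount_invalidate() orders the prior
 * data update before the counter bump, so any reader that started
 * earlier is forced to retry:
 *
 *	d->ptr = NULL;
 *	write_seqcount_invalidate(&d->seq);
 */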

typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO(lockname),	\
		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}
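
/*
 * For illustration only, the writer side matching the reader loop shown
 * at the top of this file (foo is a hypothetical seqlock_t, a and b
 * hypothetical protected fields): no extra serialization is needed, the
 * embedded spinlock provides it.
 *
 *	write_seqlock(&foo);
 *	a = new_a;
 *	b = new_b;
 *	write_sequnlock(&foo);
 */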

static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)			\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}

/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}

/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq : sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a reader (even) or writer (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
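
/*
 * For illustration only, the expected calling pattern for these three
 * helpers (foo is a hypothetical seqlock_t): the caller forces seq odd
 * on the retry pass, so the second attempt runs as a locking reader
 * instead of spinning indefinitely.
 *
 *	int seq = 0;
 *
 *again:
 *	read_seqbegin_or_lock(&foo, &seq);
 *	...
 *	if (need_seqretry(&foo, seq)) {
 *		seq = 1;	(take the locking-reader path next time)
 *		goto again;
 *	}
 *	done_seqretry(&foo, seq);
 */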

static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)			\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
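
/*
 * For illustration only: the irqsave variants follow the same pattern
 * as the read_seqbegin_or_lock() example above, but carry the saved
 * interrupt flags between begin and done (foo is again hypothetical):
 *
 *	unsigned long flags;
 *	int seq = 0;
 *
 *again:
 *	flags = read_seqbegin_or_lock_irqsave(&foo, &seq);
 *	...
 *	if (need_seqretry(&foo, seq)) {
 *		seq = 1;
 *		goto again;
 *	}
 *	done_seqretry_irqrestore(&foo, seq, flags);
 */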
#endif /* __LINUX_SEQLOCK_H */