/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_NULLS_H
#define _LINUX_RCULIST_NULLS_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list_nulls.h>
#include <linux/rcupdate.h>

/**
 * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_nulls_unhashed() on the node returns true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we cannot poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_nulls_add_head_rcu() or
 * hlist_nulls_del_rcu(), running on this same list. However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_nulls_for_each_entry_rcu().
 */
static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
{
        if (!hlist_nulls_unhashed(n)) {
                __hlist_nulls_del(n);
                WRITE_ONCE(n->pprev, NULL);
        }
}

#define hlist_nulls_first_rcu(head) \
        (*((struct hlist_nulls_node __rcu __force **)&(head)->first))

#define hlist_nulls_next_rcu(node) \
        (*((struct hlist_nulls_node __rcu __force **)&(node)->next))

/**
 * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_nulls_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
 * or hlist_nulls_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_nulls_for_each_entry().
 */
static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
{
        __hlist_nulls_del(n);
        WRITE_ONCE(n->pprev, LIST_POISON2);
}
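/*
 * Illustrative writer-side sketch (not part of rculist_nulls.h; in a real
 * tree it would live in a .c file that includes this header).  The structure,
 * lock and key below are hypothetical, and <linux/spinlock.h> and
 * <linux/slab.h> are assumed to be available.  The point is that the deletion
 * runs under the bucket lock while concurrent RCU readers may still be
 * walking the chain, and the memory is only reclaimed after a grace period.
 */
struct my_obj {
        struct hlist_nulls_node node;   /* linked into one hash bucket */
        struct rcu_head rcu;            /* for deferred freeing */
        unsigned int key;
};

static inline void my_obj_unhash(struct my_obj *obj, spinlock_t *bucket_lock)
{
        spin_lock(bucket_lock);
        hlist_nulls_del_init_rcu(&obj->node);   /* readers may still see it */
        spin_unlock(bucket_lock);
        kfree_rcu(obj, rcu);                    /* free after a grace period */
}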
/**
 * hlist_nulls_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the beginning of the specified hlist_nulls,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
 * or hlist_nulls_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
                                            struct hlist_nulls_head *h)
{
        struct hlist_nulls_node *first = h->first;

        n->next = first;
        WRITE_ONCE(n->pprev, &h->first);
        rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
        if (!is_a_nulls(first))
                WRITE_ONCE(first->pprev, &n->next);
}

/**
 * hlist_nulls_add_tail_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the end of the specified hlist_nulls,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
 * or hlist_nulls_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
                                            struct hlist_nulls_head *h)
{
        struct hlist_nulls_node *i, *last = NULL;

        /* Note: write side code, so rcu accessors are not needed. */
        for (i = h->first; !is_a_nulls(i); i = i->next)
                last = i;

        if (last) {
                n->next = last->next;
                n->pprev = &last->next;
                rcu_assign_pointer(hlist_next_rcu(last), n);
        } else {
                hlist_nulls_add_head_rcu(n, h);
        }
}

/**
 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_nulls_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_nulls_node within the struct.
 *
 * The barrier() is needed to make sure the compiler doesn't cache the
 * first element [1], as this loop can be restarted [2].
 * [1] Documentation/core-api/atomic_ops.rst around line 114
 * [2] Documentation/RCU/rculist_nulls.txt around line 146
 */
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member)                \
        for (({barrier();}),                                                   \
             pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));           \
             (!is_a_nulls(pos)) &&                                             \
             ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; });   \
             pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
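/*
 * Illustrative reader-side sketch (not part of rculist_nulls.h), loosely
 * following Documentation/RCU/rculist_nulls.txt.  It reuses the hypothetical
 * struct my_obj from the sketch above and assumes each bucket head was set
 * up with INIT_HLIST_NULLS_HEAD(head, slot), so the nulls value at the end
 * of a chain encodes the bucket index.  If the walk ends on a nulls marker
 * belonging to another bucket, the entry we followed was moved to a
 * different chain under us and the lookup must be restarted.  A production
 * lookup would additionally take a reference on the object and re-check the
 * key before returning it.
 */
static inline struct my_obj *my_obj_lookup(struct hlist_nulls_head *head,
                                           unsigned int key, unsigned long slot)
{
        struct my_obj *obj;
        struct hlist_nulls_node *pos;

        rcu_read_lock();
begin:
        hlist_nulls_for_each_entry_rcu(obj, pos, head, node) {
                if (obj->key == key)
                        goto found;
        }
        /* Ended in the wrong chain: we raced with a delete + re-insert. */
        if (get_nulls_value(pos) != slot)
                goto begin;
        obj = NULL;
found:
        rcu_read_unlock();
        return obj;
}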
/**
 * hlist_nulls_for_each_entry_safe -
 *   iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_nulls_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_nulls_node within the struct.
 */
#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member)               \
        for (({barrier();}),                                                   \
             pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));           \
             (!is_a_nulls(pos)) &&                                             \
             ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member);          \
                pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });)
#endif
#endif
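/*
 * Illustrative teardown sketch (not part of rculist_nulls.h; in a real tree
 * it would sit in a .c file after including this header).  It reuses the
 * hypothetical struct my_obj from the first sketch.  Because
 * hlist_nulls_for_each_entry_safe() loads the next pointer before the loop
 * body runs, every entry can be unlinked and handed to kfree_rcu() while
 * walking the chain.  The caller is assumed to hold the bucket lock to keep
 * other writers away.
 */
static inline void my_bucket_flush(struct hlist_nulls_head *head)
{
        struct my_obj *obj;
        struct hlist_nulls_node *pos;

        hlist_nulls_for_each_entry_safe(obj, pos, head, node) {
                hlist_nulls_del_rcu(&obj->node);
                kfree_rcu(obj, rcu);    /* readers may still be traversing */
        }
}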