Branch data Line data Source code
1 : : /* SPDX-License-Identifier: GPL-2.0 */ 2 : : /* 3 : : * Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance 4 : : * 5 : : * Original mutex implementation started by Ingo Molnar: 6 : : * 7 : : * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 8 : : * 9 : : * Wait/Die implementation: 10 : : * Copyright (C) 2013 Canonical Ltd. 11 : : * Choice of algorithm: 12 : : * Copyright (C) 2018 WMWare Inc. 13 : : * 14 : : * This file contains the main data structure and API definitions. 15 : : */ 16 : : 17 : : #ifndef __LINUX_WW_MUTEX_H 18 : : #define __LINUX_WW_MUTEX_H 19 : : 20 : : #include <linux/mutex.h> 21 : : 22 : : struct ww_class { 23 : : atomic_long_t stamp; 24 : : struct lock_class_key acquire_key; 25 : : struct lock_class_key mutex_key; 26 : : const char *acquire_name; 27 : : const char *mutex_name; 28 : : unsigned int is_wait_die; 29 : : }; 30 : : 31 : : struct ww_acquire_ctx { 32 : : struct task_struct *task; 33 : : unsigned long stamp; 34 : : unsigned int acquired; 35 : : unsigned short wounded; 36 : : unsigned short is_wait_die; 37 : : #ifdef CONFIG_DEBUG_MUTEXES 38 : : unsigned int done_acquire; 39 : : struct ww_class *ww_class; 40 : : struct ww_mutex *contending_lock; 41 : : #endif 42 : : #ifdef CONFIG_DEBUG_LOCK_ALLOC 43 : : struct lockdep_map dep_map; 44 : : #endif 45 : : #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH 46 : : unsigned int deadlock_inject_interval; 47 : : unsigned int deadlock_inject_countdown; 48 : : #endif 49 : : }; 50 : : 51 : : struct ww_mutex { 52 : : struct mutex base; 53 : : struct ww_acquire_ctx *ctx; 54 : : #ifdef CONFIG_DEBUG_MUTEXES 55 : : struct ww_class *ww_class; 56 : : #endif 57 : : }; 58 : : 59 : : #ifdef CONFIG_DEBUG_LOCK_ALLOC 60 : : # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \ 61 : : , .ww_class = class 62 : : #else 63 : : # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) 64 : : #endif 65 : : 66 : : #define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \ 67 : : { .stamp = ATOMIC_LONG_INIT(0) \ 68 : : , .acquire_name = #ww_class "_acquire" \ 69 : : , .mutex_name = #ww_class "_mutex" \ 70 : : , .is_wait_die = _is_wait_die } 71 : : 72 : : #define __WW_MUTEX_INITIALIZER(lockname, class) \ 73 : : { .base = __MUTEX_INITIALIZER(lockname.base) \ 74 : : __WW_CLASS_MUTEX_INITIALIZER(lockname, class) } 75 : : 76 : : #define DEFINE_WD_CLASS(classname) \ 77 : : struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1) 78 : : 79 : : #define DEFINE_WW_CLASS(classname) \ 80 : : struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0) 81 : : 82 : : #define DEFINE_WW_MUTEX(mutexname, ww_class) \ 83 : : struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class) 84 : : 85 : : /** 86 : : * ww_mutex_init - initialize the w/w mutex 87 : : * @lock: the mutex to be initialized 88 : : * @ww_class: the w/w class the mutex should belong to 89 : : * 90 : : * Initialize the w/w mutex to unlocked state and associate it with the given 91 : : * class. 92 : : * 93 : : * It is not allowed to initialize an already locked mutex. 94 : : */ 95 : : static inline void ww_mutex_init(struct ww_mutex *lock, 96 : : struct ww_class *ww_class) 97 : : { 98 : 1212 : __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key); 99 : 1212 : lock->ctx = NULL; 100 : : #ifdef CONFIG_DEBUG_MUTEXES 101 : : lock->ww_class = ww_class; 102 : : #endif 103 : : } 104 : : 105 : : /** 106 : : * ww_acquire_init - initialize a w/w acquire context 107 : : * @ctx: w/w acquire context to initialize 108 : : * @ww_class: w/w class of the context 109 : : * 110 : : * Initializes an context to acquire multiple mutexes of the given w/w class. 111 : : * 112 : : * Context-based w/w mutex acquiring can be done in any order whatsoever within 113 : : * a given lock class. Deadlocks will be detected and handled with the 114 : : * wait/die logic. 115 : : * 116 : : * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can 117 : : * result in undetected deadlocks and is so forbidden. Mixing different contexts 118 : : * for the same w/w class when acquiring mutexes can also result in undetected 119 : : * deadlocks, and is hence also forbidden. Both types of abuse will be caught by 120 : : * enabling CONFIG_PROVE_LOCKING. 121 : : * 122 : : * Nesting of acquire contexts for _different_ w/w classes is possible, subject 123 : : * to the usual locking rules between different lock classes. 124 : : * 125 : : * An acquire context must be released with ww_acquire_fini by the same task 126 : : * before the memory is freed. It is recommended to allocate the context itself 127 : : * on the stack. 128 : : */ 129 : 0 : static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, 130 : : struct ww_class *ww_class) 131 : : { 132 : 0 : ctx->task = current; 133 : 0 : ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp); 134 : 0 : ctx->acquired = 0; 135 : 0 : ctx->wounded = false; 136 : 0 : ctx->is_wait_die = ww_class->is_wait_die; 137 : : #ifdef CONFIG_DEBUG_MUTEXES 138 : : ctx->ww_class = ww_class; 139 : : ctx->done_acquire = 0; 140 : : ctx->contending_lock = NULL; 141 : : #endif 142 : : #ifdef CONFIG_DEBUG_LOCK_ALLOC 143 : : debug_check_no_locks_freed((void *)ctx, sizeof(*ctx)); 144 : : lockdep_init_map(&ctx->dep_map, ww_class->acquire_name, 145 : : &ww_class->acquire_key, 0); 146 : : mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_); 147 : : #endif 148 : : #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH 149 : : ctx->deadlock_inject_interval = 1; 150 : : ctx->deadlock_inject_countdown = ctx->stamp & 0xf; 151 : : #endif 152 : 0 : } 153 : : 154 : : /** 155 : : * ww_acquire_done - marks the end of the acquire phase 156 : : * @ctx: the acquire context 157 : : * 158 : : * Marks the end of the acquire phase, any further w/w mutex lock calls using 159 : : * this context are forbidden. 160 : : * 161 : : * Calling this function is optional, it is just useful to document w/w mutex 162 : : * code and clearly designated the acquire phase from actually using the locked 163 : : * data structures. 164 : : */ 165 : : static inline void ww_acquire_done(struct ww_acquire_ctx *ctx) 166 : : { 167 : : #ifdef CONFIG_DEBUG_MUTEXES 168 : : lockdep_assert_held(ctx); 169 : : 170 : : DEBUG_LOCKS_WARN_ON(ctx->done_acquire); 171 : : ctx->done_acquire = 1; 172 : : #endif 173 : : } 174 : : 175 : : /** 176 : : * ww_acquire_fini - releases a w/w acquire context 177 : : * @ctx: the acquire context to free 178 : : * 179 : : * Releases a w/w acquire context. This must be called _after_ all acquired w/w 180 : : * mutexes have been released with ww_mutex_unlock. 181 : : */ 182 : : static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) 183 : : { 184 : : #ifdef CONFIG_DEBUG_MUTEXES 185 : : mutex_release(&ctx->dep_map, 0, _THIS_IP_); 186 : : 187 : : DEBUG_LOCKS_WARN_ON(ctx->acquired); 188 : : if (!IS_ENABLED(CONFIG_PROVE_LOCKING)) 189 : : /* 190 : : * lockdep will normally handle this, 191 : : * but fail without anyway 192 : : */ 193 : : ctx->done_acquire = 1; 194 : : 195 : : if (!IS_ENABLED(CONFIG_DEBUG_LOCK_ALLOC)) 196 : : /* ensure ww_acquire_fini will still fail if called twice */ 197 : : ctx->acquired = ~0U; 198 : : #endif 199 : : } 200 : : 201 : : /** 202 : : * ww_mutex_lock - acquire the w/w mutex 203 : : * @lock: the mutex to be acquired 204 : : * @ctx: w/w acquire context, or NULL to acquire only a single lock. 205 : : * 206 : : * Lock the w/w mutex exclusively for this task. 207 : : * 208 : : * Deadlocks within a given w/w class of locks are detected and handled with the 209 : : * wait/die algorithm. If the lock isn't immediately available this function 210 : : * will either sleep until it is (wait case). Or it selects the current context 211 : : * for backing off by returning -EDEADLK (die case). Trying to acquire the 212 : : * same lock with the same context twice is also detected and signalled by 213 : : * returning -EALREADY. Returns 0 if the mutex was successfully acquired. 214 : : * 215 : : * In the die case the caller must release all currently held w/w mutexes for 216 : : * the given context and then wait for this contending lock to be available by 217 : : * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this 218 : : * lock and proceed with trying to acquire further w/w mutexes (e.g. when 219 : : * scanning through lru lists trying to free resources). 220 : : * 221 : : * The mutex must later on be released by the same task that 222 : : * acquired it. The task may not exit without first unlocking the mutex. Also, 223 : : * kernel memory where the mutex resides must not be freed with the mutex still 224 : : * locked. The mutex must first be initialized (or statically defined) before it 225 : : * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be 226 : : * of the same w/w lock class as was used to initialize the acquire context. 227 : : * 228 : : * A mutex acquired with this function must be released with ww_mutex_unlock. 229 : : */ 230 : : extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx); 231 : : 232 : : /** 233 : : * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible 234 : : * @lock: the mutex to be acquired 235 : : * @ctx: w/w acquire context 236 : : * 237 : : * Lock the w/w mutex exclusively for this task. 238 : : * 239 : : * Deadlocks within a given w/w class of locks are detected and handled with the 240 : : * wait/die algorithm. If the lock isn't immediately available this function 241 : : * will either sleep until it is (wait case). Or it selects the current context 242 : : * for backing off by returning -EDEADLK (die case). Trying to acquire the 243 : : * same lock with the same context twice is also detected and signalled by 244 : : * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a 245 : : * signal arrives while waiting for the lock then this function returns -EINTR. 246 : : * 247 : : * In the die case the caller must release all currently held w/w mutexes for 248 : : * the given context and then wait for this contending lock to be available by 249 : : * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to 250 : : * not acquire this lock and proceed with trying to acquire further w/w mutexes 251 : : * (e.g. when scanning through lru lists trying to free resources). 252 : : * 253 : : * The mutex must later on be released by the same task that 254 : : * acquired it. The task may not exit without first unlocking the mutex. Also, 255 : : * kernel memory where the mutex resides must not be freed with the mutex still 256 : : * locked. The mutex must first be initialized (or statically defined) before it 257 : : * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be 258 : : * of the same w/w lock class as was used to initialize the acquire context. 259 : : * 260 : : * A mutex acquired with this function must be released with ww_mutex_unlock. 261 : : */ 262 : : extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock, 263 : : struct ww_acquire_ctx *ctx); 264 : : 265 : : /** 266 : : * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex 267 : : * @lock: the mutex to be acquired 268 : : * @ctx: w/w acquire context 269 : : * 270 : : * Acquires a w/w mutex with the given context after a die case. This function 271 : : * will sleep until the lock becomes available. 272 : : * 273 : : * The caller must have released all w/w mutexes already acquired with the 274 : : * context and then call this function on the contended lock. 275 : : * 276 : : * Afterwards the caller may continue to (re)acquire the other w/w mutexes it 277 : : * needs with ww_mutex_lock. Note that the -EALREADY return code from 278 : : * ww_mutex_lock can be used to avoid locking this contended mutex twice. 279 : : * 280 : : * It is forbidden to call this function with any other w/w mutexes associated 281 : : * with the context held. It is forbidden to call this on anything else than the 282 : : * contending mutex. 283 : : * 284 : : * Note that the slowpath lock acquiring can also be done by calling 285 : : * ww_mutex_lock directly. This function here is simply to help w/w mutex 286 : : * locking code readability by clearly denoting the slowpath. 287 : : */ 288 : : static inline void 289 : : ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 290 : : { 291 : : int ret; 292 : : #ifdef CONFIG_DEBUG_MUTEXES 293 : : DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); 294 : : #endif 295 : 0 : ret = ww_mutex_lock(lock, ctx); 296 : : (void)ret; 297 : : } 298 : : 299 : : /** 300 : : * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible 301 : : * @lock: the mutex to be acquired 302 : : * @ctx: w/w acquire context 303 : : * 304 : : * Acquires a w/w mutex with the given context after a die case. This function 305 : : * will sleep until the lock becomes available and returns 0 when the lock has 306 : : * been acquired. If a signal arrives while waiting for the lock then this 307 : : * function returns -EINTR. 308 : : * 309 : : * The caller must have released all w/w mutexes already acquired with the 310 : : * context and then call this function on the contended lock. 311 : : * 312 : : * Afterwards the caller may continue to (re)acquire the other w/w mutexes it 313 : : * needs with ww_mutex_lock. Note that the -EALREADY return code from 314 : : * ww_mutex_lock can be used to avoid locking this contended mutex twice. 315 : : * 316 : : * It is forbidden to call this function with any other w/w mutexes associated 317 : : * with the given context held. It is forbidden to call this on anything else 318 : : * than the contending mutex. 319 : : * 320 : : * Note that the slowpath lock acquiring can also be done by calling 321 : : * ww_mutex_lock_interruptible directly. This function here is simply to help 322 : : * w/w mutex locking code readability by clearly denoting the slowpath. 323 : : */ 324 : : static inline int __must_check 325 : : ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, 326 : : struct ww_acquire_ctx *ctx) 327 : : { 328 : : #ifdef CONFIG_DEBUG_MUTEXES 329 : : DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); 330 : : #endif 331 : : return ww_mutex_lock_interruptible(lock, ctx); 332 : : } 333 : : 334 : : extern void ww_mutex_unlock(struct ww_mutex *lock); 335 : : 336 : : /** 337 : : * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context 338 : : * @lock: mutex to lock 339 : : * 340 : : * Trylocks a mutex without acquire context, so no deadlock detection is 341 : : * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise. 342 : : */ 343 : : static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) 344 : : { 345 : 1212 : return mutex_trylock(&lock->base); 346 : : } 347 : : 348 : : /*** 349 : : * ww_mutex_destroy - mark a w/w mutex unusable 350 : : * @lock: the mutex to be destroyed 351 : : * 352 : : * This function marks the mutex uninitialized, and any subsequent 353 : : * use of the mutex is forbidden. The mutex must not be locked when 354 : : * this function is called. 355 : : */ 356 : : static inline void ww_mutex_destroy(struct ww_mutex *lock) 357 : : { 358 : : mutex_destroy(&lock->base); 359 : : } 360 : : 361 : : /** 362 : : * ww_mutex_is_locked - is the w/w mutex locked 363 : : * @lock: the mutex to be queried 364 : : * 365 : : * Returns 1 if the mutex is locked, 0 if unlocked. 366 : : */ 367 : : static inline bool ww_mutex_is_locked(struct ww_mutex *lock) 368 : : { 369 : : return mutex_is_locked(&lock->base); 370 : : } 371 : : 372 : : #endif