LCOV - code coverage report
Current view: top level - kernel/locking - qrwlock.c (source / functions) Hit Total Coverage
Test: combined.info Lines: 0 21 0.0 %
Date: 2022-04-01 14:58:12 Functions: 0 2 0.0 %
Branches: 0 14 0.0 %

           Branch data     Line data    Source code
       1                 :            : // SPDX-License-Identifier: GPL-2.0-or-later
       2                 :            : /*
       3                 :            :  * Queued read/write locks
       4                 :            :  *
       5                 :            :  * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
       6                 :            :  *
       7                 :            :  * Authors: Waiman Long <waiman.long@hp.com>
       8                 :            :  */
       9                 :            : #include <linux/smp.h>
      10                 :            : #include <linux/bug.h>
      11                 :            : #include <linux/cpumask.h>
      12                 :            : #include <linux/percpu.h>
      13                 :            : #include <linux/hardirq.h>
      14                 :            : #include <linux/spinlock.h>
      15                 :            : #include <asm/qrwlock.h>
      16                 :            : 
/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 *
 * Called when the fastpath failed, i.e. a writer holds or is waiting for
 * the lock. Interrupt-context readers are allowed to barge past queued
 * waiters; everyone else queues on @lock->wait_lock in FIFO order.
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 *
		 * Note: the reader bias added by the fastpath is deliberately
		 * kept here; it is what prevents a waiting writer from
		 * acquiring the lock while interrupt-context readers hold it.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
	/*
	 * Back out the reader count bias taken by the (not shown) fastpath
	 * before joining the wait queue, so a pending writer is not blocked
	 * by readers that are merely queued, not holding the lock.
	 */
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
	/* Re-take the reader bias now that we are at the head of the queue. */
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
      57                 :            : 
/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 *
 * Called when the write-lock fastpath failed. Writers serialize among
 * themselves (and with queued readers) on @lock->wait_lock, then wait for
 * all active readers to drain before setting the locked flag.
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!atomic_read(&lock->cnts) &&
	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_add(_QW_WAITING, &lock->cnts);

	/*
	 * When no more readers or writers, set the locked flag.
	 *
	 * The cmpxchg can be relaxed: the preceding
	 * atomic_cond_read_acquire() already provides the ACQUIRE ordering
	 * needed for the critical section, and a failed cmpxchg simply
	 * loops back into that acquire-ordered wait.
	 */
	do {
		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
					_QW_LOCKED) != _QW_WAITING);
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);

Generated by: LCOV version 1.14