LCOV - code coverage report
Current view:  top level - drivers/base/power - runtime.c (source / functions)
Test:          gcov_data_raspi2_qemu_modules_combined.info
Date:          2020-09-30 20:25:01
                          Hit      Total   Coverage
Lines:                    345        513     67.3 %
Functions:                 33         45     73.3 %
Branches:                 224        453     49.4 %

           Branch data     Line data    Source code
       1                 :            : // SPDX-License-Identifier: GPL-2.0
       2                 :            : /*
       3                 :            :  * drivers/base/power/runtime.c - Helper functions for device runtime PM
       4                 :            :  *
       5                 :            :  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
       6                 :            :  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
       7                 :            :  */
       8                 :            : #include <linux/sched/mm.h>
       9                 :            : #include <linux/ktime.h>
      10                 :            : #include <linux/hrtimer.h>
      11                 :            : #include <linux/export.h>
      12                 :            : #include <linux/pm_runtime.h>
      13                 :            : #include <linux/pm_wakeirq.h>
      14                 :            : #include <trace/events/rpm.h>
      15                 :            : 
      16                 :            : #include "../base.h"
      17                 :            : #include "power.h"
      18                 :            : 
      19                 :            : typedef int (*pm_callback_t)(struct device *);
      20                 :            : 
      21                 :       7720 : static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
      22                 :            : {
      23                 :            :         pm_callback_t cb;
      24                 :            :         const struct dev_pm_ops *ops;
      25                 :            : 
      26         [ -  + ]:       7720 :         if (dev->pm_domain)
      27                 :          0 :                 ops = &dev->pm_domain->ops;
      28   [ +  -  +  + ]:       7720 :         else if (dev->type && dev->type->pm)
      29                 :            :                 ops = dev->type->pm;
      30   [ -  +  #  # ]:        448 :         else if (dev->class && dev->class->pm)
      31                 :            :                 ops = dev->class->pm;
      32   [ +  -  +  - ]:        448 :         else if (dev->bus && dev->bus->pm)
      33                 :        448 :                 ops = dev->bus->pm;
      34                 :            :         else
      35                 :            :                 ops = NULL;
      36                 :            : 
      37         [ +  - ]:       7720 :         if (ops)
      38                 :       7720 :                 cb = *(pm_callback_t *)((void *)ops + cb_offset);
      39                 :            :         else
      40                 :            :                 cb = NULL;
      41                 :            : 
       42   [ +  +  +  -  +  + ]:       7720 :         if (!cb && dev->driver && dev->driver->pm)
      43                 :        404 :                 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
      44                 :            : 
      45                 :       7720 :         return cb;
      46                 :            : }
      47                 :            : 
      48                 :            : #define RPM_GET_CALLBACK(dev, callback) \
      49                 :            :                 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
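
Aside (sketch, not part of the covered source): __rpm_get_callback() and RPM_GET_CALLBACK() pick a runtime PM callback out of struct dev_pm_ops by its byte offset. Below is a minimal stand-alone illustration of the same offsetof() trick; the demo_* names are hypothetical.

/* Sketch only: resolve a function pointer from an ops table by byte offset,
 * mirroring what RPM_GET_CALLBACK() does for struct dev_pm_ops.
 */
#include <stddef.h>

struct demo_ops {
        int (*start)(void *ctx);
        int (*stop)(void *ctx);
};

typedef int (*demo_cb_t)(void *);

static demo_cb_t demo_get_callback(struct demo_ops *ops, size_t cb_offset)
{
        /* Read the function pointer stored cb_offset bytes into *ops. */
        return *(demo_cb_t *)((char *)ops + cb_offset);
}

#define DEMO_GET_CALLBACK(ops, member) \
        demo_get_callback(ops, offsetof(struct demo_ops, member))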
      50                 :            : 
      51                 :            : static int rpm_resume(struct device *dev, int rpmflags);
      52                 :            : static int rpm_suspend(struct device *dev, int rpmflags);
      53                 :            : 
      54                 :            : /**
      55                 :            :  * update_pm_runtime_accounting - Update the time accounting of power states
      56                 :            :  * @dev: Device to update the accounting for
      57                 :            :  *
      58                 :            :  * In order to be able to have time accounting of the various power states
      59                 :            :  * (as used by programs such as PowerTOP to show the effectiveness of runtime
      60                 :            :  * PM), we need to track the time spent in each state.
       61                 :            :  * update_pm_runtime_accounting() must be called each time before the
       62                 :            :  * runtime_status field is updated, so that the time spent in the old
       63                 :            :  * state is accounted for correctly.
      64                 :            :  */
      65                 :      35262 : static void update_pm_runtime_accounting(struct device *dev)
      66                 :            : {
      67                 :            :         u64 now, last, delta;
      68                 :            : 
      69         [ +  + ]:      35262 :         if (dev->power.disable_depth > 0)
      70                 :            :                 return;
      71                 :            : 
      72                 :      25750 :         last = dev->power.accounting_timestamp;
      73                 :            : 
      74                 :      25750 :         now = ktime_get_mono_fast_ns();
      75                 :      25750 :         dev->power.accounting_timestamp = now;
      76                 :            : 
      77                 :            :         /*
      78                 :            :          * Because ktime_get_mono_fast_ns() is not monotonic during
      79                 :            :          * timekeeping updates, ensure that 'now' is after the last saved
       80                 :            :          * timestamp.
      81                 :            :          */
      82         [ +  - ]:      25750 :         if (now < last)
      83                 :            :                 return;
      84                 :            : 
      85                 :      25750 :         delta = now - last;
      86                 :            : 
      87         [ +  + ]:      25750 :         if (dev->power.runtime_status == RPM_SUSPENDED)
      88                 :       8276 :                 dev->power.suspended_time += delta;
      89                 :            :         else
      90                 :      17474 :                 dev->power.active_time += delta;
      91                 :            : }
      92                 :            : 
      93                 :            : static void __update_runtime_status(struct device *dev, enum rpm_status status)
      94                 :            : {
      95                 :      34344 :         update_pm_runtime_accounting(dev);
      96                 :      34344 :         dev->power.runtime_status = status;
      97                 :            : }
      98                 :            : 
      99                 :          0 : static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
     100                 :            : {
     101                 :            :         u64 time;
     102                 :            :         unsigned long flags;
     103                 :            : 
     104                 :          0 :         spin_lock_irqsave(&dev->power.lock, flags);
     105                 :            : 
     106                 :          0 :         update_pm_runtime_accounting(dev);
     107         [ #  # ]:          0 :         time = suspended ? dev->power.suspended_time : dev->power.active_time;
     108                 :            : 
     109                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
     110                 :            : 
     111                 :          0 :         return time;
     112                 :            : }
     113                 :            : 
     114                 :          0 : u64 pm_runtime_active_time(struct device *dev)
     115                 :            : {
     116                 :          0 :         return rpm_get_accounted_time(dev, false);
     117                 :            : }
     118                 :            : 
     119                 :          0 : u64 pm_runtime_suspended_time(struct device *dev)
     120                 :            : {
     121                 :          0 :         return rpm_get_accounted_time(dev, true);
     122                 :            : }
     123                 :            : EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
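
Aside (sketch, not part of the covered source): the accounting maintained above can be read back by modules through pm_runtime_suspended_time(), the only one of the two accessors exported here; the active-time counter is used internally and also surfaces via sysfs. A hedged example for a hypothetical driver:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical helper: log how long @dev has been runtime-suspended,
 * using the counter updated by update_pm_runtime_accounting() above.
 */
static void demo_report_suspended_time(struct device *dev)
{
        u64 suspended_ns = pm_runtime_suspended_time(dev);

        dev_info(dev, "runtime-suspended for %llu ns so far\n", suspended_ns);
}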
     124                 :            : 
     125                 :            : /**
     126                 :            :  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
     127                 :            :  * @dev: Device to handle.
     128                 :            :  */
     129                 :            : static void pm_runtime_deactivate_timer(struct device *dev)
     130                 :            : {
      131   [ -  +  #  #  -  +  #  #  -  +  -  +  #  # ]:      90604 :         if (dev->power.timer_expires > 0) {
     132                 :          0 :                 hrtimer_try_to_cancel(&dev->power.suspend_timer);
     133                 :          0 :                 dev->power.timer_expires = 0;
     134                 :            :         }
     135                 :            : }
     136                 :            : 
     137                 :            : /**
     138                 :            :  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
     139                 :            :  * @dev: Device to handle.
     140                 :            :  */
     141                 :            : static void pm_runtime_cancel_pending(struct device *dev)
     142                 :            : {
     143                 :            :         pm_runtime_deactivate_timer(dev);
     144                 :            :         /*
     145                 :            :          * In case there's a request pending, make sure its work function will
     146                 :            :          * return without doing anything.
     147                 :            :          */
     148                 :      13318 :         dev->power.request = RPM_REQ_NONE;
     149                 :            : }
     150                 :            : 
     151                 :            : /*
     152                 :            :  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
     153                 :            :  * @dev: Device to handle.
     154                 :            :  *
     155                 :            :  * Compute the autosuspend-delay expiration time based on the device's
     156                 :            :  * power.last_busy time.  If the delay has already expired or is disabled
     157                 :            :  * (negative) or the power.use_autosuspend flag isn't set, return 0.
     158                 :            :  * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
     159                 :            :  *
     160                 :            :  * This function may be called either with or without dev->power.lock held.
     161                 :            :  * Either way it can be racy, since power.last_busy may be updated at any time.
     162                 :            :  */
     163                 :    2036544 : u64 pm_runtime_autosuspend_expiration(struct device *dev)
     164                 :            : {
     165                 :            :         int autosuspend_delay;
     166                 :            :         u64 expires;
     167                 :            : 
     168         [ +  + ]:    2036544 :         if (!dev->power.use_autosuspend)
     169                 :            :                 return 0;
     170                 :            : 
     171                 :    2019650 :         autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
     172         [ +  - ]:    2019650 :         if (autosuspend_delay < 0)
     173                 :            :                 return 0;
     174                 :            : 
     175                 :            :         expires  = READ_ONCE(dev->power.last_busy);
     176                 :    2019650 :         expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
     177         [ +  + ]:    2019650 :         if (expires > ktime_get_mono_fast_ns())
     178                 :    2019590 :                 return expires; /* Expires in the future */
     179                 :            : 
     180                 :            :         return 0;
     181                 :            : }
     182                 :            : EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
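
Aside (sketch, not part of the covered source): pm_runtime_autosuspend_expiration() only returns a future expiry when a driver has opted into autosuspend and keeps power.last_busy fresh. The usual driver-side pattern looks roughly like this; the demo_* names are hypothetical.

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void demo_setup_autosuspend(struct device *dev)
{
        pm_runtime_set_autosuspend_delay(dev, 2000);    /* stay idle 2 s before suspending */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);
}

static void demo_io_done(struct device *dev)
{
        /* Refresh power.last_busy so the expiration computed above moves
         * into the future, then allow an autosuspend once the delay elapses.
         */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
}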
     183                 :            : 
     184                 :          0 : static int dev_memalloc_noio(struct device *dev, void *data)
     185                 :            : {
     186                 :          0 :         return dev->power.memalloc_noio;
     187                 :            : }
     188                 :            : 
     189                 :            : /*
     190                 :            :  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
     191                 :            :  * @dev: Device to handle.
     192                 :            :  * @enable: True for setting the flag and False for clearing the flag.
     193                 :            :  *
     194                 :            :  * Set the flag for all devices in the path from the device to the
     195                 :            :  * root device in the device tree if @enable is true, otherwise clear
     196                 :            :  * the flag for devices in the path whose siblings don't set the flag.
     197                 :            :  *
      198                 :            :  * This function should only be called by block device or network
      199                 :            :  * device drivers to solve the deadlock problem during runtime
     200                 :            :  * resume/suspend:
     201                 :            :  *
     202                 :            :  *     If memory allocation with GFP_KERNEL is called inside runtime
      203                 :            :  *     resume/suspend callback of any one of its ancestors (or the
     204                 :            :  *     block device itself), the deadlock may be triggered inside the
     205                 :            :  *     memory allocation since it might not complete until the block
      206                 :            :  *     device becomes active and the involved page I/O finishes. The
      207                 :            :  *     situation was first pointed out by Alan Stern. Network devices
      208                 :            :  *     are involved in iSCSI-like situations.
     209                 :            :  *
      210                 :            :  * dev_hotplug_mutex is held in this function to handle hotplug races,
      211                 :            :  * because pm_runtime_set_memalloc_noio() may be called from an async
      212                 :            :  * probe().
     213                 :            :  *
     214                 :            :  * The function should be called between device_add() and device_del()
      215                 :            :  * on the affected device (block/network device).
     216                 :            :  */
     217                 :      10912 : void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
     218                 :            : {
     219                 :            :         static DEFINE_MUTEX(dev_hotplug_mutex);
     220                 :            : 
     221                 :      10912 :         mutex_lock(&dev_hotplug_mutex);
     222                 :            :         for (;;) {
     223                 :            :                 bool enabled;
     224                 :            : 
     225                 :            :                 /* hold power lock since bitfield is not SMP-safe. */
     226                 :            :                 spin_lock_irq(&dev->power.lock);
     227                 :      15356 :                 enabled = dev->power.memalloc_noio;
     228                 :      15356 :                 dev->power.memalloc_noio = enable;
     229                 :            :                 spin_unlock_irq(&dev->power.lock);
     230                 :            : 
     231                 :            :                 /*
      232                 :            :                  * No need to enable ancestors any more if the device
      233                 :            :                  * has already been enabled.
     234                 :            :                  */
     235         [ +  + ]:      15356 :                 if (enabled && enable)
     236                 :            :                         break;
     237                 :            : 
     238                 :      14952 :                 dev = dev->parent;
     239                 :            : 
     240                 :            :                 /*
      241                 :            :                  * Clear the parent device's flag only if none of its
      242                 :            :                  * children have the flag set, because an ancestor's
      243                 :            :                  * flag may have been set by any one of its descendants.
     244                 :            :                  */
      245   [ +  +  +  -  #  # ]:      14952 :                 if (!dev || (!enable &&
     246                 :          0 :                              device_for_each_child(dev, NULL,
     247                 :            :                                                    dev_memalloc_noio)))
     248                 :            :                         break;
     249                 :            :         }
     250                 :      10912 :         mutex_unlock(&dev_hotplug_mutex);
     251                 :      10912 : }
     252                 :            : EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
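
Aside (sketch, not part of the covered source): per the comment above, a block or network device driver sets the flag between device_add() and device_del(). A hedged outline with hypothetical demo_* wrappers:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int demo_register(struct device *dev)
{
        int ret = device_add(dev);

        if (ret)
                return ret;

        /* Runtime PM callbacks along this path now run with NOIO allocation
         * semantics (see the memalloc_noio handling in rpm_callback() below).
         */
        pm_runtime_set_memalloc_noio(dev, true);
        return 0;
}

static void demo_unregister(struct device *dev)
{
        pm_runtime_set_memalloc_noio(dev, false);
        device_del(dev);
}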
     253                 :            : 
     254                 :            : /**
     255                 :            :  * rpm_check_suspend_allowed - Test whether a device may be suspended.
     256                 :            :  * @dev: Device to test.
     257                 :            :  */
     258                 :    4190370 : static int rpm_check_suspend_allowed(struct device *dev)
     259                 :            : {
     260                 :            :         int retval = 0;
     261                 :            : 
     262         [ +  - ]:    4190370 :         if (dev->power.runtime_error)
     263                 :            :                 retval = -EINVAL;
     264         [ +  + ]:    4190370 :         else if (dev->power.disable_depth > 0)
     265                 :            :                 retval = -EACCES;
     266         [ +  + ]:    2086400 :         else if (atomic_read(&dev->power.usage_count) > 0)
     267                 :            :                 retval = -EAGAIN;
     268   [ +  +  +  + ]:    4107684 :         else if (!dev->power.ignore_children &&
     269                 :    2053034 :                         atomic_read(&dev->power.child_count))
     270                 :            :                 retval = -EBUSY;
     271                 :            : 
     272                 :            :         /* Pending resume requests take precedence over suspends. */
     273         [ -  + ]:    2052630 :         else if ((dev->power.deferred_resume
     274         [ #  # ]:          0 :                         && dev->power.runtime_status == RPM_SUSPENDING)
     275         [ +  + ]:    2052630 :             || (dev->power.request_pending
     276         [ +  - ]:        418 :                         && dev->power.request == RPM_REQ_RESUME))
     277                 :            :                 retval = -EAGAIN;
     278         [ +  - ]:    2052630 :         else if (__dev_pm_qos_resume_latency(dev) == 0)
     279                 :            :                 retval = -EPERM;
     280         [ +  + ]:    2052630 :         else if (dev->power.runtime_status == RPM_SUSPENDED)
     281                 :            :                 retval = 1;
     282                 :            : 
     283                 :    4190370 :         return retval;
     284                 :            : }
     285                 :            : 
     286                 :       8998 : static int rpm_get_suppliers(struct device *dev)
     287                 :            : {
     288                 :            :         struct device_link *link;
     289                 :            : 
     290         [ -  + ]:       8998 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
     291                 :            :                                 device_links_read_lock_held()) {
     292                 :            :                 int retval;
     293                 :            : 
     294   [ #  #  #  # ]:          0 :                 if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
     295                 :          0 :                     READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
     296                 :          0 :                         continue;
     297                 :            : 
     298                 :          0 :                 retval = pm_runtime_get_sync(link->supplier);
     299                 :            :                 /* Ignore suppliers with disabled runtime PM. */
     300         [ #  # ]:          0 :                 if (retval < 0 && retval != -EACCES) {
     301                 :          0 :                         pm_runtime_put_noidle(link->supplier);
     302                 :          0 :                         return retval;
     303                 :            :                 }
     304                 :          0 :                 refcount_inc(&link->rpm_active);
     305                 :            :         }
     306                 :            :         return 0;
     307                 :            : }
     308                 :            : 
     309                 :        514 : static void rpm_put_suppliers(struct device *dev)
     310                 :            : {
     311                 :            :         struct device_link *link;
     312                 :            : 
     313         [ -  + ]:        514 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
     314                 :            :                                 device_links_read_lock_held()) {
     315         [ #  # ]:          0 :                 if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
     316                 :          0 :                         continue;
     317                 :            : 
     318         [ #  # ]:          0 :                 while (refcount_dec_not_one(&link->rpm_active))
     319                 :          0 :                         pm_runtime_put(link->supplier);
     320                 :            :         }
     321                 :        514 : }
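
Aside (sketch, not part of the covered source): rpm_get_suppliers()/rpm_put_suppliers() only act on device links created with DL_FLAG_PM_RUNTIME. A hedged example of a consumer driver creating such a link so its supplier is resumed along with it; the demo_* name is hypothetical.

#include <linux/device.h>
#include <linux/errno.h>

static int demo_link_to_supplier(struct device *consumer, struct device *supplier)
{
        struct device_link *link;

        /* DL_FLAG_PM_RUNTIME makes the link participate in the supplier
         * handling above; DL_FLAG_RPM_ACTIVE starts it with rpm_active raised.
         */
        link = device_link_add(consumer, supplier,
                               DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
        return link ? 0 : -ENODEV;
}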
     322                 :            : 
     323                 :            : /**
     324                 :            :  * __rpm_callback - Run a given runtime PM callback for a given device.
     325                 :            :  * @cb: Runtime PM callback to run.
     326                 :            :  * @dev: Device to run the callback for.
     327                 :            :  */
     328                 :       3680 : static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
     329                 :            :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     330                 :            : {
     331                 :            :         int retval, idx;
     332                 :       3680 :         bool use_links = dev->power.links_count > 0;
     333                 :            : 
     334         [ -  + ]:       3680 :         if (dev->power.irq_safe) {
     335                 :            :                 spin_unlock(&dev->power.lock);
     336                 :            :         } else {
     337                 :            :                 spin_unlock_irq(&dev->power.lock);
     338                 :            : 
     339                 :            :                 /*
     340                 :            :                  * Resume suppliers if necessary.
     341                 :            :                  *
     342                 :            :                  * The device's runtime PM status cannot change until this
     343                 :            :                  * routine returns, so it is safe to read the status outside of
     344                 :            :                  * the lock.
     345                 :            :                  */
     346   [ -  +  #  # ]:       3680 :                 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
     347                 :          0 :                         idx = device_links_read_lock();
     348                 :            : 
     349                 :          0 :                         retval = rpm_get_suppliers(dev);
     350         [ #  # ]:          0 :                         if (retval)
     351                 :            :                                 goto fail;
     352                 :            : 
     353                 :          0 :                         device_links_read_unlock(idx);
     354                 :            :                 }
     355                 :            :         }
     356                 :            : 
     357                 :       3680 :         retval = cb(dev);
     358                 :            : 
     359         [ -  + ]:       3680 :         if (dev->power.irq_safe) {
     360                 :            :                 spin_lock(&dev->power.lock);
     361                 :            :         } else {
     362                 :            :                 /*
     363                 :            :                  * If the device is suspending and the callback has returned
     364                 :            :                  * success, drop the usage counters of the suppliers that have
     365                 :            :                  * been reference counted on its resume.
     366                 :            :                  *
     367                 :            :                  * Do that if resume fails too.
     368                 :            :                  */
     369         [ -  + ]:       3680 :                 if (use_links
     370   [ #  #  #  # ]:          0 :                     && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
     371   [ #  #  #  # ]:          0 :                     || (dev->power.runtime_status == RPM_RESUMING && retval))) {
     372                 :          0 :                         idx = device_links_read_lock();
     373                 :            : 
     374                 :            :  fail:
     375                 :          0 :                         rpm_put_suppliers(dev);
     376                 :            : 
     377                 :          0 :                         device_links_read_unlock(idx);
     378                 :            :                 }
     379                 :            : 
     380                 :            :                 spin_lock_irq(&dev->power.lock);
     381                 :            :         }
     382                 :            : 
     383                 :       3680 :         return retval;
     384                 :            : }
     385                 :            : 
     386                 :            : /**
     387                 :            :  * rpm_idle - Notify device bus type if the device can be suspended.
     388                 :            :  * @dev: Device to notify the bus type about.
     389                 :            :  * @rpmflags: Flag bits.
     390                 :            :  *
     391                 :            :  * Check if the device's runtime PM status allows it to be suspended.  If
     392                 :            :  * another idle notification has been started earlier, return immediately.  If
     393                 :            :  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
     394                 :            :  * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
     395                 :            :  * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
     396                 :            :  *
     397                 :            :  * This function must be called under dev->power.lock with interrupts disabled.
     398                 :            :  */
     399                 :     115048 : static int rpm_idle(struct device *dev, int rpmflags)
     400                 :            : {
     401                 :            :         int (*callback)(struct device *);
     402                 :            :         int retval;
     403                 :            : 
     404                 :     115048 :         trace_rpm_idle_rcuidle(dev, rpmflags);
     405                 :     115048 :         retval = rpm_check_suspend_allowed(dev);
     406         [ +  + ]:     115048 :         if (retval < 0)
     407                 :            :                 ;       /* Conditions are wrong. */
     408                 :            : 
     409                 :            :         /* Idle notifications are allowed only in the RPM_ACTIVE state. */
     410         [ +  + ]:      19722 :         else if (dev->power.runtime_status != RPM_ACTIVE)
     411                 :            :                 retval = -EAGAIN;
     412                 :            : 
     413                 :            :         /*
     414                 :            :          * Any pending request other than an idle notification takes
     415                 :            :          * precedence over us, except that the timer may be running.
     416                 :            :          */
     417   [ +  +  +  - ]:      18510 :         else if (dev->power.request_pending &&
     418                 :        404 :             dev->power.request > RPM_REQ_IDLE)
     419                 :            :                 retval = -EAGAIN;
     420                 :            : 
     421                 :            :         /* Act as though RPM_NOWAIT is always set. */
     422         [ -  + ]:      18106 :         else if (dev->power.idle_notification)
     423                 :            :                 retval = -EINPROGRESS;
     424         [ +  + ]:     115048 :         if (retval)
     425                 :            :                 goto out;
     426                 :            : 
     427                 :            :         /* Pending requests need to be canceled. */
     428                 :      18106 :         dev->power.request = RPM_REQ_NONE;
     429                 :            : 
     430         [ +  + ]:      18106 :         if (dev->power.no_callbacks)
     431                 :            :                 goto out;
     432                 :            : 
     433                 :            :         /* Carry out an asynchronous or a synchronous idle notification. */
     434         [ +  + ]:       8484 :         if (rpmflags & RPM_ASYNC) {
     435                 :       4444 :                 dev->power.request = RPM_REQ_IDLE;
     436         [ +  + ]:       4444 :                 if (!dev->power.request_pending) {
     437                 :       4040 :                         dev->power.request_pending = true;
     438                 :       4040 :                         queue_work(pm_wq, &dev->power.work);
     439                 :            :                 }
     440                 :       4444 :                 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
     441                 :       4444 :                 return 0;
     442                 :            :         }
     443                 :            : 
     444                 :       4040 :         dev->power.idle_notification = true;
     445                 :            : 
     446                 :       4040 :         callback = RPM_GET_CALLBACK(dev, runtime_idle);
     447                 :            : 
     448         [ -  + ]:       4040 :         if (callback)
     449                 :          0 :                 retval = __rpm_callback(callback, dev);
     450                 :            : 
     451                 :       4040 :         dev->power.idle_notification = false;
     452                 :       4040 :         wake_up_all(&dev->power.wait_queue);
     453                 :            : 
     454                 :            :  out:
     455                 :     110604 :         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
     456         [ +  + ]:     110604 :         return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
     457                 :            : }
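
Aside (sketch, not part of the covered source): rpm_idle() is reached through wrappers such as pm_runtime_idle() and pm_request_idle(). As the comment above says, a missing or 0-returning ->runtime_idle() callback lets the device go on to rpm_suspend(dev, RPM_AUTO). A hedged sketch of such a callback; the demo_* names are hypothetical.

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

struct demo_priv {
        bool fifo_nonempty;
};

static int demo_runtime_idle(struct device *dev)
{
        struct demo_priv *priv = dev_get_drvdata(dev);

        if (priv->fifo_nonempty)
                return -EBUSY;  /* not idle yet, block the autosuspend */

        return 0;               /* let rpm_idle() proceed to rpm_suspend(dev, RPM_AUTO) */
}

static const struct dev_pm_ops demo_pm_ops = {
        SET_RUNTIME_PM_OPS(NULL, NULL, demo_runtime_idle)
};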
     458                 :            : 
     459                 :            : /**
     460                 :            :  * rpm_callback - Run a given runtime PM callback for a given device.
     461                 :            :  * @cb: Runtime PM callback to run.
     462                 :            :  * @dev: Device to run the callback for.
     463                 :            :  */
     464                 :       3680 : static int rpm_callback(int (*cb)(struct device *), struct device *dev)
     465                 :            : {
     466                 :            :         int retval;
     467                 :            : 
     468         [ +  - ]:       3680 :         if (!cb)
     469                 :            :                 return -ENOSYS;
     470                 :            : 
     471         [ +  + ]:       3680 :         if (dev->power.memalloc_noio) {
     472                 :            :                 unsigned int noio_flag;
     473                 :            : 
     474                 :            :                 /*
     475                 :            :                  * Deadlock might be caused if memory allocation with
     476                 :            :                  * GFP_KERNEL happens inside runtime_suspend and
     477                 :            :                  * runtime_resume callbacks of one block device's
     478                 :            :                  * ancestor or the block device itself. Network
     479                 :            :                  * device might be thought as part of iSCSI block
     480                 :            :                  * device, so network device and its ancestor should
     481                 :            :                  * be marked as memalloc_noio too.
     482                 :            :                  */
     483                 :            :                 noio_flag = memalloc_noio_save();
     484                 :         44 :                 retval = __rpm_callback(cb, dev);
     485                 :            :                 memalloc_noio_restore(noio_flag);
     486                 :            :         } else {
     487                 :       3636 :                 retval = __rpm_callback(cb, dev);
     488                 :            :         }
     489                 :            : 
     490                 :       3680 :         dev->power.runtime_error = retval;
     491         [ +  - ]:       3680 :         return retval != -EACCES ? retval : -EIO;
     492                 :            : }
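
Aside (sketch, not part of the covered source): the NOIO scoping used by rpm_callback() is the generic memalloc_noio_save()/memalloc_noio_restore() pair from <linux/sched/mm.h>. The same pattern in isolation; the demo_* name is hypothetical.

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *demo_alloc_in_noio_scope(size_t size)
{
        unsigned int noio_flag;
        void *buf;

        noio_flag = memalloc_noio_save();
        /* Any GFP_KERNEL allocation here is implicitly degraded to GFP_NOIO,
         * so it cannot recurse into block I/O that needs this device awake.
         */
        buf = kmalloc(size, GFP_KERNEL);
        memalloc_noio_restore(noio_flag);

        return buf;
}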
     493                 :            : 
     494                 :            : /**
     495                 :            :  * rpm_suspend - Carry out runtime suspend of given device.
     496                 :            :  * @dev: Device to suspend.
     497                 :            :  * @rpmflags: Flag bits.
     498                 :            :  *
     499                 :            :  * Check if the device's runtime PM status allows it to be suspended.
     500                 :            :  * Cancel a pending idle notification, autosuspend or suspend. If
     501                 :            :  * another suspend has been started earlier, either return immediately
     502                 :            :  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
     503                 :            :  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
      504                 :            :  * otherwise run the ->runtime_suspend() callback directly. If
      505                 :            :  * ->runtime_suspend() succeeds and a deferred resume was requested while
      506                 :            :  * the callback was running, carry it out; otherwise send an idle
      507                 :            :  * notification for the device's parent (provided the suspend succeeded
      508                 :            :  * and neither ignore_children of parent->power nor irq_safe of dev->power is set).
     509                 :            :  * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
     510                 :            :  * flag is set and the next autosuspend-delay expiration time is in the
     511                 :            :  * future, schedule another autosuspend attempt.
     512                 :            :  *
     513                 :            :  * This function must be called under dev->power.lock with interrupts disabled.
     514                 :            :  */
     515                 :    4075326 : static int rpm_suspend(struct device *dev, int rpmflags)
     516                 :            :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     517                 :            : {
     518                 :            :         int (*callback)(struct device *);
     519                 :            :         struct device *parent = NULL;
     520                 :            :         int retval;
     521                 :            : 
     522                 :    4075326 :         trace_rpm_suspend_rcuidle(dev, rpmflags);
     523                 :            : 
     524                 :            :  repeat:
     525                 :    4075326 :         retval = rpm_check_suspend_allowed(dev);
     526                 :            : 
     527         [ +  + ]:    4075326 :         if (retval < 0)
     528                 :            :                 ;       /* Conditions are wrong. */
     529                 :            : 
     530                 :            :         /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
     531   [ -  +  #  # ]:    2032908 :         else if (dev->power.runtime_status == RPM_RESUMING &&
     532                 :          0 :             !(rpmflags & RPM_ASYNC))
     533                 :            :                 retval = -EAGAIN;
     534         [ +  + ]:    4075326 :         if (retval)
     535                 :            :                 goto out;
     536                 :            : 
     537                 :            :         /* If the autosuspend_delay time hasn't expired yet, reschedule. */
     538         [ +  - ]:    2032908 :         if ((rpmflags & RPM_AUTO)
     539         [ +  - ]:    2032908 :             && dev->power.runtime_status != RPM_SUSPENDING) {
     540                 :    2032908 :                 u64 expires = pm_runtime_autosuspend_expiration(dev);
     541                 :            : 
     542         [ +  + ]:    2032908 :                 if (expires != 0) {
     543                 :            :                         /* Pending requests need to be canceled. */
     544                 :    2019590 :                         dev->power.request = RPM_REQ_NONE;
     545                 :            : 
     546                 :            :                         /*
     547                 :            :                          * Optimization: If the timer is already running and is
     548                 :            :                          * set to expire at or before the autosuspend delay,
     549                 :            :                          * avoid the overhead of resetting it.  Just let it
     550                 :            :                          * expire; pm_suspend_timer_fn() will take care of the
     551                 :            :                          * rest.
     552                 :            :                          */
     553   [ +  +  -  + ]:    4019666 :                         if (!(dev->power.timer_expires &&
     554                 :    2000076 :                                         dev->power.timer_expires <= expires)) {
     555                 :            :                                 /*
     556                 :            :                                  * We add a slack of 25% to gather wakeups
     557                 :            :                                  * without sacrificing the granularity.
     558                 :            :                                  */
     559                 :      19514 :                                 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
     560                 :            :                                                     (NSEC_PER_MSEC >> 2);
     561                 :            : 
     562                 :      19514 :                                 dev->power.timer_expires = expires;
     563                 :      39028 :                                 hrtimer_start_range_ns(&dev->power.suspend_timer,
     564                 :            :                                                 ns_to_ktime(expires),
     565                 :            :                                                 slack,
     566                 :            :                                                 HRTIMER_MODE_ABS);
     567                 :            :                         }
     568                 :    2019590 :                         dev->power.timer_autosuspends = 1;
     569                 :    2019590 :                         goto out;
     570                 :            :                 }
     571                 :            :         }
     572                 :            : 
     573                 :            :         /* Other scheduled or pending requests need to be canceled. */
     574                 :            :         pm_runtime_cancel_pending(dev);
     575                 :            : 
     576         [ -  + ]:      13318 :         if (dev->power.runtime_status == RPM_SUSPENDING) {
     577                 :          0 :                 DEFINE_WAIT(wait);
     578                 :            : 
     579         [ #  # ]:          0 :                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
     580                 :            :                         retval = -EINPROGRESS;
     581                 :          0 :                         goto out;
     582                 :            :                 }
     583                 :            : 
     584         [ #  # ]:          0 :                 if (dev->power.irq_safe) {
     585                 :            :                         spin_unlock(&dev->power.lock);
     586                 :            : 
     587                 :          0 :                         cpu_relax();
     588                 :            : 
     589                 :            :                         spin_lock(&dev->power.lock);
     590                 :          0 :                         goto repeat;
     591                 :            :                 }
     592                 :            : 
     593                 :            :                 /* Wait for the other suspend running in parallel with us. */
     594                 :            :                 for (;;) {
     595                 :          0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
     596                 :            :                                         TASK_UNINTERRUPTIBLE);
     597         [ #  # ]:          0 :                         if (dev->power.runtime_status != RPM_SUSPENDING)
     598                 :            :                                 break;
     599                 :            : 
     600                 :            :                         spin_unlock_irq(&dev->power.lock);
     601                 :            : 
     602                 :          0 :                         schedule();
     603                 :            : 
     604                 :            :                         spin_lock_irq(&dev->power.lock);
     605                 :            :                 }
     606                 :          0 :                 finish_wait(&dev->power.wait_queue, &wait);
     607                 :          0 :                 goto repeat;
     608                 :            :         }
     609                 :            : 
     610         [ +  + ]:      13318 :         if (dev->power.no_callbacks)
     611                 :            :                 goto no_callback;       /* Assume success. */
     612                 :            : 
     613                 :            :         /* Carry out an asynchronous or a synchronous suspend. */
     614         [ +  + ]:       3696 :         if (rpmflags & RPM_ASYNC) {
     615         [ -  + ]:         38 :                 dev->power.request = (rpmflags & RPM_AUTO) ?
     616                 :            :                     RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
     617         [ +  - ]:         38 :                 if (!dev->power.request_pending) {
     618                 :         38 :                         dev->power.request_pending = true;
     619                 :         38 :                         queue_work(pm_wq, &dev->power.work);
     620                 :            :                 }
     621                 :            :                 goto out;
     622                 :            :         }
     623                 :            : 
     624                 :            :         __update_runtime_status(dev, RPM_SUSPENDING);
     625                 :            : 
     626                 :       3658 :         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
     627                 :            : 
     628                 :       3658 :         dev_pm_enable_wake_irq_check(dev, true);
     629                 :       3658 :         retval = rpm_callback(callback, dev);
     630         [ +  + ]:       3658 :         if (retval)
     631                 :            :                 goto fail;
     632                 :            : 
     633                 :            :  no_callback:
     634                 :            :         __update_runtime_status(dev, RPM_SUSPENDED);
     635                 :            :         pm_runtime_deactivate_timer(dev);
     636                 :            : 
     637         [ +  - ]:       9644 :         if (dev->parent) {
     638                 :            :                 parent = dev->parent;
     639                 :       9644 :                 atomic_add_unless(&parent->power.child_count, -1, 0);
     640                 :            :         }
     641                 :       9644 :         wake_up_all(&dev->power.wait_queue);
     642                 :            : 
     643         [ -  + ]:       9644 :         if (dev->power.deferred_resume) {
     644                 :          0 :                 dev->power.deferred_resume = false;
     645                 :          0 :                 rpm_resume(dev, 0);
     646                 :            :                 retval = -EAGAIN;
     647                 :          0 :                 goto out;
     648                 :            :         }
     649                 :            : 
     650                 :            :         /* Maybe the parent is now able to suspend. */
      651   [ +  -  +  -  +  - ]:       9644 :         if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
     652                 :            :                 spin_unlock(&dev->power.lock);
     653                 :            : 
     654                 :            :                 spin_lock(&parent->power.lock);
     655                 :       9644 :                 rpm_idle(parent, RPM_ASYNC);
     656                 :            :                 spin_unlock(&parent->power.lock);
     657                 :            : 
     658                 :            :                 spin_lock(&dev->power.lock);
     659                 :            :         }
     660                 :            : 
     661                 :            :  out:
     662                 :    4075326 :         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
     663                 :            : 
     664                 :    4075326 :         return retval;
     665                 :            : 
     666                 :            :  fail:
     667                 :       3636 :         dev_pm_disable_wake_irq_check(dev);
     668                 :            :         __update_runtime_status(dev, RPM_ACTIVE);
     669                 :       3636 :         dev->power.deferred_resume = false;
     670                 :       3636 :         wake_up_all(&dev->power.wait_queue);
     671                 :            : 
     672         [ +  - ]:       3636 :         if (retval == -EAGAIN || retval == -EBUSY) {
     673                 :       3636 :                 dev->power.runtime_error = 0;
     674                 :            : 
     675                 :            :                 /*
     676                 :            :                  * If the callback routine failed an autosuspend, and
     677                 :            :                  * if the last_busy time has been updated so that there
     678                 :            :                  * is a new autosuspend expiration time, automatically
     679                 :            :                  * reschedule another autosuspend.
     680                 :            :                  */
     681   [ -  +  -  + ]:       7272 :                 if ((rpmflags & RPM_AUTO) &&
     682                 :       3636 :                     pm_runtime_autosuspend_expiration(dev) != 0)
     683                 :            :                         goto repeat;
     684                 :            :         } else {
     685                 :            :                 pm_runtime_cancel_pending(dev);
     686                 :            :         }
     687                 :            :         goto out;
     688                 :            : }
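
Aside (sketch, not part of the covered source): the fail: path above treats -EAGAIN/-EBUSY from ->runtime_suspend() as "busy, try again later" and, with RPM_AUTO plus a fresh last_busy, reschedules another autosuspend. A hedged sketch of a driver callback using that convention; the demo_* names are hypothetical.

#include <linux/device.h>
#include <linux/pm_runtime.h>

struct demo_ctrl {
        bool xfer_in_flight;
};

static int demo_runtime_suspend(struct device *dev)
{
        struct demo_ctrl *ctrl = dev_get_drvdata(dev);

        if (ctrl->xfer_in_flight) {
                pm_runtime_mark_last_busy(dev);
                return -EBUSY;  /* retried later via the fail: path above */
        }

        /* ... put the hardware into its low-power state here ... */
        return 0;
}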
     689                 :            : 
     690                 :            : /**
     691                 :            :  * rpm_resume - Carry out runtime resume of given device.
     692                 :            :  * @dev: Device to resume.
     693                 :            :  * @rpmflags: Flag bits.
     694                 :            :  *
     695                 :            :  * Check if the device's runtime PM status allows it to be resumed.  Cancel
     696                 :            :  * any scheduled or pending requests.  If another resume has been started
     697                 :            :  * earlier, either return immediately or wait for it to finish, depending on the
     698                 :            :  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
     699                 :            :  * parallel with this function, either tell the other process to resume after
     700                 :            :  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
     701                 :            :  * flag is set then queue a resume request; otherwise run the
     702                 :            :  * ->runtime_resume() callback directly.  Queue an idle notification for the
     703                 :            :  * device if the resume succeeded.
     704                 :            :  *
     705                 :            :  * This function must be called under dev->power.lock with interrupts disabled.
     706                 :            :  */
     707                 :    4152052 : static int rpm_resume(struct device *dev, int rpmflags)
     708                 :            :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     709                 :            : {
     710                 :            :         int (*callback)(struct device *);
     711                 :            :         struct device *parent = NULL;
     712                 :            :         int retval = 0;
     713                 :            : 
     714                 :    4152052 :         trace_rpm_resume_rcuidle(dev, rpmflags);
     715                 :            : 
     716                 :            :  repeat:
     717         [ +  + ]:    4152068 :         if (dev->power.runtime_error)
     718                 :            :                 retval = -EINVAL;
     719   [ +  +  -  + ]:    4152072 :         else if (dev->power.disable_depth == 1 && dev->power.is_suspended
     720         [ #  # ]:          0 :             && dev->power.runtime_status == RPM_ACTIVE)
     721                 :            :                 retval = 1;
     722         [ +  + ]:    4152074 :         else if (dev->power.disable_depth > 0)
     723                 :            :                 retval = -EACCES;
     724         [ +  + ]:    4152068 :         if (retval)
     725                 :            :                 goto out;
     726                 :            : 
     727                 :            :         /*
     728                 :            :          * Other scheduled or pending requests need to be canceled.  Small
     729                 :            :          * optimization: If an autosuspend timer is running, leave it running
     730                 :            :          * rather than cancelling it now only to restart it again in the near
     731                 :            :          * future.
     732                 :            :          */
     733                 :    2082512 :         dev->power.request = RPM_REQ_NONE;
     734         [ +  + ]:    2082512 :         if (!dev->power.timer_autosuspends)
     735                 :            :                 pm_runtime_deactivate_timer(dev);
     736                 :            : 
     737         [ +  + ]:    2082512 :         if (dev->power.runtime_status == RPM_ACTIVE) {
     738                 :            :                 retval = 1;
     739                 :            :                 goto out;
     740                 :            :         }
     741                 :            : 
     742         [ -  + ]:       7894 :         if (dev->power.runtime_status == RPM_RESUMING
     743                 :       7894 :             || dev->power.runtime_status == RPM_SUSPENDING) {
     744                 :          0 :                 DEFINE_WAIT(wait);
     745                 :            : 
     746         [ #  # ]:          0 :                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
     747         [ #  # ]:          0 :                         if (dev->power.runtime_status == RPM_SUSPENDING)
     748                 :          0 :                                 dev->power.deferred_resume = true;
     749                 :            :                         else
     750                 :            :                                 retval = -EINPROGRESS;
     751                 :          0 :                         goto out;
     752                 :            :                 }
     753                 :            : 
     754         [ #  # ]:          0 :                 if (dev->power.irq_safe) {
     755                 :            :                         spin_unlock(&dev->power.lock);
     756                 :            : 
     757                 :          0 :                         cpu_relax();
     758                 :            : 
     759                 :            :                         spin_lock(&dev->power.lock);
     760                 :          0 :                         goto repeat;
     761                 :            :                 }
     762                 :            : 
     763                 :            :                 /* Wait for the operation carried out in parallel with us. */
     764                 :            :                 for (;;) {
     765                 :          0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
     766                 :            :                                         TASK_UNINTERRUPTIBLE);
     767         [ #  # ]:          0 :                         if (dev->power.runtime_status != RPM_RESUMING
     768                 :          0 :                             && dev->power.runtime_status != RPM_SUSPENDING)
     769                 :            :                                 break;
     770                 :            : 
     771                 :            :                         spin_unlock_irq(&dev->power.lock);
     772                 :            : 
     773                 :          0 :                         schedule();
     774                 :            : 
     775                 :            :                         spin_lock_irq(&dev->power.lock);
     776                 :            :                 }
     777                 :          0 :                 finish_wait(&dev->power.wait_queue, &wait);
     778                 :          0 :                 goto repeat;
     779                 :            :         }
     780                 :            : 
     781                 :            :         /*
     782                 :            :          * See if we can skip waking up the parent.  This is safe only if
     783                 :            :          * power.no_callbacks is set, because otherwise we don't know whether
     784                 :            :          * the resume will actually succeed.
     785                 :            :          */
     786   [ +  +  +  -  :       7894 :         if (dev->power.no_callbacks && !parent && dev->parent) {
                   +  - ]
     787                 :       7850 :                 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
     788         [ +  - ]:       7850 :                 if (dev->parent->power.disable_depth > 0
     789         [ +  - ]:       7850 :                     || dev->parent->power.ignore_children
     790         [ +  - ]:       7850 :                     || dev->parent->power.runtime_status == RPM_ACTIVE) {
     791                 :       7850 :                         atomic_inc(&dev->parent->power.child_count);
     792                 :       7850 :                         spin_unlock(&dev->parent->power.lock);
     793                 :            :                         retval = 1;
     794                 :       7850 :                         goto no_callback;       /* Assume success. */
     795                 :            :                 }
     796                 :            :                 spin_unlock(&dev->parent->power.lock);
     797                 :            :         }
     798                 :            : 
     799                 :            :         /* Carry out an asynchronous or a synchronous resume. */
     800         [ -  + ]:         44 :         if (rpmflags & RPM_ASYNC) {
     801                 :          0 :                 dev->power.request = RPM_REQ_RESUME;
     802         [ #  # ]:          0 :                 if (!dev->power.request_pending) {
     803                 :          0 :                         dev->power.request_pending = true;
     804                 :          0 :                         queue_work(pm_wq, &dev->power.work);
     805                 :            :                 }
     806                 :            :                 retval = 0;
     807                 :            :                 goto out;
     808                 :            :         }
     809                 :            : 
     810   [ +  +  +  - ]:         44 :         if (!parent && dev->parent) {
     811                 :            :                 /*
     812                 :            :                  * Increment the parent's usage counter and resume it if
     813                 :            :                  * necessary.  Not needed if dev is irq-safe; then the
     814                 :            :                  * parent is permanently resumed.
     815                 :            :                  */
     816                 :            :                 parent = dev->parent;
     817         [ +  - ]:         22 :                 if (dev->power.irq_safe)
     818                 :            :                         goto skip_parent;
     819                 :            :                 spin_unlock(&dev->power.lock);
     820                 :            : 
     821                 :            :                 pm_runtime_get_noresume(parent);
     822                 :            : 
     823                 :            :                 spin_lock(&parent->power.lock);
     824                 :            :                 /*
     825                 :            :                  * Resume the parent if it has runtime PM enabled and not been
     826                 :            :                  * set to ignore its children.
     827                 :            :                  */
     828         [ -  + ]:         22 :                 if (!parent->power.disable_depth
     829         [ #  # ]:          0 :                     && !parent->power.ignore_children) {
     830                 :          0 :                         rpm_resume(parent, 0);
     831         [ #  # ]:          0 :                         if (parent->power.runtime_status != RPM_ACTIVE)
     832                 :            :                                 retval = -EBUSY;
     833                 :            :                 }
     834                 :            :                 spin_unlock(&parent->power.lock);
     835                 :            : 
     836                 :            :                 spin_lock(&dev->power.lock);
     837         [ +  - ]:         22 :                 if (retval)
     838                 :            :                         goto out;
     839                 :            :                 goto repeat;
     840                 :            :         }
     841                 :            :  skip_parent:
     842                 :            : 
     843         [ +  - ]:         22 :         if (dev->power.no_callbacks)
     844                 :            :                 goto no_callback;       /* Assume success. */
     845                 :            : 
     846                 :            :         __update_runtime_status(dev, RPM_RESUMING);
     847                 :            : 
     848                 :         22 :         callback = RPM_GET_CALLBACK(dev, runtime_resume);
     849                 :            : 
     850                 :         22 :         dev_pm_disable_wake_irq_check(dev);
     851                 :         22 :         retval = rpm_callback(callback, dev);
     852         [ -  + ]:         22 :         if (retval) {
     853                 :            :                 __update_runtime_status(dev, RPM_SUSPENDED);
     854                 :            :                 pm_runtime_cancel_pending(dev);
     855                 :          0 :                 dev_pm_enable_wake_irq_check(dev, false);
     856                 :            :         } else {
     857                 :            :  no_callback:
     858                 :            :                 __update_runtime_status(dev, RPM_ACTIVE);
     859                 :            :                 pm_runtime_mark_last_busy(dev);
     860         [ +  + ]:       7872 :                 if (parent)
     861                 :         22 :                         atomic_inc(&parent->power.child_count);
     862                 :            :         }
     863                 :       7872 :         wake_up_all(&dev->power.wait_queue);
     864                 :            : 
     865         [ +  - ]:       7872 :         if (retval >= 0)
     866                 :       7872 :                 rpm_idle(dev, RPM_ASYNC);
     867                 :            : 
     868                 :            :  out:
     869   [ +  +  +  - ]:    4152046 :         if (parent && !dev->power.irq_safe) {
     870                 :            :                 spin_unlock_irq(&dev->power.lock);
     871                 :            : 
     872                 :            :                 pm_runtime_put(parent);
     873                 :            : 
     874                 :            :                 spin_lock_irq(&dev->power.lock);
     875                 :            :         }
     876                 :            : 
     877                 :    4152046 :         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
     878                 :            : 
     879                 :    4152050 :         return retval;
     880                 :            : }
     881                 :            : 
     882                 :            : /**
     883                 :            :  * pm_runtime_work - Universal runtime PM work function.
     884                 :            :  * @work: Work structure used for scheduling the execution of this function.
     885                 :            :  *
     886                 :            :  * Use @work to get the device object the work is to be done for, determine what
     887                 :            :  * is to be done and execute the appropriate runtime PM function.
     888                 :            :  */
     889                 :       4078 : static void pm_runtime_work(struct work_struct *work)
     890                 :            : {
     891                 :       4078 :         struct device *dev = container_of(work, struct device, power.work);
     892                 :            :         enum rpm_request req;
     893                 :            : 
     894                 :            :         spin_lock_irq(&dev->power.lock);
     895                 :            : 
     896         [ +  - ]:       4078 :         if (!dev->power.request_pending)
     897                 :            :                 goto out;
     898                 :            : 
     899                 :       4078 :         req = dev->power.request;
     900                 :       4078 :         dev->power.request = RPM_REQ_NONE;
     901                 :       4078 :         dev->power.request_pending = false;
     902                 :            : 
     903   [ +  -  +  -  :       4078 :         switch (req) {
                      + ]
     904                 :            :         case RPM_REQ_NONE:
     905                 :            :                 break;
     906                 :            :         case RPM_REQ_IDLE:
     907                 :       4040 :                 rpm_idle(dev, RPM_NOWAIT);
     908                 :       4040 :                 break;
     909                 :            :         case RPM_REQ_SUSPEND:
     910                 :          0 :                 rpm_suspend(dev, RPM_NOWAIT);
     911                 :          0 :                 break;
     912                 :            :         case RPM_REQ_AUTOSUSPEND:
     913                 :         24 :                 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
     914                 :         24 :                 break;
     915                 :            :         case RPM_REQ_RESUME:
     916                 :          0 :                 rpm_resume(dev, RPM_NOWAIT);
     917                 :          0 :                 break;
     918                 :            :         }
     919                 :            : 
     920                 :            :  out:
     921                 :            :         spin_unlock_irq(&dev->power.lock);
     922                 :       4078 : }
     923                 :            : 
     924                 :            : /**
     925                 :            :  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
     926                 :            :  * @timer: hrtimer used by pm_schedule_suspend().
     927                 :            :  *
     928                 :            :  * Check if the time is right and queue a suspend request.
     929                 :            :  */
     930                 :      19110 : static enum hrtimer_restart  pm_suspend_timer_fn(struct hrtimer *timer)
     931                 :            : {
     932                 :      19110 :         struct device *dev = container_of(timer, struct device, power.suspend_timer);
     933                 :            :         unsigned long flags;
     934                 :            :         u64 expires;
     935                 :            : 
     936                 :      19110 :         spin_lock_irqsave(&dev->power.lock, flags);
     937                 :            : 
     938                 :      19110 :         expires = dev->power.timer_expires;
     939                 :            :         /*
     940                 :            :          * If 'expires' is after the current time, we've been called
     941                 :            :          * too early, so skip queuing the suspend request this time.
     942                 :            :          */
     943   [ +  -  +  - ]:      19110 :         if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
     944                 :      19110 :                 dev->power.timer_expires = 0;
     945         [ -  + ]:      19110 :                 rpm_suspend(dev, dev->power.timer_autosuspends ?
     946                 :            :                     (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
     947                 :            :         }
     948                 :            : 
     949                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
     950                 :            : 
     951                 :      19110 :         return HRTIMER_NORESTART;
     952                 :            : }
     953                 :            : 
     954                 :            : /**
     955                 :            :  * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
     956                 :            :  * @dev: Device to suspend.
     957                 :            :  * @delay: Time to wait before submitting a suspend request, in milliseconds.
     958                 :            :  */
     959                 :          0 : int pm_schedule_suspend(struct device *dev, unsigned int delay)
     960                 :            : {
     961                 :            :         unsigned long flags;
     962                 :            :         u64 expires;
     963                 :            :         int retval;
     964                 :            : 
     965                 :          0 :         spin_lock_irqsave(&dev->power.lock, flags);
     966                 :            : 
     967         [ #  # ]:          0 :         if (!delay) {
     968                 :          0 :                 retval = rpm_suspend(dev, RPM_ASYNC);
     969                 :          0 :                 goto out;
     970                 :            :         }
     971                 :            : 
     972                 :          0 :         retval = rpm_check_suspend_allowed(dev);
     973         [ #  # ]:          0 :         if (retval)
     974                 :            :                 goto out;
     975                 :            : 
     976                 :            :         /* Other scheduled or pending requests need to be canceled. */
     977                 :            :         pm_runtime_cancel_pending(dev);
     978                 :            : 
     979                 :          0 :         expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
     980                 :          0 :         dev->power.timer_expires = expires;
     981                 :          0 :         dev->power.timer_autosuspends = 0;
     982                 :          0 :         hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
     983                 :            : 
     984                 :            :  out:
     985                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
     986                 :            : 
     987                 :          0 :         return retval;
     988                 :            : }
     989                 :            : EXPORT_SYMBOL_GPL(pm_schedule_suspend);
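
A minimal driver-side sketch of how pm_schedule_suspend() is typically used; the foo_* helper name and the 500 ms delay are assumptions made for illustration, not taken from this file:

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	/* Hypothetical helper: ask the PM core to attempt a suspend of @dev
	 * in roughly 500 ms, without blocking the caller. */
	static void foo_request_delayed_suspend(struct device *dev)
	{
		int ret = pm_schedule_suspend(dev, 500);	/* delay in milliseconds */

		if (ret < 0)
			dev_dbg(dev, "delayed suspend not scheduled: %d\n", ret);
	}
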
     990                 :            : 
     991                 :            : /**
     992                 :            :  * __pm_runtime_idle - Entry point for runtime idle operations.
     993                 :            :  * @dev: Device to send idle notification for.
     994                 :            :  * @rpmflags: Flag bits.
     995                 :            :  *
     996                 :            :  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
     997                 :            :  * return immediately if it is larger than zero.  Then carry out an idle
     998                 :            :  * notification, either synchronous or asynchronous.
     999                 :            :  *
    1000                 :            :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1001                 :            :  * or if pm_runtime_irq_safe() has been called.
    1002                 :            :  */
    1003                 :     122654 : int __pm_runtime_idle(struct device *dev, int rpmflags)
    1004                 :            : {
    1005                 :            :         unsigned long flags;
    1006                 :            :         int retval;
    1007                 :            : 
    1008         [ +  + ]:     122654 :         if (rpmflags & RPM_GET_PUT) {
    1009         [ +  + ]:     148716 :                 if (!atomic_dec_and_test(&dev->power.usage_count))
    1010                 :            :                         return 0;
    1011                 :            :         }
    1012                 :            : 
    1013   [ +  +  +  - ]:      87836 :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
    1014                 :            : 
    1015                 :      87836 :         spin_lock_irqsave(&dev->power.lock, flags);
    1016                 :      87836 :         retval = rpm_idle(dev, rpmflags);
    1017                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1018                 :            : 
    1019                 :      87836 :         return retval;
    1020                 :            : }
    1021                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_idle);
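
In driver code this entry point is normally reached through the wrappers in <linux/pm_runtime.h>; for example, pm_runtime_put() expands to __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC). A minimal sketch of that common pattern (the foo_* helper is hypothetical):

	#include <linux/pm_runtime.h>

	/* Drop the reference taken before a burst of I/O; once the usage
	 * count reaches zero, an asynchronous idle notification is queued
	 * for the device. */
	static void foo_io_done(struct device *dev)
	{
		pm_runtime_put(dev);
	}
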
    1022                 :            : 
    1023                 :            : /**
    1024                 :            :  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
    1025                 :            :  * @dev: Device to suspend.
    1026                 :            :  * @rpmflags: Flag bits.
    1027                 :            :  *
    1028                 :            :  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
    1029                 :            :  * return immediately if it is larger than zero.  Then carry out a suspend,
    1030                 :            :  * either synchronous or asynchronous.
    1031                 :            :  *
    1032                 :            :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1033                 :            :  * or if pm_runtime_irq_safe() has been called.
    1034                 :            :  */
    1035                 :    4083712 : int __pm_runtime_suspend(struct device *dev, int rpmflags)
    1036                 :            : {
    1037                 :            :         unsigned long flags;
    1038                 :            :         int retval;
    1039                 :            : 
    1040         [ +  - ]:    4083712 :         if (rpmflags & RPM_GET_PUT) {
    1041         [ +  + ]:    8167426 :                 if (!atomic_dec_and_test(&dev->power.usage_count))
    1042                 :            :                         return 0;
    1043                 :            :         }
    1044                 :            : 
    1045   [ +  +  +  - ]:    4042530 :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
    1046                 :            : 
    1047                 :    4042530 :         spin_lock_irqsave(&dev->power.lock, flags);
    1048                 :    4042530 :         retval = rpm_suspend(dev, rpmflags);
    1049                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1050                 :            : 
    1051                 :    4042530 :         return retval;
    1052                 :            : }
    1053                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
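
Drivers usually call this through wrappers such as pm_runtime_suspend(), pm_runtime_autosuspend() or pm_runtime_put_sync_suspend(). A hedged sketch of a teardown path, with the foo_* helper being an assumption for illustration:

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	/* Hypothetical stop path: drop the last reference and try to suspend
	 * the device synchronously (RPM_GET_PUT without RPM_ASYNC). */
	static void foo_stop(struct device *dev)
	{
		int ret = pm_runtime_put_sync_suspend(dev);

		if (ret < 0 && ret != -EBUSY)
			dev_warn(dev, "runtime suspend failed: %d\n", ret);
	}
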
    1054                 :            : 
    1055                 :            : /**
    1056                 :            :  * __pm_runtime_resume - Entry point for runtime resume operations.
    1057                 :            :  * @dev: Device to resume.
    1058                 :            :  * @rpmflags: Flag bits.
    1059                 :            :  *
    1060                 :            :  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
    1061                 :            :  * carry out a resume, either synchronous or asynchronous.
    1062                 :            :  *
    1063                 :            :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1064                 :            :  * or if pm_runtime_irq_safe() has been called.
    1065                 :            :  */
    1066                 :    4150032 : int __pm_runtime_resume(struct device *dev, int rpmflags)
    1067                 :            : {
    1068                 :            :         unsigned long flags;
    1069                 :            :         int retval;
    1070                 :            : 
    1071   [ +  +  +  -  :    4150032 :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
                   +  + ]
    1072                 :            :                         dev->power.runtime_status != RPM_ACTIVE);
    1073                 :            : 
    1074         [ +  - ]:    4150030 :         if (rpmflags & RPM_GET_PUT)
    1075                 :    4150030 :                 atomic_inc(&dev->power.usage_count);
    1076                 :            : 
    1077                 :    4150032 :         spin_lock_irqsave(&dev->power.lock, flags);
    1078                 :    4150030 :         retval = rpm_resume(dev, rpmflags);
    1079                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1080                 :            : 
    1081                 :    4150032 :         return retval;
    1082                 :            : }
    1083                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_resume);
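
The usual way drivers reach this function is via pm_runtime_get_sync() (which is __pm_runtime_resume(dev, RPM_GET_PUT)) before touching hardware, balanced by one of the put helpers afterwards. A minimal sketch of that pattern, with foo_do_io() being a hypothetical example:

	#include <linux/pm_runtime.h>

	static int foo_do_io(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);	/* resume dev and its ancestors */

		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* keep the usage count balanced */
			return ret;
		}

		/* ... access the hardware here ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return 0;
	}
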
    1084                 :            : 
    1085                 :            : /**
    1086                 :            :  * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
    1087                 :            :  * @dev: Device to handle.
    1088                 :            :  *
    1089                 :            :  * Return -EINVAL if runtime PM is disabled for the device.
    1090                 :            :  *
    1091                 :            :  * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
    1092                 :            :  * and the runtime PM usage counter is nonzero, increment the counter and
    1093                 :            :  * return 1.  Otherwise return 0 without changing the counter.
    1094                 :            :  */
    1095                 :          0 : int pm_runtime_get_if_in_use(struct device *dev)
    1096                 :            : {
    1097                 :            :         unsigned long flags;
    1098                 :            :         int retval;
    1099                 :            : 
    1100                 :          0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1101         [ #  # ]:          0 :         retval = dev->power.disable_depth > 0 ? -EINVAL :
    1102                 :          0 :                 dev->power.runtime_status == RPM_ACTIVE
    1103   [ #  #  #  # ]:          0 :                         && atomic_inc_not_zero(&dev->power.usage_count);
    1104                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1105                 :          0 :         return retval;
    1106                 :            : }
    1107                 :            : EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
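
This helper suits opportunistic, low-cost accesses that should only happen while the device is already powered up and in use; the caller must never wait for a resume. A hypothetical sketch:

	#include <linux/pm_runtime.h>

	static void foo_trim_hw_caches(struct device *dev)
	{
		/* Returns 1 only if the device was active and in use; 0 or
		 * -EINVAL means we must not touch the hardware. */
		if (pm_runtime_get_if_in_use(dev) <= 0)
			return;

		/* ... light-weight register accesses ... */

		pm_runtime_put(dev);
	}
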
    1108                 :            : 
    1109                 :            : /**
    1110                 :            :  * __pm_runtime_set_status - Set runtime PM status of a device.
    1111                 :            :  * @dev: Device to handle.
    1112                 :            :  * @status: New runtime PM status of the device.
    1113                 :            :  *
    1114                 :            :  * If runtime PM of the device is disabled or its power.runtime_error field is
    1115                 :            :  * different from zero, the status may be changed either to RPM_ACTIVE, or to
    1116                 :            :  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
    1117                 :            :  * However, if the device has a parent and the parent is not active, and the
    1118                 :            :  * parent's power.ignore_children flag is unset, the device's status cannot be
    1119                 :            :  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
    1120                 :            :  *
    1121                 :            :  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
    1122                 :            :  * and the device parent's counter of unsuspended children is modified to
    1123                 :            :  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
    1124                 :            :  * notification request for the parent is submitted.
    1125                 :            :  *
    1126                 :            :  * If @dev has any suppliers (as reflected by device links to them), and @status
    1127                 :            :  * is RPM_ACTIVE, they will be activated upfront and if the activation of one
    1128                 :            :  * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
    1129                 :            :  * of the @status value) and the suppliers will be deactivated on exit.  The
    1130                 :            :  * error returned by the failing supplier activation will be returned in that
    1131                 :            :  * case.
    1132                 :            :  */
    1133                 :       9512 : int __pm_runtime_set_status(struct device *dev, unsigned int status)
    1134                 :            : {
    1135                 :       9512 :         struct device *parent = dev->parent;
    1136                 :            :         bool notify_parent = false;
    1137                 :            :         int error = 0;
    1138                 :            : 
    1139         [ +  - ]:       9512 :         if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
    1140                 :            :                 return -EINVAL;
    1141                 :            : 
    1142                 :            :         spin_lock_irq(&dev->power.lock);
    1143                 :            : 
    1144                 :            :         /*
    1145                 :            :          * Prevent PM-runtime from being enabled for the device or return an
    1146                 :            :          * error if it is enabled already and working.
    1147                 :            :          */
    1148   [ +  -  +  - ]:       9512 :         if (dev->power.runtime_error || dev->power.disable_depth)
    1149                 :       9512 :                 dev->power.disable_depth++;
    1150                 :            :         else
    1151                 :            :                 error = -EAGAIN;
    1152                 :            : 
    1153                 :            :         spin_unlock_irq(&dev->power.lock);
    1154                 :            : 
    1155         [ +  - ]:       9512 :         if (error)
    1156                 :            :                 return error;
    1157                 :            : 
    1158                 :            :         /*
    1159                 :            :          * If the new status is RPM_ACTIVE, the suppliers can be activated
    1160                 :            :          * upfront regardless of the current status, because next time
    1161                 :            :          * rpm_put_suppliers() runs, the rpm_active refcounts of the links
    1162                 :            :          * involved will be dropped down to one anyway.
    1163                 :            :          */
    1164         [ +  + ]:       9512 :         if (status == RPM_ACTIVE) {
    1165                 :       8998 :                 int idx = device_links_read_lock();
    1166                 :            : 
    1167                 :       8998 :                 error = rpm_get_suppliers(dev);
    1168         [ -  + ]:       8998 :                 if (error)
    1169                 :            :                         status = RPM_SUSPENDED;
    1170                 :            : 
    1171                 :       8998 :                 device_links_read_unlock(idx);
    1172                 :            :         }
    1173                 :            : 
    1174                 :            :         spin_lock_irq(&dev->power.lock);
    1175                 :            : 
    1176   [ +  -  +  - ]:       9512 :         if (dev->power.runtime_status == status || !parent)
    1177                 :            :                 goto out_set;
    1178                 :            : 
    1179         [ +  + ]:       9512 :         if (status == RPM_SUSPENDED) {
    1180                 :        514 :                 atomic_add_unless(&parent->power.child_count, -1, 0);
    1181                 :        514 :                 notify_parent = !parent->power.ignore_children;
    1182                 :            :         } else {
    1183                 :       8998 :                 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
    1184                 :            : 
    1185                 :            :                 /*
    1186                 :            :                  * It is invalid to put an active child under a parent that is
    1187                 :            :                  * not active, has runtime PM enabled and the
    1188                 :            :                  * 'power.ignore_children' flag unset.
    1189                 :            :                  */
    1190         [ +  + ]:       8998 :                 if (!parent->power.disable_depth
    1191         [ +  + ]:       7786 :                     && !parent->power.ignore_children
    1192         [ -  + ]:       4150 :                     && parent->power.runtime_status != RPM_ACTIVE) {
    1193                 :          0 :                         dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
    1194                 :            :                                 dev_name(dev),
    1195                 :            :                                 dev_name(parent));
    1196                 :          0 :                         error = -EBUSY;
    1197         [ +  - ]:       8998 :                 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
    1198                 :       8998 :                         atomic_inc(&parent->power.child_count);
    1199                 :            :                 }
    1200                 :            : 
    1201                 :            :                 spin_unlock(&parent->power.lock);
    1202                 :            : 
    1203         [ +  - ]:       8998 :                 if (error) {
    1204                 :            :                         status = RPM_SUSPENDED;
    1205                 :            :                         goto out;
    1206                 :            :                 }
    1207                 :            :         }
    1208                 :            : 
    1209                 :            :  out_set:
    1210                 :            :         __update_runtime_status(dev, status);
    1211         [ +  - ]:       9512 :         if (!error)
    1212                 :       9512 :                 dev->power.runtime_error = 0;
    1213                 :            : 
    1214                 :            :  out:
    1215                 :            :         spin_unlock_irq(&dev->power.lock);
    1216                 :            : 
    1217         [ +  + ]:       9512 :         if (notify_parent)
    1218                 :            :                 pm_request_idle(parent);
    1219                 :            : 
    1220         [ +  + ]:       9512 :         if (status == RPM_SUSPENDED) {
    1221                 :        514 :                 int idx = device_links_read_lock();
    1222                 :            : 
    1223                 :        514 :                 rpm_put_suppliers(dev);
    1224                 :            : 
    1225                 :        514 :                 device_links_read_unlock(idx);
    1226                 :            :         }
    1227                 :            : 
    1228                 :       9512 :         pm_runtime_enable(dev);
    1229                 :            : 
    1230                 :       9512 :         return error;
    1231                 :            : }
    1232                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
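
Drivers typically reach this through pm_runtime_set_active() or pm_runtime_set_suspended() before enabling runtime PM in their probe paths. A minimal sketch, assuming a device that is left powered up by firmware (foo_probe_pm_setup() is hypothetical):

	#include <linux/pm_runtime.h>

	static int foo_probe_pm_setup(struct device *dev)
	{
		/* Record the hardware state first, then allow runtime PM. */
		int ret = pm_runtime_set_active(dev);	/* __pm_runtime_set_status(dev, RPM_ACTIVE) */

		if (ret)
			return ret;

		pm_runtime_enable(dev);
		return 0;
	}
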
    1233                 :            : 
    1234                 :            : /**
    1235                 :            :  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
    1236                 :            :  * @dev: Device to handle.
    1237                 :            :  *
    1238                 :            :  * Flush all pending requests for the device from pm_wq and wait for all
    1239                 :            :  * runtime PM operations involving the device in progress to complete.
    1240                 :            :  *
    1241                 :            :  * Should be called under dev->power.lock with interrupts disabled.
    1242                 :            :  */
    1243                 :      23438 : static void __pm_runtime_barrier(struct device *dev)
    1244                 :            : {
    1245                 :            :         pm_runtime_deactivate_timer(dev);
    1246                 :            : 
    1247         [ -  + ]:      23438 :         if (dev->power.request_pending) {
    1248                 :          0 :                 dev->power.request = RPM_REQ_NONE;
    1249                 :            :                 spin_unlock_irq(&dev->power.lock);
    1250                 :            : 
    1251                 :          0 :                 cancel_work_sync(&dev->power.work);
    1252                 :            : 
    1253                 :            :                 spin_lock_irq(&dev->power.lock);
    1254                 :          0 :                 dev->power.request_pending = false;
    1255                 :            :         }
    1256                 :            : 
    1257         [ +  - ]:      46876 :         if (dev->power.runtime_status == RPM_SUSPENDING
    1258                 :      23438 :             || dev->power.runtime_status == RPM_RESUMING
    1259         [ -  + ]:      23438 :             || dev->power.idle_notification) {
    1260                 :          0 :                 DEFINE_WAIT(wait);
    1261                 :            : 
    1262                 :            :                 /* Suspend, wake-up or idle notification in progress. */
    1263                 :            :                 for (;;) {
    1264                 :          0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
    1265                 :            :                                         TASK_UNINTERRUPTIBLE);
    1266         [ #  # ]:          0 :                         if (dev->power.runtime_status != RPM_SUSPENDING
    1267                 :          0 :                             && dev->power.runtime_status != RPM_RESUMING
    1268         [ #  # ]:          0 :                             && !dev->power.idle_notification)
    1269                 :            :                                 break;
    1270                 :            :                         spin_unlock_irq(&dev->power.lock);
    1271                 :            : 
    1272                 :          0 :                         schedule();
    1273                 :            : 
    1274                 :            :                         spin_lock_irq(&dev->power.lock);
    1275                 :            :                 }
    1276                 :          0 :                 finish_wait(&dev->power.wait_queue, &wait);
    1277                 :            :         }
    1278                 :      23438 : }
    1279                 :            : 
    1280                 :            : /**
    1281                 :            :  * pm_runtime_barrier - Flush pending requests and wait for completions.
    1282                 :            :  * @dev: Device to handle.
    1283                 :            :  *
    1284                 :            :  * Prevent the device from being suspended by incrementing its usage counter and
    1285                 :            :  * if there's a pending resume request for the device, wake the device up.
    1286                 :            :  * Next, make sure that all pending requests for the device have been flushed
    1287                 :            :  * from pm_wq and wait for all runtime PM operations involving the device in
    1288                 :            :  * progress to complete.
    1289                 :            :  *
    1290                 :            :  * Return value:
    1291                 :            :  * 1, if there was a resume request pending and the device had to be woken up,
    1292                 :            :  * 0, otherwise
    1293                 :            :  */
    1294                 :      22520 : int pm_runtime_barrier(struct device *dev)
    1295                 :            : {
    1296                 :            :         int retval = 0;
    1297                 :            : 
    1298                 :            :         pm_runtime_get_noresume(dev);
    1299                 :            :         spin_lock_irq(&dev->power.lock);
    1300                 :            : 
    1301         [ -  + ]:      22520 :         if (dev->power.request_pending
    1302         [ #  # ]:          0 :             && dev->power.request == RPM_REQ_RESUME) {
    1303                 :          0 :                 rpm_resume(dev, 0);
    1304                 :            :                 retval = 1;
    1305                 :            :         }
    1306                 :            : 
    1307                 :      22520 :         __pm_runtime_barrier(dev);
    1308                 :            : 
    1309                 :            :         spin_unlock_irq(&dev->power.lock);
    1310                 :            :         pm_runtime_put_noidle(dev);
    1311                 :            : 
    1312                 :      22520 :         return retval;
    1313                 :            : }
    1314                 :            : EXPORT_SYMBOL_GPL(pm_runtime_barrier);
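
One plausible use, sketched under the assumption that the caller also wants the device kept from suspending while it reconfigures it (foo_quiesce() is hypothetical):

	#include <linux/pm_runtime.h>

	static void foo_quiesce(struct device *dev)
	{
		pm_runtime_get_noresume(dev);	/* block suspends from here on */
		pm_runtime_barrier(dev);	/* flush pm_wq work, wait for transitions */

		/* ... reconfigure; runtime suspend is blocked by the usage count ... */

		pm_runtime_put_noidle(dev);
	}
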
    1315                 :            : 
    1316                 :            : /**
    1317                 :            :  * __pm_runtime_disable - Disable runtime PM of a device.
    1318                 :            :  * @dev: Device to handle.
    1319                 :            :  * @check_resume: If set, check if there's a resume request for the device.
    1320                 :            :  *
    1321                 :            :  * Increment power.disable_depth for the device and if it was zero previously,
    1322                 :            :  * cancel all pending runtime PM requests for the device and wait for all
    1323                 :            :  * operations in progress to complete.  The device can be either active or
    1324                 :            :  * suspended after its runtime PM has been disabled.
    1325                 :            :  *
    1326                 :            :  * If @check_resume is set and there's a resume request pending when
    1327                 :            :  * __pm_runtime_disable() is called and power.disable_depth is zero, the
    1328                 :            :  * function will wake up the device before disabling its runtime PM.
    1329                 :            :  */
    1330                 :       1324 : void __pm_runtime_disable(struct device *dev, bool check_resume)
    1331                 :            : {
    1332                 :            :         spin_lock_irq(&dev->power.lock);
    1333                 :            : 
    1334         [ +  + ]:       1324 :         if (dev->power.disable_depth > 0) {
    1335                 :        406 :                 dev->power.disable_depth++;
    1336                 :        406 :                 goto out;
    1337                 :            :         }
    1338                 :            : 
    1339                 :            :         /*
    1340                 :            :          * Wake up the device if there's a resume request pending, because that
    1341                 :            :          * means there probably is some I/O to process and disabling runtime PM
    1342                 :            :          * shouldn't prevent the device from processing the I/O.
    1343                 :            :          */
    1344   [ +  -  -  + ]:        918 :         if (check_resume && dev->power.request_pending
    1345         [ #  # ]:          0 :             && dev->power.request == RPM_REQ_RESUME) {
    1346                 :            :                 /*
    1347                 :            :                  * Prevent suspends and idle notifications from being carried
    1348                 :            :                  * out after we have woken up the device.
    1349                 :            :                  */
    1350                 :            :                 pm_runtime_get_noresume(dev);
    1351                 :            : 
    1352                 :          0 :                 rpm_resume(dev, 0);
    1353                 :            : 
    1354                 :            :                 pm_runtime_put_noidle(dev);
    1355                 :            :         }
    1356                 :            : 
    1357                 :            :         /* Update time accounting before disabling PM-runtime. */
    1358                 :        918 :         update_pm_runtime_accounting(dev);
    1359                 :            : 
    1360         [ +  - ]:        918 :         if (!dev->power.disable_depth++)
    1361                 :        918 :                 __pm_runtime_barrier(dev);
    1362                 :            : 
    1363                 :            :  out:
    1364                 :            :         spin_unlock_irq(&dev->power.lock);
    1365                 :       1324 : }
    1366                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_disable);
    1367                 :            : 
    1368                 :            : /**
    1369                 :            :  * pm_runtime_enable - Enable runtime PM of a device.
    1370                 :            :  * @dev: Device to handle.
    1371                 :            :  */
    1372                 :      19070 : void pm_runtime_enable(struct device *dev)
    1373                 :            : {
    1374                 :            :         unsigned long flags;
    1375                 :            : 
    1376                 :      19070 :         spin_lock_irqsave(&dev->power.lock, flags);
    1377                 :            : 
    1378         [ +  - ]:      19070 :         if (dev->power.disable_depth > 0) {
    1379                 :      19070 :                 dev->power.disable_depth--;
    1380                 :            : 
    1381                 :            :                 /* About to enable runtime pm, set accounting_timestamp to now */
    1382         [ +  + ]:      19070 :                 if (!dev->power.disable_depth)
    1383                 :       9558 :                         dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
    1384                 :            :         } else {
    1385                 :          0 :                 dev_warn(dev, "Unbalanced %s!\n", __func__);
    1386                 :            :         }
    1387                 :            : 
    1388   [ +  +  +  +  :      19226 :         WARN(!dev->power.disable_depth &&
          +  +  +  -  -  
                      + ]
    1389                 :            :              dev->power.runtime_status == RPM_SUSPENDED &&
    1390                 :            :              !dev->power.ignore_children &&
    1391                 :            :              atomic_read(&dev->power.child_count) > 0,
    1392                 :            :              "Enabling runtime PM for inactive device (%s) with active children\n",
    1393                 :            :              dev_name(dev));
    1394                 :            : 
    1395                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1396                 :      19070 : }
    1397                 :            : EXPORT_SYMBOL_GPL(pm_runtime_enable);
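
pm_runtime_enable() is normally paired with pm_runtime_disable() (i.e. __pm_runtime_disable(dev, true)) in a driver's probe and remove paths, balancing the initial disable_depth of 1 set by pm_runtime_init(). A minimal, hypothetical pairing:

	#include <linux/pm_runtime.h>

	static int foo_probe(struct device *dev)
	{
		pm_runtime_enable(dev);		/* disable_depth: 1 -> 0 */
		return 0;
	}

	static void foo_remove(struct device *dev)
	{
		pm_runtime_disable(dev);	/* disable_depth: 0 -> 1 */
	}
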
    1398                 :            : 
    1399                 :            : /**
    1400                 :            :  * pm_runtime_forbid - Block runtime PM of a device.
    1401                 :            :  * @dev: Device to handle.
    1402                 :            :  *
    1403                 :            :  * Increase the device's usage count and clear its power.runtime_auto flag,
    1404                 :            :  * so that it cannot be suspended at run time until pm_runtime_allow() is called
    1405                 :            :  * for it.
    1406                 :            :  */
    1407                 :       2020 : void pm_runtime_forbid(struct device *dev)
    1408                 :            : {
    1409                 :            :         spin_lock_irq(&dev->power.lock);
    1410         [ +  - ]:       2020 :         if (!dev->power.runtime_auto)
    1411                 :            :                 goto out;
    1412                 :            : 
    1413                 :       2020 :         dev->power.runtime_auto = false;
    1414                 :       2020 :         atomic_inc(&dev->power.usage_count);
    1415                 :       2020 :         rpm_resume(dev, 0);
    1416                 :            : 
    1417                 :            :  out:
    1418                 :            :         spin_unlock_irq(&dev->power.lock);
    1419                 :       2020 : }
    1420                 :            : EXPORT_SYMBOL_GPL(pm_runtime_forbid);
    1421                 :            : 
    1422                 :            : /**
    1423                 :            :  * pm_runtime_allow - Unblock runtime PM of a device.
    1424                 :            :  * @dev: Device to handle.
    1425                 :            :  *
    1426                 :            :  * Decrease the device's usage count and set its power.runtime_auto flag.
    1427                 :            :  */
    1428                 :        404 : void pm_runtime_allow(struct device *dev)
    1429                 :            : {
    1430                 :            :         spin_lock_irq(&dev->power.lock);
    1431         [ +  - ]:        404 :         if (dev->power.runtime_auto)
    1432                 :            :                 goto out;
    1433                 :            : 
    1434                 :        404 :         dev->power.runtime_auto = true;
    1435         [ -  + ]:        808 :         if (atomic_dec_and_test(&dev->power.usage_count))
    1436                 :          0 :                 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
    1437                 :            : 
    1438                 :            :  out:
    1439                 :            :         spin_unlock_irq(&dev->power.lock);
    1440                 :        404 : }
    1441                 :            : EXPORT_SYMBOL_GPL(pm_runtime_allow);
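
pm_runtime_forbid() and pm_runtime_allow() back the "on"/"auto" values of the power/control attribute in sysfs; a driver that wants to keep its device powered until user space explicitly opts in can call the former during setup. A short hypothetical sketch:

	#include <linux/pm_runtime.h>

	static void foo_default_to_on(struct device *dev)
	{
		/* Bump the usage count and resume the device; it stays active
		 * until "auto" is written to power/control (or
		 * pm_runtime_allow() is called). */
		pm_runtime_forbid(dev);
	}
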
    1442                 :            : 
    1443                 :            : /**
    1444                 :            :  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
    1445                 :            :  * @dev: Device to handle.
    1446                 :            :  *
    1447                 :            :  * Set the power.no_callbacks flag, which tells the PM core that this
    1448                 :            :  * device is power-managed through its parent and has no runtime PM
    1449                 :            :  * callbacks of its own.  The runtime sysfs attributes will be removed.
    1450                 :            :  */
    1451                 :       2828 : void pm_runtime_no_callbacks(struct device *dev)
    1452                 :            : {
    1453                 :            :         spin_lock_irq(&dev->power.lock);
    1454                 :       2828 :         dev->power.no_callbacks = 1;
    1455                 :            :         spin_unlock_irq(&dev->power.lock);
    1456         [ -  + ]:       2828 :         if (device_is_registered(dev))
    1457                 :          0 :                 rpm_sysfs_remove(dev);
    1458                 :       2828 : }
    1459                 :            : EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
    1460                 :            : 
    1461                 :            : /**
    1462                 :            :  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
    1463                 :            :  * @dev: Device to handle
    1464                 :            :  *
    1465                 :            :  * Set the power.irq_safe flag, which tells the PM core that the
    1466                 :            :  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
    1467                 :            :  * always be invoked with the spinlock held and interrupts disabled.  It also
    1468                 :            :  * causes the parent's usage counter to be permanently incremented, preventing
    1469                 :            :  * the parent from runtime suspending -- otherwise an irq-safe child might have
    1470                 :            :  * to wait for a non-irq-safe parent.
    1471                 :            :  */
    1472                 :          0 : void pm_runtime_irq_safe(struct device *dev)
    1473                 :            : {
    1474         [ #  # ]:          0 :         if (dev->parent)
    1475                 :            :                 pm_runtime_get_sync(dev->parent);
    1476                 :            :         spin_lock_irq(&dev->power.lock);
    1477                 :          0 :         dev->power.irq_safe = 1;
    1478                 :            :         spin_unlock_irq(&dev->power.lock);
    1479                 :          0 : }
    1480                 :            : EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
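
A hypothetical init fragment for a device whose runtime PM callbacks only touch a few registers and may therefore run with interrupts disabled:

	#include <linux/pm_runtime.h>

	static void foo_setup_atomic_pm(struct device *dev)
	{
		/* After this, pm_runtime_get_sync() may be called from atomic
		 * context for this device, but its parent can no longer
		 * runtime suspend. */
		pm_runtime_irq_safe(dev);
	}
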
    1481                 :            : 
    1482                 :            : /**
    1483                 :            :  * update_autosuspend - Handle a change to a device's autosuspend settings.
    1484                 :            :  * @dev: Device to handle.
    1485                 :            :  * @old_delay: The former autosuspend_delay value.
    1486                 :            :  * @old_use: The former use_autosuspend value.
    1487                 :            :  *
    1488                 :            :  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
    1489                 :            :  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
    1490                 :            :  *
    1491                 :            :  * This function must be called under dev->power.lock with interrupts disabled.
    1492                 :            :  */
    1493                 :       5656 : static void update_autosuspend(struct device *dev, int old_delay, int old_use)
    1494                 :            : {
    1495                 :       5656 :         int delay = dev->power.autosuspend_delay;
    1496                 :            : 
    1497                 :            :         /* Should runtime suspend be prevented now? */
    1498   [ +  +  -  + ]:       5656 :         if (dev->power.use_autosuspend && delay < 0) {
    1499                 :            : 
    1500                 :            :                 /* If it used to be allowed then prevent it. */
    1501         [ #  # ]:          0 :                 if (!old_use || old_delay >= 0) {
    1502                 :          0 :                         atomic_inc(&dev->power.usage_count);
    1503                 :          0 :                         rpm_resume(dev, 0);
    1504                 :            :                 }
    1505                 :            :         }
    1506                 :            : 
    1507                 :            :         /* Runtime suspend should be allowed now. */
    1508                 :            :         else {
    1509                 :            : 
    1510                 :            :                 /* If it used to be prevented then allow it. */
    1511         [ -  + ]:       5656 :                 if (old_use && old_delay < 0)
    1512                 :          0 :                         atomic_dec(&dev->power.usage_count);
    1513                 :            : 
    1514                 :            :                 /* Maybe we can autosuspend now. */
    1515                 :       5656 :                 rpm_idle(dev, RPM_AUTO);
    1516                 :            :         }
    1517                 :       5656 : }
    1518                 :            : 
    1519                 :            : /**
    1520                 :            :  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
    1521                 :            :  * @dev: Device to handle.
    1522                 :            :  * @delay: Value of the new delay in milliseconds.
    1523                 :            :  *
    1524                 :            :  * Set the device's power.autosuspend_delay value.  If it changes to negative
    1525                 :            :  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
    1526                 :            :  * changes the other way, allow runtime suspends.
    1527                 :            :  */
    1528                 :       3232 : void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
    1529                 :            : {
    1530                 :            :         int old_delay, old_use;
    1531                 :            : 
    1532                 :            :         spin_lock_irq(&dev->power.lock);
    1533                 :       3232 :         old_delay = dev->power.autosuspend_delay;
    1534                 :       3232 :         old_use = dev->power.use_autosuspend;
    1535                 :       3232 :         dev->power.autosuspend_delay = delay;
    1536                 :       3232 :         update_autosuspend(dev, old_delay, old_use);
    1537                 :            :         spin_unlock_irq(&dev->power.lock);
    1538                 :       3232 : }
    1539                 :            : EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
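A minimal probe-time sketch of how a driver typically pairs this call with the autosuspend setup helpers from include/linux/pm_runtime.h; the foo_probe() name and the 2000 ms delay are illustrative assumptions, not taken from this file:

        static int foo_probe(struct device *dev)
        {
                pm_runtime_set_autosuspend_delay(dev, 2000);    /* stay active for 2 s after last use */
                pm_runtime_use_autosuspend(dev);                /* wraps __pm_runtime_use_autosuspend(dev, true) */
                pm_runtime_set_active(dev);                     /* hardware is already powered up */
                pm_runtime_enable(dev);                         /* drop the initial disable_depth of 1 */
                return 0;
        }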
    1540                 :            : 
    1541                 :            : /**
    1542                 :            :  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
    1543                 :            :  * @dev: Device to handle.
    1544                 :            :  * @use: New value for use_autosuspend.
    1545                 :            :  *
    1546                 :            :  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
    1547                 :            :  * suspends as needed.
    1548                 :            :  */
    1549                 :       2424 : void __pm_runtime_use_autosuspend(struct device *dev, bool use)
    1550                 :            : {
    1551                 :            :         int old_delay, old_use;
    1552                 :            : 
    1553                 :            :         spin_lock_irq(&dev->power.lock);
    1554                 :       2424 :         old_delay = dev->power.autosuspend_delay;
    1555                 :       2424 :         old_use = dev->power.use_autosuspend;
    1556                 :       2424 :         dev->power.use_autosuspend = use;
    1557                 :       2424 :         update_autosuspend(dev, old_delay, old_use);
    1558                 :            :         spin_unlock_irq(&dev->power.lock);
    1559                 :       2424 : }
    1560                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
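Drivers normally reach this function through the pm_runtime_use_autosuspend() and pm_runtime_dont_use_autosuspend() inline wrappers. A hedged sketch of the matching runtime I/O path (foo_transfer() is hypothetical):

        static int foo_transfer(struct device *dev)
        {
                int ret = pm_runtime_get_sync(dev);     /* resume the device, take a usage reference */
                if (ret < 0) {
                        pm_runtime_put_noidle(dev);     /* balance the reference on failure */
                        return ret;
                }

                /* ... perform the I/O ... */

                pm_runtime_mark_last_busy(dev);         /* restart the autosuspend clock */
                pm_runtime_put_autosuspend(dev);        /* drop the reference, arm the delayed suspend */
                return 0;
        }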
    1561                 :            : 
    1562                 :            : /**
     1563                 :            :  * pm_runtime_init - Initialize runtime PM fields in a given device object.
    1564                 :            :  * @dev: Device object to initialize.
    1565                 :            :  */
    1566                 :     124030 : void pm_runtime_init(struct device *dev)
    1567                 :            : {
    1568                 :     124030 :         dev->power.runtime_status = RPM_SUSPENDED;
    1569                 :     124030 :         dev->power.idle_notification = false;
    1570                 :            : 
    1571                 :     124030 :         dev->power.disable_depth = 1;
    1572                 :            :         atomic_set(&dev->power.usage_count, 0);
    1573                 :            : 
    1574                 :     124030 :         dev->power.runtime_error = 0;
    1575                 :            : 
    1576                 :            :         atomic_set(&dev->power.child_count, 0);
    1577                 :            :         pm_suspend_ignore_children(dev, false);
    1578                 :     124030 :         dev->power.runtime_auto = true;
    1579                 :            : 
    1580                 :     124030 :         dev->power.request_pending = false;
    1581                 :     124030 :         dev->power.request = RPM_REQ_NONE;
    1582                 :     124030 :         dev->power.deferred_resume = false;
    1583                 :     248060 :         INIT_WORK(&dev->power.work, pm_runtime_work);
    1584                 :            : 
    1585                 :     124030 :         dev->power.timer_expires = 0;
    1586                 :     124030 :         hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    1587                 :     124030 :         dev->power.suspend_timer.function = pm_suspend_timer_fn;
    1588                 :            : 
    1589                 :     124030 :         init_waitqueue_head(&dev->power.wait_queue);
    1590                 :     124030 : }
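Note that pm_runtime_init() leaves disable_depth at 1 with the status set to RPM_SUSPENDED, so runtime PM stays off until it is explicitly enabled. A minimal sketch of the bus/driver side that usually follows (the comments describe assumptions, not code from this file):

        /* After device registration, a bus type or driver typically does: */
        pm_runtime_set_active(dev);     /* only if the hardware is actually powered up */
        pm_runtime_enable(dev);         /* balances the disable_depth of 1 set above */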
    1591                 :            : 
    1592                 :            : /**
     1593                 :            :  * pm_runtime_reinit - Re-initialize runtime PM fields in a given device object.
    1594                 :            :  * @dev: Device object to re-initialize.
    1595                 :            :  */
    1596                 :       4556 : void pm_runtime_reinit(struct device *dev)
    1597                 :            : {
    1598         [ +  - ]:       4556 :         if (!pm_runtime_enabled(dev)) {
    1599         [ -  + ]:       4556 :                 if (dev->power.runtime_status == RPM_ACTIVE)
    1600                 :            :                         pm_runtime_set_suspended(dev);
    1601         [ -  + ]:       4556 :                 if (dev->power.irq_safe) {
    1602                 :            :                         spin_lock_irq(&dev->power.lock);
    1603                 :          0 :                         dev->power.irq_safe = 0;
    1604                 :            :                         spin_unlock_irq(&dev->power.lock);
    1605         [ #  # ]:          0 :                         if (dev->parent)
    1606                 :            :                                 pm_runtime_put(dev->parent);
    1607                 :            :                 }
    1608                 :            :         }
    1609                 :       4556 : }
    1610                 :            : 
    1611                 :            : /**
    1612                 :            :  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
    1613                 :            :  * @dev: Device object being removed from device hierarchy.
    1614                 :            :  */
    1615                 :        406 : void pm_runtime_remove(struct device *dev)
    1616                 :            : {
    1617                 :        406 :         __pm_runtime_disable(dev, false);
    1618                 :        406 :         pm_runtime_reinit(dev);
    1619                 :        406 : }
    1620                 :            : 
    1621                 :            : /**
    1622                 :            :  * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
    1623                 :            :  * @dev: Device whose driver is going to be removed.
    1624                 :            :  *
     1625                 :            :  * Check links from this device to any consumers, and if any of them have active

    1626                 :            :  * runtime PM references to the device, drop the usage counter of the device
    1627                 :            :  * (as many times as needed).
    1628                 :            :  *
    1629                 :            :  * Links with the DL_FLAG_MANAGED flag unset are ignored.
    1630                 :            :  *
    1631                 :            :  * Since the device is guaranteed to be runtime-active at the point this is
    1632                 :            :  * called, nothing else needs to be done here.
    1633                 :            :  *
    1634                 :            :  * Moreover, this is called after device_links_busy() has returned 'false', so
    1635                 :            :  * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
    1636                 :            :  * therefore rpm_active can't be manipulated concurrently.
    1637                 :            :  */
    1638                 :          0 : void pm_runtime_clean_up_links(struct device *dev)
    1639                 :            : {
    1640                 :            :         struct device_link *link;
    1641                 :            :         int idx;
    1642                 :            : 
    1643                 :          0 :         idx = device_links_read_lock();
    1644                 :            : 
    1645         [ #  # ]:          0 :         list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
    1646                 :            :                                 device_links_read_lock_held()) {
    1647         [ #  # ]:          0 :                 if (!(link->flags & DL_FLAG_MANAGED))
    1648                 :          0 :                         continue;
    1649                 :            : 
    1650         [ #  # ]:          0 :                 while (refcount_dec_not_one(&link->rpm_active))
    1651                 :            :                         pm_runtime_put_noidle(dev);
    1652                 :            :         }
    1653                 :            : 
    1654                 :          0 :         device_links_read_unlock(idx);
    1655                 :          0 : }
    1656                 :            : 
    1657                 :            : /**
    1658                 :            :  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
    1659                 :            :  * @dev: Consumer device.
    1660                 :            :  */
    1661                 :      19098 : void pm_runtime_get_suppliers(struct device *dev)
    1662                 :            : {
    1663                 :            :         struct device_link *link;
    1664                 :            :         int idx;
    1665                 :            : 
    1666                 :      19098 :         idx = device_links_read_lock();
    1667                 :            : 
    1668         [ -  + ]:      19098 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
    1669                 :            :                                 device_links_read_lock_held())
    1670         [ #  # ]:          0 :                 if (link->flags & DL_FLAG_PM_RUNTIME) {
    1671                 :          0 :                         link->supplier_preactivated = true;
    1672                 :          0 :                         refcount_inc(&link->rpm_active);
    1673                 :          0 :                         pm_runtime_get_sync(link->supplier);
    1674                 :            :                 }
    1675                 :            : 
    1676                 :      19098 :         device_links_read_unlock(idx);
    1677                 :      19098 : }
    1678                 :            : 
    1679                 :            : /**
    1680                 :            :  * pm_runtime_put_suppliers - Drop references to supplier devices.
    1681                 :            :  * @dev: Consumer device.
    1682                 :            :  */
    1683                 :      19098 : void pm_runtime_put_suppliers(struct device *dev)
    1684                 :            : {
    1685                 :            :         struct device_link *link;
    1686                 :            :         int idx;
    1687                 :            : 
    1688                 :      19098 :         idx = device_links_read_lock();
    1689                 :            : 
    1690         [ -  + ]:      19098 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
    1691                 :            :                                 device_links_read_lock_held())
    1692         [ #  # ]:          0 :                 if (link->supplier_preactivated) {
    1693                 :          0 :                         link->supplier_preactivated = false;
    1694         [ #  # ]:          0 :                         if (refcount_dec_not_one(&link->rpm_active))
    1695                 :          0 :                                 pm_runtime_put(link->supplier);
    1696                 :            :                 }
    1697                 :            : 
    1698                 :      19098 :         device_links_read_unlock(idx);
    1699                 :      19098 : }
    1700                 :            : 
    1701                 :          0 : void pm_runtime_new_link(struct device *dev)
    1702                 :            : {
    1703                 :            :         spin_lock_irq(&dev->power.lock);
    1704                 :          0 :         dev->power.links_count++;
    1705                 :            :         spin_unlock_irq(&dev->power.lock);
    1706                 :          0 : }
    1707                 :            : 
    1708                 :          0 : void pm_runtime_drop_link(struct device *dev)
    1709                 :            : {
    1710                 :            :         spin_lock_irq(&dev->power.lock);
    1711         [ #  # ]:          0 :         WARN_ON(dev->power.links_count == 0);
    1712                 :          0 :         dev->power.links_count--;
    1713                 :            :         spin_unlock_irq(&dev->power.lock);
    1714                 :          0 : }
    1715                 :            : 
    1716                 :            : static bool pm_runtime_need_not_resume(struct device *dev)
    1717                 :            : {
    1718   [ #  #  #  #  :          0 :         return atomic_read(&dev->power.usage_count) <= 1 &&
             #  #  #  # ]
    1719   [ #  #  #  # ]:          0 :                 (atomic_read(&dev->power.child_count) == 0 ||
    1720                 :          0 :                  dev->power.ignore_children);
    1721                 :            : }
    1722                 :            : 
    1723                 :            : /**
    1724                 :            :  * pm_runtime_force_suspend - Force a device into suspend state if needed.
    1725                 :            :  * @dev: Device to suspend.
    1726                 :            :  *
     1727                 :            :  * Disable runtime PM so we can safely check the device's runtime PM status and
    1728                 :            :  * if it is active, invoke its ->runtime_suspend callback to suspend it and
    1729                 :            :  * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
    1730                 :            :  * usage and children counters don't indicate that the device was in use before
    1731                 :            :  * the system-wide transition under way, decrement its parent's children counter
    1732                 :            :  * (if there is a parent).  Keep runtime PM disabled to preserve the state
    1733                 :            :  * unless we encounter errors.
    1734                 :            :  *
    1735                 :            :  * Typically this function may be invoked from a system suspend callback to make
     1736                 :            :  * sure the device is put into a low-power state, and it should only be used during
    1737                 :            :  * system-wide PM transitions to sleep states.  It assumes that the analogous
    1738                 :            :  * pm_runtime_force_resume() will be used to resume the device.
    1739                 :            :  */
    1740                 :          0 : int pm_runtime_force_suspend(struct device *dev)
    1741                 :            : {
    1742                 :            :         int (*callback)(struct device *);
    1743                 :            :         int ret;
    1744                 :            : 
    1745                 :            :         pm_runtime_disable(dev);
    1746         [ #  # ]:          0 :         if (pm_runtime_status_suspended(dev))
    1747                 :            :                 return 0;
    1748                 :            : 
    1749                 :          0 :         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
    1750                 :            : 
    1751         [ #  # ]:          0 :         ret = callback ? callback(dev) : 0;
    1752         [ #  # ]:          0 :         if (ret)
    1753                 :            :                 goto err;
    1754                 :            : 
    1755                 :            :         /*
    1756                 :            :          * If the device can stay in suspend after the system-wide transition
    1757                 :            :          * to the working state that will follow, drop the children counter of
    1758                 :            :          * its parent, but set its status to RPM_SUSPENDED anyway in case this
    1759                 :            :          * function will be called again for it in the meantime.
    1760                 :            :          */
    1761         [ #  # ]:          0 :         if (pm_runtime_need_not_resume(dev))
    1762                 :            :                 pm_runtime_set_suspended(dev);
    1763                 :            :         else
    1764                 :            :                 __update_runtime_status(dev, RPM_SUSPENDED);
    1765                 :            : 
    1766                 :            :         return 0;
    1767                 :            : 
    1768                 :            : err:
    1769                 :          0 :         pm_runtime_enable(dev);
    1770                 :          0 :         return ret;
    1771                 :            : }
    1772                 :            : EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
    1773                 :            : 
    1774                 :            : /**
    1775                 :            :  * pm_runtime_force_resume - Force a device into resume state if needed.
    1776                 :            :  * @dev: Device to resume.
    1777                 :            :  *
     1778                 :            :  * Prior to invoking this function we expect the user to have brought the device
     1779                 :            :  * into a low-power state by a call to pm_runtime_force_suspend().  Here we reverse
    1780                 :            :  * those actions and bring the device into full power, if it is expected to be
    1781                 :            :  * used on system resume.  In the other case, we defer the resume to be managed
    1782                 :            :  * via runtime PM.
    1783                 :            :  *
    1784                 :            :  * Typically this function may be invoked from a system resume callback.
    1785                 :            :  */
    1786                 :          0 : int pm_runtime_force_resume(struct device *dev)
    1787                 :            : {
    1788                 :            :         int (*callback)(struct device *);
    1789                 :            :         int ret = 0;
    1790                 :            : 
    1791   [ #  #  #  # ]:          0 :         if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
    1792                 :            :                 goto out;
    1793                 :            : 
    1794                 :            :         /*
    1795                 :            :          * The value of the parent's children counter is correct already, so
    1796                 :            :          * just update the status of the device.
    1797                 :            :          */
    1798                 :            :         __update_runtime_status(dev, RPM_ACTIVE);
    1799                 :            : 
    1800                 :          0 :         callback = RPM_GET_CALLBACK(dev, runtime_resume);
    1801                 :            : 
    1802         [ #  # ]:          0 :         ret = callback ? callback(dev) : 0;
    1803         [ #  # ]:          0 :         if (ret) {
    1804                 :            :                 pm_runtime_set_suspended(dev);
    1805                 :            :                 goto out;
    1806                 :            :         }
    1807                 :            : 
    1808                 :            :         pm_runtime_mark_last_busy(dev);
    1809                 :            : out:
    1810                 :          0 :         pm_runtime_enable(dev);
    1811                 :          0 :         return ret;
    1812                 :            : }
    1813                 :            : EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
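A common way to wire these two helpers up, in line with the kernel-doc above, is to reuse them directly as the system sleep callbacks of a driver's dev_pm_ops. A minimal sketch; the foo_* names are illustrative stubs, while SET_RUNTIME_PM_OPS and SET_SYSTEM_SLEEP_PM_OPS are the standard macros from include/linux/pm.h:

        static int foo_runtime_suspend(struct device *dev) { return 0; }   /* hypothetical stub */
        static int foo_runtime_resume(struct device *dev)  { return 0; }   /* hypothetical stub */

        static const struct dev_pm_ops foo_pm_ops = {
                SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
                SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                        pm_runtime_force_resume)
        };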

Generated by: LCOV version 1.14