LCOV - code coverage report
Current view: top level - drivers/base/power - runtime.c (source / functions)
Test: combined.info
Date: 2022-03-28 13:20:08

                 Hit    Total    Coverage
Lines:           353      743     47.5 %
Functions:        25       46     54.3 %
Branches:        151      457     33.0 %

           Branch data     Line data    Source code
       1                 :            : // SPDX-License-Identifier: GPL-2.0
       2                 :            : /*
       3                 :            :  * drivers/base/power/runtime.c - Helper functions for device runtime PM
       4                 :            :  *
       5                 :            :  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
       6                 :            :  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
       7                 :            :  */
       8                 :            : #include <linux/sched/mm.h>
       9                 :            : #include <linux/ktime.h>
      10                 :            : #include <linux/hrtimer.h>
      11                 :            : #include <linux/export.h>
      12                 :            : #include <linux/pm_runtime.h>
      13                 :            : #include <linux/pm_wakeirq.h>
      14                 :            : #include <trace/events/rpm.h>
      15                 :            : 
      16                 :            : #include "../base.h"
      17                 :            : #include "power.h"
      18                 :            : 
      19                 :            : typedef int (*pm_callback_t)(struct device *);
      20                 :            : 
      21                 :        180 : static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
      22                 :            : {
      23                 :        180 :         pm_callback_t cb;
      24                 :        180 :         const struct dev_pm_ops *ops;
      25                 :            : 
      26         [ -  + ]:        180 :         if (dev->pm_domain)
      27                 :          0 :                 ops = &dev->pm_domain->ops;
      28   [ +  -  +  - ]:        180 :         else if (dev->type && dev->type->pm)
      29                 :            :                 ops = dev->type->pm;
      30   [ -  +  -  - ]:        180 :         else if (dev->class && dev->class->pm)
      31                 :            :                 ops = dev->class->pm;
      32   [ +  -  +  - ]:        180 :         else if (dev->bus && dev->bus->pm)
      33                 :            :                 ops = dev->bus->pm;
      34                 :            :         else
      35                 :            :                 ops = NULL;
      36                 :            : 
      37         [ +  - ]:        180 :         if (ops)
      38                 :        180 :                 cb = *(pm_callback_t *)((void *)ops + cb_offset);
      39                 :            :         else
      40                 :            :                 cb = NULL;
      41                 :            : 
      42   [ -  +  -  -  :        180 :         if (!cb && dev->driver && dev->driver->pm)
                   -  - ]
      43                 :          0 :                 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
      44                 :            : 
      45                 :        180 :         return cb;
      46                 :            : }
      47                 :            : 
      48                 :            : #define RPM_GET_CALLBACK(dev, callback) \
      49                 :            :                 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
      50                 :            : 
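RPM_GET_CALLBACK() picks a runtime PM callback out of struct dev_pm_ops purely by byte
offset, so the offsetof() form it expands to is interchangeable with naming the member
directly.  A minimal sketch of that equivalence, reusing the pm_callback_t typedef above
(the my_* names are hypothetical and not part of this file):

        #include <linux/device.h>
        #include <linux/stddef.h>

        static int my_runtime_suspend(struct device *dev)
        {
                return 0;
        }

        static const struct dev_pm_ops my_ops = {
                .runtime_suspend = my_runtime_suspend,
        };

        /* Both lookups yield the same function pointer. */
        static pm_callback_t my_cb_by_offset(void)
        {
                return *(pm_callback_t *)((void *)&my_ops +
                                offsetof(struct dev_pm_ops, runtime_suspend));
        }

        static pm_callback_t my_cb_direct(void)
        {
                return my_ops.runtime_suspend;
        }
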
      51                 :            : static int rpm_resume(struct device *dev, int rpmflags);
      52                 :            : static int rpm_suspend(struct device *dev, int rpmflags);
      53                 :            : 
      54                 :            : /**
      55                 :            :  * update_pm_runtime_accounting - Update the time accounting of power states
      56                 :            :  * @dev: Device to update the accounting for
      57                 :            :  *
      58                 :            :  * In order to be able to have time accounting of the various power states
      59                 :            :  * (as used by programs such as PowerTOP to show the effectiveness of runtime
      60                 :            :  * PM), we need to track the time spent in each state.
      61                 :            :  * update_pm_runtime_accounting must be called each time before the
      62                 :            :  * runtime_status field is updated, to account the time in the old state
      63                 :            :  * correctly.
      64                 :            :  */
      65                 :        750 : static void update_pm_runtime_accounting(struct device *dev)
      66                 :            : {
      67                 :        750 :         u64 now, last, delta;
      68                 :            : 
      69         [ +  + ]:        750 :         if (dev->power.disable_depth > 0)
      70                 :            :                 return;
      71                 :            : 
      72                 :        240 :         last = dev->power.accounting_timestamp;
      73                 :            : 
      74                 :        240 :         now = ktime_get_mono_fast_ns();
      75                 :        240 :         dev->power.accounting_timestamp = now;
      76                 :            : 
      77                 :            :         /*
      78                 :            :          * Because ktime_get_mono_fast_ns() is not monotonic during
      79                 :            :          * timekeeping updates, ensure that 'now' is after the last saved
       80                 :            :          * timestamp.
      81                 :            :          */
      82         [ +  - ]:        240 :         if (now < last)
      83                 :            :                 return;
      84                 :            : 
      85                 :        240 :         delta = now - last;
      86                 :            : 
      87         [ +  + ]:        240 :         if (dev->power.runtime_status == RPM_SUSPENDED)
      88                 :         60 :                 dev->power.suspended_time += delta;
      89                 :            :         else
      90                 :        180 :                 dev->power.active_time += delta;
      91                 :            : }
      92                 :            : 
      93                 :        750 : static void __update_runtime_status(struct device *dev, enum rpm_status status)
      94                 :            : {
      95                 :        750 :         update_pm_runtime_accounting(dev);
      96                 :        750 :         dev->power.runtime_status = status;
      97                 :          0 : }
      98                 :            : 
      99                 :          0 : static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
     100                 :            : {
     101                 :          0 :         u64 time;
     102                 :          0 :         unsigned long flags;
     103                 :            : 
     104                 :          0 :         spin_lock_irqsave(&dev->power.lock, flags);
     105                 :            : 
     106                 :          0 :         update_pm_runtime_accounting(dev);
     107         [ #  # ]:          0 :         time = suspended ? dev->power.suspended_time : dev->power.active_time;
     108                 :            : 
     109                 :          0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
     110                 :            : 
     111                 :          0 :         return time;
     112                 :            : }
     113                 :            : 
     114                 :          0 : u64 pm_runtime_active_time(struct device *dev)
     115                 :            : {
     116                 :          0 :         return rpm_get_accounted_time(dev, false);
     117                 :            : }
     118                 :            : 
     119                 :          0 : u64 pm_runtime_suspended_time(struct device *dev)
     120                 :            : {
     121                 :          0 :         return rpm_get_accounted_time(dev, true);
     122                 :            : }
     123                 :            : EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
     124                 :            : 
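The active/suspended totals maintained by update_pm_runtime_accounting() are also exposed
in sysfs as power/runtime_active_time and power/runtime_suspended_time (in milliseconds).
From kernel code, the exported getter above can be read directly; a hedged sketch, assuming
the declaration comes from <linux/pm_runtime.h> (foo_report_sleep_time is hypothetical):

        #include <linux/device.h>
        #include <linux/pm_runtime.h>

        static void foo_report_sleep_time(struct device *dev)
        {
                u64 ns = pm_runtime_suspended_time(dev);

                dev_info(dev, "runtime-suspended for %llu ns so far\n",
                         (unsigned long long)ns);
        }
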
     125                 :            : /**
     126                 :            :  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
     127                 :            :  * @dev: Device to handle.
     128                 :            :  */
     129                 :       2472 : static void pm_runtime_deactivate_timer(struct device *dev)
     130                 :            : {
     131                 :       2472 :         if (dev->power.timer_expires > 0) {
     132                 :          0 :                 hrtimer_try_to_cancel(&dev->power.suspend_timer);
     133                 :          0 :                 dev->power.timer_expires = 0;
     134                 :            :         }
     135                 :            : }
     136                 :            : 
     137                 :            : /**
     138                 :            :  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
     139                 :            :  * @dev: Device to handle.
     140                 :            :  */
     141                 :         60 : static void pm_runtime_cancel_pending(struct device *dev)
     142                 :            : {
     143                 :         60 :         pm_runtime_deactivate_timer(dev);
     144                 :            :         /*
     145                 :            :          * In case there's a request pending, make sure its work function will
     146                 :            :          * return without doing anything.
     147                 :            :          */
     148                 :         60 :         dev->power.request = RPM_REQ_NONE;
     149                 :          0 : }
     150                 :            : 
     151                 :            : /*
     152                 :            :  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
     153                 :            :  * @dev: Device to handle.
     154                 :            :  *
     155                 :            :  * Compute the autosuspend-delay expiration time based on the device's
     156                 :            :  * power.last_busy time.  If the delay has already expired or is disabled
     157                 :            :  * (negative) or the power.use_autosuspend flag isn't set, return 0.
     158                 :            :  * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
     159                 :            :  *
     160                 :            :  * This function may be called either with or without dev->power.lock held.
     161                 :            :  * Either way it can be racy, since power.last_busy may be updated at any time.
     162                 :            :  */
     163                 :         60 : u64 pm_runtime_autosuspend_expiration(struct device *dev)
     164                 :            : {
     165                 :         60 :         int autosuspend_delay;
     166                 :         60 :         u64 expires;
     167                 :            : 
     168         [ -  + ]:         60 :         if (!dev->power.use_autosuspend)
     169                 :            :                 return 0;
     170                 :            : 
     171         [ #  # ]:          0 :         autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
     172         [ #  # ]:          0 :         if (autosuspend_delay < 0)
     173                 :            :                 return 0;
     174                 :            : 
     175                 :          0 :         expires  = READ_ONCE(dev->power.last_busy);
     176                 :          0 :         expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
     177         [ #  # ]:          0 :         if (expires > ktime_get_mono_fast_ns())
     178                 :          0 :                 return expires; /* Expires in the future */
     179                 :            : 
     180                 :            :         return 0;
     181                 :            : }
     182                 :            : EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
     183                 :            : 
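The expiration computed above is simply power.last_busy plus the autosuspend delay,
converted from milliseconds to nanoseconds.  A hedged sketch of the usual driver-side
pattern that feeds those two values (the foo_* names are hypothetical):

        #include <linux/pm_runtime.h>

        static void foo_enable_autosuspend(struct device *dev)
        {
                pm_runtime_set_autosuspend_delay(dev, 2000);    /* 2 s of idleness */
                pm_runtime_use_autosuspend(dev);
                pm_runtime_enable(dev);
        }

        static int foo_do_io(struct device *dev)
        {
                int ret = pm_runtime_get_sync(dev);             /* resume if needed */

                if (ret < 0) {
                        pm_runtime_put_noidle(dev);
                        return ret;
                }

                /* ... talk to the hardware ... */

                pm_runtime_mark_last_busy(dev);         /* refresh power.last_busy */
                pm_runtime_put_autosuspend(dev);        /* suspend ~2 s from now */
                return 0;
        }
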
     184                 :          0 : static int dev_memalloc_noio(struct device *dev, void *data)
     185                 :            : {
     186                 :          0 :         return dev->power.memalloc_noio;
     187                 :            : }
     188                 :            : 
     189                 :            : /*
     190                 :            :  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
     191                 :            :  * @dev: Device to handle.
     192                 :            :  * @enable: True for setting the flag and False for clearing the flag.
     193                 :            :  *
     194                 :            :  * Set the flag for all devices in the path from the device to the
     195                 :            :  * root device in the device tree if @enable is true, otherwise clear
     196                 :            :  * the flag for devices in the path whose siblings don't set the flag.
     197                 :            :  *
      198                 :            :  * The function should only be called by a block device or network
      199                 :            :  * device driver to solve the deadlock problem during runtime
      200                 :            :  * resume/suspend:
      201                 :            :  *
      202                 :            :  *     If a memory allocation with GFP_KERNEL is performed inside the
      203                 :            :  *     runtime resume/suspend callback of any one of its ancestors (or
      204                 :            :  *     the block device itself), a deadlock may be triggered inside the
      205                 :            :  *     memory allocation, since it might not complete until the block
      206                 :            :  *     device becomes active and the involved page I/O finishes.  This
      207                 :            :  *     situation was first pointed out by Alan Stern.  Network devices
      208                 :            :  *     are involved in iSCSI-like situations.
     209                 :            :  *
      210                 :            :  * dev_hotplug_mutex is held in this function to handle the hotplug
      211                 :            :  * race, because pm_runtime_set_memalloc_noio() may be called from an
      212                 :            :  * asynchronous probe().
      213                 :            :  *
      214                 :            :  * The function should be called between device_add() and device_del()
      215                 :            :  * on the affected device (block/network device).
     216                 :            :  */
     217                 :        426 : void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
     218                 :            : {
     219                 :        426 :         static DEFINE_MUTEX(dev_hotplug_mutex);
     220                 :            : 
     221                 :        426 :         mutex_lock(&dev_hotplug_mutex);
     222                 :        858 :         for (;;) {
     223                 :        858 :                 bool enabled;
     224                 :            : 
     225                 :            :                 /* hold power lock since bitfield is not SMP-safe. */
     226                 :        858 :                 spin_lock_irq(&dev->power.lock);
     227                 :        858 :                 enabled = dev->power.memalloc_noio;
     228                 :        858 :                 dev->power.memalloc_noio = enable;
     229                 :        858 :                 spin_unlock_irq(&dev->power.lock);
     230                 :            : 
     231                 :            :                 /*
      232                 :            :                  * No need to set the flag on any ancestors if it has
      233                 :            :                  * already been set on this device.
     234                 :            :                  */
     235         [ +  + ]:        858 :                 if (enabled && enable)
     236                 :            :                         break;
     237                 :            : 
     238                 :        792 :                 dev = dev->parent;
     239                 :            : 
     240                 :            :                 /*
      241                 :            :                  * Clear the parent's flag only if none of its children
      242                 :            :                  * have it set, because an ancestor's flag may have been
      243                 :            :                  * set by any one of its descendants.
     244                 :            :                  */
     245   [ +  +  +  -  :        792 :                 if (!dev || (!enable &&
                   -  - ]
     246                 :          0 :                              device_for_each_child(dev, NULL,
     247                 :            :                                                    dev_memalloc_noio)))
     248                 :            :                         break;
     249                 :            :         }
     250                 :        426 :         mutex_unlock(&dev_hotplug_mutex);
     251                 :        426 : }
     252                 :            : EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
     253                 :            : 
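A hedged sketch of the calling convention described above: set the flag after device_add()
and clear it before device_del() (the foo_* wrappers are hypothetical, not part of this file):

        #include <linux/device.h>
        #include <linux/pm_runtime.h>

        static int foo_register_io_dev(struct device *dev)
        {
                int ret = device_add(dev);

                if (ret)
                        return ret;

                /* Block/network device: keep runtime PM callbacks NOIO-safe. */
                pm_runtime_set_memalloc_noio(dev, true);
                return 0;
        }

        static void foo_unregister_io_dev(struct device *dev)
        {
                pm_runtime_set_memalloc_noio(dev, false);
                device_del(dev);
        }
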
     254                 :            : /**
     255                 :            :  * rpm_check_suspend_allowed - Test whether a device may be suspended.
     256                 :            :  * @dev: Device to test.
     257                 :            :  */
     258                 :      10203 : static int rpm_check_suspend_allowed(struct device *dev)
     259                 :            : {
     260                 :      10203 :         int retval = 0;
     261                 :            : 
     262         [ +  - ]:      10203 :         if (dev->power.runtime_error)
     263                 :            :                 retval = -EINVAL;
     264         [ +  + ]:      10203 :         else if (dev->power.disable_depth > 0)
     265                 :            :                 retval = -EACCES;
     266         [ +  + ]:       1413 :         else if (atomic_read(&dev->power.usage_count) > 0)
     267                 :            :                 retval = -EAGAIN;
     268   [ +  -  +  + ]:        666 :         else if (!dev->power.ignore_children &&
     269                 :        333 :                         atomic_read(&dev->power.child_count))
     270                 :            :                 retval = -EBUSY;
     271                 :            : 
     272                 :            :         /* Pending resume requests take precedence over suspends. */
     273         [ -  + ]:        120 :         else if ((dev->power.deferred_resume
     274         [ #  # ]:          0 :                         && dev->power.runtime_status == RPM_SUSPENDING)
     275         [ -  + ]:        120 :             || (dev->power.request_pending
     276         [ #  # ]:          0 :                         && dev->power.request == RPM_REQ_RESUME))
     277                 :            :                 retval = -EAGAIN;
     278         [ +  - ]:        120 :         else if (__dev_pm_qos_resume_latency(dev) == 0)
     279                 :            :                 retval = -EPERM;
     280         [ -  + ]:        120 :         else if (dev->power.runtime_status == RPM_SUSPENDED)
     281                 :          0 :                 retval = 1;
     282                 :            : 
     283                 :      10203 :         return retval;
     284                 :            : }
     285                 :            : 
     286                 :        510 : static int rpm_get_suppliers(struct device *dev)
     287                 :            : {
     288                 :        510 :         struct device_link *link;
     289                 :            : 
     290         [ -  + ]:        510 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
     291                 :            :                                 device_links_read_lock_held()) {
     292                 :          0 :                 int retval;
     293                 :            : 
     294   [ #  #  #  # ]:          0 :                 if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
     295         [ #  # ]:          0 :                     READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
     296                 :          0 :                         continue;
     297                 :            : 
     298                 :          0 :                 retval = pm_runtime_get_sync(link->supplier);
     299                 :            :                 /* Ignore suppliers with disabled runtime PM. */
     300         [ #  # ]:          0 :                 if (retval < 0 && retval != -EACCES) {
     301                 :          0 :                         pm_runtime_put_noidle(link->supplier);
     302                 :          0 :                         return retval;
     303                 :            :                 }
     304                 :          0 :                 refcount_inc(&link->rpm_active);
     305                 :            :         }
     306                 :            :         return 0;
     307                 :            : }
     308                 :            : 
     309                 :          0 : static void rpm_put_suppliers(struct device *dev)
     310                 :            : {
     311                 :          0 :         struct device_link *link;
     312                 :            : 
     313         [ #  # ]:          0 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
     314                 :            :                                 device_links_read_lock_held()) {
     315         [ #  # ]:          0 :                 if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
     316                 :          0 :                         continue;
     317                 :            : 
     318         [ #  # ]:          0 :                 while (refcount_dec_not_one(&link->rpm_active))
     319                 :          0 :                         pm_runtime_put(link->supplier);
     320                 :            :         }
     321                 :          0 : }
     322                 :            : 
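rpm_get_suppliers() and rpm_put_suppliers() only act on device links created with
DL_FLAG_PM_RUNTIME.  A hedged sketch of how a consumer driver typically sets up such a
link with device_link_add() (the surrounding foo_* function is hypothetical):

        #include <linux/device.h>
        #include <linux/errno.h>

        static int foo_bind_supplier(struct device *consumer, struct device *supplier)
        {
                struct device_link *link;

                link = device_link_add(consumer, supplier,
                                       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
                if (!link)
                        return -ENODEV;

                /*
                 * From now on, runtime-resuming the consumer takes a usage
                 * reference on the supplier (tracked via link->rpm_active),
                 * and suspending the consumer drops it again.
                 */
                return 0;
        }
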
     323                 :            : /**
     324                 :            :  * __rpm_callback - Run a given runtime PM callback for a given device.
     325                 :            :  * @cb: Runtime PM callback to run.
     326                 :            :  * @dev: Device to run the callback for.
     327                 :            :  */
     328                 :        180 : static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
     329                 :            :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     330                 :            : {
     331                 :        180 :         int retval, idx;
     332                 :        180 :         bool use_links = dev->power.links_count > 0;
     333                 :            : 
     334         [ -  + ]:        180 :         if (dev->power.irq_safe) {
     335                 :          0 :                 spin_unlock(&dev->power.lock);
     336                 :            :         } else {
     337                 :        180 :                 spin_unlock_irq(&dev->power.lock);
     338                 :            : 
     339                 :            :                 /*
     340                 :            :                  * Resume suppliers if necessary.
     341                 :            :                  *
     342                 :            :                  * The device's runtime PM status cannot change until this
     343                 :            :                  * routine returns, so it is safe to read the status outside of
     344                 :            :                  * the lock.
     345                 :            :                  */
     346   [ -  +  -  - ]:        180 :                 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
     347                 :          0 :                         idx = device_links_read_lock();
     348                 :            : 
     349                 :          0 :                         retval = rpm_get_suppliers(dev);
     350         [ #  # ]:          0 :                         if (retval)
     351                 :          0 :                                 goto fail;
     352                 :            : 
     353                 :          0 :                         device_links_read_unlock(idx);
     354                 :            :                 }
     355                 :            :         }
     356                 :            : 
     357                 :        180 :         retval = cb(dev);
     358                 :            : 
     359         [ -  + ]:        180 :         if (dev->power.irq_safe) {
     360                 :          0 :                 spin_lock(&dev->power.lock);
     361                 :            :         } else {
     362                 :            :                 /*
     363                 :            :                  * If the device is suspending and the callback has returned
     364                 :            :                  * success, drop the usage counters of the suppliers that have
     365                 :            :                  * been reference counted on its resume.
     366                 :            :                  *
     367                 :            :                  * Do that if resume fails too.
     368                 :            :                  */
     369         [ -  + ]:        180 :                 if (use_links
     370   [ #  #  #  # ]:          0 :                     && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
     371   [ #  #  #  # ]:          0 :                     || (dev->power.runtime_status == RPM_RESUMING && retval))) {
     372                 :          0 :                         idx = device_links_read_lock();
     373                 :            : 
     374                 :          0 :  fail:
     375                 :          0 :                         rpm_put_suppliers(dev);
     376                 :            : 
     377                 :          0 :                         device_links_read_unlock(idx);
     378                 :            :                 }
     379                 :            : 
     380                 :        180 :                 spin_lock_irq(&dev->power.lock);
     381                 :            :         }
     382                 :            : 
     383                 :        180 :         return retval;
     384                 :            : }
     385                 :            : 
     386                 :            : /**
     387                 :            :  * rpm_idle - Notify device bus type if the device can be suspended.
     388                 :            :  * @dev: Device to notify the bus type about.
     389                 :            :  * @rpmflags: Flag bits.
     390                 :            :  *
     391                 :            :  * Check if the device's runtime PM status allows it to be suspended.  If
     392                 :            :  * another idle notification has been started earlier, return immediately.  If
     393                 :            :  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
     394                 :            :  * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
     395                 :            :  * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
     396                 :            :  *
     397                 :            :  * This function must be called under dev->power.lock with interrupts disabled.
     398                 :            :  */
     399                 :      10143 : static int rpm_idle(struct device *dev, int rpmflags)
     400                 :            : {
     401                 :      10143 :         int (*callback)(struct device *);
     402                 :      10143 :         int retval;
     403                 :            : 
     404                 :      10143 :         trace_rpm_idle_rcuidle(dev, rpmflags);
     405                 :      10143 :         retval = rpm_check_suspend_allowed(dev);
     406         [ +  + ]:      10143 :         if (retval < 0)
     407                 :            :                 ;       /* Conditions are wrong. */
     408                 :            : 
     409                 :            :         /* Idle notifications are allowed only in the RPM_ACTIVE state. */
     410         [ +  - ]:         60 :         else if (dev->power.runtime_status != RPM_ACTIVE)
     411                 :            :                 retval = -EAGAIN;
     412                 :            : 
     413                 :            :         /*
     414                 :            :          * Any pending request other than an idle notification takes
     415                 :            :          * precedence over us, except that the timer may be running.
     416                 :            :          */
     417         [ -  + ]:         60 :         else if (dev->power.request_pending &&
     418         [ #  # ]:          0 :             dev->power.request > RPM_REQ_IDLE)
     419                 :            :                 retval = -EAGAIN;
     420                 :            : 
     421                 :            :         /* Act as though RPM_NOWAIT is always set. */
     422         [ +  - ]:         60 :         else if (dev->power.idle_notification)
     423                 :            :                 retval = -EINPROGRESS;
     424         [ +  + ]:      10143 :         if (retval)
     425                 :      10083 :                 goto out;
     426                 :            : 
     427                 :            :         /* Pending requests need to be canceled. */
     428                 :         60 :         dev->power.request = RPM_REQ_NONE;
     429                 :            : 
     430         [ -  + ]:         60 :         if (dev->power.no_callbacks)
     431                 :          0 :                 goto out;
     432                 :            : 
     433                 :            :         /* Carry out an asynchronous or a synchronous idle notification. */
     434         [ -  + ]:         60 :         if (rpmflags & RPM_ASYNC) {
     435                 :          0 :                 dev->power.request = RPM_REQ_IDLE;
     436         [ #  # ]:          0 :                 if (!dev->power.request_pending) {
     437                 :          0 :                         dev->power.request_pending = true;
     438                 :          0 :                         queue_work(pm_wq, &dev->power.work);
     439                 :            :                 }
     440                 :          0 :                 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
     441                 :          0 :                 return 0;
     442                 :            :         }
     443                 :            : 
     444                 :         60 :         dev->power.idle_notification = true;
     445                 :            : 
     446                 :         60 :         callback = RPM_GET_CALLBACK(dev, runtime_idle);
     447                 :            : 
     448         [ +  - ]:         60 :         if (callback)
     449                 :         60 :                 retval = __rpm_callback(callback, dev);
     450                 :            : 
     451                 :         60 :         dev->power.idle_notification = false;
     452                 :         60 :         wake_up_all(&dev->power.wait_queue);
     453                 :            : 
     454                 :      10143 :  out:
     455                 :      10143 :         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
     456         [ +  + ]:      10143 :         return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
     457                 :            : }
     458                 :            : 
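A hedged sketch of a ->runtime_idle() callback consistent with the flow above: any non-zero
return vetoes the suspend, while 0 (or having no callback at all) lets rpm_idle() fall
through to rpm_suspend(dev, RPM_AUTO).  The foo_* names are hypothetical:

        #include <linux/device.h>
        #include <linux/errno.h>

        static bool foo_still_busy(struct device *dev)
        {
                return false;           /* hypothetical driver-specific check */
        }

        static int foo_runtime_idle(struct device *dev)
        {
                if (foo_still_busy(dev))
                        return -EBUSY;  /* stay RPM_ACTIVE */

                return 0;               /* allow the autosuspend path */
        }
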
     459                 :            : /**
     460                 :            :  * rpm_callback - Run a given runtime PM callback for a given device.
     461                 :            :  * @cb: Runtime PM callback to run.
     462                 :            :  * @dev: Device to run the callback for.
     463                 :            :  */
     464                 :        120 : static int rpm_callback(int (*cb)(struct device *), struct device *dev)
     465                 :            : {
     466                 :        120 :         int retval;
     467                 :            : 
     468         [ +  - ]:        120 :         if (!cb)
     469                 :            :                 return -ENOSYS;
     470                 :            : 
     471         [ -  + ]:        120 :         if (dev->power.memalloc_noio) {
     472                 :          0 :                 unsigned int noio_flag;
     473                 :            : 
     474                 :            :                 /*
      475                 :            :                  * A deadlock might occur if a memory allocation with
      476                 :            :                  * GFP_KERNEL happens inside the runtime_suspend or
      477                 :            :                  * runtime_resume callback of a block device's ancestor
      478                 :            :                  * or of the block device itself.  A network device may
      479                 :            :                  * be part of an iSCSI block device's path, so network
      480                 :            :                  * devices and their ancestors should be marked as
      481                 :            :                  * memalloc_noio too.
     482                 :            :                  */
     483                 :          0 :                 noio_flag = memalloc_noio_save();
     484                 :          0 :                 retval = __rpm_callback(cb, dev);
     485                 :          0 :                 memalloc_noio_restore(noio_flag);
     486                 :            :         } else {
     487                 :        120 :                 retval = __rpm_callback(cb, dev);
     488                 :            :         }
     489                 :            : 
     490                 :        120 :         dev->power.runtime_error = retval;
     491         [ -  + ]:        120 :         return retval != -EACCES ? retval : -EIO;
     492                 :            : }
     493                 :            : 
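The memalloc_noio scope used above is a general pattern: between memalloc_noio_save() and
memalloc_noio_restore(), GFP_KERNEL allocations behave as if GFP_NOIO had been passed, so
reclaim cannot recurse into block I/O that depends on this very device.  A hedged,
self-contained sketch (foo_alloc_noio is hypothetical):

        #include <linux/sched/mm.h>
        #include <linux/slab.h>

        static void *foo_alloc_noio(size_t size)
        {
                unsigned int noio_flags = memalloc_noio_save();
                void *buf = kmalloc(size, GFP_KERNEL); /* implicitly NOIO here */

                memalloc_noio_restore(noio_flags);
                return buf;
        }
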
     494                 :            : /**
     495                 :            :  * rpm_suspend - Carry out runtime suspend of given device.
     496                 :            :  * @dev: Device to suspend.
     497                 :            :  * @rpmflags: Flag bits.
     498                 :            :  *
     499                 :            :  * Check if the device's runtime PM status allows it to be suspended.
     500                 :            :  * Cancel a pending idle notification, autosuspend or suspend. If
     501                 :            :  * another suspend has been started earlier, either return immediately
     502                 :            :  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
     503                 :            :  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
     504                 :            :  * otherwise run the ->runtime_suspend() callback directly. When
      505                 :            :  * otherwise run the ->runtime_suspend() callback directly.  If
      506                 :            :  * ->runtime_suspend() succeeds and a deferred resume was requested while
      507                 :            :  * the callback was running, carry out that resume; otherwise send an idle
      508                 :            :  * notification to the device's parent (provided that ignore_children of
      509                 :            :  * parent->power and irq_safe of dev->power are both unset).
     510                 :            :  * flag is set and the next autosuspend-delay expiration time is in the
     511                 :            :  * future, schedule another autosuspend attempt.
     512                 :            :  *
     513                 :            :  * This function must be called under dev->power.lock with interrupts disabled.
     514                 :            :  */
     515                 :         60 : static int rpm_suspend(struct device *dev, int rpmflags)
     516                 :            :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     517                 :            : {
     518                 :         60 :         int (*callback)(struct device *);
     519                 :         60 :         struct device *parent = NULL;
     520                 :         60 :         int retval;
     521                 :            : 
     522                 :         60 :         trace_rpm_suspend_rcuidle(dev, rpmflags);
     523                 :            : 
     524                 :            :  repeat:
     525                 :         60 :         retval = rpm_check_suspend_allowed(dev);
     526                 :            : 
     527         [ +  - ]:         60 :         if (retval < 0)
     528                 :            :                 ;       /* Conditions are wrong. */
     529                 :            : 
     530                 :            :         /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
     531         [ -  + ]:         60 :         else if (dev->power.runtime_status == RPM_RESUMING &&
     532         [ #  # ]:          0 :             !(rpmflags & RPM_ASYNC))
     533                 :            :                 retval = -EAGAIN;
     534         [ -  + ]:         60 :         if (retval)
     535                 :          0 :                 goto out;
     536                 :            : 
     537                 :            :         /* If the autosuspend_delay time hasn't expired yet, reschedule. */
     538         [ +  - ]:         60 :         if ((rpmflags & RPM_AUTO)
     539         [ +  - ]:         60 :             && dev->power.runtime_status != RPM_SUSPENDING) {
     540                 :         60 :                 u64 expires = pm_runtime_autosuspend_expiration(dev);
     541                 :            : 
     542         [ -  + ]:         60 :                 if (expires != 0) {
     543                 :            :                         /* Pending requests need to be canceled. */
     544                 :          0 :                         dev->power.request = RPM_REQ_NONE;
     545                 :            : 
     546                 :            :                         /*
     547                 :            :                          * Optimization: If the timer is already running and is
     548                 :            :                          * set to expire at or before the autosuspend delay,
     549                 :            :                          * avoid the overhead of resetting it.  Just let it
     550                 :            :                          * expire; pm_suspend_timer_fn() will take care of the
     551                 :            :                          * rest.
     552                 :            :                          */
     553   [ #  #  #  # ]:          0 :                         if (!(dev->power.timer_expires &&
     554                 :            :                                         dev->power.timer_expires <= expires)) {
     555                 :            :                                 /*
     556                 :            :                                  * We add a slack of 25% to gather wakeups
     557                 :            :                                  * without sacrificing the granularity.
     558                 :            :                                  */
     559                 :          0 :                                 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
     560                 :            :                                                     (NSEC_PER_MSEC >> 2);
     561                 :            : 
     562                 :          0 :                                 dev->power.timer_expires = expires;
     563                 :          0 :                                 hrtimer_start_range_ns(&dev->power.suspend_timer,
     564                 :            :                                                 ns_to_ktime(expires),
     565                 :            :                                                 slack,
     566                 :            :                                                 HRTIMER_MODE_ABS);
     567                 :            :                         }
     568                 :          0 :                         dev->power.timer_autosuspends = 1;
     569                 :          0 :                         goto out;
     570                 :            :                 }
     571                 :            :         }
     572                 :            : 
     573                 :            :         /* Other scheduled or pending requests need to be canceled. */
     574         [ -  + ]:         60 :         pm_runtime_cancel_pending(dev);
     575                 :            : 
     576         [ -  + ]:         60 :         if (dev->power.runtime_status == RPM_SUSPENDING) {
     577         [ #  # ]:          0 :                 DEFINE_WAIT(wait);
     578                 :            : 
     579         [ #  # ]:          0 :                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
     580                 :          0 :                         retval = -EINPROGRESS;
     581                 :          0 :                         goto out;
     582                 :            :                 }
     583                 :            : 
     584         [ #  # ]:          0 :                 if (dev->power.irq_safe) {
     585                 :          0 :                         spin_unlock(&dev->power.lock);
     586                 :            : 
     587                 :          0 :                         cpu_relax();
     588                 :            : 
     589                 :          0 :                         spin_lock(&dev->power.lock);
     590                 :          0 :                         goto repeat;
     591                 :            :                 }
     592                 :            : 
     593                 :            :                 /* Wait for the other suspend running in parallel with us. */
     594                 :          0 :                 for (;;) {
     595                 :          0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
     596                 :            :                                         TASK_UNINTERRUPTIBLE);
     597         [ #  # ]:          0 :                         if (dev->power.runtime_status != RPM_SUSPENDING)
     598                 :            :                                 break;
     599                 :            : 
     600                 :          0 :                         spin_unlock_irq(&dev->power.lock);
     601                 :            : 
     602                 :          0 :                         schedule();
     603                 :            : 
     604                 :          0 :                         spin_lock_irq(&dev->power.lock);
     605                 :            :                 }
     606                 :          0 :                 finish_wait(&dev->power.wait_queue, &wait);
     607                 :          0 :                 goto repeat;
     608                 :            :         }
     609                 :            : 
     610         [ -  + ]:         60 :         if (dev->power.no_callbacks)
     611                 :          0 :                 goto no_callback;       /* Assume success. */
     612                 :            : 
     613                 :            :         /* Carry out an asynchronous or a synchronous suspend. */
     614         [ -  + ]:         60 :         if (rpmflags & RPM_ASYNC) {
     615                 :          0 :                 dev->power.request = (rpmflags & RPM_AUTO) ?
     616         [ #  # ]:          0 :                     RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
     617         [ #  # ]:          0 :                 if (!dev->power.request_pending) {
     618                 :          0 :                         dev->power.request_pending = true;
     619                 :          0 :                         queue_work(pm_wq, &dev->power.work);
     620                 :            :                 }
     621                 :          0 :                 goto out;
     622                 :            :         }
     623                 :            : 
     624                 :         60 :         __update_runtime_status(dev, RPM_SUSPENDING);
     625                 :            : 
     626                 :         60 :         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
     627                 :            : 
     628                 :         60 :         dev_pm_enable_wake_irq_check(dev, true);
     629                 :         60 :         retval = rpm_callback(callback, dev);
     630         [ -  + ]:         60 :         if (retval)
     631                 :          0 :                 goto fail;
     632                 :            : 
     633                 :         60 :  no_callback:
     634                 :         60 :         __update_runtime_status(dev, RPM_SUSPENDED);
     635         [ -  + ]:         60 :         pm_runtime_deactivate_timer(dev);
     636                 :            : 
     637         [ +  - ]:         60 :         if (dev->parent) {
     638                 :         60 :                 parent = dev->parent;
     639                 :         60 :                 atomic_add_unless(&parent->power.child_count, -1, 0);
     640                 :            :         }
     641                 :         60 :         wake_up_all(&dev->power.wait_queue);
     642                 :            : 
     643         [ -  + ]:         60 :         if (dev->power.deferred_resume) {
     644                 :          0 :                 dev->power.deferred_resume = false;
     645                 :          0 :                 rpm_resume(dev, 0);
     646                 :          0 :                 retval = -EAGAIN;
     647                 :          0 :                 goto out;
     648                 :            :         }
     649                 :            : 
     650                 :            :         /* Maybe the parent is now able to suspend. */
     651   [ -  +  -  +  :         60 :         if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
                   -  + ]
     652                 :         60 :                 spin_unlock(&dev->power.lock);
     653                 :            : 
     654                 :         60 :                 spin_lock(&parent->power.lock);
     655                 :         60 :                 rpm_idle(parent, RPM_ASYNC);
     656                 :         60 :                 spin_unlock(&parent->power.lock);
     657                 :            : 
     658                 :         60 :                 spin_lock(&dev->power.lock);
     659                 :            :         }
     660                 :            : 
     661                 :          0 :  out:
     662                 :         60 :         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
     663                 :            : 
     664                 :         60 :         return retval;
     665                 :            : 
     666                 :            :  fail:
     667                 :          0 :         dev_pm_disable_wake_irq_check(dev);
     668                 :          0 :         __update_runtime_status(dev, RPM_ACTIVE);
     669                 :          0 :         dev->power.deferred_resume = false;
     670                 :          0 :         wake_up_all(&dev->power.wait_queue);
     671                 :            : 
     672         [ #  # ]:          0 :         if (retval == -EAGAIN || retval == -EBUSY) {
     673                 :          0 :                 dev->power.runtime_error = 0;
     674                 :            : 
     675                 :            :                 /*
     676                 :            :                  * If the callback routine failed an autosuspend, and
     677                 :            :                  * if the last_busy time has been updated so that there
     678                 :            :                  * is a new autosuspend expiration time, automatically
     679                 :            :                  * reschedule another autosuspend.
     680                 :            :                  */
     681   [ #  #  #  # ]:          0 :                 if ((rpmflags & RPM_AUTO) &&
     682                 :          0 :                     pm_runtime_autosuspend_expiration(dev) != 0)
     683                 :          0 :                         goto repeat;
     684                 :            :         } else {
     685         [ #  # ]:          0 :                 pm_runtime_cancel_pending(dev);
     686                 :            :         }
     687                 :          0 :         goto out;
     688                 :            : }
     689                 :            : 
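A hedged sketch of the callbacks that the suspend path above and the resume path below
eventually invoke, wired up through SET_RUNTIME_PM_OPS(); the foo_* names and the empty
bodies are placeholders, not part of this file:

        #include <linux/device.h>
        #include <linux/pm_runtime.h>

        static int __maybe_unused foo_runtime_suspend(struct device *dev)
        {
                /*
                 * Quiesce the hardware.  Returning -EBUSY or -EAGAIN keeps the
                 * device RPM_ACTIVE and, with RPM_AUTO, may reschedule another
                 * autosuspend attempt (see the "fail:" path above).
                 */
                return 0;
        }

        static int __maybe_unused foo_runtime_resume(struct device *dev)
        {
                /* Bring the hardware back up before new I/O is allowed. */
                return 0;
        }

        static const struct dev_pm_ops foo_pm_ops = {
                SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
        };
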
     690                 :            : /**
     691                 :            :  * rpm_resume - Carry out runtime resume of given device.
     692                 :            :  * @dev: Device to resume.
     693                 :            :  * @rpmflags: Flag bits.
     694                 :            :  *
     695                 :            :  * Check if the device's runtime PM status allows it to be resumed.  Cancel
     696                 :            :  * any scheduled or pending requests.  If another resume has been started
     697                 :            :  * earlier, either return immediately or wait for it to finish, depending on the
     698                 :            :  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
     699                 :            :  * parallel with this function, either tell the other process to resume after
     700                 :            :  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
     701                 :            :  * flag is set then queue a resume request; otherwise run the
     702                 :            :  * ->runtime_resume() callback directly.  Queue an idle notification for the
     703                 :            :  * device if the resume succeeded.
     704                 :            :  *
     705                 :            :  * This function must be called under dev->power.lock with interrupts disabled.
     706                 :            :  */
     707                 :       6312 : static int rpm_resume(struct device *dev, int rpmflags)
     708                 :            :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     709                 :            : {
     710                 :       6312 :         int (*callback)(struct device *);
     711                 :       6312 :         struct device *parent = NULL;
     712                 :       6312 :         int retval = 0;
     713                 :            : 
     714                 :       6312 :         trace_rpm_resume_rcuidle(dev, rpmflags);
     715                 :            : 
     716                 :            :  repeat:
     717         [ +  - ]:       6372 :         if (dev->power.runtime_error)
     718                 :            :                 retval = -EINVAL;
     719   [ +  +  -  + ]:       6372 :         else if (dev->power.disable_depth == 1 && dev->power.is_suspended
     720         [ #  # ]:          0 :             && dev->power.runtime_status == RPM_ACTIVE)
     721                 :            :                 retval = 1;
     722         [ +  + ]:       6372 :         else if (dev->power.disable_depth > 0)
     723                 :            :                 retval = -EACCES;
     724         [ -  + ]:       1650 :         if (retval)
     725                 :       4722 :                 goto out;
     726                 :            : 
     727                 :            :         /*
     728                 :            :          * Other scheduled or pending requests need to be canceled.  Small
     729                 :            :          * optimization: If an autosuspend timer is running, leave it running
     730                 :            :          * rather than cancelling it now only to restart it again in the near
     731                 :            :          * future.
     732                 :            :          */
     733                 :       1650 :         dev->power.request = RPM_REQ_NONE;
     734         [ +  - ]:       1650 :         if (!dev->power.timer_autosuspends)
     735         [ -  + ]:       1650 :                 pm_runtime_deactivate_timer(dev);
     736                 :            : 
     737         [ +  + ]:       1650 :         if (dev->power.runtime_status == RPM_ACTIVE) {
     738                 :       1530 :                 retval = 1;
     739                 :       1530 :                 goto out;
     740                 :            :         }
     741                 :            : 
     742                 :        120 :         if (dev->power.runtime_status == RPM_RESUMING
     743         [ -  + ]:        120 :             || dev->power.runtime_status == RPM_SUSPENDING) {
     744         [ #  # ]:          0 :                 DEFINE_WAIT(wait);
     745                 :            : 
     746         [ #  # ]:          0 :                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
     747         [ #  # ]:          0 :                         if (dev->power.runtime_status == RPM_SUSPENDING)
     748                 :          0 :                                 dev->power.deferred_resume = true;
     749                 :            :                         else
     750                 :            :                                 retval = -EINPROGRESS;
     751                 :          0 :                         goto out;
     752                 :            :                 }
     753                 :            : 
     754         [ #  # ]:          0 :                 if (dev->power.irq_safe) {
     755                 :          0 :                         spin_unlock(&dev->power.lock);
     756                 :            : 
     757                 :          0 :                         cpu_relax();
     758                 :            : 
     759                 :          0 :                         spin_lock(&dev->power.lock);
     760                 :          0 :                         goto repeat;
     761                 :            :                 }
     762                 :            : 
     763                 :            :                 /* Wait for the operation carried out in parallel with us. */
     764                 :          0 :                 for (;;) {
     765                 :          0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
     766                 :            :                                         TASK_UNINTERRUPTIBLE);
     767                 :          0 :                         if (dev->power.runtime_status != RPM_RESUMING
     768         [ #  # ]:          0 :                             && dev->power.runtime_status != RPM_SUSPENDING)
     769                 :            :                                 break;
     770                 :            : 
     771                 :          0 :                         spin_unlock_irq(&dev->power.lock);
     772                 :            : 
     773                 :          0 :                         schedule();
     774                 :            : 
     775                 :          0 :                         spin_lock_irq(&dev->power.lock);
     776                 :            :                 }
     777                 :          0 :                 finish_wait(&dev->power.wait_queue, &wait);
     778                 :          0 :                 goto repeat;
     779                 :            :         }
     780                 :            : 
     781                 :            :         /*
     782                 :            :          * See if we can skip waking up the parent.  This is safe only if
     783                 :            :          * power.no_callbacks is set, because otherwise we don't know whether
     784                 :            :          * the resume will actually succeed.
     785                 :            :          */
     786   [ -  +  -  -  :        120 :         if (dev->power.no_callbacks && !parent && dev->parent) {
                   -  - ]
     787                 :          0 :                 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
     788                 :          0 :                 if (dev->parent->power.disable_depth > 0
     789         [ #  # ]:          0 :                     || dev->parent->power.ignore_children
     790         [ #  # ]:          0 :                     || dev->parent->power.runtime_status == RPM_ACTIVE) {
     791                 :          0 :                         atomic_inc(&dev->parent->power.child_count);
     792                 :          0 :                         spin_unlock(&dev->parent->power.lock);
     793                 :          0 :                         retval = 1;
     794                 :          0 :                         goto no_callback;       /* Assume success. */
     795                 :            :                 }
     796                 :          0 :                 spin_unlock(&dev->parent->power.lock);
     797                 :            :         }
     798                 :            : 
     799                 :            :         /* Carry out an asynchronous or a synchronous resume. */
     800         [ -  + ]:        120 :         if (rpmflags & RPM_ASYNC) {
     801                 :          0 :                 dev->power.request = RPM_REQ_RESUME;
     802         [ #  # ]:          0 :                 if (!dev->power.request_pending) {
     803                 :          0 :                         dev->power.request_pending = true;
     804                 :          0 :                         queue_work(pm_wq, &dev->power.work);
     805                 :            :                 }
     806                 :          0 :                 retval = 0;
     807                 :          0 :                 goto out;
     808                 :            :         }
     809                 :            : 
     810   [ +  +  -  + ]:        120 :         if (!parent && dev->parent) {
     811                 :            :                 /*
     812                 :            :                  * Increment the parent's usage counter and resume it if
     813                 :            :                  * necessary.  Not needed if dev is irq-safe; then the
     814                 :            :                  * parent is permanently resumed.
     815                 :            :                  */
     816                 :         60 :                 parent = dev->parent;
     817         [ -  + ]:         60 :                 if (dev->power.irq_safe)
     818                 :          0 :                         goto skip_parent;
     819                 :         60 :                 spin_unlock(&dev->power.lock);
     820                 :            : 
     821                 :         60 :                 pm_runtime_get_noresume(parent);
     822                 :            : 
     823                 :         60 :                 spin_lock(&parent->power.lock);
     824                 :            :                 /*
     825                 :            :                  * Resume the parent if it has runtime PM enabled and not been
     826                 :            :                  * set to ignore its children.
     827                 :            :                  */
     828         [ +  - ]:         60 :                 if (!parent->power.disable_depth
     829         [ +  - ]:         60 :                     && !parent->power.ignore_children) {
     830                 :         60 :                         rpm_resume(parent, 0);
     831         [ -  + ]:         60 :                         if (parent->power.runtime_status != RPM_ACTIVE)
     832                 :          0 :                                 retval = -EBUSY;
     833                 :            :                 }
     834                 :         60 :                 spin_unlock(&parent->power.lock);
     835                 :            : 
     836                 :         60 :                 spin_lock(&dev->power.lock);
     837         [ -  + ]:         60 :                 if (retval)
     838                 :          0 :                         goto out;
     839                 :         60 :                 goto repeat;
     840                 :            :         }
     841                 :         60 :  skip_parent:
     842                 :            : 
     843         [ -  + ]:         60 :         if (dev->power.no_callbacks)
     844                 :          0 :                 goto no_callback;       /* Assume success. */
     845                 :            : 
     846                 :         60 :         __update_runtime_status(dev, RPM_RESUMING);
     847                 :            : 
     848                 :         60 :         callback = RPM_GET_CALLBACK(dev, runtime_resume);
     849                 :            : 
     850                 :         60 :         dev_pm_disable_wake_irq_check(dev);
     851                 :         60 :         retval = rpm_callback(callback, dev);
     852         [ -  + ]:         60 :         if (retval) {
     853                 :          0 :                 __update_runtime_status(dev, RPM_SUSPENDED);
     854         [ #  # ]:          0 :                 pm_runtime_cancel_pending(dev);
     855                 :          0 :                 dev_pm_enable_wake_irq_check(dev, false);
     856                 :            :         } else {
     857                 :         60 :  no_callback:
     858                 :         60 :                 __update_runtime_status(dev, RPM_ACTIVE);
     859                 :         60 :                 pm_runtime_mark_last_busy(dev);
     860         [ +  - ]:         60 :                 if (parent)
     861                 :         60 :                         atomic_inc(&parent->power.child_count);
     862                 :            :         }
     863                 :         60 :         wake_up_all(&dev->power.wait_queue);
     864                 :            : 
     865         [ -  + ]:         60 :         if (retval >= 0)
     866                 :         60 :                 rpm_idle(dev, RPM_ASYNC);
     867                 :            : 
     868                 :          0 :  out:
     869   [ +  +  +  - ]:       6312 :         if (parent && !dev->power.irq_safe) {
     870                 :         60 :                 spin_unlock_irq(&dev->power.lock);
     871                 :            : 
     872                 :         60 :                 pm_runtime_put(parent);
     873                 :            : 
     874                 :         60 :                 spin_lock_irq(&dev->power.lock);
     875                 :            :         }
     876                 :            : 
     877                 :       6312 :         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
     878                 :            : 
     879                 :       6312 :         return retval;
     880                 :            : }
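
The callback fetched via RPM_GET_CALLBACK(dev, runtime_resume) above is looked up from the
device's PM domain, type, class, bus or, as a fallback, the driver's dev_pm_ops.  A minimal
sketch of how a driver might supply such callbacks (the foo_* names are hypothetical and the
bodies are placeholders):

        #include <linux/pm.h>
        #include <linux/pm_runtime.h>

        static int foo_runtime_suspend(struct device *dev)
        {
                /* Quiesce the hardware and drop its clocks/power here. */
                return 0;
        }

        static int foo_runtime_resume(struct device *dev)
        {
                /* Re-enable clocks/power and restore device context here. */
                return 0;
        }

        static const struct dev_pm_ops foo_pm_ops = {
                SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
        };
        /* Typically assigned to the .pm field of the driver structure. */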
     881                 :            : 
     882                 :            : /**
     883                 :            :  * pm_runtime_work - Universal runtime PM work function.
     884                 :            :  * @work: Work structure used for scheduling the execution of this function.
     885                 :            :  *
      886                 :            :  * Use @work to obtain the device object the work has been scheduled for,
      887                 :            :  * determine the type of request and execute the matching runtime PM function.
     888                 :            :  */
     889                 :          0 : static void pm_runtime_work(struct work_struct *work)
     890                 :            : {
     891                 :          0 :         struct device *dev = container_of(work, struct device, power.work);
     892                 :          0 :         enum rpm_request req;
     893                 :            : 
     894                 :          0 :         spin_lock_irq(&dev->power.lock);
     895                 :            : 
     896         [ #  # ]:          0 :         if (!dev->power.request_pending)
     897                 :          0 :                 goto out;
     898                 :            : 
     899                 :          0 :         req = dev->power.request;
     900                 :          0 :         dev->power.request = RPM_REQ_NONE;
     901                 :          0 :         dev->power.request_pending = false;
     902                 :            : 
     903   [ #  #  #  #  :          0 :         switch (req) {
                      # ]
     904                 :            :         case RPM_REQ_NONE:
     905                 :            :                 break;
     906                 :          0 :         case RPM_REQ_IDLE:
     907                 :          0 :                 rpm_idle(dev, RPM_NOWAIT);
     908                 :          0 :                 break;
     909                 :          0 :         case RPM_REQ_SUSPEND:
     910                 :          0 :                 rpm_suspend(dev, RPM_NOWAIT);
     911                 :          0 :                 break;
     912                 :          0 :         case RPM_REQ_AUTOSUSPEND:
     913                 :          0 :                 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
     914                 :          0 :                 break;
     915                 :          0 :         case RPM_REQ_RESUME:
     916                 :          0 :                 rpm_resume(dev, RPM_NOWAIT);
     917                 :          0 :                 break;
     918                 :            :         }
     919                 :            : 
     920                 :          0 :  out:
     921                 :          0 :         spin_unlock_irq(&dev->power.lock);
     922                 :          0 : }
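
The requests dispatched above are queued by the asynchronous helpers in <linux/pm_runtime.h>.
A sketch of posting a resume request from a context that must not sleep (the helper name is
hypothetical):

        #include <linux/pm_runtime.h>

        static void foo_kick_resume_from_atomic(struct device *dev)
        {
                /* Sets power.request = RPM_REQ_RESUME and queues power.work on
                 * pm_wq; pm_runtime_work() then calls rpm_resume(dev, RPM_NOWAIT). */
                pm_request_resume(dev);
        }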
     923                 :            : 
     924                 :            : /**
     925                 :            :  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
      926                 :            :  * @timer: hrtimer used by pm_schedule_suspend().
     927                 :            :  *
     928                 :            :  * Check if the time is right and queue a suspend request.
     929                 :            :  */
     930                 :          0 : static enum hrtimer_restart  pm_suspend_timer_fn(struct hrtimer *timer)
     931                 :            : {
     932                 :          0 :         struct device *dev = container_of(timer, struct device, power.suspend_timer);
     933                 :          0 :         unsigned long flags;
     934                 :          0 :         u64 expires;
     935                 :            : 
     936                 :          0 :         spin_lock_irqsave(&dev->power.lock, flags);
     937                 :            : 
     938                 :          0 :         expires = dev->power.timer_expires;
     939                 :            :         /*
     940                 :            :          * If 'expires' is after the current time, we've been called
     941                 :            :          * too early.
     942                 :            :          */
     943   [ #  #  #  # ]:          0 :         if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
     944                 :          0 :                 dev->power.timer_expires = 0;
     945         [ #  # ]:          0 :                 rpm_suspend(dev, dev->power.timer_autosuspends ?
     946                 :            :                     (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
     947                 :            :         }
     948                 :            : 
     949                 :          0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
     950                 :            : 
     951                 :          0 :         return HRTIMER_NORESTART;
     952                 :            : }
     953                 :            : 
     954                 :            : /**
     955                 :            :  * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
     956                 :            :  * @dev: Device to suspend.
     957                 :            :  * @delay: Time to wait before submitting a suspend request, in milliseconds.
     958                 :            :  */
     959                 :          0 : int pm_schedule_suspend(struct device *dev, unsigned int delay)
     960                 :            : {
     961                 :          0 :         unsigned long flags;
     962                 :          0 :         u64 expires;
     963                 :          0 :         int retval;
     964                 :            : 
     965                 :          0 :         spin_lock_irqsave(&dev->power.lock, flags);
     966                 :            : 
     967         [ #  # ]:          0 :         if (!delay) {
     968                 :          0 :                 retval = rpm_suspend(dev, RPM_ASYNC);
     969                 :          0 :                 goto out;
     970                 :            :         }
     971                 :            : 
     972                 :          0 :         retval = rpm_check_suspend_allowed(dev);
     973         [ #  # ]:          0 :         if (retval)
     974                 :          0 :                 goto out;
     975                 :            : 
     976                 :            :         /* Other scheduled or pending requests need to be canceled. */
     977         [ #  # ]:          0 :         pm_runtime_cancel_pending(dev);
     978                 :            : 
     979                 :          0 :         expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
     980                 :          0 :         dev->power.timer_expires = expires;
     981                 :          0 :         dev->power.timer_autosuspends = 0;
     982                 :          0 :         hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
     983                 :            : 
     984                 :          0 :  out:
     985                 :          0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
     986                 :            : 
     987                 :          0 :         return retval;
     988                 :            : }
     989                 :            : EXPORT_SYMBOL_GPL(pm_schedule_suspend);
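
A sketch of how a driver might use pm_schedule_suspend() after finishing a burst of I/O; the
100 ms delay and the foo_* name are illustrative only:

        #include <linux/device.h>
        #include <linux/pm_runtime.h>

        static void foo_io_burst_done(struct device *dev)
        {
                /* Queue a suspend request roughly 100 ms from now; a nonzero
                 * return means nothing was scheduled (already suspended,
                 * usage count held, runtime PM disabled, ...). */
                int ret = pm_schedule_suspend(dev, 100);

                if (ret)
                        dev_dbg(dev, "suspend not scheduled: %d\n", ret);
        }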
     990                 :            : 
     991                 :            : /**
     992                 :            :  * __pm_runtime_idle - Entry point for runtime idle operations.
     993                 :            :  * @dev: Device to send idle notification for.
     994                 :            :  * @rpmflags: Flag bits.
     995                 :            :  *
     996                 :            :  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
     997                 :            :  * return immediately if it is larger than zero.  Then carry out an idle
     998                 :            :  * notification, either synchronous or asynchronous.
     999                 :            :  *
    1000                 :            :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1001                 :            :  * or if pm_runtime_irq_safe() has been called.
    1002                 :            :  */
    1003                 :      11238 : int __pm_runtime_idle(struct device *dev, int rpmflags)
    1004                 :            : {
    1005                 :      11238 :         unsigned long flags;
    1006                 :      11238 :         int retval;
    1007                 :            : 
    1008         [ +  + ]:      11238 :         if (rpmflags & RPM_GET_PUT) {
    1009         [ +  + ]:       5868 :                 if (!atomic_dec_and_test(&dev->power.usage_count)) {
    1010                 :       1305 :                         trace_rpm_usage_rcuidle(dev, rpmflags);
    1011                 :       1305 :                         return 0;
    1012                 :            :                 }
    1013                 :            :         }
    1014                 :            : 
    1015   [ +  +  +  - ]:       9933 :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
    1016                 :            : 
    1017                 :       9933 :         spin_lock_irqsave(&dev->power.lock, flags);
    1018                 :       9933 :         retval = rpm_idle(dev, rpmflags);
    1019                 :       9933 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1020                 :            : 
    1021                 :       9933 :         return retval;
    1022                 :            : }
    1023                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_idle);
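
Drivers normally reach __pm_runtime_idle() through the static inline wrappers in
<linux/pm_runtime.h>: pm_runtime_idle() passes no flags, pm_request_idle() passes RPM_ASYNC,
and pm_runtime_put() passes RPM_GET_PUT | RPM_ASYNC.  A brief sketch (hypothetical helper
name):

        #include <linux/pm_runtime.h>

        static void foo_done_using_device(struct device *dev)
        {
                /* Drop the reference taken earlier; when the usage count hits
                 * zero this queues an asynchronous idle notification. */
                pm_runtime_put(dev);
        }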
    1024                 :            : 
    1025                 :            : /**
    1026                 :            :  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
    1027                 :            :  * @dev: Device to suspend.
    1028                 :            :  * @rpmflags: Flag bits.
    1029                 :            :  *
    1030                 :            :  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
    1031                 :            :  * return immediately if it is larger than zero.  Then carry out a suspend,
    1032                 :            :  * either synchronous or asynchronous.
    1033                 :            :  *
    1034                 :            :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1035                 :            :  * or if pm_runtime_irq_safe() has been called.
    1036                 :            :  */
    1037                 :          0 : int __pm_runtime_suspend(struct device *dev, int rpmflags)
    1038                 :            : {
    1039                 :          0 :         unsigned long flags;
    1040                 :          0 :         int retval;
    1041                 :            : 
    1042         [ #  # ]:          0 :         if (rpmflags & RPM_GET_PUT) {
    1043         [ #  # ]:          0 :                 if (!atomic_dec_and_test(&dev->power.usage_count)) {
    1044                 :          0 :                         trace_rpm_usage_rcuidle(dev, rpmflags);
    1045                 :          0 :                         return 0;
    1046                 :            :                 }
    1047                 :            :         }
    1048                 :            : 
    1049   [ #  #  #  # ]:          0 :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
    1050                 :            : 
    1051                 :          0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1052                 :          0 :         retval = rpm_suspend(dev, rpmflags);
    1053                 :          0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1054                 :            : 
    1055                 :          0 :         return retval;
    1056                 :            : }
    1057                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
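
The wrappers for __pm_runtime_suspend() follow the same pattern: pm_runtime_suspend() passes
no flags, pm_runtime_autosuspend() passes RPM_AUTO, and pm_runtime_put_autosuspend() passes
RPM_GET_PUT | RPM_ASYNC | RPM_AUTO.  A sketch of the common put-with-autosuspend idiom
(hypothetical helper name):

        #include <linux/pm_runtime.h>

        static void foo_transfer_complete(struct device *dev)
        {
                /* Restart the autosuspend delay from now ... */
                pm_runtime_mark_last_busy(dev);
                /* ... then drop our reference; if the count reaches zero, an
                 * autosuspend honouring that delay is queued. */
                pm_runtime_put_autosuspend(dev);
        }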
    1058                 :            : 
    1059                 :            : /**
    1060                 :            :  * __pm_runtime_resume - Entry point for runtime resume operations.
    1061                 :            :  * @dev: Device to resume.
    1062                 :            :  * @rpmflags: Flag bits.
    1063                 :            :  *
    1064                 :            :  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
    1065                 :            :  * carry out a resume, either synchronous or asynchronous.
    1066                 :            :  *
    1067                 :            :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1068                 :            :  * or if pm_runtime_irq_safe() has been called.
    1069                 :            :  */
    1070                 :       5802 : int __pm_runtime_resume(struct device *dev, int rpmflags)
    1071                 :            : {
    1072                 :       5802 :         unsigned long flags;
    1073                 :       5802 :         int retval;
    1074                 :            : 
    1075   [ +  -  +  -  :       5802 :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
                   +  + ]
    1076                 :            :                         dev->power.runtime_status != RPM_ACTIVE);
    1077                 :            : 
    1078         [ +  - ]:       5802 :         if (rpmflags & RPM_GET_PUT)
    1079                 :       5802 :                 atomic_inc(&dev->power.usage_count);
    1080                 :            : 
    1081                 :       5802 :         spin_lock_irqsave(&dev->power.lock, flags);
    1082                 :       5802 :         retval = rpm_resume(dev, rpmflags);
    1083                 :       5802 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1084                 :            : 
    1085                 :       5802 :         return retval;
    1086                 :            : }
    1087                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_resume);
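
The usual entry points are pm_runtime_get_sync(), which passes RPM_GET_PUT, and
pm_runtime_get(), which adds RPM_ASYNC.  A sketch of the canonical get/put pattern around
hardware access (foo_* names hypothetical):

        #include <linux/pm_runtime.h>

        static int foo_do_transfer(struct device *dev)
        {
                int ret;

                /* Bump the usage count and resume synchronously.  The count
                 * stays incremented even on failure, so the error path must
                 * still drop it to keep things balanced. */
                ret = pm_runtime_get_sync(dev);
                if (ret < 0) {
                        pm_runtime_put_noidle(dev);
                        return ret;
                }

                /* ... access the hardware ... */

                pm_runtime_mark_last_busy(dev);
                pm_runtime_put_autosuspend(dev);
                return 0;
        }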
    1088                 :            : 
    1089                 :            : /**
    1090                 :            :  * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
    1091                 :            :  * @dev: Device to handle.
    1092                 :            :  *
    1093                 :            :  * Return -EINVAL if runtime PM is disabled for the device.
    1094                 :            :  *
    1095                 :            :  * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
    1096                 :            :  * and the runtime PM usage counter is nonzero, increment the counter and
    1097                 :            :  * return 1.  Otherwise return 0 without changing the counter.
    1098                 :            :  */
    1099                 :          0 : int pm_runtime_get_if_in_use(struct device *dev)
    1100                 :            : {
    1101                 :          0 :         unsigned long flags;
    1102                 :          0 :         int retval;
    1103                 :            : 
    1104                 :          0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1105         [ #  # ]:          0 :         retval = dev->power.disable_depth > 0 ? -EINVAL :
    1106                 :          0 :                 dev->power.runtime_status == RPM_ACTIVE
    1107   [ #  #  #  # ]:          0 :                         && atomic_inc_not_zero(&dev->power.usage_count);
    1108                 :          0 :         trace_rpm_usage_rcuidle(dev, 0);
    1109                 :          0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1110                 :          0 :         return retval;
    1111                 :            : }
    1112                 :            : EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
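
A sketch of the opportunistic pattern this helper enables: do extra work only if the device
already happens to be active and in use, without forcing a resume (hypothetical helper name):

        #include <linux/pm_runtime.h>

        static void foo_opportunistic_poll(struct device *dev)
        {
                /* Returns 1 and takes a reference only if the status is
                 * RPM_ACTIVE and the usage count is already nonzero;
                 * returns 0 or -EINVAL otherwise. */
                if (pm_runtime_get_if_in_use(dev) <= 0)
                        return;

                /* ... read registers, update statistics, etc. ... */

                pm_runtime_put(dev);
        }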
    1113                 :            : 
    1114                 :            : /**
    1115                 :            :  * __pm_runtime_set_status - Set runtime PM status of a device.
    1116                 :            :  * @dev: Device to handle.
    1117                 :            :  * @status: New runtime PM status of the device.
    1118                 :            :  *
    1119                 :            :  * If runtime PM of the device is disabled or its power.runtime_error field is
    1120                 :            :  * different from zero, the status may be changed either to RPM_ACTIVE, or to
    1121                 :            :  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
    1122                 :            :  * However, if the device has a parent and the parent is not active, and the
    1123                 :            :  * parent's power.ignore_children flag is unset, the device's status cannot be
    1124                 :            :  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
    1125                 :            :  *
    1126                 :            :  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
    1127                 :            :  * and the device parent's counter of unsuspended children is modified to
    1128                 :            :  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
    1129                 :            :  * notification request for the parent is submitted.
    1130                 :            :  *
    1131                 :            :  * If @dev has any suppliers (as reflected by device links to them), and @status
    1132                 :            :  * is RPM_ACTIVE, they will be activated upfront and if the activation of one
    1133                 :            :  * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
     1134                 :            :  * of the @status value) and the suppliers will be deactivated on exit.  The
    1135                 :            :  * error returned by the failing supplier activation will be returned in that
    1136                 :            :  * case.
    1137                 :            :  */
    1138                 :        510 : int __pm_runtime_set_status(struct device *dev, unsigned int status)
    1139                 :            : {
    1140                 :        510 :         struct device *parent = dev->parent;
    1141                 :        510 :         bool notify_parent = false;
    1142                 :        510 :         int error = 0;
    1143                 :            : 
    1144         [ +  - ]:        510 :         if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
    1145                 :            :                 return -EINVAL;
    1146                 :            : 
    1147                 :        510 :         spin_lock_irq(&dev->power.lock);
    1148                 :            : 
    1149                 :            :         /*
    1150                 :            :          * Prevent PM-runtime from being enabled for the device or return an
    1151                 :            :          * error if it is enabled already and working.
    1152                 :            :          */
    1153   [ +  -  +  - ]:        510 :         if (dev->power.runtime_error || dev->power.disable_depth)
    1154                 :        510 :                 dev->power.disable_depth++;
    1155                 :            :         else
    1156                 :            :                 error = -EAGAIN;
    1157                 :            : 
    1158                 :        510 :         spin_unlock_irq(&dev->power.lock);
    1159                 :            : 
    1160         [ +  - ]:        510 :         if (error)
    1161                 :            :                 return error;
    1162                 :            : 
    1163                 :            :         /*
    1164                 :            :          * If the new status is RPM_ACTIVE, the suppliers can be activated
    1165                 :            :          * upfront regardless of the current status, because next time
    1166                 :            :          * rpm_put_suppliers() runs, the rpm_active refcounts of the links
    1167                 :            :          * involved will be dropped down to one anyway.
    1168                 :            :          */
    1169         [ +  - ]:        510 :         if (status == RPM_ACTIVE) {
    1170                 :        510 :                 int idx = device_links_read_lock();
    1171                 :            : 
    1172                 :        510 :                 error = rpm_get_suppliers(dev);
    1173         [ -  + ]:        510 :                 if (error)
    1174                 :          0 :                         status = RPM_SUSPENDED;
    1175                 :            : 
    1176                 :        510 :                 device_links_read_unlock(idx);
    1177                 :            :         }
    1178                 :            : 
    1179                 :        510 :         spin_lock_irq(&dev->power.lock);
    1180                 :            : 
    1181   [ +  -  -  + ]:        510 :         if (dev->power.runtime_status == status || !parent)
    1182                 :          0 :                 goto out_set;
    1183                 :            : 
    1184         [ -  + ]:        510 :         if (status == RPM_SUSPENDED) {
    1185                 :          0 :                 atomic_add_unless(&parent->power.child_count, -1, 0);
    1186                 :          0 :                 notify_parent = !parent->power.ignore_children;
    1187                 :            :         } else {
    1188                 :        510 :                 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
    1189                 :            : 
    1190                 :            :                 /*
    1191                 :            :                  * It is invalid to put an active child under a parent that is
    1192                 :            :                  * not active, has runtime PM enabled and the
    1193                 :            :                  * 'power.ignore_children' flag unset.
    1194                 :            :                  */
    1195         [ +  + ]:        510 :                 if (!parent->power.disable_depth
    1196         [ +  - ]:        300 :                     && !parent->power.ignore_children
    1197         [ -  + ]:        300 :                     && parent->power.runtime_status != RPM_ACTIVE) {
    1198         [ #  # ]:          0 :                         dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
    1199                 :            :                                 dev_name(dev),
    1200                 :            :                                 dev_name(parent));
    1201                 :          0 :                         error = -EBUSY;
    1202         [ +  - ]:        510 :                 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
    1203                 :        510 :                         atomic_inc(&parent->power.child_count);
    1204                 :            :                 }
    1205                 :            : 
    1206                 :        510 :                 spin_unlock(&parent->power.lock);
    1207                 :            : 
    1208         [ -  + ]:        510 :                 if (error) {
    1209                 :          0 :                         status = RPM_SUSPENDED;
    1210                 :          0 :                         goto out;
    1211                 :            :                 }
    1212                 :            :         }
    1213                 :            : 
    1214                 :        510 :  out_set:
    1215                 :        510 :         __update_runtime_status(dev, status);
    1216         [ -  + ]:        510 :         if (!error)
    1217                 :        510 :                 dev->power.runtime_error = 0;
    1218                 :            : 
    1219                 :          0 :  out:
    1220                 :        510 :         spin_unlock_irq(&dev->power.lock);
    1221                 :            : 
    1222         [ -  + ]:        510 :         if (notify_parent)
    1223                 :          0 :                 pm_request_idle(parent);
    1224                 :            : 
    1225         [ -  + ]:        510 :         if (status == RPM_SUSPENDED) {
    1226                 :          0 :                 int idx = device_links_read_lock();
    1227                 :            : 
    1228                 :          0 :                 rpm_put_suppliers(dev);
    1229                 :            : 
    1230                 :          0 :                 device_links_read_unlock(idx);
    1231                 :            :         }
    1232                 :            : 
    1233                 :        510 :         pm_runtime_enable(dev);
    1234                 :            : 
    1235                 :        510 :         return error;
    1236                 :            : }
    1237                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
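
Drivers call this through pm_runtime_set_active() and pm_runtime_set_suspended().  A sketch of
a probe-time helper that records the hardware as already powered before enabling runtime PM
(hypothetical helper called from the driver's probe path):

        #include <linux/pm_runtime.h>

        static int foo_enable_runtime_pm(struct device *dev)
        {
                int ret;

                /* Firmware left the device powered on, so record RPM_ACTIVE;
                 * this also bumps the parent's child_count (if any) and
                 * activates any linked suppliers. */
                ret = pm_runtime_set_active(dev);
                if (ret)
                        return ret;

                pm_runtime_enable(dev);
                return 0;
        }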
    1238                 :            : 
    1239                 :            : /**
    1240                 :            :  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
    1241                 :            :  * @dev: Device to handle.
    1242                 :            :  *
    1243                 :            :  * Flush all pending requests for the device from pm_wq and wait for all
    1244                 :            :  * runtime PM operations involving the device in progress to complete.
    1245                 :            :  *
    1246                 :            :  * Should be called under dev->power.lock with interrupts disabled.
    1247                 :            :  */
    1248                 :        702 : static void __pm_runtime_barrier(struct device *dev)
    1249                 :            : {
    1250         [ -  + ]:        702 :         pm_runtime_deactivate_timer(dev);
    1251                 :            : 
    1252         [ -  + ]:        702 :         if (dev->power.request_pending) {
    1253                 :          0 :                 dev->power.request = RPM_REQ_NONE;
    1254                 :          0 :                 spin_unlock_irq(&dev->power.lock);
    1255                 :            : 
    1256                 :          0 :                 cancel_work_sync(&dev->power.work);
    1257                 :            : 
    1258                 :          0 :                 spin_lock_irq(&dev->power.lock);
    1259                 :          0 :                 dev->power.request_pending = false;
    1260                 :            :         }
    1261                 :            : 
    1262                 :        702 :         if (dev->power.runtime_status == RPM_SUSPENDING
    1263         [ +  - ]:        702 :             || dev->power.runtime_status == RPM_RESUMING
    1264         [ -  + ]:        702 :             || dev->power.idle_notification) {
    1265                 :          0 :                 DEFINE_WAIT(wait);
    1266                 :            : 
    1267                 :            :                 /* Suspend, wake-up or idle notification in progress. */
    1268                 :          0 :                 for (;;) {
    1269                 :          0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
    1270                 :            :                                         TASK_UNINTERRUPTIBLE);
    1271                 :          0 :                         if (dev->power.runtime_status != RPM_SUSPENDING
    1272         [ #  # ]:          0 :                             && dev->power.runtime_status != RPM_RESUMING
    1273         [ #  # ]:          0 :                             && !dev->power.idle_notification)
    1274                 :            :                                 break;
    1275                 :          0 :                         spin_unlock_irq(&dev->power.lock);
    1276                 :            : 
    1277                 :          0 :                         schedule();
    1278                 :            : 
    1279                 :          0 :                         spin_lock_irq(&dev->power.lock);
    1280                 :            :                 }
    1281                 :          0 :                 finish_wait(&dev->power.wait_queue, &wait);
    1282                 :            :         }
    1283                 :        702 : }
    1284                 :            : 
    1285                 :            : /**
    1286                 :            :  * pm_runtime_barrier - Flush pending requests and wait for completions.
    1287                 :            :  * @dev: Device to handle.
    1288                 :            :  *
    1289                 :            :  * Prevent the device from being suspended by incrementing its usage counter and
    1290                 :            :  * if there's a pending resume request for the device, wake the device up.
    1291                 :            :  * Next, make sure that all pending requests for the device have been flushed
    1292                 :            :  * from pm_wq and wait for all runtime PM operations involving the device in
    1293                 :            :  * progress to complete.
    1294                 :            :  *
    1295                 :            :  * Return value:
    1296                 :            :  * 1, if there was a resume request pending and the device had to be woken up,
    1297                 :            :  * 0, otherwise
    1298                 :            :  */
    1299                 :        702 : int pm_runtime_barrier(struct device *dev)
    1300                 :            : {
    1301                 :        702 :         int retval = 0;
    1302                 :            : 
    1303                 :        702 :         pm_runtime_get_noresume(dev);
    1304                 :        702 :         spin_lock_irq(&dev->power.lock);
    1305                 :            : 
    1306         [ -  + ]:        702 :         if (dev->power.request_pending
    1307         [ #  # ]:          0 :             && dev->power.request == RPM_REQ_RESUME) {
    1308                 :          0 :                 rpm_resume(dev, 0);
    1309                 :          0 :                 retval = 1;
    1310                 :            :         }
    1311                 :            : 
    1312                 :        702 :         __pm_runtime_barrier(dev);
    1313                 :            : 
    1314                 :        702 :         spin_unlock_irq(&dev->power.lock);
    1315                 :        702 :         pm_runtime_put_noidle(dev);
    1316                 :            : 
    1317                 :        702 :         return retval;
    1318                 :            : }
    1319                 :            : EXPORT_SYMBOL_GPL(pm_runtime_barrier);
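
A sketch of a teardown helper that uses pm_runtime_barrier() to make sure no asynchronous
runtime-PM activity for the device is still in flight before resources go away (hypothetical
helper name):

        #include <linux/device.h>
        #include <linux/pm_runtime.h>

        static void foo_quiesce_runtime_pm(struct device *dev)
        {
                /* Flush pending requests and wait for any suspend/resume in
                 * progress; returns 1 if a pending resume had to be run. */
                if (pm_runtime_barrier(dev))
                        dev_dbg(dev, "pending resume request was carried out\n");
        }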
    1320                 :            : 
    1321                 :            : /**
    1322                 :            :  * __pm_runtime_disable - Disable runtime PM of a device.
    1323                 :            :  * @dev: Device to handle.
    1324                 :            :  * @check_resume: If set, check if there's a resume request for the device.
    1325                 :            :  *
    1326                 :            :  * Increment power.disable_depth for the device and if it was zero previously,
    1327                 :            :  * cancel all pending runtime PM requests for the device and wait for all
    1328                 :            :  * operations in progress to complete.  The device can be either active or
    1329                 :            :  * suspended after its runtime PM has been disabled.
    1330                 :            :  *
    1331                 :            :  * If @check_resume is set and there's a resume request pending when
    1332                 :            :  * __pm_runtime_disable() is called and power.disable_depth is zero, the
    1333                 :            :  * function will wake up the device before disabling its runtime PM.
    1334                 :            :  */
    1335                 :          0 : void __pm_runtime_disable(struct device *dev, bool check_resume)
    1336                 :            : {
    1337                 :          0 :         spin_lock_irq(&dev->power.lock);
    1338                 :            : 
    1339         [ #  # ]:          0 :         if (dev->power.disable_depth > 0) {
    1340                 :          0 :                 dev->power.disable_depth++;
    1341                 :          0 :                 goto out;
    1342                 :            :         }
    1343                 :            : 
    1344                 :            :         /*
    1345                 :            :          * Wake up the device if there's a resume request pending, because that
    1346                 :            :          * means there probably is some I/O to process and disabling runtime PM
    1347                 :            :          * shouldn't prevent the device from processing the I/O.
    1348                 :            :          */
    1349   [ #  #  #  # ]:          0 :         if (check_resume && dev->power.request_pending
    1350         [ #  # ]:          0 :             && dev->power.request == RPM_REQ_RESUME) {
    1351                 :            :                 /*
    1352                 :            :                  * Prevent suspends and idle notifications from being carried
    1353                 :            :                  * out after we have woken up the device.
    1354                 :            :                  */
    1355                 :          0 :                 pm_runtime_get_noresume(dev);
    1356                 :            : 
    1357                 :          0 :                 rpm_resume(dev, 0);
    1358                 :            : 
    1359                 :          0 :                 pm_runtime_put_noidle(dev);
    1360                 :            :         }
    1361                 :            : 
    1362                 :            :         /* Update time accounting before disabling PM-runtime. */
    1363                 :          0 :         update_pm_runtime_accounting(dev);
    1364                 :            : 
    1365         [ #  # ]:          0 :         if (!dev->power.disable_depth++)
    1366                 :          0 :                 __pm_runtime_barrier(dev);
    1367                 :            : 
    1368                 :          0 :  out:
    1369                 :          0 :         spin_unlock_irq(&dev->power.lock);
    1370                 :          0 : }
    1371                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_disable);
    1372                 :            : 
    1373                 :            : /**
    1374                 :            :  * pm_runtime_enable - Enable runtime PM of a device.
    1375                 :            :  * @dev: Device to handle.
    1376                 :            :  */
    1377                 :       1020 : void pm_runtime_enable(struct device *dev)
    1378                 :            : {
    1379                 :       1020 :         unsigned long flags;
    1380                 :            : 
    1381                 :       1020 :         spin_lock_irqsave(&dev->power.lock, flags);
    1382                 :            : 
    1383         [ +  - ]:       1020 :         if (dev->power.disable_depth > 0) {
    1384                 :       1020 :                 dev->power.disable_depth--;
    1385                 :            : 
    1386                 :            :                 /* About to enable runtime pm, set accounting_timestamp to now */
    1387         [ +  + ]:       1020 :                 if (!dev->power.disable_depth)
    1388                 :        510 :                         dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
    1389                 :            :         } else {
    1390                 :          0 :                 dev_warn(dev, "Unbalanced %s!\n", __func__);
    1391                 :            :         }
    1392                 :            : 
    1393   [ +  +  -  +  :       1020 :         WARN(!dev->power.disable_depth &&
          -  -  -  -  -  
                +  -  - ]
    1394                 :            :              dev->power.runtime_status == RPM_SUSPENDED &&
    1395                 :            :              !dev->power.ignore_children &&
    1396                 :            :              atomic_read(&dev->power.child_count) > 0,
    1397                 :            :              "Enabling runtime PM for inactive device (%s) with active children\n",
    1398                 :            :              dev_name(dev));
    1399                 :            : 
    1400                 :       1020 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1401                 :       1020 : }
    1402                 :            : EXPORT_SYMBOL_GPL(pm_runtime_enable);
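
Enable and disable calls nest through power.disable_depth, so they must stay balanced.  A
sketch of a driver keeping them paired across its setup and teardown paths (hypothetical
helper names):

        #include <linux/pm_runtime.h>

        static void foo_setup(struct device *dev)
        {
                /* Decrements power.disable_depth; runtime PM takes effect once
                 * it reaches zero (it starts out at 1 after pm_runtime_init()). */
                pm_runtime_enable(dev);
        }

        static void foo_teardown(struct device *dev)
        {
                /* Re-increments power.disable_depth; pm_runtime_disable() passes
                 * check_resume=true, so a pending resume request is run first. */
                pm_runtime_disable(dev);
        }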
    1403                 :            : 
    1404                 :            : /**
    1405                 :            :  * pm_runtime_forbid - Block runtime PM of a device.
    1406                 :            :  * @dev: Device to handle.
    1407                 :            :  *
    1408                 :            :  * Increase the device's usage count and clear its power.runtime_auto flag,
    1409                 :            :  * so that it cannot be suspended at run time until pm_runtime_allow() is called
    1410                 :            :  * for it.
    1411                 :            :  */
    1412                 :        360 : void pm_runtime_forbid(struct device *dev)
    1413                 :            : {
    1414                 :        360 :         spin_lock_irq(&dev->power.lock);
    1415         [ -  + ]:        360 :         if (!dev->power.runtime_auto)
    1416                 :          0 :                 goto out;
    1417                 :            : 
    1418                 :        360 :         dev->power.runtime_auto = false;
    1419                 :        360 :         atomic_inc(&dev->power.usage_count);
    1420                 :        360 :         rpm_resume(dev, 0);
    1421                 :            : 
    1422                 :        360 :  out:
    1423                 :        360 :         spin_unlock_irq(&dev->power.lock);
    1424                 :        360 : }
    1425                 :            : EXPORT_SYMBOL_GPL(pm_runtime_forbid);
    1426                 :            : 
    1427                 :            : /**
    1428                 :            :  * pm_runtime_allow - Unblock runtime PM of a device.
    1429                 :            :  * @dev: Device to handle.
    1430                 :            :  *
    1431                 :            :  * Decrease the device's usage count and set its power.runtime_auto flag.
    1432                 :            :  */
    1433                 :          0 : void pm_runtime_allow(struct device *dev)
    1434                 :            : {
    1435                 :          0 :         spin_lock_irq(&dev->power.lock);
    1436         [ #  # ]:          0 :         if (dev->power.runtime_auto)
    1437                 :          0 :                 goto out;
    1438                 :            : 
    1439                 :          0 :         dev->power.runtime_auto = true;
    1440         [ #  # ]:          0 :         if (atomic_dec_and_test(&dev->power.usage_count))
    1441                 :          0 :                 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
    1442                 :            :         else
    1443                 :          0 :                 trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
    1444                 :            : 
    1445                 :          0 :  out:
    1446                 :          0 :         spin_unlock_irq(&dev->power.lock);
    1447                 :          0 : }
    1448                 :            : EXPORT_SYMBOL_GPL(pm_runtime_allow);
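
pm_runtime_forbid() and pm_runtime_allow() back the "on"/"auto" values of the power/control
attribute in sysfs, and drivers may also call them directly.  A sketch of opting a device in
to runtime PM by default (some buses, PCI for example, forbid it at enumeration time; the
helper name is hypothetical):

        #include <linux/pm_runtime.h>

        static void foo_default_to_auto(struct device *dev)
        {
                /* Drop the block installed by a previous pm_runtime_forbid(),
                 * releasing the usage-count reference it took, so the device
                 * may autosuspend without user space writing "auto". */
                pm_runtime_allow(dev);
        }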
    1449                 :            : 
    1450                 :            : /**
    1451                 :            :  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
    1452                 :            :  * @dev: Device to handle.
    1453                 :            :  *
    1454                 :            :  * Set the power.no_callbacks flag, which tells the PM core that this
    1455                 :            :  * device is power-managed through its parent and has no runtime PM
    1456                 :            :  * callbacks of its own.  The runtime sysfs attributes will be removed.
    1457                 :            :  */
    1458                 :          0 : void pm_runtime_no_callbacks(struct device *dev)
    1459                 :            : {
    1460                 :          0 :         spin_lock_irq(&dev->power.lock);
    1461                 :          0 :         dev->power.no_callbacks = 1;
    1462                 :          0 :         spin_unlock_irq(&dev->power.lock);
    1463         [ #  # ]:          0 :         if (device_is_registered(dev))
    1464                 :          0 :                 rpm_sysfs_remove(dev);
    1465                 :          0 : }
    1466                 :            : EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
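
A sketch for a purely parent-managed child device that has no power control of its own
(hypothetical helper called while setting up the child):

        #include <linux/pm_runtime.h>

        static void foo_child_init_pm(struct device *dev)
        {
                /* State now simply follows the parent; idle/suspend/resume skip
                 * the callback machinery and the runtime sysfs files go away. */
                pm_runtime_no_callbacks(dev);
                pm_runtime_enable(dev);
        }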
    1467                 :            : 
    1468                 :            : /**
    1469                 :            :  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
    1470                 :            :  * @dev: Device to handle
    1471                 :            :  *
    1472                 :            :  * Set the power.irq_safe flag, which tells the PM core that the
    1473                 :            :  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
    1474                 :            :  * always be invoked with the spinlock held and interrupts disabled.  It also
    1475                 :            :  * causes the parent's usage counter to be permanently incremented, preventing
    1476                 :            :  * the parent from runtime suspending -- otherwise an irq-safe child might have
    1477                 :            :  * to wait for a non-irq-safe parent.
    1478                 :            :  */
    1479                 :          0 : void pm_runtime_irq_safe(struct device *dev)
    1480                 :            : {
    1481         [ #  # ]:          0 :         if (dev->parent)
    1482                 :          0 :                 pm_runtime_get_sync(dev->parent);
    1483                 :          0 :         spin_lock_irq(&dev->power.lock);
    1484                 :          0 :         dev->power.irq_safe = 1;
    1485                 :          0 :         spin_unlock_irq(&dev->power.lock);
    1486                 :          0 : }
    1487                 :            : EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
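
A sketch of a driver that marks its callbacks IRQ-safe at setup time so that synchronous
get/put is legal from its interrupt handler (foo_* names hypothetical):

        #include <linux/interrupt.h>
        #include <linux/pm_runtime.h>

        static void foo_mark_irq_safe(struct device *dev)
        {
                /* From now on the callbacks run under dev->power.lock with
                 * interrupts off, and the parent stays resumed permanently. */
                pm_runtime_irq_safe(dev);
        }

        static irqreturn_t foo_irq(int irq, void *data)
        {
                struct device *dev = data;

                pm_runtime_get_sync(dev);       /* safe in atomic context here */
                /* ... handle the interrupt ... */
                pm_runtime_mark_last_busy(dev);
                pm_runtime_put_autosuspend(dev);

                return IRQ_HANDLED;
        }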
    1488                 :            : 
    1489                 :            : /**
    1490                 :            :  * update_autosuspend - Handle a change to a device's autosuspend settings.
    1491                 :            :  * @dev: Device to handle.
    1492                 :            :  * @old_delay: The former autosuspend_delay value.
    1493                 :            :  * @old_use: The former use_autosuspend value.
    1494                 :            :  *
    1495                 :            :  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
    1496                 :            :  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
    1497                 :            :  *
    1498                 :            :  * This function must be called under dev->power.lock with interrupts disabled.
    1499                 :            :  */
    1500                 :        180 : static void update_autosuspend(struct device *dev, int old_delay, int old_use)
    1501                 :            : {
    1502                 :        180 :         int delay = dev->power.autosuspend_delay;
    1503                 :            : 
    1504                 :            :         /* Should runtime suspend be prevented now? */
    1505   [ +  +  +  - ]:        180 :         if (dev->power.use_autosuspend && delay < 0) {
    1506                 :            : 
    1507                 :            :                 /* If it used to be allowed then prevent it. */
    1508         [ +  - ]:         90 :                 if (!old_use || old_delay >= 0) {
    1509                 :         90 :                         atomic_inc(&dev->power.usage_count);
    1510                 :         90 :                         rpm_resume(dev, 0);
    1511                 :            :                 } else {
    1512                 :          0 :                         trace_rpm_usage_rcuidle(dev, 0);
    1513                 :            :                 }
    1514                 :            :         }
    1515                 :            : 
    1516                 :            :         /* Runtime suspend should be allowed now. */
    1517                 :            :         else {
    1518                 :            : 
    1519                 :            :                 /* If it used to be prevented then allow it. */
    1520         [ -  + ]:         90 :                 if (old_use && old_delay < 0)
    1521                 :          0 :                         atomic_dec(&dev->power.usage_count);
    1522                 :            : 
    1523                 :            :                 /* Maybe we can autosuspend now. */
    1524                 :         90 :                 rpm_idle(dev, RPM_AUTO);
    1525                 :            :         }
    1526                 :        180 : }
    1527                 :            : 
    1528                 :            : /**
    1529                 :            :  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
    1530                 :            :  * @dev: Device to handle.
    1531                 :            :  * @delay: Value of the new delay in milliseconds.
    1532                 :            :  *
    1533                 :            :  * Set the device's power.autosuspend_delay value.  If it changes to negative
    1534                 :            :  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
    1535                 :            :  * changes the other way, allow runtime suspends.
    1536                 :            :  */
    1537                 :         90 : void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
    1538                 :            : {
    1539                 :         90 :         int old_delay, old_use;
    1540                 :            : 
    1541                 :         90 :         spin_lock_irq(&dev->power.lock);
    1542                 :         90 :         old_delay = dev->power.autosuspend_delay;
    1543                 :         90 :         old_use = dev->power.use_autosuspend;
    1544                 :         90 :         dev->power.autosuspend_delay = delay;
    1545                 :         90 :         update_autosuspend(dev, old_delay, old_use);
    1546                 :         90 :         spin_unlock_irq(&dev->power.lock);
    1547                 :         90 : }
    1548                 :            : EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
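
A short sketch of the transitions update_autosuspend() handles when the delay is changed at run time (the concrete values are arbitrary; the device is assumed to already have use_autosuspend set):

#include <linux/pm_runtime.h>

static void foo_toggle_autosuspend_delay(struct device *dev)
{
	/*
	 * A negative delay with use_autosuspend set blocks runtime suspend:
	 * update_autosuspend() takes a usage reference and resumes the device.
	 */
	pm_runtime_set_autosuspend_delay(dev, -1);

	/*
	 * Restoring a non-negative delay drops that reference and sends an
	 * RPM_AUTO idle notification, so the device may suspend again once
	 * it has been idle for 2000 ms.
	 */
	pm_runtime_set_autosuspend_delay(dev, 2000);
}
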
    1549                 :            : 
    1550                 :            : /**
    1551                 :            :  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
    1552                 :            :  * @dev: Device to handle.
    1553                 :            :  * @use: New value for use_autosuspend.
    1554                 :            :  *
    1555                 :            :  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
    1556                 :            :  * suspends as needed.
    1557                 :            :  */
    1558                 :         90 : void __pm_runtime_use_autosuspend(struct device *dev, bool use)
    1559                 :            : {
    1560                 :         90 :         int old_delay, old_use;
    1561                 :            : 
    1562                 :         90 :         spin_lock_irq(&dev->power.lock);
    1563                 :         90 :         old_delay = dev->power.autosuspend_delay;
    1564                 :         90 :         old_use = dev->power.use_autosuspend;
    1565                 :         90 :         dev->power.use_autosuspend = use;
    1566                 :         90 :         update_autosuspend(dev, old_delay, old_use);
    1567                 :         90 :         spin_unlock_irq(&dev->power.lock);
    1568                 :         90 : }
    1569                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
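
Drivers normally reach this function through the pm_runtime_use_autosuspend() and pm_runtime_dont_use_autosuspend() wrappers from <linux/pm_runtime.h>. A typical setup and idle path, sketched with hypothetical foo_* helpers:

#include <linux/pm_runtime.h>

static void foo_enable_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_use_autosuspend(dev);	/* __pm_runtime_use_autosuspend(dev, true) */
	pm_runtime_enable(dev);
}

static void foo_io_done(struct device *dev)
{
	/* Let the autosuspend timer decide when to actually suspend. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
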
    1570                 :            : 
    1571                 :            : /**
    1572                 :            :  * pm_runtime_init - Initialize runtime PM fields in given device object.
    1573                 :            :  * @dev: Device object to initialize.
    1574                 :            :  */
    1575                 :       9372 : void pm_runtime_init(struct device *dev)
    1576                 :            : {
    1577                 :       9372 :         dev->power.runtime_status = RPM_SUSPENDED;
    1578                 :       9372 :         dev->power.idle_notification = false;
    1579                 :            : 
    1580                 :       9372 :         dev->power.disable_depth = 1;
    1581                 :       9372 :         atomic_set(&dev->power.usage_count, 0);
    1582                 :            : 
    1583                 :       9372 :         dev->power.runtime_error = 0;
    1584                 :            : 
    1585                 :       9372 :         atomic_set(&dev->power.child_count, 0);
    1586                 :       9372 :         pm_suspend_ignore_children(dev, false);
    1587                 :       9372 :         dev->power.runtime_auto = true;
    1588                 :            : 
    1589                 :       9372 :         dev->power.request_pending = false;
    1590                 :       9372 :         dev->power.request = RPM_REQ_NONE;
    1591                 :       9372 :         dev->power.deferred_resume = false;
    1592                 :       9372 :         INIT_WORK(&dev->power.work, pm_runtime_work);
    1593                 :            : 
    1594                 :       9372 :         dev->power.timer_expires = 0;
    1595                 :       9372 :         hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    1596                 :       9372 :         dev->power.suspend_timer.function = pm_suspend_timer_fn;
    1597                 :            : 
    1598                 :       9372 :         init_waitqueue_head(&dev->power.wait_queue);
    1599                 :       9372 : }
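
Note that pm_runtime_init() leaves the device in RPM_SUSPENDED with runtime PM disabled (disable_depth is 1), so a bus or driver must record the real hardware state and enable runtime PM itself. A minimal sketch of that hand-off in a hypothetical probe routine:

#include <linux/pm_runtime.h>

static int foo_bus_probe(struct device *dev)
{
	/*
	 * The core initialized this device as RPM_SUSPENDED with runtime PM
	 * disabled.  The hardware is powered up during probe, so record that
	 * before enabling runtime PM.
	 */
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}
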
    1600                 :            : 
    1601                 :            : /**
    1602                 :            :  * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
    1603                 :            :  * @dev: Device object to re-initialize.
    1604                 :            :  */
    1605                 :        204 : void pm_runtime_reinit(struct device *dev)
    1606                 :            : {
    1607         [ +  + ]:        204 :         if (!pm_runtime_enabled(dev)) {
    1608         [ -  + ]:         60 :                 if (dev->power.runtime_status == RPM_ACTIVE)
    1609                 :          0 :                         pm_runtime_set_suspended(dev);
    1610         [ -  + ]:         60 :                 if (dev->power.irq_safe) {
    1611                 :          0 :                         spin_lock_irq(&dev->power.lock);
    1612                 :          0 :                         dev->power.irq_safe = 0;
    1613                 :          0 :                         spin_unlock_irq(&dev->power.lock);
    1614         [ #  # ]:          0 :                         if (dev->parent)
    1615                 :          0 :                                 pm_runtime_put(dev->parent);
    1616                 :            :                 }
    1617                 :            :         }
    1618                 :        204 : }
    1619                 :            : 
    1620                 :            : /**
    1621                 :            :  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
    1622                 :            :  * @dev: Device object being removed from device hierarchy.
    1623                 :            :  */
    1624                 :          0 : void pm_runtime_remove(struct device *dev)
    1625                 :            : {
    1626                 :          0 :         __pm_runtime_disable(dev, false);
    1627                 :          0 :         pm_runtime_reinit(dev);
    1628                 :          0 : }
    1629                 :            : 
    1630                 :            : /**
    1631                 :            :  * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
    1632                 :            :  * @dev: Device whose driver is going to be removed.
    1633                 :            :  *
    1634                 :            :  * Check links from this device to any consumers and, if any of them have
    1635                 :            :  * active runtime PM references to the device, drop the device's usage counter
    1636                 :            :  * (as many times as needed).
    1637                 :            :  *
    1638                 :            :  * Links with the DL_FLAG_MANAGED flag unset are ignored.
    1639                 :            :  *
    1640                 :            :  * Since the device is guaranteed to be runtime-active at the point this is
    1641                 :            :  * called, nothing else needs to be done here.
    1642                 :            :  *
    1643                 :            :  * Moreover, this is called after device_links_busy() has returned 'false', so
    1644                 :            :  * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
    1645                 :            :  * therefore rpm_active can't be manipulated concurrently.
    1646                 :            :  */
    1647                 :          0 : void pm_runtime_clean_up_links(struct device *dev)
    1648                 :            : {
    1649                 :          0 :         struct device_link *link;
    1650                 :          0 :         int idx;
    1651                 :            : 
    1652                 :          0 :         idx = device_links_read_lock();
    1653                 :            : 
    1654         [ #  # ]:          0 :         list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
    1655                 :            :                                 device_links_read_lock_held()) {
    1656         [ #  # ]:          0 :                 if (!(link->flags & DL_FLAG_MANAGED))
    1657                 :          0 :                         continue;
    1658                 :            : 
    1659         [ #  # ]:          0 :                 while (refcount_dec_not_one(&link->rpm_active))
    1660                 :          0 :                         pm_runtime_put_noidle(dev);
    1661                 :            :         }
    1662                 :            : 
    1663                 :          0 :         device_links_read_unlock(idx);
    1664                 :          0 : }
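
The rpm_active references dropped here originate from managed device links with runtime PM handling enabled. A sketch of how a consumer might create such a link (error handling simplified; the helper name is hypothetical):

#include <linux/device.h>
#include <linux/errno.h>

static int foo_link_to_supplier(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	/*
	 * DL_FLAG_PM_RUNTIME makes the consumer's runtime PM take references
	 * on the supplier, tracked in link->rpm_active; DL_FLAG_RPM_ACTIVE
	 * additionally resumes the supplier and takes one reference up front.
	 */
	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	return link ? 0 : -EINVAL;
}
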
    1665                 :            : 
    1666                 :            : /**
    1667                 :            :  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
    1668                 :            :  * @dev: Consumer device.
    1669                 :            :  */
    1670                 :        690 : void pm_runtime_get_suppliers(struct device *dev)
    1671                 :            : {
    1672                 :        690 :         struct device_link *link;
    1673                 :        690 :         int idx;
    1674                 :            : 
    1675                 :        690 :         idx = device_links_read_lock();
    1676                 :            : 
    1677         [ -  + ]:        690 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
    1678                 :            :                                 device_links_read_lock_held())
    1679         [ #  # ]:          0 :                 if (link->flags & DL_FLAG_PM_RUNTIME) {
    1680                 :          0 :                         link->supplier_preactivated = true;
    1681                 :          0 :                         refcount_inc(&link->rpm_active);
    1682                 :          0 :                         pm_runtime_get_sync(link->supplier);
    1683                 :            :                 }
    1684                 :            : 
    1685                 :        690 :         device_links_read_unlock(idx);
    1686                 :        690 : }
    1687                 :            : 
    1688                 :            : /**
    1689                 :            :  * pm_runtime_put_suppliers - Drop references to supplier devices.
    1690                 :            :  * @dev: Consumer device.
    1691                 :            :  */
    1692                 :        690 : void pm_runtime_put_suppliers(struct device *dev)
    1693                 :            : {
    1694                 :        690 :         struct device_link *link;
    1695                 :        690 :         int idx;
    1696                 :            : 
    1697                 :        690 :         idx = device_links_read_lock();
    1698                 :            : 
    1699         [ -  + ]:        690 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
    1700                 :            :                                 device_links_read_lock_held())
    1701         [ #  # ]:          0 :                 if (link->supplier_preactivated) {
    1702                 :          0 :                         link->supplier_preactivated = false;
    1703         [ #  # ]:          0 :                         if (refcount_dec_not_one(&link->rpm_active))
    1704                 :          0 :                                 pm_runtime_put(link->supplier);
    1705                 :            :                 }
    1706                 :            : 
    1707                 :        690 :         device_links_read_unlock(idx);
    1708                 :        690 : }
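
Both helpers above are meant for the driver core rather than for individual drivers: the core brackets a consumer's probe with them so that suppliers linked with DL_FLAG_PM_RUNTIME are active while ->probe() runs. A simplified sketch of that pairing (the real sequencing lives in drivers/base/dd.c):

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Simplified sketch; not the actual driver-core code. */
static int probe_with_suppliers(struct device *dev, struct device_driver *drv)
{
	int ret;

	pm_runtime_get_suppliers(dev);	/* resume and pin DL_FLAG_PM_RUNTIME suppliers */
	ret = drv->probe(dev);
	pm_runtime_put_suppliers(dev);	/* drop the pre-activation references */

	return ret;
}
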
    1709                 :            : 
    1710                 :          0 : void pm_runtime_new_link(struct device *dev)
    1711                 :            : {
    1712                 :          0 :         spin_lock_irq(&dev->power.lock);
    1713                 :          0 :         dev->power.links_count++;
    1714                 :          0 :         spin_unlock_irq(&dev->power.lock);
    1715                 :          0 : }
    1716                 :            : 
    1717                 :          0 : void pm_runtime_drop_link(struct device *dev)
    1718                 :            : {
    1719                 :          0 :         spin_lock_irq(&dev->power.lock);
    1720         [ #  # ]:          0 :         WARN_ON(dev->power.links_count == 0);
    1721                 :          0 :         dev->power.links_count--;
    1722                 :          0 :         spin_unlock_irq(&dev->power.lock);
    1723                 :          0 : }
    1724                 :            : 
    1725                 :          0 : static bool pm_runtime_need_not_resume(struct device *dev)
    1726                 :            : {
    1727   [ #  #  #  # ]:          0 :         return atomic_read(&dev->power.usage_count) <= 1 &&
    1728         [ #  # ]:          0 :                 (atomic_read(&dev->power.child_count) == 0 ||
    1729                 :            :                  dev->power.ignore_children);
    1730                 :            : }
    1731                 :            : 
    1732                 :            : /**
    1733                 :            :  * pm_runtime_force_suspend - Force a device into suspend state if needed.
    1734                 :            :  * @dev: Device to suspend.
    1735                 :            :  *
    1736                 :            :  * Disable runtime PM so we can safely check the device's runtime PM status and,
    1737                 :            :  * if it is active, invoke its ->runtime_suspend callback to suspend it and
    1738                 :            :  * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
    1739                 :            :  * usage and children counters don't indicate that the device was in use before
    1740                 :            :  * the system-wide transition under way, decrement its parent's children counter
    1741                 :            :  * (if there is a parent).  Keep runtime PM disabled to preserve the state
    1742                 :            :  * unless we encounter errors.
    1743                 :            :  *
    1744                 :            :  * Typically, this function may be invoked from a system suspend callback to make
    1745                 :            :  * sure the device is put into a low-power state, and it should only be used during
    1746                 :            :  * system-wide PM transitions to sleep states.  It assumes that the analogous
    1747                 :            :  * pm_runtime_force_resume() will be used to resume the device.
    1748                 :            :  */
    1749                 :          0 : int pm_runtime_force_suspend(struct device *dev)
    1750                 :            : {
    1751                 :          0 :         int (*callback)(struct device *);
    1752                 :          0 :         int ret;
    1753                 :            : 
    1754                 :          0 :         pm_runtime_disable(dev);
    1755         [ #  # ]:          0 :         if (pm_runtime_status_suspended(dev))
    1756                 :            :                 return 0;
    1757                 :            : 
    1758                 :          0 :         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
    1759                 :            : 
    1760         [ #  # ]:          0 :         ret = callback ? callback(dev) : 0;
    1761         [ #  # ]:          0 :         if (ret)
    1762                 :          0 :                 goto err;
    1763                 :            : 
    1764                 :            :         /*
    1765                 :            :          * If the device can stay in suspend after the system-wide transition
    1766                 :            :          * to the working state that will follow, drop the children counter of
    1767                 :            :          * its parent, but set its status to RPM_SUSPENDED anyway in case this
    1768                 :            :          * function will be called again for it in the meantime.
    1769                 :            :          */
    1770         [ #  # ]:          0 :         if (pm_runtime_need_not_resume(dev))
    1771                 :          0 :                 pm_runtime_set_suspended(dev);
    1772                 :            :         else
    1773                 :          0 :                 __update_runtime_status(dev, RPM_SUSPENDED);
    1774                 :            : 
    1775                 :            :         return 0;
    1776                 :            : 
    1777                 :            : err:
    1778                 :          0 :         pm_runtime_enable(dev);
    1779                 :          0 :         return ret;
    1780                 :            : }
    1781                 :            : EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
    1782                 :            : 
    1783                 :            : /**
    1784                 :            :  * pm_runtime_force_resume - Force a device into resume state if needed.
    1785                 :            :  * @dev: Device to resume.
    1786                 :            :  *
    1787                 :            :  * Prior to invoking this function, we expect the user to have brought the device
    1788                 :            :  * into a low-power state by a call to pm_runtime_force_suspend().  Here we
    1789                 :            :  * reverse those actions and bring the device back to full power, if it is
    1790                 :            :  * expected to be used on system resume.  Otherwise, we defer the resume to be
    1791                 :            :  * managed via runtime PM.
    1792                 :            :  *
    1793                 :            :  * Typically this function may be invoked from a system resume callback.
    1794                 :            :  */
    1795                 :          0 : int pm_runtime_force_resume(struct device *dev)
    1796                 :            : {
    1797                 :          0 :         int (*callback)(struct device *);
    1798                 :          0 :         int ret = 0;
    1799                 :            : 
    1800   [ #  #  #  # ]:          0 :         if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
    1801                 :          0 :                 goto out;
    1802                 :            : 
    1803                 :            :         /*
    1804                 :            :          * The value of the parent's children counter is correct already, so
    1805                 :            :          * just update the status of the device.
    1806                 :            :          */
    1807                 :          0 :         __update_runtime_status(dev, RPM_ACTIVE);
    1808                 :            : 
    1809                 :          0 :         callback = RPM_GET_CALLBACK(dev, runtime_resume);
    1810                 :            : 
    1811         [ #  # ]:          0 :         ret = callback ? callback(dev) : 0;
    1812         [ #  # ]:          0 :         if (ret) {
    1813                 :          0 :                 pm_runtime_set_suspended(dev);
    1814                 :          0 :                 goto out;
    1815                 :            :         }
    1816                 :            : 
    1817                 :          0 :         pm_runtime_mark_last_busy(dev);
    1818                 :          0 : out:
    1819                 :          0 :         pm_runtime_enable(dev);
    1820                 :          0 :         return ret;
    1821                 :            : }
    1822                 :            : EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
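
A common way to consume this pair is to plug it straight into a driver's system sleep callbacks so that system suspend reuses the runtime PM callbacks. A sketch with hypothetical foo_runtime_* callbacks:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* ... put the hardware into its low-power state ... */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* ... bring the hardware back to full power ... */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
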

Generated by: LCOV version 1.14