LCOV - code coverage report
Current view: top level - drivers/base/power - runtime.c (source / functions)
Test: gcov_data_raspi2_real_modules_combined.info
Date: 2020-09-30 20:25:40

                  Hit    Total    Coverage
Lines:            319      513      62.2 %
Functions:         32       45      71.1 %
Branches:         198      453      43.7 %

           Branch data     Line data    Source code
       1                 :            : // SPDX-License-Identifier: GPL-2.0
       2                 :            : /*
       3                 :            :  * drivers/base/power/runtime.c - Helper functions for device runtime PM
       4                 :            :  *
       5                 :            :  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
       6                 :            :  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
       7                 :            :  */
       8                 :            : #include <linux/sched/mm.h>
       9                 :            : #include <linux/ktime.h>
      10                 :            : #include <linux/hrtimer.h>
      11                 :            : #include <linux/export.h>
      12                 :            : #include <linux/pm_runtime.h>
      13                 :            : #include <linux/pm_wakeirq.h>
      14                 :            : #include <trace/events/rpm.h>
      15                 :            : 
      16                 :            : #include "../base.h"
      17                 :            : #include "power.h"
      18                 :            : 
      19                 :            : typedef int (*pm_callback_t)(struct device *);
      20                 :            : 
      21                 :       2691 : static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
      22                 :            : {
      23                 :            :         pm_callback_t cb;
      24                 :            :         const struct dev_pm_ops *ops;
      25                 :            : 
      26         [ -  + ]:       2691 :         if (dev->pm_domain)
      27                 :          0 :                 ops = &dev->pm_domain->ops;
      28   [ +  -  +  + ]:       2691 :         else if (dev->type && dev->type->pm)
      29                 :            :                 ops = dev->type->pm;
      30   [ -  +  #  # ]:        207 :         else if (dev->class && dev->class->pm)
      31                 :            :                 ops = dev->class->pm;
      32   [ +  -  +  - ]:        207 :         else if (dev->bus && dev->bus->pm)
      33                 :        207 :                 ops = dev->bus->pm;
      34                 :            :         else
      35                 :            :                 ops = NULL;
      36                 :            : 
      37         [ +  - ]:       2691 :         if (ops)
      38                 :       2691 :                 cb = *(pm_callback_t *)((void *)ops + cb_offset);
      39                 :            :         else
      40                 :            :                 cb = NULL;
      41                 :            : 
       42   [ +  +  +  -  +  + ]:       2691 :         if (!cb && dev->driver && dev->driver->pm)
      43                 :        207 :                 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
      44                 :            : 
      45                 :       2691 :         return cb;
      46                 :            : }
      47                 :            : 
      48                 :            : #define RPM_GET_CALLBACK(dev, callback) \
      49                 :            :                 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
      50                 :            : 
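For context: the callback slots searched by __rpm_get_callback() are normally
populated by a bus type, PM domain, class, or by the driver itself through its
struct dev_pm_ops.  A minimal, hypothetical driver-side sketch (illustrative
"foo" names, not part of the covered file; assumes <linux/pm.h>) using the
standard SET_RUNTIME_PM_OPS() helper:

        static int foo_runtime_suspend(struct device *dev)
        {
                /* Put the hardware into a low-power state. */
                return 0;
        }

        static int foo_runtime_resume(struct device *dev)
        {
                /* Bring the hardware back to full power. */
                return 0;
        }

        static const struct dev_pm_ops foo_pm_ops = {
                SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
        };

        /*
         * The driver then sets .driver.pm = &foo_pm_ops, which is what the
         * dev->driver->pm fallback above ends up dereferencing.
         */
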
      51                 :            : static int rpm_resume(struct device *dev, int rpmflags);
      52                 :            : static int rpm_suspend(struct device *dev, int rpmflags);
      53                 :            : 
      54                 :            : /**
      55                 :            :  * update_pm_runtime_accounting - Update the time accounting of power states
      56                 :            :  * @dev: Device to update the accounting for
      57                 :            :  *
      58                 :            :  * In order to be able to have time accounting of the various power states
      59                 :            :  * (as used by programs such as PowerTOP to show the effectiveness of runtime
      60                 :            :  * PM), we need to track the time spent in each state.
      61                 :            :  * update_pm_runtime_accounting must be called each time before the
      62                 :            :  * runtime_status field is updated, to account the time in the old state
      63                 :            :  * correctly.
      64                 :            :  */
      65                 :      46581 : static void update_pm_runtime_accounting(struct device *dev)
      66                 :            : {
      67                 :            :         u64 now, last, delta;
      68                 :            : 
      69         [ +  + ]:      46581 :         if (dev->power.disable_depth > 0)
      70                 :            :                 return;
      71                 :            : 
      72                 :      42648 :         last = dev->power.accounting_timestamp;
      73                 :            : 
      74                 :      42648 :         now = ktime_get_mono_fast_ns();
      75                 :      42648 :         dev->power.accounting_timestamp = now;
      76                 :            : 
      77                 :            :         /*
      78                 :            :          * Because ktime_get_mono_fast_ns() is not monotonic during
      79                 :            :          * timekeeping updates, ensure that 'now' is after the last saved
       80                 :            :          * timestamp.
      81                 :            :          */
      82         [ +  - ]:      42648 :         if (now < last)
      83                 :            :                 return;
      84                 :            : 
      85                 :      42648 :         delta = now - last;
      86                 :            : 
      87         [ +  + ]:      42648 :         if (dev->power.runtime_status == RPM_SUSPENDED)
      88                 :      19668 :                 dev->power.suspended_time += delta;
      89                 :            :         else
      90                 :      22980 :                 dev->power.active_time += delta;
      91                 :            : }
      92                 :            : 
      93                 :            : static void __update_runtime_status(struct device *dev, enum rpm_status status)
      94                 :            : {
      95                 :      46374 :         update_pm_runtime_accounting(dev);
      96                 :      46374 :         dev->power.runtime_status = status;
      97                 :            : }
      98                 :            : 
      99                 :          0 : static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
     100                 :            : {
     101                 :            :         u64 time;
     102                 :            :         unsigned long flags;
     103                 :            : 
     104                 :          0 :         spin_lock_irqsave(&dev->power.lock, flags);
     105                 :            : 
     106                 :          0 :         update_pm_runtime_accounting(dev);
     107         [ #  # ]:          0 :         time = suspended ? dev->power.suspended_time : dev->power.active_time;
     108                 :            : 
     109                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
     110                 :            : 
     111                 :          0 :         return time;
     112                 :            : }
     113                 :            : 
     114                 :          0 : u64 pm_runtime_active_time(struct device *dev)
     115                 :            : {
     116                 :          0 :         return rpm_get_accounted_time(dev, false);
     117                 :            : }
     118                 :            : 
     119                 :          0 : u64 pm_runtime_suspended_time(struct device *dev)
     120                 :            : {
     121                 :          0 :         return rpm_get_accounted_time(dev, true);
     122                 :            : }
     123                 :            : EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
     124                 :            : 
     125                 :            : /**
     126                 :            :  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
     127                 :            :  * @dev: Device to handle.
     128                 :            :  */
     129                 :            : static void pm_runtime_deactivate_timer(struct device *dev)
     130                 :            : {
      131   [ -  +  #  #  -  +  #  #  -  +  -  +  #  # ]:     154467 :         if (dev->power.timer_expires > 0) {
     132                 :          0 :                 hrtimer_try_to_cancel(&dev->power.suspend_timer);
     133                 :          0 :                 dev->power.timer_expires = 0;
     134                 :            :         }
     135                 :            : }
     136                 :            : 
     137                 :            : /**
     138                 :            :  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
     139                 :            :  * @dev: Device to handle.
     140                 :            :  */
     141                 :            : static void pm_runtime_cancel_pending(struct device *dev)
     142                 :            : {
     143                 :            :         pm_runtime_deactivate_timer(dev);
     144                 :            :         /*
     145                 :            :          * In case there's a request pending, make sure its work function will
     146                 :            :          * return without doing anything.
     147                 :            :          */
     148                 :      21738 :         dev->power.request = RPM_REQ_NONE;
     149                 :            : }
     150                 :            : 
     151                 :            : /*
     152                 :            :  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
     153                 :            :  * @dev: Device to handle.
     154                 :            :  *
     155                 :            :  * Compute the autosuspend-delay expiration time based on the device's
     156                 :            :  * power.last_busy time.  If the delay has already expired or is disabled
     157                 :            :  * (negative) or the power.use_autosuspend flag isn't set, return 0.
     158                 :            :  * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
     159                 :            :  *
     160                 :            :  * This function may be called either with or without dev->power.lock held.
     161                 :            :  * Either way it can be racy, since power.last_busy may be updated at any time.
     162                 :            :  */
     163                 :     798960 : u64 pm_runtime_autosuspend_expiration(struct device *dev)
     164                 :            : {
     165                 :            :         int autosuspend_delay;
     166                 :            :         u64 expires;
     167                 :            : 
     168         [ +  + ]:     798960 :         if (!dev->power.use_autosuspend)
     169                 :            :                 return 0;
     170                 :            : 
     171                 :     775981 :         autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
     172         [ +  - ]:     775981 :         if (autosuspend_delay < 0)
     173                 :            :                 return 0;
     174                 :            : 
     175                 :            :         expires  = READ_ONCE(dev->power.last_busy);
     176                 :     775981 :         expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
     177         [ +  - ]:     775981 :         if (expires > ktime_get_mono_fast_ns())
     178                 :     775981 :                 return expires; /* Expires in the future */
     179                 :            : 
     180                 :            :         return 0;
     181                 :            : }
     182                 :            : EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
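
For context, the expiration time computed above is driven by the driver-facing
autosuspend API.  A hedged usage sketch (illustrative "foo" names, not part of
the covered file; assumes <linux/pm_runtime.h>) showing the usual call pattern:

        static void foo_enable_autosuspend(struct device *dev)
        {
                pm_runtime_set_autosuspend_delay(dev, 2000);    /* 2 s delay */
                pm_runtime_use_autosuspend(dev);
                pm_runtime_enable(dev);
        }

        static void foo_io_done(struct device *dev)
        {
                /*
                 * Refresh power.last_busy, then drop the usage count; the
                 * device is suspended only once the delay computed by
                 * pm_runtime_autosuspend_expiration() has elapsed.
                 */
                pm_runtime_mark_last_busy(dev);
                pm_runtime_put_autosuspend(dev);
        }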
     183                 :            : 
     184                 :          0 : static int dev_memalloc_noio(struct device *dev, void *data)
     185                 :            : {
     186                 :          0 :         return dev->power.memalloc_noio;
     187                 :            : }
     188                 :            : 
     189                 :            : /*
     190                 :            :  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
     191                 :            :  * @dev: Device to handle.
     192                 :            :  * @enable: True for setting the flag and False for clearing the flag.
     193                 :            :  *
     194                 :            :  * Set the flag for all devices in the path from the device to the
     195                 :            :  * root device in the device tree if @enable is true, otherwise clear
     196                 :            :  * the flag for devices in the path whose siblings don't set the flag.
     197                 :            :  *
      198                 :            :  * The function should only be called by a block device or network
      199                 :            :  * device driver to solve the deadlock problem during runtime
      200                 :            :  * resume/suspend:
      201                 :            :  *
      202                 :            :  *     If memory allocation with GFP_KERNEL is called inside the runtime
      203                 :            :  *     resume/suspend callback of any one of its ancestors (or the
      204                 :            :  *     block device itself), a deadlock may be triggered inside the
      205                 :            :  *     memory allocation, since it might not complete until the block
      206                 :            :  *     device becomes active and the involved page I/O finishes. This
      207                 :            :  *     situation was first pointed out by Alan Stern. Network devices
      208                 :            :  *     are involved in the iSCSI kind of situation.
     209                 :            :  *
      210                 :            :  * The dev_hotplug_mutex lock is held in the function to handle the
      211                 :            :  * hotplug race, because pm_runtime_set_memalloc_noio() may be called
      212                 :            :  * in async probe().
      213                 :            :  *
      214                 :            :  * The function should be called between device_add() and device_del()
      215                 :            :  * on the affected device (block/network device).
     216                 :            :  */
     217                 :       5589 : void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
     218                 :            : {
     219                 :            :         static DEFINE_MUTEX(dev_hotplug_mutex);
     220                 :            : 
     221                 :       5589 :         mutex_lock(&dev_hotplug_mutex);
     222                 :            :         for (;;) {
     223                 :            :                 bool enabled;
     224                 :            : 
     225                 :            :                 /* hold power lock since bitfield is not SMP-safe. */
     226                 :            :                 spin_lock_irq(&dev->power.lock);
     227                 :       7866 :                 enabled = dev->power.memalloc_noio;
     228                 :       7866 :                 dev->power.memalloc_noio = enable;
     229                 :            :                 spin_unlock_irq(&dev->power.lock);
     230                 :            : 
     231                 :            :                 /*
      232                 :            :                  * No need to enable ancestors any more if the device
      233                 :            :                  * has already been enabled.
     234                 :            :                  */
     235         [ +  + ]:       7866 :                 if (enabled && enable)
     236                 :            :                         break;
     237                 :            : 
     238                 :       7659 :                 dev = dev->parent;
     239                 :            : 
     240                 :            :                 /*
      241                 :            :                  * Clear the flag of the parent device only if none of
      242                 :            :                  * its children has the flag set, because the ancestor's
      243                 :            :                  * flag may have been set by any one of the descendants.
     244                 :            :                  */
      245   [ +  +  +  -  #  # ]:       7659 :                 if (!dev || (!enable &&
     246                 :          0 :                              device_for_each_child(dev, NULL,
     247                 :            :                                                    dev_memalloc_noio)))
     248                 :            :                         break;
     249                 :            :         }
     250                 :       5589 :         mutex_unlock(&dev_hotplug_mutex);
     251                 :       5589 : }
     252                 :            : EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
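
As the comment above says, the caller is expected to be a block or network
device driver.  A hedged sketch of such usage (hypothetical helper names, not
part of the covered file) bracketing registration:

        static int foo_register(struct device *dev)
        {
                int ret = device_add(dev);

                if (ret)
                        return ret;

                /*
                 * Mark dev and its ancestors so their runtime PM callbacks
                 * avoid GFP_KERNEL allocations that could deadlock on page I/O.
                 */
                pm_runtime_set_memalloc_noio(dev, true);
                return 0;
        }

        static void foo_unregister(struct device *dev)
        {
                pm_runtime_set_memalloc_noio(dev, false);
                device_del(dev);
        }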
     253                 :            : 
     254                 :            : /**
     255                 :            :  * rpm_check_suspend_allowed - Test whether a device may be suspended.
     256                 :            :  * @dev: Device to test.
     257                 :            :  */
     258                 :    1685858 : static int rpm_check_suspend_allowed(struct device *dev)
     259                 :            : {
     260                 :            :         int retval = 0;
     261                 :            : 
     262         [ +  + ]:    1685858 :         if (dev->power.runtime_error)
     263                 :            :                 retval = -EINVAL;
     264         [ +  + ]:    1685858 :         else if (dev->power.disable_depth > 0)
     265                 :            :                 retval = -EACCES;
     266         [ +  + ]:     868218 :         else if (atomic_read(&dev->power.usage_count) > 0)
     267                 :            :                 retval = -EAGAIN;
     268   [ +  +  +  + ]:    1647188 :         else if (!dev->power.ignore_children &&
     269                 :     823177 :                         atomic_read(&dev->power.child_count))
     270                 :            :                 retval = -EBUSY;
     271                 :            : 
     272                 :            :         /* Pending resume requests take precedence over suspends. */
     273         [ -  + ]:     822976 :         else if ((dev->power.deferred_resume
     274         [ #  # ]:          0 :                         && dev->power.runtime_status == RPM_SUSPENDING)
     275         [ +  + ]:     822976 :             || (dev->power.request_pending
     276         [ +  - ]:        207 :                         && dev->power.request == RPM_REQ_RESUME))
     277                 :            :                 retval = -EAGAIN;
     278         [ +  + ]:     822976 :         else if (__dev_pm_qos_resume_latency(dev) == 0)
     279                 :            :                 retval = -EPERM;
     280         [ +  + ]:     822974 :         else if (dev->power.runtime_status == RPM_SUSPENDED)
     281                 :            :                 retval = 1;
     282                 :            : 
     283                 :    1685858 :         return retval;
     284                 :            : }
     285                 :            : 
     286                 :       3933 : static int rpm_get_suppliers(struct device *dev)
     287                 :            : {
     288                 :            :         struct device_link *link;
     289                 :            : 
     290         [ -  + ]:       3933 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
     291                 :            :                                 device_links_read_lock_held()) {
     292                 :            :                 int retval;
     293                 :            : 
     294   [ #  #  #  # ]:          0 :                 if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
     295                 :          0 :                     READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
     296                 :          0 :                         continue;
     297                 :            : 
     298                 :          0 :                 retval = pm_runtime_get_sync(link->supplier);
     299                 :            :                 /* Ignore suppliers with disabled runtime PM. */
     300         [ #  # ]:          0 :                 if (retval < 0 && retval != -EACCES) {
     301                 :          0 :                         pm_runtime_put_noidle(link->supplier);
     302                 :          0 :                         return retval;
     303                 :            :                 }
     304                 :          0 :                 refcount_inc(&link->rpm_active);
     305                 :            :         }
     306                 :            :         return 0;
     307                 :            : }
     308                 :            : 
     309                 :          0 : static void rpm_put_suppliers(struct device *dev)
     310                 :            : {
     311                 :            :         struct device_link *link;
     312                 :            : 
     313         [ #  # ]:          0 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
     314                 :            :                                 device_links_read_lock_held()) {
     315         [ #  # ]:          0 :                 if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
     316                 :          0 :                         continue;
     317                 :            : 
     318         [ #  # ]:          0 :                 while (refcount_dec_not_one(&link->rpm_active))
     319                 :          0 :                         pm_runtime_put(link->supplier);
     320                 :            :         }
     321                 :          0 : }
     322                 :            : 
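rpm_get_suppliers() and rpm_put_suppliers() only act on device links created
with DL_FLAG_PM_RUNTIME.  A hypothetical consumer driver (illustrative names,
not part of the covered file) would create such a link roughly as follows:

        static int foo_link_supplier(struct device *consumer,
                                     struct device *supplier)
        {
                struct device_link *link;

                /*
                 * DL_FLAG_PM_RUNTIME makes rpm_resume() of the consumer bump
                 * the supplier's usage count via rpm_get_suppliers() above.
                 */
                link = device_link_add(consumer, supplier,
                                       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
                return link ? 0 : -ENODEV;
        }
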
     323                 :            : /**
     324                 :            :  * __rpm_callback - Run a given runtime PM callback for a given device.
     325                 :            :  * @cb: Runtime PM callback to run.
     326                 :            :  * @dev: Device to run the callback for.
     327                 :            :  */
     328                 :       1242 : static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
     329                 :            :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     330                 :            : {
     331                 :            :         int retval, idx;
     332                 :       1242 :         bool use_links = dev->power.links_count > 0;
     333                 :            : 
     334         [ -  + ]:       1242 :         if (dev->power.irq_safe) {
     335                 :            :                 spin_unlock(&dev->power.lock);
     336                 :            :         } else {
     337                 :            :                 spin_unlock_irq(&dev->power.lock);
     338                 :            : 
     339                 :            :                 /*
     340                 :            :                  * Resume suppliers if necessary.
     341                 :            :                  *
     342                 :            :                  * The device's runtime PM status cannot change until this
     343                 :            :                  * routine returns, so it is safe to read the status outside of
     344                 :            :                  * the lock.
     345                 :            :                  */
     346   [ -  +  #  # ]:       1242 :                 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
     347                 :          0 :                         idx = device_links_read_lock();
     348                 :            : 
     349                 :          0 :                         retval = rpm_get_suppliers(dev);
     350         [ #  # ]:          0 :                         if (retval)
     351                 :            :                                 goto fail;
     352                 :            : 
     353                 :          0 :                         device_links_read_unlock(idx);
     354                 :            :                 }
     355                 :            :         }
     356                 :            : 
     357                 :       1242 :         retval = cb(dev);
     358                 :            : 
     359         [ -  + ]:       1242 :         if (dev->power.irq_safe) {
     360                 :            :                 spin_lock(&dev->power.lock);
     361                 :            :         } else {
     362                 :            :                 /*
     363                 :            :                  * If the device is suspending and the callback has returned
     364                 :            :                  * success, drop the usage counters of the suppliers that have
     365                 :            :                  * been reference counted on its resume.
     366                 :            :                  *
     367                 :            :                  * Do that if resume fails too.
     368                 :            :                  */
     369         [ -  + ]:       1242 :                 if (use_links
     370   [ #  #  #  # ]:          0 :                     && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
     371   [ #  #  #  # ]:          0 :                     || (dev->power.runtime_status == RPM_RESUMING && retval))) {
     372                 :          0 :                         idx = device_links_read_lock();
     373                 :            : 
     374                 :            :  fail:
     375                 :          0 :                         rpm_put_suppliers(dev);
     376                 :            : 
     377                 :          0 :                         device_links_read_unlock(idx);
     378                 :            :                 }
     379                 :            : 
     380                 :            :                 spin_lock_irq(&dev->power.lock);
     381                 :            :         }
     382                 :            : 
     383                 :       1242 :         return retval;
     384                 :            : }
     385                 :            : 
     386                 :            : /**
     387                 :            :  * rpm_idle - Notify device bus type if the device can be suspended.
     388                 :            :  * @dev: Device to notify the bus type about.
     389                 :            :  * @rpmflags: Flag bits.
     390                 :            :  *
     391                 :            :  * Check if the device's runtime PM status allows it to be suspended.  If
     392                 :            :  * another idle notification has been started earlier, return immediately.  If
     393                 :            :  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
     394                 :            :  * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
     395                 :            :  * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
     396                 :            :  *
     397                 :            :  * This function must be called under dev->power.lock with interrupts disabled.
     398                 :            :  */
     399                 :     110354 : static int rpm_idle(struct device *dev, int rpmflags)
     400                 :            : {
     401                 :            :         int (*callback)(struct device *);
     402                 :            :         int retval;
     403                 :            : 
     404                 :     110354 :         trace_rpm_idle_rcuidle(dev, rpmflags);
     405                 :     110354 :         retval = rpm_check_suspend_allowed(dev);
     406         [ +  + ]:     110354 :         if (retval < 0)
     407                 :            :                 ;       /* Conditions are wrong. */
     408                 :            : 
     409                 :            :         /* Idle notifications are allowed only in the RPM_ACTIVE state. */
     410         [ +  + ]:      25257 :         else if (dev->power.runtime_status != RPM_ACTIVE)
     411                 :            :                 retval = -EAGAIN;
     412                 :            : 
     413                 :            :         /*
     414                 :            :          * Any pending request other than an idle notification takes
     415                 :            :          * precedence over us, except that the timer may be running.
     416                 :            :          */
     417   [ +  +  +  - ]:      23808 :         else if (dev->power.request_pending &&
     418                 :        207 :             dev->power.request > RPM_REQ_IDLE)
     419                 :            :                 retval = -EAGAIN;
     420                 :            : 
     421                 :            :         /* Act as though RPM_NOWAIT is always set. */
     422         [ -  + ]:      23601 :         else if (dev->power.idle_notification)
     423                 :            :                 retval = -EINPROGRESS;
     424         [ +  + ]:     110354 :         if (retval)
     425                 :            :                 goto out;
     426                 :            : 
     427                 :            :         /* Pending requests need to be canceled. */
     428                 :      23601 :         dev->power.request = RPM_REQ_NONE;
     429                 :            : 
     430         [ +  + ]:      23601 :         if (dev->power.no_callbacks)
     431                 :            :                 goto out;
     432                 :            : 
     433                 :            :         /* Carry out an asynchronous or a synchronous idle notification. */
     434         [ +  + ]:       3105 :         if (rpmflags & RPM_ASYNC) {
     435                 :       1656 :                 dev->power.request = RPM_REQ_IDLE;
     436         [ +  + ]:       1656 :                 if (!dev->power.request_pending) {
     437                 :       1449 :                         dev->power.request_pending = true;
     438                 :       1449 :                         queue_work(pm_wq, &dev->power.work);
     439                 :            :                 }
     440                 :       1656 :                 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
     441                 :       1656 :                 return 0;
     442                 :            :         }
     443                 :            : 
     444                 :       1449 :         dev->power.idle_notification = true;
     445                 :            : 
     446                 :       1449 :         callback = RPM_GET_CALLBACK(dev, runtime_idle);
     447                 :            : 
     448         [ -  + ]:       1449 :         if (callback)
     449                 :          0 :                 retval = __rpm_callback(callback, dev);
     450                 :            : 
     451                 :       1449 :         dev->power.idle_notification = false;
     452                 :       1449 :         wake_up_all(&dev->power.wait_queue);
     453                 :            : 
     454                 :            :  out:
     455                 :     108698 :         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
     456         [ +  + ]:     108698 :         return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
     457                 :            : }
     458                 :            : 
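Because a missing ->runtime_idle callback, or one that returns 0, falls
through to rpm_suspend(dev, RPM_AUTO), a driver can veto or permit the suspend
from that callback.  A hypothetical sketch (illustrative names, not part of
the covered file):

        struct foo_priv {
                bool dma_in_flight;
        };

        static int foo_runtime_idle(struct device *dev)
        {
                struct foo_priv *priv = dev_get_drvdata(dev);

                if (priv->dma_in_flight)
                        return -EBUSY;  /* not idle yet, stay RPM_ACTIVE */

                return 0;               /* proceed to autosuspend */
        }
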
     459                 :            : /**
     460                 :            :  * rpm_callback - Run a given runtime PM callback for a given device.
     461                 :            :  * @cb: Runtime PM callback to run.
     462                 :            :  * @dev: Device to run the callback for.
     463                 :            :  */
     464                 :       1242 : static int rpm_callback(int (*cb)(struct device *), struct device *dev)
     465                 :            : {
     466                 :            :         int retval;
     467                 :            : 
     468         [ +  - ]:       1242 :         if (!cb)
     469                 :            :                 return -ENOSYS;
     470                 :            : 
     471         [ -  + ]:       1242 :         if (dev->power.memalloc_noio) {
     472                 :            :                 unsigned int noio_flag;
     473                 :            : 
     474                 :            :                 /*
     475                 :            :                  * Deadlock might be caused if memory allocation with
     476                 :            :                  * GFP_KERNEL happens inside runtime_suspend and
     477                 :            :                  * runtime_resume callbacks of one block device's
     478                 :            :                  * ancestor or the block device itself. Network
     479                 :            :                  * device might be thought as part of iSCSI block
     480                 :            :                  * device, so network device and its ancestor should
     481                 :            :                  * be marked as memalloc_noio too.
     482                 :            :                  */
     483                 :            :                 noio_flag = memalloc_noio_save();
     484                 :          0 :                 retval = __rpm_callback(cb, dev);
     485                 :            :                 memalloc_noio_restore(noio_flag);
     486                 :            :         } else {
     487                 :       1242 :                 retval = __rpm_callback(cb, dev);
     488                 :            :         }
     489                 :            : 
     490                 :       1242 :         dev->power.runtime_error = retval;
     491         [ +  - ]:       1242 :         return retval != -EACCES ? retval : -EIO;
     492                 :            : }
     493                 :            : 
     494                 :            : /**
     495                 :            :  * rpm_suspend - Carry out runtime suspend of given device.
     496                 :            :  * @dev: Device to suspend.
     497                 :            :  * @rpmflags: Flag bits.
     498                 :            :  *
     499                 :            :  * Check if the device's runtime PM status allows it to be suspended.
     500                 :            :  * Cancel a pending idle notification, autosuspend or suspend. If
     501                 :            :  * another suspend has been started earlier, either return immediately
     502                 :            :  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
     503                 :            :  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
     504                 :            :  * otherwise run the ->runtime_suspend() callback directly. When
      505                 :            :  * otherwise run the ->runtime_suspend() callback directly. If
      506                 :            :  * ->runtime_suspend succeeds and a deferred resume was requested while
      507                 :            :  * the callback was running, carry it out; otherwise send an idle
     508                 :            :  * ignore_children of parent->power and irq_safe of dev->power are not set).
     509                 :            :  * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
     510                 :            :  * flag is set and the next autosuspend-delay expiration time is in the
     511                 :            :  * future, schedule another autosuspend attempt.
     512                 :            :  *
     513                 :            :  * This function must be called under dev->power.lock with interrupts disabled.
     514                 :            :  */
     515                 :    1575507 : static int rpm_suspend(struct device *dev, int rpmflags)
     516                 :            :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     517                 :            : {
     518                 :            :         int (*callback)(struct device *);
     519                 :            :         struct device *parent = NULL;
     520                 :            :         int retval;
     521                 :            : 
     522                 :    1575507 :         trace_rpm_suspend_rcuidle(dev, rpmflags);
     523                 :            : 
     524                 :            :  repeat:
     525                 :    1575507 :         retval = rpm_check_suspend_allowed(dev);
     526                 :            : 
     527         [ +  + ]:    1575506 :         if (retval < 0)
     528                 :            :                 ;       /* Conditions are wrong. */
     529                 :            : 
     530                 :            :         /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
     531   [ -  +  #  # ]:     797719 :         else if (dev->power.runtime_status == RPM_RESUMING &&
     532                 :          0 :             !(rpmflags & RPM_ASYNC))
     533                 :            :                 retval = -EAGAIN;
     534         [ +  + ]:    1575506 :         if (retval)
     535                 :            :                 goto out;
     536                 :            : 
     537                 :            :         /* If the autosuspend_delay time hasn't expired yet, reschedule. */
     538         [ +  - ]:     797718 :         if ((rpmflags & RPM_AUTO)
     539         [ +  + ]:     797719 :             && dev->power.runtime_status != RPM_SUSPENDING) {
     540                 :     797718 :                 u64 expires = pm_runtime_autosuspend_expiration(dev);
     541                 :            : 
     542         [ +  + ]:     797719 :                 if (expires != 0) {
     543                 :            :                         /* Pending requests need to be canceled. */
     544                 :     775981 :                         dev->power.request = RPM_REQ_NONE;
     545                 :            : 
     546                 :            :                         /*
     547                 :            :                          * Optimization: If the timer is already running and is
     548                 :            :                          * set to expire at or before the autosuspend delay,
     549                 :            :                          * avoid the overhead of resetting it.  Just let it
     550                 :            :                          * expire; pm_suspend_timer_fn() will take care of the
     551                 :            :                          * rest.
     552                 :            :                          */
     553   [ +  +  -  + ]:    1545056 :                         if (!(dev->power.timer_expires &&
     554                 :     769075 :                                         dev->power.timer_expires <= expires)) {
     555                 :            :                                 /*
     556                 :            :                                  * We add a slack of 25% to gather wakeups
     557                 :            :                                  * without sacrificing the granularity.
     558                 :            :                                  */
     559                 :       6906 :                                 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
     560                 :            :                                                     (NSEC_PER_MSEC >> 2);
     561                 :            : 
     562                 :       6906 :                                 dev->power.timer_expires = expires;
     563                 :      13812 :                                 hrtimer_start_range_ns(&dev->power.suspend_timer,
     564                 :            :                                                 ns_to_ktime(expires),
     565                 :            :                                                 slack,
     566                 :            :                                                 HRTIMER_MODE_ABS);
     567                 :            :                         }
     568                 :     775981 :                         dev->power.timer_autosuspends = 1;
     569                 :     775981 :                         goto out;
     570                 :            :                 }
     571                 :            :         }
     572                 :            : 
     573                 :            :         /* Other scheduled or pending requests need to be canceled. */
     574                 :            :         pm_runtime_cancel_pending(dev);
     575                 :            : 
     576         [ -  + ]:      21738 :         if (dev->power.runtime_status == RPM_SUSPENDING) {
     577                 :          0 :                 DEFINE_WAIT(wait);
     578                 :            : 
     579         [ #  # ]:          0 :                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
     580                 :            :                         retval = -EINPROGRESS;
     581                 :          0 :                         goto out;
     582                 :            :                 }
     583                 :            : 
     584         [ #  # ]:          0 :                 if (dev->power.irq_safe) {
     585                 :            :                         spin_unlock(&dev->power.lock);
     586                 :            : 
     587                 :          0 :                         cpu_relax();
     588                 :            : 
     589                 :            :                         spin_lock(&dev->power.lock);
     590                 :          0 :                         goto repeat;
     591                 :            :                 }
     592                 :            : 
     593                 :            :                 /* Wait for the other suspend running in parallel with us. */
     594                 :            :                 for (;;) {
     595                 :          0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
     596                 :            :                                         TASK_UNINTERRUPTIBLE);
     597         [ #  # ]:          0 :                         if (dev->power.runtime_status != RPM_SUSPENDING)
     598                 :            :                                 break;
     599                 :            : 
     600                 :            :                         spin_unlock_irq(&dev->power.lock);
     601                 :            : 
     602                 :          0 :                         schedule();
     603                 :            : 
     604                 :            :                         spin_lock_irq(&dev->power.lock);
     605                 :            :                 }
     606                 :          0 :                 finish_wait(&dev->power.wait_queue, &wait);
     607                 :          0 :                 goto repeat;
     608                 :            :         }
     609                 :            : 
     610         [ +  + ]:      21738 :         if (dev->power.no_callbacks)
     611                 :            :                 goto no_callback;       /* Assume success. */
     612                 :            : 
     613                 :            :         /* Carry out an asynchronous or a synchronous suspend. */
     614         [ -  + ]:       1242 :         if (rpmflags & RPM_ASYNC) {
     615         [ #  # ]:          0 :                 dev->power.request = (rpmflags & RPM_AUTO) ?
     616                 :            :                     RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
     617         [ #  # ]:          0 :                 if (!dev->power.request_pending) {
     618                 :          0 :                         dev->power.request_pending = true;
     619                 :          0 :                         queue_work(pm_wq, &dev->power.work);
     620                 :            :                 }
     621                 :            :                 goto out;
     622                 :            :         }
     623                 :            : 
     624                 :            :         __update_runtime_status(dev, RPM_SUSPENDING);
     625                 :            : 
     626                 :       1242 :         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
     627                 :            : 
     628                 :       1242 :         dev_pm_enable_wake_irq_check(dev, true);
     629                 :       1242 :         retval = rpm_callback(callback, dev);
     630         [ -  + ]:       1242 :         if (retval)
     631                 :            :                 goto fail;
     632                 :            : 
     633                 :            :  no_callback:
     634                 :            :         __update_runtime_status(dev, RPM_SUSPENDED);
     635                 :            :         pm_runtime_deactivate_timer(dev);
     636                 :            : 
     637         [ +  - ]:      20496 :         if (dev->parent) {
     638                 :            :                 parent = dev->parent;
     639                 :      20496 :                 atomic_add_unless(&parent->power.child_count, -1, 0);
     640                 :            :         }
     641                 :      20496 :         wake_up_all(&dev->power.wait_queue);
     642                 :            : 
     643         [ -  + ]:      20496 :         if (dev->power.deferred_resume) {
     644                 :          0 :                 dev->power.deferred_resume = false;
     645                 :          0 :                 rpm_resume(dev, 0);
     646                 :            :                 retval = -EAGAIN;
     647                 :          0 :                 goto out;
     648                 :            :         }
     649                 :            : 
     650                 :            :         /* Maybe the parent is now able to suspend. */
      651   [ +  -  +  -  +  - ]:      20496 :         if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
     652                 :            :                 spin_unlock(&dev->power.lock);
     653                 :            : 
     654                 :            :                 spin_lock(&parent->power.lock);
     655                 :      20496 :                 rpm_idle(parent, RPM_ASYNC);
     656                 :            :                 spin_unlock(&parent->power.lock);
     657                 :            : 
     658                 :            :                 spin_lock(&dev->power.lock);
     659                 :            :         }
     660                 :            : 
     661                 :            :  out:
     662                 :    1575507 :         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
     663                 :            : 
     664                 :    1575505 :         return retval;
     665                 :            : 
     666                 :            :  fail:
     667                 :       1242 :         dev_pm_disable_wake_irq_check(dev);
     668                 :            :         __update_runtime_status(dev, RPM_ACTIVE);
     669                 :       1242 :         dev->power.deferred_resume = false;
     670                 :       1242 :         wake_up_all(&dev->power.wait_queue);
     671                 :            : 
     672         [ +  - ]:       1242 :         if (retval == -EAGAIN || retval == -EBUSY) {
     673                 :       1242 :                 dev->power.runtime_error = 0;
     674                 :            : 
     675                 :            :                 /*
     676                 :            :                  * If the callback routine failed an autosuspend, and
     677                 :            :                  * if the last_busy time has been updated so that there
     678                 :            :                  * is a new autosuspend expiration time, automatically
     679                 :            :                  * reschedule another autosuspend.
     680                 :            :                  */
     681   [ -  +  -  + ]:       2484 :                 if ((rpmflags & RPM_AUTO) &&
     682                 :       1242 :                     pm_runtime_autosuspend_expiration(dev) != 0)
     683                 :            :                         goto repeat;
     684                 :            :         } else {
     685                 :            :                 pm_runtime_cancel_pending(dev);
     686                 :            :         }
     687                 :            :         goto out;
     688                 :            : }
     689                 :            : 
     690                 :            : /**
     691                 :            :  * rpm_resume - Carry out runtime resume of given device.
     692                 :            :  * @dev: Device to resume.
     693                 :            :  * @rpmflags: Flag bits.
     694                 :            :  *
     695                 :            :  * Check if the device's runtime PM status allows it to be resumed.  Cancel
     696                 :            :  * any scheduled or pending requests.  If another resume has been started
     697                 :            :  * earlier, either return immediately or wait for it to finish, depending on the
     698                 :            :  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
     699                 :            :  * parallel with this function, either tell the other process to resume after
     700                 :            :  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
     701                 :            :  * flag is set then queue a resume request; otherwise run the
     702                 :            :  * ->runtime_resume() callback directly.  Queue an idle notification for the
     703                 :            :  * device if the resume succeeded.
     704                 :            :  *
     705                 :            :  * This function must be called under dev->power.lock with interrupts disabled.
     706                 :            :  */
     707                 :    1671513 : static int rpm_resume(struct device *dev, int rpmflags)
     708                 :            :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     709                 :            : {
     710                 :            :         int (*callback)(struct device *);
     711                 :            :         struct device *parent = NULL;
     712                 :            :         int retval = 0;
     713                 :            : 
     714                 :    1671513 :         trace_rpm_resume_rcuidle(dev, rpmflags);
     715                 :            : 
     716                 :            :  repeat:
     717         [ +  - ]:    1671517 :         if (dev->power.runtime_error)
     718                 :            :                 retval = -EINVAL;
     719   [ +  +  -  + ]:    1671517 :         else if (dev->power.disable_depth == 1 && dev->power.is_suspended
     720         [ #  # ]:          0 :             && dev->power.runtime_status == RPM_ACTIVE)
     721                 :            :                 retval = 1;
     722         [ +  + ]:    1671505 :         else if (dev->power.disable_depth > 0)
     723                 :            :                 retval = -EACCES;
     724         [ +  + ]:    1671517 :         if (retval)
     725                 :            :                 goto out;
     726                 :            : 
     727                 :            :         /*
     728                 :            :          * Other scheduled or pending requests need to be canceled.  Small
     729                 :            :          * optimization: If an autosuspend timer is running, leave it running
     730                 :            :          * rather than cancelling it now only to restart it again in the near
     731                 :            :          * future.
     732                 :            :          */
     733                 :     875514 :         dev->power.request = RPM_REQ_NONE;
     734         [ +  + ]:     875514 :         if (!dev->power.timer_autosuspends)
     735                 :            :                 pm_runtime_deactivate_timer(dev);
     736                 :            : 
     737         [ +  + ]:     875514 :         if (dev->power.runtime_status == RPM_ACTIVE) {
     738                 :            :                 retval = 1;
     739                 :            :                 goto out;
     740                 :            :         }
     741                 :            : 
     742         [ -  + ]:      19461 :         if (dev->power.runtime_status == RPM_RESUMING
     743                 :      19461 :             || dev->power.runtime_status == RPM_SUSPENDING) {
     744                 :          0 :                 DEFINE_WAIT(wait);
     745                 :            : 
     746         [ #  # ]:          0 :                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
     747         [ #  # ]:          0 :                         if (dev->power.runtime_status == RPM_SUSPENDING)
     748                 :          0 :                                 dev->power.deferred_resume = true;
     749                 :            :                         else
     750                 :            :                                 retval = -EINPROGRESS;
     751                 :          0 :                         goto out;
     752                 :            :                 }
     753                 :            : 
     754         [ #  # ]:          0 :                 if (dev->power.irq_safe) {
     755                 :            :                         spin_unlock(&dev->power.lock);
     756                 :            : 
     757                 :          0 :                         cpu_relax();
     758                 :            : 
     759                 :            :                         spin_lock(&dev->power.lock);
     760                 :          0 :                         goto repeat;
     761                 :            :                 }
     762                 :            : 
     763                 :            :                 /* Wait for the operation carried out in parallel with us. */
     764                 :            :                 for (;;) {
     765                 :          0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
     766                 :            :                                         TASK_UNINTERRUPTIBLE);
     767         [ #  # ]:          0 :                         if (dev->power.runtime_status != RPM_RESUMING
     768                 :          0 :                             && dev->power.runtime_status != RPM_SUSPENDING)
     769                 :            :                                 break;
     770                 :            : 
     771                 :            :                         spin_unlock_irq(&dev->power.lock);
     772                 :            : 
     773                 :          0 :                         schedule();
     774                 :            : 
     775                 :            :                         spin_lock_irq(&dev->power.lock);
     776                 :            :                 }
     777                 :          0 :                 finish_wait(&dev->power.wait_queue, &wait);
     778                 :          0 :                 goto repeat;
     779                 :            :         }
     780                 :            : 
     781                 :            :         /*
     782                 :            :          * See if we can skip waking up the parent.  This is safe only if
     783                 :            :          * power.no_callbacks is set, because otherwise we don't know whether
     784                 :            :          * the resume will actually succeed.
     785                 :            :          */
     786   [ +  -  +  -  :      19461 :         if (dev->power.no_callbacks && !parent && dev->parent) {
                   +  - ]
     787                 :      19461 :                 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
     788         [ +  - ]:      19461 :                 if (dev->parent->power.disable_depth > 0
     789         [ +  - ]:      19461 :                     || dev->parent->power.ignore_children
     790         [ +  - ]:      19461 :                     || dev->parent->power.runtime_status == RPM_ACTIVE) {
     791                 :      19461 :                         atomic_inc(&dev->parent->power.child_count);
     792                 :      19461 :                         spin_unlock(&dev->parent->power.lock);
     793                 :            :                         retval = 1;
     794                 :      19461 :                         goto no_callback;       /* Assume success. */
     795                 :            :                 }
     796                 :            :                 spin_unlock(&dev->parent->power.lock);
     797                 :            :         }
     798                 :            : 
     799                 :            :         /* Carry out an asynchronous or a synchronous resume. */
     800         [ #  # ]:          0 :         if (rpmflags & RPM_ASYNC) {
     801                 :          0 :                 dev->power.request = RPM_REQ_RESUME;
     802         [ #  # ]:          0 :                 if (!dev->power.request_pending) {
     803                 :          0 :                         dev->power.request_pending = true;
     804                 :          0 :                         queue_work(pm_wq, &dev->power.work);
     805                 :            :                 }
     806                 :            :                 retval = 0;
     807                 :            :                 goto out;
     808                 :            :         }
     809                 :            : 
     810   [ #  #  #  # ]:          0 :         if (!parent && dev->parent) {
     811                 :            :                 /*
     812                 :            :                  * Increment the parent's usage counter and resume it if
     813                 :            :                  * necessary.  Not needed if dev is irq-safe; then the
     814                 :            :                  * parent is permanently resumed.
     815                 :            :                  */
     816                 :            :                 parent = dev->parent;
     817         [ #  # ]:          0 :                 if (dev->power.irq_safe)
     818                 :            :                         goto skip_parent;
     819                 :            :                 spin_unlock(&dev->power.lock);
     820                 :            : 
     821                 :            :                 pm_runtime_get_noresume(parent);
     822                 :            : 
     823                 :            :                 spin_lock(&parent->power.lock);
     824                 :            :                 /*
     825                 :            :                  * Resume the parent if it has runtime PM enabled and not been
     826                 :            :                  * set to ignore its children.
     827                 :            :                  */
     828         [ #  # ]:          0 :                 if (!parent->power.disable_depth
     829         [ #  # ]:          0 :                     && !parent->power.ignore_children) {
     830                 :          0 :                         rpm_resume(parent, 0);
     831         [ #  # ]:          0 :                         if (parent->power.runtime_status != RPM_ACTIVE)
     832                 :            :                                 retval = -EBUSY;
     833                 :            :                 }
     834                 :            :                 spin_unlock(&parent->power.lock);
     835                 :            : 
     836                 :            :                 spin_lock(&dev->power.lock);
     837         [ #  # ]:          0 :                 if (retval)
     838                 :            :                         goto out;
     839                 :            :                 goto repeat;
     840                 :            :         }
     841                 :            :  skip_parent:
     842                 :            : 
     843         [ #  # ]:          0 :         if (dev->power.no_callbacks)
     844                 :            :                 goto no_callback;       /* Assume success. */
     845                 :            : 
     846                 :            :         __update_runtime_status(dev, RPM_RESUMING);
     847                 :            : 
     848                 :          0 :         callback = RPM_GET_CALLBACK(dev, runtime_resume);
     849                 :            : 
     850                 :          0 :         dev_pm_disable_wake_irq_check(dev);
     851                 :          0 :         retval = rpm_callback(callback, dev);
     852         [ #  # ]:          0 :         if (retval) {
     853                 :            :                 __update_runtime_status(dev, RPM_SUSPENDED);
     854                 :            :                 pm_runtime_cancel_pending(dev);
     855                 :          0 :                 dev_pm_enable_wake_irq_check(dev, false);
     856                 :            :         } else {
     857                 :            :  no_callback:
     858                 :            :                 __update_runtime_status(dev, RPM_ACTIVE);
     859                 :            :                 pm_runtime_mark_last_busy(dev);
     860         [ -  + ]:      19461 :                 if (parent)
     861                 :          0 :                         atomic_inc(&parent->power.child_count);
     862                 :            :         }
     863                 :      19461 :         wake_up_all(&dev->power.wait_queue);
     864                 :            : 
     865         [ +  - ]:      19461 :         if (retval >= 0)
     866                 :      19461 :                 rpm_idle(dev, RPM_ASYNC);
     867                 :            : 
     868                 :            :  out:
     869   [ -  +  #  # ]:    1671517 :         if (parent && !dev->power.irq_safe) {
     870                 :            :                 spin_unlock_irq(&dev->power.lock);
     871                 :            : 
     872                 :            :                 pm_runtime_put(parent);
     873                 :            : 
     874                 :            :                 spin_lock_irq(&dev->power.lock);
     875                 :            :         }
     876                 :            : 
     877                 :    1671517 :         trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
     878                 :            : 
     879                 :    1671504 :         return retval;
     880                 :            : }
     881                 :            : 
     882                 :            : /**
     883                 :            :  * pm_runtime_work - Universal runtime PM work function.
     884                 :            :  * @work: Work structure used for scheduling the execution of this function.
     885                 :            :  *
     886                 :            :  * Use @work to get the device object the work is to be done for, determine what
     887                 :            :  * is to be done and execute the appropriate runtime PM function.
     888                 :            :  */
     889                 :       1449 : static void pm_runtime_work(struct work_struct *work)
     890                 :            : {
     891                 :       1449 :         struct device *dev = container_of(work, struct device, power.work);
     892                 :            :         enum rpm_request req;
     893                 :            : 
     894                 :            :         spin_lock_irq(&dev->power.lock);
     895                 :            : 
     896         [ +  - ]:       1449 :         if (!dev->power.request_pending)
     897                 :            :                 goto out;
     898                 :            : 
     899                 :       1449 :         req = dev->power.request;
     900                 :       1449 :         dev->power.request = RPM_REQ_NONE;
     901                 :       1449 :         dev->power.request_pending = false;
     902                 :            : 
     903   [ +  -  -  -  :       1449 :         switch (req) {
                      - ]
     904                 :            :         case RPM_REQ_NONE:
     905                 :            :                 break;
     906                 :            :         case RPM_REQ_IDLE:
     907                 :       1449 :                 rpm_idle(dev, RPM_NOWAIT);
     908                 :       1449 :                 break;
     909                 :            :         case RPM_REQ_SUSPEND:
     910                 :          0 :                 rpm_suspend(dev, RPM_NOWAIT);
     911                 :          0 :                 break;
     912                 :            :         case RPM_REQ_AUTOSUSPEND:
     913                 :          0 :                 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
     914                 :          0 :                 break;
     915                 :            :         case RPM_REQ_RESUME:
     916                 :          0 :                 rpm_resume(dev, RPM_NOWAIT);
     917                 :          0 :                 break;
     918                 :            :         }
     919                 :            : 
     920                 :            :  out:
     921                 :            :         spin_unlock_irq(&dev->power.lock);
     922                 :       1449 : }
     923                 :            : 
     924                 :            : /**
     925                 :            :  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
     926                 :            :  * @data: Device pointer passed by pm_schedule_suspend().
     927                 :            :  *
     928                 :            :  * Check if the time is right and queue a suspend request.
     929                 :            :  */
     930                 :       6699 : static enum hrtimer_restart  pm_suspend_timer_fn(struct hrtimer *timer)
     931                 :            : {
     932                 :       6699 :         struct device *dev = container_of(timer, struct device, power.suspend_timer);
     933                 :            :         unsigned long flags;
     934                 :            :         u64 expires;
     935                 :            : 
     936                 :       6699 :         spin_lock_irqsave(&dev->power.lock, flags);
     937                 :            : 
     938                 :       6699 :         expires = dev->power.timer_expires;
     939                 :            :         /*
      940                 :            :          * Only act if 'expires' is set and already in the past; if it is still
      941                 :            :          * in the future, we have been called too early and do nothing.
     942                 :            :          */
     943   [ +  -  +  - ]:       6699 :         if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
     944                 :       6699 :                 dev->power.timer_expires = 0;
     945         [ -  + ]:       6699 :                 rpm_suspend(dev, dev->power.timer_autosuspends ?
     946                 :            :                     (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
     947                 :            :         }
     948                 :            : 
     949                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
     950                 :            : 
     951                 :       6699 :         return HRTIMER_NORESTART;
     952                 :            : }
     953                 :            : 
     954                 :            : /**
      955                 :            :  * pm_schedule_suspend - Set up a timer to submit a suspend request in the future.
     956                 :            :  * @dev: Device to suspend.
     957                 :            :  * @delay: Time to wait before submitting a suspend request, in milliseconds.
     958                 :            :  */
     959                 :          0 : int pm_schedule_suspend(struct device *dev, unsigned int delay)
     960                 :            : {
     961                 :            :         unsigned long flags;
     962                 :            :         u64 expires;
     963                 :            :         int retval;
     964                 :            : 
     965                 :          0 :         spin_lock_irqsave(&dev->power.lock, flags);
     966                 :            : 
     967         [ #  # ]:          0 :         if (!delay) {
     968                 :          0 :                 retval = rpm_suspend(dev, RPM_ASYNC);
     969                 :          0 :                 goto out;
     970                 :            :         }
     971                 :            : 
     972                 :          0 :         retval = rpm_check_suspend_allowed(dev);
     973         [ #  # ]:          0 :         if (retval)
     974                 :            :                 goto out;
     975                 :            : 
     976                 :            :         /* Other scheduled or pending requests need to be canceled. */
     977                 :            :         pm_runtime_cancel_pending(dev);
     978                 :            : 
     979                 :          0 :         expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
     980                 :          0 :         dev->power.timer_expires = expires;
     981                 :          0 :         dev->power.timer_autosuspends = 0;
     982                 :          0 :         hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
     983                 :            : 
     984                 :            :  out:
     985                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
     986                 :            : 
     987                 :          0 :         return retval;
     988                 :            : }
     989                 :            : EXPORT_SYMBOL_GPL(pm_schedule_suspend);
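/*
 * Illustrative sketch, not part of runtime.c: how a driver might use
 * pm_schedule_suspend() to request a delayed suspend once its last I/O
 * completes.  The function name "foo_io_done" and the 100 ms delay are
 * hypothetical.
 */
#include <linux/pm_runtime.h>

static void foo_io_done(struct device *dev)
{
        int ret;

        /* Ask pm_wq to submit an asynchronous suspend request in ~100 ms. */
        ret = pm_schedule_suspend(dev, 100);
        if (ret < 0)
                dev_dbg(dev, "suspend not scheduled: %d\n", ret);
}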
     990                 :            : 
     991                 :            : /**
     992                 :            :  * __pm_runtime_idle - Entry point for runtime idle operations.
     993                 :            :  * @dev: Device to send idle notification for.
     994                 :            :  * @rpmflags: Flag bits.
     995                 :            :  *
     996                 :            :  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
     997                 :            :  * return immediately if it is larger than zero.  Then carry out an idle
     998                 :            :  * notification, either synchronous or asynchronous.
     999                 :            :  *
    1000                 :            :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1001                 :            :  * or if pm_runtime_irq_safe() has been called.
    1002                 :            :  */
    1003                 :     145146 : int __pm_runtime_idle(struct device *dev, int rpmflags)
    1004                 :            : {
    1005                 :            :         unsigned long flags;
    1006                 :            :         int retval;
    1007                 :            : 
    1008         [ +  + ]:     145146 :         if (rpmflags & RPM_GET_PUT) {
    1009         [ +  + ]:     233988 :                 if (!atomic_dec_and_test(&dev->power.usage_count))
    1010                 :            :                         return 0;
    1011                 :            :         }
    1012                 :            : 
    1013   [ +  +  +  - ]:      66050 :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
    1014                 :            : 
    1015                 :      66050 :         spin_lock_irqsave(&dev->power.lock, flags);
    1016                 :      66050 :         retval = rpm_idle(dev, rpmflags);
    1017                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1018                 :            : 
    1019                 :      66050 :         return retval;
    1020                 :            : }
    1021                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_idle);
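/*
 * For orientation (a sketch of <linux/pm_runtime.h>, not part of runtime.c):
 * the public helpers that funnel into __pm_runtime_idle() are essentially
 * thin wrappers that select the rpmflags shown below.
 */
static inline int pm_runtime_idle(struct device *dev)
{
        return __pm_runtime_idle(dev, 0);
}

static inline int pm_runtime_put(struct device *dev)
{
        return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}

static inline int pm_runtime_put_sync(struct device *dev)
{
        return __pm_runtime_idle(dev, RPM_GET_PUT);
}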
    1022                 :            : 
    1023                 :            : /**
    1024                 :            :  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
    1025                 :            :  * @dev: Device to suspend.
    1026                 :            :  * @rpmflags: Flag bits.
    1027                 :            :  *
    1028                 :            :  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
    1029                 :            :  * return immediately if it is larger than zero.  Then carry out a suspend,
    1030                 :            :  * either synchronous or asynchronous.
    1031                 :            :  *
    1032                 :            :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1033                 :            :  * or if pm_runtime_irq_safe() has been called.
    1034                 :            :  */
    1035                 :    1556388 : int __pm_runtime_suspend(struct device *dev, int rpmflags)
    1036                 :            : {
    1037                 :            :         unsigned long flags;
    1038                 :            :         int retval;
    1039                 :            : 
    1040         [ +  - ]:    1556388 :         if (rpmflags & RPM_GET_PUT) {
    1041         [ +  + ]:    3112776 :                 if (!atomic_dec_and_test(&dev->power.usage_count))
    1042                 :            :                         return 0;
    1043                 :            :         }
    1044                 :            : 
    1045   [ +  +  +  - ]:    1546863 :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
    1046                 :            : 
    1047                 :    1546863 :         spin_lock_irqsave(&dev->power.lock, flags);
    1048                 :    1546863 :         retval = rpm_suspend(dev, rpmflags);
    1049                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1050                 :            : 
    1051                 :    1546863 :         return retval;
    1052                 :            : }
    1053                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
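/*
 * Likewise (a sketch of <linux/pm_runtime.h>, not part of runtime.c): the
 * helpers built on __pm_runtime_suspend() differ only in the rpmflags they
 * pass, for example:
 */
static inline int pm_runtime_suspend(struct device *dev)
{
        return __pm_runtime_suspend(dev, 0);
}

static inline int pm_runtime_put_autosuspend(struct device *dev)
{
        return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}

static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
        return __pm_runtime_suspend(dev, RPM_GET_PUT);
}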
    1054                 :            : 
    1055                 :            : /**
    1056                 :            :  * __pm_runtime_resume - Entry point for runtime resume operations.
    1057                 :            :  * @dev: Device to resume.
    1058                 :            :  * @rpmflags: Flag bits.
    1059                 :            :  *
    1060                 :            :  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
    1061                 :            :  * carry out a resume, either synchronous or asynchronous.
    1062                 :            :  *
    1063                 :            :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1064                 :            :  * or if pm_runtime_irq_safe() has been called.
    1065                 :            :  */
    1066                 :    1670481 : int __pm_runtime_resume(struct device *dev, int rpmflags)
    1067                 :            : {
    1068                 :            :         unsigned long flags;
    1069                 :            :         int retval;
    1070                 :            : 
    1071   [ +  +  +  -  :    1670481 :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
                   +  + ]
    1072                 :            :                         dev->power.runtime_status != RPM_ACTIVE);
    1073                 :            : 
    1074         [ +  - ]:    1670481 :         if (rpmflags & RPM_GET_PUT)
    1075                 :    1670481 :                 atomic_inc(&dev->power.usage_count);
    1076                 :            : 
    1077                 :    1670480 :         spin_lock_irqsave(&dev->power.lock, flags);
    1078                 :    1670482 :         retval = rpm_resume(dev, rpmflags);
    1079                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1080                 :            : 
    1081                 :    1670482 :         return retval;
    1082                 :            : }
    1083                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_resume);
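/*
 * Illustrative sketch, not part of runtime.c: the usual get/put pattern a
 * driver wraps around hardware accesses.  pm_runtime_get_sync() resolves to
 * __pm_runtime_resume(dev, RPM_GET_PUT) and pm_runtime_put() to
 * __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC).  "foo_do_transfer" is a
 * hypothetical driver function.
 */
#include <linux/pm_runtime.h>

static int foo_do_transfer(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);         /* resume the device if needed */
        if (ret < 0) {
                pm_runtime_put_noidle(dev);     /* undo the usage-count bump */
                return ret;
        }

        /* ... access the now-active hardware ... */

        pm_runtime_put(dev);                    /* asynchronous idle notification */
        return 0;
}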
    1084                 :            : 
    1085                 :            : /**
    1086                 :            :  * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
    1087                 :            :  * @dev: Device to handle.
    1088                 :            :  *
    1089                 :            :  * Return -EINVAL if runtime PM is disabled for the device.
    1090                 :            :  *
    1091                 :            :  * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
    1092                 :            :  * and the runtime PM usage counter is nonzero, increment the counter and
    1093                 :            :  * return 1.  Otherwise return 0 without changing the counter.
    1094                 :            :  */
    1095                 :          0 : int pm_runtime_get_if_in_use(struct device *dev)
    1096                 :            : {
    1097                 :            :         unsigned long flags;
    1098                 :            :         int retval;
    1099                 :            : 
    1100                 :          0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1101         [ #  # ]:          0 :         retval = dev->power.disable_depth > 0 ? -EINVAL :
    1102                 :          0 :                 dev->power.runtime_status == RPM_ACTIVE
    1103   [ #  #  #  # ]:          0 :                         && atomic_inc_not_zero(&dev->power.usage_count);
    1104                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1105                 :          0 :         return retval;
    1106                 :            : }
    1107                 :            : EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
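/*
 * Illustrative sketch, not part of runtime.c: pm_runtime_get_if_in_use() lets
 * a non-blocking path touch the hardware only when another caller already
 * holds a usage reference (and the device is therefore active).  "foo_poll"
 * is a hypothetical driver function.
 */
#include <linux/pm_runtime.h>

static void foo_poll(struct device *dev)
{
        /* > 0: counter bumped, device active; <= 0: leave the hardware alone. */
        if (pm_runtime_get_if_in_use(dev) <= 0)
                return;

        /* ... sample a status register, etc. ... */

        pm_runtime_put(dev);
}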
    1108                 :            : 
    1109                 :            : /**
    1110                 :            :  * __pm_runtime_set_status - Set runtime PM status of a device.
    1111                 :            :  * @dev: Device to handle.
    1112                 :            :  * @status: New runtime PM status of the device.
    1113                 :            :  *
    1114                 :            :  * If runtime PM of the device is disabled or its power.runtime_error field is
    1115                 :            :  * different from zero, the status may be changed either to RPM_ACTIVE, or to
    1116                 :            :  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
     1117                 :            :  * However, if the device has a parent whose runtime PM is enabled, the parent
     1118                 :            :  * is not active, and the parent's power.ignore_children flag is unset, the
     1119                 :            :  * device's status cannot be set to RPM_ACTIVE, so -EBUSY is returned in that case.
    1120                 :            :  *
    1121                 :            :  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
    1122                 :            :  * and the device parent's counter of unsuspended children is modified to
    1123                 :            :  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
    1124                 :            :  * notification request for the parent is submitted.
    1125                 :            :  *
    1126                 :            :  * If @dev has any suppliers (as reflected by device links to them), and @status
    1127                 :            :  * is RPM_ACTIVE, they will be activated upfront and if the activation of one
    1128                 :            :  * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
     1129                 :            :  * of the @status value) and the suppliers will be deactivated on exit.  The
    1130                 :            :  * error returned by the failing supplier activation will be returned in that
    1131                 :            :  * case.
    1132                 :            :  */
    1133                 :       3933 : int __pm_runtime_set_status(struct device *dev, unsigned int status)
    1134                 :            : {
    1135                 :       3933 :         struct device *parent = dev->parent;
    1136                 :            :         bool notify_parent = false;
    1137                 :            :         int error = 0;
    1138                 :            : 
    1139         [ +  - ]:       3933 :         if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
    1140                 :            :                 return -EINVAL;
    1141                 :            : 
    1142                 :            :         spin_lock_irq(&dev->power.lock);
    1143                 :            : 
    1144                 :            :         /*
    1145                 :            :          * Prevent PM-runtime from being enabled for the device or return an
    1146                 :            :          * error if it is enabled already and working.
    1147                 :            :          */
    1148   [ +  -  +  - ]:       3933 :         if (dev->power.runtime_error || dev->power.disable_depth)
    1149                 :       3933 :                 dev->power.disable_depth++;
    1150                 :            :         else
    1151                 :            :                 error = -EAGAIN;
    1152                 :            : 
    1153                 :            :         spin_unlock_irq(&dev->power.lock);
    1154                 :            : 
    1155         [ +  - ]:       3933 :         if (error)
    1156                 :            :                 return error;
    1157                 :            : 
    1158                 :            :         /*
    1159                 :            :          * If the new status is RPM_ACTIVE, the suppliers can be activated
    1160                 :            :          * upfront regardless of the current status, because next time
    1161                 :            :          * rpm_put_suppliers() runs, the rpm_active refcounts of the links
    1162                 :            :          * involved will be dropped down to one anyway.
    1163                 :            :          */
    1164         [ +  - ]:       3933 :         if (status == RPM_ACTIVE) {
    1165                 :       3933 :                 int idx = device_links_read_lock();
    1166                 :            : 
    1167                 :       3933 :                 error = rpm_get_suppliers(dev);
    1168         [ -  + ]:       3933 :                 if (error)
    1169                 :            :                         status = RPM_SUSPENDED;
    1170                 :            : 
    1171                 :       3933 :                 device_links_read_unlock(idx);
    1172                 :            :         }
    1173                 :            : 
    1174                 :            :         spin_lock_irq(&dev->power.lock);
    1175                 :            : 
    1176   [ +  -  +  - ]:       3933 :         if (dev->power.runtime_status == status || !parent)
    1177                 :            :                 goto out_set;
    1178                 :            : 
    1179         [ -  + ]:       3933 :         if (status == RPM_SUSPENDED) {
    1180                 :          0 :                 atomic_add_unless(&parent->power.child_count, -1, 0);
    1181                 :          0 :                 notify_parent = !parent->power.ignore_children;
    1182                 :            :         } else {
    1183                 :       3933 :                 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
    1184                 :            : 
    1185                 :            :                 /*
    1186                 :            :                  * It is invalid to put an active child under a parent that is
    1187                 :            :                  * not active, has runtime PM enabled and the
    1188                 :            :                  * 'power.ignore_children' flag unset.
    1189                 :            :                  */
    1190         [ +  + ]:       3933 :                 if (!parent->power.disable_depth
    1191         [ +  + ]:       3312 :                     && !parent->power.ignore_children
    1192         [ -  + ]:       2070 :                     && parent->power.runtime_status != RPM_ACTIVE) {
    1193                 :          0 :                         dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
    1194                 :            :                                 dev_name(dev),
    1195                 :            :                                 dev_name(parent));
    1196                 :          0 :                         error = -EBUSY;
    1197         [ +  - ]:       3933 :                 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
    1198                 :       3933 :                         atomic_inc(&parent->power.child_count);
    1199                 :            :                 }
    1200                 :            : 
    1201                 :            :                 spin_unlock(&parent->power.lock);
    1202                 :            : 
    1203         [ +  - ]:       3933 :                 if (error) {
    1204                 :            :                         status = RPM_SUSPENDED;
    1205                 :            :                         goto out;
    1206                 :            :                 }
    1207                 :            :         }
    1208                 :            : 
    1209                 :            :  out_set:
    1210                 :            :         __update_runtime_status(dev, status);
    1211         [ +  - ]:       3933 :         if (!error)
    1212                 :       3933 :                 dev->power.runtime_error = 0;
    1213                 :            : 
    1214                 :            :  out:
    1215                 :            :         spin_unlock_irq(&dev->power.lock);
    1216                 :            : 
    1217         [ -  + ]:       3933 :         if (notify_parent)
    1218                 :            :                 pm_request_idle(parent);
    1219                 :            : 
    1220         [ -  + ]:       3933 :         if (status == RPM_SUSPENDED) {
    1221                 :          0 :                 int idx = device_links_read_lock();
    1222                 :            : 
    1223                 :          0 :                 rpm_put_suppliers(dev);
    1224                 :            : 
    1225                 :          0 :                 device_links_read_unlock(idx);
    1226                 :            :         }
    1227                 :            : 
    1228                 :       3933 :         pm_runtime_enable(dev);
    1229                 :            : 
    1230                 :       3933 :         return error;
    1231                 :            : }
    1232                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
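/*
 * Illustrative sketch, not part of runtime.c: in probe, a driver typically
 * declares the state the hardware is actually in before enabling runtime PM.
 * pm_runtime_set_active() is the <linux/pm_runtime.h> wrapper for
 * __pm_runtime_set_status(dev, RPM_ACTIVE).  "foo_init_pm" is hypothetical.
 */
#include <linux/pm_runtime.h>

static int foo_init_pm(struct device *dev)
{
        int ret;

        /* The hardware was left powered up by firmware/bus setup code. */
        ret = pm_runtime_set_active(dev);
        if (ret)
                return ret;

        pm_runtime_enable(dev);
        return 0;
}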
    1233                 :            : 
    1234                 :            : /**
    1235                 :            :  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
    1236                 :            :  * @dev: Device to handle.
    1237                 :            :  *
    1238                 :            :  * Flush all pending requests for the device from pm_wq and wait for all
    1239                 :            :  * runtime PM operations involving the device in progress to complete.
    1240                 :            :  *
    1241                 :            :  * Should be called under dev->power.lock with interrupts disabled.
    1242                 :            :  */
    1243                 :      12639 : static void __pm_runtime_barrier(struct device *dev)
    1244                 :            : {
    1245                 :            :         pm_runtime_deactivate_timer(dev);
    1246                 :            : 
    1247         [ -  + ]:      12639 :         if (dev->power.request_pending) {
    1248                 :          0 :                 dev->power.request = RPM_REQ_NONE;
    1249                 :            :                 spin_unlock_irq(&dev->power.lock);
    1250                 :            : 
    1251                 :          0 :                 cancel_work_sync(&dev->power.work);
    1252                 :            : 
    1253                 :            :                 spin_lock_irq(&dev->power.lock);
    1254                 :          0 :                 dev->power.request_pending = false;
    1255                 :            :         }
    1256                 :            : 
    1257         [ +  - ]:      25278 :         if (dev->power.runtime_status == RPM_SUSPENDING
    1258                 :      12639 :             || dev->power.runtime_status == RPM_RESUMING
    1259         [ -  + ]:      12639 :             || dev->power.idle_notification) {
    1260                 :          0 :                 DEFINE_WAIT(wait);
    1261                 :            : 
    1262                 :            :                 /* Suspend, wake-up or idle notification in progress. */
    1263                 :            :                 for (;;) {
    1264                 :          0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
    1265                 :            :                                         TASK_UNINTERRUPTIBLE);
    1266         [ #  # ]:          0 :                         if (dev->power.runtime_status != RPM_SUSPENDING
    1267                 :          0 :                             && dev->power.runtime_status != RPM_RESUMING
    1268         [ #  # ]:          0 :                             && !dev->power.idle_notification)
    1269                 :            :                                 break;
    1270                 :            :                         spin_unlock_irq(&dev->power.lock);
    1271                 :            : 
    1272                 :          0 :                         schedule();
    1273                 :            : 
    1274                 :            :                         spin_lock_irq(&dev->power.lock);
    1275                 :            :                 }
    1276                 :          0 :                 finish_wait(&dev->power.wait_queue, &wait);
    1277                 :            :         }
    1278                 :      12639 : }
    1279                 :            : 
    1280                 :            : /**
    1281                 :            :  * pm_runtime_barrier - Flush pending requests and wait for completions.
    1282                 :            :  * @dev: Device to handle.
    1283                 :            :  *
     1284                 :            :  * Prevent the device from being suspended by incrementing its usage counter
     1285                 :            :  * and, if there's a pending resume request for the device, wake the device up.
    1286                 :            :  * Next, make sure that all pending requests for the device have been flushed
    1287                 :            :  * from pm_wq and wait for all runtime PM operations involving the device in
    1288                 :            :  * progress to complete.
    1289                 :            :  *
    1290                 :            :  * Return value:
    1291                 :            :  * 1, if there was a resume request pending and the device had to be woken up,
    1292                 :            :  * 0, otherwise
    1293                 :            :  */
    1294                 :      12432 : int pm_runtime_barrier(struct device *dev)
    1295                 :            : {
    1296                 :            :         int retval = 0;
    1297                 :            : 
    1298                 :            :         pm_runtime_get_noresume(dev);
    1299                 :            :         spin_lock_irq(&dev->power.lock);
    1300                 :            : 
    1301         [ -  + ]:      12432 :         if (dev->power.request_pending
    1302         [ #  # ]:          0 :             && dev->power.request == RPM_REQ_RESUME) {
    1303                 :          0 :                 rpm_resume(dev, 0);
    1304                 :            :                 retval = 1;
    1305                 :            :         }
    1306                 :            : 
    1307                 :      12432 :         __pm_runtime_barrier(dev);
    1308                 :            : 
    1309                 :            :         spin_unlock_irq(&dev->power.lock);
    1310                 :            :         pm_runtime_put_noidle(dev);
    1311                 :            : 
    1312                 :      12432 :         return retval;
    1313                 :            : }
    1314                 :            : EXPORT_SYMBOL_GPL(pm_runtime_barrier);
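/*
 * Illustrative sketch, not part of runtime.c: flushing runtime PM activity
 * before tearing down resources that the runtime callbacks might still be
 * using.  "foo_teardown" is a hypothetical driver function.
 */
#include <linux/pm_runtime.h>

static void foo_teardown(struct device *dev)
{
        /* Wake the device if a resume was queued, then wait for callbacks. */
        if (pm_runtime_barrier(dev))
                dev_dbg(dev, "a pending resume request was carried out\n");

        /* ... everything that was pending at the call has now completed ... */
}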
    1315                 :            : 
    1316                 :            : /**
    1317                 :            :  * __pm_runtime_disable - Disable runtime PM of a device.
    1318                 :            :  * @dev: Device to handle.
    1319                 :            :  * @check_resume: If set, check if there's a resume request for the device.
    1320                 :            :  *
    1321                 :            :  * Increment power.disable_depth for the device and if it was zero previously,
    1322                 :            :  * cancel all pending runtime PM requests for the device and wait for all
    1323                 :            :  * operations in progress to complete.  The device can be either active or
    1324                 :            :  * suspended after its runtime PM has been disabled.
    1325                 :            :  *
    1326                 :            :  * If @check_resume is set and there's a resume request pending when
    1327                 :            :  * __pm_runtime_disable() is called and power.disable_depth is zero, the
    1328                 :            :  * function will wake up the device before disabling its runtime PM.
    1329                 :            :  */
    1330                 :        414 : void __pm_runtime_disable(struct device *dev, bool check_resume)
    1331                 :            : {
    1332                 :            :         spin_lock_irq(&dev->power.lock);
    1333                 :            : 
    1334         [ +  + ]:        414 :         if (dev->power.disable_depth > 0) {
    1335                 :        207 :                 dev->power.disable_depth++;
    1336                 :        207 :                 goto out;
    1337                 :            :         }
    1338                 :            : 
    1339                 :            :         /*
    1340                 :            :          * Wake up the device if there's a resume request pending, because that
    1341                 :            :          * means there probably is some I/O to process and disabling runtime PM
    1342                 :            :          * shouldn't prevent the device from processing the I/O.
    1343                 :            :          */
    1344   [ +  -  -  + ]:        207 :         if (check_resume && dev->power.request_pending
    1345         [ #  # ]:          0 :             && dev->power.request == RPM_REQ_RESUME) {
    1346                 :            :                 /*
    1347                 :            :                  * Prevent suspends and idle notifications from being carried
    1348                 :            :                  * out after we have woken up the device.
    1349                 :            :                  */
    1350                 :            :                 pm_runtime_get_noresume(dev);
    1351                 :            : 
    1352                 :          0 :                 rpm_resume(dev, 0);
    1353                 :            : 
    1354                 :            :                 pm_runtime_put_noidle(dev);
    1355                 :            :         }
    1356                 :            : 
    1357                 :            :         /* Update time accounting before disabling PM-runtime. */
    1358                 :        207 :         update_pm_runtime_accounting(dev);
    1359                 :            : 
    1360         [ +  - ]:        207 :         if (!dev->power.disable_depth++)
    1361                 :        207 :                 __pm_runtime_barrier(dev);
    1362                 :            : 
    1363                 :            :  out:
    1364                 :            :         spin_unlock_irq(&dev->power.lock);
    1365                 :        414 : }
    1366                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_disable);
    1367                 :            : 
    1368                 :            : /**
    1369                 :            :  * pm_runtime_enable - Enable runtime PM of a device.
    1370                 :            :  * @dev: Device to handle.
    1371                 :            :  */
    1372                 :       8073 : void pm_runtime_enable(struct device *dev)
    1373                 :            : {
    1374                 :            :         unsigned long flags;
    1375                 :            : 
    1376                 :       8073 :         spin_lock_irqsave(&dev->power.lock, flags);
    1377                 :            : 
    1378         [ +  - ]:       8073 :         if (dev->power.disable_depth > 0) {
    1379                 :       8073 :                 dev->power.disable_depth--;
    1380                 :            : 
    1381                 :            :                 /* About to enable runtime pm, set accounting_timestamp to now */
    1382         [ +  + ]:       8073 :                 if (!dev->power.disable_depth)
    1383                 :       4140 :                         dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
    1384                 :            :         } else {
    1385                 :          0 :                 dev_warn(dev, "Unbalanced %s!\n", __func__);
    1386                 :            :         }
    1387                 :            : 
    1388   [ +  +  +  +  :       8073 :         WARN(!dev->power.disable_depth &&
          -  +  #  #  -  
                      + ]
    1389                 :            :              dev->power.runtime_status == RPM_SUSPENDED &&
    1390                 :            :              !dev->power.ignore_children &&
    1391                 :            :              atomic_read(&dev->power.child_count) > 0,
    1392                 :            :              "Enabling runtime PM for inactive device (%s) with active children\n",
    1393                 :            :              dev_name(dev));
    1394                 :            : 
    1395                 :            :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1396                 :       8073 : }
    1397                 :            : EXPORT_SYMBOL_GPL(pm_runtime_enable);
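/*
 * Illustrative sketch, not part of runtime.c: pm_runtime_enable() and
 * pm_runtime_disable() (the wrapper for __pm_runtime_disable(dev, true)) must
 * balance; devices start life with disable_depth == 1 from pm_runtime_init().
 * "foo_bind" and "foo_unbind" are hypothetical.
 */
#include <linux/pm_runtime.h>

static void foo_bind(struct device *dev)
{
        pm_runtime_enable(dev);         /* drops the initial disable_depth */
}

static void foo_unbind(struct device *dev)
{
        pm_runtime_disable(dev);        /* cancels requests, waits for callbacks */
}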
    1398                 :            : 
    1399                 :            : /**
    1400                 :            :  * pm_runtime_forbid - Block runtime PM of a device.
    1401                 :            :  * @dev: Device to handle.
    1402                 :            :  *
    1403                 :            :  * Increase the device's usage count and clear its power.runtime_auto flag,
    1404                 :            :  * so that it cannot be suspended at run time until pm_runtime_allow() is called
    1405                 :            :  * for it.
    1406                 :            :  */
    1407                 :       1035 : void pm_runtime_forbid(struct device *dev)
    1408                 :            : {
    1409                 :            :         spin_lock_irq(&dev->power.lock);
    1410         [ +  - ]:       1035 :         if (!dev->power.runtime_auto)
    1411                 :            :                 goto out;
    1412                 :            : 
    1413                 :       1035 :         dev->power.runtime_auto = false;
    1414                 :       1035 :         atomic_inc(&dev->power.usage_count);
    1415                 :       1035 :         rpm_resume(dev, 0);
    1416                 :            : 
    1417                 :            :  out:
    1418                 :            :         spin_unlock_irq(&dev->power.lock);
    1419                 :       1035 : }
    1420                 :            : EXPORT_SYMBOL_GPL(pm_runtime_forbid);
    1421                 :            : 
    1422                 :            : /**
    1423                 :            :  * pm_runtime_allow - Unblock runtime PM of a device.
    1424                 :            :  * @dev: Device to handle.
    1425                 :            :  *
    1426                 :            :  * Decrease the device's usage count and set its power.runtime_auto flag.
    1427                 :            :  */
    1428                 :        207 : void pm_runtime_allow(struct device *dev)
    1429                 :            : {
    1430                 :            :         spin_lock_irq(&dev->power.lock);
    1431         [ +  - ]:        207 :         if (dev->power.runtime_auto)
    1432                 :            :                 goto out;
    1433                 :            : 
    1434                 :        207 :         dev->power.runtime_auto = true;
    1435         [ -  + ]:        414 :         if (atomic_dec_and_test(&dev->power.usage_count))
    1436                 :          0 :                 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
    1437                 :            : 
    1438                 :            :  out:
    1439                 :            :         spin_unlock_irq(&dev->power.lock);
    1440                 :        207 : }
    1441                 :            : EXPORT_SYMBOL_GPL(pm_runtime_allow);
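/*
 * Illustrative sketch, not part of runtime.c: a driver that wants runtime
 * suspend to stay blocked until userspace opts in (by writing "auto" to the
 * power/control attribute, which ends up calling pm_runtime_allow()) can call
 * pm_runtime_forbid() during probe.  "foo_apply_default_policy" is hypothetical.
 */
#include <linux/pm_runtime.h>

static void foo_apply_default_policy(struct device *dev)
{
        /* Bumps the usage count, resumes the device and clears runtime_auto. */
        pm_runtime_forbid(dev);
}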
    1442                 :            : 
    1443                 :            : /**
    1444                 :            :  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
    1445                 :            :  * @dev: Device to handle.
    1446                 :            :  *
    1447                 :            :  * Set the power.no_callbacks flag, which tells the PM core that this
    1448                 :            :  * device is power-managed through its parent and has no runtime PM
    1449                 :            :  * callbacks of its own.  The runtime sysfs attributes will be removed.
    1450                 :            :  */
    1451                 :       1449 : void pm_runtime_no_callbacks(struct device *dev)
    1452                 :            : {
    1453                 :            :         spin_lock_irq(&dev->power.lock);
    1454                 :       1449 :         dev->power.no_callbacks = 1;
    1455                 :            :         spin_unlock_irq(&dev->power.lock);
    1456         [ -  + ]:       1449 :         if (device_is_registered(dev))
    1457                 :          0 :                 rpm_sysfs_remove(dev);
    1458                 :       1449 : }
    1459                 :            : EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
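/*
 * Illustrative sketch, not part of runtime.c: a purely logical child device
 * whose power state simply follows its parent can opt out of callbacks.
 * "foo_init_child" is a hypothetical helper.
 */
#include <linux/pm_runtime.h>

static void foo_init_child(struct device *child)
{
        pm_runtime_no_callbacks(child); /* no callbacks, no runtime sysfs files */
        pm_runtime_enable(child);
}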
    1460                 :            : 
    1461                 :            : /**
    1462                 :            :  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
    1463                 :            :  * @dev: Device to handle
    1464                 :            :  *
    1465                 :            :  * Set the power.irq_safe flag, which tells the PM core that the
    1466                 :            :  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
    1467                 :            :  * always be invoked with the spinlock held and interrupts disabled.  It also
    1468                 :            :  * causes the parent's usage counter to be permanently incremented, preventing
    1469                 :            :  * the parent from runtime suspending -- otherwise an irq-safe child might have
    1470                 :            :  * to wait for a non-irq-safe parent.
    1471                 :            :  */
    1472                 :          0 : void pm_runtime_irq_safe(struct device *dev)
    1473                 :            : {
    1474         [ #  # ]:          0 :         if (dev->parent)
    1475                 :            :                 pm_runtime_get_sync(dev->parent);
    1476                 :            :         spin_lock_irq(&dev->power.lock);
    1477                 :          0 :         dev->power.irq_safe = 1;
    1478                 :            :         spin_unlock_irq(&dev->power.lock);
    1479                 :          0 : }
    1480                 :            : EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
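/*
 * Illustrative sketch, not part of runtime.c: a driver whose runtime PM
 * callbacks are safe to run with interrupts disabled can declare that once,
 * after which pm_runtime_get_sync()/pm_runtime_put() may be used from atomic
 * context.  "foo_mark_irq_safe" is hypothetical.
 */
#include <linux/pm_runtime.h>

static void foo_mark_irq_safe(struct device *dev)
{
        /* Also keeps the parent permanently resumed from this point on. */
        pm_runtime_irq_safe(dev);
}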
    1481                 :            : 
    1482                 :            : /**
    1483                 :            :  * update_autosuspend - Handle a change to a device's autosuspend settings.
    1484                 :            :  * @dev: Device to handle.
    1485                 :            :  * @old_delay: The former autosuspend_delay value.
    1486                 :            :  * @old_use: The former use_autosuspend value.
    1487                 :            :  *
    1488                 :            :  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
    1489                 :            :  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
    1490                 :            :  *
    1491                 :            :  * This function must be called under dev->power.lock with interrupts disabled.
    1492                 :            :  */
    1493                 :       2898 : static void update_autosuspend(struct device *dev, int old_delay, int old_use)
    1494                 :            : {
    1495                 :       2898 :         int delay = dev->power.autosuspend_delay;
    1496                 :            : 
    1497                 :            :         /* Should runtime suspend be prevented now? */
    1498   [ +  +  -  + ]:       2898 :         if (dev->power.use_autosuspend && delay < 0) {
    1499                 :            : 
    1500                 :            :                 /* If it used to be allowed then prevent it. */
    1501         [ #  # ]:          0 :                 if (!old_use || old_delay >= 0) {
    1502                 :          0 :                         atomic_inc(&dev->power.usage_count);
    1503                 :          0 :                         rpm_resume(dev, 0);
    1504                 :            :                 }
    1505                 :            :         }
    1506                 :            : 
    1507                 :            :         /* Runtime suspend should be allowed now. */
    1508                 :            :         else {
    1509                 :            : 
    1510                 :            :                 /* If it used to be prevented then allow it. */
    1511         [ -  + ]:       2898 :                 if (old_use && old_delay < 0)
    1512                 :          0 :                         atomic_dec(&dev->power.usage_count);
    1513                 :            : 
    1514                 :            :                 /* Maybe we can autosuspend now. */
    1515                 :       2898 :                 rpm_idle(dev, RPM_AUTO);
    1516                 :            :         }
    1517                 :       2898 : }
    1518                 :            : 
    1519                 :            : /**
    1520                 :            :  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
    1521                 :            :  * @dev: Device to handle.
    1522                 :            :  * @delay: Value of the new delay in milliseconds.
    1523                 :            :  *
    1524                 :            :  * Set the device's power.autosuspend_delay value.  If it changes to negative
    1525                 :            :  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
    1526                 :            :  * changes the other way, allow runtime suspends.
    1527                 :            :  */
    1528                 :       1656 : void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
    1529                 :            : {
    1530                 :            :         int old_delay, old_use;
    1531                 :            : 
    1532                 :            :         spin_lock_irq(&dev->power.lock);
    1533                 :       1656 :         old_delay = dev->power.autosuspend_delay;
    1534                 :       1656 :         old_use = dev->power.use_autosuspend;
    1535                 :       1656 :         dev->power.autosuspend_delay = delay;
    1536                 :       1656 :         update_autosuspend(dev, old_delay, old_use);
    1537                 :            :         spin_unlock_irq(&dev->power.lock);
    1538                 :       1656 : }
    1539                 :            : EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
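A typical probe-time usage sketch (not taken from this file; the foo_* names, the platform bus, and the 2000 ms delay are assumptions for illustration):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;

        /* Hypothetical policy: autosuspend 2000 ms after the last use. */
        pm_runtime_set_autosuspend_delay(dev, 2000);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);

        return 0;
}

pm_runtime_use_autosuspend() here is the static inline wrapper from <linux/pm_runtime.h> around __pm_runtime_use_autosuspend(dev, true), which is defined below.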
    1540                 :            : 
    1541                 :            : /**
    1542                 :            :  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
    1543                 :            :  * @dev: Device to handle.
    1544                 :            :  * @use: New value for use_autosuspend.
    1545                 :            :  *
    1546                 :            :  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
    1547                 :            :  * suspends as needed.
    1548                 :            :  */
    1549                 :       1242 : void __pm_runtime_use_autosuspend(struct device *dev, bool use)
    1550                 :            : {
    1551                 :            :         int old_delay, old_use;
    1552                 :            : 
    1553                 :            :         spin_lock_irq(&dev->power.lock);
    1554                 :       1242 :         old_delay = dev->power.autosuspend_delay;
    1555                 :       1242 :         old_use = dev->power.use_autosuspend;
    1556                 :       1242 :         dev->power.use_autosuspend = use;
    1557                 :       1242 :         update_autosuspend(dev, old_delay, old_use);
    1558                 :            :         spin_unlock_irq(&dev->power.lock);
    1559                 :       1242 : }
    1560                 :            : EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
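Once autosuspend is in use, an I/O path typically pairs pm_runtime_get_sync() with pm_runtime_mark_last_busy() and pm_runtime_put_autosuspend(); a minimal sketch (foo_transfer() is hypothetical):

#include <linux/pm_runtime.h>

static int foo_transfer(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                /* get_sync() leaves the usage count raised even on error. */
                pm_runtime_put_noidle(dev);
                return ret;
        }

        /* ... access the hardware while it is RPM_ACTIVE ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}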
    1561                 :            : 
    1562                 :            : /**
    1563                 :            :  * pm_runtime_init - Initialize runtime PM fields in given device object.
    1564                 :            :  * @dev: Device object to initialize.
    1565                 :            :  */
    1566                 :      76382 : void pm_runtime_init(struct device *dev)
    1567                 :            : {
    1568                 :      76382 :         dev->power.runtime_status = RPM_SUSPENDED;
    1569                 :      76382 :         dev->power.idle_notification = false;
    1570                 :            : 
    1571                 :      76382 :         dev->power.disable_depth = 1;
    1572                 :            :         atomic_set(&dev->power.usage_count, 0);
    1573                 :            : 
    1574                 :      76382 :         dev->power.runtime_error = 0;
    1575                 :            : 
    1576                 :            :         atomic_set(&dev->power.child_count, 0);
    1577                 :            :         pm_suspend_ignore_children(dev, false);
    1578                 :      76382 :         dev->power.runtime_auto = true;
    1579                 :            : 
    1580                 :      76382 :         dev->power.request_pending = false;
    1581                 :      76382 :         dev->power.request = RPM_REQ_NONE;
    1582                 :      76382 :         dev->power.deferred_resume = false;
    1583                 :     152764 :         INIT_WORK(&dev->power.work, pm_runtime_work);
    1584                 :            : 
    1585                 :      76382 :         dev->power.timer_expires = 0;
    1586                 :      76382 :         hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    1587                 :      76382 :         dev->power.suspend_timer.function = pm_suspend_timer_fn;
    1588                 :            : 
    1589                 :      76382 :         init_waitqueue_head(&dev->power.wait_queue);
    1590                 :      76383 : }
    1591                 :            : 
    1592                 :            : /**
    1593                 :            :  * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
    1594                 :            :  * @dev: Device object to re-initialize.
    1595                 :            :  */
    1596                 :       1656 : void pm_runtime_reinit(struct device *dev)
    1597                 :            : {
    1598         [ +  - ]:       1656 :         if (!pm_runtime_enabled(dev)) {
    1599         [ -  + ]:       1656 :                 if (dev->power.runtime_status == RPM_ACTIVE)
    1600                 :            :                         pm_runtime_set_suspended(dev);
    1601         [ -  + ]:       1656 :                 if (dev->power.irq_safe) {
    1602                 :            :                         spin_lock_irq(&dev->power.lock);
    1603                 :          0 :                         dev->power.irq_safe = 0;
    1604                 :            :                         spin_unlock_irq(&dev->power.lock);
    1605         [ #  # ]:          0 :                         if (dev->parent)
    1606                 :            :                                 pm_runtime_put(dev->parent);
    1607                 :            :                 }
    1608                 :            :         }
    1609                 :       1656 : }
    1610                 :            : 
    1611                 :            : /**
    1612                 :            :  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
    1613                 :            :  * @dev: Device object being removed from device hierarchy.
    1614                 :            :  */
    1615                 :        207 : void pm_runtime_remove(struct device *dev)
    1616                 :            : {
    1617                 :        207 :         __pm_runtime_disable(dev, false);
    1618                 :        207 :         pm_runtime_reinit(dev);
    1619                 :        207 : }
    1620                 :            : 
    1621                 :            : /**
    1622                 :            :  * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
    1623                 :            :  * @dev: Device whose driver is going to be removed.
    1624                 :            :  *
    1625                 :            :  * Check links from this device to any consumers and if any of them have active
    1626                 :            :  * runtime PM references to the device, drop the usage counter of the device
    1627                 :            :  * (as many times as needed).
    1628                 :            :  *
    1629                 :            :  * Links with the DL_FLAG_MANAGED flag unset are ignored.
    1630                 :            :  *
    1631                 :            :  * Since the device is guaranteed to be runtime-active at the point this is
    1632                 :            :  * called, nothing else needs to be done here.
    1633                 :            :  *
    1634                 :            :  * Moreover, this is called after device_links_busy() has returned 'false', so
    1635                 :            :  * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
    1636                 :            :  * therefore rpm_active can't be manipulated concurrently.
    1637                 :            :  */
    1638                 :          0 : void pm_runtime_clean_up_links(struct device *dev)
    1639                 :            : {
    1640                 :            :         struct device_link *link;
    1641                 :            :         int idx;
    1642                 :            : 
    1643                 :          0 :         idx = device_links_read_lock();
    1644                 :            : 
    1645         [ #  # ]:          0 :         list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
    1646                 :            :                                 device_links_read_lock_held()) {
    1647         [ #  # ]:          0 :                 if (!(link->flags & DL_FLAG_MANAGED))
    1648                 :          0 :                         continue;
    1649                 :            : 
    1650         [ #  # ]:          0 :                 while (refcount_dec_not_one(&link->rpm_active))
    1651                 :            :                         pm_runtime_put_noidle(dev);
    1652                 :            :         }
    1653                 :            : 
    1654                 :          0 :         device_links_read_unlock(idx);
    1655                 :          0 : }
    1656                 :            : 
    1657                 :            : /**
    1658                 :            :  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
    1659                 :            :  * @dev: Consumer device.
    1660                 :            :  */
    1661                 :      11385 : void pm_runtime_get_suppliers(struct device *dev)
    1662                 :            : {
    1663                 :            :         struct device_link *link;
    1664                 :            :         int idx;
    1665                 :            : 
    1666                 :      11385 :         idx = device_links_read_lock();
    1667                 :            : 
    1668         [ -  + ]:      11385 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
    1669                 :            :                                 device_links_read_lock_held())
    1670         [ #  # ]:          0 :                 if (link->flags & DL_FLAG_PM_RUNTIME) {
    1671                 :          0 :                         link->supplier_preactivated = true;
    1672                 :          0 :                         refcount_inc(&link->rpm_active);
    1673                 :          0 :                         pm_runtime_get_sync(link->supplier);
    1674                 :            :                 }
    1675                 :            : 
    1676                 :      11385 :         device_links_read_unlock(idx);
    1677                 :      11385 : }
    1678                 :            : 
    1679                 :            : /**
    1680                 :            :  * pm_runtime_put_suppliers - Drop references to supplier devices.
    1681                 :            :  * @dev: Consumer device.
    1682                 :            :  */
    1683                 :      11385 : void pm_runtime_put_suppliers(struct device *dev)
    1684                 :            : {
    1685                 :            :         struct device_link *link;
    1686                 :            :         int idx;
    1687                 :            : 
    1688                 :      11385 :         idx = device_links_read_lock();
    1689                 :            : 
    1690         [ -  + ]:      11385 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
    1691                 :            :                                 device_links_read_lock_held())
    1692         [ #  # ]:          0 :                 if (link->supplier_preactivated) {
    1693                 :          0 :                         link->supplier_preactivated = false;
    1694         [ #  # ]:          0 :                         if (refcount_dec_not_one(&link->rpm_active))
    1695                 :          0 :                                 pm_runtime_put(link->supplier);
    1696                 :            :                 }
    1697                 :            : 
    1698                 :      11385 :         device_links_read_unlock(idx);
    1699                 :      11385 : }
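Both helpers are called by the driver core around a consumer's probe; a driver only has to create a managed device link with DL_FLAG_PM_RUNTIME for its supplier to be resumed and reference-counted here. A minimal sketch (foo_link_supplier() is hypothetical; the consumer and supplier pointers are assumed to be obtained elsewhere):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_runtime.h>

static int foo_link_supplier(struct device *consumer, struct device *supplier)
{
        struct device_link *link;

        /* DL_FLAG_PM_RUNTIME ties the supplier's runtime PM to the consumer;
         * DL_FLAG_AUTOREMOVE_CONSUMER drops the link when the consumer's
         * driver unbinds. */
        link = device_link_add(consumer, supplier,
                               DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
        return link ? 0 : -EINVAL;
}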
    1700                 :            : 
    1701                 :          0 : void pm_runtime_new_link(struct device *dev)
    1702                 :            : {
    1703                 :            :         spin_lock_irq(&dev->power.lock);
    1704                 :          0 :         dev->power.links_count++;
    1705                 :            :         spin_unlock_irq(&dev->power.lock);
    1706                 :          0 : }
    1707                 :            : 
    1708                 :          0 : void pm_runtime_drop_link(struct device *dev)
    1709                 :            : {
    1710                 :            :         spin_lock_irq(&dev->power.lock);
    1711         [ #  # ]:          0 :         WARN_ON(dev->power.links_count == 0);
    1712                 :          0 :         dev->power.links_count--;
    1713                 :            :         spin_unlock_irq(&dev->power.lock);
    1714                 :          0 : }
    1715                 :            : 
    1716                 :            : static bool pm_runtime_need_not_resume(struct device *dev)
    1717                 :            : {
    1718   [ #  #  #  #  :          0 :         return atomic_read(&dev->power.usage_count) <= 1 &&
             #  #  #  # ]
    1719   [ #  #  #  # ]:          0 :                 (atomic_read(&dev->power.child_count) == 0 ||
    1720                 :          0 :                  dev->power.ignore_children);
    1721                 :            : }
    1722                 :            : 
    1723                 :            : /**
    1724                 :            :  * pm_runtime_force_suspend - Force a device into suspend state if needed.
    1725                 :            :  * @dev: Device to suspend.
    1726                 :            :  *
    1727                 :            :  * Disable runtime PM so we safely can check the device's runtime PM status and
    1728                 :            :  * if it is active, invoke its ->runtime_suspend callback to suspend it and
    1729                 :            :  * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
    1730                 :            :  * usage and children counters don't indicate that the device was in use before
    1731                 :            :  * the system-wide transition under way, decrement its parent's children counter
    1732                 :            :  * (if there is a parent).  Keep runtime PM disabled to preserve the state
    1733                 :            :  * unless we encounter errors.
    1734                 :            :  *
     1735                 :            :  * Typically this function is invoked from a system suspend callback to make
     1736                 :            :  * sure the device is put into a low power state; it should only be used during
    1737                 :            :  * system-wide PM transitions to sleep states.  It assumes that the analogous
    1738                 :            :  * pm_runtime_force_resume() will be used to resume the device.
    1739                 :            :  */
    1740                 :          0 : int pm_runtime_force_suspend(struct device *dev)
    1741                 :            : {
    1742                 :            :         int (*callback)(struct device *);
    1743                 :            :         int ret;
    1744                 :            : 
    1745                 :            :         pm_runtime_disable(dev);
    1746         [ #  # ]:          0 :         if (pm_runtime_status_suspended(dev))
    1747                 :            :                 return 0;
    1748                 :            : 
    1749                 :          0 :         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
    1750                 :            : 
    1751         [ #  # ]:          0 :         ret = callback ? callback(dev) : 0;
    1752         [ #  # ]:          0 :         if (ret)
    1753                 :            :                 goto err;
    1754                 :            : 
    1755                 :            :         /*
    1756                 :            :          * If the device can stay in suspend after the system-wide transition
    1757                 :            :          * to the working state that will follow, drop the children counter of
    1758                 :            :          * its parent, but set its status to RPM_SUSPENDED anyway in case this
    1759                 :            :          * function will be called again for it in the meantime.
    1760                 :            :          */
    1761         [ #  # ]:          0 :         if (pm_runtime_need_not_resume(dev))
    1762                 :            :                 pm_runtime_set_suspended(dev);
    1763                 :            :         else
    1764                 :            :                 __update_runtime_status(dev, RPM_SUSPENDED);
    1765                 :            : 
    1766                 :            :         return 0;
    1767                 :            : 
    1768                 :            : err:
    1769                 :          0 :         pm_runtime_enable(dev);
    1770                 :          0 :         return ret;
    1771                 :            : }
    1772                 :            : EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
    1773                 :            : 
    1774                 :            : /**
    1775                 :            :  * pm_runtime_force_resume - Force a device into resume state if needed.
    1776                 :            :  * @dev: Device to resume.
    1777                 :            :  *
     1778                 :            :  * Prior to invoking this function we expect the user to have brought the
     1779                 :            :  * device into a low power state by a call to pm_runtime_force_suspend().  Here
     1780                 :            :  * we reverse those actions and bring the device back to full power, if it is
     1781                 :            :  * expected to be used on system resume.  Otherwise, the resume is deferred and
     1782                 :            :  * managed via runtime PM.
    1783                 :            :  *
    1784                 :            :  * Typically this function may be invoked from a system resume callback.
    1785                 :            :  */
    1786                 :          0 : int pm_runtime_force_resume(struct device *dev)
    1787                 :            : {
    1788                 :            :         int (*callback)(struct device *);
    1789                 :            :         int ret = 0;
    1790                 :            : 
    1791   [ #  #  #  # ]:          0 :         if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
    1792                 :            :                 goto out;
    1793                 :            : 
    1794                 :            :         /*
    1795                 :            :          * The value of the parent's children counter is correct already, so
    1796                 :            :          * just update the status of the device.
    1797                 :            :          */
    1798                 :            :         __update_runtime_status(dev, RPM_ACTIVE);
    1799                 :            : 
    1800                 :          0 :         callback = RPM_GET_CALLBACK(dev, runtime_resume);
    1801                 :            : 
    1802         [ #  # ]:          0 :         ret = callback ? callback(dev) : 0;
    1803         [ #  # ]:          0 :         if (ret) {
    1804                 :            :                 pm_runtime_set_suspended(dev);
    1805                 :            :                 goto out;
    1806                 :            :         }
    1807                 :            : 
    1808                 :            :         pm_runtime_mark_last_busy(dev);
    1809                 :            : out:
    1810                 :          0 :         pm_runtime_enable(dev);
    1811                 :          0 :         return ret;
    1812                 :            : }
    1813                 :            : EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
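This pair is most often wired up directly as the system sleep callbacks in a driver's dev_pm_ops, next to its runtime PM callbacks; that is only appropriate when the runtime PM callbacks alone are enough to reach the desired system-wide low-power state. A sketch with hypothetical foo_runtime_* callbacks:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
        /* Quiesce the hardware, gate clocks, etc. */
        return 0;
}

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
        /* Ungate clocks and restore context. */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};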

Generated by: LCOV version 1.14