LCOV - code coverage report
Current view: top level - arch/x86/kernel - tsc_sync.c (source / functions)
Test: combined.info
Date: 2022-04-01 14:17:54

                   Hit    Total    Coverage
    Lines:          10      136       7.4 %
    Functions:       2        7      28.6 %
    Branches:        2       82       2.4 %
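(Each coverage figure above is simply hit/total: lines 10/136 ≈ 7.4 %, functions 2/7 ≈ 28.6 %, branches 2/82 ≈ 2.4 %.)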

           Branch data     Line data    Source code
       1                 :            : // SPDX-License-Identifier: GPL-2.0
       2                 :            : /*
       3                 :            :  * check TSC synchronization.
       4                 :            :  *
       5                 :            :  * Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
       6                 :            :  *
        7                 :            :  * We check whether all boot CPUs have their TSCs synchronized,
       8                 :            :  * print a warning if not and turn off the TSC clock-source.
       9                 :            :  *
      10                 :            :  * The warp-check is point-to-point between two CPUs, the CPU
      11                 :            :  * initiating the bootup is the 'source CPU', the freshly booting
      12                 :            :  * CPU is the 'target CPU'.
      13                 :            :  *
      14                 :            :  * Only two CPUs may participate - they can enter in any order.
      15                 :            :  * ( The serial nature of the boot logic and the CPU hotplug lock
      16                 :            :  *   protects against more than 2 CPUs entering this code. )
      17                 :            :  */
      18                 :            : #include <linux/topology.h>
      19                 :            : #include <linux/spinlock.h>
      20                 :            : #include <linux/kernel.h>
      21                 :            : #include <linux/smp.h>
      22                 :            : #include <linux/nmi.h>
      23                 :            : #include <asm/tsc.h>
      24                 :            : 
      25                 :            : struct tsc_adjust {
      26                 :            :         s64             bootval;
      27                 :            :         s64             adjusted;
      28                 :            :         unsigned long   nextcheck;
      29                 :            :         bool            warned;
      30                 :            : };
      31                 :            : 
      32                 :            : static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
      33                 :            : 
      34                 :            : /*
      35                 :            :  * TSC's on different sockets may be reset asynchronously.
      36                 :            :  * This may cause the TSC ADJUST value on socket 0 to be NOT 0.
      37                 :            :  */
      38                 :            : bool __read_mostly tsc_async_resets;
      39                 :            : 
      40                 :          0 : void mark_tsc_async_resets(char *reason)
      41                 :            : {
      42         [ #  # ]:          0 :         if (tsc_async_resets)
      43                 :            :                 return;
      44                 :          0 :         tsc_async_resets = true;
      45                 :          0 :         pr_info("tsc: Marking TSC async resets true due to %s\n", reason);
      46                 :            : }
      47                 :            : 
      48                 :       3949 : void tsc_verify_tsc_adjust(bool resume)
      49                 :            : {
      50                 :       3949 :         struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
      51                 :       3949 :         s64 curval;
      52                 :            : 
      53         [ -  + ]:       3949 :         if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
      54                 :            :                 return;
      55                 :            : 
      56                 :            :         /* Skip unnecessary error messages if TSC already unstable */
      57         [ #  # ]:          0 :         if (check_tsc_unstable())
      58                 :            :                 return;
      59                 :            : 
      60                 :            :         /* Rate limit the MSR check */
      61   [ #  #  #  # ]:          0 :         if (!resume && time_before(jiffies, adj->nextcheck))
      62                 :            :                 return;
      63                 :            : 
      64                 :          0 :         adj->nextcheck = jiffies + HZ;
      65                 :            : 
      66                 :          0 :         rdmsrl(MSR_IA32_TSC_ADJUST, curval);
      67         [ #  # ]:          0 :         if (adj->adjusted == curval)
      68                 :            :                 return;
      69                 :            : 
      70                 :            :         /* Restore the original value */
      71                 :          0 :         wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);
      72                 :            : 
      73   [ #  #  #  # ]:          0 :         if (!adj->warned || resume) {
      74                 :          0 :                 pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
      75                 :            :                         smp_processor_id(), adj->adjusted, curval);
      76                 :          0 :                 adj->warned = true;
      77                 :            :         }
      78                 :            : }
      79                 :            : 
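The nextcheck/jiffies test above rate-limits the MSR re-check to roughly once per second unless a resume forces it. A minimal userspace sketch of the same pattern, using time() in place of jiffies (illustrative only, not the kernel's code):

        /* Illustrative analogue of the nextcheck/jiffies rate limit above: run an
         * expensive check at most about once per second unless 'resume' forces it. */
        #include <stdio.h>
        #include <time.h>

        static time_t nextcheck;

        static void verify(int resume)
        {
                time_t now = time(NULL);

                if (!resume && now < nextcheck)         /* rate limit the check */
                        return;
                nextcheck = now + 1;                    /* earliest next run: +1 second */
                puts("running the expensive check");    /* stand-in for the MSR check */
        }

        int main(void)
        {
                for (int i = 0; i < 3; i++)
                        verify(0);      /* back-to-back calls: only the first runs */
                verify(1);              /* resume bypasses the rate limit */
                return 0;
        }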
      80                 :          0 : static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
      81                 :            :                                    unsigned int cpu, bool bootcpu)
      82                 :            : {
      83                 :            :         /*
      84                 :            :          * First online CPU in a package stores the boot value in the
      85                 :            :          * adjustment value. This value might change later via the sync
      86                 :            :          * mechanism. If that fails we still can yell about boot values not
      87                 :            :          * being consistent.
      88                 :            :          *
      89                 :            :          * On the boot cpu we just force set the ADJUST value to 0 if it's
      90                 :            :          * non zero. We don't do that on non boot cpus because physical
      91                 :            :          * hotplug should have set the ADJUST register to a value > 0 so
      92                 :            :          * the TSC is in sync with the already running cpus.
      93                 :            :          *
      94                 :            :          * Also don't force the ADJUST value to zero if that is a valid value
      95                 :            :          * for socket 0 as determined by the system arch.  This is required
      96                 :            :          * when multiple sockets are reset asynchronously with each other
       97                 :            :  * and socket 0 may not have a TSC ADJUST value of 0.
      98                 :            :          */
      99         [ #  # ]:          0 :         if (bootcpu && bootval != 0) {
     100         [ #  # ]:          0 :                 if (likely(!tsc_async_resets)) {
     101                 :          0 :                         pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n",
     102                 :            :                                 cpu, bootval);
     103                 :          0 :                         wrmsrl(MSR_IA32_TSC_ADJUST, 0);
     104                 :          0 :                         bootval = 0;
     105                 :            :                 } else {
     106                 :          0 :                         pr_info("TSC ADJUST: CPU%u: %lld NOT forced to 0\n",
     107                 :            :                                 cpu, bootval);
     108                 :            :                 }
     109                 :            :         }
     110                 :          0 :         cur->adjusted = bootval;
     111                 :          0 : }
     112                 :            : 
     113                 :            : #ifndef CONFIG_SMP
     114                 :            : bool __init tsc_store_and_check_tsc_adjust(bool bootcpu)
     115                 :            : {
     116                 :            :         struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
     117                 :            :         s64 bootval;
     118                 :            : 
     119                 :            :         if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
     120                 :            :                 return false;
     121                 :            : 
     122                 :            :         /* Skip unnecessary error messages if TSC already unstable */
     123                 :            :         if (check_tsc_unstable())
     124                 :            :                 return false;
     125                 :            : 
     126                 :            :         rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
     127                 :            :         cur->bootval = bootval;
     128                 :            :         cur->nextcheck = jiffies + HZ;
     129                 :            :         tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(), bootcpu);
     130                 :            :         return false;
     131                 :            : }
     132                 :            : 
     133                 :            : #else /* !CONFIG_SMP */
     134                 :            : 
     135                 :            : /*
     136                 :            :  * Store and check the TSC ADJUST MSR if available
     137                 :            :  */
     138                 :         11 : bool tsc_store_and_check_tsc_adjust(bool bootcpu)
     139                 :            : {
     140                 :         11 :         struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
     141                 :         11 :         unsigned int refcpu, cpu = smp_processor_id();
     142                 :         11 :         struct cpumask *mask;
     143                 :         11 :         s64 bootval;
     144                 :            : 
     145         [ -  + ]:         11 :         if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
     146                 :            :                 return false;
     147                 :            : 
     148                 :          0 :         rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
     149                 :          0 :         cur->bootval = bootval;
     150                 :          0 :         cur->nextcheck = jiffies + HZ;
     151                 :          0 :         cur->warned = false;
     152                 :            : 
     153                 :            :         /*
     154                 :            :          * If a non-zero TSC value for socket 0 may be valid then the default
      155                 :            :          * adjusted value cannot be assumed to be zero either.
     156                 :            :          */
     157         [ #  # ]:          0 :         if (tsc_async_resets)
     158                 :          0 :                 cur->adjusted = bootval;
     159                 :            : 
     160                 :            :         /*
     161                 :            :          * Check whether this CPU is the first in a package to come up. In
     162                 :            :          * this case do not check the boot value against another package
     163                 :            :          * because the new package might have been physically hotplugged,
     164                 :            :          * where TSC_ADJUST is expected to be different. When called on the
     165                 :            :          * boot CPU topology_core_cpumask() might not be available yet.
     166                 :            :          */
     167                 :          0 :         mask = topology_core_cpumask(cpu);
     168         [ #  # ]:          0 :         refcpu = mask ? cpumask_any_but(mask, cpu) : nr_cpu_ids;
     169                 :            : 
     170         [ #  # ]:          0 :         if (refcpu >= nr_cpu_ids) {
     171                 :          0 :                 tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(),
     172                 :            :                                        bootcpu);
     173                 :          0 :                 return false;
     174                 :            :         }
     175                 :            : 
     176                 :          0 :         ref = per_cpu_ptr(&tsc_adjust, refcpu);
     177                 :            :         /*
     178                 :            :          * Compare the boot value and complain if it differs in the
     179                 :            :          * package.
     180                 :            :          */
     181         [ #  # ]:          0 :         if (bootval != ref->bootval)
     182         [ #  # ]:          0 :                 printk_once(FW_BUG "TSC ADJUST differs within socket(s), fixing all errors\n");
     183                 :            : 
     184                 :            :         /*
     185                 :            :          * The TSC_ADJUST values in a package must be the same. If the boot
     186                 :            :          * value on this newly upcoming CPU differs from the adjustment
     187                 :            :          * value of the already online CPU in this package, set it to that
     188                 :            :          * adjusted value.
     189                 :            :          */
     190         [ #  # ]:          0 :         if (bootval != ref->adjusted) {
     191                 :          0 :                 cur->adjusted = ref->adjusted;
     192                 :          0 :                 wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
     193                 :            :         }
     194                 :            :         /*
     195                 :            :          * We have the TSCs forced to be in sync on this package. Skip sync
     196                 :            :          * test:
     197                 :            :          */
     198                 :            :         return true;
     199                 :            : }
     200                 :            : 
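The TSC_ADJUST value that the code above stores and sanitizes can also be inspected from userspace through the msr character device (root plus the msr module). A minimal sketch, assuming IA32_TSC_ADJUST is MSR 0x3b and CPU 0 is the one of interest:

        /* Illustrative only: dump IA32_TSC_ADJUST (assumed MSR 0x3b) for CPU 0.
         * Needs root and the 'msr' kernel module providing /dev/cpu/0/msr. */
        #include <fcntl.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                uint64_t val;
                int fd = open("/dev/cpu/0/msr", O_RDONLY);

                if (fd < 0 || pread(fd, &val, sizeof(val), 0x3b) != sizeof(val)) {
                        perror("MSR_IA32_TSC_ADJUST");
                        return 1;
                }
                printf("CPU0 TSC_ADJUST: %lld\n", (long long)(int64_t)val);
                close(fd);
                return 0;
        }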
     201                 :            : /*
     202                 :            :  * Entry/exit counters that make sure that both CPUs
     203                 :            :  * run the measurement code at once:
     204                 :            :  */
     205                 :            : static atomic_t start_count;
     206                 :            : static atomic_t stop_count;
     207                 :            : static atomic_t skip_test;
     208                 :            : static atomic_t test_runs;
     209                 :            : 
     210                 :            : /*
     211                 :            :  * We use a raw spinlock in this exceptional case, because
     212                 :            :  * we want to have the fastest, inlined, non-debug version
     213                 :            :  * of a critical section, to be able to prove TSC time-warps:
     214                 :            :  */
     215                 :            : static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
     216                 :            : 
     217                 :            : static cycles_t last_tsc;
     218                 :            : static cycles_t max_warp;
     219                 :            : static int nr_warps;
     220                 :            : static int random_warps;
     221                 :            : 
     222                 :            : /*
     223                 :            :  * TSC-warp measurement loop running on both CPUs.  This is not called
     224                 :            :  * if there is no TSC.
     225                 :            :  */
     226                 :          0 : static cycles_t check_tsc_warp(unsigned int timeout)
     227                 :            : {
     228                 :          0 :         cycles_t start, now, prev, end, cur_max_warp = 0;
     229                 :          0 :         int i, cur_warps = 0;
     230                 :            : 
     231                 :          0 :         start = rdtsc_ordered();
     232                 :            :         /*
     233                 :            :          * The measurement runs for 'timeout' msecs:
     234                 :            :          */
     235                 :          0 :         end = start + (cycles_t) tsc_khz * timeout;
     236                 :            : 
     237                 :          0 :         for (i = 0; ; i++) {
     238                 :            :                 /*
     239                 :            :                  * We take the global lock, measure TSC, save the
     240                 :            :                  * previous TSC that was measured (possibly on
     241                 :            :                  * another CPU) and update the previous TSC timestamp.
     242                 :            :                  */
     243                 :          0 :                 arch_spin_lock(&sync_lock);
     244                 :          0 :                 prev = last_tsc;
     245                 :          0 :                 now = rdtsc_ordered();
     246                 :          0 :                 last_tsc = now;
     247                 :          0 :                 arch_spin_unlock(&sync_lock);
     248                 :            : 
     249                 :            :                 /*
     250                 :            :                  * Be nice every now and then (and also check whether
     251                 :            :                  * measurement is done [we also insert a 10 million
      252                 :            :                  * loops safety exit, so we don't lock up in case the
     253                 :            :                  * TSC readout is totally broken]):
     254                 :            :                  */
     255         [ #  # ]:          0 :                 if (unlikely(!(i & 7))) {
     256         [ #  # ]:          0 :                         if (now > end || i > 10000000)
     257                 :            :                                 break;
     258                 :          0 :                         cpu_relax();
     259                 :          0 :                         touch_nmi_watchdog();
     260                 :            :                 }
     261                 :            :                 /*
     262                 :            :                  * Outside the critical section we can now see whether
     263                 :            :                  * we saw a time-warp of the TSC going backwards:
     264                 :            :                  */
     265         [ #  # ]:          0 :                 if (unlikely(prev > now)) {
     266                 :          0 :                         arch_spin_lock(&sync_lock);
     267                 :          0 :                         max_warp = max(max_warp, prev - now);
     268                 :          0 :                         cur_max_warp = max_warp;
     269                 :            :                         /*
     270                 :            :                          * Check whether this bounces back and forth. Only
     271                 :            :                          * one CPU should observe time going backwards.
     272                 :            :                          */
     273         [ #  # ]:          0 :                         if (cur_warps != nr_warps)
     274                 :          0 :                                 random_warps++;
     275                 :          0 :                         nr_warps++;
     276                 :          0 :                         cur_warps = nr_warps;
     277                 :          0 :                         arch_spin_unlock(&sync_lock);
     278                 :            :                 }
     279                 :            :         }
     280         [ #  # ]:          0 :         WARN(!(now-start),
     281                 :            :                 "Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
     282                 :            :                         now-start, end-start);
     283                 :          0 :         return cur_max_warp;
     284                 :            : }
     285                 :            : 
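check_tsc_warp() above is the core of the file: both CPUs funnel their latest TSC reading through the shared last_tsc under a lock and flag any backward step as a warp. A rough userspace analogue with two pthreads and the __rdtsc() intrinsic (GCC/Clang, x86 only; purely illustrative, and the warp bookkeeping is kept inside the lock here for brevity):

        /* Illustrative userspace analogue of check_tsc_warp(): two threads share the
         * last observed TSC under a mutex and count any backward steps ("warps").
         * Build with: gcc -O2 -pthread warp.c (x86 only). */
        #include <pthread.h>
        #include <stdio.h>
        #include <x86intrin.h>

        #define ITERS 1000000

        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        static unsigned long long last_tsc, max_warp;
        static int nr_warps;

        static void *warp_check(void *arg)
        {
                for (int i = 0; i < ITERS; i++) {
                        pthread_mutex_lock(&lock);
                        unsigned long long prev = last_tsc;
                        unsigned long long now = __rdtsc();

                        last_tsc = now;
                        if (prev > now) {               /* time went backwards */
                                nr_warps++;
                                if (prev - now > max_warp)
                                        max_warp = prev - now;
                        }
                        pthread_mutex_unlock(&lock);
                }
                return NULL;
        }

        int main(void)
        {
                pthread_t a, b;

                pthread_create(&a, NULL, warp_check, NULL);
                pthread_create(&b, NULL, warp_check, NULL);
                pthread_join(a, NULL);
                pthread_join(b, NULL);
                printf("warps: %d, max: %llu cycles\n", nr_warps, max_warp);
                return 0;
        }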
     286                 :            : /*
     287                 :            :  * If the target CPU coming online doesn't have any of its core-siblings
     288                 :            :  * online, a timeout of 20msec will be used for the TSC-warp measurement
     289                 :            :  * loop. Otherwise a smaller timeout of 2msec will be used, as we have some
     290                 :            :  * information about this socket already (and this information grows as we
     291                 :            :  * have more and more logical-siblings in that socket).
     292                 :            :  *
     293                 :            :  * Ideally we should be able to skip the TSC sync check on the other
     294                 :            :  * core-siblings, if the first logical CPU in a socket passed the sync test.
     295                 :            :  * But as the TSC is per-logical CPU and can potentially be modified wrongly
      296                 :            :  * by the BIOS, a TSC sync test of shorter duration should be able
      297                 :            :  * to catch such errors. Also this will catch the condition where all the
      298                 :            :  * cores in the socket don't get reset at the same time.
     299                 :            :  */
     300                 :          0 : static inline unsigned int loop_timeout(int cpu)
     301                 :            : {
     302                 :          0 :         return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
     303                 :            : }
     304                 :            : 
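Concretely: the first CPU of a package to come online sees a core mask of weight 1 and gets the full 20 msec measurement loop; any later sibling in that package runs only the 2 msec variant.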
     305                 :            : /*
     306                 :            :  * Source CPU calls into this - it waits for the freshly booted
     307                 :            :  * target CPU to arrive and then starts the measurement:
     308                 :            :  */
     309                 :          0 : void check_tsc_sync_source(int cpu)
     310                 :            : {
     311                 :          0 :         int cpus = 2;
     312                 :            : 
     313                 :            :         /*
     314                 :            :          * No need to check if we already know that the TSC is not
     315                 :            :          * synchronized or if we have no TSC.
     316                 :            :          */
     317         [ #  # ]:          0 :         if (unsynchronized_tsc())
     318                 :            :                 return;
     319                 :            : 
     320                 :            :         /*
     321                 :            :          * Set the maximum number of test runs to
     322                 :            :          *  1 if the CPU does not provide the TSC_ADJUST MSR
     323                 :            :          *  3 if the MSR is available, so the target can try to adjust
     324                 :            :          */
     325         [ #  # ]:          0 :         if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
     326                 :          0 :                 atomic_set(&test_runs, 1);
     327                 :            :         else
     328                 :          0 :                 atomic_set(&test_runs, 3);
     329                 :            : retry:
     330                 :            :         /*
     331                 :            :          * Wait for the target to start or to skip the test:
     332                 :            :          */
     333         [ #  # ]:          0 :         while (atomic_read(&start_count) != cpus - 1) {
     334         [ #  # ]:          0 :                 if (atomic_read(&skip_test) > 0) {
     335                 :          0 :                         atomic_set(&skip_test, 0);
     336                 :          0 :                         return;
     337                 :            :                 }
     338                 :          0 :                 cpu_relax();
     339                 :            :         }
     340                 :            : 
     341                 :            :         /*
     342                 :            :          * Trigger the target to continue into the measurement too:
     343                 :            :          */
     344                 :          0 :         atomic_inc(&start_count);
     345                 :            : 
     346         [ #  # ]:          0 :         check_tsc_warp(loop_timeout(cpu));
     347                 :            : 
     348         [ #  # ]:          0 :         while (atomic_read(&stop_count) != cpus-1)
     349                 :          0 :                 cpu_relax();
     350                 :            : 
     351                 :            :         /*
     352                 :            :          * If the test was successful set the number of runs to zero and
      353                 :            :          * stop. If not, decrement the number of runs and check if we can
     354                 :            :          * retry. In case of random warps no retry is attempted.
     355                 :            :          */
     356         [ #  # ]:          0 :         if (!nr_warps) {
     357                 :          0 :                 atomic_set(&test_runs, 0);
     358                 :            : 
     359                 :          0 :                 pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
     360                 :            :                         smp_processor_id(), cpu);
     361                 :            : 
     362   [ #  #  #  # ]:          0 :         } else if (atomic_dec_and_test(&test_runs) || random_warps) {
     363                 :            :                 /* Force it to 0 if random warps brought us here */
     364                 :          0 :                 atomic_set(&test_runs, 0);
     365                 :            : 
     366                 :          0 :                 pr_warn("TSC synchronization [CPU#%d -> CPU#%d]:\n",
     367                 :            :                         smp_processor_id(), cpu);
     368                 :          0 :                 pr_warn("Measured %Ld cycles TSC warp between CPUs, "
     369                 :            :                         "turning off TSC clock.\n", max_warp);
     370         [ #  # ]:          0 :                 if (random_warps)
     371                 :          0 :                         pr_warn("TSC warped randomly between CPUs\n");
     372                 :          0 :                 mark_tsc_unstable("check_tsc_sync_source failed");
     373                 :            :         }
     374                 :            : 
     375                 :            :         /*
     376                 :            :          * Reset it - just in case we boot another CPU later:
     377                 :            :          */
     378                 :          0 :         atomic_set(&start_count, 0);
     379                 :          0 :         random_warps = 0;
     380                 :          0 :         nr_warps = 0;
     381                 :          0 :         max_warp = 0;
     382                 :          0 :         last_tsc = 0;
     383                 :            : 
     384                 :            :         /*
     385                 :            :          * Let the target continue with the bootup:
     386                 :            :          */
     387                 :          0 :         atomic_inc(&stop_count);
     388                 :            : 
     389                 :            :         /*
     390                 :            :          * Retry, if there is a chance to do so.
     391                 :            :          */
     392         [ #  # ]:          0 :         if (atomic_read(&test_runs) > 0)
     393                 :          0 :                 goto retry;
     394                 :            : }
     395                 :            : 
     396                 :            : /*
     397                 :            :  * Freshly booted CPUs call into this:
     398                 :            :  */
     399                 :          0 : void check_tsc_sync_target(void)
     400                 :            : {
     401                 :          0 :         struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
     402                 :          0 :         unsigned int cpu = smp_processor_id();
     403                 :          0 :         cycles_t cur_max_warp, gbl_max_warp;
     404                 :          0 :         int cpus = 2;
     405                 :            : 
     406                 :            :         /* Also aborts if there is no TSC. */
     407         [ #  # ]:          0 :         if (unsynchronized_tsc())
     408                 :            :                 return;
     409                 :            : 
     410                 :            :         /*
     411                 :            :          * Store, verify and sanitize the TSC adjust register. If
     412                 :            :          * successful skip the test.
     413                 :            :          *
     414                 :            :          * The test is also skipped when the TSC is marked reliable. This
     415                 :            :          * is true for SoCs which have no fallback clocksource. On these
     416                 :            :          * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
      417                 :            :          * register might have been wrecked by the BIOS.
     418                 :            :          */
     419   [ #  #  #  # ]:          0 :         if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
     420                 :          0 :                 atomic_inc(&skip_test);
     421                 :          0 :                 return;
     422                 :            :         }
     423                 :            : 
     424                 :          0 : retry:
     425                 :            :         /*
     426                 :            :          * Register this CPU's participation and wait for the
     427                 :            :          * source CPU to start the measurement:
     428                 :            :          */
     429                 :          0 :         atomic_inc(&start_count);
     430         [ #  # ]:          0 :         while (atomic_read(&start_count) != cpus)
     431                 :          0 :                 cpu_relax();
     432                 :            : 
     433         [ #  # ]:          0 :         cur_max_warp = check_tsc_warp(loop_timeout(cpu));
     434                 :            : 
     435                 :            :         /*
     436                 :            :          * Store the maximum observed warp value for a potential retry:
     437                 :            :          */
     438                 :          0 :         gbl_max_warp = max_warp;
     439                 :            : 
     440                 :            :         /*
     441                 :            :          * Ok, we are done:
     442                 :            :          */
     443                 :          0 :         atomic_inc(&stop_count);
     444                 :            : 
     445                 :            :         /*
     446                 :            :          * Wait for the source CPU to print stuff:
     447                 :            :          */
     448         [ #  # ]:          0 :         while (atomic_read(&stop_count) != cpus)
     449                 :          0 :                 cpu_relax();
     450                 :            : 
     451                 :            :         /*
     452                 :            :          * Reset it for the next sync test:
     453                 :            :          */
     454                 :          0 :         atomic_set(&stop_count, 0);
     455                 :            : 
     456                 :            :         /*
     457                 :            :          * Check the number of remaining test runs. If not zero, the test
     458                 :            :          * failed and a retry with adjusted TSC is possible. If zero the
     459                 :            :          * test was either successful or failed terminally.
     460                 :            :          */
     461         [ #  # ]:          0 :         if (!atomic_read(&test_runs))
     462                 :            :                 return;
     463                 :            : 
     464                 :            :         /*
     465                 :            :          * If the warp value of this CPU is 0, then the other CPU
     466                 :            :          * observed time going backwards so this TSC was ahead and
     467                 :            :          * needs to move backwards.
     468                 :            :          */
     469         [ #  # ]:          0 :         if (!cur_max_warp)
     470                 :          0 :                 cur_max_warp = -gbl_max_warp;
     471                 :            : 
     472                 :            :         /*
     473                 :            :          * Add the result to the previous adjustment value.
     474                 :            :          *
      475                 :            :          * The adjustment value is slightly off by the overhead of the
     476                 :            :          * sync mechanism (observed values are ~200 TSC cycles), but this
     477                 :            :          * really depends on CPU, node distance and frequency. So
     478                 :            :          * compensating for this is hard to get right. Experiments show
      479                 :            :          * that the warp is no longer detectable when the observed warp
     480                 :            :          * value is used. In the worst case the adjustment needs to go
     481                 :            :          * through a 3rd run for fine tuning.
     482                 :            :          */
     483                 :          0 :         cur->adjusted += cur_max_warp;
     484                 :            : 
     485                 :          0 :         pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n",
     486                 :            :                 cpu, cur_max_warp, cur->adjusted);
     487                 :            : 
     488                 :          0 :         wrmsrl(MSR_IA32_TSC_ADJUST, cur->adjusted);
     489                 :          0 :         goto retry;
     490                 :            : 
     491                 :            : }
     492                 :            : 
     493                 :            : #endif /* CONFIG_SMP */
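The source and target paths above meet in a simple two-party barrier: each side announces itself through start_count, both run the warp loop, and stop_count orders the print-out and the reset. A stripped-down userspace sketch of that handshake using C11 atomics, with a hypothetical measure() standing in for check_tsc_warp() (shape only, not the kernel's exact sequencing):

        /* Illustrative two-party rendezvous in the spirit of start_count/stop_count:
         * each thread waits for its peer before and after the measurement. */
        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdio.h>

        static atomic_int start_count, stop_count;

        static void measure(int id)
        {
                printf("thread %d measuring\n", id);    /* stand-in for check_tsc_warp() */
        }

        static void *participant(void *arg)
        {
                int id = *(int *)arg;

                atomic_fetch_add(&start_count, 1);
                while (atomic_load(&start_count) != 2)  /* wait for the peer to arrive */
                        ;
                measure(id);
                atomic_fetch_add(&stop_count, 1);
                while (atomic_load(&stop_count) != 2)   /* wait for the peer to finish */
                        ;
                return NULL;
        }

        int main(void)
        {
                pthread_t t1, t2;
                int a = 0, b = 1;

                pthread_create(&t1, NULL, participant, &a);
                pthread_create(&t2, NULL, participant, &b);
                pthread_join(t1, NULL);
                pthread_join(t2, NULL);
                return 0;
        }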

Generated by: LCOV version 1.14