// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>
#include <linux/static_call.h>

#include <asm/cpuid/api.h>
#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/i8259.h>
#include <asm/msr.h>
#include <asm/topology.h>
#include <asm/uv/uv.h>
#include <asm/sev.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

#define KHZ	1000
/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;
static unsigned int __initdata tsc_early_khz;

static DEFINE_STATIC_KEY_FALSE_RO(__use_tsc);

int tsc_clocksource_reliable;

static int __read_mostly tsc_force_recalibrate;

static struct clocksource_base art_base_clk = {
	.id = CSID_X86_ART,
};
static bool have_art;

struct cyc2ns {
	struct cyc2ns_data data[2];	/*  0 + 2*16 = 32 */
	seqcount_latch_t   seq;		/* 32 + 4    = 36 */

}; /* fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);

static int __init tsc_early_khz_setup(char *buf)
{
	return kstrtouint(buf, 0, &tsc_early_khz);
}
early_param("tsc_early_khz", tsc_early_khz_setup);

__always_inline void __cyc2ns_read(struct cyc2ns_data *data)
{
	int seq, idx;

	do {
		seq = this_cpu_read(cyc2ns.seq.seqcount.sequence);
		idx = seq & 1;

		data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
		data->cyc2ns_mul    = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
		data->cyc2ns_shift  = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);

	} while (unlikely(seq != this_cpu_read(cyc2ns.seq.seqcount.sequence)));
}

__always_inline void cyc2ns_read_begin(struct cyc2ns_data *data)
{
	preempt_disable_notrace();
	__cyc2ns_read(data);
}

__always_inline void cyc2ns_read_end(void)
{
	preempt_enable_notrace();
}

/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *  into a shift. The larger SC is, the more accurate the conversion, but
 *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 *  (64-bit result) can be used.
 *
 *  We can use khz divisor instead of mhz to keep a better precision.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
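
/*
 * Illustrative numbers for the scaling math above (assumed, not taken from
 * any particular machine): with cpu_khz = 2,500,000 (a 2.5 GHz TSC) and
 * SC = 2^31, cyc2ns_scale = 10^6 * 2^31 / 2,500,000 ~= 858,993,459.  A delta
 * of 2,500,000,000 cycles (one second) then yields
 * 2,500,000,000 * 858,993,459 >> 31 ~= 10^9 ns, as expected.  The actual
 * mult/shift pair is picked by clocks_calc_mult_shift() below.
 */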

static __always_inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data data;
	unsigned long long ns;

	__cyc2ns_read(&data);

	ns = data.cyc2ns_offset;
	ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);

	return ns;
}

static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	unsigned long long ns;
	preempt_disable_notrace();
	ns = __cycles_2_ns(cyc);
	preempt_enable_notrace();
	return ns;
}

static void __set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long long ns_now;
	struct cyc2ns_data data;
	struct cyc2ns *c2n;

	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
			       NSEC_PER_MSEC, 0);

	/*
	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
	 * not expected to be greater than 31 due to the original published
	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
	 * value) - refer perf_event_mmap_page documentation in perf_event.h.
	 */
	if (data.cyc2ns_shift == 32) {
		data.cyc2ns_shift = 31;
		data.cyc2ns_mul >>= 1;
	}

	data.cyc2ns_offset = ns_now -
		mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);

	c2n = per_cpu_ptr(&cyc2ns, cpu);

	write_seqcount_latch_begin(&c2n->seq);
	c2n->data[0] = data;
	write_seqcount_latch(&c2n->seq);
	c2n->data[1] = data;
	write_seqcount_latch_end(&c2n->seq);
}
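
/*
 * Informal restatement of the update above (explanatory note, not extra
 * code): the two data[] slots plus the latch sequence count let
 * __cyc2ns_read() always observe a consistent (offset, mul, shift) tuple,
 * and the offset is chosen so that
 *
 *	ns_now == cyc2ns_offset + mul_u64_u32_shr(tsc_now, cyc2ns_mul, cyc2ns_shift)
 *
 * holds at the switch-over point, keeping sched_clock() continuous across
 * a rescale.
 */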

static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
	unsigned long flags;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	if (khz)
		__set_cyc2ns_scale(khz, cpu, tsc_now);

	sched_clock_idle_wakeup_event();
	local_irq_restore(flags);
}

/*
 * Initialize cyc2ns for boot cpu
 */
static void __init cyc2ns_init_boot_cpu(void)
{
	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);

	seqcount_latch_init(&c2n->seq);
	__set_cyc2ns_scale(tsc_khz, smp_processor_id(), rdtsc());
}

/*
 * Secondary CPUs do not run through tsc_init(), so set up
 * all the scale factors for all CPUs, assuming the same
 * speed as the bootup CPU.
 */
static void __init cyc2ns_init_secondary_cpus(void)
{
	unsigned int cpu, this_cpu = smp_processor_id();
	struct cyc2ns *c2n = this_cpu_ptr(&cyc2ns);
	struct cyc2ns_data *data = c2n->data;

	for_each_possible_cpu(cpu) {
		if (cpu != this_cpu) {
			seqcount_latch_init(&c2n->seq);
			c2n = per_cpu_ptr(&cyc2ns, cpu);
			c2n->data[0] = data[0];
			c2n->data[1] = data[1];
		}
	}
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
noinstr u64 native_sched_clock(void)
{
	if (static_branch_likely(&__use_tsc)) {
		u64 tsc_now = rdtsc();

		/* return the value in ns */
		return __cycles_2_ns(tsc_now);
	}

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */

	/* No locking but a rare wrong value is not a big deal: */
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}
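
/*
 * Illustrative resolution of the fallback above (HZ value assumed, it is a
 * config choice): with HZ == 250 the jiffies path advances in steps of
 * 10^9 / 250 = 4,000,000 ns, i.e. sched_clock() has only 4 ms granularity
 * until the TSC path is usable.
 */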

/*
 * Generate a sched_clock if you already have a TSC value.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
	return cycles_2_ns(tsc);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
noinstr u64 sched_clock_noinstr(void)
{
	return paravirt_sched_clock();
}

bool using_native_sched_clock(void)
{
	return static_call_query(pv_sched_clock) == native_sched_clock;
}
#else
u64 sched_clock_noinstr(void) __attribute__((alias("native_sched_clock")));

bool using_native_sched_clock(void) { return true; }
#endif

notrace u64 sched_clock(void)
{
	u64 now;
	preempt_disable_notrace();
	now = sched_clock_noinstr();
	preempt_enable_notrace();
	return now;
}

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	mark_tsc_unstable("boot parameter notsc");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);

static int no_sched_irq_time;
static int no_tsc_watchdog;
static int tsc_as_watchdog;

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	if (!strcmp(str, "unstable"))
		mark_tsc_unstable("boot parameter");
	if (!strcmp(str, "nowatchdog")) {
		no_tsc_watchdog = 1;
		if (tsc_as_watchdog)
			pr_alert("%s: Overriding earlier tsc=watchdog with tsc=nowatchdog\n",
				 __func__);
		tsc_as_watchdog = 0;
	}
	if (!strcmp(str, "recalibrate"))
		tsc_force_recalibrate = 1;
	if (!strcmp(str, "watchdog")) {
		if (no_tsc_watchdog)
			pr_alert("%s: tsc=watchdog overridden by earlier tsc=nowatchdog\n",
				 __func__);
		else
			tsc_as_watchdog = 1;
	}
	return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES		5
#define TSC_DEFAULT_THRESHOLD	0x20000
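
/*
 * Informal note on the retry threshold used below (derived from the
 * constants, times assumed): tsc_khz >> 5 is roughly tsc_khz / 32, i.e. the
 * number of TSC cycles in ~31 us.  Before tsc_khz is known, the fixed
 * TSC_DEFAULT_THRESHOLD of 0x20000 (131072) cycles plays the same role,
 * e.g. ~44 us on a hypothetical 3 GHz part.
 */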

/*
 * Read TSC and the reference counters. Take care of any disturbances
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < thresh)
			return t2;
	}
	return ULLONG_MAX;
}

/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	deltatsc = div64_u64(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
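
/*
 * Worked example for the conversion above (numbers assumed): HPET_PERIOD is
 * reported in femtoseconds, so hpet_ticks * period / 10^6 is the elapsed
 * time in nanoseconds.  For a 14.318 MHz HPET (period ~= 69,841,279 fs),
 * ~143,182 HPET ticks cover ~10 ms; the caller passes deltatsc pre-scaled
 * by 10^6, so 25,000,000 cycles of a 2.5 GHz TSC arrive as 25 * 10^12,
 * and dividing by ~10^7 ns yields ~2,500,000, i.e. the TSC frequency in kHz.
 */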

/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}
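
/*
 * Same idea for the PM timer (illustrative, values assumed): the counter
 * runs at PMTMR_TICKS_PER_SEC (3.579545 MHz), so pm_ticks * 10^9 /
 * PMTMR_TICKS_PER_SEC is the elapsed time in nanoseconds; ~35,795 ticks
 * (~10 ms) against 25 * 10^12 (10 ms of a 2.5 GHz TSC, pre-scaled by 10^6)
 * again comes out at ~2,500,000 kHz.
 */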

#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000


/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	if (!has_legacy_pic()) {
		/*
		 * Relies on tsc_early_delay_calibrate() to have given us semi
		 * usable udelay(), wait for the same 50ms we would have with
		 * the PIT loop below.
		 */
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		udelay(10 * USEC_PER_MSEC);
		return ULONG_MAX;
	}

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Setup CTC channel 2* for mode 0, (interrupt on terminal
	 * count mode), binary count. Set the latch register to 50ms
	 * (LSB then MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}

/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    will then consider it a failure when it doesn't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
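
/*
 * Rough arithmetic behind the limit above: PIT_TICK_RATE is 1,193,182 Hz,
 * so 50 ms corresponds to ~59,659 PIT ticks, or ~233 MSB decrements of 256
 * ticks each.  The calibration loop below therefore observes at most a
 * couple of hundred MSB transitions before giving up.
 */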

static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	if (!has_legacy_pic())
		return 0;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			delta -= tsc;

			/*
			 * Extrapolate the error and fail fast if the error will
			 * never be below 500 ppm.
			 */
			if (i == 1 &&
			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
				return 0;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_info("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}

/**
 * native_calibrate_tsc - determine TSC frequency
 * Determine TSC frequency via CPUID, else return 0.
 */
unsigned long native_calibrate_tsc(void)
{
	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
	unsigned int crystal_khz;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
		return 0;

	eax_denominator = ebx_numerator = ecx_hz = edx = 0;

	/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
	cpuid(CPUID_LEAF_TSC, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);

	if (ebx_numerator == 0 || eax_denominator == 0)
		return 0;

	crystal_khz = ecx_hz / 1000;

	/*
	 * Denverton SoCs don't report crystal clock, and also don't support
	 * CPUID_LEAF_FREQ for the calculation below, so hardcode the 25MHz
	 * crystal clock.
	 */
	if (crystal_khz == 0 &&
	    boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT_D)
		crystal_khz = 25000;

	/*
	 * TSC frequency reported directly by CPUID is a "hardware reported"
	 * frequency and is the most accurate one we have so far. This
	 * is considered a known frequency.
	 */
	if (crystal_khz != 0)
		setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);

	/*
	 * Some Intel SoCs like Skylake and Kabylake don't report the crystal
	 * clock, but we can easily calculate it to a high degree of accuracy
	 * by considering the crystal ratio and the CPU speed.
	 */
	if (crystal_khz == 0 && boot_cpu_data.cpuid_level >= CPUID_LEAF_FREQ) {
		unsigned int eax_base_mhz, ebx, ecx, edx;

		cpuid(CPUID_LEAF_FREQ, &eax_base_mhz, &ebx, &ecx, &edx);
		crystal_khz = eax_base_mhz * 1000 *
			eax_denominator / ebx_numerator;
	}

	if (crystal_khz == 0)
		return 0;

	/*
	 * For Atom SoCs TSC is the only reliable clocksource.
	 * Mark TSC reliable so no watchdog on it.
	 */
	if (boot_cpu_data.x86_vfm == INTEL_ATOM_GOLDMONT)
		setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * The local APIC appears to be fed by the core crystal clock
	 * (which sounds entirely sensible). We can set the global
	 * lapic_timer_period here to avoid having to calibrate the APIC
	 * timer later.
	 */
	lapic_timer_period = crystal_khz * 1000 / HZ;
#endif

	return crystal_khz * ebx_numerator / eax_denominator;
}
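
/*
 * Illustrative CPUID.15H example (values assumed, not read from any
 * particular part): with a 24,000 kHz crystal reported in ECX and a ratio
 * of EBX/EAX = 292/2, the return value above is 24,000 * 292 / 2 =
 * 3,504,000 kHz, i.e. a ~3.5 GHz TSC.
 */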

static unsigned long cpu_khz_from_cpuid(void)
{
	unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (boot_cpu_data.cpuid_level < CPUID_LEAF_FREQ)
		return 0;

	eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;

	cpuid(CPUID_LEAF_FREQ, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);

	return eax_base_mhz * 1000;
}

/*
 * calibrate cpu using pit, hpet, and ptimer methods. They are available
 * later in boot after acpi is initialized.
 */
static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms;
	int hpet = is_hpet_enabled(), i, loopmin;

	/*
	 * Run 3 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
	 * load a timeout of 50ms. We read the time right after we
	 * started the timer and wait until the PIT count down reaches
	 * zero. In each wait loop iteration we read the TSC and check
	 * the delta to the previous read. We keep track of the min
	 * and max values of that delta. The delta is mostly defined
	 * by the IO time of the PIT access, so we can detect when
	 * any disturbance happened between the two reads. If the
	 * maximum time is significantly larger than the minimum time,
	 * then we discard the result and have another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for any possible disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */

	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 50ms, and
		 * read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available ? */
		if (ref1 == ref2)
			continue;

		/* Check, whether the sampling was disturbed */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure, that the calibration
		 * succeeded. We break out of the loop right away. We
		 * use the reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		pr_warn("Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Use the alternative source */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. In doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	pr_info("Using PIT calibration value\n");
	return tsc_pit_min;
}

/**
 * native_calibrate_cpu_early - can calibrate the cpu early in boot
 */
unsigned long native_calibrate_cpu_early(void)
{
	unsigned long flags, fast_calibrate = cpu_khz_from_cpuid();

	if (!fast_calibrate)
		fast_calibrate = cpu_khz_from_msr();
	if (!fast_calibrate) {
		local_irq_save(flags);
		fast_calibrate = quick_pit_calibrate();
		local_irq_restore(flags);
	}
	return fast_calibrate;
}


/**
 * native_calibrate_cpu - calibrate the cpu
 */
static unsigned long native_calibrate_cpu(void)
{
	unsigned long tsc_freq = native_calibrate_cpu_early();

	if (!tsc_freq)
		tsc_freq = pit_hpet_ptimer_calibrate_cpu();

	return tsc_freq;
}

void recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (!boot_cpu_has(X86_FEATURE_TSC))
		return;

	cpu_khz = x86_platform.calibrate_cpu();
	tsc_khz = x86_platform.calibrate_tsc();
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;
	cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
						    cpu_khz_old, cpu_khz);
#endif
}
EXPORT_SYMBOL_GPL(recalibrate_cpu_khz);


static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
	if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
		return;

	cyc2ns_suspend = sched_clock();
}

/*
 * Even on processors with invariant TSC, the TSC gets reset in some of the
 * ACPI system sleep states. And in some systems the BIOS seems to reinit the
 * TSC to an arbitrary value (still sync'd across cpu's) during resume from
 * such sleep states. To cope with this, recompute the cyc2ns_offset for each
 * cpu so that sched_clock() continues from the point where it was left off
 * during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
		return;

	local_irq_save(flags);

	/*
	 * We're coming out of suspend, there's no concurrency yet; don't
	 * bother being nice about the RCU stuff, just write to both
	 * data fields.
	 */

	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu) {
		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_CPU_FREQ
/*
 * Frequency scaling support. Adjust the TSC based timer when the CPU frequency
 * changes.
 *
 * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
 * as unstable and give up in those cases.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int  ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;

	if (num_online_cpus() > 1) {
		mark_tsc_unstable("cpufreq changes on SMP");
		return 0;
	}

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
		tsc_khz_ref = tsc_khz;
	}

	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		boot_cpu_data.loops_per_jiffy =
			cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");

		set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
	}

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_register_tsc_scaling(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_register_tsc_scaling);

#endif /* CONFIG_CPU_FREQ */

#define ART_MIN_DENOMINATOR (1)

/*
 * If ART is present detect the numerator:denominator to convert to TSC
 */
static void __init detect_art(void)
{
	unsigned int unused;

	if (boot_cpu_data.cpuid_level < CPUID_LEAF_TSC)
		return;

	/*
	 * Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required,
	 * and the TSC counter resets must not occur asynchronously.
	 */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
	    !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
	    !boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
	    tsc_async_resets)
		return;

	cpuid(CPUID_LEAF_TSC, &art_base_clk.denominator,
	      &art_base_clk.numerator, &art_base_clk.freq_khz, &unused);

	art_base_clk.freq_khz /= KHZ;
	if (art_base_clk.denominator < ART_MIN_DENOMINATOR)
		return;

	rdmsrq(MSR_IA32_TSC_ADJUST, art_base_clk.offset);

	/* Make this sticky over multiple CPU init calls */
	setup_force_cpu_cap(X86_FEATURE_ART);
}
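
/*
 * For reference, the ratio detected above is what converts ART readings to
 * TSC values elsewhere in the kernel; informally (a statement of the
 * relation, not code from this file):
 *
 *	TSC = ART * numerator / denominator + offset
 *
 * with the offset taken from MSR_IA32_TSC_ADJUST as read above.
 */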

/* clocksource code */

static void tsc_resume(struct clocksource *cs)
{
	tsc_verify_tsc_adjust(true);
}

/*
 * We used to compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 *
 * This sanity check is now done in the core timekeeping code, by
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
static u64 read_tsc(struct clocksource *cs)
{
	return (u64)rdtsc_ordered();
}

static void tsc_cs_mark_unstable(struct clocksource *cs)
{
	if (tsc_unstable)
		return;

	tsc_unstable = 1;
	if (using_native_sched_clock())
		clear_sched_clock_stable();
	disable_sched_clock_irqtime();
	pr_info("Marking TSC unstable due to clocksource watchdog\n");
}

static void tsc_cs_tick_stable(struct clocksource *cs)
{
	if (tsc_unstable)
		return;

	if (using_native_sched_clock())
		sched_clock_tick_stable();
}

static int tsc_cs_enable(struct clocksource *cs)
{
	vclocks_set_used(VDSO_CLOCKMODE_TSC);
	return 0;
}

/*
 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
 */
static struct clocksource clocksource_tsc_early = {
	.name			= "tsc-early",
	.rating			= 299,
	.uncertainty_margin	= 32 * NSEC_PER_MSEC,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.id			= CSID_X86_TSC_EARLY,
	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
	.enable			= tsc_cs_enable,
	.resume			= tsc_resume,
	.mark_unstable		= tsc_cs_mark_unstable,
	.tick_stable		= tsc_cs_tick_stable,
	.list			= LIST_HEAD_INIT(clocksource_tsc_early.list),
};

/*
 * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
 * this one will immediately take over. We will only register if TSC has
 * been found good.
 */
static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_VALID_FOR_HRES |
				  CLOCK_SOURCE_MUST_VERIFY |
				  CLOCK_SOURCE_VERIFY_PERCPU,
	.id			= CSID_X86_TSC,
	.vdso_clock_mode	= VDSO_CLOCKMODE_TSC,
	.enable			= tsc_cs_enable,
	.resume			= tsc_resume,
	.mark_unstable		= tsc_cs_mark_unstable,
	.tick_stable		= tsc_cs_tick_stable,
	.list			= LIST_HEAD_INIT(clocksource_tsc.list),
};

void mark_tsc_unstable(char *reason)
{
	if (tsc_unstable)
		return;

	tsc_unstable = 1;
	if (using_native_sched_clock())
		clear_sched_clock_stable();
	disable_sched_clock_irqtime();
	pr_info("Marking TSC unstable due to %s\n", reason);

	clocksource_mark_unstable(&clocksource_tsc_early);
	clocksource_mark_unstable(&clocksource_tsc);
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init tsc_disable_clocksource_watchdog(void)
{
	clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
	clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}

bool tsc_clocksource_watchdog_disabled(void)
{
	return !(clocksource_tsc.flags & CLOCK_SOURCE_MUST_VERIFY) &&
	       tsc_as_watchdog && !no_tsc_watchdog;
}

static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
	if (is_geode_lx()) {
		/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
		unsigned long res_low, res_high;

		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
		/* Geode_LX - the OLPC CPU has a very reliable TSC */
		if (res_low & RTSC_SUSP)
			tsc_clocksource_reliable = 1;
	}
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;

	/*
	 * Disable the clocksource watchdog when the system has:
	 *  - TSC running at constant frequency
	 *  - TSC which does not stop in C-States
	 *  - the TSC_ADJUST register which allows to detect even minimal
	 *    modifications
	 *  - not more than four packages
	 */
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
	    boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
	    boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
	    topology_max_packages() <= 4)
		tsc_disable_clocksource_watchdog();
}

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
int unsynchronized_tsc(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (tsc_clocksource_reliable)
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (topology_max_packages() > 1)
			return 1;
	}

	return 0;
}

static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work: ignored.
 *
 * This function uses delayed work over a period of a
 * second to further refine the TSC freq value. Since this is
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by more than 1% from the
 * early calibration, we throw out the new calibration and use
 * the early calibration.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
	static u64 tsc_start = ULLONG_MAX, ref_start;
	static int hpet;
	u64 tsc_stop, ref_stop, delta;
	unsigned long freq;
	int cpu;

	/* Don't bother refining TSC on unstable systems */
	if (tsc_unstable)
		goto unreg;

	/*
	 * Since the work is started early in boot, we may be
	 * delayed the first time we expire. So set the workqueue
	 * again once we know timers are working.
	 */
	if (tsc_start == ULLONG_MAX) {
restart:
		/*
		 * Only set hpet once, to avoid mixing hardware
		 * if the hpet becomes enabled later.
		 */
		hpet = is_hpet_enabled();
		tsc_start = tsc_read_refs(&ref_start, hpet);
		schedule_delayed_work(&tsc_irqwork, HZ);
		return;
	}

	tsc_stop = tsc_read_refs(&ref_stop, hpet);

	/* hpet or pmtimer available ? */
	if (ref_start == ref_stop)
		goto out;

	/* Check, whether the sampling was disturbed */
	if (tsc_stop == ULLONG_MAX)
		goto restart;

	delta = tsc_stop - tsc_start;
	delta *= 1000000LL;
	if (hpet)
		freq = calc_hpet_ref(delta, ref_start, ref_stop);
	else
		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

	/* Will hit this only if tsc_force_recalibrate has been set */
	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {

		/* Warn if the deviation exceeds 500 ppm */
		if (abs(tsc_khz - freq) > (tsc_khz >> 11)) {
			pr_warn("Warning: TSC freq calibrated by CPUID/MSR differs from what is calibrated by HW timer, please check with vendor!!\n");
			pr_info("Previous calibrated TSC freq:\t %lu.%03lu MHz\n",
				(unsigned long)tsc_khz / 1000,
				(unsigned long)tsc_khz % 1000);
		}

		pr_info("TSC freq recalibrated by [%s]:\t %lu.%03lu MHz\n",
			hpet ? "HPET" : "PM_TIMER",
			(unsigned long)freq / 1000,
			(unsigned long)freq % 1000);

		return;
	}

	/* Make sure we're within 1% */
	if (abs(tsc_khz - freq) > tsc_khz/100)
		goto out;

	tsc_khz = freq;
	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
		(unsigned long)tsc_khz / 1000,
		(unsigned long)tsc_khz % 1000);

	/* Inform the TSC deadline clockevent devices about the recalibration */
	lapic_update_tsc_freq();

	/* Update the sched_clock() rate to match the clocksource one */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);

out:
	if (tsc_unstable)
		goto unreg;

	if (boot_cpu_has(X86_FEATURE_ART)) {
		have_art = true;
		clocksource_tsc.base = &art_base_clk;
	}
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
unreg:
	clocksource_unregister(&clocksource_tsc_early);
}


static int __init init_tsc_clocksource(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC) || !tsc_khz)
		return 0;

	if (tsc_unstable) {
		clocksource_unregister(&clocksource_tsc_early);
		return 0;
	}

	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/*
	 * When TSC frequency is known (retrieved via MSR or CPUID), we skip
	 * the refined calibration and directly register it as a clocksource.
	 */
	if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
		if (boot_cpu_has(X86_FEATURE_ART)) {
			have_art = true;
			clocksource_tsc.base = &art_base_clk;
		}
		clocksource_register_khz(&clocksource_tsc, tsc_khz);
		clocksource_unregister(&clocksource_tsc_early);

		if (!tsc_force_recalibrate)
			return 0;
	}

	schedule_delayed_work(&tsc_irqwork, 0);
	return 0;
}
/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);

static bool __init determine_cpu_tsc_frequencies(bool early)
{
	/* Make sure that cpu and tsc are not already calibrated */
	WARN_ON(cpu_khz || tsc_khz);

	if (early) {
		cpu_khz = x86_platform.calibrate_cpu();
		if (tsc_early_khz) {
			tsc_khz = tsc_early_khz;
		} else {
			tsc_khz = x86_platform.calibrate_tsc();
			clocksource_tsc.freq_khz = tsc_khz;
		}
	} else {
		/* We should not be here with non-native cpu calibration */
		WARN_ON(x86_platform.calibrate_cpu != native_calibrate_cpu);
		cpu_khz = pit_hpet_ptimer_calibrate_cpu();
	}

	/*
	 * Trust non-zero tsc_khz as authoritative,
	 * and use it to sanity check cpu_khz,
	 * which will be off if system timer is off.
	 */
	if (tsc_khz == 0)
		tsc_khz = cpu_khz;
	else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
		cpu_khz = tsc_khz;

	if (tsc_khz == 0)
		return false;

	pr_info("Detected %lu.%03lu MHz processor\n",
		(unsigned long)cpu_khz / KHZ,
		(unsigned long)cpu_khz % KHZ);

	if (cpu_khz != tsc_khz) {
		pr_info("Detected %lu.%03lu MHz TSC",
			(unsigned long)tsc_khz / KHZ,
			(unsigned long)tsc_khz % KHZ);
	}
	return true;
}

static unsigned long __init get_loops_per_jiffy(void)
{
	u64 lpj = (u64)tsc_khz * KHZ;

	do_div(lpj, HZ);
	return lpj;
}
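
/*
 * Quick sanity check of the helper above (numbers assumed): with
 * tsc_khz = 2,500,000 and HZ = 250, lpj = 2,500,000 * 1000 / 250 =
 * 10,000,000 TSC cycles per jiffy, which matches a 4 ms tick on a
 * 2.5 GHz TSC.
 */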

static void __init tsc_enable_sched_clock(void)
{
	loops_per_jiffy = get_loops_per_jiffy();
	use_tsc_delay();

	/* Sanitize TSC ADJUST before cyc2ns gets initialized */
	tsc_store_and_check_tsc_adjust(true);
	cyc2ns_init_boot_cpu();
	static_branch_enable(&__use_tsc);
}

void __init tsc_early_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return;
	/* Don't change UV TSC multi-chassis synchronization */
	if (is_early_uv_system())
		return;

	snp_secure_tsc_init();

	if (!determine_cpu_tsc_frequencies(true))
		return;
	tsc_enable_sched_clock();
}

void __init tsc_init(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_TSC)) {
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	/*
	 * native_calibrate_cpu_early can only calibrate using methods that are
	 * available early in boot.
	 */
	if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
		x86_platform.calibrate_cpu = native_calibrate_cpu;

	if (!tsc_khz) {
		/* We failed to determine frequencies earlier, try again */
		if (!determine_cpu_tsc_frequencies(false)) {
			mark_tsc_unstable("could not calculate TSC khz");
			setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
			return;
		}
		tsc_enable_sched_clock();
	}

	cyc2ns_init_secondary_cpus();

	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();

	lpj_fine = get_loops_per_jiffy();

	check_system_tsc_reliable();

	if (unsynchronized_tsc()) {
		mark_tsc_unstable("TSCs unsynchronized");
		return;
	}

	if (tsc_clocksource_reliable || no_tsc_watchdog)
		tsc_disable_clocksource_watchdog();

	clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
	detect_art();
}

#ifdef CONFIG_SMP
/*
 * Check whether existing calibration data can be reused.
 */
unsigned long calibrate_delay_is_known(void)
{
	int sibling, cpu = smp_processor_id();
	int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
	const struct cpumask *mask = topology_core_cpumask(cpu);

	/*
	 * If TSC has constant frequency and TSC is synchronized across
	 * sockets then reuse CPU0 calibration.
	 */
	if (constant_tsc && !tsc_unstable)
		return cpu_data(0).loops_per_jiffy;

	/*
	 * If TSC has constant frequency and TSC is not synchronized across
	 * sockets and this is not the first CPU in the socket, then reuse
	 * the calibration value of an already online CPU on that socket.
	 *
	 * This assumes that CONSTANT_TSC is consistent for all CPUs in a
	 * socket.
	 */
	if (!constant_tsc || !mask)
		return 0;

	sibling = cpumask_any_but(mask, cpu);
	if (sibling < nr_cpu_ids)
		return cpu_data(sibling).loops_per_jiffy;
	return 0;
}
#endif