==== arch/mips/kernel/time.c (old side of this source diff) ====

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2001 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 * Copyright (c) 2003, 2004  Maciej W. Rozycki
 *
 * Common time service routines for MIPS machines.
 */
#include <linux/bug.h>
#include <linux/clockchips.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/param.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>

#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/div64.h>
#include <asm/time.h>

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref);
static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq);
static unsigned long glb_lpj_ref;
static unsigned long glb_lpj_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
                            unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cpumask *cpus = freq->policy->cpus;
        unsigned long lpj;
        int cpu;

        /*
         * Skip lpj numbers adjustment if the CPU-freq transition is safe for
         * the loops delay. (Is this possible?)
         */
        if (freq->flags & CPUFREQ_CONST_LOOPS)
                return NOTIFY_OK;

        /* Save the initial values of the lpjes for future scaling. */
        if (!glb_lpj_ref) {
                glb_lpj_ref = boot_cpu_data.udelay_val;
                glb_lpj_ref_freq = freq->old;

                for_each_online_cpu(cpu) {
                        per_cpu(pcp_lpj_ref, cpu) =
                                cpu_data[cpu].udelay_val;
                        per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
                }
        }

        /*
         * Adjust global lpj variable and per-CPU udelay_val number in
         * accordance with the new CPU frequency.
         */
        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                loops_per_jiffy = cpufreq_scale(glb_lpj_ref,
                                                glb_lpj_ref_freq,
                                                freq->new);

                for_each_cpu(cpu, cpus) {
                        lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
                                            per_cpu(pcp_lpj_ref_freq, cpu),
                                            freq->new);
                        cpu_data[cpu].udelay_val = (unsigned int)lpj;
                }
        }

        return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
        .notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
        return cpufreq_register_notifier(&cpufreq_notifier,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif /* CONFIG_CPU_FREQ */

/*
 * forward reference
 */
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);

static int null_perf_irq(void)
{
        return 0;
}

int (*perf_irq)(void) = null_perf_irq;

EXPORT_SYMBOL(perf_irq);

/*
 * time_init() - it does the following things.
 *
 * 1) plat_time_init() -
 *      a) (optional) set up RTC routines,
 *      b) (optional) calibrate and set the mips_hpt_frequency
 *          (only needed if you intended to use cpu counter as timer interrupt
 *           source)
 * 2) calculate a couple of cached variables for later usage
 */

unsigned int mips_hpt_frequency;
EXPORT_SYMBOL_GPL(mips_hpt_frequency);

static __init int cpu_has_mfc0_count_bug(void)
{
        switch (current_cpu_type()) {
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
                /*
                 * V3.0 is documented as suffering from the mfc0 from count bug.
                 * Afaik this is the last version of the R4000.  Later versions
                 * were marketed as R4400.
                 */
                return 1;

        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                /*
                 * The published errata for the R4400 up to 3.0 say the CPU
                 * has the mfc0 from count bug; afaik 3.0 is the last version
                 * produced.
                 */
                return 1;
        }

        return 0;
}

void __init time_init(void)
{
        plat_time_init();

        /*
         * The use of the R4k timer as a clock event takes precedence;
         * if reading the Count register might interfere with the timer
         * interrupt, then we don't use the timer as a clock source.
         * We may still use the timer as a clock source though if the
         * timer interrupt isn't reliable; the interference doesn't
         * matter then, because we don't use the interrupt.
         */
        if (mips_clockevent_init() != 0 || !cpu_has_mfc0_count_bug())
                init_mips_clocksource();
}
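
/*
 * A standalone user-space sketch, not part of either kernel file: it
 * models the loops-per-jiffy rescaling that cpufreq_callback() above
 * performs through cpufreq_scale(), i.e. scaling a reference lpj by the
 * ratio of the new frequency to the reference frequency.  All names and
 * figures below are invented for illustration.
 */
#include <stdio.h>

/* Models the intent of the kernel's cpufreq_scale(): ref * new / old. */
static unsigned long scale_lpj(unsigned long ref_lpj,
                               unsigned long ref_khz, unsigned long new_khz)
{
        return (unsigned long)(((unsigned long long)ref_lpj * new_khz)
                               / ref_khz);
}

int main(void)
{
        /* Hypothetical board: lpj calibrated at 500 MHz, then scaled down. */
        unsigned long ref_lpj = 2500000, ref_khz = 500000;

        printf("lpj at 250 MHz: %lu\n", scale_lpj(ref_lpj, ref_khz, 250000));
        printf("lpj at 100 MHz: %lu\n", scale_lpj(ref_lpj, ref_khz, 100000));
        /* udelay loop counts shrink in proportion to the clock:
           1250000 and 500000. */
        return 0;
}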

==== arch/alpha/kernel/time.c (new side of this source diff) ====

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995, 1999, 2000  Linus Torvalds
 *
 * This file contains the clocksource time handling.
 * 1997-09-10   Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1997-01-09   Adrian Sun
 *              use interval timer if CONFIG_RTC=y
 * 1997-10-29   John Bowman (bowman@math.ualberta.ca)
 *              fixed tick loss calculation in timer_interrupt
 *              (round system clock to nearest tick instead of truncating)
 *              fixed algorithm in time_init for getting time from CMOS clock
 * 1999-04-16   Thorsten Kranzkowski (dl8bcu@gmx.net)
 *              fixed algorithm in do_gettimeofday() for calculating the
 *              precise time from processor cycle counter (now taking
 *              lost_ticks into account)
 * 2003-06-03   R. Scott Bailey <scott.bailey@eds.com>
 *              Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/profile.h>
#include <linux/irq_work.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/hwrpb.h>

#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>

#include "proto.h"
#include "irq_impl.h"

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);

unsigned long est_cycle_freq;

#ifdef CONFIG_IRQ_WORK

DEFINE_PER_CPU(u8, irq_work_pending);

#define set_irq_work_pending_flag()  __this_cpu_write(irq_work_pending, 1)
#define test_irq_work_pending()      __this_cpu_read(irq_work_pending)
#define clear_irq_work_pending()     __this_cpu_write(irq_work_pending, 0)

void arch_irq_work_raise(void)
{
        set_irq_work_pending_flag();
}

#else  /* CONFIG_IRQ_WORK */

#define test_irq_work_pending()      0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */


static inline __u32 rpcc(void)
{
        return __builtin_alpha_rpcc();
}


/*
 * The RTC as a clock_event_device primitive.
 */

static DEFINE_PER_CPU(struct clock_event_device, cpu_ce);

irqreturn_t
rtc_timer_interrupt(int irq, void *dev)
{
        int cpu = smp_processor_id();
        struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

        /* Don't run the hook for UNUSED or SHUTDOWN.  */
        if (likely(clockevent_state_periodic(ce)))
                ce->event_handler(ce);

        if (test_irq_work_pending()) {
                clear_irq_work_pending();
                irq_work_run();
        }

        return IRQ_HANDLED;
}

static int
rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
        /* This hook is for oneshot mode, which we don't support.  */
        return -EINVAL;
}

static void __init
init_rtc_clockevent(void)
{
        int cpu = smp_processor_id();
        struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

        *ce = (struct clock_event_device){
                .name = "rtc",
                .features = CLOCK_EVT_FEAT_PERIODIC,
                .rating = 100,
                .cpumask = cpumask_of(cpu),
                .set_next_event = rtc_ce_set_next_event,
        };

        clockevents_config_and_register(ce, CONFIG_HZ, 0, 0);
}
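
/*
 * A standalone user-space sketch, not part of the kernel file above: it
 * models the irq_work flag handshake used by arch_irq_work_raise() and
 * rtc_timer_interrupt().  Work raised from interrupt context just sets a
 * per-CPU flag; the next RTC tick tests, clears, and runs it.  This toy
 * is single-threaded and uses a plain variable instead of per-CPU storage.
 */
#include <stdio.h>

static unsigned char irq_work_pending;  /* models the per-CPU flag */

static void raise_irq_work(void) { irq_work_pending = 1; }

static void run_irq_work(void) { printf("deferred work runs\n"); }

static void rtc_tick(void)              /* models rtc_timer_interrupt() */
{
        printf("tick\n");
        if (irq_work_pending) {
                irq_work_pending = 0;   /* clear before running the work */
                run_irq_work();
        }
}

int main(void)
{
        rtc_tick();             /* nothing pending: tick only */
        raise_irq_work();       /* queue work from "interrupt" context */
        rtc_tick();             /* the next tick picks it up */
        return 0;
}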

/*
 * The QEMU clock as a clocksource primitive.
 */

static u64
qemu_cs_read(struct clocksource *cs)
{
        return qemu_get_vmtime();
}

static struct clocksource qemu_cs = {
        .name           = "qemu",
        .rating         = 400,
        .read           = qemu_cs_read,
        .mask           = CLOCKSOURCE_MASK(64),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
        .max_idle_ns    = LONG_MAX
};


/*
 * The QEMU alarm as a clock_event_device primitive.
 */

static int qemu_ce_shutdown(struct clock_event_device *ce)
{
        /* The mode member of CE is updated for us in generic code.
           Just make sure that the event is disabled.  */
        qemu_set_alarm_abs(0);
        return 0;
}

static int
qemu_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
{
        qemu_set_alarm_rel(evt);
        return 0;
}

static irqreturn_t
qemu_timer_interrupt(int irq, void *dev)
{
        int cpu = smp_processor_id();
        struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

        ce->event_handler(ce);
        return IRQ_HANDLED;
}

static void __init
init_qemu_clockevent(void)
{
        int cpu = smp_processor_id();
        struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);

        *ce = (struct clock_event_device){
                .name = "qemu",
                .features = CLOCK_EVT_FEAT_ONESHOT,
                .rating = 400,
                .cpumask = cpumask_of(cpu),
                .set_state_shutdown = qemu_ce_shutdown,
                .set_state_oneshot = qemu_ce_shutdown,
                .tick_resume = qemu_ce_shutdown,
                .set_next_event = qemu_ce_set_next_event,
        };

        clockevents_config_and_register(ce, NSEC_PER_SEC, 1000, LONG_MAX);
}
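
/*
 * A standalone user-space sketch, not part of the kernel file above:
 * registering the QEMU alarm with a "frequency" of NSEC_PER_SEC makes one
 * device tick equal one nanosecond, so qemu_ce_set_next_event() receives a
 * plain nanosecond delta.  The generic clockevents layer keeps requests
 * within the [1000, LONG_MAX] range given at registration; the clamping
 * below models that behavior with invented example deltas.
 */
#include <stdio.h>
#include <limits.h>

static long clamp_delta_ns(long long req_ns, long min_ns, long max_ns)
{
        if (req_ns < min_ns)
                return min_ns;          /* too soon: stretch to the minimum */
        if (req_ns > max_ns)
                return max_ns;          /* too far: clip to the maximum */
        return (long)req_ns;
}

int main(void)
{
        /* A 100 ns request is stretched to the 1000 ns minimum. */
        printf("%ld\n", clamp_delta_ns(100, 1000, LONG_MAX));
        /* A 5 ms request passes through unchanged: 5000000 ns. */
        printf("%ld\n", clamp_delta_ns(5000000, 1000, LONG_MAX));
        return 0;
}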

void __init
common_init_rtc(void)
{
        unsigned char x, sel = 0;

        /* Reset periodic interrupt frequency.  */
#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
        x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
        /* Test includes known working values on various platforms
           where 0x26 is wrong; we refuse to change those. */
        if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
                sel = RTC_REF_CLCK_32KHZ + 6;
        }
#elif CONFIG_HZ == 256 || CONFIG_HZ == 128 || CONFIG_HZ == 64 || CONFIG_HZ == 32
        sel = RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ);
#else
# error "Unknown HZ from arch/alpha/Kconfig"
#endif
        if (sel) {
                printk(KERN_INFO "Setting RTC_FREQ to %d Hz (%x)\n",
                       CONFIG_HZ, sel);
                CMOS_WRITE(sel, RTC_FREQ_SELECT);
        }

        /* Turn on periodic interrupts.  */
        x = CMOS_READ(RTC_CONTROL);
        if (!(x & RTC_PIE)) {
                printk("Turning on RTC interrupts.\n");
                x |= RTC_PIE;
                x &= ~(RTC_AIE | RTC_UIE);
                CMOS_WRITE(x, RTC_CONTROL);
        }
        (void) CMOS_READ(RTC_INTR_FLAGS);

        outb(0x36, 0x43);       /* pit counter 0: system timer */
        outb(0x00, 0x40);
        outb(0x00, 0x40);

        outb(0xb6, 0x43);       /* pit counter 2: speaker */
        outb(0x31, 0x42);
        outb(0x13, 0x42);

        init_rtc_irq(NULL);
}


#ifndef CONFIG_ALPHA_WTINT
/*
 * The RPCC as a clocksource primitive.
 *
 * While we have free-running timecounters running on all CPUs, and we make
 * a half-hearted attempt in init_rtc_rpcc_info to sync the timecounter
 * with the wall clock, that initialization isn't kept up-to-date across
 * different time counters in SMP mode.  Therefore we can only use this
 * method when there's only one CPU enabled.
 *
 * When using the WTINT PALcall, the RPCC may shift to a lower frequency,
 * or stop altogether, while waiting for the interrupt.  Therefore we cannot
 * use this method when WTINT is in use.
 */

static u64 read_rpcc(struct clocksource *cs)
{
        return rpcc();
}

static struct clocksource clocksource_rpcc = {
        .name           = "rpcc",
        .rating         = 300,
        .read           = read_rpcc,
        .mask           = CLOCKSOURCE_MASK(32),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS
};
#endif /* ALPHA_WTINT */


/* Validate a computed cycle counter result against the known bounds for
   the given processor core.  There's too much brokenness in the way of
   timing hardware for any one method to work everywhere.  :-(

   Return 0 if the result cannot be trusted, otherwise return the argument.  */

static unsigned long __init
validate_cc_value(unsigned long cc)
{
        static struct bounds {
                unsigned int min, max;
        } cpu_hz[] __initdata = {
                [EV3_CPU]    = {   50000000,  200000000 },      /* guess */
                [EV4_CPU]    = {  100000000,  300000000 },
                [LCA4_CPU]   = {  100000000,  300000000 },      /* guess */
                [EV45_CPU]   = {  200000000,  300000000 },
                [EV5_CPU]    = {  250000000,  433000000 },
                [EV56_CPU]   = {  333000000,  667000000 },
                [PCA56_CPU]  = {  400000000,  600000000 },      /* guess */
                [PCA57_CPU]  = {  500000000,  600000000 },      /* guess */
                [EV6_CPU]    = {  466000000,  600000000 },
                [EV67_CPU]   = {  600000000,  750000000 },
                [EV68AL_CPU] = {  750000000,  940000000 },
                [EV68CB_CPU] = { 1000000000, 1333333333 },
                /* None of the following are shipping as of 2001-11-01. */
                [EV68CX_CPU] = { 1000000000, 1700000000 },      /* guess */
                [EV69_CPU]   = { 1000000000, 1700000000 },      /* guess */
                [EV7_CPU]    = {  800000000, 1400000000 },      /* guess */
                [EV79_CPU]   = { 1000000000, 2000000000 },      /* guess */
        };

        /* Allow for some drift in the crystal.  10MHz is more than enough. */
        const unsigned int deviation = 10000000;

        struct percpu_struct *cpu;
        unsigned int index;

        cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset);
        index = cpu->type & 0xffffffff;

        /* If index out of bounds, no way to validate.  */
        if (index >= ARRAY_SIZE(cpu_hz))
                return cc;

        /* If index contains no data, no way to validate.  */
        if (cpu_hz[index].max == 0)
                return cc;

        if (cc < cpu_hz[index].min - deviation
            || cc > cpu_hz[index].max + deviation)
                return 0;

        return cc;
}
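
/*
 * A standalone user-space sketch, not part of the kernel file above: it
 * models the rate-select arithmetic in common_init_rtc().  The MC146818
 * periodic interrupt runs at 65536 >> rate Hz, so for the power-of-two HZ
 * values the rate nibble is ffs(32768 / HZ); RTC_REF_CLCK_32KHZ supplies
 * the 32.768 kHz time-base bits in the upper part of the select byte.
 * Note how HZ = 1024 yields rate 6, matching the explicit "+ 6" in the
 * CONFIG_HZ == 1024 branch.
 */
#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        int hz_values[] = { 1024, 256, 128, 64, 32 };

        for (int i = 0; i < 5; i++) {
                int hz = hz_values[i];
                int rate = ffs(32768 / hz);     /* periodic rate nibble */

                printf("HZ=%4d -> rate=%2d -> %d interrupts/sec\n",
                       hz, rate, 65536 >> rate);
        }
        return 0;
}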
>> 338 */ >> 339 >> 340 #define CALIBRATE_LATCH 0xffff >> 341 #define TIMEOUT_COUNT 0x100000 >> 342 >> 343 static unsigned long __init >> 344 calibrate_cc_with_pit(void) 154 { 345 { 155 plat_time_init(); !! 346 int cc, count = 0; >> 347 >> 348 /* Set the Gate high, disable speaker */ >> 349 outb((inb(0x61) & ~0x02) | 0x01, 0x61); 156 350 157 /* 351 /* 158 * The use of the R4k timer as a clock !! 352 * Now let's take care of CTC channel 2 159 * if reading the Count register might !! 353 * 160 * interrupt, then we don't use the ti !! 354 * Set the Gate high, program CTC channel 2 for mode 0, 161 * We may still use the timer as a clo !! 355 * (interrupt on terminal count mode), binary count, 162 * timer interrupt isn't reliable; the !! 356 * load 5 * LATCH count, (LSB and MSB) to begin countdown. 163 * matter then, because we don't use t << 164 */ 357 */ 165 if (mips_clockevent_init() != 0 || !cp !! 358 outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */ 166 init_mips_clocksource(); !! 359 outb(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */ >> 360 outb(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */ >> 361 >> 362 cc = rpcc(); >> 363 do { >> 364 count++; >> 365 } while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT); >> 366 cc = rpcc() - cc; >> 367 >> 368 /* Error: ECTCNEVERSET or ECPUTOOFAST. */ >> 369 if (count <= 1 || count == TIMEOUT_COUNT) >> 370 return 0; >> 371 >> 372 return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1); >> 373 } >> 374 >> 375 /* The Linux interpretation of the CMOS clock register contents: >> 376 When the Update-In-Progress (UIP) flag goes from 1 to 0, the >> 377 RTC registers show the second which has precisely just started. >> 378 Let's hope other operating systems interpret the RTC the same way. */ >> 379 >> 380 static unsigned long __init >> 381 rpcc_after_update_in_progress(void) >> 382 { >> 383 do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)); >> 384 do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); >> 385 >> 386 return rpcc(); >> 387 } >> 388 >> 389 void __init >> 390 time_init(void) >> 391 { >> 392 unsigned int cc1, cc2; >> 393 unsigned long cycle_freq, tolerance; >> 394 long diff; >> 395 >> 396 if (alpha_using_qemu) { >> 397 clocksource_register_hz(&qemu_cs, NSEC_PER_SEC); >> 398 init_qemu_clockevent(); >> 399 init_rtc_irq(qemu_timer_interrupt); >> 400 return; >> 401 } >> 402 >> 403 /* Calibrate CPU clock -- attempt #1. */ >> 404 if (!est_cycle_freq) >> 405 est_cycle_freq = validate_cc_value(calibrate_cc_with_pit()); >> 406 >> 407 cc1 = rpcc(); >> 408 >> 409 /* Calibrate CPU clock -- attempt #2. */ >> 410 if (!est_cycle_freq) { >> 411 cc1 = rpcc_after_update_in_progress(); >> 412 cc2 = rpcc_after_update_in_progress(); >> 413 est_cycle_freq = validate_cc_value(cc2 - cc1); >> 414 cc1 = cc2; >> 415 } >> 416 >> 417 cycle_freq = hwrpb->cycle_freq; >> 418 if (est_cycle_freq) { >> 419 /* If the given value is within 250 PPM of what we calculated, >> 420 accept it. Otherwise, use what we found. */ >> 421 tolerance = cycle_freq / 4000; >> 422 diff = cycle_freq - est_cycle_freq; >> 423 if (diff < 0) >> 424 diff = -diff; >> 425 if ((unsigned long)diff > tolerance) { >> 426 cycle_freq = est_cycle_freq; >> 427 printk("HWRPB cycle frequency bogus. " >> 428 "Estimated %lu Hz\n", cycle_freq); >> 429 } else { >> 430 est_cycle_freq = 0; >> 431 } >> 432 } else if (! 

void __init
time_init(void)
{
        unsigned int cc1, cc2;
        unsigned long cycle_freq, tolerance;
        long diff;

        if (alpha_using_qemu) {
                clocksource_register_hz(&qemu_cs, NSEC_PER_SEC);
                init_qemu_clockevent();
                init_rtc_irq(qemu_timer_interrupt);
                return;
        }

        /* Calibrate CPU clock -- attempt #1.  */
        if (!est_cycle_freq)
                est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());

        cc1 = rpcc();

        /* Calibrate CPU clock -- attempt #2.  */
        if (!est_cycle_freq) {
                cc1 = rpcc_after_update_in_progress();
                cc2 = rpcc_after_update_in_progress();
                est_cycle_freq = validate_cc_value(cc2 - cc1);
                cc1 = cc2;
        }

        cycle_freq = hwrpb->cycle_freq;
        if (est_cycle_freq) {
                /* If the given value is within 250 PPM of what we calculated,
                   accept it.  Otherwise, use what we found.  */
                tolerance = cycle_freq / 4000;
                diff = cycle_freq - est_cycle_freq;
                if (diff < 0)
                        diff = -diff;
                if ((unsigned long)diff > tolerance) {
                        cycle_freq = est_cycle_freq;
                        printk("HWRPB cycle frequency bogus.  "
                               "Estimated %lu Hz\n", cycle_freq);
                } else {
                        est_cycle_freq = 0;
                }
        } else if (! validate_cc_value (cycle_freq)) {
                printk("HWRPB cycle frequency bogus, "
                       "and unable to estimate a proper value!\n");
        }

        /* See above for restrictions on using clocksource_rpcc.  */
#ifndef CONFIG_ALPHA_WTINT
        if (hwrpb->nr_processors == 1)
                clocksource_register_hz(&clocksource_rpcc, cycle_freq);
#endif

        /* Startup the timer source.  */
        alpha_mv.init_rtc();
        init_rtc_clockevent();
}

/* Initialize the clock_event_device for secondary cpus.  */
#ifdef CONFIG_SMP
void __init
init_clockevent(void)
{
        if (alpha_using_qemu)
                init_qemu_clockevent();
        else
                init_rtc_clockevent();
}
#endif
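
/*
 * A standalone user-space sketch, not part of the kernel file above: the
 * sanity check in time_init() accepts the HWRPB frequency only if it is
 * within 250 PPM of the measured value, and cycle_freq / 4000 is exactly
 * that bound, since 1/4000 = 0.00025 = 250 parts per million.  The
 * frequencies below are invented for illustration.
 */
#include <stdio.h>

int main(void)
{
        unsigned long cycle_freq = 500000000;   /* hypothetical HWRPB value */
        unsigned long est = 500100000;          /* hypothetical measurement */
        unsigned long tolerance = cycle_freq / 4000;    /* 250 PPM = 125000 Hz */
        long diff = (long)cycle_freq - (long)est;

        if (diff < 0)
                diff = -diff;
        /* diff is 100000 Hz, under the 125000 Hz bound, so HWRPB wins. */
        printf("tolerance %lu Hz, diff %ld Hz -> %s\n", tolerance, diff,
               (unsigned long)diff > tolerance
               ? "HWRPB bogus, use the estimate" : "accept the HWRPB value");
        return 0;
}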