kern_clock.c revision 2858
11541Srgrimes/*- 21541Srgrimes * Copyright (c) 1982, 1986, 1991, 1993 31541Srgrimes * The Regents of the University of California. All rights reserved. 41541Srgrimes * (c) UNIX System Laboratories, Inc. 51541Srgrimes * All or some portions of this file are derived from material licensed 61541Srgrimes * to the University of California by American Telephone and Telegraph 71541Srgrimes * Co. or Unix System Laboratories, Inc. and are reproduced herein with 81541Srgrimes * the permission of UNIX System Laboratories, Inc. 91541Srgrimes * 101541Srgrimes * Redistribution and use in source and binary forms, with or without 111541Srgrimes * modification, are permitted provided that the following conditions 121541Srgrimes * are met: 131541Srgrimes * 1. Redistributions of source code must retain the above copyright 141541Srgrimes * notice, this list of conditions and the following disclaimer. 151541Srgrimes * 2. Redistributions in binary form must reproduce the above copyright 161541Srgrimes * notice, this list of conditions and the following disclaimer in the 171541Srgrimes * documentation and/or other materials provided with the distribution. 181541Srgrimes * 3. All advertising materials mentioning features or use of this software 191541Srgrimes * must display the following acknowledgement: 201541Srgrimes * This product includes software developed by the University of 211541Srgrimes * California, Berkeley and its contributors. 221541Srgrimes * 4. Neither the name of the University nor the names of its contributors 231541Srgrimes * may be used to endorse or promote products derived from this software 241541Srgrimes * without specific prior written permission. 251541Srgrimes * 261541Srgrimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 271541Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 281541Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 291541Srgrimes * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 301541Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 311541Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 321541Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 331541Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 341541Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 351541Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 361541Srgrimes * SUCH DAMAGE. 371541Srgrimes * 381541Srgrimes * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 392858Swollman * $Id: kern_clock.c,v 1.5 1994/08/27 16:14:26 davidg Exp $ 401541Srgrimes */ 411541Srgrimes 422858Swollman/* Portions of this software are covered by the following: */ 432858Swollman/****************************************************************************** 442858Swollman * * 452858Swollman * Copyright (c) David L. Mills 1993, 1994 * 462858Swollman * * 472858Swollman * Permission to use, copy, modify, and distribute this software and its * 482858Swollman * documentation for any purpose and without fee is hereby granted, provided * 492858Swollman * that the above copyright notice appears in all copies and that both the * 502858Swollman * copyright notice and this permission notice appear in supporting * 512858Swollman * documentation, and that the name University of Delaware not be used in * 522858Swollman * advertising or publicity pertaining to distribution of the software * 532858Swollman * without specific, written prior permission. The University of Delaware * 542858Swollman * makes no representations about the suitability this software for any * 552858Swollman * purpose. It is provided "as is" without express or implied warranty. 
* 562858Swollman * * 572858Swollman *****************************************************************************/ 582858Swollman 591541Srgrimes#include <sys/param.h> 601541Srgrimes#include <sys/systm.h> 611541Srgrimes#include <sys/dkstat.h> 621541Srgrimes#include <sys/callout.h> 631541Srgrimes#include <sys/kernel.h> 641541Srgrimes#include <sys/proc.h> 651541Srgrimes#include <sys/resourcevar.h> 662858Swollman#include <sys/timex.h> 672320Sdg#include <vm/vm.h> 681541Srgrimes 691541Srgrimes#include <machine/cpu.h> 702858Swollman#include <machine/clock.h> 711541Srgrimes 721541Srgrimes#ifdef GPROF 731541Srgrimes#include <sys/gmon.h> 741541Srgrimes#endif 751541Srgrimes 762112Swollman/* Does anybody else really care about these? */ 772112Swollmanstruct callout *callfree, *callout, calltodo; 782112Swollmanint ncallout; 792112Swollman 802112Swollman/* Some of these don't belong here, but it's easiest to concentrate them. */ 812112Swollmanlong cp_time[CPUSTATES]; 822112Swollmanlong dk_seek[DK_NDRIVE]; 832112Swollmanlong dk_time[DK_NDRIVE]; 842112Swollmanlong dk_wds[DK_NDRIVE]; 852112Swollmanlong dk_wpms[DK_NDRIVE]; 862112Swollmanlong dk_xfer[DK_NDRIVE]; 872112Swollman 882112Swollmanint dk_busy; 892112Swollmanint dk_ndrive = DK_NDRIVE; 902112Swollman 912112Swollmanlong tk_cancc; 922112Swollmanlong tk_nin; 932112Swollmanlong tk_nout; 942112Swollmanlong tk_rawcc; 952112Swollman 961541Srgrimes/* 971541Srgrimes * Clock handling routines. 981541Srgrimes * 991541Srgrimes * This code is written to operate with two timers that run independently of 1001541Srgrimes * each other. The main clock, running hz times per second, is used to keep 1011541Srgrimes * track of real time. The second timer handles kernel and user profiling, 1021541Srgrimes * and does resource use estimation. If the second timer is programmable, 1031541Srgrimes * it is randomized to avoid aliasing between the two clocks. 
For example, 1041541Srgrimes * the randomization prevents an adversary from always giving up the cpu 1051541Srgrimes * just before its quantum expires. Otherwise, it would never accumulate 1061541Srgrimes * cpu ticks. The mean frequency of the second timer is stathz. 1071541Srgrimes * 1081541Srgrimes * If no second timer exists, stathz will be zero; in this case we drive 1091541Srgrimes * profiling and statistics off the main clock. This WILL NOT be accurate; 1101541Srgrimes * do not do it unless absolutely necessary. 1111541Srgrimes * 1121541Srgrimes * The statistics clock may (or may not) be run at a higher rate while 1131541Srgrimes * profiling. This profile clock runs at profhz. We require that profhz 1141541Srgrimes * be an integral multiple of stathz. 1151541Srgrimes * 1161541Srgrimes * If the statistics clock is running fast, it must be divided by the ratio 1171541Srgrimes * profhz/stathz for statistics. (For profiling, every tick counts.) 1181541Srgrimes */ 1191541Srgrimes 1201541Srgrimes/* 1211541Srgrimes * TODO: 1221541Srgrimes * allocate more timeout table slots when table overflows. 1231541Srgrimes */ 1241541Srgrimes 1251541Srgrimes/* 1261541Srgrimes * Bump a timeval by a small number of usec's. 
1271541Srgrimes */ 1281541Srgrimes#define BUMPTIME(t, usec) { \ 1291541Srgrimes register volatile struct timeval *tp = (t); \ 1301541Srgrimes register long us; \ 1311541Srgrimes \ 1321541Srgrimes tp->tv_usec = us = tp->tv_usec + (usec); \ 1331541Srgrimes if (us >= 1000000) { \ 1341541Srgrimes tp->tv_usec = us - 1000000; \ 1351541Srgrimes tp->tv_sec++; \ 1361541Srgrimes } \ 1371541Srgrimes} 1381541Srgrimes 1391541Srgrimesint stathz; 1401541Srgrimesint profhz; 1411541Srgrimesint profprocs; 1421541Srgrimesint ticks; 1431541Srgrimesstatic int psdiv, pscnt; /* prof => stat divider */ 1441541Srgrimesint psratio; /* ratio: prof / stat */ 1451541Srgrimes 1461541Srgrimesvolatile struct timeval time; 1471541Srgrimesvolatile struct timeval mono_time; 1481541Srgrimes 1491541Srgrimes/* 1502858Swollman * Phase-lock loop (PLL) definitions 1512858Swollman * 1522858Swollman * The following variables are read and set by the ntp_adjtime() system 1532858Swollman * call. 1542858Swollman * 1552858Swollman * time_state shows the state of the system clock, with values defined 1562858Swollman * in the timex.h header file. 1572858Swollman * 1582858Swollman * time_status shows the status of the system clock, with bits defined 1592858Swollman * in the timex.h header file. 1602858Swollman * 1612858Swollman * time_offset is used by the PLL to adjust the system time in small 1622858Swollman * increments. 1632858Swollman * 1642858Swollman * time_constant determines the bandwidth or "stiffness" of the PLL. 1652858Swollman * 1662858Swollman * time_tolerance determines maximum frequency error or tolerance of the 1672858Swollman * CPU clock oscillator and is a property of the architecture; however, 1682858Swollman * in principle it could change as result of the presence of external 1692858Swollman * discipline signals, for instance. 
1702858Swollman * 1712858Swollman * time_precision is usually equal to the kernel tick variable; however, 1722858Swollman * in cases where a precision clock counter or external clock is 1732858Swollman * available, the resolution can be much less than this and depend on 1742858Swollman * whether the external clock is working or not. 1752858Swollman * 1762858Swollman * time_maxerror is initialized by a ntp_adjtime() call and increased by 1772858Swollman * the kernel once each second to reflect the maximum error 1782858Swollman * bound growth. 1792858Swollman * 1802858Swollman * time_esterror is set and read by the ntp_adjtime() call, but 1812858Swollman * otherwise not used by the kernel. 1822858Swollman */ 1832858Swollmanint time_status = STA_UNSYNC; /* clock status bits */ 1842858Swollmanint time_state = TIME_OK; /* clock state */ 1852858Swollmanlong time_offset = 0; /* time offset (us) */ 1862858Swollmanlong time_constant = 0; /* pll time constant */ 1872858Swollmanlong time_tolerance = MAXFREQ; /* frequency tolerance (scaled ppm) */ 1882858Swollmanlong time_precision = 1; /* clock precision (us) */ 1892858Swollmanlong time_maxerror = MAXPHASE; /* maximum error (us) */ 1902858Swollmanlong time_esterror = MAXPHASE; /* estimated error (us) */ 1912858Swollman 1922858Swollman/* 1932858Swollman * The following variables establish the state of the PLL and the 1942858Swollman * residual time and frequency offset of the local clock. The scale 1952858Swollman * factors are defined in the timex.h header file. 1962858Swollman * 1972858Swollman * time_phase and time_freq are the phase increment and the frequency 1982858Swollman * increment, respectively, of the kernel time variable at each tick of 1992858Swollman * the clock. 2002858Swollman * 2012858Swollman * time_freq is set via ntp_adjtime() from a value stored in a file when 2022858Swollman * the synchronization daemon is first started. 
Its value is retrieved 2032858Swollman * via ntp_adjtime() and written to the file about once per hour by the 2042858Swollman * daemon. 2052858Swollman * 2062858Swollman * time_adj is the adjustment added to the value of tick at each timer 2072858Swollman * interrupt and is recomputed at each timer interrupt. 2082858Swollman * 2092858Swollman * time_reftime is the second's portion of the system time on the last 2102858Swollman * call to ntp_adjtime(). It is used to adjust the time_freq variable 2112858Swollman * and to increase the time_maxerror as the time since last update 2122858Swollman * increases. 2132858Swollman */ 2142858Swollmanlong time_phase = 0; /* phase offset (scaled us) */ 2152858Swollmanlong time_freq = 0; /* frequency offset (scaled ppm) */ 2162858Swollmanlong time_adj = 0; /* tick adjust (scaled 1 / hz) */ 2172858Swollmanlong time_reftime = 0; /* time at last adjustment (s) */ 2182858Swollman 2192858Swollman#ifdef PPS_SYNC 2202858Swollman/* 2212858Swollman * The following variables are used only if the if the kernel PPS 2222858Swollman * discipline code is configured (PPS_SYNC). The scale factors are 2232858Swollman * defined in the timex.h header file. 2242858Swollman * 2252858Swollman * pps_time contains the time at each calibration interval, as read by 2262858Swollman * microtime(). 2272858Swollman * 2282858Swollman * pps_offset is the time offset produced by the time median filter 2292858Swollman * pps_tf[], while pps_jitter is the dispersion measured by this 2302858Swollman * filter. 2312858Swollman * 2322858Swollman * pps_freq is the frequency offset produced by the frequency median 2332858Swollman * filter pps_ff[], while pps_stabil is the dispersion measured by 2342858Swollman * this filter. 2352858Swollman * 2362858Swollman * pps_usec is latched from a high resolution counter or external clock 2372858Swollman * at pps_time. Here we want the hardware counter contents only, not the 2382858Swollman * contents plus the time_tv.usec as usual. 
2392858Swollman * 2402858Swollman * pps_valid counts the number of seconds since the last PPS update. It 2412858Swollman * is used as a watchdog timer to disable the PPS discipline should the 2422858Swollman * PPS signal be lost. 2432858Swollman * 2442858Swollman * pps_glitch counts the number of seconds since the beginning of an 2452858Swollman * offset burst more than tick/2 from current nominal offset. It is used 2462858Swollman * mainly to suppress error bursts due to priority conflicts between the 2472858Swollman * PPS interrupt and timer interrupt. 2482858Swollman * 2492858Swollman * pps_count counts the seconds of the calibration interval, the 2502858Swollman * duration of which is pps_shift in powers of two. 2512858Swollman * 2522858Swollman * pps_intcnt counts the calibration intervals for use in the interval- 2532858Swollman * adaptation algorithm. It's just too complicated for words. 2542858Swollman */ 2552858Swollmanstruct timeval pps_time; /* kernel time at last interval */ 2562858Swollmanlong pps_offset = 0; /* pps time offset (us) */ 2572858Swollmanlong pps_jitter = MAXTIME; /* pps time dispersion (jitter) (us) */ 2582858Swollmanlong pps_tf[] = {0, 0, 0}; /* pps time offset median filter (us) */ 2592858Swollmanlong pps_freq = 0; /* frequency offset (scaled ppm) */ 2602858Swollmanlong pps_stabil = MAXFREQ; /* frequency dispersion (scaled ppm) */ 2612858Swollmanlong pps_ff[] = {0, 0, 0}; /* frequency offset median filter */ 2622858Swollmanlong pps_usec = 0; /* microsec counter at last interval */ 2632858Swollmanlong pps_valid = PPS_VALID; /* pps signal watchdog counter */ 2642858Swollmanint pps_glitch = 0; /* pps signal glitch counter */ 2652858Swollmanint pps_count = 0; /* calibration interval counter (s) */ 2662858Swollmanint pps_shift = PPS_SHIFT; /* interval duration (s) (shift) */ 2672858Swollmanint pps_intcnt = 0; /* intervals at current duration */ 2682858Swollman 2692858Swollman/* 2702858Swollman * PPS signal quality monitors 2712858Swollman * 
2722858Swollman * pps_jitcnt counts the seconds that have been discarded because the 2732858Swollman * jitter measured by the time median filter exceeds the limit MAXTIME 2742858Swollman * (100 us). 2752858Swollman * 2762858Swollman * pps_calcnt counts the frequency calibration intervals, which are 2772858Swollman * variable from 4 s to 256 s. 2782858Swollman * 2792858Swollman * pps_errcnt counts the calibration intervals which have been discarded 2802858Swollman * because the wander exceeds the limit MAXFREQ (100 ppm) or where the 2812858Swollman * calibration interval jitter exceeds two ticks. 2822858Swollman * 2832858Swollman * pps_stbcnt counts the calibration intervals that have been discarded 2842858Swollman * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us). 2852858Swollman */ 2862858Swollmanlong pps_jitcnt = 0; /* jitter limit exceeded */ 2872858Swollmanlong pps_calcnt = 0; /* calibration intervals */ 2882858Swollmanlong pps_errcnt = 0; /* calibration errors */ 2892858Swollmanlong pps_stbcnt = 0; /* stability limit exceeded */ 2902858Swollman#endif /* PPS_SYNC */ 2912858Swollman 2922858Swollman/* XXX none of this stuff works under FreeBSD */ 2932858Swollman#ifdef EXT_CLOCK 2942858Swollman/* 2952858Swollman * External clock definitions 2962858Swollman * 2972858Swollman * The following definitions and declarations are used only if an 2982858Swollman * external clock (HIGHBALL or TPRO) is configured on the system. 2992858Swollman */ 3002858Swollman#define CLOCK_INTERVAL 30 /* CPU clock update interval (s) */ 3012858Swollman 3022858Swollman/* 3032858Swollman * The clock_count variable is set to CLOCK_INTERVAL at each PPS 3042858Swollman * interrupt and decremented once each second. 3052858Swollman */ 3062858Swollmanint clock_count = 0; /* CPU clock counter */ 3072858Swollman 3082858Swollman#ifdef HIGHBALL 3092858Swollman/* 3102858Swollman * The clock_offset and clock_cpu variables are used by the HIGHBALL 3112858Swollman * interface. 
The clock_offset variable defines the offset between 3122858Swollman * system time and the HIGBALL counters. The clock_cpu variable contains 3132858Swollman * the offset between the system clock and the HIGHBALL clock for use in 3142858Swollman * disciplining the kernel time variable. 3152858Swollman */ 3162858Swollmanextern struct timeval clock_offset; /* Highball clock offset */ 3172858Swollmanlong clock_cpu = 0; /* CPU clock adjust */ 3182858Swollman#endif /* HIGHBALL */ 3192858Swollman#endif /* EXT_CLOCK */ 3202858Swollman 3212858Swollman/* 3222858Swollman * hardupdate() - local clock update 3232858Swollman * 3242858Swollman * This routine is called by ntp_adjtime() to update the local clock 3252858Swollman * phase and frequency. This is used to implement an adaptive-parameter, 3262858Swollman * first-order, type-II phase-lock loop. The code computes new time and 3272858Swollman * frequency offsets each time it is called. The hardclock() routine 3282858Swollman * amortizes these offsets at each tick interrupt. If the kernel PPS 3292858Swollman * discipline code is configured (PPS_SYNC), the PPS signal itself 3302858Swollman * determines the new time offset, instead of the calling argument. 3312858Swollman * Presumably, calls to ntp_adjtime() occur only when the caller 3322858Swollman * believes the local clock is valid within some bound (+-128 ms with 3332858Swollman * NTP). If the caller's time is far different than the PPS time, an 3342858Swollman * argument will ensue, and it's not clear who will lose. 3352858Swollman * 3362858Swollman * For default SHIFT_UPDATE = 12, the offset is limited to +-512 ms, the 3372858Swollman * maximum interval between updates is 4096 s and the maximum frequency 3382858Swollman * offset is +-31.25 ms/s. 3392858Swollman * 3402858Swollman * Note: splclock() is in effect. 
3412858Swollman */ 3422858Swollmanvoid 3432858Swollmanhardupdate(offset) 3442858Swollman long offset; 3452858Swollman{ 3462858Swollman long ltemp, mtemp; 3472858Swollman 3482858Swollman if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME)) 3492858Swollman return; 3502858Swollman ltemp = offset; 3512858Swollman#ifdef PPS_SYNC 3522858Swollman if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL) 3532858Swollman ltemp = pps_offset; 3542858Swollman#endif /* PPS_SYNC */ 3552858Swollman if (ltemp > MAXPHASE) 3562858Swollman time_offset = MAXPHASE << SHIFT_UPDATE; 3572858Swollman else if (ltemp < -MAXPHASE) 3582858Swollman time_offset = -(MAXPHASE << SHIFT_UPDATE); 3592858Swollman else 3602858Swollman time_offset = ltemp << SHIFT_UPDATE; 3612858Swollman mtemp = time.tv_sec - time_reftime; 3622858Swollman time_reftime = time.tv_sec; 3632858Swollman if (mtemp > MAXSEC) 3642858Swollman mtemp = 0; 3652858Swollman 3662858Swollman /* ugly multiply should be replaced */ 3672858Swollman if (ltemp < 0) 3682858Swollman time_freq -= (-ltemp * mtemp) >> (time_constant + 3692858Swollman time_constant + SHIFT_KF - SHIFT_USEC); 3702858Swollman else 3712858Swollman time_freq += (ltemp * mtemp) >> (time_constant + 3722858Swollman time_constant + SHIFT_KF - SHIFT_USEC); 3732858Swollman if (time_freq > time_tolerance) 3742858Swollman time_freq = time_tolerance; 3752858Swollman else if (time_freq < -time_tolerance) 3762858Swollman time_freq = -time_tolerance; 3772858Swollman} 3782858Swollman 3792858Swollman 3802858Swollman 3812858Swollman/* 3821541Srgrimes * Initialize clock frequencies and start both clocks running. 3831541Srgrimes */ 3841541Srgrimesvoid 3851541Srgrimesinitclocks() 3861541Srgrimes{ 3871541Srgrimes register int i; 3881541Srgrimes 3891541Srgrimes /* 3901541Srgrimes * Set divisors to 1 (normal case) and let the machine-specific 3911541Srgrimes * code do its bit. 
3921541Srgrimes */ 3931541Srgrimes psdiv = pscnt = 1; 3941541Srgrimes cpu_initclocks(); 3951541Srgrimes 3961541Srgrimes /* 3971541Srgrimes * Compute profhz/stathz, and fix profhz if needed. 3981541Srgrimes */ 3991541Srgrimes i = stathz ? stathz : hz; 4001541Srgrimes if (profhz == 0) 4011541Srgrimes profhz = i; 4021541Srgrimes psratio = profhz / i; 4031541Srgrimes} 4041541Srgrimes 4051541Srgrimes/* 4061541Srgrimes * The real-time timer, interrupting hz times per second. 4071541Srgrimes */ 4081541Srgrimesvoid 4091541Srgrimeshardclock(frame) 4101541Srgrimes register struct clockframe *frame; 4111541Srgrimes{ 4121541Srgrimes register struct callout *p1; 4131541Srgrimes register struct proc *p; 4141541Srgrimes register int delta, needsoft; 4151541Srgrimes extern int tickdelta; 4161541Srgrimes extern long timedelta; 4171541Srgrimes 4181541Srgrimes /* 4191541Srgrimes * Update real-time timeout queue. 4201541Srgrimes * At front of queue are some number of events which are ``due''. 4211541Srgrimes * The time to these is <= 0 and if negative represents the 4221541Srgrimes * number of ticks which have passed since it was supposed to happen. 4231541Srgrimes * The rest of the q elements (times > 0) are events yet to happen, 4241541Srgrimes * where the time for each is given as a delta from the previous. 4251541Srgrimes * Decrementing just the first of these serves to decrement the time 4261541Srgrimes * to all events. 4271541Srgrimes */ 4281541Srgrimes needsoft = 0; 4291541Srgrimes for (p1 = calltodo.c_next; p1 != NULL; p1 = p1->c_next) { 4301541Srgrimes if (--p1->c_time > 0) 4311541Srgrimes break; 4321541Srgrimes needsoft = 1; 4331541Srgrimes if (p1->c_time == 0) 4341541Srgrimes break; 4351541Srgrimes } 4361541Srgrimes 4371541Srgrimes p = curproc; 4381541Srgrimes if (p) { 4391541Srgrimes register struct pstats *pstats; 4401541Srgrimes 4411541Srgrimes /* 4421541Srgrimes * Run current process's virtual and profile time, as needed. 
4431541Srgrimes */ 4441541Srgrimes pstats = p->p_stats; 4451541Srgrimes if (CLKF_USERMODE(frame) && 4461541Srgrimes timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) && 4471541Srgrimes itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) 4481541Srgrimes psignal(p, SIGVTALRM); 4491541Srgrimes if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) && 4501541Srgrimes itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) 4511541Srgrimes psignal(p, SIGPROF); 4521541Srgrimes } 4531541Srgrimes 4541541Srgrimes /* 4551541Srgrimes * If no separate statistics clock is available, run it from here. 4561541Srgrimes */ 4571541Srgrimes if (stathz == 0) 4581541Srgrimes statclock(frame); 4591541Srgrimes 4601541Srgrimes /* 4612858Swollman * Increment the time-of-day. 4621541Srgrimes */ 4631541Srgrimes ticks++; 4642858Swollman { 4652858Swollman int time_update; 4662858Swollman struct timeval newtime = time; 4672858Swollman long ltemp; 4682858Swollman 4692858Swollman if (timedelta == 0) { 4702858Swollman time_update = tick; 4712858Swollman } else { 4722858Swollman if (timedelta < 0) { 4732858Swollman time_update = tick - tickdelta; 4742858Swollman timedelta += tickdelta; 4752858Swollman } else { 4762858Swollman time_update = tick + tickdelta; 4772858Swollman timedelta -= tickdelta; 4782858Swollman } 4792858Swollman } 4802858Swollman BUMPTIME(&mono_time, time_update); 4812858Swollman 4822858Swollman /* 4832858Swollman * Compute the phase adjustment. If the low-order bits 4842858Swollman * (time_phase) of the update overflow, bump the high-order bits 4852858Swollman * (time_update). 
4862858Swollman */ 4872858Swollman time_phase += time_adj; 4882858Swollman if (time_phase <= -FINEUSEC) { 4892858Swollman ltemp = -time_phase >> SHIFT_SCALE; 4902858Swollman time_phase += ltemp << SHIFT_SCALE; 4912858Swollman time_update -= ltemp; 4922858Swollman } 4932858Swollman else if (time_phase >= FINEUSEC) { 4942858Swollman ltemp = time_phase >> SHIFT_SCALE; 4952858Swollman time_phase -= ltemp << SHIFT_SCALE; 4962858Swollman time_update += ltemp; 4972858Swollman } 4982858Swollman 4992858Swollman newtime.tv_usec += time_update; 5002858Swollman /* 5012858Swollman * On rollover of the second the phase adjustment to be used for 5022858Swollman * the next second is calculated. Also, the maximum error is 5032858Swollman * increased by the tolerance. If the PPS frequency discipline 5042858Swollman * code is present, the phase is increased to compensate for the 5052858Swollman * CPU clock oscillator frequency error. 5062858Swollman * 5072858Swollman * With SHIFT_SCALE = 23, the maximum frequency adjustment is 5082858Swollman * +-256 us per tick, or 25.6 ms/s at a clock frequency of 100 5092858Swollman * Hz. The time contribution is shifted right a minimum of two 5102858Swollman * bits, while the frequency contribution is a right shift. 5112858Swollman * Thus, overflow is prevented if the frequency contribution is 5122858Swollman * limited to half the maximum or 15.625 ms/s. 
5132858Swollman */ 5142858Swollman if (newtime.tv_usec >= 1000000) { 5152858Swollman newtime.tv_usec -= 1000000; 5162858Swollman newtime.tv_sec++; 5172858Swollman time_maxerror += time_tolerance >> SHIFT_USEC; 5182858Swollman if (time_offset < 0) { 5192858Swollman ltemp = -time_offset >> 5202858Swollman (SHIFT_KG + time_constant); 5212858Swollman time_offset += ltemp; 5222858Swollman time_adj = -ltemp << 5232858Swollman (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE); 5242858Swollman } else { 5252858Swollman ltemp = time_offset >> 5262858Swollman (SHIFT_KG + time_constant); 5272858Swollman time_offset -= ltemp; 5282858Swollman time_adj = ltemp << 5292858Swollman (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE); 5302858Swollman } 5312858Swollman#ifdef PPS_SYNC 5322858Swollman /* 5332858Swollman * Gnaw on the watchdog counter and update the frequency 5342858Swollman * computed by the pll and the PPS signal. 5352858Swollman */ 5362858Swollman pps_valid++; 5372858Swollman if (pps_valid == PPS_VALID) { 5382858Swollman pps_jitter = MAXTIME; 5392858Swollman pps_stabil = MAXFREQ; 5402858Swollman time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER | 5412858Swollman STA_PPSWANDER | STA_PPSERROR); 5422858Swollman } 5432858Swollman ltemp = time_freq + pps_freq; 5442858Swollman#else 5452858Swollman ltemp = time_freq; 5462858Swollman#endif /* PPS_SYNC */ 5472858Swollman if (ltemp < 0) 5482858Swollman time_adj -= -ltemp >> 5492858Swollman (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE); 5502858Swollman else 5512858Swollman time_adj += ltemp >> 5522858Swollman (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE); 5532858Swollman 5542858Swollman /* 5552858Swollman * When the CPU clock oscillator frequency is not a 5562858Swollman * power of two in Hz, the SHIFT_HZ is only an 5572858Swollman * approximate scale factor. In the SunOS kernel, this 5582858Swollman * results in a PLL gain factor of 1/1.28 = 0.78 what it 5592858Swollman * should be. 
In the following code the overall gain is 5602858Swollman * increased by a factor of 1.25, which results in a 5612858Swollman * residual error less than 3 percent. 5622858Swollman */ 5632858Swollman /* Same thing applies for FreeBSD --GAW */ 5642858Swollman if (hz == 100) { 5652858Swollman if (time_adj < 0) 5662858Swollman time_adj -= -time_adj >> 2; 5672858Swollman else 5682858Swollman time_adj += time_adj >> 2; 5692858Swollman } 5702858Swollman 5712858Swollman /* XXX - this is really bogus, but can't be fixed until 5722858Swollman xntpd's idea of the system clock is fixed to know how 5732858Swollman the user wants leap seconds handled; in the mean time, 5742858Swollman we assume that users of NTP are running without proper 5752858Swollman leap second support (this is now the default anyway) */ 5762858Swollman /* 5772858Swollman * Leap second processing. If in leap-insert state at 5782858Swollman * the end of the day, the system clock is set back one 5792858Swollman * second; if in leap-delete state, the system clock is 5802858Swollman * set ahead one second. The microtime() routine or 5812858Swollman * external clock driver will insure that reported time 5822858Swollman * is always monotonic. The ugly divides should be 5832858Swollman * replaced. 
5842858Swollman */ 5852858Swollman switch (time_state) { 5862858Swollman 5872858Swollman case TIME_OK: 5882858Swollman if (time_status & STA_INS) 5892858Swollman time_state = TIME_INS; 5902858Swollman else if (time_status & STA_DEL) 5912858Swollman time_state = TIME_DEL; 5922858Swollman break; 5932858Swollman 5942858Swollman case TIME_INS: 5952858Swollman if (newtime.tv_sec % 86400 == 0) { 5962858Swollman newtime.tv_sec--; 5972858Swollman time_state = TIME_OOP; 5982858Swollman } 5992858Swollman break; 6002858Swollman 6012858Swollman case TIME_DEL: 6022858Swollman if ((newtime.tv_sec + 1) % 86400 == 0) { 6032858Swollman newtime.tv_sec++; 6042858Swollman time_state = TIME_WAIT; 6052858Swollman } 6062858Swollman break; 6072858Swollman 6082858Swollman case TIME_OOP: 6092858Swollman time_state = TIME_WAIT; 6102858Swollman break; 6112858Swollman 6122858Swollman case TIME_WAIT: 6132858Swollman if (!(time_status & (STA_INS | STA_DEL))) 6142858Swollman time_state = TIME_OK; 6152858Swollman } 6162858Swollman } 6172858Swollman CPU_CLOCKUPDATE(&time, &newtime); 6181541Srgrimes } 6191541Srgrimes 6201541Srgrimes /* 6211541Srgrimes * Process callouts at a very low cpu priority, so we don't keep the 6221541Srgrimes * relatively high clock interrupt priority any longer than necessary. 6231541Srgrimes */ 6241541Srgrimes if (needsoft) { 6251541Srgrimes if (CLKF_BASEPRI(frame)) { 6261541Srgrimes /* 6271541Srgrimes * Save the overhead of a software interrupt; 6281541Srgrimes * it will happen as soon as we return, so do it now. 6291541Srgrimes */ 6301541Srgrimes (void)splsoftclock(); 6311541Srgrimes softclock(); 6321541Srgrimes } else 6331541Srgrimes setsoftclock(); 6341541Srgrimes } 6351541Srgrimes} 6361541Srgrimes 6371541Srgrimes/* 6381541Srgrimes * Software (low priority) clock interrupt. 6391541Srgrimes * Run periodic events from timeout queue. 
 */
/*ARGSUSED*/
void
softclock()
{
	register struct callout *c;
	register void *arg;
	register void (*func) __P((void *));
	register int s;

	/*
	 * Pop expired entries off the head of the callout queue.
	 * c_time is stored as a difference from the previous entry,
	 * so the head's c_time <= 0 means "due now or overdue".
	 */
	s = splhigh();
	while ((c = calltodo.c_next) != NULL && c->c_time <= 0) {
		func = c->c_func;
		arg = c->c_arg;
		/* Unlink the entry and return it to the free list. */
		calltodo.c_next = c->c_next;
		c->c_next = callfree;
		callfree = c;
		/*
		 * Drop back to the caller's priority while running the
		 * handler so the clock is not locked out for the whole
		 * sweep; re-raise before touching the queue again.
		 */
		splx(s);
		(*func)(arg);
		(void) splhigh();
	}
	splx(s);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that no identification
 *	value is returned from timeout, rather, the original arguments
 *	to timeout are used to identify entries for untimeout.
 */
void
timeout(ftn, arg, ticks)
	timeout_t ftn;
	void *arg;
	register int ticks;
{
	register struct callout *new, *p, *t;
	register int s;

	/* A non-positive delay fires on the next softclock() pass. */
	if (ticks <= 0)
		ticks = 1;

	/* Lock out the clock. */
	s = splhigh();

	/* Fill in the next free callout structure. */
	if (callfree == NULL)
		panic("timeout table full");
	new = callfree;
	callfree = new->c_next;
	new->c_arg = arg;
	new->c_func = ftn;

	/*
	 * The time for each event is stored as a difference from the time
	 * of the previous event on the queue.  Walk the queue, correcting
	 * the ticks argument for queue entries passed.  Correct the ticks
	 * value for the queue entry immediately after the insertion point
	 * as well.  Watch out for negative c_time values; these represent
	 * overdue events.
	 */
	for (p = &calltodo;
	    (t = p->c_next) != NULL && ticks > t->c_time; p = t)
		if (t->c_time > 0)
			ticks -= t->c_time;
	new->c_time = ticks;
	if (t != NULL)
		t->c_time -= ticks;

	/* Insert the new entry into the queue. */
	p->c_next = new;
	new->c_next = t;
	splx(s);
}

/*
 * Remove the first queue entry matching (ftn, arg), crediting its
 * remaining c_time to the successor so later entries still fire on
 * schedule.  Silently does nothing if no entry matches.
 */
void
untimeout(ftn, arg)
	timeout_t ftn;
	void *arg;
{
	register struct callout *p, *t;
	register int s;

	s = splhigh();
	for (p = &calltodo; (t = p->c_next) != NULL; p = t)
		if (t->c_func == ftn && t->c_arg == arg) {
			/* Increment next entry's tick count. */
			if (t->c_next && t->c_time > 0)
				t->c_next->c_time += t->c_time;

			/* Move entry from callout queue to callfree queue. */
			p->c_next = t->c_next;
			t->c_next = callfree;
			callfree = t;
			break;
		}
	splx(s);
}

/*
 * Compute number of hz until specified time.  Used to
 * compute third argument to timeout() from an absolute time.
 */
int
hzto(tv)
	struct timeval *tv;
{
	register long ticks, sec;
	int s;

	/*
	 * If number of milliseconds will fit in 32 bit arithmetic,
	 * then compute number of milliseconds to time and scale to
	 * ticks.  Otherwise just compute number of hz in time, rounding
	 * times greater than representible to maximum value.
	 *
	 * Delta times less than 25 days can be computed ``exactly''.
	 * Maximum value for any timeout in 10ms ticks is 250 days.
	 *
	 * NOTE(review): the first branch divides by (tick / 1000),
	 * i.e. milliseconds per tick.  If hz > 1000 then tick < 1000
	 * and the integer divisor becomes 0 -- division by zero.  Safe
	 * only for hz <= 1000 (hz == 100 was the norm); confirm before
	 * raising hz.
	 */
	s = splhigh();
	sec = tv->tv_sec - time.tv_sec;
	if (sec <= 0x7fffffff / 1000 - 1000)
		ticks = ((tv->tv_sec - time.tv_sec) * 1000 +
			(tv->tv_usec - time.tv_usec) / 1000) / (tick / 1000);
	else if (sec <= 0x7fffffff / hz)
		ticks = sec * hz;
	else
		ticks = 0x7fffffff;
	splx(s);
	return (ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
7831541Srgrimes */ 7841541Srgrimesvoid 7851541Srgrimesstartprofclock(p) 7861541Srgrimes register struct proc *p; 7871541Srgrimes{ 7881541Srgrimes int s; 7891541Srgrimes 7901541Srgrimes if ((p->p_flag & P_PROFIL) == 0) { 7911541Srgrimes p->p_flag |= P_PROFIL; 7921541Srgrimes if (++profprocs == 1 && stathz != 0) { 7931541Srgrimes s = splstatclock(); 7941541Srgrimes psdiv = pscnt = psratio; 7951541Srgrimes setstatclockrate(profhz); 7961541Srgrimes splx(s); 7971541Srgrimes } 7981541Srgrimes } 7991541Srgrimes} 8001541Srgrimes 8011541Srgrimes/* 8021541Srgrimes * Stop profiling on a process. 8031541Srgrimes */ 8041541Srgrimesvoid 8051541Srgrimesstopprofclock(p) 8061541Srgrimes register struct proc *p; 8071541Srgrimes{ 8081541Srgrimes int s; 8091541Srgrimes 8101541Srgrimes if (p->p_flag & P_PROFIL) { 8111541Srgrimes p->p_flag &= ~P_PROFIL; 8121541Srgrimes if (--profprocs == 0 && stathz != 0) { 8131541Srgrimes s = splstatclock(); 8141541Srgrimes psdiv = pscnt = 1; 8151541Srgrimes setstatclockrate(stathz); 8161541Srgrimes splx(s); 8171541Srgrimes } 8181541Srgrimes } 8191541Srgrimes} 8201541Srgrimes 8211541Srgrimes/* 8221541Srgrimes * Statistics clock. Grab profile sample, and if divider reaches 0, 8231541Srgrimes * do process and kernel statistics. 
 */
void
statclock(frame)
	register struct clockframe *frame;
{
#ifdef GPROF
	register struct gmonparam *g;
#endif
	register struct proc *p = curproc;
	register int i;

	if (p) {
		struct pstats *pstats;
		struct rusage *ru;
		struct vmspace *vm;

		/* bump the resource usage of integral space use */
		if ((pstats = p->p_stats) && (ru = &pstats->p_ru) && (vm = p->p_vmspace)) {
			ru->ru_ixrss += vm->vm_tsize * PAGE_SIZE / 1024;
			ru->ru_idrss += vm->vm_dsize * PAGE_SIZE / 1024;
			ru->ru_isrss += vm->vm_ssize * PAGE_SIZE / 1024;
			if ((vm->vm_pmap.pm_stats.resident_count * PAGE_SIZE / 1024) >
			    ru->ru_maxrss) {
				ru->ru_maxrss =
				    vm->vm_pmap.pm_stats.resident_count * PAGE_SIZE / 1024;
			}
		}
	}

	/*
	 * NOTE(review): this branch dereferences p without a NULL check;
	 * it apparently relies on curproc being non-NULL whenever the
	 * interrupt arrived from user mode -- confirm that invariant.
	 */
	if (CLKF_USERMODE(frame)) {
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame), 1);
		/* pscnt divides the stat rate down when profiling is on. */
		if (--pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			cp_time[CP_SYS]++;
		} else
			cp_time[CP_IDLE]++;
	}
	/* Re-arm the divider for the next statistics period. */
	pscnt = psdiv;

	/*
	 * We maintain statistics shown by user-level statistics
	 * programs:  the amount of time in each cpu state, and
	 * the amount of time each of DK_NDRIVE ``drives'' is busy.
	 *
	 * XXX	should either run linked list of drives, or (better)
	 *	grab timestamps in the start & done code.
	 */
	for (i = 0; i < DK_NDRIVE; i++)
		if (dk_busy & (1 << i))
			dk_time[i]++;

	/*
	 * We adjust the priority of the current process.  The priority of
	 * a process gets worse as it accumulates CPU time.  The cpu usage
	 * estimator (p_estcpu) is increased here.  The formula for computing
	 * priorities (in kern_synch.c) will compute a different value each
	 * time p_estcpu increases by 4.  The cpu usage estimator ramps up
	 * quite quickly when the process is running (linearly), and decays
	 * away exponentially, at a rate which is proportionally slower when
	 * the system is busy.  The basic principal is that the system will
	 * 90% forget that the process used a lot of CPU time in 5 * loadav
	 * seconds.  This causes the system to favor processes which haven't
	 * run much recently, and to round-robin among other processes.
	 */
	if (p != NULL) {
		p->p_cpticks++;
		/* Saturate p_estcpu at its maximum instead of wrapping to 0. */
		if (++p->p_estcpu == 0)
			p->p_estcpu--;
		if ((p->p_estcpu & 3) == 0) {
			resetpriority(p);
			if (p->p_priority >= PUSER)
				p->p_priority = p->p_usrpri;
		}
	}
}

/*
 * Return information about system clocks.
 */
int
sysctl_clockrate(where, sizep)
	register char *where;
	size_t *sizep;
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
 */
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	/* stathz == 0 means the stat clock is slaved to hz. */
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo)));
}

/*#ifdef PPS_SYNC*/
#if 0
/* This code is completely bogus; if anybody ever wants to use it, get
 * the current version from Dave Mills. */

/*
 * hardpps() - discipline CPU clock oscillator to external pps signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal. It integrates successive
 * phase differences between the two oscillators and calculates the
 * frequency offset. This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware
 * counter value at the designated PPS signal transition.
 *
 * NOTE(review): this whole function is compiled out (#if 0) and is
 * known-broken; ``NTP_PLL.SHIFT'' and ``NTP_PLL.SHIFTMAX'' below are
 * not valid C identifiers as written and would not compile if the
 * block were ever enabled.  Replace with Mills' current hardpps()
 * rather than fixing in place.
 */
void
hardpps(tvp, usec)
	struct timeval *tvp;		/* time at PPS */
	long usec;			/* hardware counter at PPS */
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= ntp_pll.ybar;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	/* Accumulate until the calibration interval (2^pps_shift s) ends. */
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	ntp_pll.calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	/* Arithmetic (sign-preserving) right shift of the phase error. */
	if (v_usec < 0)
		v_usec = -(-v_usec >> ntp_pll.shift);
	else
		v_usec = v_usec >> ntp_pll.shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > ntp_pll.tolerance || v_usec < -ntp_pll.tolerance) {
		ntp_pll.jitcnt++;
		ntp_pll.shift = NTP_PLL.SHIFT;
		pps_dispinc = PPS_DISPINC;
		ntp_pll.intcnt = 0;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * signal. The median sample becomes the offset estimate; the
	 * difference between the other two samples becomes the
	 * dispersion estimate.
	 */
	pps_mf[2] = pps_mf[1];
	pps_mf[1] = pps_mf[0];
	pps_mf[0] = v_usec;
	if (pps_mf[0] > pps_mf[1]) {
		if (pps_mf[1] > pps_mf[2]) {
			u_usec = pps_mf[1];		/* 0 1 2 */
			v_usec = pps_mf[0] - pps_mf[2];
		} else if (pps_mf[2] > pps_mf[0]) {
			u_usec = pps_mf[0];		/* 2 0 1 */
			v_usec = pps_mf[2] - pps_mf[1];
		} else {
			u_usec = pps_mf[2];		/* 0 2 1 */
			v_usec = pps_mf[0] - pps_mf[1];
		}
	} else {
		if (pps_mf[1] < pps_mf[2]) {
			u_usec = pps_mf[1];		/* 2 1 0 */
			v_usec = pps_mf[2] - pps_mf[0];
		} else if (pps_mf[2] < pps_mf[0]) {
			u_usec = pps_mf[0];		/* 1 0 2 */
			v_usec = pps_mf[1] - pps_mf[2];
		} else {
			u_usec = pps_mf[2];		/* 1 2 0 */
			v_usec = pps_mf[1] - pps_mf[0];
		}
	}

	/*
	 * Here the dispersion average is updated. If it is less than
	 * the threshold pps_dispmax, the frequency average is updated
	 * as well, but clamped to the tolerance.
	 */
	v_usec = (v_usec >> 1) - ntp_pll.disp;
	if (v_usec < 0)
		ntp_pll.disp -= -v_usec >> PPS_AVG;
	else
		ntp_pll.disp += v_usec >> PPS_AVG;
	if (ntp_pll.disp > pps_dispmax) {
		ntp_pll.discnt++;
		return;
	}
	if (u_usec < 0) {
		ntp_pll.ybar -= -u_usec >> PPS_AVG;
		if (ntp_pll.ybar < -ntp_pll.tolerance)
			ntp_pll.ybar = -ntp_pll.tolerance;
		u_usec = -u_usec;
	} else {
		ntp_pll.ybar += u_usec >> PPS_AVG;
		if (ntp_pll.ybar > ntp_pll.tolerance)
			ntp_pll.ybar = ntp_pll.tolerance;
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick/4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << ntp_pll.shift > bigtick >> 2) {
		ntp_pll.intcnt = 0;
		if (ntp_pll.shift > NTP_PLL.SHIFT) {
			ntp_pll.shift--;
			pps_dispinc <<= 1;
		}
	} else if (ntp_pll.intcnt >= 4) {
		ntp_pll.intcnt = 0;
		if (ntp_pll.shift < NTP_PLL.SHIFTMAX) {
			ntp_pll.shift++;
			pps_dispinc >>= 1;
		}
	} else
		ntp_pll.intcnt++;
}
#endif /* PPS_SYNC */