/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_clock.c 201879 2010-01-09 01:46:38Z attilio $");

#include "opt_kdb.h"
#include "opt_device_polling.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ntp.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/limits.h>
#include <sys/timetc.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef DEVICE_POLLING
extern void hardclock_device_poll(void);
#endif /* DEVICE_POLLING */

static void initclocks(void *dummy);
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL);

/* Spin-lock protecting profiling statistics. */
static struct mtx time_lock;

static int
sysctl_kern_cp_time(SYSCTL_HANDLER_ARGS)
{
	int error;
	long cp_time[CPUSTATES];
#ifdef SCTL_MASK32
	int i;
	unsigned int cp_time32[CPUSTATES];
#endif

	read_cpu_time(cp_time);
#ifdef SCTL_MASK32
	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32));
		for (i = 0; i < CPUSTATES; i++)
			cp_time32[i] = (unsigned int)cp_time[i];
		error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
	} else
#endif
	{
		if (!req->oldptr)
			return SYSCTL_OUT(req, 0, sizeof(cp_time));
		error = SYSCTL_OUT(req, cp_time, sizeof(cp_time));
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_time, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0,0, sysctl_kern_cp_time, "LU", "CPU time statistics");
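
/*
 * A minimal userland sketch of reading the sysctl above (illustrative
 * only, not part of the kernel build; assumes the sysctlbyname(3)
 * interface and the CPUSTATES/CP_* constants from <sys/resource.h>):
 */
#if 0
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	long cp_time[CPUSTATES];
	size_t len = sizeof(cp_time);

	if (sysctlbyname("kern.cp_time", &cp_time, &len, NULL, 0) == -1)
		return (1);
	printf("user %ld nice %ld sys %ld intr %ld idle %ld\n",
	    cp_time[CP_USER], cp_time[CP_NICE], cp_time[CP_SYS],
	    cp_time[CP_INTR], cp_time[CP_IDLE]);
	return (0);
}
#endif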

/* All-zero cp_time[] used below for absent CPUs. */
static long empty[CPUSTATES];

static int
sysctl_kern_cp_times(SYSCTL_HANDLER_ARGS)
{
	struct pcpu *pcpu;
	int error;
	int c;
	long *cp_time;
#ifdef SCTL_MASK32
	unsigned int cp_time32[CPUSTATES];
	int i;
#endif

	if (!req->oldptr) {
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32)
			return SYSCTL_OUT(req, 0, sizeof(cp_time32) * (mp_maxid + 1));
		else
#endif
			return SYSCTL_OUT(req, 0, sizeof(long) * CPUSTATES * (mp_maxid + 1));
	}
	for (error = 0, c = 0; error == 0 && c <= mp_maxid; c++) {
		if (!CPU_ABSENT(c)) {
			pcpu = pcpu_find(c);
			cp_time = pcpu->pc_cp_time;
		} else {
			cp_time = empty;
		}
#ifdef SCTL_MASK32
		if (req->flags & SCTL_MASK32) {
			for (i = 0; i < CPUSTATES; i++)
				cp_time32[i] = (unsigned int)cp_time[i];
			error = SYSCTL_OUT(req, cp_time32, sizeof(cp_time32));
		} else
#endif
			error = SYSCTL_OUT(req, cp_time, sizeof(long) * CPUSTATES);
	}
	return error;
}

SYSCTL_PROC(_kern, OID_AUTO, cp_times, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0,0, sysctl_kern_cp_times, "LU", "per-CPU time statistics");

#ifdef DEADLKRES
static int slptime_threshold = 1800;
static int blktime_threshold = 900;
static int sleepfreq = 3;

static void
deadlkres(void)
{
	struct proc *p;
	struct thread *td;
	void *wchan;
	int blkticks, slpticks, slptype, tryl, tticks;

	tryl = 0;
	for (;;) {
		blkticks = blktime_threshold * hz;
		slpticks = slptime_threshold * hz;

		/*
		 * Avoid sleeping on the sx lock in order to avoid a possible
		 * priority inversion problem leading to starvation.
		 * If the lock can't be acquired after 100 tries, panic.
		 */
		if (!sx_try_slock(&allproc_lock)) {
			if (tryl > 100)
		panic("%s: possible deadlock detected on allproc_lock\n",
				    __func__);
			tryl++;
			pause("allproc_lock deadlkres", sleepfreq * hz);
			continue;
		}
		tryl = 0;
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (TD_ON_LOCK(td)) {

					/*
					 * The thread should be blocked on a
					 * turnstile; simply check whether the
					 * turnstile channel is in good state.
					 */
					MPASS(td->td_blocked != NULL);
					tticks = ticks - td->td_blktick;
					thread_unlock(td);
					if (tticks > blkticks) {

						/*
						 * According to the provided
						 * thresholds, this thread has
						 * been stuck on a turnstile
						 * for too long.
						 */
						PROC_UNLOCK(p);
						sx_sunlock(&allproc_lock);
	panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
						    __func__, td, tticks);
					}
				} else if (TD_IS_SLEEPING(td)) {

					/*
					 * Check whether the thread is sleeping
					 * on a lock; otherwise skip the check.
					 * Drop the thread lock in order to
					 * avoid a LOR with the sleepqueue
					 * spinlock.
					 */
					wchan = td->td_wchan;
					tticks = ticks - td->td_slptick;
					thread_unlock(td);
					slptype = sleepq_type(wchan);
					if ((slptype == SLEEPQ_SX ||
					    slptype == SLEEPQ_LK) &&
					    tticks > slpticks) {

						/*
						 * According to the provided
						 * thresholds, this thread has
						 * been stuck on a sleepqueue
						 * for too long.
						 */
						PROC_UNLOCK(p);
						sx_sunlock(&allproc_lock);
	panic("%s: possible deadlock detected for %p, blocked for %d ticks\n",
						    __func__, td, tticks);
					}
				} else
					thread_unlock(td);
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);

		/* Sleep for sleepfreq seconds. */
		pause("deadlkres", sleepfreq * hz);
	}
}

static struct kthread_desc deadlkres_kd = {
	"deadlkres",
	deadlkres,
	(struct thread **)NULL
};

SYSINIT(deadlkres, SI_SUB_CLOCKS, SI_ORDER_ANY, kthread_start, &deadlkres_kd);

SYSCTL_NODE(_debug, OID_AUTO, deadlkres, CTLFLAG_RW, 0, "Deadlock resolver");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, slptime_threshold, CTLFLAG_RW,
    &slptime_threshold, 0,
    "Maximum number of seconds a thread may sleep on a sleepqueue before "
    "a deadlock is assumed");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, blktime_threshold, CTLFLAG_RW,
    &blktime_threshold, 0,
    "Maximum number of seconds a thread may block on a turnstile before "
    "a deadlock is assumed");
SYSCTL_INT(_debug_deadlkres, OID_AUTO, sleepfreq, CTLFLAG_RW, &sleepfreq, 0,
    "Number of seconds between deadlock resolver runs");
#endif	/* DEADLKRES */

void
read_cpu_time(long *cp_time)
{
	struct pcpu *pc;
	int i, j;

	/* Sum up global cp_time[]. */
	bzero(cp_time, sizeof(long) * CPUSTATES);
	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		pc = pcpu_find(i);
		for (j = 0; j < CPUSTATES; j++)
			cp_time[j] += pc->pc_cp_time[j];
	}
}

#ifdef SW_WATCHDOG
#include <sys/watchdog.h>

static int watchdog_ticks;
static int watchdog_enabled;
static void watchdog_fire(void);
static void watchdog_config(void *, u_int, int *);
#endif /* SW_WATCHDOG */

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.
 *
 * The main timer, running hz times per second, is used to trigger interval
 * timers, timeouts and rescheduling as needed.
 *
 * The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
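 *
 * For example, with stathz = 128 and profhz = 1024, psratio is 8 and the
 * statistics code counts only every eighth profiling-rate tick.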
 *
 * Time-of-day is maintained using a "timecounter", which may or may
 * not be related to the hardware generating the above mentioned
 * interrupts.
 */

int	stathz;			/* statistics clock's frequency */
int	profhz;			/* profiling clock's frequency */
int	profprocs;		/* number of processes being profiled */
int	ticks;			/* hardclock ticks elapsed since boot */
int	psratio;		/* ratio: profhz / stathz */

/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(void *dummy)
{
	int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	mtx_init(&time_lock, "time lock", NULL, MTX_SPIN);
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
#ifdef SW_WATCHDOG
	EVENTHANDLER_REGISTER(watchdog_list, watchdog_config, NULL, 0);
#endif
}

/*
 * Each time the real-time timer fires, this function is called on all CPUs.
 * Note that hardclock() calls hardclock_cpu() for the boot CPU, so only
 * the other CPUs in the system need to call this function.
 */
void
hardclock_cpu(int usermode)
{
	struct pstats *pstats;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int flags;

	/*
	 * Run current process's virtual and profile time, as needed.
	 */
	pstats = p->p_stats;
	flags = 0;
	if (usermode &&
	    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			flags |= TDF_ALRMPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
		PROC_SLOCK(p);
		if (itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			flags |= TDF_PROFPEND | TDF_ASTPENDING;
		PROC_SUNLOCK(p);
	}
	thread_lock(td);
	sched_tick();
	td->td_flags |= flags;
	thread_unlock(td);

#ifdef	HWPMC_HOOKS
	if (PMC_CPU_HAS_SAMPLES(PCPU_GET(cpuid)))
		PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
#endif
	callout_tick();
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(int usermode, uintfptr_t pc)
{

	atomic_add_int((volatile int *)&ticks, 1);
	hardclock_cpu(usermode);
	tc_ticktock();
	/*
	 * If no separate statistics clock is available, run it from here.
	 *
	 * XXX: this only works for UP
	 */
	if (stathz == 0) {
		profclock(usermode, pc);
		statclock(usermode);
	}
#ifdef DEVICE_POLLING
	hardclock_device_poll();	/* this is very short and quick */
#endif /* DEVICE_POLLING */
#ifdef SW_WATCHDOG
	if (watchdog_enabled > 0 && --watchdog_ticks <= 0)
		watchdog_fire();
#endif /* SW_WATCHDOG */
}

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(struct timeval *tv)
{
	unsigned long ticks;
	long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
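	 *
	 * For example, with hz = 1000 (tick = 1000 usec), a timeval of
	 * 1.5 seconds takes the first case above and yields
	 * (1500000 + 999) / 1000 + 1 = 1501 ticks.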
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		       sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
			/ tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_STOPPROF)
		return;
	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		mtx_lock_spin(&time_lock);
		if (++profprocs == 1)
			cpu_startprofclock();
		mtx_unlock_spin(&time_lock);
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(struct proc *p)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (p->p_flag & P_PROFIL) {
		if (p->p_profthreads != 0) {
			p->p_flag |= P_STOPPROF;
			while (p->p_profthreads != 0)
				msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
				    "stopprof", 0);
			p->p_flag &= ~P_STOPPROF;
		}
		if ((p->p_flag & P_PROFIL) == 0)
			return;
		p->p_flag &= ~P_PROFIL;
		mtx_lock_spin(&time_lock);
		if (--profprocs == 0)
			cpu_stopprofclock();
		mtx_unlock_spin(&time_lock);
	}
}
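
/*
 * From userland these routines are normally reached through profil(2),
 * as issued by the C runtime for -pg binaries.  A minimal sketch
 * (illustrative only, not part of the kernel build; see profil(2) for
 * the exact meaning of the scale argument):
 */
#if 0
#include <sys/types.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	size_t size = 8192 * sizeof(u_short);
	char *samples = calloc(1, size);

	profil(samples, size, 0, 0x10000);	/* start sampling the pc */
	/* ... code to be profiled ... */
	profil(NULL, 0, 0, 0);			/* a scale of 0 stops profiling */
	return (0);
}
#endif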

/*
 * Statistics clock.  Updates rusage information and calls the scheduler
 * to adjust priorities of the active thread.
 *
 * This should be called by all active processors.
 */
void
statclock(int usermode)
{
	struct rusage *ru;
	struct vmspace *vm;
	struct thread *td;
	struct proc *p;
	long rss;
	long *cp_time;

	td = curthread;
	p = td->td_proc;

	cp_time = (long *)PCPU_PTR(cp_time);
	if (usermode) {
		/*
		 * Charge the time as appropriate.
		 */
		td->td_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if ((td->td_pflags & TDP_ITHREAD) ||
		    td->td_intr_nesting_level >= 2) {
			td->td_iticks++;
			cp_time[CP_INTR]++;
		} else {
			td->td_pticks++;
			td->td_sticks++;
			if (!TD_IS_IDLETHREAD(td))
				cp_time[CP_SYS]++;
			else
				cp_time[CP_IDLE]++;
		}
	}

	/* Update resource usage integrals and maximums. */
	MPASS(p->p_vmspace != NULL);
	vm = p->p_vmspace;
	ru = &td->td_ru;
	ru->ru_ixrss += pgtok(vm->vm_tsize);
	ru->ru_idrss += pgtok(vm->vm_dsize);
	ru->ru_isrss += pgtok(vm->vm_ssize);
	rss = pgtok(vmspace_resident_count(vm));
	if (ru->ru_maxrss < rss)
		ru->ru_maxrss = rss;
	KTR_POINT2(KTR_SCHED, "thread", sched_tdname(td), "statclock",
	    "prio:%d", td->td_priority, "stathz:%d", (stathz)?stathz:hz);
	thread_lock_flags(td, MTX_QUIET);
	sched_clock(td);
	thread_unlock(td);
}

void
profclock(int usermode, uintfptr_t pc)
{
	struct thread *td;
#ifdef GPROF
	struct gmonparam *g;
	uintfptr_t i;
#endif

	td = curthread;
	if (usermode) {
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled, record the tick.
		 * If there is no related user location yet, don't
		 * bother trying to count it.
		 */
		if (td->td_proc->p_flag & P_PROFIL)
			addupc_intr(td, pc, 1);
	}
#ifdef GPROF
	else {
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON && pc >= g->lowpc) {
			i = PC_TO_I(g, pc);
			if (i < g->textsize) {
				KCOUNT(g, i)++;
			}
		}
	}
#endif
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate(SYSCTL_HANDLER_ARGS)
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	bzero(&clkinfo, sizeof(clkinfo));
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate,
	CTLTYPE_STRUCT|CTLFLAG_RD|CTLFLAG_MPSAFE,
	0, 0, sysctl_kern_clockrate, "S,clockinfo",
	"Rate and period of various kernel clocks");
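
/*
 * A minimal userland sketch of querying the sysctl above (illustrative
 * only, not part of the kernel build; assumes sysctlbyname(3) and
 * struct clockinfo from <sys/time.h>):
 */
#if 0
#include <sys/types.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	struct clockinfo ci;
	size_t len = sizeof(ci);

	if (sysctlbyname("kern.clockrate", &ci, &len, NULL, 0) == -1)
		return (1);
	printf("hz %d tick %d profhz %d stathz %d\n",
	    ci.hz, ci.tick, ci.profhz, ci.stathz);
	return (0);
}
#endif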

#ifdef SW_WATCHDOG

static void
watchdog_config(void *unused __unused, u_int cmd, int *error)
{
	u_int u;

	u = cmd & WD_INTERVAL;
	if (u >= WD_TO_1SEC) {
		watchdog_ticks = (1 << (u - WD_TO_1SEC)) * hz;
		watchdog_enabled = 1;
		*error = 0;
	} else {
		watchdog_enabled = 0;
	}
}
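
/*
 * For example (a sketch using the WD_TO_* encodings from <sys/watchdog.h>):
 * a command of WD_ACTIVE | WD_TO_16SEC gives u - WD_TO_1SEC == 4 above, so
 * watchdog_ticks becomes (1 << 4) * hz, i.e. a 16 second timeout.
 */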

/*
 * Handle a watchdog timeout by dumping interrupt information and
 * then either dropping to DDB or panicking.
 */
static void
watchdog_fire(void)
{
	int nintr;
	u_int64_t inttotal;
	u_long *curintr;
	char *curname;

	curintr = intrcnt;
	curname = intrnames;
	inttotal = 0;
	nintr = eintrcnt - intrcnt;

	printf("interrupt                   total\n");
	while (--nintr >= 0) {
		if (*curintr)
			printf("%-12s %20lu\n", curname, *curintr);
		curname += strlen(curname) + 1;
		inttotal += *curintr++;
	}
	printf("Total        %20ju\n", (uintmax_t)inttotal);

#if defined(KDB) && !defined(KDB_UNATTENDED)
	kdb_backtrace();
	kdb_enter(KDB_WHY_WATCHDOG, "watchdog timeout");
#else
	panic("watchdog timeout");
#endif
}

#endif /* SW_WATCHDOG */