/*-
 * Copyright (c) 2004-2007 Nate Lawson (SDG)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/timetc.h>
#include <sys/taskqueue.h>

#include "cpufreq_if.h"

/*
 * Common CPU frequency glue code.  Drivers for specific hardware can
 * attach this interface to allow users to get/set the CPU frequency.
 */

/*
 * Number of levels we can handle.  Levels are synthesized from settings,
 * so for M settings and N drivers, there may be M*N levels.
 */
#define CF_MAX_LEVELS	64

struct cf_saved_freq {
	struct cf_level			level;
	int				priority;
	SLIST_ENTRY(cf_saved_freq)	link;
};

struct cpufreq_softc {
	struct sx			lock;
	struct cf_level			curr_level;
	int				curr_priority;
	SLIST_HEAD(, cf_saved_freq)	saved_freq;
	struct cf_level_lst		all_levels;
	int				all_count;
	int				max_mhz;
	device_t			dev;
	struct sysctl_ctx_list		sysctl_ctx;
	struct task			startup_task;
	struct cf_level			*levels_buf;
};

struct cf_setting_array {
	struct cf_setting		sets[MAX_SETTINGS];
	int				count;
	TAILQ_ENTRY(cf_setting_array)	link;
};

TAILQ_HEAD(cf_setting_lst, cf_setting_array);

#define CF_MTX_INIT(x)		sx_init((x), "cpufreq lock")
#define CF_MTX_LOCK(x)		sx_xlock((x))
#define CF_MTX_UNLOCK(x)	sx_xunlock((x))
#define CF_MTX_ASSERT(x)	sx_assert((x), SX_XLOCKED)
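
/*
 * Note: despite the "MTX" naming, these macros wrap an sx(9) lock (see
 * CF_MTX_INIT above), presumably so code paths that hold it may sleep;
 * keeping them as macros keeps that choice in one place.
 */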

#define CF_DEBUG(msg...)	do {		\
	if (cf_verbose)				\
		printf("cpufreq: " msg);	\
	} while (0)

static int	cpufreq_attach(device_t dev);
static void	cpufreq_startup_task(void *ctx, int pending);
static int	cpufreq_detach(device_t dev);
static int	cf_set_method(device_t dev, const struct cf_level *level,
		    int priority);
static int	cf_get_method(device_t dev, struct cf_level *level);
static int	cf_levels_method(device_t dev, struct cf_level *levels,
		    int *count);
static int	cpufreq_insert_abs(struct cpufreq_softc *sc,
		    struct cf_setting *sets, int count);
static int	cpufreq_expand_set(struct cpufreq_softc *sc,
		    struct cf_setting_array *set_arr);
static struct cf_level *cpufreq_dup_set(struct cpufreq_softc *sc,
		    struct cf_level *dup, struct cf_setting *set);
static int	cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS);
static int	cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t cpufreq_methods[] = {
	DEVMETHOD(device_probe,		bus_generic_probe),
	DEVMETHOD(device_attach,	cpufreq_attach),
	DEVMETHOD(device_detach,	cpufreq_detach),

	DEVMETHOD(cpufreq_set,		cf_set_method),
	DEVMETHOD(cpufreq_get,		cf_get_method),
	DEVMETHOD(cpufreq_levels,	cf_levels_method),
	{0, 0}
};
static driver_t cpufreq_driver = {
	"cpufreq", cpufreq_methods, sizeof(struct cpufreq_softc)
};
static devclass_t cpufreq_dc;
DRIVER_MODULE(cpufreq, cpu, cpufreq_driver, cpufreq_dc, 0, 0);

static int		cf_lowest_freq;
static int		cf_verbose;
TUNABLE_INT("debug.cpufreq.lowest", &cf_lowest_freq);
TUNABLE_INT("debug.cpufreq.verbose", &cf_verbose);
static SYSCTL_NODE(_debug, OID_AUTO, cpufreq, CTLFLAG_RD, NULL,
    "cpufreq debugging");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, lowest, CTLFLAG_RW, &cf_lowest_freq, 1,
    "Don't provide levels below this frequency.");
SYSCTL_INT(_debug_cpufreq, OID_AUTO, verbose, CTLFLAG_RW, &cf_verbose, 1,
    "Print verbose debugging messages");
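
/*
 * Example usage (values are illustrative): the tunables above may be set
 * from loader.conf(5), e.g. debug.cpufreq.lowest=800 to hide levels below
 * 800 MHz, or changed at runtime with sysctl(8), e.g.
 * "sysctl debug.cpufreq.verbose=1" to enable the CF_DEBUG output.
 */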

static int
cpufreq_attach(device_t dev)
{
	struct cpufreq_softc *sc;
	struct pcpu *pc;
	device_t parent;
	uint64_t rate;
	int numdevs;

	CF_DEBUG("initializing %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	parent = device_get_parent(dev);
	sc->dev = dev;
	sysctl_ctx_init(&sc->sysctl_ctx);
	TAILQ_INIT(&sc->all_levels);
	CF_MTX_INIT(&sc->lock);
	sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
	SLIST_INIT(&sc->saved_freq);
	/* Try to get nominal CPU freq to use it as maximum later if needed */
	sc->max_mhz = cpu_get_nominal_mhz(dev);
	/* If that fails, try to measure the current rate */
	if (sc->max_mhz <= 0) {
		pc = cpu_get_pcpu(dev);
		if (cpu_est_clockrate(pc->pc_cpuid, &rate) == 0)
			sc->max_mhz = rate / 1000000;
		else
			sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
	}

	/*
	 * Only initialize one set of sysctls for all CPUs.  In the future,
	 * if multiple CPUs can have different settings, we can move these
	 * sysctls to be under every CPU instead of just the first one.
	 */
	numdevs = devclass_get_count(cpufreq_dc);
	if (numdevs > 1)
		return (0);

	CF_DEBUG("initializing one-time data for %s\n",
	    device_get_nameunit(dev));
	sc->levels_buf = malloc(CF_MAX_LEVELS * sizeof(*sc->levels_buf),
	    M_DEVBUF, M_WAITOK);
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    cpufreq_curr_sysctl, "I", "Current CPU frequency");
	SYSCTL_ADD_PROC(&sc->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(parent)),
	    OID_AUTO, "freq_levels", CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    cpufreq_levels_sysctl, "A", "CPU frequency levels");
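	/*
	 * These OIDs hang off the parent cpu device's sysctl tree, so on a
	 * typical system they appear as dev.cpu.0.freq and
	 * dev.cpu.0.freq_levels (created only once, per the check above).
	 */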

	/*
	 * Queue a one-shot broadcast that levels have changed.
	 * It will run once the system has completed booting.
	 */
	TASK_INIT(&sc->startup_task, 0, cpufreq_startup_task, dev);
	taskqueue_enqueue(taskqueue_thread, &sc->startup_task);

	return (0);
}

/* Handle any work to be done for all drivers that attached during boot. */
static void
cpufreq_startup_task(void *ctx, int pending)
{

	cpufreq_settings_changed((device_t)ctx);
}

static int
cpufreq_detach(device_t dev)
{
	struct cpufreq_softc *sc;
	struct cf_saved_freq *saved_freq;
	int numdevs;

	CF_DEBUG("shutdown %s\n", device_get_nameunit(dev));
	sc = device_get_softc(dev);
	sysctl_ctx_free(&sc->sysctl_ctx);

	while ((saved_freq = SLIST_FIRST(&sc->saved_freq)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

	/* Only clean up these resources when the last device is detaching. */
	numdevs = devclass_get_count(cpufreq_dc);
	if (numdevs == 1) {
		CF_DEBUG("final shutdown for %s\n", device_get_nameunit(dev));
		free(sc->levels_buf, M_DEVBUF);
	}

	return (0);
}

static int
cf_set_method(device_t dev, const struct cf_level *level, int priority)
{
	struct cpufreq_softc *sc;
	const struct cf_setting *set;
	struct cf_saved_freq *saved_freq, *curr_freq;
	struct pcpu *pc;
	int error, i;

	sc = device_get_softc(dev);
	error = 0;
	set = NULL;
	saved_freq = NULL;

	/* We are going to change levels so notify the pre-change handler. */
	EVENTHANDLER_INVOKE(cpufreq_pre_change, level, &error);
	if (error != 0) {
		EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
		return (error);
	}

	CF_MTX_LOCK(&sc->lock);

#ifdef SMP
	/*
	 * If still booting and secondary CPUs not started yet, don't allow
	 * changing the frequency until they're online.  This is because we
	 * can't switch to them using sched_bind() and thus we'd only be
	 * switching the main CPU.  XXXTODO: Need to think more about how to
	 * handle having different CPUs at different frequencies.
	 */
	if (mp_ncpus > 1 && !smp_started) {
		device_printf(dev, "rejecting change, SMP not started yet\n");
		error = ENXIO;
		goto out;
	}
#endif /* SMP */

	/*
	 * If the requested level has a lower priority, don't allow
	 * the new level right now.
	 */
	if (priority < sc->curr_priority) {
		CF_DEBUG("ignoring, new prio %d less than curr prio %d\n",
		    priority, sc->curr_priority);
		error = EPERM;
		goto out;
	}

	/*
	 * If the caller didn't specify a level and one is saved, prepare to
	 * restore the saved level.  If none has been saved, return an error.
	 */
	if (level == NULL) {
		saved_freq = SLIST_FIRST(&sc->saved_freq);
		if (saved_freq == NULL) {
			CF_DEBUG("NULL level, no saved level\n");
			error = ENXIO;
			goto out;
		}
		level = &saved_freq->level;
		priority = saved_freq->priority;
		CF_DEBUG("restoring saved level, freq %d prio %d\n",
		    level->total_set.freq, priority);
	}

	/* Reject levels that are below our specified threshold. */
	if (level->total_set.freq < cf_lowest_freq) {
		CF_DEBUG("rejecting freq %d, less than %d limit\n",
		    level->total_set.freq, cf_lowest_freq);
		error = EINVAL;
		goto out;
	}

	/* If already at this level, just return. */
	if (sc->curr_level.total_set.freq == level->total_set.freq) {
		CF_DEBUG("skipping freq %d, same as current level %d\n",
		    level->total_set.freq, sc->curr_level.total_set.freq);
		goto skip;
	}

	/* First, set the absolute frequency via its driver. */
	set = &level->abs_set;
	if (set->dev) {
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching. */
		pc = cpu_get_pcpu(set->dev);
		thread_lock(curthread);
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		CF_DEBUG("setting abs freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
		if (error) {
			goto out;
		}
	}

	/* Next, set any/all relative frequencies via their drivers. */
	for (i = 0; i < level->rel_count; i++) {
		set = &level->rel_set[i];
		if (!device_is_attached(set->dev)) {
			error = ENXIO;
			goto out;
		}

		/* Bind to the target CPU before switching. */
		pc = cpu_get_pcpu(set->dev);
		thread_lock(curthread);
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		CF_DEBUG("setting rel freq %d on %s (cpu %d)\n", set->freq,
		    device_get_nameunit(set->dev), PCPU_GET(cpuid));
		error = CPUFREQ_DRV_SET(set->dev, set);
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
		if (error) {
			/* XXX Back out any successful setting? */
			goto out;
		}
	}

skip:
	/*
	 * Before recording the current level, check if we're going to a
	 * higher priority.  If so, save the previous level and priority.
	 */
	if (sc->curr_level.total_set.freq != CPUFREQ_VAL_UNKNOWN &&
	    priority > sc->curr_priority) {
		CF_DEBUG("saving level, freq %d prio %d\n",
		    sc->curr_level.total_set.freq, sc->curr_priority);
		curr_freq = malloc(sizeof(*curr_freq), M_TEMP, M_NOWAIT);
		if (curr_freq == NULL) {
			error = ENOMEM;
			goto out;
		}
		curr_freq->level = sc->curr_level;
		curr_freq->priority = sc->curr_priority;
		SLIST_INSERT_HEAD(&sc->saved_freq, curr_freq, link);
	}
	sc->curr_level = *level;
	sc->curr_priority = priority;

	/* If we were restoring a saved state, reset it to "unused". */
	if (saved_freq != NULL) {
		CF_DEBUG("resetting saved level\n");
		sc->curr_level.total_set.freq = CPUFREQ_VAL_UNKNOWN;
		SLIST_REMOVE_HEAD(&sc->saved_freq, link);
		free(saved_freq, M_TEMP);
	}

out:
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We changed levels (or attempted to) so notify the post-change
	 * handler of new frequency or error.
	 */
	EVENTHANDLER_INVOKE(cpufreq_post_change, level, error);
	if (error && set)
		device_printf(set->dev, "set freq failed, err %d\n", error);

	return (error);
}

static int
cf_get_method(device_t dev, struct cf_level *level)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *curr_set, set;
	struct pcpu *pc;
	device_t *devs;
	int bdiff, count, diff, error, i, n, numdevs;
	uint64_t rate;

	sc = device_get_softc(dev);
	error = 0;
	levels = NULL;

	/* If we already know the current frequency, we're done. */
	CF_MTX_LOCK(&sc->lock);
	curr_set = &sc->curr_level.total_set;
	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get returning known freq %d\n", curr_set->freq);
		goto out;
	}
	CF_MTX_UNLOCK(&sc->lock);

	/*
	 * We need to figure out the current level.  Loop through every
	 * driver, getting the current setting.  Then, attempt to get a best
	 * match of settings against each level.
	 */
	count = CF_MAX_LEVELS;
	levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
	if (levels == NULL)
		return (ENOMEM);
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		free(levels, M_TEMP);
		return (error);
	}
	error = device_get_children(device_get_parent(dev), &devs, &numdevs);
	if (error) {
		free(levels, M_TEMP);
		return (error);
	}

	/*
	 * Reacquire the lock and search for the given level.
	 *
	 * XXX Note: this is not quite right since we really need to go
	 * through each level and compare both absolute and relative
	 * settings for each driver in the system before making a match.
	 * The estimation code below catches this case though.
	 */
	CF_MTX_LOCK(&sc->lock);
	for (n = 0; n < numdevs && curr_set->freq == CPUFREQ_VAL_UNKNOWN; n++) {
		if (!device_is_attached(devs[n]))
			continue;
		if (CPUFREQ_DRV_GET(devs[n], &set) != 0)
			continue;
		for (i = 0; i < count; i++) {
			if (set.freq == levels[i].total_set.freq) {
				sc->curr_level = levels[i];
				break;
			}
		}
	}
	free(devs, M_TEMP);
	if (curr_set->freq != CPUFREQ_VAL_UNKNOWN) {
		CF_DEBUG("get matched freq %d from drivers\n", curr_set->freq);
		goto out;
	}

	/*
	 * We couldn't find an exact match, so attempt to estimate and then
	 * match against a level.
	 */
	pc = cpu_get_pcpu(dev);
	if (pc == NULL) {
		error = ENXIO;
		goto out;
	}
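	/*
	 * cpu_est_clockrate() reports the estimated rate in Hz; convert it
	 * to MHz so it can be compared against the level frequencies below.
	 */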
	cpu_est_clockrate(pc->pc_cpuid, &rate);
	rate /= 1000000;
	bdiff = 1 << 30;
	for (i = 0; i < count; i++) {
		diff = abs(levels[i].total_set.freq - rate);
		if (diff < bdiff) {
			bdiff = diff;
			sc->curr_level = levels[i];
		}
	}
	CF_DEBUG("get estimated freq %d\n", curr_set->freq);

out:
	if (error == 0)
		*level = sc->curr_level;

	CF_MTX_UNLOCK(&sc->lock);
	if (levels)
		free(levels, M_TEMP);
	return (error);
}

static int
cf_levels_method(device_t dev, struct cf_level *levels, int *count)
{
	struct cf_setting_array *set_arr;
	struct cf_setting_lst rel_sets;
	struct cpufreq_softc *sc;
	struct cf_level *lev;
	struct cf_setting *sets;
	struct pcpu *pc;
	device_t *devs;
	int error, i, numdevs, set_count, type;
	uint64_t rate;

	if (levels == NULL || count == NULL)
		return (EINVAL);

	TAILQ_INIT(&rel_sets);
	sc = device_get_softc(dev);
	error = device_get_children(device_get_parent(dev), &devs, &numdevs);
	if (error)
		return (error);
	sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL) {
		free(devs, M_TEMP);
		return (ENOMEM);
	}

	/* Get settings from all cpufreq drivers. */
	CF_MTX_LOCK(&sc->lock);
	for (i = 0; i < numdevs; i++) {
		/* Skip devices that aren't ready. */
		if (!device_is_attached(devs[i]))
			continue;

		/*
		 * Get settings, skipping drivers that offer no settings or
		 * provide settings for informational purposes only.
		 */
		error = CPUFREQ_DRV_TYPE(devs[i], &type);
		if (error || (type & CPUFREQ_FLAG_INFO_ONLY)) {
			if (error == 0) {
				CF_DEBUG("skipping info-only driver %s\n",
				    device_get_nameunit(devs[i]));
			}
			continue;
		}
		set_count = MAX_SETTINGS;
		error = CPUFREQ_DRV_SETTINGS(devs[i], sets, &set_count);
		if (error || set_count == 0)
			continue;

		/* Add the settings to our absolute/relative lists. */
		switch (type & CPUFREQ_TYPE_MASK) {
		case CPUFREQ_TYPE_ABSOLUTE:
			error = cpufreq_insert_abs(sc, sets, set_count);
			break;
		case CPUFREQ_TYPE_RELATIVE:
			CF_DEBUG("adding %d relative settings\n", set_count);
			set_arr = malloc(sizeof(*set_arr), M_TEMP, M_NOWAIT);
			if (set_arr == NULL) {
				error = ENOMEM;
				goto out;
			}
			bcopy(sets, set_arr->sets, set_count * sizeof(*sets));
			set_arr->count = set_count;
			TAILQ_INSERT_TAIL(&rel_sets, set_arr, link);
			break;
		default:
			error = EINVAL;
		}
		if (error)
			goto out;
	}

	/*
	 * If there are no absolute levels, create a fake one at 100%.  We
	 * then cache the clockrate for later use as our base frequency.
	 */
	if (TAILQ_EMPTY(&sc->all_levels)) {
		if (sc->max_mhz == CPUFREQ_VAL_UNKNOWN) {
			sc->max_mhz = cpu_get_nominal_mhz(dev);
			/*
			 * If the CPU can't report a rate for 100%, hope
			 * the CPU is running at its nominal rate right now,
			 * and use that instead.
			 */
			if (sc->max_mhz <= 0) {
				pc = cpu_get_pcpu(dev);
				cpu_est_clockrate(pc->pc_cpuid, &rate);
				sc->max_mhz = rate / 1000000;
			}
		}
		memset(&sets[0], CPUFREQ_VAL_UNKNOWN, sizeof(*sets));
		sets[0].freq = sc->max_mhz;
		sets[0].dev = NULL;
		error = cpufreq_insert_abs(sc, sets, 1);
		if (error)
			goto out;
	}

	/* Create a combined list of absolute + relative levels. */
	TAILQ_FOREACH(set_arr, &rel_sets, link)
		cpufreq_expand_set(sc, set_arr);

	/* If the caller doesn't have enough space, return the actual count. */
	if (sc->all_count > *count) {
		*count = sc->all_count;
		error = E2BIG;
		goto out;
	}

	/* Finally, output the list of levels. */
	i = 0;
	TAILQ_FOREACH(lev, &sc->all_levels, link) {

		/* Skip levels that have a frequency that is too low. */
		if (lev->total_set.freq < cf_lowest_freq) {
			sc->all_count--;
			continue;
		}

		levels[i] = *lev;
		i++;
	}
	*count = sc->all_count;
	error = 0;

out:
	/* Clear all levels since we regenerate them each time. */
	while ((lev = TAILQ_FIRST(&sc->all_levels)) != NULL) {
		TAILQ_REMOVE(&sc->all_levels, lev, link);
		free(lev, M_TEMP);
	}
	sc->all_count = 0;

	CF_MTX_UNLOCK(&sc->lock);
	while ((set_arr = TAILQ_FIRST(&rel_sets)) != NULL) {
		TAILQ_REMOVE(&rel_sets, set_arr, link);
		free(set_arr, M_TEMP);
	}
	free(devs, M_TEMP);
	free(sets, M_TEMP);
	return (error);
}

/*
 * Create levels for an array of absolute settings and insert them in
 * sorted order in the specified list.
 */
static int
cpufreq_insert_abs(struct cpufreq_softc *sc, struct cf_setting *sets,
    int count)
{
	struct cf_level_lst *list;
	struct cf_level *level, *search;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	list = &sc->all_levels;
	for (i = 0; i < count; i++) {
		level = malloc(sizeof(*level), M_TEMP, M_NOWAIT | M_ZERO);
		if (level == NULL)
			return (ENOMEM);
		level->abs_set = sets[i];
		level->total_set = sets[i];
		level->total_set.dev = NULL;
		sc->all_count++;

		if (TAILQ_EMPTY(list)) {
			CF_DEBUG("adding abs setting %d at head\n",
			    sets[i].freq);
			TAILQ_INSERT_HEAD(list, level, link);
			continue;
		}

		TAILQ_FOREACH_REVERSE(search, list, cf_level_lst, link) {
			if (sets[i].freq <= search->total_set.freq) {
				CF_DEBUG("adding abs setting %d after %d\n",
				    sets[i].freq, search->total_set.freq);
				TAILQ_INSERT_AFTER(list, search, level, link);
				break;
			}
		}
	}
	return (0);
}

/*
 * Expand a group of relative settings, creating derived levels from them.
 */
static int
cpufreq_expand_set(struct cpufreq_softc *sc, struct cf_setting_array *set_arr)
{
	struct cf_level *fill, *search;
	struct cf_setting *set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Walk the set of all existing levels in reverse.  This is so we
	 * create derived states from the lowest absolute settings first
	 * and discard duplicates created from higher absolute settings.
	 * For instance, a level of 50 MHz derived from 100 MHz + 50% is
	 * preferable to 200 MHz + 25% because absolute settings are more
	 * efficient since they often change the voltage as well.
	 */
	TAILQ_FOREACH_REVERSE(search, &sc->all_levels, cf_level_lst, link) {
		/* Add each setting to the level, duplicating if necessary. */
		for (i = 0; i < set_arr->count; i++) {
			set = &set_arr->sets[i];

			/*
			 * If this setting is less than 100%, split the level
			 * into two and add this setting to the new level.
			 */
			fill = search;
			if (set->freq < 10000) {
				fill = cpufreq_dup_set(sc, search, set);

				/*
				 * The new level was a duplicate of an existing
				 * level or its absolute setting is too high
				 * so we freed it.  For example, we discard a
				 * derived level of 1000 MHz/25% if a level
				 * of 500 MHz/100% already exists.
				 */
				if (fill == NULL)
					break;
			}

			/* Add this setting to the existing or new level. */
			KASSERT(fill->rel_count < MAX_SETTINGS,
			    ("cpufreq: too many relative drivers (%d)",
			    MAX_SETTINGS));
			fill->rel_set[fill->rel_count] = *set;
			fill->rel_count++;
			CF_DEBUG(
			"expand set added rel setting %d%% to %d level\n",
			    set->freq / 100, fill->total_set.freq);
		}
	}

	return (0);
}

static struct cf_level *
cpufreq_dup_set(struct cpufreq_softc *sc, struct cf_level *dup,
    struct cf_setting *set)
{
	struct cf_level_lst *list;
	struct cf_level *fill, *itr;
	struct cf_setting *fill_set, *itr_set;
	int i;

	CF_MTX_ASSERT(&sc->lock);

	/*
	 * Create a new level, copy it from the old one, and update the
	 * total frequency and power by the percentage specified in the
	 * relative setting.
	 */
	fill = malloc(sizeof(*fill), M_TEMP, M_NOWAIT);
	if (fill == NULL)
		return (NULL);
	*fill = *dup;
	fill_set = &fill->total_set;
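	/*
	 * Relative settings express their value in hundredths of a percent,
	 * so 10000 corresponds to 100%; hence the divisions by 10000 below.
	 */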
	fill_set->freq =
	    ((uint64_t)fill_set->freq * set->freq) / 10000;
	if (fill_set->power != CPUFREQ_VAL_UNKNOWN) {
		fill_set->power = ((uint64_t)fill_set->power * set->freq)
		    / 10000;
	}
	if (set->lat != CPUFREQ_VAL_UNKNOWN) {
		if (fill_set->lat != CPUFREQ_VAL_UNKNOWN)
			fill_set->lat += set->lat;
		else
			fill_set->lat = set->lat;
	}
	CF_DEBUG("dup set considering derived setting %d\n", fill_set->freq);

	/*
	 * If we copied an old level that we already modified (say, at 100%),
	 * we need to remove that setting before adding this one.  Since we
	 * process each setting array in order, we know any settings for this
	 * driver will be found at the end.
	 */
	for (i = fill->rel_count; i != 0; i--) {
		if (fill->rel_set[i - 1].dev != set->dev)
			break;
		CF_DEBUG("removed last relative driver: %s\n",
		    device_get_nameunit(set->dev));
		fill->rel_count--;
	}

	/*
	 * Insert the new level in sorted order.  If it is a duplicate of an
	 * existing level (1) or has an absolute setting higher than the
	 * existing level (2), do not add it.  We can do this since any such
	 * level is guaranteed to use less power.  For example (1), a level
	 * with one absolute setting of 800 MHz uses less power than one
	 * composed of an absolute setting of 1600 MHz and a relative setting
	 * at 50%.  Also for example (2), a level of 800 MHz/75% is preferable
	 * to 1600 MHz/25% even though the latter has a lower total frequency.
	 */
	list = &sc->all_levels;
	KASSERT(!TAILQ_EMPTY(list), ("all levels list empty in dup set"));
	TAILQ_FOREACH_REVERSE(itr, list, cf_level_lst, link) {
		itr_set = &itr->total_set;
		if (CPUFREQ_CMP(fill_set->freq, itr_set->freq)) {
			CF_DEBUG("dup set rejecting %d (dupe)\n",
			    fill_set->freq);
			itr = NULL;
			break;
		} else if (fill_set->freq < itr_set->freq) {
			if (fill->abs_set.freq <= itr->abs_set.freq) {
				CF_DEBUG(
			"dup done, inserting new level %d after %d\n",
				    fill_set->freq, itr_set->freq);
				TAILQ_INSERT_AFTER(list, itr, fill, link);
				sc->all_count++;
			} else {
				CF_DEBUG("dup set rejecting %d (abs too big)\n",
				    fill_set->freq);
				itr = NULL;
			}
			break;
		}
	}

	/* We didn't find a good place for this new level so free it. */
	if (itr == NULL) {
		CF_DEBUG("dup set freeing new level %d (not optimal)\n",
		    fill_set->freq);
		free(fill, M_TEMP);
		fill = NULL;
	}

	return (fill);
}

static int
cpufreq_curr_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	int best, count, diff, bdiff, devcount, error, freq, i, n;
	device_t *devs;

	devs = NULL;
	sc = oidp->oid_arg1;
	levels = sc->levels_buf;

	error = CPUFREQ_GET(sc->dev, &levels[0]);
	if (error)
		goto out;
	freq = levels[0].total_set.freq;
	error = sysctl_handle_int(oidp, &freq, 0, req);
	if (error != 0 || req->newptr == NULL)
		goto out;

	/*
	 * While we only call cpufreq_get() on one device (assuming all
	 * CPUs have equal levels), we call cpufreq_set() on all CPUs.
	 * This is needed for some MP systems.
	 */
	error = devclass_get_devices(cpufreq_dc, &devs, &devcount);
	if (error)
		goto out;
	for (n = 0; n < devcount; n++) {
		count = CF_MAX_LEVELS;
		error = CPUFREQ_LEVELS(devs[n], levels, &count);
		if (error) {
			if (error == E2BIG)
				printf(
			"cpufreq: need to increase CF_MAX_LEVELS\n");
			break;
		}
		best = 0;
		bdiff = 1 << 30;
		for (i = 0; i < count; i++) {
			diff = abs(levels[i].total_set.freq - freq);
			if (diff < bdiff) {
				bdiff = diff;
				best = i;
			}
		}
		error = CPUFREQ_SET(devs[n], &levels[best], CPUFREQ_PRIO_USER);
	}

out:
	if (devs)
		free(devs, M_TEMP);
	return (error);
}

static int
cpufreq_levels_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cpufreq_softc *sc;
	struct cf_level *levels;
	struct cf_setting *set;
	struct sbuf sb;
	int count, error, i;

	sc = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	count = CF_MAX_LEVELS;
	levels = sc->levels_buf;
	if (levels == NULL) {
		sbuf_delete(&sb);
		return (ENOMEM);
	}
	error = CPUFREQ_LEVELS(sc->dev, levels, &count);
	if (error) {
		if (error == E2BIG)
			printf("cpufreq: need to increase CF_MAX_LEVELS\n");
		goto out;
	}
	if (count) {
		for (i = 0; i < count; i++) {
			set = &levels[i].total_set;
			sbuf_printf(&sb, "%d/%d ", set->freq, set->power);
		}
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	sbuf_delete(&sb);
	return (error);
}

static int
cpufreq_settings_sysctl(SYSCTL_HANDLER_ARGS)
{
	device_t dev;
	struct cf_setting *sets;
	struct sbuf sb;
	int error, i, set_count;

	dev = oidp->oid_arg1;
	sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND);

	/* Get settings from the device and generate the output string. */
	set_count = MAX_SETTINGS;
	sets = malloc(set_count * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL) {
		sbuf_delete(&sb);
		return (ENOMEM);
	}
	error = CPUFREQ_DRV_SETTINGS(dev, sets, &set_count);
	if (error)
		goto out;
	if (set_count) {
		for (i = 0; i < set_count; i++)
			sbuf_printf(&sb, "%d/%d ", sets[i].freq, sets[i].power);
	} else
		sbuf_cpy(&sb, "0");
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);

out:
	free(sets, M_TEMP);
	sbuf_delete(&sb);
	return (error);
}

int
cpufreq_register(device_t dev)
{
	struct cpufreq_softc *sc;
	device_t cf_dev, cpu_dev;

	/* Add a sysctl to get each driver's settings separately. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "freq_settings", CTLTYPE_STRING | CTLFLAG_RD, dev, 0,
	    cpufreq_settings_sysctl, "A", "CPU frequency driver settings");

	/*
	 * Add only one cpufreq device to each CPU.  Currently, all CPUs
	 * must offer the same levels and be switched at the same time.
	 */
	cpu_dev = device_get_parent(dev);
	if ((cf_dev = device_find_child(cpu_dev, "cpufreq", -1))) {
		sc = device_get_softc(cf_dev);
		sc->max_mhz = CPUFREQ_VAL_UNKNOWN;
		return (0);
	}

	/* Add the child device and possibly sysctls. */
	cf_dev = BUS_ADD_CHILD(cpu_dev, 0, "cpufreq", -1);
	if (cf_dev == NULL)
		return (ENOMEM);
	device_quiet(cf_dev);

	return (device_probe_and_attach(cf_dev));
}
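
/*
 * Usage sketch (informational): a hardware-specific cpufreq driver such as
 * est(4) or powernow(4) is expected to call cpufreq_register() from its
 * attach routine and cpufreq_unregister() from detach.  If the available
 * settings later change, the driver should call cpufreq_settings_changed()
 * so that cpufreq_levels_changed eventhandler consumers can re-read the
 * levels.
 */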

int
cpufreq_unregister(device_t dev)
{
	device_t cf_dev, *devs;
	int cfcount, devcount, error, i, type;

	/*
	 * If this is the last cpufreq child device, remove the control
	 * device as well.  We identify cpufreq children by calling a method
	 * they support.
	 */
	error = device_get_children(device_get_parent(dev), &devs, &devcount);
	if (error)
		return (error);
	cf_dev = device_find_child(device_get_parent(dev), "cpufreq", -1);
	if (cf_dev == NULL) {
		device_printf(dev,
	"warning: cpufreq_unregister called with no cpufreq device active\n");
		free(devs, M_TEMP);
		return (0);
	}
	cfcount = 0;
	for (i = 0; i < devcount; i++) {
		if (!device_is_attached(devs[i]))
			continue;
		if (CPUFREQ_DRV_TYPE(devs[i], &type) == 0)
			cfcount++;
	}
	if (cfcount <= 1)
		device_delete_child(device_get_parent(cf_dev), cf_dev);
	free(devs, M_TEMP);

	return (0);
}

int
cpufreq_settings_changed(device_t dev)
{

	EVENTHANDLER_INVOKE(cpufreq_levels_changed,
	    device_get_unit(device_get_parent(dev)));
	return (0);
}