summaryrefslogtreecommitdiffstats
path: root/sys/x86/cpufreq
diff options
context:
space:
mode:
authorattilio <attilio@FreeBSD.org>2010-02-25 14:13:39 +0000
committerattilio <attilio@FreeBSD.org>2010-02-25 14:13:39 +0000
commit1b75a98556931f83129a222260dbc32a104b8bc3 (patch)
treed968b8185d78ca4c30d4e35c028883a9027fcb5f /sys/x86/cpufreq
parent2705e272e16f3352f585937363f0557635e5149f (diff)
downloadFreeBSD-src-1b75a98556931f83129a222260dbc32a104b8bc3.zip
FreeBSD-src-1b75a98556931f83129a222260dbc32a104b8bc3.tar.gz
Introduce the new kernel sub-tree x86 which should contain all the code
shared and generalized between our current amd64, i386 and pc98. This is just
an initial step that should lead to a more complete effort. For the moment, a
very simple porting of cpufreq modules, BIOS calls and the whole MD specific
ISA bus part is added to the sub-tree, but ideally a lot of code might be
added and more shared support should grow.

Sponsored by:	Sandvine Incorporated
Reviewed by:	emaste, kib, jhb, imp
Discussed on:	arch
MFC after:	3 weeks
Diffstat (limited to 'sys/x86/cpufreq')
-rw-r--r--sys/x86/cpufreq/est.c1401
-rw-r--r--sys/x86/cpufreq/hwpstate.c507
-rw-r--r--sys/x86/cpufreq/p4tcc.c327
-rw-r--r--sys/x86/cpufreq/powernow.c970
-rw-r--r--sys/x86/cpufreq/smist.c514
5 files changed, 3719 insertions, 0 deletions
diff --git a/sys/x86/cpufreq/est.c b/sys/x86/cpufreq/est.c
new file mode 100644
index 0000000..6a7b514
--- /dev/null
+++ b/sys/x86/cpufreq/est.c
@@ -0,0 +1,1401 @@
+/*-
+ * Copyright (c) 2004 Colin Percival
+ * Copyright (c) 2005 Nate Lawson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted providing that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/cpu.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include "cpufreq_if.h"
+#include <machine/clock.h>
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/specialreg.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+#include "acpi_if.h"
+
+/* Status/control registers (from the IA-32 System Programming Guide). */
+#define MSR_PERF_STATUS 0x198
+#define MSR_PERF_CTL 0x199
+
+/* Register and bit for enabling SpeedStep. */
+#define MSR_MISC_ENABLE 0x1a0
+#define MSR_SS_ENABLE (1<<16)
+
+/* Frequency and MSR control values. */
+typedef struct {
+ uint16_t freq;
+ uint16_t volts;
+ uint16_t id16;
+ int power;
+} freq_info;
+
+/* Identifying characteristics of a processor and supported frequencies. */
+typedef struct {
+ const u_int vendor_id;
+ uint32_t id32;
+ freq_info *freqtab;
+} cpu_info;
+
+struct est_softc {
+ device_t dev;
+ int acpi_settings;
+ int msr_settings;
+ freq_info *freq_list;
+};
+
+/* Convert MHz and mV into IDs for passing to the MSR. */
+#define ID16(MHz, mV, bus_clk) \
+ (((MHz / bus_clk) << 8) | ((mV ? mV - 700 : 0) >> 4))
+#define ID32(MHz_hi, mV_hi, MHz_lo, mV_lo, bus_clk) \
+ ((ID16(MHz_lo, mV_lo, bus_clk) << 16) | (ID16(MHz_hi, mV_hi, bus_clk)))
+
+/* Format for storing IDs in our table. */
+#define FREQ_INFO_PWR(MHz, mV, bus_clk, mW) \
+ { MHz, mV, ID16(MHz, mV, bus_clk), mW }
+#define FREQ_INFO(MHz, mV, bus_clk) \
+ FREQ_INFO_PWR(MHz, mV, bus_clk, CPUFREQ_VAL_UNKNOWN)
+#define INTEL(tab, zhi, vhi, zlo, vlo, bus_clk) \
+ { CPU_VENDOR_INTEL, ID32(zhi, vhi, zlo, vlo, bus_clk), tab }
+#define CENTAUR(tab, zhi, vhi, zlo, vlo, bus_clk) \
+ { CPU_VENDOR_CENTAUR, ID32(zhi, vhi, zlo, vlo, bus_clk), tab }
+
+static int msr_info_enabled = 0;
+TUNABLE_INT("hw.est.msr_info", &msr_info_enabled);
+static int strict = -1;
+TUNABLE_INT("hw.est.strict", &strict);
+
+/* Default bus clock value for Centrino processors. */
+#define INTEL_BUS_CLK 100
+
+/* XXX Update this if new CPUs have more settings. */
+#define EST_MAX_SETTINGS 10
+CTASSERT(EST_MAX_SETTINGS <= MAX_SETTINGS);
+
+/* Estimate in microseconds of latency for performing a transition. */
+#define EST_TRANS_LAT 1000
+
+/*
+ * Frequency (MHz) and voltage (mV) settings. Data from the
+ * Intel Pentium M Processor Datasheet (Order Number 252612), Table 5.
+ *
+ * Dothan processors have multiple VID#s with different settings for
+ * each VID#. Since we can't uniquely identify this info
+ * without undisclosed methods from Intel, we can't support newer
+ * processors with this table method. If ACPI Px states are supported,
+ * we get info from them.
+ */
+static freq_info PM17_130[] = {
+ /* 130nm 1.70GHz Pentium M */
+ FREQ_INFO(1700, 1484, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1308, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1228, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1004, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 956, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM16_130[] = {
+ /* 130nm 1.60GHz Pentium M */
+ FREQ_INFO(1600, 1484, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1420, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1276, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1164, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 956, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM15_130[] = {
+ /* 130nm 1.50GHz Pentium M */
+ FREQ_INFO(1500, 1484, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1452, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1356, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1228, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1116, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 956, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM14_130[] = {
+ /* 130nm 1.40GHz Pentium M */
+ FREQ_INFO(1400, 1484, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1436, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1308, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1180, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 956, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM13_130[] = {
+ /* 130nm 1.30GHz Pentium M */
+ FREQ_INFO(1300, 1388, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1356, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1292, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1260, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 956, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM13_LV_130[] = {
+ /* 130nm 1.30GHz Low Voltage Pentium M */
+ FREQ_INFO(1300, 1180, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
+ FREQ_INFO(1100, 1100, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1020, INTEL_BUS_CLK),
+ FREQ_INFO( 900, 1004, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 956, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM12_LV_130[] = {
+ /* 130 nm 1.20GHz Low Voltage Pentium M */
+ FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
+ FREQ_INFO(1100, 1164, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
+ FREQ_INFO( 900, 1020, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1004, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 956, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM11_LV_130[] = {
+ /* 130 nm 1.10GHz Low Voltage Pentium M */
+ FREQ_INFO(1100, 1180, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1164, INTEL_BUS_CLK),
+ FREQ_INFO( 900, 1100, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1020, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 956, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM11_ULV_130[] = {
+ /* 130 nm 1.10GHz Ultra Low Voltage Pentium M */
+ FREQ_INFO(1100, 1004, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 900, 972, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 956, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 844, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM10_ULV_130[] = {
+ /* 130 nm 1.00GHz Ultra Low Voltage Pentium M */
+ FREQ_INFO(1000, 1004, INTEL_BUS_CLK),
+ FREQ_INFO( 900, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 972, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 844, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+
+/*
+ * Data from "Intel Pentium M Processor on 90nm Process with
+ * 2-MB L2 Cache Datasheet", Order Number 302189, Table 5.
+ */
+static freq_info PM_765A_90[] = {
+ /* 90 nm 2.10GHz Pentium M, VID #A */
+ FREQ_INFO(2100, 1340, INTEL_BUS_CLK),
+ FREQ_INFO(1800, 1276, INTEL_BUS_CLK),
+ FREQ_INFO(1600, 1228, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_765B_90[] = {
+ /* 90 nm 2.10GHz Pentium M, VID #B */
+ FREQ_INFO(2100, 1324, INTEL_BUS_CLK),
+ FREQ_INFO(1800, 1260, INTEL_BUS_CLK),
+ FREQ_INFO(1600, 1212, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_765C_90[] = {
+ /* 90 nm 2.10GHz Pentium M, VID #C */
+ FREQ_INFO(2100, 1308, INTEL_BUS_CLK),
+ FREQ_INFO(1800, 1244, INTEL_BUS_CLK),
+ FREQ_INFO(1600, 1212, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1164, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1116, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_765E_90[] = {
+ /* 90 nm 2.10GHz Pentium M, VID #E */
+ FREQ_INFO(2100, 1356, INTEL_BUS_CLK),
+ FREQ_INFO(1800, 1292, INTEL_BUS_CLK),
+ FREQ_INFO(1600, 1244, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1196, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1148, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_755A_90[] = {
+ /* 90 nm 2.00GHz Pentium M, VID #A */
+ FREQ_INFO(2000, 1340, INTEL_BUS_CLK),
+ FREQ_INFO(1800, 1292, INTEL_BUS_CLK),
+ FREQ_INFO(1600, 1244, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1196, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1148, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_755B_90[] = {
+ /* 90 nm 2.00GHz Pentium M, VID #B */
+ FREQ_INFO(2000, 1324, INTEL_BUS_CLK),
+ FREQ_INFO(1800, 1276, INTEL_BUS_CLK),
+ FREQ_INFO(1600, 1228, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_755C_90[] = {
+ /* 90 nm 2.00GHz Pentium M, VID #C */
+ FREQ_INFO(2000, 1308, INTEL_BUS_CLK),
+ FREQ_INFO(1800, 1276, INTEL_BUS_CLK),
+ FREQ_INFO(1600, 1228, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_755D_90[] = {
+ /* 90 nm 2.00GHz Pentium M, VID #D */
+ FREQ_INFO(2000, 1276, INTEL_BUS_CLK),
+ FREQ_INFO(1800, 1244, INTEL_BUS_CLK),
+ FREQ_INFO(1600, 1196, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1164, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1116, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_745A_90[] = {
+ /* 90 nm 1.80GHz Pentium M, VID #A */
+ FREQ_INFO(1800, 1340, INTEL_BUS_CLK),
+ FREQ_INFO(1600, 1292, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1228, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_745B_90[] = {
+ /* 90 nm 1.80GHz Pentium M, VID #B */
+ FREQ_INFO(1800, 1324, INTEL_BUS_CLK),
+ FREQ_INFO(1600, 1276, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1212, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_745C_90[] = {
+ /* 90 nm 1.80GHz Pentium M, VID #C */
+ FREQ_INFO(1800, 1308, INTEL_BUS_CLK),
+ FREQ_INFO(1600, 1260, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1212, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1148, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_745D_90[] = {
+ /* 90 nm 1.80GHz Pentium M, VID #D */
+ FREQ_INFO(1800, 1276, INTEL_BUS_CLK),
+ FREQ_INFO(1600, 1228, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1180, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1132, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1084, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1036, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_735A_90[] = {
+ /* 90 nm 1.70GHz Pentium M, VID #A */
+ FREQ_INFO(1700, 1340, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1244, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_735B_90[] = {
+ /* 90 nm 1.70GHz Pentium M, VID #B */
+ FREQ_INFO(1700, 1324, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1244, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_735C_90[] = {
+ /* 90 nm 1.70GHz Pentium M, VID #C */
+ FREQ_INFO(1700, 1308, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1228, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_735D_90[] = {
+ /* 90 nm 1.70GHz Pentium M, VID #D */
+ FREQ_INFO(1700, 1276, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1212, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1148, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1100, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_725A_90[] = {
+ /* 90 nm 1.60GHz Pentium M, VID #A */
+ FREQ_INFO(1600, 1340, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1276, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1212, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1132, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_725B_90[] = {
+ /* 90 nm 1.60GHz Pentium M, VID #B */
+ FREQ_INFO(1600, 1324, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1260, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1196, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1132, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_725C_90[] = {
+ /* 90 nm 1.60GHz Pentium M, VID #C */
+ FREQ_INFO(1600, 1308, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1244, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_725D_90[] = {
+ /* 90 nm 1.60GHz Pentium M, VID #D */
+ FREQ_INFO(1600, 1276, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1228, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1164, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_715A_90[] = {
+ /* 90 nm 1.50GHz Pentium M, VID #A */
+ FREQ_INFO(1500, 1340, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1228, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1148, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_715B_90[] = {
+ /* 90 nm 1.50GHz Pentium M, VID #B */
+ FREQ_INFO(1500, 1324, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1212, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1148, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_715C_90[] = {
+ /* 90 nm 1.50GHz Pentium M, VID #C */
+ FREQ_INFO(1500, 1308, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1212, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1132, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1068, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_715D_90[] = {
+ /* 90 nm 1.50GHz Pentium M, VID #D */
+ FREQ_INFO(1500, 1276, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1180, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1116, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_778_90[] = {
+ /* 90 nm 1.60GHz Low Voltage Pentium M */
+ FREQ_INFO(1600, 1116, INTEL_BUS_CLK),
+ FREQ_INFO(1500, 1116, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1100, INTEL_BUS_CLK),
+ FREQ_INFO(1300, 1084, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1068, INTEL_BUS_CLK),
+ FREQ_INFO(1100, 1052, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 900, 1036, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1020, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_758_90[] = {
+ /* 90 nm 1.50GHz Low Voltage Pentium M */
+ FREQ_INFO(1500, 1116, INTEL_BUS_CLK),
+ FREQ_INFO(1400, 1116, INTEL_BUS_CLK),
+ FREQ_INFO(1300, 1100, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1084, INTEL_BUS_CLK),
+ FREQ_INFO(1100, 1068, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 900, 1036, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1020, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_738_90[] = {
+ /* 90 nm 1.40GHz Low Voltage Pentium M */
+ FREQ_INFO(1400, 1116, INTEL_BUS_CLK),
+ FREQ_INFO(1300, 1116, INTEL_BUS_CLK),
+ FREQ_INFO(1200, 1100, INTEL_BUS_CLK),
+ FREQ_INFO(1100, 1068, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 1052, INTEL_BUS_CLK),
+ FREQ_INFO( 900, 1036, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 1020, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 988, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_773G_90[] = {
+	/* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #G */
+	FREQ_INFO(1300,  956, INTEL_BUS_CLK),
+	FREQ_INFO(1200,  940, INTEL_BUS_CLK),
+	FREQ_INFO(1100,  924, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  908, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  876, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_773H_90[] = {
+	/* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #H */
+	FREQ_INFO(1300,  940, INTEL_BUS_CLK),
+	FREQ_INFO(1200,  924, INTEL_BUS_CLK),
+	FREQ_INFO(1100,  908, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  892, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  876, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_773I_90[] = {
+	/* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #I */
+	FREQ_INFO(1300,  924, INTEL_BUS_CLK),
+	FREQ_INFO(1200,  908, INTEL_BUS_CLK),
+	FREQ_INFO(1100,  892, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  876, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  844, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_773J_90[] = {
+	/* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #J */
+	FREQ_INFO(1300,  908, INTEL_BUS_CLK),
+	FREQ_INFO(1200,  908, INTEL_BUS_CLK),
+	FREQ_INFO(1100,  892, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  876, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  844, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_773K_90[] = {
+	/* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #K */
+	FREQ_INFO(1300,  892, INTEL_BUS_CLK),
+	FREQ_INFO(1200,  892, INTEL_BUS_CLK),
+	FREQ_INFO(1100,  876, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  844, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_773L_90[] = {
+	/* 90 nm 1.30GHz Ultra Low Voltage Pentium M, VID #L */
+	FREQ_INFO(1300,  876, INTEL_BUS_CLK),
+	FREQ_INFO(1200,  876, INTEL_BUS_CLK),
+	FREQ_INFO(1100,  860, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  844, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  844, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_753G_90[] = {
+	/* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #G */
+	FREQ_INFO(1200,  956, INTEL_BUS_CLK),
+	FREQ_INFO(1100,  940, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  908, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  892, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_753H_90[] = {
+	/* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #H */
+	FREQ_INFO(1200,  940, INTEL_BUS_CLK),
+	FREQ_INFO(1100,  924, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  908, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  876, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_753I_90[] = {
+	/* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #I */
+	FREQ_INFO(1200,  924, INTEL_BUS_CLK),
+	FREQ_INFO(1100,  908, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  892, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  876, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_753J_90[] = {
+	/* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #J */
+	FREQ_INFO(1200,  908, INTEL_BUS_CLK),
+	FREQ_INFO(1100,  892, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  876, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  844, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_753K_90[] = {
+	/* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #K */
+	FREQ_INFO(1200,  892, INTEL_BUS_CLK),
+	FREQ_INFO(1100,  892, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  876, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  844, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_753L_90[] = {
+	/* 90 nm 1.20GHz Ultra Low Voltage Pentium M, VID #L */
+	FREQ_INFO(1200,  876, INTEL_BUS_CLK),
+	FREQ_INFO(1100,  876, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  844, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  844, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+
+static freq_info PM_733JG_90[] = {
+	/* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #G */
+	FREQ_INFO(1100,  956, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  940, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  908, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  876, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_733JH_90[] = {
+	/* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #H */
+	FREQ_INFO(1100,  940, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  924, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  892, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  876, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_733JI_90[] = {
+	/* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #I */
+	FREQ_INFO(1100,  924, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  908, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  892, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_733JJ_90[] = {
+	/* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #J */
+	FREQ_INFO(1100,  908, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  892, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  876, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_733JK_90[] = {
+	/* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #K */
+	FREQ_INFO(1100,  892, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  876, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  844, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_733JL_90[] = {
+	/* 90 nm 1.10GHz Ultra Low Voltage Pentium M, VID #L */
+	FREQ_INFO(1100,  876, INTEL_BUS_CLK),
+	FREQ_INFO(1000,  876, INTEL_BUS_CLK),
+	FREQ_INFO( 900,  860, INTEL_BUS_CLK),
+	FREQ_INFO( 800,  844, INTEL_BUS_CLK),
+	FREQ_INFO( 600,  812, INTEL_BUS_CLK),
+	FREQ_INFO(   0,    0, 1),	/* terminator (freq == 0), as in the other tables */
+};
+static freq_info PM_733_90[] = {
+ /* 90 nm 1.10GHz Ultra Low Voltage Pentium M */
+ FREQ_INFO(1100, 940, INTEL_BUS_CLK),
+ FREQ_INFO(1000, 924, INTEL_BUS_CLK),
+ FREQ_INFO( 900, 892, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 876, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 812, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+static freq_info PM_723_90[] = {
+ /* 90 nm 1.00GHz Ultra Low Voltage Pentium M */
+ FREQ_INFO(1000, 940, INTEL_BUS_CLK),
+ FREQ_INFO( 900, 908, INTEL_BUS_CLK),
+ FREQ_INFO( 800, 876, INTEL_BUS_CLK),
+ FREQ_INFO( 600, 812, INTEL_BUS_CLK),
+ FREQ_INFO( 0, 0, 1),
+};
+
+/*
+ * VIA C7-M 500 MHz FSB, 400 MHz FSB, and ULV variants.
+ * Data from the "VIA C7-M Processor BIOS Writer's Guide (v2.17)" datasheet.
+ */
+static freq_info C7M_795[] = {
+ /* 2.00GHz Centaur C7-M 533 Mhz FSB */
+ FREQ_INFO_PWR(2000, 1148, 133, 20000),
+ FREQ_INFO_PWR(1867, 1132, 133, 18000),
+ FREQ_INFO_PWR(1600, 1100, 133, 15000),
+ FREQ_INFO_PWR(1467, 1052, 133, 13000),
+ FREQ_INFO_PWR(1200, 1004, 133, 10000),
+ FREQ_INFO_PWR( 800, 844, 133, 7000),
+ FREQ_INFO_PWR( 667, 844, 133, 6000),
+ FREQ_INFO_PWR( 533, 844, 133, 5000),
+ FREQ_INFO(0, 0, 1),
+};
+static freq_info C7M_785[] = {
+ /* 1.80GHz Centaur C7-M 533 Mhz FSB */
+ FREQ_INFO_PWR(1867, 1148, 133, 18000),
+ FREQ_INFO_PWR(1600, 1100, 133, 15000),
+ FREQ_INFO_PWR(1467, 1052, 133, 13000),
+ FREQ_INFO_PWR(1200, 1004, 133, 10000),
+ FREQ_INFO_PWR( 800, 844, 133, 7000),
+ FREQ_INFO_PWR( 667, 844, 133, 6000),
+ FREQ_INFO_PWR( 533, 844, 133, 5000),
+ FREQ_INFO(0, 0, 1),
+};
+static freq_info C7M_765[] = {
+ /* 1.60GHz Centaur C7-M 533 Mhz FSB */
+ FREQ_INFO_PWR(1600, 1084, 133, 15000),
+ FREQ_INFO_PWR(1467, 1052, 133, 13000),
+ FREQ_INFO_PWR(1200, 1004, 133, 10000),
+ FREQ_INFO_PWR( 800, 844, 133, 7000),
+ FREQ_INFO_PWR( 667, 844, 133, 6000),
+ FREQ_INFO_PWR( 533, 844, 133, 5000),
+ FREQ_INFO(0, 0, 1),
+};
+
+static freq_info C7M_794[] = {
+ /* 2.00GHz Centaur C7-M 400 Mhz FSB */
+ FREQ_INFO_PWR(2000, 1148, 100, 20000),
+ FREQ_INFO_PWR(1800, 1132, 100, 18000),
+ FREQ_INFO_PWR(1600, 1100, 100, 15000),
+ FREQ_INFO_PWR(1400, 1052, 100, 13000),
+ FREQ_INFO_PWR(1000, 1004, 100, 10000),
+ FREQ_INFO_PWR( 800, 844, 100, 7000),
+ FREQ_INFO_PWR( 600, 844, 100, 6000),
+ FREQ_INFO_PWR( 400, 844, 100, 5000),
+ FREQ_INFO(0, 0, 1),
+};
+static freq_info C7M_784[] = {
+ /* 1.80GHz Centaur C7-M 400 Mhz FSB */
+ FREQ_INFO_PWR(1800, 1148, 100, 18000),
+ FREQ_INFO_PWR(1600, 1100, 100, 15000),
+ FREQ_INFO_PWR(1400, 1052, 100, 13000),
+ FREQ_INFO_PWR(1000, 1004, 100, 10000),
+ FREQ_INFO_PWR( 800, 844, 100, 7000),
+ FREQ_INFO_PWR( 600, 844, 100, 6000),
+ FREQ_INFO_PWR( 400, 844, 100, 5000),
+ FREQ_INFO(0, 0, 1),
+};
+static freq_info C7M_764[] = {
+ /* 1.60GHz Centaur C7-M 400 Mhz FSB */
+ FREQ_INFO_PWR(1600, 1084, 100, 15000),
+ FREQ_INFO_PWR(1400, 1052, 100, 13000),
+ FREQ_INFO_PWR(1000, 1004, 100, 10000),
+ FREQ_INFO_PWR( 800, 844, 100, 7000),
+ FREQ_INFO_PWR( 600, 844, 100, 6000),
+ FREQ_INFO_PWR( 400, 844, 100, 5000),
+ FREQ_INFO(0, 0, 1),
+};
+static freq_info C7M_754[] = {
+ /* 1.50GHz Centaur C7-M 400 Mhz FSB */
+ FREQ_INFO_PWR(1500, 1004, 100, 12000),
+ FREQ_INFO_PWR(1400, 988, 100, 11000),
+ FREQ_INFO_PWR(1000, 940, 100, 9000),
+ FREQ_INFO_PWR( 800, 844, 100, 7000),
+ FREQ_INFO_PWR( 600, 844, 100, 6000),
+ FREQ_INFO_PWR( 400, 844, 100, 5000),
+ FREQ_INFO(0, 0, 1),
+};
+static freq_info C7M_771[] = {
+ /* 1.20GHz Centaur C7-M 400 Mhz FSB */
+ FREQ_INFO_PWR(1200, 860, 100, 7000),
+ FREQ_INFO_PWR(1000, 860, 100, 6000),
+ FREQ_INFO_PWR( 800, 844, 100, 5500),
+ FREQ_INFO_PWR( 600, 844, 100, 5000),
+ FREQ_INFO_PWR( 400, 844, 100, 4000),
+ FREQ_INFO(0, 0, 1),
+};
+
+static freq_info C7M_775_ULV[] = {
+ /* 1.50GHz Centaur C7-M ULV */
+ FREQ_INFO_PWR(1500, 956, 100, 7500),
+ FREQ_INFO_PWR(1400, 940, 100, 6000),
+ FREQ_INFO_PWR(1000, 860, 100, 5000),
+ FREQ_INFO_PWR( 800, 828, 100, 2800),
+ FREQ_INFO_PWR( 600, 796, 100, 2500),
+ FREQ_INFO_PWR( 400, 796, 100, 2000),
+ FREQ_INFO(0, 0, 1),
+};
+static freq_info C7M_772_ULV[] = {
+ /* 1.20GHz Centaur C7-M ULV */
+ FREQ_INFO_PWR(1200, 844, 100, 5000),
+ FREQ_INFO_PWR(1000, 844, 100, 4000),
+ FREQ_INFO_PWR( 800, 828, 100, 2800),
+ FREQ_INFO_PWR( 600, 796, 100, 2500),
+ FREQ_INFO_PWR( 400, 796, 100, 2000),
+ FREQ_INFO(0, 0, 1),
+};
+static freq_info C7M_779_ULV[] = {
+ /* 1.00GHz Centaur C7-M ULV */
+ FREQ_INFO_PWR(1000, 796, 100, 3500),
+ FREQ_INFO_PWR( 800, 796, 100, 2800),
+ FREQ_INFO_PWR( 600, 796, 100, 2500),
+ FREQ_INFO_PWR( 400, 796, 100, 2000),
+ FREQ_INFO(0, 0, 1),
+};
+static freq_info C7M_770_ULV[] = {
+ /* 1.00GHz Centaur C7-M ULV */
+ FREQ_INFO_PWR(1000, 844, 100, 5000),
+ FREQ_INFO_PWR( 800, 796, 100, 2800),
+ FREQ_INFO_PWR( 600, 796, 100, 2500),
+ FREQ_INFO_PWR( 400, 796, 100, 2000),
+ FREQ_INFO(0, 0, 1),
+};
+
+static cpu_info ESTprocs[] = {
+ INTEL(PM17_130, 1700, 1484, 600, 956, INTEL_BUS_CLK),
+ INTEL(PM16_130, 1600, 1484, 600, 956, INTEL_BUS_CLK),
+ INTEL(PM15_130, 1500, 1484, 600, 956, INTEL_BUS_CLK),
+ INTEL(PM14_130, 1400, 1484, 600, 956, INTEL_BUS_CLK),
+ INTEL(PM13_130, 1300, 1388, 600, 956, INTEL_BUS_CLK),
+ INTEL(PM13_LV_130, 1300, 1180, 600, 956, INTEL_BUS_CLK),
+ INTEL(PM12_LV_130, 1200, 1180, 600, 956, INTEL_BUS_CLK),
+ INTEL(PM11_LV_130, 1100, 1180, 600, 956, INTEL_BUS_CLK),
+ INTEL(PM11_ULV_130, 1100, 1004, 600, 844, INTEL_BUS_CLK),
+ INTEL(PM10_ULV_130, 1000, 1004, 600, 844, INTEL_BUS_CLK),
+ INTEL(PM_765A_90, 2100, 1340, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_765B_90, 2100, 1324, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_765C_90, 2100, 1308, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_765E_90, 2100, 1356, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_755A_90, 2000, 1340, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_755B_90, 2000, 1324, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_755C_90, 2000, 1308, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_755D_90, 2000, 1276, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_745A_90, 1800, 1340, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_745B_90, 1800, 1324, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_745C_90, 1800, 1308, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_745D_90, 1800, 1276, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_735A_90, 1700, 1340, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_735B_90, 1700, 1324, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_735C_90, 1700, 1308, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_735D_90, 1700, 1276, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_725A_90, 1600, 1340, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_725B_90, 1600, 1324, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_725C_90, 1600, 1308, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_725D_90, 1600, 1276, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_715A_90, 1500, 1340, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_715B_90, 1500, 1324, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_715C_90, 1500, 1308, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_715D_90, 1500, 1276, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_778_90, 1600, 1116, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_758_90, 1500, 1116, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_738_90, 1400, 1116, 600, 988, INTEL_BUS_CLK),
+ INTEL(PM_773G_90, 1300, 956, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_773H_90, 1300, 940, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_773I_90, 1300, 924, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_773J_90, 1300, 908, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_773K_90, 1300, 892, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_773L_90, 1300, 876, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_753G_90, 1200, 956, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_753H_90, 1200, 940, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_753I_90, 1200, 924, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_753J_90, 1200, 908, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_753K_90, 1200, 892, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_753L_90, 1200, 876, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_733JG_90, 1100, 956, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_733JH_90, 1100, 940, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_733JI_90, 1100, 924, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_733JJ_90, 1100, 908, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_733JK_90, 1100, 892, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_733JL_90, 1100, 876, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_733_90, 1100, 940, 600, 812, INTEL_BUS_CLK),
+ INTEL(PM_723_90, 1000, 940, 600, 812, INTEL_BUS_CLK),
+
+ CENTAUR(C7M_795, 2000, 1148, 533, 844, 133),
+ CENTAUR(C7M_794, 2000, 1148, 400, 844, 100),
+ CENTAUR(C7M_785, 1867, 1148, 533, 844, 133),
+ CENTAUR(C7M_784, 1800, 1148, 400, 844, 100),
+ CENTAUR(C7M_765, 1600, 1084, 533, 844, 133),
+ CENTAUR(C7M_764, 1600, 1084, 400, 844, 100),
+ CENTAUR(C7M_754, 1500, 1004, 400, 844, 100),
+ CENTAUR(C7M_775_ULV, 1500, 956, 400, 796, 100),
+ CENTAUR(C7M_771, 1200, 860, 400, 844, 100),
+ CENTAUR(C7M_772_ULV, 1200, 844, 400, 796, 100),
+ CENTAUR(C7M_779_ULV, 1000, 796, 400, 796, 100),
+ CENTAUR(C7M_770_ULV, 1000, 844, 400, 796, 100),
+ { 0, 0, NULL },
+};
+
+static void est_identify(driver_t *driver, device_t parent);
+static int est_features(driver_t *driver, u_int *features);
+static int est_probe(device_t parent);
+static int est_attach(device_t parent);
+static int est_detach(device_t parent);
+static int est_get_info(device_t dev);
+static int est_acpi_info(device_t dev, freq_info **freqs);
+static int est_table_info(device_t dev, uint64_t msr, freq_info **freqs);
+static int est_msr_info(device_t dev, uint64_t msr, freq_info **freqs);
+static freq_info *est_get_current(freq_info *freq_list);
+static int est_settings(device_t dev, struct cf_setting *sets, int *count);
+static int est_set(device_t dev, const struct cf_setting *set);
+static int est_get(device_t dev, struct cf_setting *set);
+static int est_type(device_t dev, int *type);
+static int est_set_id16(device_t dev, uint16_t id16, int need_check);
+static void est_get_id16(uint16_t *id16_p);
+
/*
 * newbus method table for est(4).  The device-interface entries are the
 * standard identify/probe/attach/detach lifecycle; the cpufreq_drv_*
 * entries implement the cpufreq(4) driver contract; the ACPI entry lets
 * the ACPI CPU driver query our capabilities.
 */
static device_method_t est_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify, est_identify),
	DEVMETHOD(device_probe, est_probe),
	DEVMETHOD(device_attach, est_attach),
	DEVMETHOD(device_detach, est_detach),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set, est_set),
	DEVMETHOD(cpufreq_drv_get, est_get),
	DEVMETHOD(cpufreq_drv_type, est_type),
	DEVMETHOD(cpufreq_drv_settings, est_settings),

	/* ACPI interface */
	DEVMETHOD(acpi_get_features, est_features),

	{0, 0}
};
+
/* est(4) attaches as a child of each cpu(4) device. */
static driver_t est_driver = {
	"est",
	est_methods,
	sizeof(struct est_softc),
};

static devclass_t est_devclass;
DRIVER_MODULE(est, cpu, est_driver, est_devclass, 0, 0);
+
+static int
+est_features(driver_t *driver, u_int *features)
+{
+
+ /* Notify the ACPI CPU that we support direct access to MSRs */
+ *features = ACPI_CAP_PERF_MSRS;
+ return (0);
+}
+
/*
 * Identify: add an est(4) child to every cpu(4) device whose CPU
 * advertises Enhanced SpeedStep via CPUID.
 */
static void
est_identify(driver_t *driver, device_t parent)
{
	device_t child;

	/* Make sure we're not being doubly invoked. */
	if (device_find_child(parent, "est", -1) != NULL)
		return;

	/*
	 * Check that CPUID is supported and the vendor is Intel or
	 * Centaur (the VIA C7-M parts in our static table also report
	 * the EST feature bit).
	 */
	if (cpu_high == 0 || (cpu_vendor_id != CPU_VENDOR_INTEL &&
	    cpu_vendor_id != CPU_VENDOR_CENTAUR))
		return;

	/*
	 * Check if the CPU supports EST.
	 */
	if (!(cpu_feature2 & CPUID2_EST))
		return;

	/*
	 * We add a child for each CPU since settings must be performed
	 * on each CPU in the SMP case.
	 */
	child = BUS_ADD_CHILD(parent, 10, "est", -1);
	if (child == NULL)
		device_printf(parent, "add est child failed\n");
}
+
/*
 * Probe: defer to acpi_perf(4) when it is actively managing P-states
 * (i.e., not flagged info-only), then make sure the SpeedStep enable
 * bit in MSR_MISC_ENABLE is set, turning it on if the BIOS left it off.
 * Returns ENXIO if the driver is disabled, superseded, or the enable
 * bit cannot be set.
 */
static int
est_probe(device_t dev)
{
	device_t perf_dev;
	uint64_t msr;
	int error, type;

	if (resource_disabled("est", 0))
		return (ENXIO);

	/*
	 * If the ACPI perf driver has attached and is not just offering
	 * info, let it manage things.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	if (perf_dev && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0 && (type & CPUFREQ_FLAG_INFO_ONLY) == 0)
			return (ENXIO);
	}

	/* Attempt to enable SpeedStep if not currently enabled. */
	msr = rdmsr(MSR_MISC_ENABLE);
	if ((msr & MSR_SS_ENABLE) == 0) {
		wrmsr(MSR_MISC_ENABLE, msr | MSR_SS_ENABLE);
		if (bootverbose)
			device_printf(dev, "enabling SpeedStep\n");

		/* Check if the enable failed (some BIOSes lock the bit). */
		msr = rdmsr(MSR_MISC_ENABLE);
		if ((msr & MSR_SS_ENABLE) == 0) {
			device_printf(dev, "failed to enable SpeedStep\n");
			return (ENXIO);
		}
	}

	device_set_desc(dev, "Enhanced SpeedStep Frequency Control");
	return (0);
}
+
+static int
+est_attach(device_t dev)
+{
+ struct est_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ /* On SMP system we can't guarantie independent freq setting. */
+ if (strict == -1 && mp_ncpus > 1)
+ strict = 0;
+ /* Check CPU for supported settings. */
+ if (est_get_info(dev))
+ return (ENXIO);
+
+ cpufreq_register(dev);
+ return (0);
+}
+
+static int
+est_detach(device_t dev)
+{
+ struct est_softc *sc;
+ int error;
+
+ error = cpufreq_unregister(dev);
+ if (error)
+ return (error);
+
+ sc = device_get_softc(dev);
+ if (sc->acpi_settings || sc->msr_settings)
+ free(sc->freq_list, M_DEVBUF);
+ return (0);
+}
+
+/*
+ * Probe for supported CPU settings. First, check our static table of
+ * settings. If no match, try using the ones offered by acpi_perf
+ * (i.e., _PSS). We use ACPI second because some systems (IBM R/T40
+ * series) export both legacy SMM IO-based access and direct MSR access
+ * but the direct access specifies invalid values for _PSS.
+ */
+static int
+est_get_info(device_t dev)
+{
+ struct est_softc *sc;
+ uint64_t msr;
+ int error;
+
+ sc = device_get_softc(dev);
+ msr = rdmsr(MSR_PERF_STATUS);
+ error = est_table_info(dev, msr, &sc->freq_list);
+ if (error)
+ error = est_acpi_info(dev, &sc->freq_list);
+ if (error)
+ error = est_msr_info(dev, msr, &sc->freq_list);
+
+ if (error) {
+ printf(
+ "est: CPU supports Enhanced Speedstep, but is not recognized.\n"
+ "est: cpu_vendor %s, msr %0jx\n", cpu_vendor, msr);
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
/*
 * Build a freq_info table from the _PSS data exported by acpi_perf(4).
 * Each reported setpoint is validated by briefly programming its id16
 * and reading it back (entries that fail are dropped only when "strict"
 * checking is on); the original operating point is restored afterward.
 * On success, a malloc'd table terminated by a zeroed sentinel entry is
 * returned through *freqs.  Returns ENXIO if acpi_perf is unavailable,
 * ENOMEM on allocation failure, or the acpi_perf error code.
 */
static int
est_acpi_info(device_t dev, freq_info **freqs)
{
	struct est_softc *sc;
	struct cf_setting *sets;
	freq_info *table;
	device_t perf_dev;
	int count, error, i, j;
	uint16_t saved_id16;

	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	if (perf_dev == NULL || !device_is_attached(perf_dev))
		return (ENXIO);

	/* Fetch settings from acpi_perf. */
	sc = device_get_softc(dev);
	table = NULL;
	sets = malloc(MAX_SETTINGS * sizeof(*sets), M_TEMP, M_NOWAIT);
	if (sets == NULL)
		return (ENOMEM);
	count = MAX_SETTINGS;
	error = CPUFREQ_DRV_SETTINGS(perf_dev, sets, &count);
	if (error)
		goto out;

	/* Parse settings into our local table format. */
	table = malloc((count + 1) * sizeof(freq_info), M_DEVBUF, M_NOWAIT);
	if (table == NULL) {
		error = ENOMEM;
		goto out;
	}
	est_get_id16(&saved_id16);
	for (i = 0, j = 0; i < count; i++) {
		/*
		 * Confirm id16 value is correct by programming it and
		 * reading it back; _PSS spec[0] carries the control value.
		 */
		if (sets[i].freq > 0) {
			error = est_set_id16(dev, sets[i].spec[0], 1);
			if (error != 0 && strict) {
				if (bootverbose)
					device_printf(dev, "Invalid freq %u, "
					    "ignored.\n", sets[i].freq);
				continue;
			} else if (error != 0 && bootverbose) {
				device_printf(dev, "Can't check freq %u, "
				    "it may be invalid\n",
				    sets[i].freq);
			}
			table[j].freq = sets[i].freq;
			table[j].volts = sets[i].volts;
			table[j].id16 = sets[i].spec[0];
			table[j].power = sets[i].power;
			++j;
		}
	}
	/* restore saved setting */
	est_set_id16(dev, saved_id16, 0);

	/* Mark end of table with a terminator. */
	bzero(&table[j], sizeof(freq_info));

	sc->acpi_settings = TRUE;
	*freqs = table;
	error = 0;

out:
	if (sets)
		free(sets, M_TEMP);
	if (error && table)
		free(table, M_DEVBUF);
	return (error);
}
+
+static int
+est_table_info(device_t dev, uint64_t msr, freq_info **freqs)
+{
+ cpu_info *p;
+ uint32_t id;
+
+ /* Find a table which matches (vendor, id32). */
+ id = msr >> 32;
+ for (p = ESTprocs; p->id32 != 0; p++) {
+ if (p->vendor_id == cpu_vendor_id && p->id32 == id)
+ break;
+ }
+ if (p->id32 == 0)
+ return (EOPNOTSUPP);
+
+ /* Make sure the current setpoint is valid. */
+ if (est_get_current(p->freqtab) == NULL) {
+ device_printf(dev, "current setting not found in table\n");
+ return (EOPNOTSUPP);
+ }
+
+ *freqs = p->freqtab;
+ return (0);
+}
+
/*
 * Return non-zero iff `bus` is a front-side-bus clock (in MHz) this
 * driver knows how to interpret.
 */
static int
bus_speed_ok(int bus)
{

	return (bus == 100 || bus == 133 || bus == 333);
}
+
+/*
+ * Flesh out a simple rate table containing the high and low frequencies
+ * based on the current clock speed and the upper 32 bits of the MSR.
+ */
+static int
+est_msr_info(device_t dev, uint64_t msr, freq_info **freqs)
+{
+ struct est_softc *sc;
+ freq_info *fp;
+ int bus, freq, volts;
+ uint16_t id;
+
+ if (!msr_info_enabled)
+ return (EOPNOTSUPP);
+
+ /* Figure out the bus clock. */
+ freq = tsc_freq / 1000000;
+ id = msr >> 32;
+ bus = freq / (id >> 8);
+ device_printf(dev, "Guessed bus clock (high) of %d MHz\n", bus);
+ if (!bus_speed_ok(bus)) {
+ /* We may be running on the low frequency. */
+ id = msr >> 48;
+ bus = freq / (id >> 8);
+ device_printf(dev, "Guessed bus clock (low) of %d MHz\n", bus);
+ if (!bus_speed_ok(bus))
+ return (EOPNOTSUPP);
+
+ /* Calculate high frequency. */
+ id = msr >> 32;
+ freq = ((id >> 8) & 0xff) * bus;
+ }
+
+ /* Fill out a new freq table containing just the high and low freqs. */
+ sc = device_get_softc(dev);
+ fp = malloc(sizeof(freq_info) * 3, M_DEVBUF, M_WAITOK | M_ZERO);
+
+ /* First, the high frequency. */
+ volts = id & 0xff;
+ if (volts != 0) {
+ volts <<= 4;
+ volts += 700;
+ }
+ fp[0].freq = freq;
+ fp[0].volts = volts;
+ fp[0].id16 = id;
+ fp[0].power = CPUFREQ_VAL_UNKNOWN;
+ device_printf(dev, "Guessed high setting of %d MHz @ %d Mv\n", freq,
+ volts);
+
+ /* Second, the low frequency. */
+ id = msr >> 48;
+ freq = ((id >> 8) & 0xff) * bus;
+ volts = id & 0xff;
+ if (volts != 0) {
+ volts <<= 4;
+ volts += 700;
+ }
+ fp[1].freq = freq;
+ fp[1].volts = volts;
+ fp[1].id16 = id;
+ fp[1].power = CPUFREQ_VAL_UNKNOWN;
+ device_printf(dev, "Guessed low setting of %d MHz @ %d Mv\n", freq,
+ volts);
+
+ /* Table is already terminated due to M_ZERO. */
+ sc->msr_settings = TRUE;
+ *freqs = fp;
+ return (0);
+}
+
+static void
+est_get_id16(uint16_t *id16_p)
+{
+ *id16_p = rdmsr(MSR_PERF_STATUS) & 0xffff;
+}
+
+static int
+est_set_id16(device_t dev, uint16_t id16, int need_check)
+{
+ uint64_t msr;
+ uint16_t new_id16;
+ int ret = 0;
+
+ /* Read the current register, mask out the old, set the new id. */
+ msr = rdmsr(MSR_PERF_CTL);
+ msr = (msr & ~0xffff) | id16;
+ wrmsr(MSR_PERF_CTL, msr);
+
+ /* Wait a short while for the new setting. XXX Is this necessary? */
+ DELAY(EST_TRANS_LAT);
+
+ if (need_check) {
+ est_get_id16(&new_id16);
+ if (new_id16 != id16) {
+ if (bootverbose)
+ device_printf(dev, "Invalid id16 (set, cur) "
+ "= (%u, %u)\n", id16, new_id16);
+ ret = ENXIO;
+ }
+ }
+ return (ret);
+}
+
+static freq_info *
+est_get_current(freq_info *freq_list)
+{
+ freq_info *f;
+ int i;
+ uint16_t id16;
+
+ /*
+ * Try a few times to get a valid value. Sometimes, if the CPU
+ * is in the middle of an asynchronous transition (i.e., P4TCC),
+ * we get a temporary invalid result.
+ */
+ for (i = 0; i < 5; i++) {
+ est_get_id16(&id16);
+ for (f = freq_list; f->id16 != 0; f++) {
+ if (f->id16 == id16)
+ return (f);
+ }
+ DELAY(100);
+ }
+ return (NULL);
+}
+
+static int
+est_settings(device_t dev, struct cf_setting *sets, int *count)
+{
+ struct est_softc *sc;
+ freq_info *f;
+ int i;
+
+ sc = device_get_softc(dev);
+ if (*count < EST_MAX_SETTINGS)
+ return (E2BIG);
+
+ i = 0;
+ for (f = sc->freq_list; f->freq != 0; f++, i++) {
+ sets[i].freq = f->freq;
+ sets[i].volts = f->volts;
+ sets[i].power = f->power;
+ sets[i].lat = EST_TRANS_LAT;
+ sets[i].dev = dev;
+ }
+ *count = i;
+
+ return (0);
+}
+
+static int
+est_set(device_t dev, const struct cf_setting *set)
+{
+ struct est_softc *sc;
+ freq_info *f;
+
+ /* Find the setting matching the requested one. */
+ sc = device_get_softc(dev);
+ for (f = sc->freq_list; f->freq != 0; f++) {
+ if (f->freq == set->freq)
+ break;
+ }
+ if (f->freq == 0)
+ return (EINVAL);
+
+ /* Read the current register, mask out the old, set the new id. */
+ est_set_id16(dev, f->id16, 0);
+
+ return (0);
+}
+
+static int
+est_get(device_t dev, struct cf_setting *set)
+{
+ struct est_softc *sc;
+ freq_info *f;
+
+ sc = device_get_softc(dev);
+ f = est_get_current(sc->freq_list);
+ if (f == NULL)
+ return (ENXIO);
+
+ set->freq = f->freq;
+ set->volts = f->volts;
+ set->power = f->power;
+ set->lat = EST_TRANS_LAT;
+ set->dev = dev;
+ return (0);
+}
+
+static int
+est_type(device_t dev, int *type)
+{
+
+ if (type == NULL)
+ return (EINVAL);
+
+ *type = CPUFREQ_TYPE_ABSOLUTE;
+ return (0);
+}
diff --git a/sys/x86/cpufreq/hwpstate.c b/sys/x86/cpufreq/hwpstate.c
new file mode 100644
index 0000000..3790b76
--- /dev/null
+++ b/sys/x86/cpufreq/hwpstate.c
@@ -0,0 +1,507 @@
+/*-
+ * Copyright (c) 2005 Nate Lawson
+ * Copyright (c) 2004 Colin Percival
+ * Copyright (c) 2004-2005 Bruno Durcot
+ * Copyright (c) 2004 FUKUDA Nobuhiko
+ * Copyright (c) 2009 Michael Reifenberger
+ * Copyright (c) 2009 Norikatsu Shigemura
+ * Copyright (c) 2008-2009 Gen Otsuji
+ *
+ * This code is depending on kern_cpu.c, est.c, powernow.c, p4tcc.c, smist.c
+ * in various parts. The authors of these files are Nate Lawson,
+ * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko.
+ * This code contains patches by Michael Reifenberger and Norikatsu Shigemura.
+ * Thank you.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted providing that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * For more info:
+ * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 10h Processors
+ * 31116 Rev 3.20 February 04, 2009
+ * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 11h Processors
+ * 41256 Rev 3.00 - July 07, 2008
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/cpu.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/pcpu.h>
+#include <sys/smp.h>
+#include <sys/sched.h>
+
+#include <machine/md_var.h>
+#include <machine/cputypes.h>
+#include <machine/specialreg.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+
+#include "acpi_if.h"
+#include "cpufreq_if.h"
+
/* AMD family 10h/11h P-state MSRs (see the BKDG documents cited above). */
#define MSR_AMD_10H_11H_LIMIT 0xc0010061
#define MSR_AMD_10H_11H_CONTROL 0xc0010062
#define MSR_AMD_10H_11H_STATUS 0xc0010063
#define MSR_AMD_10H_11H_CONFIG 0xc0010064

/* The config MSRs describe at most 16 P-states. */
#define AMD_10H_11H_MAX_STATES 16

/* for MSR_AMD_10H_11H_LIMIT C001_0061 */
#define AMD_10H_11H_GET_PSTATE_MAX_VAL(msr) (((msr) >> 4) & 0x7)
#define AMD_10H_11H_GET_PSTATE_LIMIT(msr) (((msr)) & 0x7)
/* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */
#define AMD_10H_11H_CUR_VID(msr) (((msr) >> 9) & 0x7F)
#define AMD_10H_11H_CUR_DID(msr) (((msr) >> 6) & 0x07)
#define AMD_10H_11H_CUR_FID(msr) ((msr) & 0x3F)

/* Print only when the debug.hwpstate_verbose sysctl is set. */
#define HWPSTATE_DEBUG(dev, msg...) \
	do{ \
		if(hwpstate_verbose) \
			device_printf(dev, msg); \
	}while(0)

/* One P-state entry in cpufreq(4) terms plus its hardware id. */
struct hwpstate_setting {
	int freq; /* CPU clock in Mhz or 100ths of a percent. */
	int volts; /* Voltage in mV. */
	int power; /* Power consumed in mW. */
	int lat; /* Transition latency in us. */
	int pstate_id; /* P-State id */
};

struct hwpstate_softc {
	device_t dev;
	struct hwpstate_setting hwpstate_settings[AMD_10H_11H_MAX_STATES];
	int cfnum; /* number of valid entries in hwpstate_settings */
};
+
+static void hwpstate_identify(driver_t *driver, device_t parent);
+static int hwpstate_probe(device_t dev);
+static int hwpstate_attach(device_t dev);
+static int hwpstate_detach(device_t dev);
+static int hwpstate_set(device_t dev, const struct cf_setting *cf);
+static int hwpstate_get(device_t dev, struct cf_setting *cf);
+static int hwpstate_settings(device_t dev, struct cf_setting *sets, int *count);
+static int hwpstate_type(device_t dev, int *type);
+static int hwpstate_shutdown(device_t dev);
+static int hwpstate_features(driver_t *driver, u_int *features);
+static int hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev);
+static int hwpstate_get_info_from_msr(device_t dev);
+static int hwpstate_goto_pstate(device_t dev, int pstate_id);
+
+static int hwpstate_verbose = 0;
+SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RDTUN,
+ &hwpstate_verbose, 0, "Debug hwpstate");
+
/*
 * newbus method table for hwpstate(4): standard device lifecycle hooks
 * plus the cpufreq(4) driver methods and the ACPI feature callback.
 */
static device_method_t hwpstate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify, hwpstate_identify),
	DEVMETHOD(device_probe, hwpstate_probe),
	DEVMETHOD(device_attach, hwpstate_attach),
	DEVMETHOD(device_detach, hwpstate_detach),
	DEVMETHOD(device_shutdown, hwpstate_shutdown),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set, hwpstate_set),
	DEVMETHOD(cpufreq_drv_get, hwpstate_get),
	DEVMETHOD(cpufreq_drv_settings, hwpstate_settings),
	DEVMETHOD(cpufreq_drv_type, hwpstate_type),

	/* ACPI interface */
	DEVMETHOD(acpi_get_features, hwpstate_features),

	{0, 0}
};

static devclass_t hwpstate_devclass;
/* hwpstate(4) attaches as a child of each cpu(4) device. */
static driver_t hwpstate_driver = {
	"hwpstate",
	hwpstate_methods,
	sizeof(struct hwpstate_softc),
};

DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, hwpstate_devclass, 0, 0);
+
/*
 * Go to Px-state on all cpus considering the limit.
 *
 * The requested state is clamped to the hardware P-state limit read
 * from MSR C001_0061, then curthread is bound to each CPU in turn so
 * the control-MSR write takes effect on that CPU.  Returns ENXIO if a
 * CPU cannot be found or fails to reach the target state within the
 * ~10 ms polling window; 0 on success.
 */
static int
hwpstate_goto_pstate(device_t dev, int pstate)
{
	struct pcpu *pc;
	int i;
	uint64_t msr;
	int j;
	int limit;
	int id = pstate;
	int error;

	/* get the current pstate limit; never go faster than it allows */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr);
	if(limit > id)
		id = limit;

	error = 0;
	/*
	 * We are going to the same Px-state on all cpus.
	 */
	for (i = 0; i < mp_ncpus; i++) {
		/* Find each cpu. */
		pc = pcpu_find(i);
		if (pc == NULL)
			return (ENXIO);
		thread_lock(curthread);
		/* Bind to each cpu so the MSR write hits the right core. */
		sched_bind(curthread, pc->pc_cpuid);
		thread_unlock(curthread);
		HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n",
		    id, PCPU_GET(cpuid));
		/* Go To Px-state */
		wrmsr(MSR_AMD_10H_11H_CONTROL, id);
		/* wait loop (100*100 usec is enough ?) */
		for(j = 0; j < 100; j++){
			msr = rdmsr(MSR_AMD_10H_11H_STATUS);
			if(msr == id){
				break;
			}
			DELAY(100);
		}
		/* get the result. not assure msr=id */
		msr = rdmsr(MSR_AMD_10H_11H_STATUS);
		HWPSTATE_DEBUG(dev, "result P%d-state on cpu%d\n",
		    (int)msr, PCPU_GET(cpuid));
		if (msr != id) {
			HWPSTATE_DEBUG(dev, "error: loop is not enough.\n");
			error = ENXIO;
		}
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}
	return (error);
}
+
+static int
+hwpstate_set(device_t dev, const struct cf_setting *cf)
+{
+ struct hwpstate_softc *sc;
+ struct hwpstate_setting *set;
+ int i;
+
+ if (cf == NULL)
+ return (EINVAL);
+ sc = device_get_softc(dev);
+ set = sc->hwpstate_settings;
+ for (i = 0; i < sc->cfnum; i++)
+ if (CPUFREQ_CMP(cf->freq, set[i].freq))
+ break;
+ if (i == sc->cfnum)
+ return (EINVAL);
+
+ return (hwpstate_goto_pstate(dev, set[i].pstate_id));
+}
+
+static int
+hwpstate_get(device_t dev, struct cf_setting *cf)
+{
+ struct hwpstate_softc *sc;
+ struct hwpstate_setting set;
+ uint64_t msr;
+
+ sc = device_get_softc(dev);
+ if (cf == NULL)
+ return (EINVAL);
+ msr = rdmsr(MSR_AMD_10H_11H_STATUS);
+ if(msr >= sc->cfnum)
+ return (EINVAL);
+ set = sc->hwpstate_settings[msr];
+
+ cf->freq = set.freq;
+ cf->volts = set.volts;
+ cf->power = set.power;
+ cf->lat = set.lat;
+ cf->dev = dev;
+ return (0);
+}
+
+static int
+hwpstate_settings(device_t dev, struct cf_setting *sets, int *count)
+{
+ struct hwpstate_softc *sc;
+ struct hwpstate_setting set;
+ int i;
+
+ if (sets == NULL || count == NULL)
+ return (EINVAL);
+ sc = device_get_softc(dev);
+ if (*count < sc->cfnum)
+ return (E2BIG);
+ for (i = 0; i < sc->cfnum; i++, sets++) {
+ set = sc->hwpstate_settings[i];
+ sets->freq = set.freq;
+ sets->volts = set.volts;
+ sets->power = set.power;
+ sets->lat = set.lat;
+ sets->dev = dev;
+ }
+ *count = sc->cfnum;
+
+ return (0);
+}
+
+static int
+hwpstate_type(device_t dev, int *type)
+{
+
+ if (type == NULL)
+ return (EINVAL);
+
+ *type = CPUFREQ_TYPE_ABSOLUTE;
+ return (0);
+}
+
/*
 * Identify: attach a hwpstate(4) child to cpu(4) on AMD family 10h or
 * newer CPUs that advertise hardware P-state support (AMDPM_HW_PSTATE).
 */
static void
hwpstate_identify(driver_t *driver, device_t parent)
{

	/* Avoid being invoked twice for the same cpu device. */
	if (device_find_child(parent, "hwpstate", -1) != NULL)
		return;

	if (cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10)
		return;

	/*
	 * Check if hardware pstate enable bit is set.
	 */
	if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) {
		HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n");
		return;
	}

	if (resource_disabled("hwpstate", 0))
		return;

	if (BUS_ADD_CHILD(parent, 10, "hwpstate", -1) == NULL)
		device_printf(parent, "hwpstate: add child failed\n");
}
+
/*
 * Probe: only unit 0 attaches -- one instance transitions all CPUs
 * (see hwpstate_goto_pstate()).  P-state info is taken from
 * acpi_perf(4) when it is attached in info-only mode and its _PSS
 * count matches the MSR-reported count; otherwise it is read directly
 * from the MSRs.  NOTE(review): "error" doubles as a boolean flag
 * (TRUE) and an errno value in this function.
 */
static int
hwpstate_probe(device_t dev)
{
	struct hwpstate_softc *sc;
	device_t perf_dev;
	uint64_t msr;
	int error, type;

	/*
	 * Only hwpstate0.
	 * It goes well with acpi_throttle.
	 */
	if (device_get_unit(dev) != 0)
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->dev = dev;

	/*
	 * Check if acpi_perf has INFO only flag.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	error = TRUE;
	if (perf_dev && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0) {
			if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) {
				/*
				 * If acpi_perf doesn't have INFO_ONLY flag,
				 * it will take care of pstate transitions.
				 */
				HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n");
				return (ENXIO);
			} else {
				/*
				 * If acpi_perf has INFO_ONLY flag, (_PCT has FFixedHW)
				 * we can get _PSS info from acpi_perf
				 * without going into ACPI.
				 */
				HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n");
				error = hwpstate_get_info_from_acpi_perf(dev, perf_dev);
			}
		}
	}

	if (error == 0) {
		/*
		 * Now we get _PSS info from acpi_perf without error.
		 * Let's check it against the MSR-reported P-state count.
		 */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) {
			HWPSTATE_DEBUG(dev, "msr and acpi _PSS count mismatch.\n");
			error = TRUE;
		}
	}

	/*
	 * If we cannot get info from acpi_perf,
	 * Let's get info from MSRs.
	 */
	if (error)
		error = hwpstate_get_info_from_msr(dev);
	if (error)
		return (error);

	device_set_desc(dev, "Cool`n'Quiet 2.0");
	return (0);
}
+
+static int
+hwpstate_attach(device_t dev)
+{
+
+ return (cpufreq_register(dev));
+}
+
/*
 * Populate the softc's setting table directly from the P-state config
 * MSRs (C001_0064 and up).  Bit 63 of each config MSR marks the entry
 * valid; core frequency is derived from the FID/DID fields using the
 * per-family formulas from the BKDG.  Voltage, power, and latency are
 * left as CPUFREQ_VAL_UNKNOWN.  Returns ENXIO on an invalid MSR or an
 * unsupported CPU family.
 */
static int
hwpstate_get_info_from_msr(device_t dev)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting *hwpstate_set;
	uint64_t msr;
	int family, i, fid, did;

	family = CPUID_TO_FAMILY(cpu_id);
	sc = device_get_softc(dev);
	/* Get pstate count */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr);
	hwpstate_set = sc->hwpstate_settings;
	for (i = 0; i < sc->cfnum; i++) {
		msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i);
		/* Bit 63 set means the P-state entry is valid. */
		if ((msr & ((uint64_t)1 << 63)) != ((uint64_t)1 << 63)) {
			HWPSTATE_DEBUG(dev, "msr is not valid.\n");
			return (ENXIO);
		}
		did = AMD_10H_11H_CUR_DID(msr);
		fid = AMD_10H_11H_CUR_FID(msr);
		switch(family) {
		case 0x11:
			/* fid/did to frequency */
			hwpstate_set[i].freq = 100 * (fid + 0x08) / (1 << did);
			break;
		case 0x10:
			/* fid/did to frequency */
			hwpstate_set[i].freq = 100 * (fid + 0x10) / (1 << did);
			break;
		default:
			HWPSTATE_DEBUG(dev, "get_info_from_msr: AMD family %d CPU's are not implemented yet. sorry.\n", family);
			return (ENXIO);
			break;
		}
		hwpstate_set[i].pstate_id = i;
		/* There was volts calculation, but deleted it. */
		hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN;
	}
	return (0);
}
+
+static int
+hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev)
+{
+ struct hwpstate_softc *sc;
+ struct cf_setting *perf_set;
+ struct hwpstate_setting *hwpstate_set;
+ int count, error, i;
+
+ perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT);
+ if (perf_set == NULL) {
+ HWPSTATE_DEBUG(dev, "nomem\n");
+ return (ENOMEM);
+ }
+ /*
+ * Fetch settings from acpi_perf.
+ * Now it is attached, and has info only flag.
+ */
+ count = MAX_SETTINGS;
+ error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count);
+ if (error) {
+ HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n");
+ goto out;
+ }
+ sc = device_get_softc(dev);
+ sc->cfnum = count;
+ hwpstate_set = sc->hwpstate_settings;
+ for (i = 0; i < count; i++) {
+ if (i == perf_set[i].spec[0]) {
+ hwpstate_set[i].pstate_id = i;
+ hwpstate_set[i].freq = perf_set[i].freq;
+ hwpstate_set[i].volts = perf_set[i].volts;
+ hwpstate_set[i].power = perf_set[i].power;
+ hwpstate_set[i].lat = perf_set[i].lat;
+ } else {
+ HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n");
+ error = ENXIO;
+ goto out;
+ }
+ }
+out:
+ if (perf_set)
+ free(perf_set, M_TEMP);
+ return (error);
+}
+
+static int
+hwpstate_detach(device_t dev)
+{
+
+ hwpstate_goto_pstate(dev, 0);
+ return (cpufreq_unregister(dev));
+}
+
+static int
+hwpstate_shutdown(device_t dev)
+{
+
+ /* hwpstate_goto_pstate(dev, 0); */
+ return (0);
+}
+
+static int
+hwpstate_features(driver_t *driver, u_int *features)
+{
+
+ /* Notify the ACPI CPU that we support direct access to MSRs */
+ *features = ACPI_CAP_PERF_MSRS;
+ return (0);
+}
diff --git a/sys/x86/cpufreq/p4tcc.c b/sys/x86/cpufreq/p4tcc.c
new file mode 100644
index 0000000..29279e3
--- /dev/null
+++ b/sys/x86/cpufreq/p4tcc.c
@@ -0,0 +1,327 @@
+/*-
+ * Copyright (c) 2005 Nate Lawson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Throttle clock frequency by using the thermal control circuit. This
+ * operates independently of SpeedStep and ACPI throttling and is supported
+ * on Pentium 4 and later models (feature TM).
+ *
+ * Reference: Intel Developer's manual v.3 #245472-012
+ *
+ * The original version of this driver was written by Ted Unangst for
+ * OpenBSD and imported by Maxim Sobolev. It was rewritten by Nate Lawson
+ * for use with the cpufreq framework.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/cpu.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#include <machine/md_var.h>
+#include <machine/specialreg.h>
+
+#include "cpufreq_if.h"
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+#include <dev/acpica/acpivar.h>
+#include "acpi_if.h"
+
+struct p4tcc_softc {
+	device_t dev;		/* back-pointer to our device */
+	int set_count;		/* number of usable throttle settings */
+	int lowest_val;		/* lowest allowed step, 1..TCC_NUM_SETTINGS */
+	int auto_mode;		/* TRUE while the TCC is in Automatic mode */
+};
+
+#define TCC_NUM_SETTINGS 8
+
+#define TCC_ENABLE_ONDEMAND (1<<4)
+#define TCC_REG_OFFSET 1
+#define TCC_SPEED_PERCENT(x) ((10000 * (x)) / TCC_NUM_SETTINGS)
+
+static int p4tcc_features(driver_t *driver, u_int *features);
+static void p4tcc_identify(driver_t *driver, device_t parent);
+static int p4tcc_probe(device_t dev);
+static int p4tcc_attach(device_t dev);
+static int p4tcc_settings(device_t dev, struct cf_setting *sets,
+ int *count);
+static int p4tcc_set(device_t dev, const struct cf_setting *set);
+static int p4tcc_get(device_t dev, struct cf_setting *set);
+static int p4tcc_type(device_t dev, int *type);
+
+static device_method_t p4tcc_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_identify, p4tcc_identify),
+ DEVMETHOD(device_probe, p4tcc_probe),
+ DEVMETHOD(device_attach, p4tcc_attach),
+
+ /* cpufreq interface */
+ DEVMETHOD(cpufreq_drv_set, p4tcc_set),
+ DEVMETHOD(cpufreq_drv_get, p4tcc_get),
+ DEVMETHOD(cpufreq_drv_type, p4tcc_type),
+ DEVMETHOD(cpufreq_drv_settings, p4tcc_settings),
+
+ /* ACPI interface */
+ DEVMETHOD(acpi_get_features, p4tcc_features),
+
+ {0, 0}
+};
+
+static driver_t p4tcc_driver = {
+ "p4tcc",
+ p4tcc_methods,
+ sizeof(struct p4tcc_softc),
+};
+
+static devclass_t p4tcc_devclass;
+DRIVER_MODULE(p4tcc, cpu, p4tcc_driver, p4tcc_devclass, 0, 0);
+
+/*
+ * Report this driver's throttling capability to the ACPI CPU driver so
+ * it can be advertised to the platform firmware.
+ */
+static int
+p4tcc_features(driver_t *driver, u_int *features)
+{
+
+	/* Notify the ACPI CPU that we support direct access to MSRs */
+	*features = ACPI_CAP_THR_MSRS;
+	return (0);
+}
+
+static void
+p4tcc_identify(driver_t *driver, device_t parent)
+{
+
+	/* Both the ACPI throttle (ACPI) and thermal monitor (TM) CPU
+	 * features must be present. */
+	if ((cpu_feature & (CPUID_ACPI | CPUID_TM)) != (CPUID_ACPI | CPUID_TM))
+		return;
+
+	/* Make sure we're not being doubly invoked. */
+	if (device_find_child(parent, "p4tcc", -1) != NULL)
+		return;
+
+	/*
+	 * We attach a p4tcc child for every CPU since settings need to
+	 * be performed on every CPU in the SMP case. See section 13.15.3
+	 * of the IA32 Intel Architecture Software Developer's Manual,
+	 * Volume 3, for more info.
+	 */
+	if (BUS_ADD_CHILD(parent, 10, "p4tcc", -1) == NULL)
+		device_printf(parent, "add p4tcc child failed\n");
+}
+
+static int
+p4tcc_probe(device_t dev)
+{
+
+	/* Honor an administrator request to disable the driver via hints. */
+	if (resource_disabled("p4tcc", 0))
+		return (ENXIO);
+
+	device_set_desc(dev, "CPU Frequency Thermal Control");
+	return (0);
+}
+
+/*
+ * Determine how many of the TCC_NUM_SETTINGS duty-cycle steps are usable
+ * on this CPU model (errata disable the lowest one or two), then force
+ * the TCC to 100% so a BIOS-programmed throttle does not persist.
+ */
+static int
+p4tcc_attach(device_t dev)
+{
+	struct p4tcc_softc *sc;
+	struct cf_setting set;
+
+	sc = device_get_softc(dev);
+	sc->dev = dev;
+	sc->set_count = TCC_NUM_SETTINGS;
+
+	/*
+	 * On boot, the TCC is usually in Automatic mode where reading the
+	 * current performance level is likely to produce bogus results.
+	 * We record that state here and don't trust the contents of the
+	 * status MSR until we've set it ourselves.
+	 */
+	sc->auto_mode = TRUE;
+
+	/*
+	 * XXX: After a cursory glance at various Intel specification
+	 * XXX: updates it seems like these tests for errata is bogus.
+	 * XXX: As far as I can tell, the failure mode is benign, in
+	 * XXX: that cpus with no errata will have their bottom two
+	 * XXX: STPCLK# rates disabled, so rather than waste more time
+	 * XXX: hunting down intel docs, just document it and punt. /phk
+	 */
+	switch (cpu_id & 0xff) {
+	case 0x22:
+	case 0x24:
+	case 0x25:
+	case 0x27:
+	case 0x29:
+		/*
+		 * These CPU models hang when set to 12.5%.
+		 * See Errata O50, P44, and Z21.
+		 */
+		sc->set_count -= 1;
+		break;
+	case 0x07:	/* errata N44 and P18 */
+	case 0x0a:
+	case 0x12:
+	case 0x13:
+	case 0x62:	/* Pentium D B1: errata AA21 */
+	case 0x64:	/* Pentium D C1: errata AA21 */
+	case 0x65:	/* Pentium D D0: errata AA21 */
+		/*
+		 * These CPU models hang when set to 12.5% or 25%.
+		 * See Errata N44, P18l and AA21.
+		 */
+		sc->set_count -= 2;
+		break;
+	}
+	/* Lowest usable step index, on the 1..TCC_NUM_SETTINGS scale
+	 * validated by p4tcc_set(). */
+	sc->lowest_val = TCC_NUM_SETTINGS - sc->set_count + 1;
+
+	/*
+	 * Before we finish attach, switch to 100%. It's possible the BIOS
+	 * set us to a lower rate. The user can override this after boot.
+	 */
+	set.freq = 10000;
+	p4tcc_set(dev, &set);
+
+	cpufreq_register(dev);
+	return (0);
+}
+
+/*
+ * Report the list of supported duty-cycle settings, highest first.
+ * Fields we cannot determine (volts, power, lat) are left at
+ * CPUFREQ_VAL_UNKNOWN by the memset below.
+ */
+static int
+p4tcc_settings(device_t dev, struct cf_setting *sets, int *count)
+{
+	struct p4tcc_softc *sc;
+	int i;
+
+	sc = device_get_softc(dev);
+	if (sets == NULL || count == NULL)
+		return (EINVAL);
+	if (*count < sc->set_count)
+		return (E2BIG);
+
+	memset(sets, CPUFREQ_VAL_UNKNOWN, sizeof(*sets) * sc->set_count);
+	for (i = 0; i < sc->set_count; i++) {
+		/* Entry i corresponds to step TCC_NUM_SETTINGS - i. */
+		sets[i].freq = TCC_SPEED_PERCENT(TCC_NUM_SETTINGS - i);
+		sets[i].dev = dev;
+	}
+	*count = sc->set_count;
+
+	return (0);
+}
+
+/*
+ * Program the requested duty cycle.  set->freq is a percentage scaled by
+ * 100 (10000 == 100%, i.e. no throttling) and must map onto an integer
+ * step in [sc->lowest_val .. TCC_NUM_SETTINGS].
+ */
+static int
+p4tcc_set(device_t dev, const struct cf_setting *set)
+{
+	struct p4tcc_softc *sc;
+	uint64_t mask, msr;
+	int val;
+
+	if (set == NULL)
+		return (EINVAL);
+	sc = device_get_softc(dev);
+
+	/*
+	 * Validate requested state converts to a setting that is an integer
+	 * from [sc->lowest_val .. TCC_NUM_SETTINGS].
+	 */
+	val = set->freq * TCC_NUM_SETTINGS / 10000;
+	if (val * 10000 != set->freq * TCC_NUM_SETTINGS ||
+	    val < sc->lowest_val || val > TCC_NUM_SETTINGS)
+		return (EINVAL);
+
+	/*
+	 * Read the current register and mask off the old setting and
+	 * On-Demand bit.  If the new val is < 100%, set it and the On-Demand
+	 * bit, otherwise just return to Automatic mode.
+	 */
+	msr = rdmsr(MSR_THERM_CONTROL);
+	mask = (TCC_NUM_SETTINGS - 1) << TCC_REG_OFFSET;
+	msr &= ~(mask | TCC_ENABLE_ONDEMAND);
+	if (val < TCC_NUM_SETTINGS)
+		msr |= (val << TCC_REG_OFFSET) | TCC_ENABLE_ONDEMAND;
+	wrmsr(MSR_THERM_CONTROL, msr);
+
+	/*
+	 * Record whether we're now in Automatic or On-Demand mode.  We have
+	 * to cache this since there is no reliable way to check if TCC is in
+	 * Automatic mode (i.e., at 100% or possibly 50%).  Reading bit 4 of
+	 * the ACPI Thermal Monitor Control Register produces 0 no matter
+	 * what the current mode.
+	 */
+	if (msr & TCC_ENABLE_ONDEMAND)
+		sc->auto_mode = TRUE;
+	else
+		sc->auto_mode = FALSE;
+
+	return (0);
+}
+
+/*
+ * Report the current duty-cycle setting.  The MSR is only trusted once
+ * this driver has programmed it (tracked via sc->auto_mode).
+ */
+static int
+p4tcc_get(device_t dev, struct cf_setting *set)
+{
+	struct p4tcc_softc *sc;
+	uint64_t msr;
+	int val;
+
+	if (set == NULL)
+		return (EINVAL);
+	sc = device_get_softc(dev);
+
+	/*
+	 * Read the current register and extract the current setting.  If
+	 * in automatic mode, assume we're at TCC_NUM_SETTINGS (100%).
+	 *
+	 * XXX This is not completely reliable since at high temperatures
+	 * the CPU may be automatically throttling to 50% but it's the best
+	 * we can do.
+	 */
+	if (!sc->auto_mode) {
+		msr = rdmsr(MSR_THERM_CONTROL);
+		val = (msr >> TCC_REG_OFFSET) & (TCC_NUM_SETTINGS - 1);
+	} else
+		val = TCC_NUM_SETTINGS;
+
+	memset(set, CPUFREQ_VAL_UNKNOWN, sizeof(*set));
+	set->freq = TCC_SPEED_PERCENT(val);
+	set->dev = dev;
+
+	return (0);
+}
+
+static int
+p4tcc_type(device_t dev, int *type)
+{
+
+	if (type == NULL)
+		return (EINVAL);
+
+	/* Settings are percentages of the current clock, not absolute MHz. */
+	*type = CPUFREQ_TYPE_RELATIVE;
+	return (0);
+}
diff --git a/sys/x86/cpufreq/powernow.c b/sys/x86/cpufreq/powernow.c
new file mode 100644
index 0000000..b248cc8
--- /dev/null
+++ b/sys/x86/cpufreq/powernow.c
@@ -0,0 +1,970 @@
+/*-
+ * Copyright (c) 2004-2005 Bruno Ducrot
+ * Copyright (c) 2004 FUKUDA Nobuhiko <nfukuda@spa.is.uec.ac.jp>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Many thanks to Nate Lawson for his helpful comments on this driver and
+ * to Jung-uk Kim for testing.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/cpu.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/pcpu.h>
+#include <sys/systm.h>
+
+#include <machine/pc/bios.h>
+#include <machine/md_var.h>
+#include <machine/specialreg.h>
+#include <machine/cputypes.h>
+#include <machine/vmparam.h>
+#include <sys/rman.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include "cpufreq_if.h"
+
+#define PN7_TYPE 0
+#define PN8_TYPE 1
+
+/* Flags for some hardware bugs. */
+#define A0_ERRATA 0x1 /* Bugs for the rev. A0 of Athlon (K7):
+ * Interrupts must be disabled and no half
+ * multipliers are allowed */
+#define PENDING_STUCK 0x2 /* With some buggy chipset and some newer AMD64
+ * processor (Rev. G?):
+ * the pending bit from the msr FIDVID_STATUS
+ * is set forever. No workaround :( */
+
+/* Legacy configuration via BIOS table PSB. */
+#define PSB_START 0
+#define PSB_STEP 0x10
+#define PSB_SIG "AMDK7PNOW!"
+#define PSB_LEN 10
+#define PSB_OFF 0
+
+/* In-memory layout of the BIOS PSB table header (located via PSB_SIG). */
+struct psb_header {
+	char signature[10];	/* "AMDK7PNOW!" */
+	uint8_t version;	/* 0x12 for K7, 0x14 for K8 (see pn_decode_pst) */
+	uint8_t flags;
+	uint16_t settlingtime;
+	uint8_t res1;		/* K8: packed RVO/IRT/MVS/BATT fields */
+	uint8_t numpst;		/* number of PST tables that follow */
+} __packed;
+
+/* Header of one PST table; numpstates fid/vid byte pairs follow it. */
+struct pst_header {
+	uint32_t cpuid;		/* extended CPUID this table applies to */
+	uint8_t fsb;		/* front-side bus clock, MHz */
+	uint8_t maxfid;
+	uint8_t startvid;
+	uint8_t numpstates;
+} __packed;
+
+/*
+ * MSRs and bits used by Powernow technology
+ */
+#define MSR_AMDK7_FIDVID_CTL 0xc0010041
+#define MSR_AMDK7_FIDVID_STATUS 0xc0010042
+
+/* Bitfields used by K7 */
+
+#define PN7_CTR_FID(x) ((x) & 0x1f)
+#define PN7_CTR_VID(x) (((x) & 0x1f) << 8)
+#define PN7_CTR_FIDC 0x00010000
+#define PN7_CTR_VIDC 0x00020000
+#define PN7_CTR_FIDCHRATIO 0x00100000
+#define PN7_CTR_SGTC(x) (((uint64_t)(x) & 0x000fffff) << 32)
+
+#define PN7_STA_CFID(x) ((x) & 0x1f)
+#define PN7_STA_SFID(x) (((x) >> 8) & 0x1f)
+#define PN7_STA_MFID(x) (((x) >> 16) & 0x1f)
+#define PN7_STA_CVID(x) (((x) >> 32) & 0x1f)
+#define PN7_STA_SVID(x) (((x) >> 40) & 0x1f)
+#define PN7_STA_MVID(x) (((x) >> 48) & 0x1f)
+
+/* ACPI ctr_val status register to powernow k7 configuration */
+#define ACPI_PN7_CTRL_TO_FID(x) ((x) & 0x1f)
+#define ACPI_PN7_CTRL_TO_VID(x) (((x) >> 5) & 0x1f)
+#define ACPI_PN7_CTRL_TO_SGTC(x) (((x) >> 10) & 0xffff)
+
+/* Bitfields used by K8 */
+
+#define PN8_CTR_FID(x) ((x) & 0x3f)
+#define PN8_CTR_VID(x) (((x) & 0x1f) << 8)
+#define PN8_CTR_PENDING(x) (((x) & 1) << 32)
+
+#define PN8_STA_CFID(x) ((x) & 0x3f)
+#define PN8_STA_SFID(x) (((x) >> 8) & 0x3f)
+#define PN8_STA_MFID(x) (((x) >> 16) & 0x3f)
+#define PN8_STA_PENDING(x) (((x) >> 31) & 0x01)
+#define PN8_STA_CVID(x) (((x) >> 32) & 0x1f)
+#define PN8_STA_SVID(x) (((x) >> 40) & 0x1f)
+#define PN8_STA_MVID(x) (((x) >> 48) & 0x1f)
+
+/* Reserved1 to powernow k8 configuration */
+#define PN8_PSB_TO_RVO(x) ((x) & 0x03)
+#define PN8_PSB_TO_IRT(x) (((x) >> 2) & 0x03)
+#define PN8_PSB_TO_MVS(x) (((x) >> 4) & 0x03)
+#define PN8_PSB_TO_BATT(x) (((x) >> 6) & 0x03)
+
+/* ACPI ctr_val status register to powernow k8 configuration */
+#define ACPI_PN8_CTRL_TO_FID(x) ((x) & 0x3f)
+#define ACPI_PN8_CTRL_TO_VID(x) (((x) >> 6) & 0x1f)
+#define ACPI_PN8_CTRL_TO_VST(x) (((x) >> 11) & 0x1f)
+#define ACPI_PN8_CTRL_TO_MVS(x) (((x) >> 18) & 0x03)
+#define ACPI_PN8_CTRL_TO_PLL(x) (((x) >> 20) & 0x7f)
+#define ACPI_PN8_CTRL_TO_RVO(x) (((x) >> 28) & 0x03)
+#define ACPI_PN8_CTRL_TO_IRT(x) (((x) >> 30) & 0x03)
+
+
+#define WRITE_FIDVID(fid, vid, ctrl) \
+ wrmsr(MSR_AMDK7_FIDVID_CTL, \
+ (((ctrl) << 32) | (1ULL << 16) | ((vid) << 8) | (fid)))
+
+#define COUNT_OFF_IRT(irt) DELAY(10 * (1 << (irt)))
+#define COUNT_OFF_VST(vst) DELAY(20 * (vst))
+
+#define FID_TO_VCO_FID(fid) \
+ (((fid) < 8) ? (8 + ((fid) << 1)) : (fid))
+
+/*
+ * Divide each value by 10 to get the processor multiplier.
+ * Some of those tables are the same as the Linux powernow-k7
+ * implementation by Dave Jones.
+ */
+static int pn7_fid_to_mult[32] = {
+ 110, 115, 120, 125, 50, 55, 60, 65,
+ 70, 75, 80, 85, 90, 95, 100, 105,
+ 30, 190, 40, 200, 130, 135, 140, 210,
+ 150, 225, 160, 165, 170, 180, 0, 0,
+};
+
+
+static int pn8_fid_to_mult[64] = {
+ 40, 45, 50, 55, 60, 65, 70, 75,
+ 80, 85, 90, 95, 100, 105, 110, 115,
+ 120, 125, 130, 135, 140, 145, 150, 155,
+ 160, 165, 170, 175, 180, 185, 190, 195,
+ 200, 205, 210, 215, 220, 225, 230, 235,
+ 240, 245, 250, 255, 260, 265, 270, 275,
+ 280, 285, 290, 295, 300, 305, 310, 315,
+ 320, 325, 330, 335, 340, 345, 350, 355,
+};
+
+/*
+ * Units are in mV.
+ */
+/* Mobile VRM (K7) */
+static int pn7_mobile_vid_to_volts[] = {
+ 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650,
+ 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0,
+ 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100,
+ 1075, 1050, 1025, 1000, 975, 950, 925, 0,
+};
+/* Desktop VRM (K7) */
+static int pn7_desktop_vid_to_volts[] = {
+ 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650,
+ 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0,
+ 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100,
+ 1075, 1050, 1025, 1000, 975, 950, 925, 0,
+};
+/* Desktop and Mobile VRM (K8) */
+static int pn8_vid_to_volts[] = {
+ 1550, 1525, 1500, 1475, 1450, 1425, 1400, 1375,
+ 1350, 1325, 1300, 1275, 1250, 1225, 1200, 1175,
+ 1150, 1125, 1100, 1075, 1050, 1025, 1000, 975,
+ 950, 925, 900, 875, 850, 825, 800, 0,
+};
+
+#define POWERNOW_MAX_STATES 16
+
+/* One entry of the driver's performance-state table. */
+struct powernow_state {
+	int freq;	/* core frequency, kHz */
+	int power;	/* power draw, or CPUFREQ_VAL_UNKNOWN */
+	int fid;	/* frequency ID */
+	int vid;	/* voltage ID */
+};
+
+struct pn_softc {
+	device_t dev;
+	int pn_type;		/* PN7_TYPE (K7) or PN8_TYPE (K8) */
+	struct powernow_state powernow_states[POWERNOW_MAX_STATES];
+	u_int fsb;		/* front-side bus clock, MHz */
+	u_int sgtc;		/* K7: stop-grant timeout count (PN7_CTR_SGTC) */
+	u_int vst;		/* K8: delay between voltage steps (COUNT_OFF_VST) */
+	u_int mvs;		/* K8: voltage step size (1 << mvs VIDs at a time) */
+	u_int pll;		/* K8: multiplied by fsb for FID transitions */
+	u_int rvo;		/* K8: extra voltage steps above target VID */
+	u_int irt;		/* K8: delay after frequency steps (COUNT_OFF_IRT) */
+	int low;		/* from PSB BATT field; unused on ACPI path */
+	int powernow_max_states;	/* valid entries in powernow_states */
+	u_int powernow_state;
+	u_int errata;		/* A0_ERRATA and/or PENDING_STUCK */
+	int *vid_to_volts;	/* VID -> millivolts lookup table */
+};
+
+/*
+ * Offsets in struct cf_setting array for private values given by
+ * acpi_perf driver.
+ */
+#define PX_SPEC_CONTROL 0
+#define PX_SPEC_STATUS 1
+
+static void pn_identify(driver_t *driver, device_t parent);
+static int pn_probe(device_t dev);
+static int pn_attach(device_t dev);
+static int pn_detach(device_t dev);
+static int pn_set(device_t dev, const struct cf_setting *cf);
+static int pn_get(device_t dev, struct cf_setting *cf);
+static int pn_settings(device_t dev, struct cf_setting *sets,
+ int *count);
+static int pn_type(device_t dev, int *type);
+
+static device_method_t pn_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_identify, pn_identify),
+ DEVMETHOD(device_probe, pn_probe),
+ DEVMETHOD(device_attach, pn_attach),
+ DEVMETHOD(device_detach, pn_detach),
+
+ /* cpufreq interface */
+ DEVMETHOD(cpufreq_drv_set, pn_set),
+ DEVMETHOD(cpufreq_drv_get, pn_get),
+ DEVMETHOD(cpufreq_drv_settings, pn_settings),
+ DEVMETHOD(cpufreq_drv_type, pn_type),
+
+ {0, 0}
+};
+
+static devclass_t pn_devclass;
+static driver_t pn_driver = {
+ "powernow",
+ pn_methods,
+ sizeof(struct pn_softc),
+};
+
+DRIVER_MODULE(powernow, cpu, pn_driver, pn_devclass, 0, 0);
+
+/*
+ * Program a K7 FID/VID pair through the FIDVID control MSR.  When the
+ * target multiplier is lower than the current one the FID is written
+ * first and the VID second; when raising, the VID is written first so
+ * the voltage change precedes the frequency increase.  Rev. A0 parts
+ * require interrupts disabled across the transition (A0_ERRATA).
+ */
+static int
+pn7_setfidvid(struct pn_softc *sc, int fid, int vid)
+{
+	int cfid, cvid;
+	uint64_t status, ctl;
+
+	status = rdmsr(MSR_AMDK7_FIDVID_STATUS);
+	cfid = PN7_STA_CFID(status);
+	cvid = PN7_STA_CVID(status);
+
+	/* We're already at the requested level. */
+	if (fid == cfid && vid == cvid)
+		return (0);
+
+	/* Preserve only the FID change ratio bit from the current control. */
+	ctl = rdmsr(MSR_AMDK7_FIDVID_CTL) & PN7_CTR_FIDCHRATIO;
+
+	ctl |= PN7_CTR_FID(fid);
+	ctl |= PN7_CTR_VID(vid);
+	ctl |= PN7_CTR_SGTC(sc->sgtc);
+
+	if (sc->errata & A0_ERRATA)
+		disable_intr();
+
+	if (pn7_fid_to_mult[fid] < pn7_fid_to_mult[cfid]) {
+		wrmsr(MSR_AMDK7_FIDVID_CTL, ctl | PN7_CTR_FIDC);
+		if (vid != cvid)
+			wrmsr(MSR_AMDK7_FIDVID_CTL, ctl | PN7_CTR_VIDC);
+	} else {
+		wrmsr(MSR_AMDK7_FIDVID_CTL, ctl | PN7_CTR_VIDC);
+		if (fid != cfid)
+			wrmsr(MSR_AMDK7_FIDVID_CTL, ctl | PN7_CTR_FIDC);
+	}
+
+	if (sc->errata & A0_ERRATA)
+		enable_intr();
+
+	return (0);
+}
+
+/*
+ * Spin (up to 10000 reads) until the pending bit of the FIDVID status
+ * MSR clears.  Returns 0 with *status holding the final MSR value, or
+ * ENXIO if the bit never cleared (see PENDING_STUCK).
+ */
+static int
+pn8_read_pending_wait(uint64_t *status)
+{
+	int i = 10000;
+
+	do
+		*status = rdmsr(MSR_AMDK7_FIDVID_STATUS);
+	while (PN8_STA_PENDING(*status) && --i);
+
+	return (i == 0 ? ENXIO : 0);
+}
+
+/*
+ * Write a K8 FID/VID pair and wait for the transition to complete,
+ * retrying the write up to 100 times if the pending bit stays set.
+ * Returns 0 on success (with *status updated) or ENXIO on timeout.
+ */
+static int
+pn8_write_fidvid(u_int fid, u_int vid, uint64_t ctrl, uint64_t *status)
+{
+	int i = 100;
+
+	do
+		WRITE_FIDVID(fid, vid, ctrl);
+	while (pn8_read_pending_wait(status) && --i);
+
+	return (i == 0 ? ENXIO : 0);
+}
+
+/*
+ * Perform a K8 FID/VID transition in three phases (per AMD's documented
+ * protocol): (1) step the voltage up, one MVS step at a time, when the
+ * target VID is higher, then apply the ramp voltage offset (RVO);
+ * (2) step the frequency through intermediate VCO FIDs until within
+ * range of the target; (3) settle on the final voltage.  Any stuck
+ * pending bit marks the softc with PENDING_STUCK and aborts.
+ */
+static int
+pn8_setfidvid(struct pn_softc *sc, int fid, int vid)
+{
+	uint64_t status;
+	int cfid, cvid;
+	int rvo;
+	int rv;
+	u_int val;
+
+	rv = pn8_read_pending_wait(&status);
+	if (rv)
+		return (rv);
+
+	cfid = PN8_STA_CFID(status);
+	cvid = PN8_STA_CVID(status);
+
+	/* Already at the requested operating point. */
+	if (fid == cfid && vid == cvid)
+		return (0);
+
+	/*
+	 * Phase 1: Raise core voltage to requested VID if frequency is
+	 * going up.
+	 */
+	while (cvid > vid) {
+		/* Lower VID values mean higher voltage; step by 1 << mvs. */
+		val = cvid - (1 << sc->mvs);
+		rv = pn8_write_fidvid(cfid, (val > 0) ? val : 0, 1ULL, &status);
+		if (rv) {
+			sc->errata |= PENDING_STUCK;
+			return (rv);
+		}
+		cvid = PN8_STA_CVID(status);
+		COUNT_OFF_VST(sc->vst);
+	}
+
+	/* ... then raise to voltage + RVO (if required) */
+	for (rvo = sc->rvo; rvo > 0 && cvid > 0; --rvo) {
+		/* XXX It's not clear from spec if we have to do that
+		 * in 0.25 step or in MVS. Therefore do it as it's done
+		 * under Linux */
+		rv = pn8_write_fidvid(cfid, cvid - 1, 1ULL, &status);
+		if (rv) {
+			sc->errata |= PENDING_STUCK;
+			return (rv);
+		}
+		cvid = PN8_STA_CVID(status);
+		COUNT_OFF_VST(sc->vst);
+	}
+
+	/* Phase 2: change to requested core frequency */
+	if (cfid != fid) {
+		u_int vco_fid, vco_cfid, fid_delta;
+
+		vco_fid = FID_TO_VCO_FID(fid);
+		vco_cfid = FID_TO_VCO_FID(cfid);
+
+		/* Step until the VCO FIDs are within 2 of each other. */
+		while (abs(vco_fid - vco_cfid) > 2) {
+			fid_delta = (vco_cfid & 1) ? 1 : 2;
+			if (fid > cfid) {
+				if (cfid > 7)
+					val = cfid + fid_delta;
+				else
+					val = FID_TO_VCO_FID(cfid) + fid_delta;
+			} else
+				val = cfid - fid_delta;
+			rv = pn8_write_fidvid(val, cvid,
+			    sc->pll * (uint64_t) sc->fsb,
+			    &status);
+			if (rv) {
+				sc->errata |= PENDING_STUCK;
+				return (rv);
+			}
+			cfid = PN8_STA_CFID(status);
+			COUNT_OFF_IRT(sc->irt);
+
+			vco_cfid = FID_TO_VCO_FID(cfid);
+		}
+
+		/* Final jump to the target FID. */
+		rv = pn8_write_fidvid(fid, cvid,
+		    sc->pll * (uint64_t) sc->fsb,
+		    &status);
+		if (rv) {
+			sc->errata |= PENDING_STUCK;
+			return (rv);
+		}
+		cfid = PN8_STA_CFID(status);
+		COUNT_OFF_IRT(sc->irt);
+	}
+
+	/* Phase 3: change to requested voltage */
+	if (cvid != vid) {
+		rv = pn8_write_fidvid(cfid, vid, 1ULL, &status);
+		cvid = PN8_STA_CVID(status);
+		COUNT_OFF_VST(sc->vst);
+	}
+
+	/* Check if transition failed. */
+	if (cfid != fid || cvid != vid)
+		rv = ENXIO;
+
+	return (rv);
+}
+
+/*
+ * Switch to the performance state matching cf->freq (MHz).  Returns
+ * EINVAL if the frequency does not match any table entry, ENXIO if the
+ * hardware is known-broken (PENDING_STUCK) or the transition fails.
+ */
+static int
+pn_set(device_t dev, const struct cf_setting *cf)
+{
+	struct pn_softc *sc;
+	int fid, vid;
+	int i;
+	int rv;
+
+	if (cf == NULL)
+		return (EINVAL);
+	sc = device_get_softc(dev);
+
+	if (sc->errata & PENDING_STUCK)
+		return (ENXIO);
+
+	for (i = 0; i < sc->powernow_max_states; ++i)
+		if (CPUFREQ_CMP(sc->powernow_states[i].freq / 1000, cf->freq))
+			break;
+	/*
+	 * Reject frequencies we do not support; without this check an
+	 * unmatched request would index one past the valid entries.
+	 */
+	if (i == sc->powernow_max_states)
+		return (EINVAL);
+
+	fid = sc->powernow_states[i].fid;
+	vid = sc->powernow_states[i].vid;
+
+	rv = ENODEV;
+
+	switch (sc->pn_type) {
+	case PN7_TYPE:
+		rv = pn7_setfidvid(sc, fid, vid);
+		break;
+	case PN8_TYPE:
+		rv = pn8_setfidvid(sc, fid, vid);
+		break;
+	}
+
+	return (rv);
+}
+
+/*
+ * Report the current operating point by matching the FID/VID read from
+ * the status MSR against the state table.  An unknown combination
+ * yields a setting filled with CPUFREQ_VAL_UNKNOWN and a NULL dev.
+ */
+static int
+pn_get(device_t dev, struct cf_setting *cf)
+{
+	struct pn_softc *sc;
+	u_int cfid = 0, cvid = 0;
+	int i;
+	uint64_t status;
+
+	if (cf == NULL)
+		return (EINVAL);
+	sc = device_get_softc(dev);
+	if (sc->errata & PENDING_STUCK)
+		return (ENXIO);
+
+	status = rdmsr(MSR_AMDK7_FIDVID_STATUS);
+
+	switch (sc->pn_type) {
+	case PN7_TYPE:
+		cfid = PN7_STA_CFID(status);
+		cvid = PN7_STA_CVID(status);
+		break;
+	case PN8_TYPE:
+		cfid = PN8_STA_CFID(status);
+		cvid = PN8_STA_CVID(status);
+		break;
+	}
+	for (i = 0; i < sc->powernow_max_states; ++i)
+		if (cfid == sc->powernow_states[i].fid &&
+		    cvid == sc->powernow_states[i].vid)
+			break;
+
+	if (i < sc->powernow_max_states) {
+		/* freq is stored in kHz; cpufreq expects MHz. */
+		cf->freq = sc->powernow_states[i].freq / 1000;
+		cf->power = sc->powernow_states[i].power;
+		cf->lat = 200;
+		cf->volts = sc->vid_to_volts[cvid];
+		cf->dev = dev;
+	} else {
+		memset(cf, CPUFREQ_VAL_UNKNOWN, sizeof(*cf));
+		cf->dev = NULL;
+	}
+
+	return (0);
+}
+
+/*
+ * Export the driver's performance-state table (fastest entry first) as
+ * an array of cf_setting structures.
+ */
+static int
+pn_settings(device_t dev, struct cf_setting *sets, int *count)
+{
+	struct pn_softc *sc;
+	struct powernow_state *ps;
+	int i;
+
+	if (sets == NULL || count == NULL)
+		return (EINVAL);
+	sc = device_get_softc(dev);
+	if (*count < sc->powernow_max_states)
+		return (E2BIG);
+	for (i = 0; i < sc->powernow_max_states; ++i) {
+		ps = &sc->powernow_states[i];
+		sets[i].freq = ps->freq / 1000;	/* kHz -> MHz */
+		sets[i].power = ps->power;
+		sets[i].lat = 200;
+		sets[i].volts = sc->vid_to_volts[ps->vid];
+		sets[i].dev = dev;
+	}
+	*count = sc->powernow_max_states;
+
+	return (0);
+}
+
+static int
+pn_type(device_t dev, int *type)
+{
+	if (type == NULL)
+		return (EINVAL);
+
+	/* This driver reports absolute frequencies, not percentages. */
+	*type = CPUFREQ_TYPE_ABSOLUTE;
+
+	return (0);
+}
+
+/*
+ * Given a set of pair of fid/vid, and number of performance states,
+ * compute powernow_states via an insertion sort.
+ */
+static int
+decode_pst(struct pn_softc *sc, uint8_t *p, int npstates)
+{
+	int i, j, n;
+	struct powernow_state state;
+
+	/* Mark all slots unknown; only the first n will be filled. */
+	for (i = 0; i < POWERNOW_MAX_STATES; ++i)
+		sc->powernow_states[i].freq = CPUFREQ_VAL_UNKNOWN;
+
+	for (n = 0, i = 0; i < npstates; ++i) {
+		/* Each PST entry is a fid byte followed by a vid byte. */
+		state.fid = *p++;
+		state.vid = *p++;
+		state.power = CPUFREQ_VAL_UNKNOWN;
+
+		switch (sc->pn_type) {
+		case PN7_TYPE:
+			/* freq in kHz: fid_to_mult is 10x the multiplier. */
+			state.freq = 100 * pn7_fid_to_mult[state.fid] * sc->fsb;
+			/* A0 parts cannot use half multipliers; skip them. */
+			if ((sc->errata & A0_ERRATA) &&
+			    (pn7_fid_to_mult[state.fid] % 10) == 5)
+				continue;
+			break;
+		case PN8_TYPE:
+			state.freq = 100 * pn8_fid_to_mult[state.fid] * sc->fsb;
+			break;
+		}
+
+		/* Insertion sort: keep the table ordered fastest first. */
+		j = n;
+		while (j > 0 && sc->powernow_states[j - 1].freq < state.freq) {
+			memcpy(&sc->powernow_states[j],
+			    &sc->powernow_states[j - 1],
+			    sizeof(struct powernow_state));
+			--j;
+		}
+		memcpy(&sc->powernow_states[j], &state,
+		    sizeof(struct powernow_state));
+		++n;
+	}
+
+	/*
+	 * Fix powernow_max_states, if errata a0 give us less states
+	 * than expected.
+	 */
+	sc->powernow_max_states = n;
+
+	if (bootverbose)
+		for (i = 0; i < sc->powernow_max_states; ++i) {
+			int fid = sc->powernow_states[i].fid;
+			int vid = sc->powernow_states[i].vid;
+
+			printf("powernow: %2i %8dkHz FID %02x VID %02x\n",
+			    i,
+			    sc->powernow_states[i].freq,
+			    fid,
+			    vid);
+		}
+
+	return (0);
+}
+
+/*
+ * Return TRUE (1) when the extended CPUID value belongs to one of the
+ * K7 models this driver recognizes, FALSE (0) otherwise.
+ */
+static int
+cpuid_is_k7(u_int cpuid)
+{
+
+	return (cpuid == 0x760 || cpuid == 0x761 || cpuid == 0x762 ||
+	    cpuid == 0x770 || cpuid == 0x771 || cpuid == 0x780 ||
+	    cpuid == 0x781 || cpuid == 0x7a0);
+}
+
+/*
+ * Build the state table from the legacy BIOS PSB/PST tables (fallback
+ * when no usable acpi_perf data exists).  Locates the PSB by signature
+ * search, validates its version against the CPU type, extracts timing
+ * parameters, then walks the PST tables looking for one that matches
+ * this CPU's extended CPUID, max FID and start/max VID.
+ */
+static int
+pn_decode_pst(device_t dev)
+{
+	int maxpst;
+	struct pn_softc *sc;
+	u_int cpuid, maxfid, startvid;
+	u_long sig;
+	struct psb_header *psb;
+	uint8_t *p;
+	u_int regs[4];
+	uint64_t status;
+
+	sc = device_get_softc(dev);
+
+	do_cpuid(0x80000001, regs);
+	cpuid = regs[0];
+
+	if ((cpuid & 0xfff) == 0x760)
+		sc->errata |= A0_ERRATA;
+
+	status = rdmsr(MSR_AMDK7_FIDVID_STATUS);
+
+	switch (sc->pn_type) {
+	case PN7_TYPE:
+		maxfid = PN7_STA_MFID(status);
+		startvid = PN7_STA_SVID(status);
+		break;
+	case PN8_TYPE:
+		maxfid = PN8_STA_MFID(status);
+		/*
+		 * we should actually use a variable named 'maxvid' if K8,
+		 * but why introducing a new variable for that?
+		 */
+		startvid = PN8_STA_MVID(status);
+		break;
+	default:
+		return (ENODEV);
+	}
+
+	if (bootverbose) {
+		device_printf(dev, "STATUS: 0x%jx\n", status);
+		device_printf(dev, "STATUS: maxfid: 0x%02x\n", maxfid);
+		device_printf(dev, "STATUS: %s: 0x%02x\n",
+		    sc->pn_type == PN7_TYPE ? "startvid" : "maxvid",
+		    startvid);
+	}
+
+	sig = bios_sigsearch(PSB_START, PSB_SIG, PSB_LEN, PSB_STEP, PSB_OFF);
+	if (sig) {
+		struct pst_header *pst;
+
+		psb = (struct psb_header*)(uintptr_t)BIOS_PADDRTOVADDR(sig);
+
+		switch (psb->version) {
+		default:
+			return (ENODEV);
+		case 0x14:
+			/*
+			 * We can't be picky about numpst since at least
+			 * some systems have a value of 1 and some have 2.
+			 * We trust that cpuid_is_k7() will be better at
+			 * catching that we're on a K8 anyway.
+			 */
+			if (sc->pn_type != PN8_TYPE)
+				return (EINVAL);
+			sc->vst = psb->settlingtime;
+			/* The trailing commas below are comma operators;
+			 * this is a single statement. */
+			sc->rvo = PN8_PSB_TO_RVO(psb->res1),
+			sc->irt = PN8_PSB_TO_IRT(psb->res1),
+			sc->mvs = PN8_PSB_TO_MVS(psb->res1),
+			sc->low = PN8_PSB_TO_BATT(psb->res1);
+			if (bootverbose) {
+				device_printf(dev, "PSB: VST: %d\n",
+				    psb->settlingtime);
+				device_printf(dev, "PSB: RVO %x IRT %d "
+				    "MVS %d BATT %d\n",
+				    sc->rvo,
+				    sc->irt,
+				    sc->mvs,
+				    sc->low);
+			}
+			break;
+		case 0x12:
+			if (sc->pn_type != PN7_TYPE)
+				return (EINVAL);
+			/* Clamp SGTC to at least 100 * fsb. */
+			sc->sgtc = psb->settlingtime * sc->fsb;
+			if (sc->sgtc < 100 * sc->fsb)
+				sc->sgtc = 100 * sc->fsb;
+			break;
+		}
+
+		p = ((uint8_t *) psb) + sizeof(struct psb_header);
+		pst = (struct pst_header*) p;
+
+		maxpst = 200;
+
+		/*
+		 * NOTE(review): the inner 'pst' below shadows the outer one,
+		 * and the while() condition is outside the inner scope, so
+		 * it always tests the *first* PST header.  Likewise the
+		 * PN7_TYPE 'continue' skips the advancement of 'p', so an
+		 * fsb mismatch re-examines the same entry until maxpst is
+		 * exhausted.  Termination is still guaranteed by maxpst;
+		 * confirm intent before restructuring.
+		 */
+		do {
+			struct pst_header *pst = (struct pst_header*) p;
+
+			if (cpuid == pst->cpuid &&
+			    maxfid == pst->maxfid &&
+			    startvid == pst->startvid) {
+				sc->powernow_max_states = pst->numpstates;
+				switch (sc->pn_type) {
+				case PN7_TYPE:
+					if (abs(sc->fsb - pst->fsb) > 5)
+						continue;
+					break;
+				case PN8_TYPE:
+					break;
+				}
+				return (decode_pst(sc,
+				    p + sizeof(struct pst_header),
+				    sc->powernow_max_states));
+			}
+
+			p += sizeof(struct pst_header) + (2 * pst->numpstates);
+		} while (cpuid_is_k7(pst->cpuid) && maxpst--);
+
+		device_printf(dev, "no match for extended cpuid %.3x\n", cpuid);
+	}
+
+	return (ENODEV);
+}
+
+/*
+ * Build the state table from acpi_perf's info-only _PSS data.  The
+ * per-state "control" word is unpacked with the ACPI_PN[78]_CTRL_TO_*
+ * macros; global timing parameters are taken from the last control word
+ * processed (ctrl holds its value after the loop).  Returns non-zero to
+ * make the caller fall back to the legacy BIOS PSB tables.
+ */
+static int
+pn_decode_acpi(device_t dev, device_t perf_dev)
+{
+	int i, j, n;
+	uint64_t status;
+	uint32_t ctrl;
+	u_int cpuid;
+	u_int regs[4];
+	struct pn_softc *sc;
+	struct powernow_state state;
+	struct cf_setting sets[POWERNOW_MAX_STATES];
+	int count = POWERNOW_MAX_STATES;
+	int type;
+	int rv;
+
+	if (perf_dev == NULL)
+		return (ENXIO);
+
+	rv = CPUFREQ_DRV_SETTINGS(perf_dev, sets, &count);
+	if (rv)
+		return (ENXIO);
+	/* acpi_perf must be in info-only mode for us to take over. */
+	rv = CPUFREQ_DRV_TYPE(perf_dev, &type);
+	if (rv || (type & CPUFREQ_FLAG_INFO_ONLY) == 0)
+		return (ENXIO);
+
+	sc = device_get_softc(dev);
+
+	do_cpuid(0x80000001, regs);
+	cpuid = regs[0];
+	if ((cpuid & 0xfff) == 0x760)
+		sc->errata |= A0_ERRATA;
+
+	ctrl = 0;
+	sc->sgtc = 0;
+	for (n = 0, i = 0; i < count; ++i) {
+		ctrl = sets[i].spec[PX_SPEC_CONTROL];
+		switch (sc->pn_type) {
+		case PN7_TYPE:
+			state.fid = ACPI_PN7_CTRL_TO_FID(ctrl);
+			state.vid = ACPI_PN7_CTRL_TO_VID(ctrl);
+			/* A0 parts cannot use half multipliers; skip them. */
+			if ((sc->errata & A0_ERRATA) &&
+			    (pn7_fid_to_mult[state.fid] % 10) == 5)
+				continue;
+			state.freq = 100 * pn7_fid_to_mult[state.fid] * sc->fsb;
+			break;
+		case PN8_TYPE:
+			state.fid = ACPI_PN8_CTRL_TO_FID(ctrl);
+			state.vid = ACPI_PN8_CTRL_TO_VID(ctrl);
+			state.freq = 100 * pn8_fid_to_mult[state.fid] * sc->fsb;
+			break;
+		}
+
+		state.power = sets[i].power;
+
+		/* Insertion sort: keep the table ordered fastest first. */
+		j = n;
+		while (j > 0 && sc->powernow_states[j - 1].freq < state.freq) {
+			memcpy(&sc->powernow_states[j],
+			    &sc->powernow_states[j - 1],
+			    sizeof(struct powernow_state));
+			--j;
+		}
+		memcpy(&sc->powernow_states[j], &state,
+		    sizeof(struct powernow_state));
+		++n;
+	}
+
+	sc->powernow_max_states = n;
+	/* state now refers to the fastest entry. */
+	state = sc->powernow_states[0];
+	status = rdmsr(MSR_AMDK7_FIDVID_STATUS);
+
+	switch (sc->pn_type) {
+	case PN7_TYPE:
+		sc->sgtc = ACPI_PN7_CTRL_TO_SGTC(ctrl);
+		/*
+		 * XXX Some bios forget the max frequency!
+		 * This maybe indicates we have the wrong tables. Therefore,
+		 * don't implement a quirk, but fallback to BIOS legacy
+		 * tables instead.
+		 */
+		if (PN7_STA_MFID(status) != state.fid) {
+			device_printf(dev, "ACPI MAX frequency not found\n");
+			return (EINVAL);
+		}
+		break;
+	case PN8_TYPE:
+		/* The trailing commas below are comma operators; this is a
+		 * single statement unpacking the last control word. */
+		sc->vst = ACPI_PN8_CTRL_TO_VST(ctrl),
+		sc->mvs = ACPI_PN8_CTRL_TO_MVS(ctrl),
+		sc->pll = ACPI_PN8_CTRL_TO_PLL(ctrl),
+		sc->rvo = ACPI_PN8_CTRL_TO_RVO(ctrl),
+		sc->irt = ACPI_PN8_CTRL_TO_IRT(ctrl);
+		sc->low = 0; /* XXX */
+
+		/*
+		 * powernow k8 supports only one low frequency.
+		 */
+		if (sc->powernow_max_states >= 2 &&
+		    (sc->powernow_states[sc->powernow_max_states - 2].fid < 8))
+			return (EINVAL);
+		break;
+	}
+
+	return (0);
+}
+
+/*
+ * Add a powernow child for CPUs that advertise FID and VID control and
+ * belong to family 6 (K7) or family 15 (K8).
+ */
+static void
+pn_identify(driver_t *driver, device_t parent)
+{
+	u_int family;
+
+	/* Both FID and VID control must be advertised. */
+	if ((amd_pminfo & AMDPM_FID) == 0 || (amd_pminfo & AMDPM_VID) == 0)
+		return;
+	family = cpu_id & 0xf00;
+	if (family != 0x600 && family != 0xf00)
+		return;
+	/* Avoid adding a second instance on re-scan. */
+	if (device_find_child(parent, "powernow", -1) != NULL)
+		return;
+	if (BUS_ADD_CHILD(parent, 10, "powernow", -1) == NULL)
+		device_printf(parent, "powernow: add child failed\n");
+}
+
+/*
+ * Identify the PowerNow! flavor (K7, family 6 vs. K8, family 15), pick
+ * the matching VID-to-voltage table and derive the FSB clock from the
+ * estimated core clock rate and the current FID multiplier.
+ */
+static int
+pn_probe(device_t dev)
+{
+	struct pn_softc *sc;
+	uint64_t status;
+	uint64_t rate;
+	struct pcpu *pc;
+	u_int sfid, mfid, cfid;
+
+	sc = device_get_softc(dev);
+	sc->errata = 0;
+	status = rdmsr(MSR_AMDK7_FIDVID_STATUS);
+
+	pc = cpu_get_pcpu(dev);
+	if (pc == NULL)
+		return (ENODEV);
+
+	cpu_est_clockrate(pc->pc_cpuid, &rate);
+
+	switch (cpu_id & 0xf00) {
+	case 0x600:
+		sfid = PN7_STA_SFID(status);
+		mfid = PN7_STA_MFID(status);
+		cfid = PN7_STA_CFID(status);
+		sc->pn_type = PN7_TYPE;
+		/* rate is in Hz, fid_to_mult is 10x the multiplier. */
+		sc->fsb = rate / 100000 / pn7_fid_to_mult[cfid];
+
+		/*
+		 * If start FID is different to max FID, then it is a
+		 * mobile processor. If not, it is a low powered desktop
+		 * processor.
+		 */
+		if (sfid != mfid) {
+			sc->vid_to_volts = pn7_mobile_vid_to_volts;
+			device_set_desc(dev, "PowerNow! K7");
+		} else {
+			sc->vid_to_volts = pn7_desktop_vid_to_volts;
+			device_set_desc(dev, "Cool`n'Quiet K7");
+		}
+		break;
+
+	case 0xf00:
+		sfid = PN8_STA_SFID(status);
+		mfid = PN8_STA_MFID(status);
+		cfid = PN8_STA_CFID(status);
+		sc->pn_type = PN8_TYPE;
+		sc->vid_to_volts = pn8_vid_to_volts;
+		sc->fsb = rate / 100000 / pn8_fid_to_mult[cfid];
+
+		if (sfid != mfid)
+			device_set_desc(dev, "PowerNow! K8");
+		else
+			device_set_desc(dev, "Cool`n'Quiet K8");
+		break;
+	default:
+		return (ENODEV);
+	}
+
+	return (0);
+}
+
+/*
+ * Build the state table, preferring the ACPI _PSS data from acpi_perf
+ * and falling back to the legacy BIOS PSB tables, then register with
+ * the cpufreq framework.
+ */
+static int
+pn_attach(device_t dev)
+{
+	device_t perf_dev;
+	int rv;
+
+	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
+	rv = ENXIO;
+	if (perf_dev != NULL)
+		rv = pn_decode_acpi(dev, perf_dev);
+	if (rv != 0)
+		rv = pn_decode_pst(dev);
+	if (rv != 0)
+		return (ENXIO);
+	cpufreq_register(dev);
+	return (0);
+}
+
+static int
+pn_detach(device_t dev)
+{
+
+	/* Unregister from the cpufreq framework; no hardware cleanup. */
+	return (cpufreq_unregister(dev));
+}
diff --git a/sys/x86/cpufreq/smist.c b/sys/x86/cpufreq/smist.c
new file mode 100644
index 0000000..5cfd72b
--- /dev/null
+++ b/sys/x86/cpufreq/smist.c
@@ -0,0 +1,514 @@
+/*-
+ * Copyright (c) 2005 Bruno Ducrot
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This driver is based upon information found by examining speedstep-0.5
+ * from Marc Lehman, which includes all the reverse engineering effort of
+ * Malik Martin (function 1 and 2 of the GSI).
+ *
+ * The correct way for the OS to take ownership from the BIOS was found by
+ * Hiroshi Miura (function 0 of the GSI).
+ *
+ * Finally, the int 15h call interface was (partially) documented by Intel.
+ *
+ * Many thanks to Jon Noack for testing and debugging this driver.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/cpu.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/systm.h>
+
+#include <machine/bus.h>
+#include <machine/cputypes.h>
+#include <machine/md_var.h>
+#include <machine/vm86.h>
+
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include "cpufreq_if.h"
+
+#if 0
+#define DPRINT(dev, x...) device_printf(dev, x)
+#else
+#define DPRINT(dev, x...)
+#endif
+
/* Per-device driver state. */
struct smist_softc {
	device_t dev;			/* Back-pointer to our device. */
	int smi_cmd;			/* I/O port written to trigger the SMI. */
	int smi_data;			/* SMI data port reported by the BIOS. */
	int command;			/* Command value passed to the SMI. */
	int flags;			/* Flags word from the GSIC BIOS call. */
	struct cf_setting sets[2]; /* Only two settings. */
};
+
+static char smist_magic[] = "Copyright (c) 1999 Intel Corporation";
+
+static void smist_identify(driver_t *driver, device_t parent);
+static int smist_probe(device_t dev);
+static int smist_attach(device_t dev);
+static int smist_detach(device_t dev);
+static int smist_settings(device_t dev, struct cf_setting *sets,
+ int *count);
+static int smist_set(device_t dev, const struct cf_setting *set);
+static int smist_get(device_t dev, struct cf_setting *set);
+static int smist_type(device_t dev, int *type);
+
/* Newbus and cpufreq method dispatch table. */
static device_method_t smist_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify, smist_identify),
	DEVMETHOD(device_probe, smist_probe),
	DEVMETHOD(device_attach, smist_attach),
	DEVMETHOD(device_detach, smist_detach),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set, smist_set),
	DEVMETHOD(cpufreq_drv_get, smist_get),
	DEVMETHOD(cpufreq_drv_type, smist_type),
	DEVMETHOD(cpufreq_drv_settings, smist_settings),

	{0, 0}
};

static driver_t smist_driver = {
	"smist", smist_methods, sizeof(struct smist_softc)
};
static devclass_t smist_devclass;
/* Attach as a child of each CPU device. */
DRIVER_MODULE(smist, cpu, smist_driver, smist_devclass, 0, 0);
+
/* PCI IDs of ISA bridges on which the SpeedStep SMI interface works. */
struct piix4_pci_device {
	uint16_t vendor;
	uint16_t device;
	char *desc;
};

static struct piix4_pci_device piix4_pci_devices[] = {
	{0x8086, 0x7113, "Intel PIIX4 ISA bridge"},
	{0x8086, 0x719b, "Intel PIIX4 ISA bridge (embedded in MX440 chipset)"},

	{0, 0, NULL},	/* Table terminator. */
};

/* Function codes for the GSI (SpeedStep SMI interface). */
#define SET_OWNERSHIP 0
#define GET_STATE 1
#define SET_STATE 2
+
+static int
+int15_gsic_call(int *sig, int *smi_cmd, int *command, int *smi_data, int *flags)
+{
+ struct vm86frame vmf;
+
+ bzero(&vmf, sizeof(vmf));
+ vmf.vmf_eax = 0x0000E980; /* IST support */
+ vmf.vmf_edx = 0x47534943; /* 'GSIC' in ASCII */
+ vm86_intcall(0x15, &vmf);
+
+ if (vmf.vmf_eax == 0x47534943) {
+ *sig = vmf.vmf_eax;
+ *smi_cmd = vmf.vmf_ebx & 0xff;
+ *command = (vmf.vmf_ebx >> 16) & 0xff;
+ *smi_data = vmf.vmf_ecx;
+ *flags = vmf.vmf_edx;
+ } else {
+ *sig = -1;
+ *smi_cmd = -1;
+ *command = -1;
+ *smi_data = -1;
+ *flags = -1;
+ }
+
+ return (0);
+}
+
/* Temporary structure to hold mapped page and status. */
struct set_ownership_data {
	int smi_cmd;	/* SMI trigger port to write to. */
	int command;	/* Command value to pass in %eax. */
	int result;	/* Result from the BIOS (or busdma error). */
	void *buf;	/* KVA of the page holding the magic string. */
};
+
/* Perform actual SMI call to enable SpeedStep. */
static void
set_ownership_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct set_ownership_data *data;

	data = arg;
	if (error) {
		/* DMA load failed; report it through the result field. */
		data->result = error;
		return;
	}

	/* Copy in the magic string and send it by writing to the SMI port. */
	strlcpy(data->buf, smist_magic, PAGE_SIZE);
	/*
	 * %eax = command, %ebx = %ecx = 0, %edx = SMI trigger port,
	 * %esi = physical address of the magic-string page.  %edi is
	 * preset to -1 so an ignored SMI reads back as failure; the
	 * BIOS overwrites it with its result (captured in data->result).
	 */
	__asm __volatile(
	    "movl $-1, %%edi\n\t"
	    "out %%al, (%%dx)\n"
	    : "=D" (data->result)
	    : "a" (data->command),
	      "b" (0),
	      "c" (0),
	      "d" (data->smi_cmd),
	      "S" ((uint32_t)segs[0].ds_addr)
	);
}
+
+static int
+set_ownership(device_t dev)
+{
+ struct smist_softc *sc;
+ struct set_ownership_data cb_data;
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+
+ /*
+ * Specify the region to store the magic string. Since its address is
+ * passed to the BIOS in a 32-bit register, we have to make sure it is
+ * located in a physical page below 4 GB (i.e., for PAE.)
+ */
+ sc = device_get_softc(dev);
+ if (bus_dma_tag_create(/*parent*/ NULL,
+ /*alignment*/ PAGE_SIZE, /*no boundary*/ 0,
+ /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT, /*highaddr*/ BUS_SPACE_MAXADDR,
+ NULL, NULL, /*maxsize*/ PAGE_SIZE, /*segments*/ 1,
+ /*maxsegsize*/ PAGE_SIZE, 0, busdma_lock_mutex, &Giant,
+ &tag) != 0) {
+ device_printf(dev, "can't create mem tag\n");
+ return (ENXIO);
+ }
+ if (bus_dmamem_alloc(tag, &cb_data.buf, BUS_DMA_NOWAIT, &map) != 0) {
+ bus_dma_tag_destroy(tag);
+ device_printf(dev, "can't alloc mapped mem\n");
+ return (ENXIO);
+ }
+
+ /* Load the physical page map and take ownership in the callback. */
+ cb_data.smi_cmd = sc->smi_cmd;
+ cb_data.command = sc->command;
+ if (bus_dmamap_load(tag, map, cb_data.buf, PAGE_SIZE, set_ownership_cb,
+ &cb_data, BUS_DMA_NOWAIT) != 0) {
+ bus_dmamem_free(tag, cb_data.buf, map);
+ bus_dma_tag_destroy(tag);
+ device_printf(dev, "can't load mem\n");
+ return (ENXIO);
+ };
+ DPRINT(dev, "taking ownership over BIOS return %d\n", cb_data.result);
+ bus_dmamap_unload(tag, map);
+ bus_dmamem_free(tag, cb_data.buf, map);
+ bus_dma_tag_destroy(tag);
+ return (cb_data.result ? ENXIO : 0);
+}
+
/*
 * Issue a GSI SMI to either query (GET_STATE) or change (SET_STATE)
 * the current SpeedStep level; *state is both the requested level
 * (input, SET_STATE) and the reported level (output, low bit only).
 * Returns 0 on success, EINVAL for a bad function code, or ENXIO if
 * the BIOS reported failure.
 */
static int
getset_state(struct smist_softc *sc, int *state, int function)
{
	int new_state;
	int result;
	int eax;

	if (!sc)
		return (ENXIO);

	if (function != GET_STATE && function != SET_STATE)
		return (EINVAL);

	DPRINT(sc->dev, "calling GSI\n");

	/*
	 * %eax = command, %ebx = function, %ecx = requested state,
	 * %edx = SMI trigger port.  %edi is preset to -1 so an ignored
	 * SMI reads back as failure; the BIOS overwrites it with its
	 * result and returns the new state in %ebx.
	 */
	__asm __volatile(
	    "movl $-1, %%edi\n\t"
	    "out %%al, (%%dx)\n"
	    : "=a" (eax),
	      "=b" (new_state),
	      "=D" (result)
	    : "a" (sc->command),
	      "b" (function),
	      "c" (*state),
	      "d" (sc->smi_cmd)
	);

	DPRINT(sc->dev, "GSI returned: eax %.8x ebx %.8x edi %.8x\n",
	    eax, new_state, result);

	/* Only the low bit of the reported state encodes the level. */
	*state = new_state & 1;

	/* GET_STATE reports errors in %eax; SET_STATE in %edi. */
	switch (function) {
	case GET_STATE:
		if (eax)
			return (ENXIO);
		break;
	case SET_STATE:
		if (result)
			return (ENXIO);
		break;
	}
	return (0);
}
+
+static void
+smist_identify(driver_t *driver, device_t parent)
+{
+ struct piix4_pci_device *id;
+ device_t piix4 = NULL;
+
+ if (resource_disabled("ichst", 0))
+ return;
+
+ /* Check for a supported processor */
+ if (cpu_vendor_id != CPU_VENDOR_INTEL)
+ return;
+ switch (cpu_id & 0xff0) {
+ case 0x680: /* Pentium III [coppermine] */
+ case 0x6a0: /* Pentium III [Tualatin] */
+ break;
+ default:
+ return;
+ }
+
+ /* Check for a supported PCI-ISA bridge */
+ for (id = piix4_pci_devices; id->desc != NULL; ++id) {
+ if ((piix4 = pci_find_device(id->vendor, id->device)) != NULL)
+ break;
+ }
+ if (!piix4)
+ return;
+
+ if (bootverbose)
+ printf("smist: found supported isa bridge %s\n", id->desc);
+
+ if (device_find_child(parent, "smist", -1) != NULL)
+ return;
+ if (BUS_ADD_CHILD(parent, 30, "smist", -1) == NULL)
+ device_printf(parent, "smist: add child failed\n");
+}
+
+static int
+smist_probe(device_t dev)
+{
+ struct smist_softc *sc;
+ device_t ichss_dev, perf_dev;
+ int sig, smi_cmd, command, smi_data, flags;
+ int type;
+ int rv;
+
+ if (resource_disabled("smist", 0))
+ return (ENXIO);
+
+ sc = device_get_softc(dev);
+
+ /*
+ * If the ACPI perf or ICH SpeedStep drivers have attached and not
+ * just offering info, let them manage things.
+ */
+ perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
+ if (perf_dev && device_is_attached(perf_dev)) {
+ rv = CPUFREQ_DRV_TYPE(perf_dev, &type);
+ if (rv == 0 && (type & CPUFREQ_FLAG_INFO_ONLY) == 0)
+ return (ENXIO);
+ }
+ ichss_dev = device_find_child(device_get_parent(dev), "ichss", -1);
+ if (ichss_dev && device_is_attached(ichss_dev))
+ return (ENXIO);
+
+ int15_gsic_call(&sig, &smi_cmd, &command, &smi_data, &flags);
+ if (bootverbose)
+ device_printf(dev, "sig %.8x smi_cmd %.4x command %.2x "
+ "smi_data %.4x flags %.8x\n",
+ sig, smi_cmd, command, smi_data, flags);
+
+ if (sig != -1) {
+ sc->smi_cmd = smi_cmd;
+ sc->smi_data = smi_data;
+
+ /*
+ * Sometimes int 15h 'GSIC' returns 0x80 for command, when
+ * it is actually 0x82. The Windows driver will overwrite
+ * this value given by the registry.
+ */
+ if (command == 0x80) {
+ device_printf(dev,
+ "GSIC returned cmd 0x80, should be 0x82\n");
+ command = 0x82;
+ }
+ sc->command = (sig & 0xffffff00) | (command & 0xff);
+ sc->flags = flags;
+ } else {
+ /* Give some default values */
+ sc->smi_cmd = 0xb2;
+ sc->smi_data = 0xb3;
+ sc->command = 0x47534982;
+ sc->flags = 0;
+ }
+
+ device_set_desc(dev, "SpeedStep SMI");
+
+ return (-1500);
+}
+
+static int
+smist_attach(device_t dev)
+{
+ struct smist_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ /* If we can't take ownership over BIOS, then bail out */
+ if (set_ownership(dev) != 0)
+ return (ENXIO);
+
+ /* Setup some defaults for our exported settings. */
+ sc->sets[0].freq = CPUFREQ_VAL_UNKNOWN;
+ sc->sets[0].volts = CPUFREQ_VAL_UNKNOWN;
+ sc->sets[0].power = CPUFREQ_VAL_UNKNOWN;
+ sc->sets[0].lat = 1000;
+ sc->sets[0].dev = dev;
+ sc->sets[1] = sc->sets[0];
+
+ cpufreq_register(dev);
+
+ return (0);
+}
+
+static int
+smist_detach(device_t dev)
+{
+
+ return (cpufreq_unregister(dev));
+}
+
+static int
+smist_settings(device_t dev, struct cf_setting *sets, int *count)
+{
+ struct smist_softc *sc;
+ struct cf_setting set;
+ int first, i;
+
+ if (sets == NULL || count == NULL)
+ return (EINVAL);
+ if (*count < 2) {
+ *count = 2;
+ return (E2BIG);
+ }
+ sc = device_get_softc(dev);
+
+ /*
+ * Estimate frequencies for both levels, temporarily switching to
+ * the other one if we haven't calibrated it yet.
+ */
+ for (i = 0; i < 2; i++) {
+ if (sc->sets[i].freq == CPUFREQ_VAL_UNKNOWN) {
+ first = (i == 0) ? 1 : 0;
+ smist_set(dev, &sc->sets[i]);
+ smist_get(dev, &set);
+ smist_set(dev, &sc->sets[first]);
+ }
+ }
+
+ bcopy(sc->sets, sets, sizeof(sc->sets));
+ *count = 2;
+
+ return (0);
+}
+
+static int
+smist_set(device_t dev, const struct cf_setting *set)
+{
+ struct smist_softc *sc;
+ int rv, state, req_state, try;
+
+ /* Look up appropriate bit value based on frequency. */
+ sc = device_get_softc(dev);
+ if (CPUFREQ_CMP(set->freq, sc->sets[0].freq))
+ req_state = 0;
+ else if (CPUFREQ_CMP(set->freq, sc->sets[1].freq))
+ req_state = 1;
+ else
+ return (EINVAL);
+
+ DPRINT(dev, "requested setting %d\n", req_state);
+
+ rv = getset_state(sc, &state, GET_STATE);
+ if (state == req_state)
+ return (0);
+
+ try = 3;
+ do {
+ rv = getset_state(sc, &req_state, SET_STATE);
+
+ /* Sleep for 200 microseconds. This value is just a guess. */
+ if (rv)
+ DELAY(200);
+ } while (rv && --try);
+ DPRINT(dev, "set_state return %d, tried %d times\n",
+ rv, 4 - try);
+
+ return (rv);
+}
+
+static int
+smist_get(device_t dev, struct cf_setting *set)
+{
+ struct smist_softc *sc;
+ uint64_t rate;
+ int state;
+ int rv;
+
+ sc = device_get_softc(dev);
+ rv = getset_state(sc, &state, GET_STATE);
+ if (rv != 0)
+ return (rv);
+
+ /* If we haven't changed settings yet, estimate the current value. */
+ if (sc->sets[state].freq == CPUFREQ_VAL_UNKNOWN) {
+ cpu_est_clockrate(0, &rate);
+ sc->sets[state].freq = rate / 1000000;
+ DPRINT(dev, "get calibrated new rate of %d\n",
+ sc->sets[state].freq);
+ }
+ *set = sc->sets[state];
+
+ return (0);
+}
+
+static int
+smist_type(device_t dev, int *type)
+{
+
+ if (type == NULL)
+ return (EINVAL);
+
+ *type = CPUFREQ_TYPE_ABSOLUTE;
+ return (0);
+}
OpenPOWER on IntegriCloud