Module Name: src
Committed By: jruoho
Date: Fri Aug 13 16:21:51 UTC 2010
Modified Files:
src/sys/arch/x86/acpi: acpi_cpu_md.c
src/sys/dev/acpi: acpi_cpu.c acpi_cpu.h acpi_cpu_cstate.c
acpi_cpu_pstate.c files.acpi
src/sys/modules/acpicpu: Makefile
Added Files:
src/sys/dev/acpi: acpi_cpu_tstate.c
Log Message:
Merge T-state a.k.a. throttling support for acpicpu(4).
Remarks:
1. Native instructions are supported only on Intel. Native support for
other x86 vendors will be investigated. By assumption, AMD and others
use the I/O based approach.
2. The existing code, INTEL_ONDEMAND_CLOCKMOD, must be disabled in
order to use acpicpu(4). Otherwise fatal MSR races may occur.
Unlike with P-states, no attempt is made to disable the existing
implementation.
3. There is no rationale for exporting the controls to userland.
4. Throttling is an artefact from the past. T-states will not be used for
power management per se. For CPU frequency management, P-states are
preferred in all circumstances. No noticeable additional power savings
were observed in various experiments. When the system has been scaled
to the highest (i.e. lowest power) P-state, it is preferable to move
from C0 to deeper C-states than it is to actively throttle the CPU.
5. But T-states need to be implemented for passive cooling via acpitz(4).
As specified by ACPI and Intel documents, these can be used as the
last line of defence against critical thermal conditions. Support
for this will be added later.
To generate a diff of this commit:
cvs rdiff -u -r1.9 -r1.10 src/sys/arch/x86/acpi/acpi_cpu_md.c
cvs rdiff -u -r1.14 -r1.15 src/sys/dev/acpi/acpi_cpu.c \
src/sys/dev/acpi/acpi_cpu_pstate.c
cvs rdiff -u -r1.13 -r1.14 src/sys/dev/acpi/acpi_cpu.h
cvs rdiff -u -r1.23 -r1.24 src/sys/dev/acpi/acpi_cpu_cstate.c
cvs rdiff -u -r0 -r1.1 src/sys/dev/acpi/acpi_cpu_tstate.c
cvs rdiff -u -r1.78 -r1.79 src/sys/dev/acpi/files.acpi
cvs rdiff -u -r1.2 -r1.3 src/sys/modules/acpicpu/Makefile
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/sys/arch/x86/acpi/acpi_cpu_md.c
diff -u src/sys/arch/x86/acpi/acpi_cpu_md.c:1.9 src/sys/arch/x86/acpi/acpi_cpu_md.c:1.10
--- src/sys/arch/x86/acpi/acpi_cpu_md.c:1.9 Mon Aug 9 15:46:17 2010
+++ src/sys/arch/x86/acpi/acpi_cpu_md.c Fri Aug 13 16:21:50 2010
@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu_md.c,v 1.9 2010/08/09 15:46:17 jruoho Exp $ */
+/* $NetBSD: acpi_cpu_md.c,v 1.10 2010/08/13 16:21:50 jruoho Exp $ */
/*-
* Copyright (c) 2010 Jukka Ruohonen <[email protected]>
@@ -27,7 +27,7 @@
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.9 2010/08/09 15:46:17 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.10 2010/08/13 16:21:50 jruoho Exp $");
#include <sys/param.h>
#include <sys/bus.h>
@@ -52,6 +52,7 @@
static int acpicpu_md_pstate_sysctl_get(SYSCTLFN_PROTO);
static int acpicpu_md_pstate_sysctl_set(SYSCTLFN_PROTO);
static int acpicpu_md_pstate_sysctl_all(SYSCTLFN_PROTO);
+static int acpicpu_md_tstate_get_status(uint64_t *);
extern uint32_t cpus_running;
extern struct acpicpu_softc **acpicpu_sc;
@@ -78,11 +79,14 @@
val |= ACPICPU_PDC_C_C1_FFH | ACPICPU_PDC_C_C2C3_FFH;
/*
- * Set native P-states if EST is available.
+ * Set native P- and T-states, if available.
*/
if ((ci->ci_feat_val[1] & CPUID2_EST) != 0)
val |= ACPICPU_PDC_P_FFH;
+ if ((ci->ci_feat_val[0] & CPUID_ACPI) != 0)
+ val |= ACPICPU_PDC_T_FFH;
+
return val;
}
@@ -107,6 +111,9 @@
if ((ci->ci_feat_val[1] & CPUID2_EST) != 0)
val |= ACPICPU_FLAG_P_FFH;
+ if ((ci->ci_feat_val[0] & CPUID_ACPI) != 0)
+ val |= ACPICPU_FLAG_T_FFH;
+
/*
* Bus master arbitration is not
* needed on some recent Intel CPUs.
@@ -123,7 +130,7 @@
case CPUVENDOR_AMD:
/*
- * XXX: Deal with the AMD C1E extension here.
+ * XXX: Deal with PowerNow! and C1E here.
*/
break;
}
@@ -310,13 +317,6 @@
uint32_t freq;
int err;
- /*
- * We can use any ACPI CPU to manipulate the
- * frequencies. In MP environments all CPUs
- * are mandated to support the same number of
- * P-states and each state must have identical
- * parameters across processors.
- */
sc = acpicpu_sc[ci->ci_acpiid];
if (sc == NULL)
@@ -493,3 +493,93 @@
return EAGAIN;
}
+
+int
+acpicpu_md_tstate_get(struct acpicpu_softc *sc, uint32_t *percent)
+{
+ struct acpicpu_tstate *ts;
+ uint64_t val;
+ uint32_t i;
+ int rv;
+
+ rv = acpicpu_md_tstate_get_status(&val);
+
+ if (rv != 0)
+ return rv;
+
+ mutex_enter(&sc->sc_mtx);
+
+ for (i = 0; i < sc->sc_tstate_count; i++) {
+
+ ts = &sc->sc_tstate[i];
+
+ if (ts->ts_percent == 0)
+ continue;
+
+ if (val == ts->ts_control || val == ts->ts_status) {
+ mutex_exit(&sc->sc_mtx);
+ *percent = ts->ts_percent;
+ return 0;
+ }
+ }
+
+ mutex_exit(&sc->sc_mtx);
+
+ return EIO;
+}
+
+static int
+acpicpu_md_tstate_get_status(uint64_t *val)
+{
+
+ switch (cpu_vendor) {
+
+ case CPUVENDOR_INTEL:
+ *val = rdmsr(MSR_THERM_CONTROL);
+ break;
+
+ default:
+ return ENODEV;
+ }
+
+ return 0;
+}
+
+int
+acpicpu_md_tstate_set(struct acpicpu_tstate *ts)
+{
+ struct msr_rw_info msr;
+ uint64_t xc, val;
+ int i;
+
+ switch (cpu_vendor) {
+
+ case CPUVENDOR_INTEL:
+ msr.msr_read = true;
+ msr.msr_type = MSR_THERM_CONTROL;
+ msr.msr_value = ts->ts_control;
+ msr.msr_mask = __BITS(1, 4);
+ break;
+
+ default:
+ return ENODEV;
+ }
+
+ xc = xc_broadcast(0, (xcfunc_t)x86_msr_xcall, &msr, NULL);
+ xc_wait(xc);
+
+ if (ts->ts_status == 0)
+ return 0;
+
+ for (i = val = 0; i < ACPICPU_T_STATE_RETRY; i++) {
+
+ (void)acpicpu_md_tstate_get_status(&val);
+
+ if (val == ts->ts_status)
+ return 0;
+
+ DELAY(ts->ts_latency);
+ }
+
+ return EAGAIN;
+}
Index: src/sys/dev/acpi/acpi_cpu.c
diff -u src/sys/dev/acpi/acpi_cpu.c:1.14 src/sys/dev/acpi/acpi_cpu.c:1.15
--- src/sys/dev/acpi/acpi_cpu.c:1.14 Wed Aug 11 16:22:18 2010
+++ src/sys/dev/acpi/acpi_cpu.c Fri Aug 13 16:21:50 2010
@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu.c,v 1.14 2010/08/11 16:22:18 jruoho Exp $ */
+/* $NetBSD: acpi_cpu.c,v 1.15 2010/08/13 16:21:50 jruoho Exp $ */
/*-
* Copyright (c) 2010 Jukka Ruohonen <[email protected]>
@@ -27,7 +27,7 @@
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.14 2010/08/11 16:22:18 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.15 2010/08/13 16:21:50 jruoho Exp $");
#include <sys/param.h>
#include <sys/cpu.h>
@@ -157,9 +157,11 @@
acpicpu_cstate_attach(self);
acpicpu_pstate_attach(self);
+ acpicpu_tstate_attach(self);
(void)config_finalize_register(self, acpicpu_cstate_start);
(void)config_finalize_register(self, acpicpu_pstate_start);
+ (void)config_finalize_register(self, acpicpu_tstate_start);
(void)acpi_register_notify(sc->sc_node, acpicpu_notify);
(void)pmf_device_register(self, acpicpu_suspend, acpicpu_resume);
@@ -191,6 +193,12 @@
if (rv != 0)
return rv;
+ if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
+ rv = acpicpu_tstate_detach(self);
+
+ if (rv != 0)
+ return rv;
+
rv = RUN_ONCE(&once_detach, acpicpu_once_detach);
if (rv != 0)
@@ -469,7 +477,7 @@
if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
return;
- func = NULL;
+ func = acpicpu_tstate_callback;
break;
default:
@@ -493,6 +501,9 @@
if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
(void)acpicpu_pstate_suspend(self);
+ if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
+ (void)acpicpu_tstate_suspend(self);
+
return true;
}
@@ -507,6 +518,9 @@
if ((sc->sc_flags & ACPICPU_FLAG_P) != 0)
(void)acpicpu_pstate_resume(self);
+ if ((sc->sc_flags & ACPICPU_FLAG_T) != 0)
+ (void)acpicpu_tstate_resume(self);
+
sc->sc_cold = false;
return true;
Index: src/sys/dev/acpi/acpi_cpu_pstate.c
diff -u src/sys/dev/acpi/acpi_cpu_pstate.c:1.14 src/sys/dev/acpi/acpi_cpu_pstate.c:1.15
--- src/sys/dev/acpi/acpi_cpu_pstate.c:1.14 Thu Aug 12 06:17:14 2010
+++ src/sys/dev/acpi/acpi_cpu_pstate.c Fri Aug 13 16:21:50 2010
@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu_pstate.c,v 1.14 2010/08/12 06:17:14 jruoho Exp $ */
+/* $NetBSD: acpi_cpu_pstate.c,v 1.15 2010/08/13 16:21:50 jruoho Exp $ */
/*-
* Copyright (c) 2010 Jukka Ruohonen <[email protected]>
@@ -27,7 +27,7 @@
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.14 2010/08/12 06:17:14 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.15 2010/08/13 16:21:50 jruoho Exp $");
#include <sys/param.h>
#include <sys/evcnt.h>
@@ -59,6 +59,10 @@
const char *str;
ACPI_STATUS rv;
+ /*
+ * Three control methods are mandatory
+ * for P-states; _PSS, _PCT, and _PPC.
+ */
rv = acpicpu_pstate_pss(sc);
if (ACPI_FAILURE(rv)) {
@@ -68,11 +72,6 @@
rv = acpicpu_pstate_pct(sc);
- if (rv == AE_SUPPORT) {
- aprint_error_dev(sc->sc_dev, "CPU not supported\n");
- return;
- }
-
if (ACPI_FAILURE(rv)) {
str = "_PCT";
goto fail;
@@ -80,8 +79,10 @@
rv = acpicpu_pstate_max(sc);
- if (rv == 0)
- sc->sc_flags |= ACPICPU_FLAG_P_PPC;
+ if (rv != 0) {
+ str = "_PPC";
+ goto fail;
+ }
sc->sc_flags |= ACPICPU_FLAG_P;
sc->sc_pstate_current = sc->sc_pstate[0].ps_freq;
@@ -93,8 +94,19 @@
return;
fail:
- aprint_error_dev(sc->sc_dev, "failed to evaluate "
- "%s: %s\n", str, AcpiFormatException(rv));
+ switch (rv) {
+
+ case AE_NOT_FOUND:
+ return;
+
+ case AE_SUPPORT:
+ aprint_verbose_dev(sc->sc_dev, "P-states not supported\n");
+ return;
+
+ default:
+ aprint_error_dev(sc->sc_dev, "failed to evaluate "
+ "%s: %s\n", str, AcpiFormatException(rv));
+ }
}
static void
@@ -119,8 +131,8 @@
continue;
aprint_debug_dev(sc->sc_dev, "P%d: %3s, "
- "lat %3u us, pow %5u mW, %4u MHz\n",
- i, str, ps->ps_latency, ps->ps_power, ps->ps_freq);
+ "lat %3u us, pow %5u mW, %4u MHz\n", i, str,
+ ps->ps_latency, ps->ps_power, ps->ps_freq);
}
once = true;
@@ -214,10 +226,7 @@
static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_pstate_callback;
struct acpicpu_softc *sc = device_private(self);
- KASSERT((sc->sc_flags & ACPICPU_FLAG_P) != 0);
-
- if ((sc->sc_flags & ACPICPU_FLAG_P_PPC) != 0)
- (void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
+ (void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
return true;
}
@@ -231,9 +240,6 @@
sc = device_private(self);
- if ((sc->sc_flags & ACPICPU_FLAG_P_PPC) == 0)
- return;
-
mutex_enter(&sc->sc_mtx);
old = sc->sc_pstate_max;
@@ -467,8 +473,8 @@
goto out;
}
- (void)memcpy(&sc->sc_pstate_control, reg[0], size); /* PERF_CTRL */
- (void)memcpy(&sc->sc_pstate_status, reg[1], size); /* PERF_STATUS */
+ (void)memcpy(&sc->sc_pstate_control, reg[0], size);
+ (void)memcpy(&sc->sc_pstate_status, reg[1], size);
out:
if (buf.Pointer != NULL)
@@ -604,7 +610,7 @@
mutex_exit(&sc->sc_mtx);
- if (ps == NULL) {
+ if (__predict_false(ps == NULL)) {
rv = EIO;
goto fail;
}
@@ -669,7 +675,7 @@
mutex_exit(&sc->sc_mtx);
- if (ps == NULL) {
+ if (__predict_false(ps == NULL)) {
rv = EINVAL;
goto fail;
}
Index: src/sys/dev/acpi/acpi_cpu.h
diff -u src/sys/dev/acpi/acpi_cpu.h:1.13 src/sys/dev/acpi/acpi_cpu.h:1.14
--- src/sys/dev/acpi/acpi_cpu.h:1.13 Wed Aug 11 11:48:21 2010
+++ src/sys/dev/acpi/acpi_cpu.h Fri Aug 13 16:21:50 2010
@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu.h,v 1.13 2010/08/11 11:48:21 jruoho Exp $ */
+/* $NetBSD: acpi_cpu.h,v 1.14 2010/08/13 16:21:50 jruoho Exp $ */
/*-
* Copyright (c) 2010 Jukka Ruohonen <[email protected]>
@@ -35,15 +35,6 @@
*
* Intel Corporation: Intel Processor-Specific ACPI
* Interface Specification, September 2006, Revision 005.
- *
- * http://download.intel.com/technology/IAPC/acpi/downloads/30222305.pdf
- *
- * For other relevant reading, see for instance:
- *
- * Advanced Micro Devices: Using ACPI to Report APML P-State
- * Limit Changes to Operating Systems and VMM's. August 7, 2009.
- *
- * http://developer.amd.com/Assets/ACPI-APML-PState-rev12.pdf
*/
#define ACPICPU_PDC_REVID 0x1
#define ACPICPU_PDC_SMP 0xA
@@ -89,23 +80,31 @@
#define ACPICPU_P_STATE_UNKNOWN 0x0
/*
+ * T-states.
+ */
+#define ACPICPU_T_STATE_MAX 0x8
+#define ACPICPU_T_STATE_RETRY 0xA
+#define ACPICPU_T_STATE_UNKNOWN 255
+
+/*
* Flags.
*/
#define ACPICPU_FLAG_C __BIT(0) /* C-states supported */
#define ACPICPU_FLAG_P __BIT(1) /* P-states supported */
#define ACPICPU_FLAG_T __BIT(2) /* T-states supported */
-#define ACPICPU_FLAG_C_CST __BIT(3) /* C-states with _CST */
+#define ACPICPU_FLAG_C_FFH __BIT(3) /* Native C-states */
#define ACPICPU_FLAG_C_FADT __BIT(4) /* C-states with FADT */
#define ACPICPU_FLAG_C_BM __BIT(5) /* Bus master control */
#define ACPICPU_FLAG_C_BM_STS __BIT(6) /* Bus master check required */
#define ACPICPU_FLAG_C_ARB __BIT(7) /* Bus master arbitration */
#define ACPICPU_FLAG_C_NOC3 __BIT(8) /* C3 disabled (quirk) */
-#define ACPICPU_FLAG_C_FFH __BIT(9) /* MONITOR/MWAIT supported */
-#define ACPICPU_FLAG_C_C1E __BIT(10) /* AMD C1E detected */
+#define ACPICPU_FLAG_C_C1E __BIT(9) /* AMD C1E detected */
+
+#define ACPICPU_FLAG_P_FFH __BIT(10) /* Native P-states */
-#define ACPICPU_FLAG_P_PPC __BIT(11) /* Dynamic freq. with _PPC */
-#define ACPICPU_FLAG_P_FFH __BIT(12) /* EST etc. supported */
+#define ACPICPU_FLAG_T_FFH __BIT(11) /* Native throttling */
+#define ACPICPU_FLAG_T_FADT __BIT(12) /* Throttling with FADT */
/*
* This is AML_RESOURCE_GENERIC_REGISTER,
@@ -142,6 +141,16 @@
uint32_t ps_status;
};
+struct acpicpu_tstate {
+ struct evcnt ts_evcnt;
+ char ts_name[EVCNT_STRING_MAX];
+ uint32_t ts_percent; /* % */
+ uint32_t ts_power; /* mW */
+ uint32_t ts_latency; /* us */
+ uint32_t ts_control;
+ uint32_t ts_status;
+};
+
struct acpicpu_object {
uint32_t ao_procid;
uint32_t ao_pblklen;
@@ -163,6 +172,14 @@
uint32_t sc_pstate_count;
uint32_t sc_pstate_max;
+ struct acpicpu_tstate *sc_tstate;
+ struct acpicpu_reg sc_tstate_control;
+ struct acpicpu_reg sc_tstate_status;
+ uint32_t sc_tstate_current;
+ uint32_t sc_tstate_count;
+ uint32_t sc_tstate_max;
+ uint32_t sc_tstate_min;
+
kmutex_t sc_mtx;
bus_space_tag_t sc_iot;
bus_space_handle_t sc_ioh;
@@ -191,6 +208,15 @@
int acpicpu_pstate_get(struct acpicpu_softc *, uint32_t *);
int acpicpu_pstate_set(struct acpicpu_softc *, uint32_t);
+void acpicpu_tstate_attach(device_t);
+int acpicpu_tstate_detach(device_t);
+int acpicpu_tstate_start(device_t);
+bool acpicpu_tstate_suspend(device_t);
+bool acpicpu_tstate_resume(device_t);
+void acpicpu_tstate_callback(void *);
+int acpicpu_tstate_get(struct acpicpu_softc *, uint32_t *);
+int acpicpu_tstate_set(struct acpicpu_softc *, uint32_t);
+
uint32_t acpicpu_md_cap(void);
uint32_t acpicpu_md_quirks(void);
uint32_t acpicpu_md_cpus_running(void);
@@ -201,5 +227,7 @@
int acpicpu_md_pstate_stop(void);
int acpicpu_md_pstate_get(struct acpicpu_softc *, uint32_t *);
int acpicpu_md_pstate_set(struct acpicpu_pstate *);
+int acpicpu_md_tstate_get(struct acpicpu_softc *, uint32_t *);
+int acpicpu_md_tstate_set(struct acpicpu_tstate *);
#endif /* !_SYS_DEV_ACPI_ACPI_CPU_H */
Index: src/sys/dev/acpi/acpi_cpu_cstate.c
diff -u src/sys/dev/acpi/acpi_cpu_cstate.c:1.23 src/sys/dev/acpi/acpi_cpu_cstate.c:1.24
--- src/sys/dev/acpi/acpi_cpu_cstate.c:1.23 Wed Aug 11 16:41:19 2010
+++ src/sys/dev/acpi/acpi_cpu_cstate.c Fri Aug 13 16:21:50 2010
@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu_cstate.c,v 1.23 2010/08/11 16:41:19 jruoho Exp $ */
+/* $NetBSD: acpi_cpu_cstate.c,v 1.24 2010/08/13 16:21:50 jruoho Exp $ */
/*-
* Copyright (c) 2010 Jukka Ruohonen <[email protected]>
@@ -27,7 +27,7 @@
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.23 2010/08/11 16:41:19 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.24 2010/08/13 16:21:50 jruoho Exp $");
#include <sys/param.h>
#include <sys/cpu.h>
@@ -94,7 +94,6 @@
switch (rv) {
case AE_OK:
- sc->sc_flags |= ACPICPU_FLAG_C_CST;
acpicpu_cstate_cst_bios();
break;
@@ -146,9 +145,8 @@
}
aprint_debug_dev(sc->sc_dev, "C%d: %3s, "
- "lat %3u us, pow %5u mW, addr 0x%06x, flags 0x%02x\n",
- i, str, cs->cs_latency, cs->cs_power,
- (uint32_t)cs->cs_addr, cs->cs_flags);
+ "lat %3u us, pow %5u mW, flags 0x%02x\n", i, str,
+ cs->cs_latency, cs->cs_power, cs->cs_flags);
}
once = true;
@@ -249,7 +247,7 @@
static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_cstate_callback;
struct acpicpu_softc *sc = device_private(self);
- if ((sc->sc_flags & ACPICPU_FLAG_C_CST) != 0)
+ if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) == 0)
(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
return true;
@@ -263,10 +261,8 @@
sc = device_private(self);
- if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0) {
- KASSERT((sc->sc_flags & ACPICPU_FLAG_C_CST) == 0);
+ if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0)
return;
- }
mutex_enter(&sc->sc_mtx);
(void)acpicpu_cstate_cst(sc);
@@ -556,10 +552,15 @@
if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) != 0)
cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT;
- if ((acpicpu_md_cpus_running() > 1) &&
- (AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
+ if (sc->sc_object.ao_pblkaddr == 0)
return;
+ if (acpicpu_md_cpus_running() > 1) {
+
+ if ((AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
+ return;
+ }
+
cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO;
cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO;
Index: src/sys/dev/acpi/files.acpi
diff -u src/sys/dev/acpi/files.acpi:1.78 src/sys/dev/acpi/files.acpi:1.79
--- src/sys/dev/acpi/files.acpi:1.78 Sun Aug 8 17:21:14 2010
+++ src/sys/dev/acpi/files.acpi Fri Aug 13 16:21:50 2010
@@ -1,4 +1,4 @@
-# $NetBSD: files.acpi,v 1.78 2010/08/08 17:21:14 jruoho Exp $
+# $NetBSD: files.acpi,v 1.79 2010/08/13 16:21:50 jruoho Exp $
include "dev/acpi/acpica/files.acpica"
@@ -63,6 +63,7 @@
file dev/acpi/acpi_cpu.c acpicpu
file dev/acpi/acpi_cpu_cstate.c acpicpu
file dev/acpi/acpi_cpu_pstate.c acpicpu
+file dev/acpi/acpi_cpu_tstate.c acpicpu
# ACPI Thermal Zone
device acpitz: sysmon_envsys
Index: src/sys/modules/acpicpu/Makefile
diff -u src/sys/modules/acpicpu/Makefile:1.2 src/sys/modules/acpicpu/Makefile:1.3
--- src/sys/modules/acpicpu/Makefile:1.2 Sun Aug 8 16:58:42 2010
+++ src/sys/modules/acpicpu/Makefile Fri Aug 13 16:21:50 2010
@@ -1,4 +1,4 @@
-# $NetBSD: Makefile,v 1.2 2010/08/08 16:58:42 jruoho Exp $
+# $NetBSD: Makefile,v 1.3 2010/08/13 16:21:50 jruoho Exp $
.include "../Makefile.inc"
@@ -6,7 +6,11 @@
.PATH: ${S}/arch/x86/acpi
KMOD= acpicpu
-SRCS= acpi_cpu.c acpi_cpu_cstate.c acpi_cpu_pstate.c acpi_cpu_md.c
+SRCS= acpi_cpu.c \
+ acpi_cpu_md.c \
+ acpi_cpu_cstate.c \
+ acpi_cpu_pstate.c \
+ acpi_cpu_tstate.c
WARNS= 4
Added files:
Index: src/sys/dev/acpi/acpi_cpu_tstate.c
diff -u /dev/null src/sys/dev/acpi/acpi_cpu_tstate.c:1.1
--- /dev/null Fri Aug 13 16:21:51 2010
+++ src/sys/dev/acpi/acpi_cpu_tstate.c Fri Aug 13 16:21:50 2010
@@ -0,0 +1,836 @@
+/* $NetBSD: acpi_cpu_tstate.c,v 1.1 2010/08/13 16:21:50 jruoho Exp $ */
+
+/*-
+ * Copyright (c) 2010 Jukka Ruohonen <[email protected]>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_tstate.c,v 1.1 2010/08/13 16:21:50 jruoho Exp $");
+
+#include <sys/param.h>
+#include <sys/kmem.h>
+
+#include <dev/acpi/acpireg.h>
+#include <dev/acpi/acpivar.h>
+#include <dev/acpi/acpi_cpu.h>
+
+#define _COMPONENT ACPI_BUS_COMPONENT
+ACPI_MODULE_NAME ("acpi_cpu_tstate")
+
+#define ACPI_ADR_SPACE_FADT 0xFF
+
+static void acpicpu_tstate_attach_print(struct acpicpu_softc *);
+static void acpicpu_tstate_attach_evcnt(struct acpicpu_softc *);
+static void acpicpu_tstate_detach_evcnt(struct acpicpu_softc *);
+static ACPI_STATUS acpicpu_tstate_tss(struct acpicpu_softc *);
+static ACPI_STATUS acpicpu_tstate_tss_add(struct acpicpu_tstate *,
+ ACPI_OBJECT *);
+static ACPI_STATUS acpicpu_tstate_ptc(struct acpicpu_softc *);
+static ACPI_STATUS acpicpu_tstate_fadt(struct acpicpu_softc *);
+static ACPI_STATUS acpicpu_tstate_change(struct acpicpu_softc *);
+
+void
+acpicpu_tstate_attach(device_t self)
+{
+ struct acpicpu_softc *sc = device_private(self);
+ const char *str;
+ ACPI_STATUS rv;
+
+ /*
+ * If either _TSS, _PTC, or _TPC is not
+ * available, we have to resort to FADT.
+ */
+ rv = acpicpu_tstate_tss(sc);
+
+ if (ACPI_FAILURE(rv)) {
+ str = "_TSS";
+ goto out;
+ }
+
+ rv = acpicpu_tstate_ptc(sc);
+
+ if (ACPI_FAILURE(rv)) {
+ str = "_PTC";
+ goto out;
+ }
+
+ rv = acpicpu_tstate_change(sc);
+
+ if (ACPI_FAILURE(rv)) {
+ str = "_TPC";
+ goto out;
+ }
+
+out:
+ if (ACPI_FAILURE(rv)) {
+
+ if (rv != AE_NOT_FOUND)
+ aprint_error_dev(sc->sc_dev, "failed to evaluate "
+ "%s: %s\n", str, AcpiFormatException(rv));
+
+ rv = acpicpu_tstate_fadt(sc);
+
+ if (ACPI_FAILURE(rv))
+ return;
+
+ sc->sc_flags |= ACPICPU_FLAG_T_FADT;
+ }
+
+ sc->sc_flags |= ACPICPU_FLAG_T;
+
+ acpicpu_tstate_attach_evcnt(sc);
+ acpicpu_tstate_attach_print(sc);
+}
+
+static void
+acpicpu_tstate_attach_print(struct acpicpu_softc *sc)
+{
+ const uint8_t method = sc->sc_tstate_control.reg_spaceid;
+ struct acpicpu_tstate *ts;
+ static bool once = false;
+ const char *str;
+ uint32_t i;
+
+ if (once != false)
+ return;
+
+ str = (method != ACPI_ADR_SPACE_FIXED_HARDWARE) ? "I/O" : "FFH";
+
+ for (i = 0; i < sc->sc_tstate_count; i++) {
+
+ ts = &sc->sc_tstate[i];
+
+ if (ts->ts_percent == 0)
+ continue;
+
+ aprint_debug_dev(sc->sc_dev, "T%u: %3s, "
+ "lat %3u us, pow %5u mW, %3u %%\n", i, str,
+ ts->ts_latency, ts->ts_power, ts->ts_percent);
+ }
+
+ once = true;
+}
+
+static void
+acpicpu_tstate_attach_evcnt(struct acpicpu_softc *sc)
+{
+ struct acpicpu_tstate *ts;
+ uint32_t i;
+
+ for (i = 0; i < sc->sc_tstate_count; i++) {
+
+ ts = &sc->sc_tstate[i];
+
+ if (ts->ts_percent == 0)
+ continue;
+
+ (void)snprintf(ts->ts_name, sizeof(ts->ts_name),
+ "T%u (%u %%)", i, ts->ts_percent);
+
+ evcnt_attach_dynamic(&ts->ts_evcnt, EVCNT_TYPE_MISC,
+ NULL, device_xname(sc->sc_dev), ts->ts_name);
+ }
+}
+
+int
+acpicpu_tstate_detach(device_t self)
+{
+ struct acpicpu_softc *sc = device_private(self);
+ size_t size;
+
+ if ((sc->sc_flags & ACPICPU_FLAG_T) == 0)
+ return 0;
+
+ size = sc->sc_tstate_count * sizeof(*sc->sc_tstate);
+
+ if (sc->sc_tstate != NULL)
+ kmem_free(sc->sc_tstate, size);
+
+ sc->sc_flags &= ~ACPICPU_FLAG_T;
+ acpicpu_tstate_detach_evcnt(sc);
+
+ return 0;
+}
+
+static void
+acpicpu_tstate_detach_evcnt(struct acpicpu_softc *sc)
+{
+ struct acpicpu_tstate *ts;
+ uint32_t i;
+
+ for (i = 0; i < sc->sc_tstate_count; i++) {
+
+ ts = &sc->sc_tstate[i];
+
+ if (ts->ts_percent != 0)
+ evcnt_detach(&ts->ts_evcnt);
+ }
+}
+
+int
+acpicpu_tstate_start(device_t self)
+{
+
+ return 0;
+}
+
+bool
+acpicpu_tstate_suspend(device_t self)
+{
+
+ return true;
+}
+
+bool
+acpicpu_tstate_resume(device_t self)
+{
+ static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_tstate_callback;
+ struct acpicpu_softc *sc = device_private(self);
+
+ (void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
+
+ return true;
+}
+
+void
+acpicpu_tstate_callback(void *aux)
+{
+ struct acpicpu_softc *sc;
+ device_t self = aux;
+ uint32_t omax, omin;
+ int i;
+
+ sc = device_private(self);
+
+ if ((sc->sc_flags & ACPICPU_FLAG_T_FADT) != 0)
+ return;
+
+ mutex_enter(&sc->sc_mtx);
+
+ /*
+ * If P-states are in use, we should ignore
+ * the interrupt unless we are in the highest
+ * P-state (see ACPI 4.0, section 8.4.3.3).
+ */
+ if ((sc->sc_flags & ACPICPU_FLAG_P) != 0) {
+
+ for (i = sc->sc_pstate_count - 1; i >= 0; i--) {
+
+ if (sc->sc_pstate[i].ps_freq != 0)
+ break;
+ }
+
+ if (sc->sc_pstate_current != sc->sc_pstate[i].ps_freq) {
+ mutex_exit(&sc->sc_mtx);
+ return;
+ }
+ }
+
+ omax = sc->sc_tstate_max;
+ omin = sc->sc_tstate_min;
+
+ (void)acpicpu_tstate_change(sc);
+
+ if (omax != sc->sc_tstate_max || omin != sc->sc_tstate_min) {
+
+ aprint_debug_dev(sc->sc_dev, "throttling window "
+ "changed from %u-%u %% to %u-%u %%\n",
+ sc->sc_tstate[omax].ts_percent,
+ sc->sc_tstate[omin].ts_percent,
+ sc->sc_tstate[sc->sc_tstate_max].ts_percent,
+ sc->sc_tstate[sc->sc_tstate_min].ts_percent);
+ }
+
+ mutex_exit(&sc->sc_mtx);
+}
+
+static ACPI_STATUS
+acpicpu_tstate_tss(struct acpicpu_softc *sc)
+{
+ struct acpicpu_tstate *ts;
+ ACPI_OBJECT *obj;
+ ACPI_BUFFER buf;
+ ACPI_STATUS rv;
+ uint32_t count;
+ uint32_t i, j;
+
+ rv = acpi_eval_struct(sc->sc_node->ad_handle, "_TSS", &buf);
+
+ if (ACPI_FAILURE(rv))
+ return rv;
+
+ obj = buf.Pointer;
+
+ if (obj->Type != ACPI_TYPE_PACKAGE) {
+ rv = AE_TYPE;
+ goto out;
+ }
+
+ sc->sc_tstate_count = obj->Package.Count;
+
+ if (sc->sc_tstate_count == 0) {
+ rv = AE_NOT_EXIST;
+ goto out;
+ }
+
+ if (sc->sc_tstate_count > ACPICPU_T_STATE_MAX) {
+ rv = AE_LIMIT;
+ goto out;
+ }
+
+ sc->sc_tstate = kmem_zalloc(sc->sc_tstate_count *
+ sizeof(struct acpicpu_tstate), KM_SLEEP);
+
+ if (sc->sc_tstate == NULL) {
+ rv = AE_NO_MEMORY;
+ goto out;
+ }
+
+ for (count = i = 0; i < sc->sc_tstate_count; i++) {
+
+ ts = &sc->sc_tstate[i];
+ rv = acpicpu_tstate_tss_add(ts, &obj->Package.Elements[i]);
+
+ if (ACPI_FAILURE(rv)) {
+ ts->ts_percent = 0;
+ continue;
+ }
+
+ for (j = 0; j < i; j++) {
+
+ if (ts->ts_percent >= sc->sc_tstate[j].ts_percent) {
+ ts->ts_percent = 0;
+ break;
+ }
+ }
+
+ if (ts->ts_percent != 0)
+ count++;
+ }
+
+ if (count == 0) {
+ rv = AE_NOT_EXIST;
+ goto out;
+ }
+
+ /*
+ * There must be an entry with the percent
+ * field of 100. If this is not true, and if
+ * this entry is not in the expected index,
+ * invalidate the use of T-states via _TSS.
+ */
+ if (sc->sc_tstate[0].ts_percent != 100) {
+ rv = AE_BAD_DECIMAL_CONSTANT;
+ goto out;
+ }
+
+ /*
+ * The first entry with 100 % duty cycle
+ * should have zero in the control field.
+ */
+ if (sc->sc_tstate[0].ts_control != 0) {
+ rv = AE_AML_BAD_RESOURCE_VALUE;
+ goto out;
+ }
+
+out:
+ if (buf.Pointer != NULL)
+ ACPI_FREE(buf.Pointer);
+
+ return rv;
+}
+
+static ACPI_STATUS
+acpicpu_tstate_tss_add(struct acpicpu_tstate *ts, ACPI_OBJECT *obj)
+{
+ ACPI_OBJECT *elm;
+ uint32_t val[5];
+ uint32_t *p;
+ int i;
+
+ if (obj->Type != ACPI_TYPE_PACKAGE)
+ return AE_TYPE;
+
+ if (obj->Package.Count != 5)
+ return AE_BAD_DATA;
+
+ elm = obj->Package.Elements;
+
+ for (i = 0; i < 5; i++) {
+
+ if (elm[i].Type != ACPI_TYPE_INTEGER)
+ return AE_TYPE;
+
+ if (elm[i].Integer.Value > UINT32_MAX)
+ return AE_AML_NUMERIC_OVERFLOW;
+
+ val[i] = elm[i].Integer.Value;
+ }
+
+ CTASSERT(sizeof(val) == sizeof(struct acpicpu_tstate) -
+ offsetof(struct acpicpu_tstate, ts_percent));
+
+ p = &ts->ts_percent;
+
+ for (i = 0; i < 5; i++, p++)
+ *p = val[i];
+
+ if (ts->ts_percent < 1 || ts->ts_percent > 100)
+ return AE_BAD_DECIMAL_CONSTANT;
+
+ if (ts->ts_latency < 1)
+ ts->ts_latency = 1;
+
+ return AE_OK;
+}
+
+ACPI_STATUS
+acpicpu_tstate_ptc(struct acpicpu_softc *sc)
+{
+ static const size_t size = sizeof(struct acpicpu_reg);
+ struct acpicpu_reg *reg[2];
+ ACPI_OBJECT *elm, *obj;
+ ACPI_BUFFER buf;
+ ACPI_STATUS rv;
+ int i;
+
+ rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PTC", &buf);
+
+ if (ACPI_FAILURE(rv))
+ return rv;
+
+ obj = buf.Pointer;
+
+ if (obj->Type != ACPI_TYPE_PACKAGE) {
+ rv = AE_TYPE;
+ goto out;
+ }
+
+ if (obj->Package.Count != 2) {
+ rv = AE_LIMIT;
+ goto out;
+ }
+
+ for (i = 0; i < 2; i++) {
+
+ elm = &obj->Package.Elements[i];
+
+ if (elm->Type != ACPI_TYPE_BUFFER) {
+ rv = AE_TYPE;
+ goto out;
+ }
+
+ if (size > elm->Buffer.Length) {
+ rv = AE_AML_BAD_RESOURCE_LENGTH;
+ goto out;
+ }
+
+ reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer;
+
+ switch (reg[i]->reg_spaceid) {
+
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+
+ if (reg[i]->reg_addr == 0) {
+ rv = AE_AML_ILLEGAL_ADDRESS;
+ goto out;
+ }
+
+ /*
+ * Check that the values match the IA32 clock
+ * modulation MSR, where the bit 0 is reserved,
+ * bits 1 through 3 define the duty cycle, and
+ * the fourth bit enables the modulation.
+ */
+ if (reg[i]->reg_bitwidth != 4) {
+ rv = AE_AML_BAD_RESOURCE_VALUE;
+ goto out;
+ }
+
+ if (reg[i]->reg_bitoffset != 1) {
+ rv = AE_AML_BAD_RESOURCE_VALUE;
+ goto out;
+ }
+
+ break;
+
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+
+ if ((sc->sc_flags & ACPICPU_FLAG_T_FFH) == 0) {
+ rv = AE_SUPPORT;
+ goto out;
+ }
+
+ break;
+
+ default:
+ rv = AE_AML_INVALID_SPACE_ID;
+ goto out;
+ }
+ }
+
+ if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) {
+ rv = AE_AML_INVALID_SPACE_ID;
+ goto out;
+ }
+
+ (void)memcpy(&sc->sc_tstate_control, reg[0], size);
+ (void)memcpy(&sc->sc_tstate_status, reg[1], size);
+
+out:
+ if (buf.Pointer != NULL)
+ ACPI_FREE(buf.Pointer);
+
+ return rv;
+}
+
+static ACPI_STATUS
+acpicpu_tstate_fadt(struct acpicpu_softc *sc)
+{
+ static const size_t size = sizeof(struct acpicpu_tstate);
+ const uint8_t offset = AcpiGbl_FADT.DutyOffset;
+ const uint8_t width = AcpiGbl_FADT.DutyWidth;
+ uint8_t beta, count, i;
+
+ if (sc->sc_object.ao_pblkaddr == 0)
+ return AE_AML_ILLEGAL_ADDRESS;
+
+ if (width == 0 || width + offset > 4)
+ return AE_AML_BAD_RESOURCE_VALUE;
+
+ count = 1 << width;
+
+ if (count > ACPICPU_T_STATE_MAX)
+ return AE_LIMIT;
+
+ if (sc->sc_tstate != NULL)
+ kmem_free(sc->sc_tstate, sc->sc_tstate_count * size);
+
+ sc->sc_tstate = kmem_zalloc(count * size, KM_SLEEP);
+
+ if (sc->sc_tstate == NULL)
+ return ENOMEM;
+
+ sc->sc_tstate_count = count;
+
+ /*
+ * Approximate duty cycles and set the MSR values.
+ */
+ for (beta = 100 / count, i = 0; i < count; i++) {
+ sc->sc_tstate[i].ts_percent = 100 - beta * i;
+ sc->sc_tstate[i].ts_latency = 1;
+ }
+
+ for (i = 1; i < count; i++)
+ sc->sc_tstate[i].ts_control = (count - i) | __BIT(3);
+
+ /*
+ * Fake values for THROTTLE_CTLR.
+ */
+ sc->sc_tstate_control.reg_bitwidth = width;
+ sc->sc_tstate_control.reg_bitoffset = offset;
+ sc->sc_tstate_control.reg_spaceid = ACPI_ADR_SPACE_FADT;
+
+ CTASSERT(ACPI_ADR_SPACE_FADT > ACPI_ADR_SPACE_FIXED_HARDWARE);
+
+ return AE_OK;
+}
+
+static ACPI_STATUS
+acpicpu_tstate_change(struct acpicpu_softc *sc)
+{
+ ACPI_INTEGER val;
+ ACPI_STATUS rv;
+
+ sc->sc_tstate_max = 0;
+ sc->sc_tstate_min = sc->sc_tstate_count - 1;
+
+ /*
+ * Evaluate the available T-state window:
+ *
+ * _TPC : either this maximum or any lower power
+ * (i.e. higher numbered) state may be used.
+ *
+ * _TDL : either this minimum or any higher power
+ * (i.e. lower numbered) state may be used.
+ *
+ * _TDL >= _TPC || _TDL >= _TSS[last entry].
+ */
+ rv = acpi_eval_integer(sc->sc_node->ad_handle, "_TPC", &val);
+
+ if (ACPI_FAILURE(rv))
+ return rv;
+
+ if (val < sc->sc_tstate_count) {
+
+ if (sc->sc_tstate[val].ts_percent != 0)
+ sc->sc_tstate_max = val;
+ }
+
+ rv = acpi_eval_integer(sc->sc_node->ad_handle, "_TDL", &val);
+
+ if (ACPI_SUCCESS(rv) && val < sc->sc_tstate_count) {
+
+ if (val >= sc->sc_tstate_max &&
+ sc->sc_tstate[val].ts_percent != 0)
+ sc->sc_tstate_min = val;
+ }
+
+ return AE_OK;
+}
+
+int
+acpicpu_tstate_get(struct acpicpu_softc *sc, uint32_t *percent)
+{
+ const uint8_t method = sc->sc_tstate_control.reg_spaceid;
+ struct acpicpu_tstate *ts = NULL;
+ uint32_t i, val = 0;
+ uint8_t offset;
+ uint64_t addr;
+ int rv;
+
+ if (sc->sc_cold != false) {
+ rv = EBUSY;
+ goto fail;
+ }
+
+ if ((sc->sc_flags & ACPICPU_FLAG_T) == 0) {
+ rv = ENODEV;
+ goto fail;
+ }
+
+ mutex_enter(&sc->sc_mtx);
+
+ if (sc->sc_tstate_current != ACPICPU_T_STATE_UNKNOWN) {
+ *percent = sc->sc_tstate_current;
+ mutex_exit(&sc->sc_mtx);
+ return 0;
+ }
+
+ mutex_exit(&sc->sc_mtx);
+
+ switch (method) {
+
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+
+ rv = acpicpu_md_tstate_get(sc, percent);
+
+ if (rv != 0)
+ goto fail;
+
+ break;
+
+ case ACPI_ADR_SPACE_FADT:
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+
+ addr = sc->sc_tstate_status.reg_addr;
+ offset = sc->sc_tstate_status.reg_bitoffset;
+
+ if (method == ACPI_ADR_SPACE_FADT)
+ addr = sc->sc_object.ao_pblkaddr;
+
+ (void)AcpiOsReadPort(addr, &val, 8);
+
+ val = (val >> offset) & 0x0F;
+
+ mutex_enter(&sc->sc_mtx);
+
+ for (i = 0; i < sc->sc_tstate_count; i++) {
+
+ if (sc->sc_tstate[i].ts_percent == 0)
+ continue;
+
+ /*
+ * As the status field may be zero, compare
+ * against the control field value as well.
+ */
+ if (val == sc->sc_tstate[i].ts_control) {
+ ts = &sc->sc_tstate[i];
+ break;
+ }
+
+ if (val == sc->sc_tstate[i].ts_status) {
+ ts = &sc->sc_tstate[i];
+ break;
+ }
+ }
+
+ mutex_exit(&sc->sc_mtx);
+
+ if (__predict_false(ts == NULL)) {
+ rv = EIO;
+ goto fail;
+ }
+
+ *percent = ts->ts_percent;
+ break;
+
+ default:
+ rv = ENOTTY;
+ goto fail;
+ }
+
+ mutex_enter(&sc->sc_mtx);
+ sc->sc_tstate_current = *percent;
+ mutex_exit(&sc->sc_mtx);
+
+ return 0;
+
+fail:
+ aprint_error_dev(sc->sc_dev, "failed "
+ "to get T-state (err %d)\n", rv);
+
+ mutex_enter(&sc->sc_mtx);
+ *percent = sc->sc_tstate_current = ACPICPU_T_STATE_UNKNOWN;
+ mutex_exit(&sc->sc_mtx);
+
+ return rv;
+}
+
+int
+acpicpu_tstate_set(struct acpicpu_softc *sc, uint32_t percent)
+{
+ const uint8_t method = sc->sc_tstate_control.reg_spaceid;
+ struct acpicpu_tstate *ts = NULL;
+ uint32_t i, val;
+ uint8_t offset;
+ uint64_t addr;
+ int rv;
+
+ if (sc->sc_cold != false) {
+ rv = EBUSY;
+ goto fail;
+ }
+
+ if ((sc->sc_flags & ACPICPU_FLAG_T) == 0) {
+ rv = ENODEV;
+ goto fail;
+ }
+
+ mutex_enter(&sc->sc_mtx);
+
+ for (i = sc->sc_tstate_max; i <= sc->sc_tstate_min; i++) {
+
+ if (sc->sc_tstate[i].ts_percent == 0)
+ continue;
+
+ if (sc->sc_tstate[i].ts_percent == percent) {
+ ts = &sc->sc_tstate[i];
+ break;
+ }
+ }
+
+ mutex_exit(&sc->sc_mtx);
+
+ if (__predict_false(ts == NULL)) {
+ rv = EINVAL;
+ goto fail;
+ }
+
+ switch (method) {
+
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+
+ rv = acpicpu_md_tstate_set(ts);
+
+ if (rv != 0)
+ goto fail;
+
+ break;
+
+ case ACPI_ADR_SPACE_FADT:
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+
+ addr = sc->sc_tstate_control.reg_addr;
+ offset = sc->sc_tstate_control.reg_bitoffset;
+
+ if (method == ACPI_ADR_SPACE_FADT)
+ addr = sc->sc_object.ao_pblkaddr;
+
+ val = (ts->ts_control & 0x0F) << offset;
+
+ if (ts->ts_percent != 100 && (val & __BIT(4)) == 0) {
+ rv = EINVAL;
+ goto fail;
+ }
+
+ (void)AcpiOsWritePort(addr, val, 8);
+
+ /*
+ * If the status field is zero, the transition is
+ * specified to be "asynchronous" and there is no
+ * need to check the status (ACPI 4.0, 8.4.3.2).
+ */
+ if (ts->ts_status == 0)
+ break;
+
+ addr = sc->sc_tstate_status.reg_addr;
+ offset = sc->sc_tstate_status.reg_bitoffset;
+
+ if (method == ACPI_ADR_SPACE_FADT)
+ addr = sc->sc_object.ao_pblkaddr;
+
+ for (i = val = 0; i < ACPICPU_T_STATE_RETRY; i++) {
+
+ (void)AcpiOsReadPort(addr, &val, 8);
+
+ val = (val >> offset) & 0x0F;
+
+ if (val == ts->ts_status)
+ break;
+
+ DELAY(ts->ts_latency);
+ }
+
+ if (i == ACPICPU_T_STATE_RETRY) {
+ rv = EAGAIN;
+ goto fail;
+ }
+
+ break;
+
+ default:
+ rv = ENOTTY;
+ goto fail;
+ }
+
+ ts->ts_evcnt.ev_count++;
+
+ mutex_enter(&sc->sc_mtx);
+ sc->sc_tstate_current = percent;
+ mutex_exit(&sc->sc_mtx);
+
+ return 0;
+
+fail:
+ aprint_error_dev(sc->sc_dev, "failed to "
+ "throttle to %u %% (err %d)\n", percent, rv);
+
+ mutex_enter(&sc->sc_mtx);
+ sc->sc_tstate_current = ACPICPU_T_STATE_UNKNOWN;
+ mutex_exit(&sc->sc_mtx);
+
+ return rv;
+}