/drivers/sensors/amd_nb.c |
---|
0,0 → 1,580 |
// SPDX-License-Identifier: GPL-2.0-only |
/* |
* Shared support code for AMD K8 northbridges and derivatives. |
* Copyright 2006 Andi Kleen, SUSE Labs. |
*/ |
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
#include <linux/types.h> |
#include <linux/slab.h> |
#include <linux/init.h> |
#include <linux/errno.h> |
#include <linux/export.h> |
#include <linux/spinlock.h> |
#include <linux/pci_ids.h> |
#include <asm/amd_nb.h> |
#include <asm/msr.h> |
#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450 |
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0 |
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480 |
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630 |
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464 |
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec |
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494 |
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c |
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444 |
#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654 |
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e |
#ifndef topology_die_id |
#define topology_die_id(cpu) ((void)(cpu), -1) |
#endif |
const struct pci_device_id *pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev) |
{ |
if ((id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) && |
(id->device == PCI_ANY_ID || id->device == dev->device) && |
(id->subvendor == PCI_ANY_ID || id->subvendor == dev->subsystem_vendor) && |
(id->subdevice == PCI_ANY_ID || id->subdevice == dev->subsystem_device) && |
!((id->class ^ dev->class) & id->class_mask)) |
return id; |
return NULL; |
} |
const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, |
struct pci_dev *dev) |
{ |
if (ids) { |
while (ids->vendor || ids->subvendor || ids->class_mask) { |
if (pci_match_one_device(ids, dev)) |
return ids; |
ids++; |
} |
} |
return NULL; |
} |
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

/* Per-node GART flush words cached by amd_cache_gart(). */
static u32 *flush_words;

/* PCIe root complexes carrying the SMN index/data register pair. */
static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704

/* Northbridge/Data-Fabric function 3 ("misc") devices, one per node. */
static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{}
};

/* Northbridge/Data-Fabric function 4 ("link") devices, one per node. */
static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

/* Hygon Dhyana reuses the AMD family 17h device IDs with its own vendor. */
static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

/* Bus/device ranges where northbridge devices live.
 * NOTE(review): field order assumed {bus, dev_base, dev_limit} — confirm
 * against the struct definition in <asm/amd_nb.h>. */
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};
/* Singleton cache of discovered northbridges, filled by
 * amd_cache_northbridges(); zero-initialized until then. */
static struct amd_northbridge_info amd_northbridges;

/* Number of cached northbridges (0 before amd_cache_northbridges()). */
u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);
bool amd_nb_has_feature(unsigned int feature) |
{ |
return ((amd_northbridges.flags & feature) == feature); |
} |
EXPORT_SYMBOL_GPL(amd_nb_has_feature); |
struct amd_northbridge *node_to_amd_nb(int node) |
{ |
return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL; |
} |
EXPORT_SYMBOL_GPL(node_to_amd_nb); |
static struct pci_dev *next_northbridge(struct pci_dev *dev, |
const struct pci_device_id *ids) |
{ |
do { |
dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); |
if (!dev) |
break; |
} while (!pci_match_id(ids, dev)); |
return dev; |
} |
/*
 * Read or write a 32-bit System Management Network (SMN) register via
 * the indirect index/data pair at config offsets 0x60/0x64 of the
 * node's root device.  Serialized by smn_mutex.
 *
 * @node:    northbridge index
 * @address: SMN register address
 * @value:   in (write) or out (read) parameter
 * @write:   true to write, false to read
 *
 * Returns 0 on success, -ENODEV when the node or its root device is
 * missing, otherwise the PCI config access error.
 *
 * Fix: removed a block of commented-out debug printk code.
 */
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
/* Read SMN register @address on @node into *@value; see __amd_smn_rw(). */
int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);
/* Write @value to SMN register @address on @node; see __amd_smn_rw(). */
int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
/*
 * Read the low 32 bits of a Data Fabric register indirectly through the
 * node's F4 ("link") device.  Serialized by smn_mutex.
 * Returns 0 on success, -ENODEV if the node/F4 is missing, otherwise
 * the PCI config access error.
 */
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

	/* Bit 0 selects instance access; bits 2-9 carry the dword-aligned
	 * register offset, bits 11-13 the function, bits 16-23 the
	 * instance id. */
	ficaa = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);
/*
 * Enumerate all northbridge PCI devices (root, misc/F3, link/F4) and
 * cache them in amd_northbridges, then derive the feature flags (GART,
 * L3 index disable, L3 partitioning).
 *
 * Idempotent: returns 0 immediately if the cache is already populated.
 * Returns -ENODEV when no misc device is found or the root/misc device
 * counts are inconsistent, -ENOMEM on allocation failure.
 */
int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	/* Hygon parts use the same device IDs under their own vendor. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	/* One misc (F3) device exists per node: count them first. */
	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)) != NULL)
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	/* Re-walk all three lists in lockstep, one triple per node. */
	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant. N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 * NOTE(review): the stepping part of the condition is commented
	 * out, so model 0x8/0x9 parts never get the flag regardless of
	 * stepping — confirm this is intended.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9/* ||
	      boot_cpu_data.x86_stepping >= 0x1*/))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
/* |
* Ignores subdevice/subvendor but as far as I can figure out |
* they're useless anyways |
*/ |
/*
 * Early (pre-PCI-cache) test whether a combined vendor/device word
 * belongs to an AMD/Hygon northbridge misc (F3) device.
 * @device packs the vendor id in its low 16 bits and the device id in
 * its high 16 bits.  Subvendor/subdevice are deliberately ignored.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *ids = amd_nb_misc_ids;
	const struct pci_device_id *p;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		ids = hygon_nb_misc_ids;

	device >>= 16;

	for (p = ids; p->vendor; p++) {
		if (p->vendor == vendor && p->device == device)
			return true;
	}

	return false;
}
/*
 * Derive the MMCONFIG (memory-mapped PCI config) aperture from
 * MSR_FAM10H_MMIO_CONF_BASE and fill it into @res.
 * Returns @res on success, NULL when the CPU is not an AMD/Hygon
 * family 10h+ part or MMCONFIG is disabled.
 */
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	/* 1 MB (2^20 bytes) of config space per bus. */
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}
int amd_get_subcaches(int cpu) |
{ |
struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link; |
unsigned int mask; |
if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) |
return 0; |
pci_read_config_dword(link, 0x1d4, &mask); |
return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf; |
} |
int amd_set_subcaches(int cpu, unsigned long mask) |
{ |
static unsigned int reset, ban; |
struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu)); |
unsigned int reg; |
int cuid; |
if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf) |
return -EINVAL; |
/* if necessary, collect reset state of L3 partitioning and BAN mode */ |
if (reset == 0) { |
pci_read_config_dword(nb->link, 0x1d4, &reset); |
pci_read_config_dword(nb->misc, 0x1b8, &ban); |
ban &= 0x180000; |
} |
/* deactivate BAN mode if any subcaches are to be disabled */ |
if (mask != 0xf) { |
pci_read_config_dword(nb->misc, 0x1b8, ®); |
pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000); |
} |
cuid = cpu_data(cpu).cpu_core_id; |
mask <<= 4 * cuid; |
mask |= (0xf ^ (1 << cuid)) << 26; |
pci_write_config_dword(nb->link, 0x1d4, mask); |
/* reset BAN mode if L3 partitioning returned to reset state */ |
pci_read_config_dword(nb->link, 0x1d4, ®); |
if (reg == reset) { |
pci_read_config_dword(nb->misc, 0x1b8, ®); |
reg &= ~0x180000; |
pci_write_config_dword(nb->misc, 0x1b8, reg | ban); |
} |
return 0; |
} |
/*
 * Cache each node's GART flush word (misc/F3 register 0x9c) so
 * amd_flush_garts() can rewrite it with the flush bit set.
 * On allocation failure GART support is disabled instead of failing.
 */
static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}
/*
 * Flush the GART TLB on every node: write each node's cached flush
 * word with bit 0 set, then spin until the hardware clears bit 0 to
 * confirm completion.  Serialized by a local spinlock.
 */
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush*/
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
/*
 * Per-CPU part of the erratum 688 workaround (sets two bits in the
 * IC_CFG MSR on the upstream version of this code).
 * NOTE(review): both msr_set_bit() calls are commented out, so this is
 * currently a no-op — presumably msr_set_bit() is unavailable in this
 * port.  fix_erratum_688() still logs that the workaround was applied;
 * confirm this is intended.
 */
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

//	msr_set_bit(MSR_AMD64_IC_CFG, 3);
//	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}
/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	/* Erratum 688 applies to family 0x14 only. */
	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	/* Bit 2 set presumably means the BIOS already applied the fix —
	 * confirm against the family 14h BKDG. */
	if (val & BIT(2))
		return;

	/* NOTE(review): __fix_erratum_688() is a no-op in this port (its
	 * MSR writes are commented out), so the message below overstates
	 * what happened. */
	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}
/*
 * One-shot initialization: cache the northbridges, the GART flush
 * words, and apply the erratum 688 workaround.  Helper return values
 * are intentionally ignored; each helper degrades gracefully.
 */
__init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();
	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
/* NOTE(review): the initcall registration is commented out, so nothing
 * in this file runs automatically — confirm the port calls
 * init_amd_nbs() explicitly. */
//fs_initcall(init_amd_nbs);
/drivers/sensors/coretmp/Makefile |
---|
0,0 → 1,42 |
# Toolchain for building native KolibriOS drivers.
CC = kos32-gcc
LD = kos32-ld
KPACK = kpack

DDK_TOPDIR = ../../ddk
DRV_INCLUDES = ../../include

INCLUDES = -I$(DRV_INCLUDES) \
	-I$(DRV_INCLUDES)/asm \
	-I$(DRV_INCLUDES)/uapi \
	-I$(DRV_INCLUDES)/drm

NAME=coretemp

# Kernel-style configuration switches expected by the imported sources.
DEFINES = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DMI -DCONFIG_TINY_RCU
DEFINES+= -DCONFIG_X86_L1_CACHE_SHIFT=6 -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE
DEFINES+= -DCONFIG_PRINTK

CFLAGS= -c -O2 $(DEFINES) $(INCLUDES) -march=i686 -fno-ident -msse2 -fomit-frame-pointer -fno-builtin-printf
CFLAGS+= -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields

LIBPATH = -L $(DDK_TOPDIR)
LIBPATH+= -L ../../../contrib/sdk/lib

LIBS:= -lddk -lcore -lgcc

# PE header values required by the KolibriOS loader.
PE_FLAGS = --major-os-version 0 --minor-os-version 7 --major-subsystem-version 0 \
	--minor-subsystem-version 5 --subsystem native

LDFLAGS = -nostdlib -shared -s $(PE_FLAGS) --image-base 0\
	--file-alignment 512 --section-alignment 4096

all: $(NAME).dll

# Compile, link against the driver linker script, then compress in place.
$(NAME).dll:
	$(CC) $(CFLAGS) coretemp.c
	$(LD) $(LIBPATH) $(LDFLAGS) -T ../drv.lds coretemp.o -o $@ $(NAME_OBJS) $(LIBS)
	$(KPACK) $(NAME).dll

clean:
	rm -rf *.o *.dll
/drivers/sensors/coretmp/coretemp.c |
---|
0,0 → 1,507 |
#include <ddk.h> |
#include <syscall.h> |
#include <pci.h> |
#define CPUID_VENDOR_LENGTH	3	/* 3 GPRs hold vendor ID */
#define CPUID_VENDOR_STR_LENGTH	(CPUID_VENDOR_LENGTH * sizeof(uint32_t) + 1)
#define CPUID_BRAND_LENGTH	12	/* 12 GPRs hold the brand string */
#define CPUID_BRAND_STR_LENGTH	(CPUID_BRAND_LENGTH * sizeof(uint32_t) + 1)

/* Raw cache information (AMD bit layout from CPUID 0x80000005/6). */
typedef union {
	unsigned char ch[48];
	uint32_t uint[12];
	struct {
		uint32_t fill1:24;	/* Bit 0 */
		uint32_t l1_i_sz:8;
		uint32_t fill2:24;
		uint32_t l1_d_sz:8;
		uint32_t fill3:16;
		uint32_t l2_sz:16;
		uint32_t fill4:18;
		uint32_t l3_sz:14;
		uint32_t fill5[8];
	} amd;
} cpuid_cache_info_t;

/* Typedef for storing the CPUID Vendor String */
typedef union {
	/* Note: the extra byte in the char array is for '\0'. */
	char char_array[CPUID_VENDOR_STR_LENGTH];
	uint32_t uint32_array[CPUID_VENDOR_LENGTH];
} cpuid_vendor_string_t;

/* Typedef for storing the CPUID Brand String */
typedef union {
	/* Note: the extra byte in the char array is for '\0'. */
	char char_array[CPUID_BRAND_STR_LENGTH];
	uint32_t uint32_array[CPUID_BRAND_LENGTH];
} cpuid_brand_string_t;

/* Typedef for storing CPUID Version (EAX of leaf 1) */
typedef union {
	uint32_t flat;
	struct {
		uint32_t stepping:4;	/* Bit 0 */
		uint32_t model:4;
		uint32_t family:4;
		uint32_t processorType:2;
		uint32_t reserved1514:2;
		uint32_t extendedModel:4;
		uint32_t extendedFamily:8;
		uint32_t reserved3128:4;	/* Bit 31 */
	} bits;
} cpuid_version_t;

/* Typedef for storing CPUID Processor Information (EBX of leaf 1) */
typedef union {
	uint32_t flat;
	struct {
		uint32_t brandIndex:8;	/* Bit 0 */
		uint32_t cflushLineSize:8;
		uint32_t logicalProcessorCount:8;
		uint32_t apicID:8;	/* Bit 31 */
	} bits;
} cpuid_custom_features_placeholder_doc_removed_t_unused;	/* see note below */

/* NOTE(review): the union above is transcribed below with its original
 * name; the custom-features union currently carries a single unnamed
 * bit and appears unused. */
typedef union {
	uint32_t flat;
	struct {
		uint32_t brandIndex:8;	/* Bit 0 */
		uint32_t cflushLineSize:8;
		uint32_t logicalProcessorCount:8;
		uint32_t apicID:8;	/* Bit 31 */
	} bits;
} cpuid_proc_info_t;

/* Typedef for storing CPUID Feature flags */
typedef union {
	uint32_t flat;
	struct {
		uint32_t :1;
	} bits;
} cpuid_custom_features;

/* Typedef for storing CPUID Feature flags
 * (leaf 1 EDX, leaf 1 ECX, leaf 0x80000001 EDX, in that array order). */
typedef union {
	uint32_t uint32_array[3];
	struct {
		uint32_t fpu:1;	/* EDX feature flags, bit 0 */
		uint32_t vme:1;
		uint32_t de:1;
		uint32_t pse:1;
		uint32_t rdtsc:1;
		uint32_t msr:1;
		uint32_t pae:1;
		uint32_t mce:1;
		uint32_t cx8:1;
		uint32_t apic:1;
		uint32_t bit10:1;
		uint32_t sep:1;
		uint32_t mtrr:1;
		uint32_t pge:1;
		uint32_t mca:1;
		uint32_t cmov:1;
		uint32_t pat:1;
		uint32_t pse36:1;
		uint32_t psn:1;
		uint32_t cflush:1;
		uint32_t bit20:1;
		uint32_t ds:1;
		uint32_t acpi:1;
		uint32_t mmx:1;
		uint32_t fxsr:1;
		uint32_t sse:1;
		uint32_t sse2:1;
		uint32_t ss:1;
		uint32_t htt:1;
		uint32_t tm:1;
		uint32_t bit30:1;
		uint32_t pbe:1;	/* EDX feature flags, bit 31 */
		uint32_t sse3:1;	/* ECX feature flags, bit 0 */
		uint32_t mulq:1;
		uint32_t bit2:1;
		uint32_t mon:1;
		uint32_t dscpl:1;
		uint32_t vmx:1;
		uint32_t smx:1;
		uint32_t eist:1;
		uint32_t tm2:1;
		uint32_t bits_9_31:23;
		uint32_t bits0_28:29;	/* EDX extended feature flags, bit 0 */
		uint32_t lm:1;	/* Long Mode */
		uint32_t bits_30_31:2;	/* EDX extended feature flags, bit 32 */
	} bits;
} cpuid_feature_flags_t;

/* An overall structure to cache all of the CPUID information */
struct cpu_ident {
	uint32_t max_cpuid;
	uint32_t max_xcpuid;
	uint32_t dts_pmp;	/* leaf 6 EAX: thermal/power capability bits */
	cpuid_version_t vers;
	cpuid_proc_info_t info;
	cpuid_feature_flags_t fid;
	cpuid_vendor_string_t vend_id;
	cpuid_brand_string_t brand_id;
	cpuid_cache_info_t cache_info;
	cpuid_custom_features custom;
};

/* CPUID leaf 4 register layouts (cache topology). */
struct cpuid4_eax {
	uint32_t ctype:5;
	uint32_t level:3;
	uint32_t is_self_initializing:1;
	uint32_t is_fully_associative:1;
	uint32_t reserved:4;
	uint32_t num_threads_sharing:12;
	uint32_t num_cores_on_die:6;
};

struct cpuid4_ebx {
	uint32_t coherency_line_size:12;
	uint32_t physical_line_partition:10;
	uint32_t ways_of_associativity:10;
};

struct cpuid4_ecx {
	uint32_t number_of_sets:32;
};

/* Detected integrated memory controller type; 0 = unknown. */
unsigned imc_type=0;
/* Cached CPUID data, filled once by get_cpuid(). */
struct cpu_ident cpu_id;
/* Set when the CPU model is known to report bogus temperatures. */
bool temp_out_disable=false;

#define PCI_CONF_TYPE_NONE 0
#define PCI_CONF_TYPE_1    1
#define PCI_CONF_TYPE_2    2

/* NOTE(review): redundant — cpu_id is already defined above in this
 * translation unit; harmless, but the extern belongs in a header. */
extern struct cpu_ident cpu_id;

/* Configuration mechanism selected by pci_check_direct(). */
static unsigned char pci_conf_type = PCI_CONF_TYPE_NONE;

/* Mechanism #1 CONFIG_ADDRESS word (reg must be < 256). */
#define PCI_CONF1_ADDRESS(bus, dev, fn, reg) \
	(0x80000000 | (bus << 16) | (dev << 11) | (fn << 8) | (reg & ~3))
/* Mechanism #2 I/O-window address for (dev, reg). */
#define PCI_CONF2_ADDRESS(dev, reg)	(unsigned short)(0xC000 | (dev << 8) | reg)
/* Extended-register (4 KB config space) variant of mechanism #1. */
#define PCI_CONF3_ADDRESS(bus, dev, fn, reg) \
	(0x80000000 | (((reg >> 8) & 0xF) << 24) | (bus << 16) | ((dev & 0x1F) << 11) | (fn << 8) | (reg & 0xFF))
/*
 * Read a PCI configuration register using the mechanism selected by
 * pci_check_direct().
 *
 * @bus/@dev/@fn select the function, @reg the register offset, @len the
 * access width (1, 2 or 4 bytes); the result is stored in *@value.
 * Returns 0 on success, -1 on bad arguments, an unsupported width, or
 * when no configuration mechanism was detected (result stays -1 when
 * neither switch arm fires).
 */
int pci_conf_read(unsigned bus, unsigned dev, unsigned fn, unsigned reg, unsigned len, unsigned long *value)
{
	int result;

	/* Registers above 255 need the extended mechanism (type 1 only). */
	if (!value || (bus > 255) || (dev > 31) || (fn > 7) || (reg > 255 && pci_conf_type != PCI_CONF_TYPE_1))
		return -1;

	result = -1;

	switch(pci_conf_type) {
	case PCI_CONF_TYPE_1:
		if(reg < 256){
			outl(PCI_CONF1_ADDRESS(bus, dev, fn, reg), 0xCF8);
		}else{
			outl(PCI_CONF3_ADDRESS(bus, dev, fn, reg), 0xCF8);
		}
		/* Sub-dword reads come from the matching byte lane of 0xCFC. */
		switch(len) {
		case 1: *value = inb(0xCFC + (reg & 3)); result = 0; break;
		case 2: *value = inw(0xCFC + (reg & 2)); result = 0; break;
		case 4: *value = inl(0xCFC); result = 0; break;
		}
		break;
	case PCI_CONF_TYPE_2:
		/* Enable the function mapping, select the bus, access the
		 * device through the 0xC000-0xCFFF I/O window, disable. */
		outb(0xF0 | (fn << 1), 0xCF8);
		outb(bus, 0xCFA);
		switch(len) {
		case 1: *value = inb(PCI_CONF2_ADDRESS(dev, reg)); result = 0; break;
		case 2: *value = inw(PCI_CONF2_ADDRESS(dev, reg)); result = 0; break;
		case 4: *value = inl(PCI_CONF2_ADDRESS(dev, reg)); result = 0; break;
		}
		outb(0, 0xCF8);
		break;
	}

	return result;
}
/*
 * Identify the integrated memory controller generation from the cached
 * CPUID data and store a driver-private code in the global imc_type
 * (0x01xx = AMD families, 0x000x = Intel generations; 0 = unknown).
 * Also sets temp_out_disable for Atom models whose temperature readout
 * is known to be unusable.
 * Vendor is distinguished only by the first vendor-string character:
 * 'A' (AuthenticAMD) vs 'G' (GenuineIntel).
 */
void detect_imc(void)
{
	// Check AMD IMC
	if(cpu_id.vend_id.char_array[0] == 'A' && cpu_id.vers.bits.family == 0xF)
	{
		printk("extended family = %x\n", cpu_id.vers.bits.extendedFamily);
		switch(cpu_id.vers.bits.extendedFamily)
		{
		case 0x0:
			imc_type = 0x0100; // Old K8
			break;
		case 0x1:
		case 0x2:
			imc_type = 0x0101; // K10 (Family 10h & 11h)
			break;
		case 0x3:
			imc_type = 0x0102; // A-Series APU (Family 12h)
			break;
		case 0x5:
			imc_type = 0x0103; // C- / E- / Z- Series APU (Family 14h)
			break;
		case 0x6:
			imc_type = 0x0104; // FX Series (Family 15h)
			break;
		case 0x7:
			imc_type = 0x0105; // Kabini & related (Family 16h)
			break;
		}
		return;
	}

	// Check Intel IMC
	if(cpu_id.vend_id.char_array[0] == 'G' && cpu_id.vers.bits.family == 6 && cpu_id.vers.bits.extendedModel)
	{
		/* Dispatch on model, then extended model (full model is
		 * extendedModel:model). */
		switch(cpu_id.vers.bits.model)
		{
		case 0x5:
			if(cpu_id.vers.bits.extendedModel == 2) { imc_type = 0x0003; } // Core i3/i5 1st Gen 45 nm (NHM)
			if(cpu_id.vers.bits.extendedModel == 3) { temp_out_disable=true; } // Atom Clover Trail
			if(cpu_id.vers.bits.extendedModel == 4) { imc_type = 0x0007; } // HSW-ULT
			break;
		case 0x6:
			if(cpu_id.vers.bits.extendedModel == 3) {
				imc_type = 0x0009; // Atom Cedar Trail
				temp_out_disable=true;
				//v->fail_safe |= 4; // Disable Core temp
			}
			break;
		case 0xA:
			switch(cpu_id.vers.bits.extendedModel)
			{
			case 0x1:
				imc_type = 0x0001; // Core i7 1st Gen 45 nm (NHME)
				break;
			case 0x2:
				imc_type = 0x0004; // Core 2nd Gen (SNB)
				break;
			case 0x3:
				imc_type = 0x0006; // Core 3nd Gen (IVB)
				break;
			}
			break;
		case 0xC:
			switch(cpu_id.vers.bits.extendedModel)
			{
			case 0x1:
				if(cpu_id.vers.bits.stepping > 9) { imc_type = 0x0008; } // Atom PineView
				/* NOTE(review): temp_out_disable is set for ALL
				 * steppings here (only imc_type is gated on the
				 * stepping) — confirm that is intended. */
				//v->fail_safe |= 4; // Disable Core temp
				temp_out_disable=true;
				break;
			case 0x2:
				imc_type = 0x0002; // Core i7 1st Gen 32 nm (WMR)
				break;
			case 0x3:
				imc_type = 0x0007; // Core 4nd Gen (HSW)
				break;
			}
			break;
		case 0xD:
			imc_type = 0x0005; // SNB-E
			break;
		case 0xE:
			imc_type = 0x0001; // Core i7 1st Gen 45 nm (NHM)
			break;
		}
		//if(imc_type) { tsc_invariable = 1; }
		return;
	}
}
static int pci_check_direct(void) |
{ |
unsigned char tmpCFB; |
unsigned int tmpCF8; |
if (cpu_id.vend_id.char_array[0] == 'A' && cpu_id.vers.bits.family == 0xF) { |
pci_conf_type = PCI_CONF_TYPE_1; |
return 0; |
} else { |
/* Check if configuration type 1 works. */ |
pci_conf_type = PCI_CONF_TYPE_1; |
tmpCFB = inb(0xCFB); |
outb(0x01, 0xCFB); |
tmpCF8 = inl(0xCF8); |
outl(0x80000000, 0xCF8); |
if ((inl(0xCF8) == 0x80000000) && (pci_sanity_check() == 0)) { |
outl(tmpCF8, 0xCF8); |
outb(tmpCFB, 0xCFB); |
return 0; |
} |
outl(tmpCF8, 0xCF8); |
/* Check if configuration type 2 works. */ |
pci_conf_type = PCI_CONF_TYPE_2; |
outb(0x00, 0xCFB); |
outb(0x00, 0xCF8); |
outb(0x00, 0xCFA); |
if (inb(0xCF8) == 0x00 && inb(0xCFA) == 0x00 && (pci_sanity_check() == 0)) { |
outb(tmpCFB, 0xCFB); |
return 0; |
} |
outb(tmpCFB, 0xCFB); |
/* Nothing worked return an error */ |
pci_conf_type = PCI_CONF_TYPE_NONE; |
return -1; |
} |
} |
#define PCI_BASE_CLASS_BRIDGE 0x06
#define PCI_CLASS_BRIDGE_HOST 0x0600
#define PCI_CLASS_DEVICE 0x0a	/* Device class */

/*
 * Verify that the currently selected config mechanism actually works by
 * checking that a host bridge is visible at bus 0, dev 0, fn 0.
 * There are reportedly some buggy chipsets from intel and compaq where
 * this test does not work.  Returns 0 when the bridge is found, -1
 * otherwise.
 */
int pci_sanity_check(void)
{
	unsigned long class_word;

	if (pci_conf_read(0, 0, 0, PCI_CLASS_DEVICE, 2, &class_word) != 0)
		return -1;

	return (class_word == PCI_CLASS_BRIDGE_HOST) ? 0 : -1;
}
/*
 * Initialize PCI configuration access.  Only the direct (type 1/2)
 * mechanisms are supported for now.  Returns 0 on success, -1 when no
 * mechanism could be detected.
 */
int pci_init(void)
{
	return pci_check_direct();
}
void get_cpuid() |
{ |
unsigned int *v, dummy[3]; |
char *p, *q; |
/* Get max std cpuid & vendor ID */ |
cpuid(0x0, &cpu_id.max_cpuid, &cpu_id.vend_id.uint32_array[0], |
&cpu_id.vend_id.uint32_array[2], &cpu_id.vend_id.uint32_array[1]); |
cpu_id.vend_id.char_array[11] = 0; |
/* Get processor family information & feature flags */ |
if (cpu_id.max_cpuid >= 1) { |
cpuid(0x00000001, &cpu_id.vers.flat, &cpu_id.info.flat, |
&cpu_id.fid.uint32_array[1], &cpu_id.fid.uint32_array[0]); |
} |
/* Get the digital thermal sensor & power management status bits */ |
if(cpu_id.max_cpuid >= 6) { |
cpuid(0x00000006, &cpu_id.dts_pmp, &dummy[0], &dummy[1], &dummy[2]); |
} |
/* Get the max extended cpuid */ |
cpuid(0x80000000, &cpu_id.max_xcpuid, &dummy[0], &dummy[1], &dummy[2]); |
/* Get extended feature flags, only save EDX */ |
if (cpu_id.max_xcpuid >= 0x80000001) { |
cpuid(0x80000001, &dummy[0], &dummy[1], |
&dummy[2], &cpu_id.fid.uint32_array[2]); |
} |
/* Get the brand ID */ |
if (cpu_id.max_xcpuid >= 0x80000004) { |
v = (unsigned int *)&cpu_id.brand_id; |
cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); |
cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); |
cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); |
cpu_id.brand_id.char_array[47] = 0; |
} |
/* |
* Intel chips right-justify this string for some dumb reason; |
* undo that brain damage: |
*/ |
p = q = &cpu_id.brand_id.char_array[0]; |
while (*p == ' ') |
p++; |
if (p != q) { |
while (*p) |
*q++ = *p++; |
while (q <= &cpu_id.brand_id.char_array[48]) |
*q++ = '\0'; /* Zero-pad the rest */ |
} |
/* Get cache information */ |
switch(cpu_id.vend_id.char_array[0]) { |
case 'A': |
/* AMD Processors */ |
/* The cache information is only in ecx and edx so only save |
* those registers */ |
if (cpu_id.max_xcpuid >= 0x80000005) { |
cpuid(0x80000005, &dummy[0], &dummy[1], |
&cpu_id.cache_info.uint[0], &cpu_id.cache_info.uint[1]); |
} |
if (cpu_id.max_xcpuid >= 0x80000006) { |
cpuid(0x80000006, &dummy[0], &dummy[1], |
&cpu_id.cache_info.uint[2], &cpu_id.cache_info.uint[3]); |
} |
break; |
case 'G': |
/* Intel Processors, Need to do this in init.c */ |
break; |
} |
/* Turn off mon bit since monitor based spin wait may not be reliable */ |
cpu_id.fid.bits.mon = 0; |
} |
void coretemp(void) |
{ |
unsigned int msrl, msrh; |
unsigned int tjunc, tabs, tnow; |
unsigned long rtcr; |
long amd_raw_temp=524322; |
// Only enable coretemp if IMC is known |
if(imc_type == 0) { return; } |
tnow = 0; |
// Intel CPU |
if(cpu_id.vend_id.char_array[0] == 'G' && cpu_id.max_cpuid >= 6) |
{ |
if(cpu_id.dts_pmp & 1){ |
rdmsr(MSR_IA32_THERM_STATUS, msrl, msrh); |
tabs = ((msrl >> 16) & 0x7F); |
rdmsr(MSR_IA32_TEMPERATURE_TARGET, msrl, msrh); |
tjunc = ((msrl >> 16) & 0x7F); |
if(tjunc < 50 || tjunc > 125) { tjunc = 90; } // assume Tjunc = 90°C if boggus value received. |
tnow = tjunc - tabs; |
//dprint(LINE_CPU+1, 30, v->check_temp, 3, 0); |
printk("temp=%d\n", tnow); |
} |
return; |
} |
// AMD CPU |
if(cpu_id.vend_id.char_array[0] == 'A' && cpu_id.vers.bits.extendedFamily > 0) |
{ |
pci_conf_read(0, 24, 3, 0xA4, 4, &rtcr); |
amd_raw_temp = ((rtcr >> 21) & 0x7FF); |
printk("temp=%d\n", amd_raw_temp/8); |
} |
} |
unsigned drvEntry(int action, char *cmdline){ |
get_cpuid(); |
pci_init(); |
detect_imc(); |
if(!temp_out_disable){ |
coretemp(); |
} |
} |
/drivers/sensors/coretmp/cpuid.h |
---|
0,0 → 1,196 |
/* |
* cpuid.h -- |
* contains the data structures required for CPUID |
* implementation. |
*/ |
#define CPUID_VENDOR_LENGTH 3 /* 3 GPRs hold vendor ID */ |
#define CPUID_VENDOR_STR_LENGTH (CPUID_VENDOR_LENGTH * sizeof(uint32_t) + 1) |
#define CPUID_BRAND_LENGTH 12 /* 12 GPRs hold vendor ID */ |
#define CPUID_BRAND_STR_LENGTH (CPUID_BRAND_LENGTH * sizeof(uint32_t) + 1) |
extern struct cpu_ident cpu_id; |
/*
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
	// ecx is often an input as well as an output.
	asm volatile("\t"
		"push %%ebx; cpuid; mov %%ebx, %%edi; pop %%ebx"
		: "=a" (*eax),
		  "=D" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}

static inline void cpuid(unsigned int op,
                         unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}
*/
/* Some CPUID calls want 'count' to be placed in ecx */ |
/*static inline void cpuid_count(unsigned int op, int count, |
unsigned int *eax, unsigned int *ebx, |
unsigned int *ecx, unsigned int *edx) |
{ |
*eax = op; |
*ecx = count; |
__cpuid(eax, ebx, ecx, edx); |
}*/ |
/* Typedef for storing the Cache Information */ |
typedef union { |
unsigned char ch[48]; |
uint32_t uint[12]; |
struct { |
uint32_t fill1:24; /* Bit 0 */ |
uint32_t l1_i_sz:8; |
uint32_t fill2:24; |
uint32_t l1_d_sz:8; |
uint32_t fill3:16; |
uint32_t l2_sz:16; |
uint32_t fill4:18; |
uint32_t l3_sz:14; |
uint32_t fill5[8]; |
} amd; |
} cpuid_cache_info_t; |
/* Typedef for storing the CPUID Vendor String */ |
typedef union { |
/* Note: the extra byte in the char array is for '\0'. */ |
char char_array[CPUID_VENDOR_STR_LENGTH]; |
uint32_t uint32_array[CPUID_VENDOR_LENGTH]; |
} cpuid_vendor_string_t; |
/* Typedef for storing the CPUID Brand String */ |
typedef union { |
/* Note: the extra byte in the char array is for '\0'. */ |
char char_array[CPUID_BRAND_STR_LENGTH]; |
uint32_t uint32_array[CPUID_BRAND_LENGTH]; |
} cpuid_brand_string_t; |
/* CPUID version information (family/model/stepping fields of EAX). */
typedef union {
	uint32_t flat;
	struct {
		uint32_t stepping:4;		/* Bit 0 */
		uint32_t model:4;
		uint32_t family:4;
		uint32_t processorType:2;
		uint32_t reserved1514:2;
		uint32_t extendedModel:4;	/* prepended to model */
		uint32_t extendedFamily:8;	/* added to family */
		uint32_t reserved3128:4;	/* Bit 31 */
	} bits;
} cpuid_version_t;

/* CPUID processor information (brand index, CLFLUSH size, CPU count, APIC id). */
typedef union {
	uint32_t flat;
	struct {
		uint32_t brandIndex:8;		/* Bit 0 */
		uint32_t cflushLineSize:8;
		uint32_t logicalProcessorCount:8;
		uint32_t apicID:8;		/* Bit 31 */
	} bits;
} cpuid_proc_info_t;
/* Placeholder for custom/vendor feature bits; no named bits defined yet. */
typedef union {
	uint32_t flat;
	struct {
		uint32_t :1;
	} bits;
} cpuid_custom_features;
/*
 * CPUID feature flags.  Three dwords: per the bit comments below,
 * dword 0 is leaf-1 EDX, dword 1 is leaf-1 ECX, dword 2 is the
 * extended-leaf EDX (contains the long-mode bit).
 */
typedef union {
	uint32_t uint32_array[3];
	struct {
		uint32_t fpu:1;		/* EDX feature flags, bit 0 */
		uint32_t vme:1;
		uint32_t de:1;
		uint32_t pse:1;
		uint32_t rdtsc:1;
		uint32_t msr:1;
		uint32_t pae:1;
		uint32_t mce:1;
		uint32_t cx8:1;
		uint32_t apic:1;
		uint32_t bit10:1;	/* reserved */
		uint32_t sep:1;
		uint32_t mtrr:1;
		uint32_t pge:1;
		uint32_t mca:1;
		uint32_t cmov:1;
		uint32_t pat:1;
		uint32_t pse36:1;
		uint32_t psn:1;
		uint32_t cflush:1;
		uint32_t bit20:1;	/* reserved */
		uint32_t ds:1;
		uint32_t acpi:1;
		uint32_t mmx:1;
		uint32_t fxsr:1;
		uint32_t sse:1;
		uint32_t sse2:1;
		uint32_t ss:1;
		uint32_t htt:1;
		uint32_t tm:1;
		uint32_t bit30:1;	/* reserved */
		uint32_t pbe:1;		/* EDX feature flags, bit 31 */
		uint32_t sse3:1;	/* ECX feature flags, bit 0 */
		uint32_t mulq:1;
		uint32_t bit2:1;	/* reserved */
		uint32_t mon:1;
		uint32_t dscpl:1;
		uint32_t vmx:1;
		uint32_t smx:1;
		uint32_t eist:1;
		uint32_t tm2:1;
		uint32_t bits_9_31:23;
		uint32_t bits0_28:29;	/* EDX extended feature flags, bit 0 */
		uint32_t lm:1;		/* Long Mode */
		uint32_t bits_30_31:2;	/* EDX extended feature flags, bit 32 */
	} bits;
} cpuid_feature_flags_t;
/*
 * An overall structure to cache all of the CPUID information.
 * One global instance is exported as cpu_id (declared above).
 */
struct cpu_ident {
	uint32_t max_cpuid;		/* presumably highest standard leaf — confirm at fill site */
	uint32_t max_xcpuid;		/* presumably highest extended leaf — confirm at fill site */
	uint32_t dts_pmp;
	cpuid_version_t vers;		/* family/model/stepping */
	cpuid_proc_info_t info;
	cpuid_feature_flags_t fid;
	cpuid_vendor_string_t vend_id;
	cpuid_brand_string_t brand_id;
	cpuid_cache_info_t cache_info;
	cpuid_custom_features custom;
};
/* Decoded registers for a deterministic cache-parameters CPUID leaf. */
struct cpuid4_eax {
	uint32_t ctype:5;		/* cache type */
	uint32_t level:3;		/* cache level */
	uint32_t is_self_initializing:1;
	uint32_t is_fully_associative:1;
	uint32_t reserved:4;
	uint32_t num_threads_sharing:12;
	uint32_t num_cores_on_die:6;
};
struct cpuid4_ebx {
	uint32_t coherency_line_size:12;
	uint32_t physical_line_partition:10;
	uint32_t ways_of_associativity:10;
};
struct cpuid4_ecx {
	uint32_t number_of_sets:32;
};
/drivers/sensors/cpu_detect.c |
---|
0,0 → 1,20 |
#include <ddk.h> |
#include <syscall.h> |
#include <linux/pci.h> |
#include <asm/processor.h> |
void cpu_detect(struct cpuinfo_x86 *c) |
{ |
static u32 eax, dummy; |
cpuid(1, &eax, &dummy, &dummy, (int *) &c->x86_capability); |
c->x86 = (eax >> 8) & 0xf; |
c->x86_model = (eax >> 4) & 0xf; |
if (c->x86 == 0xf){ |
c->x86 += (eax >> 20) & 0xff; |
} |
if (c->x86 >= 0x6){ |
c->x86_model += ((eax >> 16) & 0xf) << 4; |
} |
c->x86_mask = eax & 0xf; |
} |
/drivers/sensors/drv.lds
---|
0,0 → 1,57 |
/* Link script for KolibriOS PE drivers: pei-i386 image, entry _drvEntry. */
OUTPUT_FORMAT(pei-i386)
ENTRY("_drvEntry")
SECTIONS
{
  . = SIZEOF_HEADERS;
  . = ALIGN(__section_alignment__);

  /* Code and read-only data share one section. */
  .text __image_base__ + ( __section_alignment__ < 0x1000 ? . : __section_alignment__ ) :
  {
    *(.text) *(.rdata)
  }
  .data ALIGN(__section_alignment__) :
  {
    *(.data)
  }
  .bss ALIGN(__section_alignment__):
  {
    *(.bss)
    *(COMMON)
  }
  /* Debug info, directives and export data are dropped from the image. */
  /DISCARD/ :
  {
    *(.debug$S)
    *(.debug$T)
    *(.debug$F)
    *(.drectve)
    *(.edata)
    *(.eh_frame)
  }
  /* Import tables, sorted so the directory entries stay contiguous. */
  .idata ALIGN(__section_alignment__):
  {
    SORT(*)(.idata$2)
    SORT(*)(.idata$3)
    /* These zeroes mark the end of the import list. */
    LONG (0); LONG (0); LONG (0); LONG (0); LONG (0);
    SORT(*)(.idata$4)
    SORT(*)(.idata$5)
    SORT(*)(.idata$6)
    SORT(*)(.idata$7)
  }
  .reloc ALIGN(__section_alignment__) :
  {
    *(.reloc)
  }
}
/drivers/sensors/k10temp/Makefile |
---|
0,0 → 1,43 |
# Build the k10temp sensor driver (k10temp.dll) for KolibriOS.
#
# Fixes: LIBPATH had a dangling trailing "-L" which would swallow the
# next linker argument (-nostdlib); -DCONFIG_PCI was defined twice;
# the undefined $(NAME_OBJS) was dropped from the link rule.
CC = kos32-gcc
LD = kos32-ld
KPACK = kpack

DDK_TOPDIR = ../../ddk
DRV_INCLUDES = ../../include

INCLUDES = -I$(DRV_INCLUDES) \
	-I$(DRV_INCLUDES)/asm \
	-I$(DRV_INCLUDES)/uapi \
	-I$(DRV_INCLUDES)/drm

NAME = k10temp

DEFINES  = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DMI -DCONFIG_TINY_RCU
DEFINES += -DCONFIG_X86_L1_CACHE_SHIFT=6 -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE
DEFINES += -DCONFIG_PRINTK -DCONFIG_PCI -DCONFIG_AMD_NB -DKBUILD_MODNAME=\"k10temp\"

CFLAGS  = -c -O2 -march=i686 -fno-ident -msse2 -fomit-frame-pointer -fno-builtin-printf
CFLAGS += -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields

LIBPATH  = -L $(DDK_TOPDIR)
LIBPATH += -L ../../../contrib/sdk/lib

LIBS := -lddk -lcore -lgcc

LDFLAGS = -nostdlib -shared -s --major-os-version 0 --minor-os-version 7 \
	--major-subsystem-version 0 --minor-subsystem-version 5 --subsystem native \
	--image-base 0 --file-alignment 512 --section-alignment 4096

OBJS = k10temp.o ../pci.o ../amd_nb.o ../cpu_detect.o

all: $(OBJS) $(NAME).dll

$(NAME).dll: $(OBJS)
	$(LD) $(LIBPATH) $(LDFLAGS) -T ../drv.lds $(OBJS) -o $@ $(LIBS)
	$(KPACK) $(NAME).dll

%.o : %.c Makefile
	$(CC) $(CFLAGS) $(DEFINES) $(INCLUDES) -o $@ $<

clean:
	rm -f $(OBJS) $(NAME).dll
/drivers/sensors/k10temp/k10temp.c |
---|
0,0 → 1,555 |
// SPDX-License-Identifier: GPL-2.0-or-later |
/* |
* k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h |
* processor hardware monitoring |
* |
* Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> |
* Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net> |
* |
* Implementation notes: |
* - CCD register address information as well as the calculation to |
* convert raw register values is from https://github.com/ocerman/zenpower. |
* The information is not confirmed from chip datasheets, but experiments |
* suggest that it provides reasonable temperature values. |
*/ |
#include <ddk.h> |
#include <syscall.h> |
#include <linux/bitops.h> |
#include <linux/err.h> |
#include <linux/hwmon.h> |
#include <linux/init.h> |
#include <linux/pci.h> |
#include <linux/pci_ids.h> |
#include <asm/amd_nb.h> |
#include <asm/processor.h> |
struct cpuinfo_x86 boot_cpu_data; |
bool force; |
/* CPUID function 0x80000001, ebx */ |
#define CPUID_PKGTYPE_MASK GENMASK(31, 28) |
#define CPUID_PKGTYPE_F 0x00000000 |
#define CPUID_PKGTYPE_AM2R2_AM3 0x10000000 |
/* DRAM controller (PCI function 2) */ |
#define REG_DCT0_CONFIG_HIGH 0x094 |
#define DDR3_MODE BIT(8) |
/* miscellaneous (PCI function 3) */ |
#define REG_HARDWARE_THERMAL_CONTROL 0x64 |
#define HTC_ENABLE BIT(0) |
#define REG_REPORTED_TEMPERATURE 0xa4 |
#define REG_NORTHBRIDGE_CAPABILITIES 0xe8 |
#define NB_CAP_HTC BIT(10) |
/* |
* For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL |
* and REG_REPORTED_TEMPERATURE have been moved to |
* D0F0xBC_xD820_0C64 [Hardware Temperature Control] |
* D0F0xBC_xD820_0CA4 [Reported Temperature Control] |
*/ |
#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET 0xd8200c64 |
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4 |
/* Common for Zen CPU families (Family 17h and 18h) */ |
#define ZEN_REPORTED_TEMP_CTRL_OFFSET 0x00059800 |
#define ZEN_CCD_TEMP(x) (0x00059954 + ((x) * 4)) |
#define ZEN_CCD_TEMP_VALID BIT(11) |
#define ZEN_CCD_TEMP_MASK GENMASK(10, 0) |
#define ZEN_CUR_TEMP_SHIFT 21 |
#define ZEN_CUR_TEMP_RANGE_SEL_MASK BIT(19) |
#define ZEN_SVI_BASE 0x0005A000 |
/* F17h thermal registers through SMN */ |
#define F17H_M01H_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0xc) |
#define F17H_M01H_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10) |
#define F17H_M31H_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0x14) |
#define F17H_M31H_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10) |
#define F17H_M01H_CFACTOR_ICORE 1000000 /* 1A / LSB */ |
#define F17H_M01H_CFACTOR_ISOC 250000 /* 0.25A / LSB */ |
#define F17H_M31H_CFACTOR_ICORE 1000000 /* 1A / LSB */ |
#define F17H_M31H_CFACTOR_ISOC 310000 /* 0.31A / LSB */ |
/* F19h thermal registers through SMN */ |
#define F19H_M01_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0x14) |
#define F19H_M01_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10) |
#define F19H_M01H_CFACTOR_ICORE 1000000 /* 1A / LSB */ |
#define F19H_M01H_CFACTOR_ISOC 310000 /* 0.31A / LSB */ |
/* Provide lock for writing to NB_SMU_IND_ADDR */ |
DEFINE_MUTEX(nb_smu_ind_mutex); |
DEFINE_MUTEX(smn_mutex); |
/*
 * Per-device driver state.  read_htcreg/read_tempreg abstract how the
 * HTC and reported-temperature registers are reached (plain PCI config,
 * F15h indirect NB index/data, or SMN on Zen).  read_htcreg may be NULL
 * when the part has no usable HTC register.
 */
struct k10temp_data {
	struct pci_dev *pdev;
	void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
	void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
	int temp_offset;	/* Tdie = Tctl - temp_offset, millidegrees C */
	u32 temp_adjust_mask;	/* range-select bit: subtract 49 degC when set */
	u32 show_temp;		/* bitmask of populated channels (TCTL/TDIE/TCCDn) */
	bool is_zen;
};
/* Channel numbering inside show_temp: Tctl, Tdie, then Tccd1..Tccd8. */
#define TCTL_BIT	0
#define TDIE_BIT	1
#define TCCD_BIT(x)	((x) + 2)

#define HAVE_TEMP(d, channel)	((d)->show_temp & BIT(channel))
#define HAVE_TDIE(d)		HAVE_TEMP(d, TDIE_BIT)

/* Per-model Tctl offset, matched by CPU family and brand-string prefix. */
struct tctl_offset {
	u8 model;	/* compared against boot_cpu_data.x86 (family) */
	char const *id;	/* substring matched against x86_model_id */
	int offset;	/* millidegrees C subtracted from Tctl to get Tdie */
};
/*
 * CPUs whose reported Tctl carries a fixed positive offset relative to
 * the real die temperature.
 */
const struct tctl_offset tctl_offset_table[] = {
	{ 0x17, "AMD Ryzen 5 1600X", 20000 },
	{ 0x17, "AMD Ryzen 7 1700X", 20000 },
	{ 0x17, "AMD Ryzen 7 1800X", 20000 },
	{ 0x17, "AMD Ryzen 7 2700X", 10000 },
	{ 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */
	{ 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};
/* Read the Hardware Thermal Control register via plain PCI config space. */
void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
{
	pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
}
/* Read the reported-temperature register via plain PCI config space. */
void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
{
	pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
}
/*
 * Indirect northbridge read: write the register index to @base, then
 * read the data from @base + 4.  nb_smu_ind_mutex keeps the index/data
 * pair atomic with respect to concurrent readers.
 */
void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
		       unsigned int base, int offset, u32 *val)
{
	mutex_lock(&nb_smu_ind_mutex);
	pci_bus_write_config_dword(pdev->bus, devfn,
				   base, offset);
	pci_bus_read_config_dword(pdev->bus, devfn,
				  base + 4, val);
	mutex_unlock(&nb_smu_ind_mutex);
}
/* F15h M60h/M70h: HTC register moved behind the 0xb8 indirect port. */
void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
			  F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
}
/* F15h M60h/M70h: temperature register behind the 0xb8 indirect port. */
void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
			  F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
}
/* Zen (F17h+): reported temperature lives behind the SMN fabric. */
void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval)
{
	amd_smn_read(amd_pci_dev_to_node_id(pdev),
		     ZEN_REPORTED_TEMP_CTRL_OFFSET, regval);
}
long get_raw_temp(struct k10temp_data *data) |
{ |
u32 regval; |
long temp; |
//printk("b30\n"); |
data->read_tempreg(data->pdev, ®val); |
temp = (regval >> ZEN_CUR_TEMP_SHIFT) * 125; |
if (regval & data->temp_adjust_mask) |
temp -= 49000; |
return temp; |
} |
/* Channel labels, indexed by hwmon channel: 0=Tctl, 1=Tdie, 2..9=Tccd1..8. */
const char *k10temp_temp_label[] = {
	"Tctl",
	"Tdie",
	"Tccd1",
	"Tccd2",
	"Tccd3",
	"Tccd4",
	"Tccd5",
	"Tccd6",
	"Tccd7",
	"Tccd8",
};
int k10temp_read_labels(struct device *dev, |
enum hwmon_sensor_types type, |
u32 attr, int channel, const char **str) |
{ |
switch (type) { |
case hwmon_temp: |
*str = k10temp_temp_label[channel]; |
break; |
default: |
return -EOPNOTSUPP; |
} |
return 0; |
} |
/*
 * Read one temperature attribute, result in millidegrees Celsius.
 * Channel map: 0 = Tctl, 1 = Tdie (Tctl - temp_offset), 2..9 = Tccd1..8.
 */
int k10temp_read_temp(struct device *dev, u32 attr, int channel,
		      long *val)
{
	struct k10temp_data *data = dev_get_drvdata(dev);
	u32 regval;

	switch (attr) {
	case hwmon_temp_input:
		switch (channel) {
		case 0: /* Tctl */
			*val = get_raw_temp(data);
			if (*val < 0)
				*val = 0;	/* clamp negative readings */
			break;
		case 1: /* Tdie */
			*val = get_raw_temp(data) - data->temp_offset;
			if (*val < 0)
				*val = 0;
			break;
		case 2 ... 9: /* Tccd{1-8} */
			amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
				     ZEN_CCD_TEMP(channel - 2), &regval);
			/* 0.125 degC units, biased by +49 degC */
			*val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000;
			break;
		default:
			return -EOPNOTSUPP;
		}
		break;
	case hwmon_temp_max:
		*val = 70 * 1000;	/* fixed 70 degC limit */
		break;
	case hwmon_temp_crit:
		/* HTC limit field: 0.5 degC units, biased by +52 degC */
		data->read_htcreg(data->pdev, &regval);
		*val = ((regval >> 16) & 0x7f) * 500 + 52000;
		break;
	case hwmon_temp_crit_hyst:
		/* crit minus the HTC hysteresis field (also 0.5 degC units) */
		data->read_htcreg(data->pdev, &regval);
		*val = (((regval >> 16) & 0x7f)
			- ((regval >> 24) & 0xf)) * 500 + 52000;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
int k10temp_read(struct device *dev, enum hwmon_sensor_types type, |
u32 attr, int channel, long *val) |
{ |
switch (type) { |
case hwmon_temp: |
return k10temp_read_temp(dev, attr, channel, val); |
default: |
return -EOPNOTSUPP; |
} |
} |
/*
 * hwmon is_visible callback: 0444 (read-only) for attributes the part
 * supports, 0 (hidden) otherwise.
 */
umode_t k10temp_is_visible(const void *_data,
			   enum hwmon_sensor_types type,
			   u32 attr, int channel)
{
	const struct k10temp_data *data = _data;
	struct pci_dev *pdev = data->pdev;
	u32 reg;

	switch (type) {
	case hwmon_temp:
		switch (attr) {
		case hwmon_temp_input:
			/* only channels probed into show_temp exist */
			if (!HAVE_TEMP(data, channel)){
				return 0;
			}
			break;
		case hwmon_temp_max:
			/* fixed max applies only to Tctl on pre-Zen parts */
			if (channel || data->is_zen)
				return 0;
			break;
		case hwmon_temp_crit:
		case hwmon_temp_crit_hyst:
			/* needs an HTC register that is capable and enabled */
			if (channel || !data->read_htcreg)
				return 0;
			pci_read_config_dword(pdev,
					      REG_NORTHBRIDGE_CAPABILITIES,
					      &reg);
			if (!(reg & NB_CAP_HTC))
				return 0;
			data->read_htcreg(data->pdev, &reg);
			if (!(reg & HTC_ENABLE))
				return 0;
			break;
		case hwmon_temp_label:
			/* Show temperature labels only on Zen CPUs */
			if (!data->is_zen || !HAVE_TEMP(data, channel))
				return 0;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}
	return 0444;
}
/*
 * has_erratum_319() - true for Family 10h parts whose thermal sensor
 * is unreliable (AMD erratum 319: Socket F / AM2+ packages).
 */
bool has_erratum_319(struct pci_dev *pdev)
{
	u32 pkg_type, reg_dram_cfg;

	if (boot_cpu_data.x86 != 0x10)
		return false;

	/*
	 * Erratum 319: The thermal sensor of Socket F/AM2+ processors
	 * may be unreliable.
	 */
	pkg_type = cpuid_ebx(0x80000001) & CPUID_PKGTYPE_MASK;
	if (pkg_type == CPUID_PKGTYPE_F)
		return true;
	if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
		return false;

	/* DDR3 memory implies socket AM3, which is good */
	pci_bus_read_config_dword(pdev->bus,
				  PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
				  REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
	if (reg_dram_cfg & DDR3_MODE)
		return false;

	/*
	 * Unfortunately it is possible to run a socket AM3 CPU with DDR2
	 * memory. We blacklist all the cores which do exist in socket AM2+
	 * format. It still isn't perfect, as RB-C2 cores exist in both AM2+
	 * and AM3 formats, but that's the best we can do.
	 */
	return boot_cpu_data.x86_model < 4;
}
/*
 * Channels advertised to hwmon: 10 temperature channels (Tctl with
 * max/crit/hyst, Tdie, Tccd1..8).  The in/curr channels are declared
 * but the read callbacks above only implement hwmon_temp, so they
 * always report -EOPNOTSUPP / hidden.
 */
const struct hwmon_channel_info *k10temp_info[] = {
	HWMON_CHANNEL_INFO(temp,
			   HWMON_T_INPUT | HWMON_T_MAX |
			   HWMON_T_CRIT | HWMON_T_CRIT_HYST |
			   HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL),
	HWMON_CHANNEL_INFO(in,
			   HWMON_I_INPUT | HWMON_I_LABEL,
			   HWMON_I_INPUT | HWMON_I_LABEL),
	HWMON_CHANNEL_INFO(curr,
			   HWMON_C_INPUT | HWMON_C_LABEL,
			   HWMON_C_INPUT | HWMON_C_LABEL),
	NULL
};
/* |
const struct hwmon_ops k10temp_hwmon_ops = { |
.is_visible = k10temp_is_visible, |
.read = k10temp_read, |
.read_string = k10temp_read_labels, |
};*/ |
/* |
const struct hwmon_chip_info k10temp_chip_info = { |
.ops = &k10temp_hwmon_ops, |
.info = k10temp_info, |
};*/ |
void k10temp_get_ccd_support(struct pci_dev *pdev, |
struct k10temp_data *data, int limit) |
{ |
u32 regval; |
int i; |
for (i = 0; i < limit; i++) { |
amd_smn_read(amd_pci_dev_to_node_id(pdev), |
ZEN_CCD_TEMP(i), ®val); |
if (regval & ZEN_CCD_TEMP_VALID) |
data->show_temp |= BIT(TCCD_BIT(i)); |
} |
} |
int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id, struct device *hwmon_dev) |
{ |
int unreliable = has_erratum_319(pdev); |
struct device *dev = &pdev->dev; |
struct k10temp_data *data; |
int i; |
if (unreliable) { |
if (!force) { |
dev_err(dev,"unreliable CPU thermal sensor; monitoring disabled\n"); |
return -ENODEV; |
} |
dev_warn(dev, |
"unreliable CPU thermal sensor; check erratum 319\n"); |
} |
data = kzalloc(sizeof(struct k10temp_data), GFP_KERNEL); |
memset(data, 0x0, sizeof(struct k10temp_data)); |
if (!data) |
return -ENOMEM; |
data->pdev = pdev; |
data->show_temp |= BIT(TCTL_BIT); /* Always show Tctl */ |
if (boot_cpu_data.x86 == 0x15 && |
((boot_cpu_data.x86_model & 0xf0) == 0x60 || |
(boot_cpu_data.x86_model & 0xf0) == 0x70)) { |
data->read_htcreg = read_htcreg_nb_f15; |
data->read_tempreg = read_tempreg_nb_f15; |
} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { |
data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; |
data->read_tempreg = read_tempreg_nb_zen; |
data->show_temp |= BIT(TDIE_BIT); /* show Tdie */ |
data->is_zen = true; |
switch (boot_cpu_data.x86_model) { |
case 0x1: /* Zen */ |
case 0x8: /* Zen+ */ |
case 0x11: /* Zen APU */ |
case 0x18: /* Zen+ APU */ |
k10temp_get_ccd_support(pdev, data, 4); |
break; |
case 0x31: /* Zen2 Threadripper */ |
case 0x71: /* Zen2 */ |
k10temp_get_ccd_support(pdev, data, 8); |
break; |
} |
} else if (boot_cpu_data.x86 == 0x19) { |
data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; |
data->read_tempreg = read_tempreg_nb_zen; |
data->show_temp |= BIT(TDIE_BIT); |
data->is_zen = true; |
switch (boot_cpu_data.x86_model) { |
case 0x0 ... 0x1: /* Zen3 SP3/TR */ |
case 0x21: /* Zen3 Ryzen Desktop */ |
k10temp_get_ccd_support(pdev, data, 8); |
break; |
} |
} else { |
data->read_htcreg = read_htcreg_pci; |
data->read_tempreg = read_tempreg_pci; |
} |
for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) { |
const struct tctl_offset *entry = &tctl_offset_table[i]; |
if (boot_cpu_data.x86 == entry->model && |
strstr(boot_cpu_data.x86_model_id, entry->id)) { |
data->temp_offset = entry->offset; |
break; |
} |
} |
hwmon_dev->driver_data=data; |
return PTR_ERR_OR_ZERO(hwmon_dev); |
} |
/*
 * Supported PCI IDs: the NB/DF "function 3" device of each family,
 * including the Hygon Dhyana clone of F17h.
 */
const struct pci_device_id k10temp_id_table[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M70H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}	/* terminator */
};
/* KolibriOS service ioctl handler: no controls implemented, always 0. */
int __stdcall service_proc(ioctl_t *my_ctl){
	return 0;
}
/*
 * Print one temperature attribute (millidegrees C) for @channel if the
 * sensor exposes it.
 *
 * Fix: temp is `long`, so the printk conversion must be %ld, not %d
 * (format/argument type mismatch in the original).
 */
void show_temp_info(struct device *dev, u32 attr, int channel, char* label){
	long temp = 0;

	if (k10temp_is_visible(dev->driver_data, hwmon_temp, attr, channel)) {
		k10temp_read_temp(dev, attr, channel, &temp);
		printk("%s = %ld\n", label, temp);
	}
}
void show_all_info(struct device* dev){ |
const char *hwmon_label=NULL; |
int i=0; |
for(i=0; i<=9; i++){ |
if(k10temp_is_visible(dev->driver_data, hwmon_temp, hwmon_temp_label, i)){ |
k10temp_read_labels(dev, hwmon_temp, 0, i, &hwmon_label); |
printk("%s:\n",hwmon_label); |
} |
show_temp_info(dev, hwmon_temp_input, i, "temp"); |
show_temp_info(dev, hwmon_temp_max, i, "temp_max"); |
show_temp_info(dev, hwmon_temp_crit, i, "temp_crit"); |
show_temp_info(dev, hwmon_temp_crit_hyst, i, "temp_crit_hyst"); |
} |
} |
uint32_t drvEntry(int action, char *cmdline){ |
if(action != 1){ |
return 0; |
} |
struct device k10temp_device; |
pci_dev_t device; |
struct pci_device_id *k10temp_id; |
int err; |
cpu_detect(&boot_cpu_data); |
err = enum_pci_devices(); |
if(unlikely(err != 0)) { |
printk("k10temp: Device enumeration failed\n"); |
return -1; |
} |
k10temp_id = find_pci_device(&device, k10temp_id_table); |
if( unlikely(k10temp_id == NULL) ){ |
printk("k10temp: Device not found\n"); |
return -ENODEV; |
} |
init_amd_nbs(); |
k10temp_probe(&device.pci_dev, k10temp_id, &k10temp_device); |
long temp; |
/* if(k10temp_is_visible(k10temp_device.driver_data, hwmon_temp, hwmon_temp_input, 0)){ |
k10temp_read_temp(&k10temp_device, hwmon_temp_input, 0, &temp); |
printk("Temp = %d C\n", temp); |
} |
// if(k10temp_is_visible(&k10temp_device.driver_data, hwmon_temp, hwmon_temp_input, 1)){ |
k10temp_read_temp(&k10temp_device, hwmon_temp_input, 1, &temp); |
printk("Temp = %d C\n", temp); |
// } |
*/ |
show_all_info(&k10temp_device); |
return RegService("k10temp", service_proc); |
} |
/drivers/sensors/pci.c |
---|
0,0 → 1,681 |
#include <syscall.h> |
#include <linux/kernel.h> |
#include <linux/mutex.h> |
#include <linux/mod_devicetable.h> |
#include <linux/slab.h> |
#include <linux/pm.h> |
#include <asm/msr.h> |
#include <linux/pci.h> |
extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn); |
LIST_HEAD(devices); |
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */ |
#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */ |
/* |
* Translate the low bits of the PCI base |
* to the resource type |
*/ |
/* |
//int pci_scan_filter(u32 id, u32 busnr, u32 devfn) |
{ |
u16 vendor, device; |
u32 class; |
int ret = 0; |
vendor = id & 0xffff; |
device = (id >> 16) & 0xffff; |
if(vendor == 0x15AD ) |
{ |
class = PciRead32(busnr, devfn, PCI_CLASS_REVISION); |
class >>= 16; |
if( class == PCI_CLASS_DISPLAY_VGA ) |
ret = 1; |
} |
return ret; |
};*/ |
static inline unsigned int pci_calc_resource_flags(unsigned int flags) |
{ |
if (flags & PCI_BASE_ADDRESS_SPACE_IO) |
return IORESOURCE_IO; |
if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) |
return IORESOURCE_MEM | IORESOURCE_PREFETCH; |
return IORESOURCE_MEM; |
} |
/*
 * Decode the size of a 32-bit BAR from the value read back after
 * writing all-ones (@maxbase), masked by @mask.  Returns the size
 * minus one (the extent), or 0 if the BAR does not decode.
 */
static u32 pci_size(u32 base, u32 maxbase, u32 mask)
{
	u32 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
/* 64-bit variant of pci_size(): same decode-size logic on u64 values. */
static u64 pci_size64(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
/* True when the BAR's low dword marks a 64-bit memory BAR. */
static inline int is_64bit_memory(u32 mask)
{
	return (mask & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
	       (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64);
}
/*
 * Size and record the BARs (and optionally the expansion ROM) of @dev.
 *
 * Uses the classic probe: save the BAR, write all-ones, read back the
 * size mask, restore the BAR.  @howmany is the number of 32-bit BARs
 * for the header type (6/2/1); @rom is the ROM register offset or 0.
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	u32 pos, reg, next;
	u32 l, sz;
	struct resource *res;

	for (pos = 0; pos < howmany; pos = next)
	{
		u64 l64;
		u64 sz64;
		u32 raw_sz;

		next = pos + 1;

		res = &dev->resource[pos];

		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		l = PciRead32(dev->busnr, dev->devfn, reg);
		PciWrite32(dev->busnr, dev->devfn, reg, ~0);
		sz = PciRead32(dev->busnr, dev->devfn, reg);
		PciWrite32(dev->busnr, dev->devfn, reg, l);

		if (!sz || sz == 0xffffffff)
			continue;
		if (l == 0xffffffff)
			l = 0;

		raw_sz = sz;
		if ((l & PCI_BASE_ADDRESS_SPACE) ==
		    PCI_BASE_ADDRESS_SPACE_MEMORY)
		{
			sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
			/*
			 * For 64bit prefetchable memory sz could be 0, if the
			 * real size is bigger than 4G, so we need to check
			 * szhi for that.
			 */
			if (!is_64bit_memory(l) && !sz)
				continue;
			res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
		}
		else {
			sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
			if (!sz)
				continue;
			res->start = l & PCI_BASE_ADDRESS_IO_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
		}
		res->end = res->start + (unsigned long) sz;
		res->flags |= pci_calc_resource_flags(l);
		if (is_64bit_memory(l))
		{
			u32 szhi, lhi;

			/* probe the high dword the same way */
			lhi = PciRead32(dev->busnr, dev->devfn, reg+4);
			PciWrite32(dev->busnr, dev->devfn, reg+4, ~0);
			szhi = PciRead32(dev->busnr, dev->devfn, reg+4);
			PciWrite32(dev->busnr, dev->devfn, reg+4, lhi);
			sz64 = ((u64)szhi << 32) | raw_sz;
			l64 = ((u64)lhi << 32) | l;
			sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
			next++;		/* a 64-bit BAR occupies two registers */
#if BITS_PER_LONG == 64
			if (!sz64) {
				res->start = 0;
				res->end = 0;
				res->flags = 0;
				continue;
			}
			res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK;
			res->end = res->start + sz64;
#else
			if (sz64 > 0x100000000ULL) {
				printk(KERN_ERR "PCI: Unable to handle 64-bit "
					"BAR for device %s\n", pci_name(dev));
				res->start = 0;
				res->flags = 0;
			}
			else if (lhi)
			{
				/* 64-bit wide address, treat as disabled */
				PciWrite32(dev->busnr, dev->devfn, reg,
					l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK);
				PciWrite32(dev->busnr, dev->devfn, reg+4, 0);
				res->start = 0;
				res->end = sz;
			}
#endif
		}
	}

	if ( rom )
	{
		/* Expansion ROM: same probe with the enable bit masked off. */
		dev->rom_base_reg = rom;
		res = &dev->resource[PCI_ROM_RESOURCE];

		l = PciRead32(dev->busnr, dev->devfn, rom);
		PciWrite32(dev->busnr, dev->devfn, rom, ~PCI_ROM_ADDRESS_ENABLE);
		sz = PciRead32(dev->busnr, dev->devfn, rom);
		PciWrite32(dev->busnr, dev->devfn, rom, l);

		if (l == 0xffffffff)
			l = 0;
		if (sz && sz != 0xffffffff)
		{
			sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
			if (sz)
			{
				res->flags = (l & IORESOURCE_ROM_ENABLE) |
					IORESOURCE_MEM | IORESOURCE_PREFETCH |
					IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
				res->start = l & PCI_ROM_ADDRESS_MASK;
				res->end = res->start + (unsigned long) sz;
			}
		}
	}
}
/*
 * Read the interrupt pin; if the device uses one, also read the
 * routed interrupt line into dev->irq.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	u8 irq;

	irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN);
	dev->pin = irq;
	if (irq)
		irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_LINE);
	dev->irq = irq;
};
/*
 * Fill in class/revision, IRQ routing, BARs and subsystem IDs for a
 * freshly discovered device, dispatching on its header type.
 * Returns 0 on success, -1 for an unknown header type.
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;

	class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION);
	dev->revision = class & 0xff;
	class >>= 8;				    /* upper 3 bytes */
	dev->class = class;

	/* "Unknown power state" */
//    dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
//    pci_fixup_device(pci_fixup_early, dev);

	class = dev->class >> 8;

	switch (dev->hdr_type)
	{
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		dev->subsystem_vendor = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_VENDOR_ID);
		dev->subsystem_device = PciRead16(dev->busnr, dev->devfn, PCI_SUBSYSTEM_ID);

		/*
		 *	Do the ugly legacy mode stuff here rather than broken chip
		 *	quirk code. Legacy mode ATA controllers have fixed
		 *	addresses. These are not always echoed in BAR0-3, and
		 *	BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE)
		{
			u8 progif;

			progif = PciRead8(dev->busnr, dev->devfn, PCI_CLASS_PROG);
			if ((progif & 1) == 0)
			{
				/* primary channel in legacy mode */
				dev->resource[0].start = 0x1F0;
				dev->resource[0].end = 0x1F7;
				dev->resource[0].flags = LEGACY_IO_RESOURCE;
				dev->resource[1].start = 0x3F6;
				dev->resource[1].end = 0x3F6;
				dev->resource[1].flags = LEGACY_IO_RESOURCE;
			}
			if ((progif & 4) == 0)
			{
				/* secondary channel in legacy mode */
				dev->resource[2].start = 0x170;
				dev->resource[2].end = 0x177;
				dev->resource[2].flags = LEGACY_IO_RESOURCE;
				dev->resource[3].start = 0x376;
				dev->resource[3].end = 0x376;
				dev->resource[3].flags = LEGACY_IO_RESOURCE;
			};
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		dev->subsystem_vendor = PciRead16(dev->busnr,
						  dev->devfn,
						  PCI_CB_SUBSYSTEM_VENDOR_ID);
		dev->subsystem_device = PciRead16(dev->busnr,
						  dev->devfn,
						  PCI_CB_SUBSYSTEM_ID);
		break;

	default:				    /* unknown header */
		printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
			pci_name(dev), dev->hdr_type);
		return -1;

	bad:
		printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
		       pci_name(dev), class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
};
/*
 * Probe one bus/devfn.  Returns a freshly allocated pci_dev_t (not yet
 * linked anywhere) or NULL if the slot is empty, stuck, or allocation
 * fails.
 *
 * Fix: the "not responding" printk had four conversions but only three
 * arguments (undefined behavior); the spurious %04x was removed.
 */
static pci_dev_t* pci_scan_device(u32 busnr, int devfn)
{
	pci_dev_t *dev;
	u32 id;
	u8 hdr;
	int timeout = 10;

	id = PciRead32(busnr, devfn, PCI_VENDOR_ID);

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (id == 0xffffffff || id == 0x00000000 ||
	    id == 0x0000ffff || id == 0xffff0000)
		return NULL;

	/* retry while the device reports configuration-retry status */
	while (id == 0xffff0001)
	{
		delay(timeout/10);
		timeout *= 2;

		id = PciRead32(busnr, devfn, PCI_VENDOR_ID);

		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (timeout > 60 * 100)
		{
			printk(KERN_WARNING "Device %02x:%02x.%d not responding\n",
			       busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
			return NULL;
		}
	};

	hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE);

	dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0);
	if (unlikely(dev == NULL))
		return NULL;

	INIT_LIST_HEAD(&dev->link);

	dev->pci_dev.busnr         = busnr;
	dev->pci_dev.devfn         = devfn;
	dev->pci_dev.hdr_type      = hdr & 0x7f;
	dev->pci_dev.multifunction = !!(hdr & 0x80);
	dev->pci_dev.vendor        = id & 0xffff;
	dev->pci_dev.device        = (id >> 16) & 0xffff;

	pci_setup_device(&dev->pci_dev);

	return dev;
};
/*
 * Scan the 8 possible functions of one slot, adding each device found
 * to the global list.  Returns the number of functions discovered.
 */
int _pci_scan_slot(u32 bus, int devfn)
{
	int func, nr = 0;

	for (func = 0; func < 8; func++, devfn++)
	{
		pci_dev_t *dev;

		dev = pci_scan_device(bus, devfn);
		if( dev )
		{
			list_add(&dev->link, &devices);
			nr++;

			/*
			 * If this is a single function device,
			 * don't scan past the first function.
			 */
			if (!dev->pci_dev.multifunction)
			{
				if (func > 0) {
					/*
					 * NOTE(review): a non-multifunction
					 * device answering on func > 0 is
					 * flagged multifunction anyway —
					 * mirrors old Linux pci_scan_slot.
					 */
					dev->pci_dev.multifunction = 1;
				}
				else {
					break;
				}
			}
		}
		else {
			/* nothing at function 0 means the slot is empty */
			if (func == 0)
				break;
		}
	};
	return nr;
};
/* Cap on capability-list hops, guarding against malformed loops. */
#define PCI_FIND_CAP_TTL 48

/*
 * Follow the capability linked list from @pos until @cap is found,
 * the list ends, or *ttl hops are exhausted.  Returns the config
 * offset of the capability, or 0 if not found.
 */
static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pos = PciRead8(bus, devfn, pos);
		if (pos < 0x40)		/* pointers below 0x40 are invalid */
			break;
		pos &= ~3;		/* pointers are dword-aligned */
		id = PciRead8(bus, devfn, pos + PCI_CAP_LIST_ID);
		if (id == 0xff)		/* end of list */
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}
static int __pci_find_next_cap(unsigned int bus, unsigned int devfn, |
u8 pos, int cap) |
{ |
int ttl = PCI_FIND_CAP_TTL; |
return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
} |
/*
 * Return the config-space offset of the first-capability pointer for
 * @hdr_type, or 0 when the device's status register advertises no
 * capability list.
 *
 * Cleanup: removed the unreachable `return 0;` that followed the
 * switch (every case, including default, already returns).
 */
static int __pci_bus_find_cap_start(unsigned int bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	status = PciRead16(bus, devfn, PCI_STATUS);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}
}
int pci_find_capability(struct pci_dev *dev, int cap) |
{ |
int pos; |
pos = __pci_bus_find_cap_start(dev->busnr, dev->devfn, dev->hdr_type); |
if (pos) |
pos = __pci_find_next_cap(dev->busnr, dev->devfn, pos, cap); |
return pos; |
} |
int enum_pci_devices() |
{ |
pci_dev_t *dev; |
u32 last_bus; |
u32 bus = 0 , devfn = 0; |
last_bus = PciApi(1); |
if( unlikely(last_bus == -1)) |
return -1; |
for(;bus <= last_bus; bus++) |
{ |
for (devfn = 0; devfn < 0x100; devfn += 8){ |
_pci_scan_slot(bus, devfn); |
} |
} |
dev = (pci_dev_t*)devices.next; |
while(&dev->link != &devices) |
{ |
/*printk("PCI device %x:%x bus:%x devfn:%x\n", |
dev->pci_dev.vendor, |
dev->pci_dev.device, |
dev->pci_dev.busnr, |
dev->pci_dev.devfn);*/ |
dev = (pci_dev_t*)dev->link.next; |
} |
return 0; |
} |
/*
 * Search the global 'devices' list for the first device matching an
 * entry in @idlist (terminated by an entry with vendor == 0).
 *
 * On a match, the device's pci_dev is copied into @pdev and the
 * matching table entry is returned; NULL if nothing matches.
 *
 * Fix: the original compared each device's vendor only against
 * idlist[0].vendor and ignored the vendor field of later entries, so
 * id tables listing more than one vendor were silently broken.  The
 * vendor is now checked per entry; behavior for single-vendor tables
 * is unchanged.
 */
const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist)
{
    pci_dev_t *dev;
    const struct pci_device_id *ent;

    for (dev = (pci_dev_t*)devices.next;
         &dev->link != &devices;
         dev = (pci_dev_t*)dev->link.next)
    {
        for (ent = idlist; ent->vendor != 0; ent++)
        {
            if (ent->vendor != dev->pci_dev.vendor)
                continue;
            if (ent->device == dev->pci_dev.device)
            {
                pdev->pci_dev = dev->pci_dev;
                return ent;
            }
        }
    }
    return NULL;
}
struct pci_dev * |
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) |
{ |
pci_dev_t *dev; |
dev = (pci_dev_t*)devices.next; |
if(from != NULL) |
{ |
for(; &dev->link != &devices; |
dev = (pci_dev_t*)dev->link.next) |
{ |
if( &dev->pci_dev == from) |
{ |
dev = (pci_dev_t*)dev->link.next; |
break; |
}; |
} |
}; |
for(; &dev->link != &devices; |
dev = (pci_dev_t*)dev->link.next) |
{ |
if((dev->pci_dev.vendor != vendor) && (vendor != PCI_ANY_ID)) |
continue; |
if((dev->pci_dev.device == device || device == PCI_ANY_ID)) |
{ |
return &dev->pci_dev; |
} |
} |
return NULL; |
}; |
struct pci_dev * _pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
{ |
pci_dev_t *dev; |
for(dev = (pci_dev_t*)devices.next; |
&dev->link != &devices; |
dev = (pci_dev_t*)dev->link.next) |
{ |
if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn) |
return &dev->pci_dev; |
} |
return NULL; |
} |
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
{ |
pci_dev_t *dev; |
dev = (pci_dev_t*)devices.next; |
if(from != NULL) |
{ |
for(; &dev->link != &devices; |
dev = (pci_dev_t*)dev->link.next) |
{ |
if( &dev->pci_dev == from) |
{ |
dev = (pci_dev_t*)dev->link.next; |
break; |
}; |
} |
}; |
for(; &dev->link != &devices; |
dev = (pci_dev_t*)dev->link.next) |
{ |
if( dev->pci_dev.class == class) |
{ |
return &dev->pci_dev; |
} |
} |
return NULL; |
} |
/*
 * Read one byte of config space at @pos into *@value.
 * Byte accesses have no alignment constraint, so this always
 * succeeds and returns 0.
 * NOTE(review): the original had spin-lock calls commented out here;
 * config access is assumed single-threaded — confirm before adding SMP.
 */
int pci_bus_read_config_byte (struct pci_bus *bus, u32 devfn, int pos, u8 *value)
{
    *value = PciRead8(bus->number, devfn, pos);
    return 0;
}
/*
 * Read a 16-bit word of config space at @pos into *@value.
 * Returns PCIBIOS_BAD_REGISTER_NUMBER if @pos is not 2-byte aligned,
 * 0 on success.
 */
int pci_bus_read_config_word (struct pci_bus *bus, u32 devfn, int pos, u16 *value)
{
    if (pos & 1)
        return PCIBIOS_BAD_REGISTER_NUMBER;

    *value = PciRead16(bus->number, devfn, pos);
    return 0;
}
/*
 * Read a 32-bit dword of config space at @pos into *@value.
 * Returns PCIBIOS_BAD_REGISTER_NUMBER if @pos is not 4-byte aligned,
 * 0 on success.
 */
int pci_bus_read_config_dword (struct pci_bus *bus, u32 devfn, int pos, u32 *value)
{
    if (pos & 3)
        return PCIBIOS_BAD_REGISTER_NUMBER;

    *value = PciRead32(bus->number, devfn, pos);
    return 0;
}
/*
 * Write the 32-bit value @val to config space at offset @where.
 * Returns PCIBIOS_BAD_REGISTER_NUMBER if @where is not 4-byte aligned,
 * 0 on success.
 */
int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 val)
{
    if (where & 3)
        return PCIBIOS_BAD_REGISTER_NUMBER;

    PciWrite32(bus->number, devfn, where, val);
    return 0;
}