KolibriOS Subversion Repository

Compare Revisions: Rev 9077 → Rev 9078

/drivers/include/acpi/acpi_io.h
4,10 → 4,10
#include <linux/io.h>
 
#include <asm/acpi.h>
#include <asm/percpu.h>
 
 
void __iomem __ref
*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
 
int acpi_os_map_generic_address(struct acpi_generic_address *addr);
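
A minimal usage sketch for the map/unmap pair declared above, assuming the usual Linux ACPI OSL semantics (the caller-supplied physical address and the 4-byte length are placeholders):

#include <linux/io.h>

static u32 demo_read_acpi_reg(acpi_physical_address phys)
{
	/* Map 4 bytes of the register space, read once, drop the mapping. */
	void __iomem *reg = acpi_os_map_iomem(phys, 4);
	u32 val = 0;

	if (reg) {
		val = readl(reg);
		acpi_os_unmap_iomem(reg, 4);
	}
	return val;
}
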
/drivers/include/asm/amd_nb.h
0,0 → 1,127
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_AMD_NB_H
#define _ASM_X86_AMD_NB_H
 
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/refcount.h>
 
struct amd_nb_bus_dev_range {
	u8 bus;
	u8 dev_base;
	u8 dev_limit;
};
 
extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
 
extern bool early_is_amd_nb(u32 value);
extern struct resource *amd_get_mmconfig_range(struct resource *res);
extern int amd_cache_northbridges(void);
extern void amd_flush_garts(void);
extern int amd_numa_init(void);
extern int amd_get_subcaches(int);
extern int amd_set_subcaches(int, unsigned long);
 
extern int amd_smn_read(u16 node, u32 address, u32 *value);
extern int amd_smn_write(u16 node, u32 address, u32 value);
extern int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo);
 
struct amd_l3_cache {
	unsigned indices;
	u8	 subcaches[4];
};

struct threshold_block {
	unsigned int	 block;			/* Number within bank */
	unsigned int	 bank;			/* MCA bank the block belongs to */
	unsigned int	 cpu;			/* CPU which controls MCA bank */
	u32		 address;		/* MSR address for the block */
	u16		 interrupt_enable;	/* Enable/Disable APIC interrupt */
	bool		 interrupt_capable;	/* Bank can generate an interrupt. */

	u16		 threshold_limit;	/*
						 * Value upon which threshold
						 * interrupt is generated.
						 */

	struct kobject	 kobj;			/* sysfs object */
	struct list_head miscj;			/*
						 * List of threshold blocks
						 * within a bank.
						 */
};

struct threshold_bank {
	struct kobject		*kobj;
	struct threshold_block	*blocks;

	/* initialized to the number of CPUs on the node sharing this bank */
	refcount_t		cpus;
	unsigned int		shared;
};

struct amd_northbridge {
	struct pci_dev		*root;
	struct pci_dev		*misc;
	struct pci_dev		*link;
	struct amd_l3_cache	l3_cache;
	struct threshold_bank	*bank4;
};

struct amd_northbridge_info {
	u16 num;
	u64 flags;
	struct amd_northbridge *nb;
};
 
#define AMD_NB_GART BIT(0)
#define AMD_NB_L3_INDEX_DISABLE BIT(1)
#define AMD_NB_L3_PARTITIONING BIT(2)
 
#ifdef CONFIG_AMD_NB
 
u16 amd_nb_num(void);
bool amd_nb_has_feature(unsigned int feature);
struct amd_northbridge *node_to_amd_nb(int node);
 
static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
{
	struct pci_dev *misc;
	int i;

	for (i = 0; i != amd_nb_num(); i++) {
		misc = node_to_amd_nb(i)->misc;

		if (pci_domain_nr(misc->bus) == pci_domain_nr(pdev->bus) &&
		    PCI_SLOT(misc->devfn) == PCI_SLOT(pdev->devfn))
			return i;
	}

	WARN(1, "Unable to find AMD Northbridge id for %s\n", pci_name(pdev));
	return 0;
}

static inline bool amd_gart_present(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return false;

	/* GART present only on Fam15h, up to model 0fh */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
		return true;

	return false;
}
 
#else
 
#define amd_nb_num(x) 0
#define amd_nb_has_feature(x) false
#define node_to_amd_nb(x) NULL
#define amd_gart_present(x) false
 
#endif
 
 
#endif /* _ASM_X86_AMD_NB_H */
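
A hedged sketch of how a consumer of this header walks the cached northbridges and reads one System Management Network register from each (the SMN address is a placeholder; this assumes amd_cache_northbridges() has already succeeded and that amd_smn_read() returns 0 on success):

#include <linux/printk.h>
#include <asm/amd_nb.h>

#define DEMO_SMN_REG 0x0	/* hypothetical SMN register address */

static void demo_dump_smn(void)
{
	u16 node;

	for (node = 0; node < amd_nb_num(); node++) {
		u32 val;

		if (!amd_smn_read(node, DEMO_SMN_REG, &val))
			pr_info("node %u: SMN[%#x] = %#x\n",
				node, DEMO_SMN_REG, val);
	}
}
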
/drivers/include/asm/processor.h
18,6 → 18,7
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/msr-index.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
145,7 → 146,7
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_NUM 9
 
#define X86_VENDOR_HYGON 9
#define X86_VENDOR_UNKNOWN 0xff
 
/*
/drivers/include/ddk.h
8,6 → 8,7
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
//#include <stdint.h>
 
#define OS_BASE 0x80000000
 
/drivers/include/linux/hwmon.h
0,0 → 1,485
/* SPDX-License-Identifier: GPL-2.0-only */
/*
hwmon.h - part of lm_sensors, Linux kernel modules for hardware monitoring
 
This file declares helper functions for the sysfs class "hwmon",
for use by sensors drivers.
 
Copyright (C) 2005 Mark M. Hoffman <mhoffman@lightlink.com>
 
*/
 
#ifndef _HWMON_H_
#define _HWMON_H_
 
#include <linux/bitops.h>
 
struct device;
struct attribute_group;
 
enum hwmon_sensor_types {
	hwmon_chip,
	hwmon_temp,
	hwmon_in,
	hwmon_curr,
	hwmon_power,
	hwmon_energy,
	hwmon_humidity,
	hwmon_fan,
	hwmon_pwm,
	hwmon_intrusion,
	hwmon_max,
};
 
enum hwmon_chip_attributes {
	hwmon_chip_temp_reset_history,
	hwmon_chip_in_reset_history,
	hwmon_chip_curr_reset_history,
	hwmon_chip_power_reset_history,
	hwmon_chip_register_tz,
	hwmon_chip_update_interval,
	hwmon_chip_alarms,
	hwmon_chip_samples,
	hwmon_chip_curr_samples,
	hwmon_chip_in_samples,
	hwmon_chip_power_samples,
	hwmon_chip_temp_samples,
};
 
#define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history)
#define HWMON_C_IN_RESET_HISTORY BIT(hwmon_chip_in_reset_history)
#define HWMON_C_CURR_RESET_HISTORY BIT(hwmon_chip_curr_reset_history)
#define HWMON_C_POWER_RESET_HISTORY BIT(hwmon_chip_power_reset_history)
#define HWMON_C_REGISTER_TZ BIT(hwmon_chip_register_tz)
#define HWMON_C_UPDATE_INTERVAL BIT(hwmon_chip_update_interval)
#define HWMON_C_ALARMS BIT(hwmon_chip_alarms)
#define HWMON_C_SAMPLES BIT(hwmon_chip_samples)
#define HWMON_C_CURR_SAMPLES BIT(hwmon_chip_curr_samples)
#define HWMON_C_IN_SAMPLES BIT(hwmon_chip_in_samples)
#define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples)
#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples)
 
enum hwmon_temp_attributes {
	hwmon_temp_enable,
	hwmon_temp_input,
	hwmon_temp_type,
	hwmon_temp_lcrit,
	hwmon_temp_lcrit_hyst,
	hwmon_temp_min,
	hwmon_temp_min_hyst,
	hwmon_temp_max,
	hwmon_temp_max_hyst,
	hwmon_temp_crit,
	hwmon_temp_crit_hyst,
	hwmon_temp_emergency,
	hwmon_temp_emergency_hyst,
	hwmon_temp_alarm,
	hwmon_temp_lcrit_alarm,
	hwmon_temp_min_alarm,
	hwmon_temp_max_alarm,
	hwmon_temp_crit_alarm,
	hwmon_temp_emergency_alarm,
	hwmon_temp_fault,
	hwmon_temp_offset,
	hwmon_temp_label,
	hwmon_temp_lowest,
	hwmon_temp_highest,
	hwmon_temp_reset_history,
	hwmon_temp_rated_min,
	hwmon_temp_rated_max,
};
 
#define HWMON_T_ENABLE BIT(hwmon_temp_enable)
#define HWMON_T_INPUT BIT(hwmon_temp_input)
#define HWMON_T_TYPE BIT(hwmon_temp_type)
#define HWMON_T_LCRIT BIT(hwmon_temp_lcrit)
#define HWMON_T_LCRIT_HYST BIT(hwmon_temp_lcrit_hyst)
#define HWMON_T_MIN BIT(hwmon_temp_min)
#define HWMON_T_MIN_HYST BIT(hwmon_temp_min_hyst)
#define HWMON_T_MAX BIT(hwmon_temp_max)
#define HWMON_T_MAX_HYST BIT(hwmon_temp_max_hyst)
#define HWMON_T_CRIT BIT(hwmon_temp_crit)
#define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst)
#define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency)
#define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst)
#define HWMON_T_ALARM BIT(hwmon_temp_alarm)
#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm)
#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm)
#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm)
#define HWMON_T_LCRIT_ALARM BIT(hwmon_temp_lcrit_alarm)
#define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm)
#define HWMON_T_FAULT BIT(hwmon_temp_fault)
#define HWMON_T_OFFSET BIT(hwmon_temp_offset)
#define HWMON_T_LABEL BIT(hwmon_temp_label)
#define HWMON_T_LOWEST BIT(hwmon_temp_lowest)
#define HWMON_T_HIGHEST BIT(hwmon_temp_highest)
#define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history)
#define HWMON_T_RATED_MIN BIT(hwmon_temp_rated_min)
#define HWMON_T_RATED_MAX BIT(hwmon_temp_rated_max)
 
enum hwmon_in_attributes {
	hwmon_in_enable,
	hwmon_in_input,
	hwmon_in_min,
	hwmon_in_max,
	hwmon_in_lcrit,
	hwmon_in_crit,
	hwmon_in_average,
	hwmon_in_lowest,
	hwmon_in_highest,
	hwmon_in_reset_history,
	hwmon_in_label,
	hwmon_in_alarm,
	hwmon_in_min_alarm,
	hwmon_in_max_alarm,
	hwmon_in_lcrit_alarm,
	hwmon_in_crit_alarm,
	hwmon_in_rated_min,
	hwmon_in_rated_max,
};
 
#define HWMON_I_ENABLE BIT(hwmon_in_enable)
#define HWMON_I_INPUT BIT(hwmon_in_input)
#define HWMON_I_MIN BIT(hwmon_in_min)
#define HWMON_I_MAX BIT(hwmon_in_max)
#define HWMON_I_LCRIT BIT(hwmon_in_lcrit)
#define HWMON_I_CRIT BIT(hwmon_in_crit)
#define HWMON_I_AVERAGE BIT(hwmon_in_average)
#define HWMON_I_LOWEST BIT(hwmon_in_lowest)
#define HWMON_I_HIGHEST BIT(hwmon_in_highest)
#define HWMON_I_RESET_HISTORY BIT(hwmon_in_reset_history)
#define HWMON_I_LABEL BIT(hwmon_in_label)
#define HWMON_I_ALARM BIT(hwmon_in_alarm)
#define HWMON_I_MIN_ALARM BIT(hwmon_in_min_alarm)
#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm)
#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm)
#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
#define HWMON_I_RATED_MIN BIT(hwmon_in_rated_min)
#define HWMON_I_RATED_MAX BIT(hwmon_in_rated_max)
 
enum hwmon_curr_attributes {
	hwmon_curr_enable,
	hwmon_curr_input,
	hwmon_curr_min,
	hwmon_curr_max,
	hwmon_curr_lcrit,
	hwmon_curr_crit,
	hwmon_curr_average,
	hwmon_curr_lowest,
	hwmon_curr_highest,
	hwmon_curr_reset_history,
	hwmon_curr_label,
	hwmon_curr_alarm,
	hwmon_curr_min_alarm,
	hwmon_curr_max_alarm,
	hwmon_curr_lcrit_alarm,
	hwmon_curr_crit_alarm,
	hwmon_curr_rated_min,
	hwmon_curr_rated_max,
};
 
#define HWMON_C_ENABLE BIT(hwmon_curr_enable)
#define HWMON_C_INPUT BIT(hwmon_curr_input)
#define HWMON_C_MIN BIT(hwmon_curr_min)
#define HWMON_C_MAX BIT(hwmon_curr_max)
#define HWMON_C_LCRIT BIT(hwmon_curr_lcrit)
#define HWMON_C_CRIT BIT(hwmon_curr_crit)
#define HWMON_C_AVERAGE BIT(hwmon_curr_average)
#define HWMON_C_LOWEST BIT(hwmon_curr_lowest)
#define HWMON_C_HIGHEST BIT(hwmon_curr_highest)
#define HWMON_C_RESET_HISTORY BIT(hwmon_curr_reset_history)
#define HWMON_C_LABEL BIT(hwmon_curr_label)
#define HWMON_C_ALARM BIT(hwmon_curr_alarm)
#define HWMON_C_MIN_ALARM BIT(hwmon_curr_min_alarm)
#define HWMON_C_MAX_ALARM BIT(hwmon_curr_max_alarm)
#define HWMON_C_LCRIT_ALARM BIT(hwmon_curr_lcrit_alarm)
#define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm)
#define HWMON_C_RATED_MIN BIT(hwmon_curr_rated_min)
#define HWMON_C_RATED_MAX BIT(hwmon_curr_rated_max)
 
enum hwmon_power_attributes {
	hwmon_power_enable,
	hwmon_power_average,
	hwmon_power_average_interval,
	hwmon_power_average_interval_max,
	hwmon_power_average_interval_min,
	hwmon_power_average_highest,
	hwmon_power_average_lowest,
	hwmon_power_average_max,
	hwmon_power_average_min,
	hwmon_power_input,
	hwmon_power_input_highest,
	hwmon_power_input_lowest,
	hwmon_power_reset_history,
	hwmon_power_accuracy,
	hwmon_power_cap,
	hwmon_power_cap_hyst,
	hwmon_power_cap_max,
	hwmon_power_cap_min,
	hwmon_power_min,
	hwmon_power_max,
	hwmon_power_crit,
	hwmon_power_lcrit,
	hwmon_power_label,
	hwmon_power_alarm,
	hwmon_power_cap_alarm,
	hwmon_power_min_alarm,
	hwmon_power_max_alarm,
	hwmon_power_lcrit_alarm,
	hwmon_power_crit_alarm,
	hwmon_power_rated_min,
	hwmon_power_rated_max,
};
 
#define HWMON_P_ENABLE BIT(hwmon_power_enable)
#define HWMON_P_AVERAGE BIT(hwmon_power_average)
#define HWMON_P_AVERAGE_INTERVAL BIT(hwmon_power_average_interval)
#define HWMON_P_AVERAGE_INTERVAL_MAX BIT(hwmon_power_average_interval_max)
#define HWMON_P_AVERAGE_INTERVAL_MIN BIT(hwmon_power_average_interval_min)
#define HWMON_P_AVERAGE_HIGHEST BIT(hwmon_power_average_highest)
#define HWMON_P_AVERAGE_LOWEST BIT(hwmon_power_average_lowest)
#define HWMON_P_AVERAGE_MAX BIT(hwmon_power_average_max)
#define HWMON_P_AVERAGE_MIN BIT(hwmon_power_average_min)
#define HWMON_P_INPUT BIT(hwmon_power_input)
#define HWMON_P_INPUT_HIGHEST BIT(hwmon_power_input_highest)
#define HWMON_P_INPUT_LOWEST BIT(hwmon_power_input_lowest)
#define HWMON_P_RESET_HISTORY BIT(hwmon_power_reset_history)
#define HWMON_P_ACCURACY BIT(hwmon_power_accuracy)
#define HWMON_P_CAP BIT(hwmon_power_cap)
#define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst)
#define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max)
#define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min)
#define HWMON_P_MIN BIT(hwmon_power_min)
#define HWMON_P_MAX BIT(hwmon_power_max)
#define HWMON_P_LCRIT BIT(hwmon_power_lcrit)
#define HWMON_P_CRIT BIT(hwmon_power_crit)
#define HWMON_P_LABEL BIT(hwmon_power_label)
#define HWMON_P_ALARM BIT(hwmon_power_alarm)
#define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm)
#define HWMON_P_MIN_ALARM BIT(hwmon_power_min_alarm)
#define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm)
#define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm)
#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)
#define HWMON_P_RATED_MIN BIT(hwmon_power_rated_min)
#define HWMON_P_RATED_MAX BIT(hwmon_power_rated_max)
 
enum hwmon_energy_attributes {
	hwmon_energy_enable,
	hwmon_energy_input,
	hwmon_energy_label,
};
 
#define HWMON_E_ENABLE BIT(hwmon_energy_enable)
#define HWMON_E_INPUT BIT(hwmon_energy_input)
#define HWMON_E_LABEL BIT(hwmon_energy_label)
 
enum hwmon_humidity_attributes {
	hwmon_humidity_enable,
	hwmon_humidity_input,
	hwmon_humidity_label,
	hwmon_humidity_min,
	hwmon_humidity_min_hyst,
	hwmon_humidity_max,
	hwmon_humidity_max_hyst,
	hwmon_humidity_alarm,
	hwmon_humidity_fault,
	hwmon_humidity_rated_min,
	hwmon_humidity_rated_max,
};
 
#define HWMON_H_ENABLE BIT(hwmon_humidity_enable)
#define HWMON_H_INPUT BIT(hwmon_humidity_input)
#define HWMON_H_LABEL BIT(hwmon_humidity_label)
#define HWMON_H_MIN BIT(hwmon_humidity_min)
#define HWMON_H_MIN_HYST BIT(hwmon_humidity_min_hyst)
#define HWMON_H_MAX BIT(hwmon_humidity_max)
#define HWMON_H_MAX_HYST BIT(hwmon_humidity_max_hyst)
#define HWMON_H_ALARM BIT(hwmon_humidity_alarm)
#define HWMON_H_FAULT BIT(hwmon_humidity_fault)
#define HWMON_H_RATED_MIN BIT(hwmon_humidity_rated_min)
#define HWMON_H_RATED_MAX BIT(hwmon_humidity_rated_max)
 
enum hwmon_fan_attributes {
	hwmon_fan_enable,
	hwmon_fan_input,
	hwmon_fan_label,
	hwmon_fan_min,
	hwmon_fan_max,
	hwmon_fan_div,
	hwmon_fan_pulses,
	hwmon_fan_target,
	hwmon_fan_alarm,
	hwmon_fan_min_alarm,
	hwmon_fan_max_alarm,
	hwmon_fan_fault,
};
 
#define HWMON_F_ENABLE BIT(hwmon_fan_enable)
#define HWMON_F_INPUT BIT(hwmon_fan_input)
#define HWMON_F_LABEL BIT(hwmon_fan_label)
#define HWMON_F_MIN BIT(hwmon_fan_min)
#define HWMON_F_MAX BIT(hwmon_fan_max)
#define HWMON_F_DIV BIT(hwmon_fan_div)
#define HWMON_F_PULSES BIT(hwmon_fan_pulses)
#define HWMON_F_TARGET BIT(hwmon_fan_target)
#define HWMON_F_ALARM BIT(hwmon_fan_alarm)
#define HWMON_F_MIN_ALARM BIT(hwmon_fan_min_alarm)
#define HWMON_F_MAX_ALARM BIT(hwmon_fan_max_alarm)
#define HWMON_F_FAULT BIT(hwmon_fan_fault)
 
enum hwmon_pwm_attributes {
	hwmon_pwm_input,
	hwmon_pwm_enable,
	hwmon_pwm_mode,
	hwmon_pwm_freq,
};
 
#define HWMON_PWM_INPUT BIT(hwmon_pwm_input)
#define HWMON_PWM_ENABLE BIT(hwmon_pwm_enable)
#define HWMON_PWM_MODE BIT(hwmon_pwm_mode)
#define HWMON_PWM_FREQ BIT(hwmon_pwm_freq)
 
enum hwmon_intrusion_attributes {
	hwmon_intrusion_alarm,
	hwmon_intrusion_beep,
};
#define HWMON_INTRUSION_ALARM BIT(hwmon_intrusion_alarm)
#define HWMON_INTRUSION_BEEP BIT(hwmon_intrusion_beep)
 
/**
* struct hwmon_ops - hwmon device operations
* @is_visible: Callback to return attribute visibility. Mandatory.
* Parameters are:
* @const void *drvdata:
* Pointer to driver-private data structure passed
* as argument to hwmon_device_register_with_info().
* @type: Sensor type
* @attr: Sensor attribute
* @channel:
* Channel number
* The function returns the file permissions.
* If the return value is 0, no attribute will be created.
* @read: Read callback for data attributes. Mandatory if readable
* data attributes are present.
* Parameters are:
* @dev: Pointer to hardware monitoring device
* @type: Sensor type
* @attr: Sensor attribute
* @channel:
* Channel number
* @val: Pointer to returned value
* The function returns 0 on success or a negative error number.
* @read_string:
* Read callback for string attributes. Mandatory if string
* attributes are present.
* Parameters are:
* @dev: Pointer to hardware monitoring device
* @type: Sensor type
* @attr: Sensor attribute
* @channel:
* Channel number
* @str: Pointer to returned string
* The function returns 0 on success or a negative error number.
* @write: Write callback for data attributes. Mandatory if writeable
* data attributes are present.
* Parameters are:
* @dev: Pointer to hardware monitoring device
* @type: Sensor type
* @attr: Sensor attribute
* @channel:
* Channel number
* @val: Value to write
* The function returns 0 on success or a negative error number.
*/
struct hwmon_ops {
	umode_t (*is_visible)(const void *drvdata, enum hwmon_sensor_types type,
			      u32 attr, int channel);
	int (*read)(struct device *dev, enum hwmon_sensor_types type,
		    u32 attr, int channel, long *val);
	int (*read_string)(struct device *dev, enum hwmon_sensor_types type,
			   u32 attr, int channel, const char **str);
	int (*write)(struct device *dev, enum hwmon_sensor_types type,
		     u32 attr, int channel, long val);
};
 
/**
* Channel information
* @type: Channel type.
* @config: Pointer to NULL-terminated list of channel parameters.
* Use for per-channel attributes.
*/
struct hwmon_channel_info {
	enum hwmon_sensor_types type;
	const u32 *config;
};
 
#define HWMON_CHANNEL_INFO(stype, ...)	\
	(&(struct hwmon_channel_info) {	\
		.type = hwmon_##stype,	\
		.config = (u32 []) {	\
			__VA_ARGS__, 0	\
		}			\
	})
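
For instance, HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX) yields a pointer to a compound literal equivalent to the following (shown expanded for clarity):

&(struct hwmon_channel_info) {
	.type = hwmon_temp,
	.config = (u32 []) { HWMON_T_INPUT | HWMON_T_MAX, 0 },	/* 0-terminated */
}
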
 
/**
* Chip configuration
* @ops: Pointer to hwmon operations.
* @info: Null-terminated list of channel information.
*/
struct hwmon_chip_info {
	const struct hwmon_ops *ops;
	const struct hwmon_channel_info **info;
};
 
/* hwmon_device_register() is deprecated */
struct device *hwmon_device_register(struct device *dev);
 
struct device *
hwmon_device_register_with_groups(struct device *dev, const char *name,
				  void *drvdata,
				  const struct attribute_group **groups);
struct device *
devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
				       void *drvdata,
				       const struct attribute_group **groups);
struct device *
hwmon_device_register_with_info(struct device *dev,
				const char *name, void *drvdata,
				const struct hwmon_chip_info *info,
				const struct attribute_group **extra_groups);
struct device *
devm_hwmon_device_register_with_info(struct device *dev,
				     const char *name, void *drvdata,
				     const struct hwmon_chip_info *info,
				     const struct attribute_group **extra_groups);
 
void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);
 
int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
		       u32 attr, int channel);
 
/**
* hwmon_is_bad_char - Is the char invalid in a hwmon name
* @ch: the char to be considered
*
* hwmon_is_bad_char() can be used to determine if the given character
* may not be used in a hwmon name.
*
* Returns true if the char is invalid, false otherwise.
*/
static inline bool hwmon_is_bad_char(const char ch)
{
	switch (ch) {
	case '-':
	case '*':
	case ' ':
	case '\t':
	case '\n':
		return true;
	default:
		return false;
	}
}
 
#endif
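
Pulling the pieces above together, a minimal single-temperature driver sketch against this API might look as follows. The demo_* names and the get_temp_mC() helper are invented for illustration, and error handling is trimmed:

#include <linux/hwmon.h>
#include <linux/errno.h>

static long get_temp_mC(void)
{
	return 42000;	/* hypothetical sensor read, millidegrees Celsius */
}

static umode_t demo_is_visible(const void *drvdata,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel)
{
	return 0444;	/* everything we expose is read-only */
}

static int demo_read(struct device *dev, enum hwmon_sensor_types type,
		     u32 attr, int channel, long *val)
{
	if (type == hwmon_temp && attr == hwmon_temp_input) {
		*val = get_temp_mC();
		return 0;
	}
	return -EOPNOTSUPP;
}

static const struct hwmon_ops demo_ops = {
	.is_visible = demo_is_visible,
	.read	    = demo_read,
};

static const struct hwmon_channel_info *demo_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
	NULL
};

static const struct hwmon_chip_info demo_chip = {
	.ops  = &demo_ops,
	.info = demo_info,
};

/* In the owning driver's probe():
 *	hwmon = devm_hwmon_device_register_with_info(dev, "demo", NULL,
 *						      &demo_chip, NULL);
 */
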
/drivers/include/linux/pci.h
917,6 → 917,8
 
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
 
 
#if 0
static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
{
*val = PciRead8(dev->busnr, dev->devfn, where);
949,7 → 951,56
PciWrite32(dev->busnr, dev->devfn, where, val);
return 1;
}
#endif
 
static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val)
{
	*val = PciRead8(dev->busnr, dev->devfn, where);
	return 0;
}

static inline int pci_read_config_word(struct pci_dev *dev, int where, u16 *val)
{
	if (where & 1)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	*val = PciRead16(dev->busnr, dev->devfn, where);
	return 0;
}

static inline int pci_read_config_dword(struct pci_dev *dev, int where, u32 *val)
{
	if (where & 3)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	*val = PciRead32(dev->busnr, dev->devfn, where);
	return 0;
}

static inline int pci_write_config_byte(struct pci_dev *dev, int where, u8 val)
{
	PciWrite8(dev->busnr, dev->devfn, where, val);
	return 0;
}

static inline int pci_write_config_word(struct pci_dev *dev, int where, u16 val)
{
	if (where & 1)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	PciWrite16(dev->busnr, dev->devfn, where, val);
	return 0;
}

static inline int pci_write_config_dword(struct pci_dev *dev, int where,
					 u32 val)
{
	if (where & 3)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	PciWrite32(dev->busnr, dev->devfn, where, val);
	return 0;
}
 
 
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1436,11 → 1487,11
_PCI_NOP(o, dword, u32 x)
_PCI_NOP_ALL(read, *)
_PCI_NOP_ALL(write,)
 
/*
static inline struct pci_dev *pci_get_device(unsigned int vendor,
unsigned int device,
struct pci_dev *from)
{ return NULL; }
{ return NULL; }*/
 
static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
unsigned int device,
/drivers/include/linux/pci_ids.h
535,6 → 535,8
#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F3 0x1583
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F4 0x1584
#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
3016,4 → 3018,13
 
#define PCI_VENDOR_ID_OCZ 0x1b85
 
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
#define PCI_VENDOR_ID_HYGON 0x1d94
#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3 0x15b3
#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
 
#endif /* _LINUX_PCI_IDS_H */
/drivers/include/linux/refcount.h
0,0 → 1,369
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Variant of atomic_t specialized for reference counts.
*
* The interface matches the atomic_t interface (to aid in porting) but only
* provides the few functions one should use for reference counting.
*
* Saturation semantics
* ====================
*
* refcount_t differs from atomic_t in that the counter saturates at
* REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
* counter and causing 'spurious' use-after-free issues. In order to avoid the
* cost associated with introducing cmpxchg() loops into all of the saturating
* operations, we temporarily allow the counter to take on an unchecked value
* and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
* or overflow has occurred. Although this is racy when multiple threads
* access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
* equidistant from 0 and INT_MAX we minimise the scope for error:
*
*                         INT_MAX   REFCOUNT_SATURATED   UINT_MAX
*   0                  (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
*   +--------------------------------+----------------+----------------+
*                                     <---------- bad value! ---------->
*
* (in a signed view of the world, the "bad value" range corresponds to
* a negative counter value).
*
* As an example, consider a refcount_inc() operation that causes the counter
* to overflow:
*
*	int old = atomic_fetch_add_relaxed(1, &r->refs);
*	// old is INT_MAX, refcount now INT_MIN (0x8000_0000)
*	if (old < 0)
*		atomic_set(&r->refs, REFCOUNT_SATURATED);
*
* If another thread also performs a refcount_inc() operation between the two
* atomic operations, then the count will continue to edge closer to 0. If it
* reaches a value of 1 before /any/ of the threads reset it to the saturated
* value, then a concurrent refcount_dec_and_test() may erroneously free the
* underlying object.
* Linux limits the maximum number of tasks to PID_MAX_LIMIT, which is currently
* 0x400000 (and can't easily be raised in the future beyond FUTEX_TID_MASK).
* With the current PID limit, if no batched refcounting operations are used and
* the attacker can't repeatedly trigger kernel oopses in the middle of refcount
* operations, this makes it impossible for a saturated refcount to leave the
* saturation range, even if it is possible for multiple uses of the same
* refcount to nest in the context of a single task:
*
* (UINT_MAX+1-REFCOUNT_SATURATED) / PID_MAX_LIMIT =
* 0x40000000 / 0x400000 = 0x100 = 256
*
* If hundreds of references are added/removed with a single refcounting
* operation, it may potentially be possible to leave the saturation range; but
* given the precise timing details involved with the round-robin scheduling of
* each thread manipulating the refcount and the need to hit the race multiple
* times in succession, there doesn't appear to be a practical avenue of attack
* even if using refcount_add() operations with larger increments.
*
* Memory ordering
* ===============
*
* Memory ordering rules are slightly relaxed wrt regular atomic_t functions
* and provide only what is strictly required for refcounts.
*
* The increments are fully relaxed; these will not provide ordering. The
* rationale is that whatever is used to obtain the object we're increasing the
* reference count on will provide the ordering. For locked data structures,
* it's the lock acquire; for RCU/lockless data structures it's the dependent
* load.
*
* Do note that inc_not_zero() provides a control dependency which will order
* future stores against the inc; this ensures we'll never modify the object
* if we did not in fact acquire a reference.
*
* The decrements provide release order, such that all prior loads and
* stores will be issued before; they also provide a control dependency, which
* will order us against the subsequent free().
*
* The control dependency is against the load of the cmpxchg (ll/sc) that
* succeeded. This means the stores aren't fully ordered, but this is fine
* because the 1->0 transition indicates no concurrency.
*
* Note that the allocator is responsible for ordering things between free()
* and alloc().
*
* The decrements dec_and_test() and sub_and_test() also provide acquire
* ordering on success.
*
*/
 
#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H
 
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/compiler.h>
//#include <linux/limits.h>
#include <linux/spinlock_types.h>
 
struct mutex;
 
/**
* typedef refcount_t - variant of atomic_t specialized for reference counts
* @refs: atomic_t counter field
*
* The counter saturates at REFCOUNT_SATURATED and will not move once
* there. This avoids wrapping the counter and causing 'spurious'
* use-after-free bugs.
*/
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;
 
#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX INT_MAX
#define REFCOUNT_SATURATED (INT_MIN / 2)
 
enum refcount_saturation_type {
	REFCOUNT_ADD_NOT_ZERO_OVF,
	REFCOUNT_ADD_OVF,
	REFCOUNT_ADD_UAF,
	REFCOUNT_SUB_UAF,
	REFCOUNT_DEC_LEAK,
};
 
void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);
 
/**
* refcount_set - set a refcount's value
* @r: the refcount
* @n: value to which the refcount will be set
*/
static inline void refcount_set(refcount_t *r, int n)
{
	atomic_set(&r->refs, n);
}
 
/**
* refcount_read - get a refcount's value
* @r: the refcount
*
* Return: the refcount's value
*/
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}
 
static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
{
	int old = refcount_read(r);

	do {
		if (!old)
			break;
	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

	if (oldp)
		*oldp = old;

	if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);

	return old;
}
 
/**
* refcount_add_not_zero - add a value to a refcount unless it is 0
* @i: the value to add to the refcount
* @r: the refcount
*
* Will saturate at REFCOUNT_SATURATED and WARN.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
* and thereby orders future stores. See the comment on top.
*
* Use of this function is not recommended for the normal reference counting
* use case in which references are taken and released one at a time. In these
* cases, refcount_inc(), or one of its variants, should instead be used to
* increment a reference count.
*
* Return: false if the passed refcount is 0, true otherwise
*/
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	return __refcount_add_not_zero(i, r, NULL);
}
 
static inline void __refcount_add(int i, refcount_t *r, int *oldp)
{
	int old = atomic_fetch_add_relaxed(i, &r->refs);

	if (oldp)
		*oldp = old;

	if (unlikely(!old))
		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
	else if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}
 
/**
* refcount_add - add a value to a refcount
* @i: the value to add to the refcount
* @r: the refcount
*
* Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
* and thereby orders future stores. See the comment on top.
*
* Use of this function is not recommended for the normal reference counting
* use case in which references are taken and released one at a time. In these
* cases, refcount_inc(), or one of its variants, should instead be used to
* increment a reference count.
*/
static inline void refcount_add(int i, refcount_t *r)
{
	__refcount_add(i, r, NULL);
}
 
static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
{
	return __refcount_add_not_zero(1, r, oldp);
}
 
/**
* refcount_inc_not_zero - increment a refcount unless it is 0
* @r: the refcount to increment
*
* Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
* and WARN.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
* and thereby orders future stores. See the comment on top.
*
* Return: true if the increment was successful, false otherwise
*/
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return __refcount_inc_not_zero(r, NULL);
}
 
static inline void __refcount_inc(refcount_t *r, int *oldp)
{
	__refcount_add(1, r, oldp);
}
 
/**
* refcount_inc - increment a refcount
* @r: the refcount to increment
*
* Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
*
* Provides no memory ordering, it is assumed the caller already has a
* reference on the object.
*
* Will WARN if the refcount is 0, as this represents a possible use-after-free
* condition.
*/
static inline void refcount_inc(refcount_t *r)
{
	__refcount_inc(r, NULL);
}
 
static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
{
	int old = atomic_fetch_sub_release(i, &r->refs);

	if (oldp)
		*oldp = old;

	if (old == i) {
		smp_acquire__after_ctrl_dep();
		return true;
	}

	if (unlikely(old < 0 || old - i < 0))
		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);

	return false;
}
 
/**
* refcount_sub_and_test - subtract from a refcount and test if it is 0
* @i: amount to subtract from the refcount
* @r: the refcount
*
* Similar to atomic_dec_and_test(), but it will WARN, return false and
* ultimately leak on underflow and will fail to decrement when saturated
* at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides an acquire ordering on success such that free()
* must come after.
*
* Use of this function is not recommended for the normal reference counting
* use case in which references are taken and released one at a time. In these
* cases, refcount_dec(), or one of its variants, should instead be used to
* decrement a reference count.
*
* Return: true if the resulting refcount is 0, false otherwise
*/
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	return __refcount_sub_and_test(i, r, NULL);
}
 
static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
{
	return __refcount_sub_and_test(1, r, oldp);
}
 
/**
* refcount_dec_and_test - decrement a refcount and test if it is 0
* @r: the refcount
*
* Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
* decrement when saturated at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides an acquire ordering on success such that free()
* must come after.
*
* Return: true if the resulting refcount is 0, false otherwise
*/
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return __refcount_dec_and_test(r, NULL);
}
 
static inline void __refcount_dec(refcount_t *r, int *oldp)
{
	int old = atomic_fetch_sub_release(1, &r->refs);

	if (oldp)
		*oldp = old;

	if (unlikely(old <= 1))
		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}
 
/**
* refcount_dec - decrement a refcount
* @r: the refcount
*
* Similar to atomic_dec(), it will WARN on underflow and fail to decrement
* when saturated at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before.
*/
static inline void refcount_dec(refcount_t *r)
{
	__refcount_dec(r, NULL);
}
 
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
						       spinlock_t *lock,
						       unsigned long *flags);
#endif /* _LINUX_REFCOUNT_H */
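
As a closing usage sketch, the get/put pattern these primitives are designed for (the struct demo object and its payload are hypothetical; kfree() assumes a slab-allocated object):

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo {
	refcount_t ref;
	/* ... payload ... */
};

static void demo_get(struct demo *d)
{
	refcount_inc(&d->ref);	/* WARNs if the count was already 0 */
}

static void demo_put(struct demo *d)
{
	/*
	 * Release ordering on the decrement, acquire on the final 1->0
	 * transition: the kfree() below cannot be reordered before it.
	 */
	if (refcount_dec_and_test(&d->ref))
		kfree(d);
}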