/drivers/include/linux/acpi.h |
---|
0,0 → 1,935 |
/* |
* acpi.h - ACPI Interface |
* |
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> |
* |
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
*/ |
#ifndef _LINUX_ACPI_H |
#define _LINUX_ACPI_H |
#include <linux/errno.h> |
#include <linux/ioport.h> /* for struct resource */ |
#include <linux/resource_ext.h> |
#include <linux/device.h> |
#include <linux/property.h> |
#ifndef _LINUX |
#define _LINUX |
#endif |
#include <acpi/acpi.h> |
#ifdef CONFIG_ACPI |
#include <linux/list.h> |
#include <linux/mod_devicetable.h> |
#include <linux/dynamic_debug.h> |
#include <acpi/acpi_bus.h> |
#include <acpi/acpi_drivers.h> |
#include <acpi/acpi_numa.h> |
#include <acpi/acpi_io.h> |
#include <asm/acpi.h> |
static inline acpi_handle acpi_device_handle(struct acpi_device *adev) |
{ |
return adev ? adev->handle : NULL; |
} |
#define ACPI_COMPANION(dev) to_acpi_device_node((dev)->fwnode) |
#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ |
acpi_fwnode_handle(adev) : NULL) |
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) |
/** |
* ACPI_DEVICE_CLASS - macro used to describe an ACPI device with |
* the PCI-defined class-code information |
* |
* @_cls : the class, subclass, prog-if triple for this device |
* @_msk : the class mask for this device |
* |
* This macro is used to create a struct acpi_device_id that matches a |
* specific PCI class. The .id and .driver_data fields will be left |
* initialized with the default value. |
*/ |
#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk), |
//static inline bool has_acpi_companion(struct device *dev) |
//{ |
// return is_acpi_device_node(dev->fwnode); |
//} |
//static inline void acpi_preset_companion(struct device *dev, |
// struct acpi_device *parent, u64 addr) |
//{ |
// ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, NULL)); |
//} |
/*
 * acpi_dev_name - name of the struct device embedded in @adev.
 * NOTE(review): @adev is dereferenced unconditionally, so callers must not
 * pass NULL (unlike acpi_device_handle(), which tolerates it).
 */
static inline const char *acpi_dev_name(struct acpi_device *adev)
{
        return dev_name(&adev->dev);
}
enum acpi_irq_model_id { |
ACPI_IRQ_MODEL_PIC = 0, |
ACPI_IRQ_MODEL_IOAPIC, |
ACPI_IRQ_MODEL_IOSAPIC, |
ACPI_IRQ_MODEL_PLATFORM, |
ACPI_IRQ_MODEL_GIC, |
ACPI_IRQ_MODEL_COUNT |
}; |
extern enum acpi_irq_model_id acpi_irq_model; |
enum acpi_interrupt_id { |
ACPI_INTERRUPT_PMI = 1, |
ACPI_INTERRUPT_INIT, |
ACPI_INTERRUPT_CPEI, |
ACPI_INTERRUPT_COUNT |
}; |
#define ACPI_SPACE_MEM 0 |
enum acpi_address_range_id { |
ACPI_ADDRESS_RANGE_MEMORY = 1, |
ACPI_ADDRESS_RANGE_RESERVED = 2, |
ACPI_ADDRESS_RANGE_ACPI = 3, |
ACPI_ADDRESS_RANGE_NVS = 4, |
ACPI_ADDRESS_RANGE_COUNT |
}; |
/* Table Handlers */ |
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); |
typedef int (*acpi_tbl_entry_handler)(struct acpi_subtable_header *header, |
const unsigned long end); |
#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE |
void acpi_initrd_override(void *data, size_t size); |
#else |
/* No-op stub used when CONFIG_ACPI_INITRD_TABLE_OVERRIDE is disabled. */
static inline void acpi_initrd_override(void *data, size_t size)
{
}
#endif |
/*
 * BAD_MADT_ENTRY - sanity-check one MADT subtable entry.
 * True when @entry is NULL, when the entry would extend past @end, or when
 * the entry's self-reported length is smaller than its C structure.
 * All macro parameters are parenthesized so the expansion stays correct
 * for arbitrary argument expressions.
 */
#define BAD_MADT_ENTRY(entry, end) ( \
        (!(entry)) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
        ((struct acpi_subtable_header *)(entry))->length < sizeof(*(entry)))
/* Descriptor for one subtable type handled by acpi_table_parse_entries_array(). */
struct acpi_subtable_proc {
        int id;                         /* subtable type id to match */
        acpi_tbl_entry_handler handler; /* callback invoked per matching entry */
        int count;                      /* entries processed (filled by parser) */
};
char * __acpi_map_table (unsigned long phys_addr, unsigned long size); |
void __acpi_unmap_table(char *map, unsigned long size); |
int early_acpi_boot_init(void); |
int acpi_boot_init (void); |
void acpi_boot_table_init (void); |
int acpi_mps_check (void); |
int acpi_numa_init (void); |
int acpi_table_init (void); |
int acpi_table_parse(char *id, acpi_tbl_table_handler handler); |
int __init acpi_parse_entries(char *id, unsigned long table_size, |
acpi_tbl_entry_handler handler, |
struct acpi_table_header *table_header, |
int entry_id, unsigned int max_entries); |
/*
 * NOTE(review): this prototype was declared twice back to back; the
 * redundant, byte-identical second declaration has been removed.
 */
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
                              int entry_id,
                              acpi_tbl_entry_handler handler,
                              unsigned int max_entries);
int __init acpi_table_parse_entries_array(char *id, unsigned long table_size, |
struct acpi_subtable_proc *proc, int proc_num, |
unsigned int max_entries); |
int acpi_table_parse_madt(enum acpi_madt_type id, |
acpi_tbl_entry_handler handler, |
unsigned int max_entries); |
int acpi_parse_mcfg (struct acpi_table_header *header); |
void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); |
/* the following four functions are architecture-dependent */ |
void acpi_numa_slit_init (struct acpi_table_slit *slit); |
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); |
void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); |
int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); |
void acpi_numa_arch_fixup(void); |
#ifndef PHYS_CPUID_INVALID |
typedef u32 phys_cpuid_t; |
#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1) |
#endif |
/*
 * invalid_logical_cpuid - test whether a logical CPU id is invalid.
 * Invalid ids are encoded with the top bit set (e.g. (u32)-1).
 * The sign bit is tested explicitly rather than via the original
 * "(int)cpuid < 0": converting a u32 value above INT_MAX to int has an
 * implementation-defined result in standard C.
 */
static inline bool invalid_logical_cpuid(u32 cpuid)
{
        return (cpuid & 0x80000000u) != 0;
}
/* True when @phys_id is the PHYS_CPUID_INVALID sentinel. */
static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
{
        return phys_id == PHYS_CPUID_INVALID;
}
#ifdef CONFIG_ACPI_HOTPLUG_CPU |
/* Arch dependent functions for cpu hotplug support */ |
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu); |
int acpi_unmap_cpu(int cpu); |
#endif /* CONFIG_ACPI_HOTPLUG_CPU */ |
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC |
int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr); |
#endif |
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); |
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); |
int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base); |
void acpi_irq_stats_init(void); |
extern u32 acpi_irq_handled; |
extern u32 acpi_irq_not_handled; |
extern unsigned int acpi_sci_irq; |
#define INVALID_ACPI_IRQ ((unsigned)-1) |
/* True once an SCI interrupt has been registered (acpi_sci_irq assigned). */
static inline bool acpi_sci_irq_valid(void)
{
        return acpi_sci_irq != INVALID_ACPI_IRQ;
}
extern int sbf_port; |
extern unsigned long acpi_realmode_flags; |
int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity); |
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); |
int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); |
void acpi_set_irq_model(enum acpi_irq_model_id model, |
struct fwnode_handle *fwnode); |
#ifdef CONFIG_X86_IO_APIC |
extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); |
#else |
#define acpi_get_override_irq(gsi, trigger, polarity) (-1) |
#endif |
/* |
* This function undoes the effect of one call to acpi_register_gsi(). |
* If this matches the last registration, any IRQ resources for gsi |
* are freed. |
*/ |
void acpi_unregister_gsi (u32 gsi); |
struct pci_dev; |
int acpi_pci_irq_enable (struct pci_dev *dev); |
void acpi_penalize_isa_irq(int irq, int active); |
bool acpi_isa_irq_available(int irq); |
void acpi_penalize_sci_irq(int irq, int trigger, int polarity); |
void acpi_pci_irq_disable (struct pci_dev *dev); |
extern int ec_read(u8 addr, u8 *val); |
extern int ec_write(u8 addr, u8 val); |
extern int ec_transaction(u8 command, |
const u8 *wdata, unsigned wdata_len, |
u8 *rdata, unsigned rdata_len); |
extern acpi_handle ec_get_handle(void); |
extern bool acpi_is_pnp_device(struct acpi_device *); |
#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) |
typedef void (*wmi_notify_handler) (u32 value, void *context); |
extern acpi_status wmi_evaluate_method(const char *guid, u8 instance, |
u32 method_id, |
const struct acpi_buffer *in, |
struct acpi_buffer *out); |
extern acpi_status wmi_query_block(const char *guid, u8 instance, |
struct acpi_buffer *out); |
extern acpi_status wmi_set_block(const char *guid, u8 instance, |
const struct acpi_buffer *in); |
extern acpi_status wmi_install_notify_handler(const char *guid, |
wmi_notify_handler handler, void *data); |
extern acpi_status wmi_remove_notify_handler(const char *guid); |
extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out); |
extern bool wmi_has_guid(const char *guid); |
#endif /* CONFIG_ACPI_WMI */ |
#define ACPI_VIDEO_OUTPUT_SWITCHING 0x0001 |
#define ACPI_VIDEO_DEVICE_POSTING 0x0002 |
#define ACPI_VIDEO_ROM_AVAILABLE 0x0004 |
#define ACPI_VIDEO_BACKLIGHT 0x0008 |
#define ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR 0x0010 |
#define ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO 0x0020 |
#define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR 0x0040 |
#define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO 0x0080 |
#define ACPI_VIDEO_BACKLIGHT_DMI_VENDOR 0x0100 |
#define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO 0x0200 |
#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400 |
#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800 |
extern char acpi_video_backlight_string[]; |
extern long acpi_is_video_device(acpi_handle handle); |
extern int acpi_blacklisted(void); |
extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d); |
extern void acpi_osi_setup(char *str); |
extern bool acpi_osi_is_win8(void); |
#ifdef CONFIG_ACPI_NUMA |
int acpi_map_pxm_to_online_node(int pxm); |
int acpi_get_node(acpi_handle handle); |
#else |
/* !CONFIG_ACPI_NUMA stub: every proximity domain maps to node 0. */
static inline int acpi_map_pxm_to_online_node(int pxm)
{
        return 0;
}
/* !CONFIG_ACPI_NUMA stub: all handles report node 0. */
static inline int acpi_get_node(acpi_handle handle)
{
        return 0;
}
#endif |
extern int acpi_paddr_to_node(u64 start_addr, u64 size); |
extern int pnpacpi_disabled; |
#define PXM_INVAL (-1) |
bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res); |
bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res); |
bool acpi_dev_resource_address_space(struct acpi_resource *ares, |
struct resource_win *win); |
bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, |
struct resource_win *win); |
unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); |
bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, |
struct resource *res); |
void acpi_dev_free_resource_list(struct list_head *list); |
int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, |
int (*preproc)(struct acpi_resource *, void *), |
void *preproc_data); |
int acpi_dev_filter_resource_type(struct acpi_resource *ares, |
unsigned long types); |
/*
 * Preproc-callback adapter for acpi_dev_get_resources(): the resource-type
 * mask is smuggled through the void * cookie as an unsigned long and
 * forwarded to acpi_dev_filter_resource_type().
 */
static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares,
                                                   void *arg)
{
        return acpi_dev_filter_resource_type(ares, (unsigned long)arg);
}
int acpi_check_resource_conflict(const struct resource *res); |
int acpi_check_region(resource_size_t start, resource_size_t n, |
const char *name); |
int acpi_resources_are_enforced(void); |
#ifdef CONFIG_HIBERNATION |
void __init acpi_no_s4_hw_signature(void); |
#endif |
#ifdef CONFIG_PM_SLEEP |
void __init acpi_old_suspend_ordering(void); |
void __init acpi_nvs_nosave(void); |
void __init acpi_nvs_nosave_s3(void); |
#endif /* CONFIG_PM_SLEEP */ |
/* In/out context for evaluating an _OSC control method via acpi_run_osc(). */
struct acpi_osc_context {
        char *uuid_str;                 /* UUID string */
        int rev;                        /* _OSC revision to pass in */
        struct acpi_buffer cap;         /* list of DWORD capabilities */
        struct acpi_buffer ret;         /* free by caller if success */
};
acpi_status acpi_str_to_uuid(char *str, u8 *uuid); |
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); |
/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */ |
#define OSC_QUERY_DWORD 0 /* DWORD 1 */ |
#define OSC_SUPPORT_DWORD 1 /* DWORD 2 */ |
#define OSC_CONTROL_DWORD 2 /* DWORD 3 */ |
/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */ |
#define OSC_QUERY_ENABLE 0x00000001 /* input */ |
#define OSC_REQUEST_ERROR 0x00000002 /* return */ |
#define OSC_INVALID_UUID_ERROR 0x00000004 /* return */ |
#define OSC_INVALID_REVISION_ERROR 0x00000008 /* return */ |
#define OSC_CAPABILITIES_MASK_ERROR 0x00000010 /* return */ |
/* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */ |
#define OSC_SB_PAD_SUPPORT 0x00000001 |
#define OSC_SB_PPC_OST_SUPPORT 0x00000002 |
#define OSC_SB_PR3_SUPPORT 0x00000004 |
#define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008 |
#define OSC_SB_APEI_SUPPORT 0x00000010 |
#define OSC_SB_CPC_SUPPORT 0x00000020 |
extern bool osc_sb_apei_support_acked; |
/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */ |
#define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001 |
#define OSC_PCI_ASPM_SUPPORT 0x00000002 |
#define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004 |
#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008 |
#define OSC_PCI_MSI_SUPPORT 0x00000010 |
#define OSC_PCI_SUPPORT_MASKS 0x0000001f |
/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */ |
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001 |
#define OSC_PCI_SHPC_NATIVE_HP_CONTROL 0x00000002 |
#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004 |
#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008 |
#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 |
#define OSC_PCI_CONTROL_MASKS 0x0000001f |
#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002 |
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004 |
#define ACPI_GSB_ACCESS_ATTRIB_BYTE 0x00000006 |
#define ACPI_GSB_ACCESS_ATTRIB_WORD 0x00000008 |
#define ACPI_GSB_ACCESS_ATTRIB_BLOCK 0x0000000A |
#define ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE 0x0000000B |
#define ACPI_GSB_ACCESS_ATTRIB_WORD_CALL 0x0000000C |
#define ACPI_GSB_ACCESS_ATTRIB_BLOCK_CALL 0x0000000D |
#define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E |
#define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F |
extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, |
u32 *mask, u32 req); |
/* Enable _OST when all relevant hotplug operations are enabled */ |
#if defined(CONFIG_ACPI_HOTPLUG_CPU) && \ |
defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \ |
defined(CONFIG_ACPI_CONTAINER) |
#define ACPI_HOTPLUG_OST |
#endif |
/* _OST Source Event Code (OSPM Action) */ |
#define ACPI_OST_EC_OSPM_SHUTDOWN 0x100 |
#define ACPI_OST_EC_OSPM_EJECT 0x103 |
#define ACPI_OST_EC_OSPM_INSERTION 0x200 |
/* _OST General Processing Status Code */ |
#define ACPI_OST_SC_SUCCESS 0x0 |
#define ACPI_OST_SC_NON_SPECIFIC_FAILURE 0x1 |
#define ACPI_OST_SC_UNRECOGNIZED_NOTIFY 0x2 |
/* _OST OS Shutdown Processing (0x100) Status Code */ |
#define ACPI_OST_SC_OS_SHUTDOWN_DENIED 0x80 |
#define ACPI_OST_SC_OS_SHUTDOWN_IN_PROGRESS 0x81 |
#define ACPI_OST_SC_OS_SHUTDOWN_COMPLETED 0x82 |
#define ACPI_OST_SC_OS_SHUTDOWN_NOT_SUPPORTED 0x83 |
/* _OST Ejection Request (0x3, 0x103) Status Code */ |
#define ACPI_OST_SC_EJECT_NOT_SUPPORTED 0x80 |
#define ACPI_OST_SC_DEVICE_IN_USE 0x81 |
#define ACPI_OST_SC_DEVICE_BUSY 0x82 |
#define ACPI_OST_SC_EJECT_DEPENDENCY_BUSY 0x83 |
#define ACPI_OST_SC_EJECT_IN_PROGRESS 0x84 |
/* _OST Insertion Request (0x200) Status Code */ |
#define ACPI_OST_SC_INSERT_IN_PROGRESS 0x80 |
#define ACPI_OST_SC_DRIVER_LOAD_FAILURE 0x81 |
#define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82 |
extern void acpi_early_init(void); |
extern void acpi_subsystem_init(void); |
extern int acpi_nvs_register(__u64 start, __u64 size); |
extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), |
void *data); |
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, |
const struct device *dev); |
extern bool acpi_driver_match_device(struct device *dev, |
const struct device_driver *drv); |
int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); |
int acpi_device_modalias(struct device *, char *, int); |
void acpi_walk_dep_device_list(acpi_handle handle); |
struct platform_device *acpi_create_platform_device(struct acpi_device *); |
#define ACPI_PTR(_ptr) (_ptr) |
#else /* !CONFIG_ACPI */ |
#define acpi_disabled 1 |
#define ACPI_COMPANION(dev) (NULL) |
#define ACPI_COMPANION_SET(dev, adev) do { } while (0) |
#define ACPI_HANDLE(dev) (NULL) |
#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0), |
struct fwnode_handle; |
/* --- !CONFIG_ACPI stubs: no fwnode can ever be an ACPI node --- */
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
        return false;
}
static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
{
        return false;
}
static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
{
        return NULL;
}
static inline bool is_acpi_data_node(struct fwnode_handle *fwnode)
{
        return false;
}
static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode)
{
        return NULL;
}
static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
{
        return NULL;
}
/* Without ACPI a device can never have an ACPI companion. */
static inline bool has_acpi_companion(struct device *dev)
{
        return false;
}
static inline void acpi_preset_companion(struct device *dev,
                                         struct acpi_device *parent, u64 addr)
{
}
/* NOTE(review): unlike the CONFIG_ACPI variant, this returns NULL, not a name. */
static inline const char *acpi_dev_name(struct acpi_device *adev)
{
        return NULL;
}
/* !CONFIG_ACPI: ACPI bring-up hooks collapse to successful no-ops. */
static inline void acpi_early_init(void) { }
static inline void acpi_subsystem_init(void) { }
static inline int early_acpi_boot_init(void)
{
        return 0;
}
static inline int acpi_boot_init(void)
{
        return 0;
}
static inline void acpi_boot_table_init(void)
{
        return;
}
/* 0 == "no MPS/ACPI conflict", so boot proceeds normally. */
static inline int acpi_mps_check(void)
{
        return 0;
}
/*
 * !CONFIG_ACPI stub: report no resource conflict. The parameter is
 * const-qualified to match the CONFIG_ACPI prototype of
 * acpi_check_resource_conflict() declared earlier in this header;
 * the two branches previously disagreed on the signature.
 */
static inline int acpi_check_resource_conflict(const struct resource *res)
{
        return 0;
}
/* !CONFIG_ACPI stub: no region checking, always succeeds. */
static inline int acpi_check_region(resource_size_t start, resource_size_t n,
                                    const char *name)
{
        return 0;
}
struct acpi_table_header;
/* !CONFIG_ACPI stub: there are no ACPI tables to parse. */
static inline int acpi_table_parse(char *id,
                                   int (*handler)(struct acpi_table_header *))
{
        return -ENODEV;
}
/* !CONFIG_ACPI stubs: NVS save/restore bookkeeping is not needed. */
static inline int acpi_nvs_register(__u64 start, __u64 size)
{
        return 0;
}
static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
                                           void *data)
{
        return 0;
}
struct acpi_device_id;
/* !CONFIG_ACPI stubs: ACPI id/driver matching never succeeds. */
static inline const struct acpi_device_id *acpi_match_device(
        const struct acpi_device_id *ids, const struct device *dev)
{
        return NULL;
}
static inline bool acpi_driver_match_device(struct device *dev,
                                            const struct device_driver *drv)
{
        return false;
}
static inline int acpi_device_uevent_modalias(struct device *dev,
                                              struct kobj_uevent_env *env)
{
        return -ENODEV;
}
static inline int acpi_device_modalias(struct device *dev,
                                       char *buf, int size)
{
        return -ENODEV;
}
/* !CONFIG_ACPI stubs: DMA is reported as unsupported. */
static inline bool acpi_dma_supported(struct acpi_device *adev)
{
        return false;
}
static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
{
        return DEV_DMA_NOT_SUPPORTED;
}
#define ACPI_PTR(_ptr) (NULL) |
#endif /* !CONFIG_ACPI */ |
#ifdef CONFIG_ACPI |
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, |
u32 pm1a_ctrl, u32 pm1b_ctrl)); |
acpi_status acpi_os_prepare_sleep(u8 sleep_state, |
u32 pm1a_control, u32 pm1b_control); |
void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, |
u32 val_a, u32 val_b)); |
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, |
u32 val_a, u32 val_b); |
#ifdef CONFIG_X86 |
void arch_reserve_mem_area(acpi_physical_address addr, size_t size); |
#else |
/* Non-x86 stub: no architecture memory-area reservation is required. */
static inline void arch_reserve_mem_area(acpi_physical_address addr,
                                         size_t size)
{
}
#endif /* CONFIG_X86 */ |
#else |
#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) |
#endif |
#if defined(CONFIG_ACPI) && defined(CONFIG_PM) |
int acpi_dev_runtime_suspend(struct device *dev); |
int acpi_dev_runtime_resume(struct device *dev); |
int acpi_subsys_runtime_suspend(struct device *dev); |
int acpi_subsys_runtime_resume(struct device *dev); |
struct acpi_device *acpi_dev_pm_get_node(struct device *dev); |
int acpi_dev_pm_attach(struct device *dev, bool power_on); |
#else |
/* Stubs for !(CONFIG_ACPI && CONFIG_PM): runtime-PM calls succeed as no-ops. */
static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
{
        return NULL;
}
/* -ENODEV tells the caller there is no ACPI PM domain to attach to. */
static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
        return -ENODEV;
}
#endif |
#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) |
int acpi_dev_suspend_late(struct device *dev); |
int acpi_dev_resume_early(struct device *dev); |
int acpi_subsys_prepare(struct device *dev); |
void acpi_subsys_complete(struct device *dev); |
int acpi_subsys_suspend_late(struct device *dev); |
int acpi_subsys_resume_early(struct device *dev); |
int acpi_subsys_suspend(struct device *dev); |
int acpi_subsys_freeze(struct device *dev); |
#else |
/* Stubs for !(CONFIG_ACPI && CONFIG_PM_SLEEP): sleep callbacks are no-ops. */
static inline int acpi_dev_suspend_late(struct device *dev) { return 0; }
static inline int acpi_dev_resume_early(struct device *dev) { return 0; }
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
static inline void acpi_subsys_complete(struct device *dev) {}
static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; }
static inline int acpi_subsys_resume_early(struct device *dev) { return 0; }
static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
#endif |
#ifdef CONFIG_ACPI |
__printf(3, 4) |
void acpi_handle_printk(const char *level, acpi_handle handle, |
const char *fmt, ...); |
#else /* !CONFIG_ACPI */ |
/* !CONFIG_ACPI stub: message is dropped; __printf keeps format checking. */
static inline __printf(3, 4) void
acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
#endif /* !CONFIG_ACPI */ |
#if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG) |
__printf(3, 4) |
void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...); |
#else |
/*
 * Fallback when dynamic debug is unavailable: route handle-debug output
 * through acpi_handle_printk() at KERN_DEBUG level. The stray trailing
 * semicolon has been removed so the macro expands like an ordinary
 * expression statement and stays safe inside unbraced if/else bodies
 * (call sites such as acpi_handle_debug() supply their own semicolon).
 */
#define __acpi_handle_debug(descriptor, handle, fmt, ...) \
        acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__)
#endif |
/* |
* acpi_handle_<level>: Print message with ACPI prefix and object path |
* |
* These interfaces acquire the global namespace mutex to obtain an object |
* path. In interrupt context, it shows the object path as <n/a>. |
*/ |
#define acpi_handle_emerg(handle, fmt, ...) \ |
acpi_handle_printk(KERN_EMERG, handle, fmt, ##__VA_ARGS__) |
#define acpi_handle_alert(handle, fmt, ...) \ |
acpi_handle_printk(KERN_ALERT, handle, fmt, ##__VA_ARGS__) |
#define acpi_handle_crit(handle, fmt, ...) \ |
acpi_handle_printk(KERN_CRIT, handle, fmt, ##__VA_ARGS__) |
#define acpi_handle_err(handle, fmt, ...) \ |
acpi_handle_printk(KERN_ERR, handle, fmt, ##__VA_ARGS__) |
#define acpi_handle_warn(handle, fmt, ...) \ |
acpi_handle_printk(KERN_WARNING, handle, fmt, ##__VA_ARGS__) |
#define acpi_handle_notice(handle, fmt, ...) \ |
acpi_handle_printk(KERN_NOTICE, handle, fmt, ##__VA_ARGS__) |
#define acpi_handle_info(handle, fmt, ...) \ |
acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__) |
#if defined(DEBUG) |
#define acpi_handle_debug(handle, fmt, ...) \ |
acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__) |
#else |
#if defined(CONFIG_DYNAMIC_DEBUG) |
#define acpi_handle_debug(handle, fmt, ...) \ |
do { \ |
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ |
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ |
__acpi_handle_debug(&descriptor, handle, pr_fmt(fmt), \ |
##__VA_ARGS__); \ |
} while (0) |
#else |
#define acpi_handle_debug(handle, fmt, ...) \ |
({ \ |
if (0) \ |
acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); \ |
0; \ |
}) |
#endif |
#endif |
/* Identifies one GPIO line inside a device's _CRS GPIO resources. */
struct acpi_gpio_params {
        unsigned int crs_entry_index;   /* index of the GPIO resource in _CRS */
        unsigned int line_index;        /* pin index within that resource */
        bool active_low;                /* line is active-low */
};
/* Maps a connection-ID name to a set of GPIO line descriptors. */
struct acpi_gpio_mapping {
        const char *name;
        const struct acpi_gpio_params *data;
        unsigned int size;              /* number of entries in @data */
};
#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) |
int acpi_dev_add_driver_gpios(struct acpi_device *adev, |
const struct acpi_gpio_mapping *gpios); |
/*
 * Detach the driver GPIO mapping from @adev.
 * NOTE(review): only the pointer is cleared; the mapping table itself is
 * not freed here — presumably it is owned by the driver (verify at callers).
 */
static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
{
        if (adev)
                adev->driver_gpios = NULL;
}
int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index); |
#else |
/* Stubs for !(CONFIG_ACPI && CONFIG_GPIOLIB): ACPI GPIO support absent. */
static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
                                            const struct acpi_gpio_mapping *gpios)
{
        return -ENXIO;
}
static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
        return -ENXIO;
}
#endif |
/* Device properties */ |
#define MAX_ACPI_REFERENCE_ARGS 8 |
/*
 * A resolved ACPI property reference: the target device plus up to
 * MAX_ACPI_REFERENCE_ARGS integer arguments accompanying it.
 */
struct acpi_reference_args {
        struct acpi_device *adev;
        size_t nargs;                   /* number of valid entries in args[] */
        u64 args[MAX_ACPI_REFERENCE_ARGS];
};
#ifdef CONFIG_ACPI |
int acpi_dev_get_property(struct acpi_device *adev, const char *name, |
acpi_object_type type, const union acpi_object **obj); |
int acpi_node_get_property_reference(struct fwnode_handle *fwnode, |
const char *name, size_t index, |
struct acpi_reference_args *args); |
int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname, |
void **valptr); |
int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, |
enum dev_prop_type proptype, void *val); |
int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname, |
enum dev_prop_type proptype, void *val, size_t nval); |
int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, |
enum dev_prop_type proptype, void *val, size_t nval); |
struct fwnode_handle *acpi_get_next_subnode(struct device *dev, |
struct fwnode_handle *subnode); |
struct acpi_probe_entry; |
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, |
struct acpi_probe_entry *); |
#define ACPI_TABLE_ID_LEN 5 |
/** |
* struct acpi_probe_entry - boot-time probing entry |
* @id: ACPI table name |
* @type: Optional subtable type to match |
* (if @id contains subtables) |
* @subtable_valid: Optional callback to check the validity of |
* the subtable |
* @probe_table: Callback to the driver being probed when table |
* match is successful |
* @probe_subtbl: Callback to the driver being probed when table and |
* subtable match (and optional callback is successful) |
* @driver_data: Sideband data provided back to the driver |
*/ |
/* See the kernel-doc block above for full field descriptions. */
struct acpi_probe_entry {
        __u8 id[ACPI_TABLE_ID_LEN];     /* table signature, e.g. "APIC" */
        __u8 type;
        acpi_probe_entry_validate_subtbl subtable_valid;
        union {                         /* one callback, depending on match kind */
                acpi_tbl_table_handler probe_table;
                acpi_tbl_entry_handler probe_subtbl;
        };
        kernel_ulong_t driver_data;
};
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ |
static const struct acpi_probe_entry __acpi_probe_##name \ |
__used __section(__##table##_acpi_probe_table) \ |
= { \ |
.id = table_id, \ |
.type = subtable, \ |
.subtable_valid = valid, \ |
.probe_table = (acpi_tbl_table_handler)fn, \ |
.driver_data = data, \ |
} |
#define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table |
#define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end |
int __acpi_probe_device_table(struct acpi_probe_entry *start, int nr); |
#define acpi_probe_device_table(t) \ |
({ \ |
extern struct acpi_probe_entry ACPI_PROBE_TABLE(t), \ |
ACPI_PROBE_TABLE_END(t); \ |
__acpi_probe_device_table(&ACPI_PROBE_TABLE(t), \ |
(&ACPI_PROBE_TABLE_END(t) - \ |
&ACPI_PROBE_TABLE(t))); \ |
}) |
#else |
/* --- !CONFIG_ACPI stubs: all ACPI property lookups fail with -ENXIO --- */
static inline int acpi_dev_get_property(struct acpi_device *adev,
                                        const char *name, acpi_object_type type,
                                        const union acpi_object **obj)
{
        return -ENXIO;
}
static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
                                                   const char *name, size_t index,
                                                   struct acpi_reference_args *args)
{
        return -ENXIO;
}
static inline int acpi_node_prop_get(struct fwnode_handle *fwnode,
                                     const char *propname,
                                     void **valptr)
{
        return -ENXIO;
}
/*
 * NOTE(review): no CONFIG_ACPI counterpart of acpi_dev_prop_get() is
 * declared in this header — this stub looks stale; verify callers.
 */
static inline int acpi_dev_prop_get(struct acpi_device *adev,
                                    const char *propname,
                                    void **valptr)
{
        return -ENXIO;
}
static inline int acpi_dev_prop_read_single(struct acpi_device *adev,
                                            const char *propname,
                                            enum dev_prop_type proptype,
                                            void *val)
{
        return -ENXIO;
}
static inline int acpi_node_prop_read(struct fwnode_handle *fwnode,
                                      const char *propname,
                                      enum dev_prop_type proptype,
                                      void *val, size_t nval)
{
        return -ENXIO;
}
static inline int acpi_dev_prop_read(struct acpi_device *adev,
                                     const char *propname,
                                     enum dev_prop_type proptype,
                                     void *val, size_t nval)
{
        return -ENXIO;
}
/* !CONFIG_ACPI stub: device nodes have no ACPI subnodes to walk. */
static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
                                                          struct fwnode_handle *subnode)
{
        return NULL;
}
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ |
static const void * __acpi_table_##name[] \ |
__attribute__((unused)) \ |
= { (void *) table_id, \ |
(void *) subtable, \ |
(void *) valid, \ |
(void *) fn, \ |
(void *) data } |
#define acpi_probe_device_table(t) ({ int __r = 0; __r;}) |
#endif |
#endif /*_LINUX_ACPI_H*/ |
/drivers/include/linux/bug.h |
---|
3,6 → 3,8 |
#include <linux/compiler.h> |
int printf(const char *fmt, ...);
/* Emit the standard WARNING banner with source location. */
#define __WARN() printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__)
//#define __WARN_printf(arg...) printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__)
/* Print the caller-supplied message first, then the WARNING banner. */
#define __WARN_printf(arg...) do { printf(arg); __WARN(); } while (0)
/drivers/include/linux/clocksource.h |
---|
0,0 → 1,257 |
/* linux/include/linux/clocksource.h |
* |
* This file contains the structure definitions for clocksources. |
* |
* If you are not a clocksource, or timekeeping code, you should |
* not be including this file! |
*/ |
#ifndef _LINUX_CLOCKSOURCE_H |
#define _LINUX_CLOCKSOURCE_H |
#include <linux/types.h> |
#include <linux/timex.h> |
#include <linux/time.h> |
#include <linux/list.h> |
#include <linux/cache.h> |
#include <linux/init.h> |
#include <asm/div64.h> |
#include <asm/io.h> |
struct clocksource; |
struct module; |
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA |
#include <asm/clocksource.h> |
#endif |
/** |
* struct clocksource - hardware abstraction for a free running counter |
* Provides mostly state-free accessors to the underlying hardware. |
* This is the structure used for system time. |
* |
* @name: ptr to clocksource name |
* @list: list head for registration |
* @rating: rating value for selection (higher is better) |
* To avoid rating inflation the following |
* list should give you a guide as to how |
* to assign your clocksource a rating |
* 1-99: Unfit for real use |
* Only available for bootup and testing purposes. |
* 100-199: Base level usability. |
* Functional for real use, but not desired. |
* 200-299: Good. |
* A correct and usable clocksource. |
* 300-399: Desired. |
* A reasonably fast and accurate clocksource. |
* 400-499: Perfect |
* The ideal clocksource. A must-use where |
* available. |
* @read: returns a cycle value, passes clocksource as argument |
* @enable: optional function to enable the clocksource |
* @disable: optional function to disable the clocksource |
* @mask: bitmask for two's complement |
* subtraction of non 64 bit counters |
* @mult: cycle to nanosecond multiplier |
* @shift: cycle to nanosecond divisor (power of two) |
* @max_idle_ns: max idle time permitted by the clocksource (nsecs) |
* @maxadj: maximum adjustment value to mult (~11%) |
* @max_cycles: maximum safe cycle value which won't overflow on multiplication |
* @flags: flags describing special properties |
* @archdata: arch-specific data |
* @suspend: suspend function for the clocksource, if necessary |
* @resume: resume function for the clocksource, if necessary |
* @owner: module reference, must be set by clocksource in modules |
*/ |
struct clocksource {
/*
 * Hotpath data, fits in a single cache line when the
 * clocksource itself is cacheline aligned.
 */
cycle_t (*read)(struct clocksource *cs);
cycle_t mask;	/* typically initialized with CLOCKSOURCE_MASK(bits) */
u32 mult;	/* cycles -> ns: ns = (cycles * mult) >> shift, see clocksource_cyc2ns() */
u32 shift;
u64 max_idle_ns;
u32 maxadj;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
struct arch_clocksource_data archdata;
#endif
u64 max_cycles;
/* Less frequently used fields below (registration/selection time). */
const char *name;
struct list_head list;
int rating;	/* selection priority; see rating guide in kernel-doc above */
int (*enable)(struct clocksource *cs);	/* optional */
void (*disable)(struct clocksource *cs);	/* optional */
unsigned long flags;	/* CLOCK_SOURCE_* bits defined below */
void (*suspend)(struct clocksource *cs);
void (*resume)(struct clocksource *cs);
/* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
struct list_head wd_list;
cycle_t cs_last;	/* NOTE(review): presumably last cycle counts sampled */
cycle_t wd_last;	/* by the watchdog - framework-internal, do not touch */
#endif
struct module *owner;	/* module reference for modular clocksources */
} ____cacheline_aligned;
/* |
* Clock source flags bits:
*/ |
#define CLOCK_SOURCE_IS_CONTINUOUS 0x01 |
#define CLOCK_SOURCE_MUST_VERIFY 0x02 |
#define CLOCK_SOURCE_WATCHDOG 0x10 |
#define CLOCK_SOURCE_VALID_FOR_HRES 0x20 |
#define CLOCK_SOURCE_UNSTABLE 0x40 |
#define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80 |
#define CLOCK_SOURCE_RESELECT 0x100 |
/* simplify initialization of mask field */ |
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) |
/** |
* clocksource_khz2mult - calculates mult from khz and shift |
* @khz: Clocksource frequency in KHz |
* @shift_constant: Clocksource shift factor |
* |
* Helper function that converts a khz counter frequency to a clocksource
* multiplier, given the clocksource shift value
*/ |
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/*
	 * A khz counter ticks 'khz' cycles per million nanoseconds and
	 * mult/2^shift converts cycles to nanoseconds, hence:
	 *   mult = (1000000 << shift) / khz
	 * rounded to the nearest integer.
	 */
	u64 result = (u64)1000000 << shift_constant;

	result += khz / 2;		/* round for do_div */
	do_div(result, khz);
	return (u32)result;
}
/** |
* clocksource_hz2mult - calculates mult from hz and shift |
* @hz: Clocksource frequency in Hz |
* @shift_constant: Clocksource shift factor |
* |
* Helper function that converts a hz counter
* frequency to a clocksource multiplier, given the
* clocksource shift value
*/ |
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
	/*
	 * A hz counter ticks 'hz' cycles per billion nanoseconds and
	 * mult/2^shift converts cycles to nanoseconds, hence:
	 *   mult = (1000000000 << shift) / hz
	 * rounded to the nearest integer.
	 */
	u64 result = (u64)1000000000 << shift_constant;

	result += hz / 2;		/* round for do_div */
	do_div(result, hz);
	return (u32)result;
}
/** |
* clocksource_cyc2ns - converts clocksource cycles to nanoseconds |
* @cycles: cycles |
* @mult: cycle to nanosecond multiplier |
* @shift: cycle to nanosecond divisor (power of two) |
* |
* Converts cycles to nanoseconds, using the given mult and shift. |
* |
* XXX - This could use some mult_lxl_ll() asm optimization |
*/ |
static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
{
	/* Widen before multiplying, then apply the power-of-two divisor. */
	u64 scaled = (u64)cycles * mult;

	return scaled >> shift;
}
extern int clocksource_unregister(struct clocksource*); |
extern void clocksource_touch_watchdog(void); |
extern void clocksource_change_rating(struct clocksource *cs, int rating); |
extern void clocksource_suspend(void); |
extern void clocksource_resume(void); |
extern struct clocksource * __init clocksource_default_clock(void); |
extern void clocksource_mark_unstable(struct clocksource *cs); |
extern u64 |
clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles); |
extern void |
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec); |
/* |
* Don't call __clocksource_register_scale directly, use |
* clocksource_register_hz/khz |
*/ |
extern int |
__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq); |
extern void |
__clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq); |
/* |
* Don't call this unless you are a default clocksource |
* (AKA: jiffies) and absolutely have to. |
*/ |
/* Register with freq == 0: mult/shift must already be set by the caller. */
static inline int __clocksource_register(struct clocksource *cs)
{
return __clocksource_register_scale(cs, 1, 0);
}
/* Register a clocksource whose frequency is given in Hz (scale 1). */
static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
return __clocksource_register_scale(cs, 1, hz);
}
/* Register a clocksource whose frequency is given in kHz (scale 1000). */
static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
{
return __clocksource_register_scale(cs, 1000, khz);
}
/* Update a registered clocksource from a frequency given in Hz. */
static inline void __clocksource_update_freq_hz(struct clocksource *cs, u32 hz)
{
__clocksource_update_freq_scale(cs, 1, hz);
}
/* Update a registered clocksource from a frequency given in kHz. */
static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz)
{
__clocksource_update_freq_scale(cs, 1000, khz);
}
extern int timekeeping_notify(struct clocksource *clock); |
extern cycle_t clocksource_mmio_readl_up(struct clocksource *); |
extern cycle_t clocksource_mmio_readl_down(struct clocksource *); |
extern cycle_t clocksource_mmio_readw_up(struct clocksource *); |
extern cycle_t clocksource_mmio_readw_down(struct clocksource *); |
extern int clocksource_mmio_init(void __iomem *, const char *, |
unsigned long, int, unsigned, cycle_t (*)(struct clocksource *)); |
extern int clocksource_i8253_init(void); |
#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \ |
OF_DECLARE_1(clksrc, name, compat, fn) |
#ifdef CONFIG_CLKSRC_PROBE |
extern void clocksource_probe(void); |
#else |
static inline void clocksource_probe(void) {} |
#endif |
#define CLOCKSOURCE_ACPI_DECLARE(name, table_id, fn) \ |
ACPI_DECLARE_PROBE_ENTRY(clksrc, name, table_id, 0, NULL, 0, fn) |
#endif /* _LINUX_CLOCKSOURCE_H */ |
/drivers/include/linux/compiler-gcc.h |
---|
199,7 → 199,7 |
#define unreachable() __builtin_unreachable() |
/* Mark a function definition as prohibited from being cloned. */ |
#define __noclone __attribute__((__noclone__)) |
#define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) |
#endif /* GCC_VERSION >= 40500 */ |
/drivers/include/linux/dma-mapping.h |
---|
5,6 → 5,7 |
#include <linux/string.h> |
#include <linux/device.h> |
#include <linux/err.h> |
#include <linux/dma-attrs.h> |
#include <linux/dma-direction.h> |
#include <linux/scatterlist.h> |
26,6 → 27,12 |
{ |
return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE; |
} |
#ifdef CONFIG_HAS_DMA |
#include <asm/dma-mapping.h> |
#else |
#include <asm-generic/dma-mapping-broken.h> |
#endif |
#ifndef dma_max_pfn |
static inline unsigned long dma_max_pfn(struct device *dev) |
{ |
/drivers/include/linux/dynamic_debug.h |
---|
0,0 → 1,137 |
#ifndef _DYNAMIC_DEBUG_H |
#define _DYNAMIC_DEBUG_H |
/* |
* An instance of this structure is created in a special |
* ELF section at every dynamic debug callsite. At runtime, |
* the special section is treated as an array of these. |
*/ |
struct _ddebug {
/*
 * These fields are used to drive the user interface
 * for selecting and displaying debug callsites.
 */
const char *modname;	/* KBUILD_MODNAME (see DEFINE_DYNAMIC_DEBUG_METADATA) */
const char *function;	/* __func__ of the callsite */
const char *filename;	/* __FILE__ of the callsite */
const char *format;	/* printk-style format string */
unsigned int lineno:18;	/* __LINE__ of the callsite, 18 bits */
/*
 * The flags field controls the behaviour at the callsite.
 * The bits here are changed dynamically when the user
 * writes commands to <debugfs>/dynamic_debug/control
 */
#define _DPRINTK_FLAGS_NONE 0
#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */
#define _DPRINTK_FLAGS_INCL_MODNAME (1<<1) /* include module name in output */
#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2) /* include function name */
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3) /* include line number */
#define _DPRINTK_FLAGS_INCL_TID (1<<4) /* include thread/task id */
/* With DEBUG defined, callsites print by default; otherwise start disabled. */
#if defined DEBUG
#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
#else
#define _DPRINTK_FLAGS_DEFAULT 0
#endif
unsigned int flags:8;	/* runtime-modifiable _DPRINTK_FLAGS_* bits */
} __attribute__((aligned(8)));
int ddebug_add_module(struct _ddebug *tab, unsigned int n, |
const char *modname); |
#if defined(CONFIG_DYNAMIC_DEBUG) |
extern int ddebug_remove_module(const char *mod_name); |
extern __printf(2, 3) |
void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...); |
extern int ddebug_dyndbg_module_param_cb(char *param, char *val, |
const char *modname); |
struct device; |
extern __printf(3, 4) |
void __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev, |
const char *fmt, ...); |
struct net_device; |
extern __printf(3, 4) |
void __dynamic_netdev_dbg(struct _ddebug *descriptor, |
const struct net_device *dev, |
const char *fmt, ...); |
/*
 * Emit one struct _ddebug describing this callsite into the special
 * "__verbose" ELF section, where the dynamic debug core can find it.
 */
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
static struct _ddebug __aligned(8) \
__attribute__((section("__verbose"))) name = { \
.modname = KBUILD_MODNAME, \
.function = __func__, \
.filename = __FILE__, \
.format = (fmt), \
.lineno = __LINE__, \
.flags = _DPRINTK_FLAGS_DEFAULT, \
}
/* pr_debug() backend: print only when this callsite's PRINT flag is set. */
#define dynamic_pr_debug(fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
##__VA_ARGS__); \
} while (0)
/* dev_dbg() backend: like dynamic_pr_debug() but with device context. */
#define dynamic_dev_dbg(dev, fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
__dynamic_dev_dbg(&descriptor, dev, fmt, \
##__VA_ARGS__); \
} while (0)
/* netdev_dbg() backend: like dynamic_dev_dbg() for net devices. */
#define dynamic_netdev_dbg(dev, fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
__dynamic_netdev_dbg(&descriptor, dev, fmt, \
##__VA_ARGS__); \
} while (0)
/*
 * Runtime-switchable hex dump; a non-literal prefix string falls back to
 * the fixed callsite name "hexdump".
 */
#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \
__builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
print_hex_dump(KERN_DEBUG, prefix_str, \
prefix_type, rowsize, groupsize, \
buf, len, ascii); \
} while (0)
#else |
#include <linux/string.h> |
#include <linux/errno.h> |
/* !CONFIG_DYNAMIC_DEBUG stub: nothing was registered, so always succeed. */
static inline int ddebug_remove_module(const char *mod)
{
return 0;
}
static inline int ddebug_dyndbg_module_param_cb(char *param, char *val, |
const char *modname) |
{ |
if (strstr(param, "dyndbg")) { |
/* avoid pr_warn(), which wants pr_fmt() fully defined */ |
printk(KERN_WARNING "dyndbg param is supported only in " |
"CONFIG_DYNAMIC_DEBUG builds\n"); |
return 0; /* allow and ignore */ |
} |
return -EINVAL; |
} |
#define dynamic_pr_debug(fmt, ...) \ |
do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) |
#define dynamic_dev_dbg(dev, fmt, ...) \ |
do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) |
#endif |
#endif |
/drivers/include/linux/fwnode.h |
---|
0,0 → 1,29 |
/* |
* fwnode.h - Firmware device node object handle type definition. |
* |
* Copyright (C) 2015, Intel Corporation |
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License version 2 as |
* published by the Free Software Foundation. |
*/ |
#ifndef _LINUX_FWNODE_H_ |
#define _LINUX_FWNODE_H_ |
/* Discriminates which kind of firmware node a fwnode_handle belongs to. */
enum fwnode_type {
FWNODE_INVALID = 0, /* not a valid firmware node */
FWNODE_OF, /* device tree (Open Firmware) node */
FWNODE_ACPI, /* ACPI device node */
FWNODE_ACPI_DATA, /* ACPI data-only node */
FWNODE_PDATA, /* node backed by built-in platform data */
FWNODE_IRQCHIP, /* node provided by an irqchip */
};
/*
 * Type-tagged handle embedded in the provider-specific node structure;
 * @secondary optionally chains a second fwnode to the same device.
 */
struct fwnode_handle {
enum fwnode_type type;
struct fwnode_handle *secondary;
};
#endif |
/drivers/include/linux/init.h |
---|
1,0 → 0,0 |
//stub
#ifndef _LINUX_INIT_H
#define _LINUX_INIT_H
#include <linux/compiler.h>
#include <linux/types.h>
/*
 * Stubbed-out section annotations: they expand to nothing in this
 * environment.  The original stub defined __initdata twice and omitted
 * __init, which users of this header (e.g. the declaration of
 * clocksource_default_clock()) require.
 */
#define __init
#define __initdata
#define __initconst
#endif /* _LINUX_INIT_H */
/drivers/include/linux/nodemask.h |
---|
0,0 → 1,527 |
#ifndef __LINUX_NODEMASK_H |
#define __LINUX_NODEMASK_H |
/* |
* Nodemasks provide a bitmap suitable for representing the |
* set of Node's in a system, one bit position per Node number. |
* |
* See detailed comments in the file linux/bitmap.h describing the |
* data type on which these nodemasks are based. |
* |
* For details of nodemask_parse_user(), see bitmap_parse_user() in |
* lib/bitmap.c. For details of nodelist_parse(), see bitmap_parselist(), |
* also in bitmap.c. For details of node_remap(), see bitmap_bitremap in |
* lib/bitmap.c. For details of nodes_remap(), see bitmap_remap in |
* lib/bitmap.c. For details of nodes_onto(), see bitmap_onto in |
* lib/bitmap.c. For details of nodes_fold(), see bitmap_fold in |
* lib/bitmap.c. |
* |
* The available nodemask operations are: |
* |
* void node_set(node, mask) turn on bit 'node' in mask |
* void node_clear(node, mask) turn off bit 'node' in mask |
* void nodes_setall(mask) set all bits |
* void nodes_clear(mask) clear all bits |
* int node_isset(node, mask) true iff bit 'node' set in mask |
* int node_test_and_set(node, mask) test and set bit 'node' in mask |
* |
* void nodes_and(dst, src1, src2) dst = src1 & src2 [intersection] |
* void nodes_or(dst, src1, src2) dst = src1 | src2 [union] |
* void nodes_xor(dst, src1, src2) dst = src1 ^ src2 |
* void nodes_andnot(dst, src1, src2) dst = src1 & ~src2 |
* void nodes_complement(dst, src) dst = ~src |
* |
* int nodes_equal(mask1, mask2) Does mask1 == mask2? |
* int nodes_intersects(mask1, mask2) Do mask1 and mask2 intersect? |
* int nodes_subset(mask1, mask2) Is mask1 a subset of mask2? |
* int nodes_empty(mask) Is mask empty (no bits sets)? |
* int nodes_full(mask) Is mask full (all bits sets)? |
* int nodes_weight(mask) Hamming weight - number of set bits |
* |
* void nodes_shift_right(dst, src, n) Shift right |
* void nodes_shift_left(dst, src, n) Shift left |
* |
* int first_node(mask) Number lowest set bit, or MAX_NUMNODES |
* int next_node(node, mask) Next node past 'node', or MAX_NUMNODES |
* int first_unset_node(mask) First node not set in mask, or |
* MAX_NUMNODES. |
* |
* nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set |
* NODE_MASK_ALL Initializer - all bits set |
* NODE_MASK_NONE Initializer - no bits set |
* unsigned long *nodes_addr(mask) Array of unsigned long's in mask |
* |
* int nodemask_parse_user(ubuf, ulen, mask) Parse ascii string as nodemask |
* int nodelist_parse(buf, map) Parse ascii string as nodelist |
* int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit) |
* void nodes_remap(dst, src, old, new) *dst = map(old, new)(src) |
* void nodes_onto(dst, orig, relmap) *dst = orig relative to relmap |
* void nodes_fold(dst, orig, sz) dst bits = orig bits mod sz |
* |
* for_each_node_mask(node, mask) for-loop node over mask |
* |
* int num_online_nodes() Number of online Nodes |
* int num_possible_nodes() Number of all possible Nodes |
* |
* int node_random(mask) Random node with set bit in mask |
* |
* int node_online(node) Is some node online? |
* int node_possible(node) Is some node possible? |
* |
* node_set_online(node) set bit 'node' in node_online_map |
* node_set_offline(node) clear bit 'node' in node_online_map |
* |
* for_each_node(node) for-loop node over node_possible_map |
* for_each_online_node(node) for-loop node over node_online_map |
* |
* Subtlety: |
* 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway) |
* to generate slightly worse code. So use a simple one-line #define |
* for node_isset(), instead of wrapping an inline inside a macro, the |
* way we do the other calls. |
* |
* NODEMASK_SCRATCH |
* When doing above logical AND, OR, XOR, Remap operations the callers tend to |
* need temporary nodemask_t's on the stack. But if NODES_SHIFT is large, |
* nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper |
* for such situations. See below and CPUMASK_ALLOC also. |
*/ |
#include <linux/kernel.h> |
#include <linux/threads.h> |
#include <linux/bitmap.h> |
#include <linux/numa.h> |
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t; |
extern nodemask_t _unused_nodemask_arg_; |
/** |
* nodemask_pr_args - printf args to output a nodemask |
* @maskp: nodemask to be printed |
* |
* Can be used to provide arguments for '%*pb[l]' when printing a nodemask. |
*/ |
#define nodemask_pr_args(maskp) MAX_NUMNODES, (maskp)->bits |
/* |
* The inline keyword gives the compiler room to decide to inline, or |
* not inline a function as it sees best. However, as these functions |
* are called in both __init and non-__init functions, if they are not |
* inlined we will end up with a section mis-match error (of the type of |
* freeable items not being freed). So we must use __always_inline here |
* to fix the problem. If other functions in the future also end up in |
* this situation they will also need to be annotated as __always_inline |
*/ |
/* Set bit 'node' in *dstp via set_bit(). */
#define node_set(node, dst) __node_set((node), &(dst))
static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
{
set_bit(node, dstp->bits);
}
/* Clear bit 'node' in *dstp via clear_bit(). */
#define node_clear(node, dst) __node_clear((node), &(dst))
static inline void __node_clear(int node, volatile nodemask_t *dstp)
{
clear_bit(node, dstp->bits);
}
/* Set all MAX_NUMNODES bits. */
#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
/* Clear all MAX_NUMNODES bits. */
#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
/* No static inline type checking - see Subtlety (1) above. */
#define node_isset(node, nodemask) test_bit((node), (nodemask).bits)
/* Set bit 'node' and return its previous value (test_and_set_bit()). */
#define node_test_and_set(node, nodemask) \
__node_test_and_set((node), &(nodemask))
static inline int __node_test_and_set(int node, nodemask_t *addr)
{
return test_and_set_bit(node, addr->bits);
}
/* dst = src1 & src2 */
#define nodes_and(dst, src1, src2) \
__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
/* dst = src1 | src2 */
#define nodes_or(dst, src1, src2) \
__nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
/* dst = src1 ^ src2 */
#define nodes_xor(dst, src1, src2) \
__nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
/* dst = src1 & ~src2 */
#define nodes_andnot(dst, src1, src2) \
__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
/* dst = ~src */
#define nodes_complement(dst, src) \
__nodes_complement(&(dst), &(src), MAX_NUMNODES)
static inline void __nodes_complement(nodemask_t *dstp,
const nodemask_t *srcp, unsigned int nbits)
{
bitmap_complement(dstp->bits, srcp->bits, nbits);
}
/* true iff the two masks are identical */
#define nodes_equal(src1, src2) \
__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_equal(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
/* true iff the masks share at least one set bit */
#define nodes_intersects(src1, src2) \
__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_intersects(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
/* true iff every bit set in src1 is also set in src2 */
#define nodes_subset(src1, src2) \
__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_subset(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
/* true iff no bit is set */
#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
/* true iff all MAX_NUMNODES bits are set */
#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
/* Hamming weight: number of set bits */
#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
/* dst = src >> n */
#define nodes_shift_right(dst, src, n) \
__nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
static inline void __nodes_shift_right(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}
/* dst = src << n */
#define nodes_shift_left(dst, src, n) \
__nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
static inline void __nodes_shift_left(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
/* FIXME: better would be to fix all architectures to never return
   > MAX_NUMNODES, then the silly min_ts could be dropped. */
/* first_node(src): lowest set bit, or MAX_NUMNODES when the mask is empty. */
#define first_node(src) __first_node(&(src))
static inline int __first_node(const nodemask_t *srcp)
{
return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}
/* next_node(n, src): next set bit after 'n', or MAX_NUMNODES when none. */
#define next_node(n, src) __next_node((n), &(src))
static inline int __next_node(int n, const nodemask_t *srcp)
{
return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
/* Reinitialize *mask so that only bit 'node' is set. */
static inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
nodes_clear(*mask);
node_set(node, *mask);
}
#define nodemask_of_node(node) \ |
({ \ |
typeof(_unused_nodemask_arg_) m; \ |
if (sizeof(m) == sizeof(unsigned long)) { \ |
m.bits[0] = 1UL << (node); \ |
} else { \ |
init_nodemask_of_node(&m, (node)); \ |
} \ |
m; \ |
}) |
#define first_unset_node(mask) __first_unset_node(&(mask)) |
static inline int __first_unset_node(const nodemask_t *maskp) |
{ |
return min_t(int,MAX_NUMNODES, |
find_first_zero_bit(maskp->bits, MAX_NUMNODES)); |
} |
#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES) |
#if MAX_NUMNODES <= BITS_PER_LONG |
#define NODE_MASK_ALL \ |
((nodemask_t) { { \ |
[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \ |
} }) |
#else |
#define NODE_MASK_ALL \ |
((nodemask_t) { { \ |
[0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL, \ |
[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \ |
} }) |
#endif |
#define NODE_MASK_NONE \ |
((nodemask_t) { { \ |
[0 ... BITS_TO_LONGS(MAX_NUMNODES)-1] = 0UL \ |
} }) |
#define nodes_addr(src) ((src).bits) |
#define nodemask_parse_user(ubuf, ulen, dst) \ |
__nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES) |
static inline int __nodemask_parse_user(const char __user *buf, int len, |
nodemask_t *dstp, int nbits) |
{ |
return bitmap_parse_user(buf, len, dstp->bits, nbits); |
} |
#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES) |
static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits) |
{ |
return bitmap_parselist(buf, dstp->bits, nbits); |
} |
#define node_remap(oldbit, old, new) \ |
__node_remap((oldbit), &(old), &(new), MAX_NUMNODES) |
static inline int __node_remap(int oldbit, |
const nodemask_t *oldp, const nodemask_t *newp, int nbits) |
{ |
return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits); |
} |
#define nodes_remap(dst, src, old, new) \ |
__nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES) |
static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp, |
const nodemask_t *oldp, const nodemask_t *newp, int nbits) |
{ |
bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); |
} |
#define nodes_onto(dst, orig, relmap) \ |
__nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES) |
static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp, |
const nodemask_t *relmapp, int nbits) |
{ |
bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); |
} |
#define nodes_fold(dst, orig, sz) \ |
__nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES) |
static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp, |
int sz, int nbits) |
{ |
bitmap_fold(dstp->bits, origp->bits, sz, nbits); |
} |
#if MAX_NUMNODES > 1 |
#define for_each_node_mask(node, mask) \ |
for ((node) = first_node(mask); \ |
(node) < MAX_NUMNODES; \ |
(node) = next_node((node), (mask))) |
#else /* MAX_NUMNODES == 1 */ |
#define for_each_node_mask(node, mask) \ |
if (!nodes_empty(mask)) \ |
for ((node) = 0; (node) < 1; (node)++) |
#endif /* MAX_NUMNODES */ |
/* |
* Bitmasks that are kept for all the nodes. |
*/ |
enum node_states { |
N_POSSIBLE, /* The node could become online at some point */ |
N_ONLINE, /* The node is online */ |
N_NORMAL_MEMORY, /* The node has regular memory */ |
#ifdef CONFIG_HIGHMEM |
N_HIGH_MEMORY, /* The node has regular or high memory */ |
#else |
N_HIGH_MEMORY = N_NORMAL_MEMORY, |
#endif |
#ifdef CONFIG_MOVABLE_NODE |
N_MEMORY, /* The node has memory(regular, high, movable) */ |
#else |
N_MEMORY = N_HIGH_MEMORY, |
#endif |
N_CPU, /* The node has one or more cpus */ |
NR_NODE_STATES |
}; |
/* |
* The following particular system nodemasks and operations |
* on them manage all possible and online nodes. |
*/ |
extern nodemask_t node_states[NR_NODE_STATES]; |
#if MAX_NUMNODES > 1 |
/* Test whether 'node' is in the given state mask. */
static inline int node_state(int node, enum node_states state)
{
return node_isset(node, node_states[state]);
}
/* Add 'node' to the given state mask. */
static inline void node_set_state(int node, enum node_states state)
{
__node_set(node, &node_states[state]);
}
/* Remove 'node' from the given state mask. */
static inline void node_clear_state(int node, enum node_states state)
{
__node_clear(node, &node_states[state]);
}
/* Number of nodes currently in the given state. */
static inline int num_node_state(enum node_states state)
{
return nodes_weight(node_states[state]);
}
/* Iterate over every node in the given state mask. */
#define for_each_node_state(__node, __state) \
for_each_node_mask((__node), node_states[__state])
#define first_online_node first_node(node_states[N_ONLINE])
#define first_memory_node first_node(node_states[N_MEMORY])
/* Next online node after 'nid', or MAX_NUMNODES (next_node() semantics). */
static inline int next_online_node(int nid)
{
return next_node(nid, node_states[N_ONLINE]);
}
/* Next node with memory after 'nid', or MAX_NUMNODES. */
static inline int next_memory_node(int nid)
{
return next_node(nid, node_states[N_MEMORY]);
}
extern int nr_node_ids;
extern int nr_online_nodes;
/* Mark a node online and refresh the cached online-node count. */
static inline void node_set_online(int nid)
{
node_set_state(nid, N_ONLINE);
nr_online_nodes = num_node_state(N_ONLINE);
}
/* Mark a node offline and refresh the cached online-node count. */
static inline void node_set_offline(int nid)
{
node_clear_state(nid, N_ONLINE);
nr_online_nodes = num_node_state(N_ONLINE);
}
#else |
/* Single-node (MAX_NUMNODES == 1) stubs: node 0 is the only node. */
static inline int node_state(int node, enum node_states state)
{
return node == 0;
}
static inline void node_set_state(int node, enum node_states state)
{
}
static inline void node_clear_state(int node, enum node_states state)
{
}
static inline int num_node_state(enum node_states state)
{
return 1;
}
/* Loop body runs exactly once, with node == 0. */
#define for_each_node_state(node, __state) \
for ( (node) = 0; (node) == 0; (node) = 1)
#define first_online_node 0
#define first_memory_node 0
#define next_online_node(nid) (MAX_NUMNODES)
#define nr_node_ids 1
#define nr_online_nodes 1
#define node_set_online(node) node_set_state((node), N_ONLINE)
#define node_set_offline(node) node_clear_state((node), N_ONLINE)
#endif |
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1) |
extern int node_random(const nodemask_t *maskp); |
#else |
/* Without NUMA (or with a single node) the only choice is node 0. */
static inline int node_random(const nodemask_t *mask)
{
return 0;
}
#endif |
#define node_online_map node_states[N_ONLINE] |
#define node_possible_map node_states[N_POSSIBLE] |
#define num_online_nodes() num_node_state(N_ONLINE) |
#define num_possible_nodes() num_node_state(N_POSSIBLE) |
#define node_online(node) node_state((node), N_ONLINE) |
#define node_possible(node) node_state((node), N_POSSIBLE) |
#define for_each_node(node) for_each_node_state(node, N_POSSIBLE) |
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE) |
/* |
* For the nodemask scratch area.
* NODEMASK_ALLOC(type, name) allocates an object with a specified type and |
* name. |
*/ |
#if NODES_SHIFT > 8 /* nodemask_t > 256 bytes */ |
#define NODEMASK_ALLOC(type, name, gfp_flags) \ |
type *name = kmalloc(sizeof(*name), gfp_flags) |
#define NODEMASK_FREE(m) kfree(m) |
#else |
#define NODEMASK_ALLOC(type, name, gfp_flags) type _##name, *name = &_##name |
#define NODEMASK_FREE(m) do {} while (0) |
#endif |
/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
nodemask_t mask1;	/* caller-owned temporary */
nodemask_t mask2;	/* caller-owned temporary */
};
#define NODEMASK_SCRATCH(x) \ |
NODEMASK_ALLOC(struct nodemask_scratch, x, \ |
GFP_KERNEL | __GFP_NORETRY) |
#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) |
#endif /* __LINUX_NODEMASK_H */ |
/drivers/include/linux/numa.h |
---|
0,0 → 1,15 |
#ifndef _LINUX_NUMA_H |
#define _LINUX_NUMA_H |
#ifdef CONFIG_NODES_SHIFT |
#define NODES_SHIFT CONFIG_NODES_SHIFT |
#else |
#define NODES_SHIFT 0 |
#endif |
#define MAX_NUMNODES (1 << NODES_SHIFT) |
#define NUMA_NO_NODE (-1) |
#endif /* _LINUX_NUMA_H */ |
/drivers/include/linux/property.h |
---|
0,0 → 1,185 |
/* |
* property.h - Unified device property interface. |
* |
* Copyright (C) 2014, Intel Corporation |
* Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com> |
* Mika Westerberg <mika.westerberg@linux.intel.com> |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License version 2 as |
* published by the Free Software Foundation. |
*/ |
#ifndef _LINUX_PROPERTY_H_ |
#define _LINUX_PROPERTY_H_ |
#include <linux/fwnode.h> |
#include <linux/types.h> |
struct device; |
/* Value types a device property can carry. */
enum dev_prop_type {
DEV_PROP_U8,
DEV_PROP_U16,
DEV_PROP_U32,
DEV_PROP_U64,
DEV_PROP_STRING,
DEV_PROP_MAX,	/* number of valid types, not itself a type */
};
/* DMA capability of a device as described by its firmware node. */
enum dev_dma_attr {
DEV_DMA_NOT_SUPPORTED,
DEV_DMA_NON_COHERENT,
DEV_DMA_COHERENT,
};
bool device_property_present(struct device *dev, const char *propname); |
int device_property_read_u8_array(struct device *dev, const char *propname, |
u8 *val, size_t nval); |
int device_property_read_u16_array(struct device *dev, const char *propname, |
u16 *val, size_t nval); |
int device_property_read_u32_array(struct device *dev, const char *propname, |
u32 *val, size_t nval); |
int device_property_read_u64_array(struct device *dev, const char *propname, |
u64 *val, size_t nval); |
int device_property_read_string_array(struct device *dev, const char *propname, |
const char **val, size_t nval); |
int device_property_read_string(struct device *dev, const char *propname, |
const char **val); |
int device_property_match_string(struct device *dev, |
const char *propname, const char *string); |
bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname); |
int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, |
const char *propname, u8 *val, |
size_t nval); |
int fwnode_property_read_u16_array(struct fwnode_handle *fwnode, |
const char *propname, u16 *val, |
size_t nval); |
int fwnode_property_read_u32_array(struct fwnode_handle *fwnode, |
const char *propname, u32 *val, |
size_t nval); |
int fwnode_property_read_u64_array(struct fwnode_handle *fwnode, |
const char *propname, u64 *val, |
size_t nval); |
int fwnode_property_read_string_array(struct fwnode_handle *fwnode, |
const char *propname, const char **val, |
size_t nval); |
int fwnode_property_read_string(struct fwnode_handle *fwnode, |
const char *propname, const char **val); |
int fwnode_property_match_string(struct fwnode_handle *fwnode, |
const char *propname, const char *string); |
struct fwnode_handle *device_get_next_child_node(struct device *dev, |
struct fwnode_handle *child); |
#define device_for_each_child_node(dev, child) \ |
for (child = device_get_next_child_node(dev, NULL); child; \ |
child = device_get_next_child_node(dev, child)) |
void fwnode_handle_put(struct fwnode_handle *fwnode); |
unsigned int device_get_child_node_count(struct device *dev); |
static inline bool device_property_read_bool(struct device *dev, |
const char *propname) |
{ |
return device_property_present(dev, propname); |
} |
/*
 * device_property_read_u8 - Read a single u8 property value.
 * @dev: Device whose properties are queried.
 * @propname: Name of the property.
 * @val: Destination for the value on success.
 *
 * A scalar read is simply an array read of exactly one element.
 */
static inline int device_property_read_u8(struct device *dev,
					  const char *propname, u8 *val)
{
	int error = device_property_read_u8_array(dev, propname, val, 1);

	return error;
}
/*
 * device_property_read_u16 - Read a single u16 property value.
 * @dev: Device whose properties are queried.
 * @propname: Name of the property.
 * @val: Destination for the value on success.
 *
 * A scalar read is simply an array read of exactly one element.
 */
static inline int device_property_read_u16(struct device *dev,
					   const char *propname, u16 *val)
{
	int error = device_property_read_u16_array(dev, propname, val, 1);

	return error;
}
/*
 * device_property_read_u32 - Read a single u32 property value.
 * @dev: Device whose properties are queried.
 * @propname: Name of the property.
 * @val: Destination for the value on success.
 *
 * A scalar read is simply an array read of exactly one element.
 */
static inline int device_property_read_u32(struct device *dev,
					   const char *propname, u32 *val)
{
	int error = device_property_read_u32_array(dev, propname, val, 1);

	return error;
}
/*
 * device_property_read_u64 - Read a single u64 property value.
 * @dev: Device whose properties are queried.
 * @propname: Name of the property.
 * @val: Destination for the value on success.
 *
 * A scalar read is simply an array read of exactly one element.
 */
static inline int device_property_read_u64(struct device *dev,
					   const char *propname, u64 *val)
{
	int error = device_property_read_u64_array(dev, propname, val, 1);

	return error;
}
/*
 * fwnode_property_read_bool - Read a boolean property of a firmware node.
 * @fwnode: Firmware node whose properties are queried.
 * @propname: Name of the property.
 *
 * Boolean properties carry no payload: mere presence means "true".
 */
static inline bool fwnode_property_read_bool(struct fwnode_handle *fwnode,
					     const char *propname)
{
	bool present = fwnode_property_present(fwnode, propname);

	return present;
}
/*
 * fwnode_property_read_u8 - Read a single u8 property of a firmware node.
 * @fwnode: Firmware node whose properties are queried.
 * @propname: Name of the property.
 * @val: Destination for the value on success.
 *
 * A scalar read is simply an array read of exactly one element.
 */
static inline int fwnode_property_read_u8(struct fwnode_handle *fwnode,
					  const char *propname, u8 *val)
{
	int error = fwnode_property_read_u8_array(fwnode, propname, val, 1);

	return error;
}
/*
 * fwnode_property_read_u16 - Read a single u16 property of a firmware node.
 * @fwnode: Firmware node whose properties are queried.
 * @propname: Name of the property.
 * @val: Destination for the value on success.
 *
 * A scalar read is simply an array read of exactly one element.
 */
static inline int fwnode_property_read_u16(struct fwnode_handle *fwnode,
					   const char *propname, u16 *val)
{
	int error = fwnode_property_read_u16_array(fwnode, propname, val, 1);

	return error;
}
/*
 * fwnode_property_read_u32 - Read a single u32 property of a firmware node.
 * @fwnode: Firmware node whose properties are queried.
 * @propname: Name of the property.
 * @val: Destination for the value on success.
 *
 * A scalar read is simply an array read of exactly one element.
 */
static inline int fwnode_property_read_u32(struct fwnode_handle *fwnode,
					   const char *propname, u32 *val)
{
	int error = fwnode_property_read_u32_array(fwnode, propname, val, 1);

	return error;
}
/*
 * fwnode_property_read_u64 - Read a single u64 property of a firmware node.
 * @fwnode: Firmware node whose properties are queried.
 * @propname: Name of the property.
 * @val: Destination for the value on success.
 *
 * A scalar read is simply an array read of exactly one element.
 */
static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode,
					   const char *propname, u64 *val)
{
	int error = fwnode_property_read_u64_array(fwnode, propname, val, 1);

	return error;
}
/**
 * struct property_entry - "Built-in" device property representation.
 * @name: Name of the property.
 * @type: Type of the property.
 * @nval: Number of items of type @type making up the value.
 * @value: Value of the property (an array of @nval items of type @type).
 *
 * The @value union aliases a single backing array; which member is
 * valid is dictated by @type.  @raw_data gives untyped access to the
 * same storage.
 */
struct property_entry {
	const char *name;
	enum dev_prop_type type;
	size_t nval;
	union {
		void *raw_data;
		u8 *u8_data;
		u16 *u16_data;
		u32 *u32_data;
		u64 *u64_data;
		const char **str;
	} value;
};
/**
 * struct property_set - Collection of "built-in" device properties.
 * @fwnode: Handle to be pointed to by the fwnode field of struct device.
 * @properties: Array of properties terminated with a null entry.
 *
 * Embedding the fwnode_handle lets a property_set stand in for a
 * firmware node, so the generic fwnode_property_*() accessors can
 * operate on it.
 */
struct property_set {
	struct fwnode_handle fwnode;
	struct property_entry *properties;
};
/* Attach a set of built-in properties to @dev. */
void device_add_property_set(struct device *dev, struct property_set *pset);
/* DMA capability / attribute queries from the firmware description. */
bool device_dma_supported(struct device *dev);
enum dev_dma_attr device_get_dma_attr(struct device *dev);
/* Ethernet PHY connection mode; presumably negative errno on failure --
 * NOTE(review): confirm in the implementation. */
int device_get_phy_mode(struct device *dev);
/* Copy a MAC address into @addr (@alen bytes); presumably returns @addr
 * on success and NULL on failure -- NOTE(review): confirm. */
void *device_get_mac_address(struct device *dev, char *addr, int alen);
#endif /* _LINUX_PROPERTY_H_ */
/drivers/include/linux/rcupdate.h |
---|
217,6 → 217,7 |
void synchronize_sched(void);
/* NOTE(review): stubbed out for this port -- expands to nothing, so
 * callers do NOT actually wait for an RCU grace period here. */
#define wait_rcu_gp(...)
/** |
* call_rcu_tasks() - Queue an RCU for invocation task-based grace period |
* @head: structure to be used for queueing the RCU updates. |
/drivers/include/linux/scatterlist.h |
---|
378,6 → 378,4 |
/* Scatterlist mapping iterator: advance to the next mapping / stop it. */
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
/* NOTE(review): no-op stub in this port -- nothing is actually unmapped. */
#define dma_unmap_sg(d, s, n, r)
#endif /* _LINUX_SCATTERLIST_H */
/drivers/include/linux/timex.h |
---|
0,0 → 1,158 |
/***************************************************************************** |
* * |
* Copyright (c) David L. Mills 1993 * |
* * |
* Permission to use, copy, modify, and distribute this software and its * |
* documentation for any purpose and without fee is hereby granted, provided * |
* that the above copyright notice appears in all copies and that both the * |
* copyright notice and this permission notice appear in supporting * |
* documentation, and that the name University of Delaware not be used in * |
* advertising or publicity pertaining to distribution of the software * |
* without specific, written prior permission. The University of Delaware * |
* makes no representations about the suitability this software for any * |
* purpose. It is provided "as is" without express or implied warranty. * |
* * |
*****************************************************************************/ |
/* |
* Modification history timex.h |
* |
* 29 Dec 97 Russell King |
* Moved CLOCK_TICK_RATE, CLOCK_TICK_FACTOR and FINETUNE to asm/timex.h |
* for ARM machines |
* |
* 9 Jan 97 Adrian Sun |
* Shifted LATCH define to allow access to alpha machines. |
* |
* 26 Sep 94 David L. Mills |
* Added defines for hybrid phase/frequency-lock loop. |
* |
* 19 Mar 94 David L. Mills |
* Moved defines from kernel routines to header file and added new |
* defines for PPS phase-lock loop. |
* |
* 20 Feb 94 David L. Mills |
* Revised status codes and structures for external clock and PPS |
* signal discipline. |
* |
* 28 Nov 93 David L. Mills |
* Adjusted parameters to improve stability and increase poll |
* interval. |
* |
* 17 Sep 93 David L. Mills |
* Created file $NTP/include/sys/timex.h |
* 07 Oct 93 Torsten Duwe |
* Derived linux/timex.h |
* 1995-08-13 Torsten Duwe |
* kernel PLL updated to 1994-12-13 specs (rfc-1589) |
* 1997-08-30 Ulrich Windl |
* Added new constant NTP_PHASE_LIMIT |
* 2004-08-12 Christoph Lameter |
* Reworked time interpolation logic |
*/ |
#ifndef _LINUX_TIMEX_H
#define _LINUX_TIMEX_H
/* adjtimex() mode bits (subset carried by this port). */
#define ADJ_ADJTIME 0x8000 /* switch between adjtime/adjtimex modes */
#define ADJ_OFFSET_SINGLESHOT 0x0001 /* old-fashioned adjtime */
#define ADJ_OFFSET_READONLY 0x2000 /* read-only adjtime */
#include <linux/compiler.h>
#include <linux/types.h>
#ifndef random_get_entropy
/*
 * The random_get_entropy() function is used by the /dev/random driver
 * in order to extract entropy via the relative unpredictability of
 * when an interrupt takes place versus a high speed, fine-grained
 * timing source or cycle counter.  Since it is invoked on every
 * single interrupt, it must have a very low cost/overhead.
 *
 * By default we use get_cycles() for this purpose, but individual
 * architectures may override this in their asm/timex.h header file.
 */
#define random_get_entropy() get_cycles()
#endif
/*
 * SHIFT_PLL is used as a dampening factor to define how much we
 * adjust the frequency correction for a given offset in PLL mode.
 * It is also used in dampening the offset correction, to define how
 * much of the current value in time_offset we correct for each
 * second. Changing this value changes the stiffness of the ntp
 * adjustment code. A lower value makes it more flexible, reducing
 * NTP convergence time. A higher value makes it stiffer, increasing
 * convergence time, but making the clock more stable.
 *
 * In David Mills' nanokernel reference implementation SHIFT_PLL is 4.
 * However this seems to increase convergence time much too long.
 *
 * https://lists.ntp.org/pipermail/hackers/2008-January/003487.html
 *
 * In the above mailing list discussion, it seems the value of 4
 * was appropriate for other Unix systems with HZ=100, and that
 * SHIFT_PLL should be decreased as HZ increases. However, Linux's
 * clock steering implementation is HZ independent.
 *
 * Through experimentation, a SHIFT_PLL value of 2 was found to allow
 * for fast convergence (very similar to the NTPv3 code used prior to
 * v2.6.19), with good clock stability.
 *
 *
 * SHIFT_FLL is used as a dampening factor to define how much we
 * adjust the frequency correction for a given offset in FLL mode.
 * In David Mills' nanokernel reference implementation SHIFT_FLL is 2.
 *
 * MAXTC establishes the maximum time constant of the PLL.
 */
#define SHIFT_PLL 2 /* PLL frequency factor (shift) */
#define SHIFT_FLL 2 /* FLL frequency factor (shift) */
#define MAXTC 10 /* maximum time constant (shift) */
/*
 * SHIFT_USEC defines the scaling (shift) of the time_freq and
 * time_tolerance variables, which represent the current frequency
 * offset and maximum frequency tolerance.
 */
#define SHIFT_USEC 16 /* frequency offset scale (shift) */
/*
 * Conversion factors between scaled ppm and the kernel's fixed-point
 * representation.  Note these expand NSEC_PER_USEC and NTP_SCALE_SHIFT,
 * which are defined elsewhere/later in this header; that is legal for
 * macros because names are resolved where the macro is USED, not where
 * it is defined.
 */
#define PPM_SCALE ((s64)NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
#define PPM_SCALE_INV_SHIFT 19
#define PPM_SCALE_INV ((1LL << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
		       PPM_SCALE + 1)
/* Sanity bounds enforced by the NTP adjustment machinery. */
#define MAXPHASE 500000000L /* max phase error (ns) */
#define MAXFREQ 500000 /* max frequency error (ns/s) */
#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
#define MINSEC 256 /* min interval between updates (s) */
#define MAXSEC 2048 /* max interval between updates (s) */
#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
/*
 * kernel variables
 * Note: maximum error = NTP synch distance = dispersion + delay / 2;
 * estimated error = NTP dispersion.
 */
extern unsigned long tick_usec; /* USER_HZ period (usec) */
extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */
/*
 * shift_right() - right shift that is safe for negative values.
 *
 * Right-shifting a negative signed integer is implementation-defined
 * in C, so for a negative @x we shift the positive magnitude and
 * negate the result (this rounds toward zero).  Uses GNU statement
 * expressions so each argument is evaluated exactly once.
 */
#define shift_right(x, s) ({			\
	__typeof__(x) __sr_x = (x);		\
	__typeof__(s) __sr_s = (s);		\
	(__sr_x < 0) ?				\
		-(-__sr_x >> __sr_s) :		\
		(__sr_x >> __sr_s);		\
})
/* Fixed-point shift used by the PPM_SCALE/MAXFREQ_SCALED macros above. */
#define NTP_SCALE_SHIFT 32
/* NTP adjustment runs once per tick. */
#define NTP_INTERVAL_FREQ (HZ)
#define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
/* Arch hook to read a free-running counter; presumably nonzero return
 * means "no such timer" -- NOTE(review): confirm in the implementation. */
int read_current_timer(unsigned long *timer_val);
/* Notify the CMOS/RTC sync machinery of an NTP state change -- inferred
 * from the name; confirm against kernel/time/ntp.c. */
void ntp_notify_cmos_timer(void);
/* The clock frequency of the i8253/i8254 PIT */
#define PIT_TICK_RATE 1193182ul
#endif /* _LINUX_TIMEX_H */