Subversion Repositories Kolibri OS

Compare Revisions

Rev 7142 → Rev 7143

/drivers/ddk/linux/bitmap.c
7,12 → 7,16
*/
#include <syscall.h>
#include <linux/export.h>
//#include <linux/thread_info.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
 
#include <asm/page.h>
//#include <asm/uaccess.h>
 
/*
/drivers/ddk/linux/dmi_scan.c
0,0 → 1,1049
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/dmi.h>
#include <asm/unaligned.h>
 
struct kobject *dmi_kobj;
EXPORT_SYMBOL_GPL(dmi_kobj);
 
/*
* DMI stands for "Desktop Management Interface". It is part
* of, and an antecedent to, SMBIOS, which stands for System
* Management BIOS. See further: http://www.dmtf.org/standards
*/
static const char dmi_empty_string[] = "        ";	/* eight spaces, compared against below with cmp_len up to 8 */
 
static u32 dmi_ver __initdata;
static u32 dmi_len;
static u16 dmi_num;
static u8 smbios_entry_point[32];
static int smbios_entry_point_size;
 
/*
* Catch too early calls to dmi_check_system():
*/
static int dmi_initialized;
 
/* DMI system identification string used during boot */
static char dmi_ids_string[128] __initdata;
 
static struct dmi_memdev_info {
const char *device;
const char *bank;
u16 handle;
} *dmi_memdev;
static int dmi_memdev_nr;
 
static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
{
const u8 *bp = ((u8 *) dm) + dm->length;
 
if (s) {
s--;
while (s > 0 && *bp) {
bp += strlen(bp) + 1;
s--;
}
 
if (*bp != 0) {
size_t len = strlen(bp)+1;
size_t cmp_len = len > 8 ? 8 : len;
 
if (!memcmp(bp, dmi_empty_string, cmp_len))
return dmi_empty_string;
return bp;
}
}
 
return "";
}
 
static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
{
const char *bp = dmi_string_nosave(dm, s);
char *str;
size_t len;
 
if (bp == dmi_empty_string)
return dmi_empty_string;
 
len = strlen(bp) + 1;
str = dmi_alloc(len);
if (str != NULL)
strcpy(str, bp);
 
return str;
}
 
/*
* We have to be cautious here. We have seen BIOSes with DMI pointers
* pointing to completely the wrong place, for example
*/
static void dmi_decode_table(u8 *buf,
void (*decode)(const struct dmi_header *, void *),
void *private_data)
{
u8 *data = buf;
int i = 0;
 
/*
* Stop when we have seen all the items the table claimed to have
* (SMBIOS < 3.0 only) OR we reach an end-of-table marker (SMBIOS
* >= 3.0 only) OR we run off the end of the table (should never
* happen but sometimes does on bogus implementations.)
*/
while ((!dmi_num || i < dmi_num) &&
(data - buf + sizeof(struct dmi_header)) <= dmi_len) {
const struct dmi_header *dm = (const struct dmi_header *)data;
 
/*
* We want to know the total length (formatted area and
* strings) before decoding to make sure we won't run off the
* table in dmi_decode or dmi_string
*/
data += dm->length;
while ((data - buf < dmi_len - 1) && (data[0] || data[1]))
data++;
if (data - buf < dmi_len - 1)
decode(dm, private_data);
 
data += 2;
i++;
 
/*
* 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
* For tables behind a 64-bit entry point, we have no item
* count and no exact table length, so stop on end-of-table
* marker. For tables behind a 32-bit entry point, we have
* seen OEM structures behind the end-of-table marker on
* some systems, so don't trust it.
*/
if (!dmi_num && dm->type == DMI_ENTRY_END_OF_TABLE)
break;
}
 
/* Trim DMI table length if needed */
if (dmi_len > data - buf)
dmi_len = data - buf;
}
 
static phys_addr_t dmi_base;
 
static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
void *))
{
u8 *buf;
u32 orig_dmi_len = dmi_len;
 
buf = dmi_early_remap(dmi_base, orig_dmi_len);
if (buf == NULL)
return -1;
 
dmi_decode_table(buf, decode, NULL);
 
add_device_randomness(buf, dmi_len);
 
dmi_early_unmap(buf, orig_dmi_len);
return 0;
}
 
static int __init dmi_checksum(const u8 *buf, u8 len)
{
u8 sum = 0;
int a;
 
for (a = 0; a < len; a++)
sum += buf[a];
 
return sum == 0;
}
 
static const char *dmi_ident[DMI_STRING_MAX];
static LIST_HEAD(dmi_devices);
int dmi_available;
 
/*
* Save a DMI string
*/
static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
int string)
{
const char *d = (const char *) dm;
const char *p;
 
if (dmi_ident[slot])
return;
 
p = dmi_string(dm, d[string]);
if (p == NULL)
return;
 
dmi_ident[slot] = p;
}
 
static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
int index)
{
const u8 *d = (u8 *) dm + index;
char *s;
int is_ff = 1, is_00 = 1, i;
 
if (dmi_ident[slot])
return;
 
for (i = 0; i < 16 && (is_ff || is_00); i++) {
if (d[i] != 0x00)
is_00 = 0;
if (d[i] != 0xFF)
is_ff = 0;
}
 
if (is_ff || is_00)
return;
 
s = dmi_alloc(16*2+4+1);
if (!s)
return;
 
/*
* As of version 2.6 of the SMBIOS specification, the first 3 fields of
* the UUID are supposed to be little-endian encoded. The specification
* says that this is the de facto standard.
*/
if (dmi_ver >= 0x020600)
sprintf(s, "%pUL", d);
else
sprintf(s, "%pUB", d);
 
dmi_ident[slot] = s;
}
 
static void __init dmi_save_type(const struct dmi_header *dm, int slot,
int index)
{
const u8 *d = (u8 *) dm + index;
char *s;
 
if (dmi_ident[slot])
return;
 
s = dmi_alloc(4);
if (!s)
return;
 
sprintf(s, "%u", *d & 0x7F);
dmi_ident[slot] = s;
}
 
static void __init dmi_save_one_device(int type, const char *name)
{
struct dmi_device *dev;
 
/* No duplicate device */
if (dmi_find_device(type, name, NULL))
return;
 
dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
if (!dev)
return;
 
dev->type = type;
strcpy((char *)(dev + 1), name);
dev->name = (char *)(dev + 1);
dev->device_data = NULL;
list_add(&dev->list, &dmi_devices);
}
 
static void __init dmi_save_devices(const struct dmi_header *dm)
{
int i, count = (dm->length - sizeof(struct dmi_header)) / 2;
 
for (i = 0; i < count; i++) {
const char *d = (char *)(dm + 1) + (i * 2);
 
/* Skip disabled device */
if ((*d & 0x80) == 0)
continue;
 
dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d + 1)));
}
}
 
static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
{
int i, count = *(u8 *)(dm + 1);
struct dmi_device *dev;
 
for (i = 1; i <= count; i++) {
const char *devname = dmi_string(dm, i);
 
if (devname == dmi_empty_string)
continue;
 
dev = dmi_alloc(sizeof(*dev));
if (!dev)
break;
 
dev->type = DMI_DEV_TYPE_OEM_STRING;
dev->name = devname;
dev->device_data = NULL;
 
list_add(&dev->list, &dmi_devices);
}
}
 
static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
{
struct dmi_device *dev;
void *data;
 
data = dmi_alloc(dm->length);
if (data == NULL)
return;
 
memcpy(data, dm, dm->length);
 
dev = dmi_alloc(sizeof(*dev));
if (!dev)
return;
 
dev->type = DMI_DEV_TYPE_IPMI;
dev->name = "IPMI controller";
dev->device_data = data;
 
list_add_tail(&dev->list, &dmi_devices);
}
 
static void __init dmi_save_dev_pciaddr(int instance, int segment, int bus,
int devfn, const char *name, int type)
{
struct dmi_dev_onboard *dev;
 
/* Ignore invalid values */
if (type == DMI_DEV_TYPE_DEV_SLOT &&
segment == 0xFFFF && bus == 0xFF && devfn == 0xFF)
return;
 
dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
if (!dev)
return;
 
dev->instance = instance;
dev->segment = segment;
dev->bus = bus;
dev->devfn = devfn;
 
strcpy((char *)&dev[1], name);
dev->dev.type = type;
dev->dev.name = (char *)&dev[1];
dev->dev.device_data = dev;
 
list_add(&dev->dev.list, &dmi_devices);
}
 
static void __init dmi_save_extended_devices(const struct dmi_header *dm)
{
const char *name;
const u8 *d = (u8 *)dm;
 
/* Skip disabled device */
if ((d[0x5] & 0x80) == 0)
return;
 
name = dmi_string_nosave(dm, d[0x4]);
dmi_save_dev_pciaddr(d[0x6], *(u16 *)(d + 0x7), d[0x9], d[0xA], name,
DMI_DEV_TYPE_DEV_ONBOARD);
dmi_save_one_device(d[0x5] & 0x7f, name);
}
 
static void __init dmi_save_system_slot(const struct dmi_header *dm)
{
const u8 *d = (u8 *)dm;
 
/* Need SMBIOS 2.6+ structure */
if (dm->length < 0x11)
return;
dmi_save_dev_pciaddr(*(u16 *)(d + 0x9), *(u16 *)(d + 0xD), d[0xF],
d[0x10], dmi_string_nosave(dm, d[0x4]),
DMI_DEV_TYPE_DEV_SLOT);
}
 
static void __init count_mem_devices(const struct dmi_header *dm, void *v)
{
if (dm->type != DMI_ENTRY_MEM_DEVICE)
return;
dmi_memdev_nr++;
}
 
static void __init save_mem_devices(const struct dmi_header *dm, void *v)
{
const char *d = (const char *)dm;
static int nr;
 
if (dm->type != DMI_ENTRY_MEM_DEVICE)
return;
if (nr >= dmi_memdev_nr) {
pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
return;
}
dmi_memdev[nr].handle = get_unaligned(&dm->handle);
dmi_memdev[nr].device = dmi_string(dm, d[0x10]);
dmi_memdev[nr].bank = dmi_string(dm, d[0x11]);
nr++;
}
 
void __init dmi_memdev_walk(void)
{
if (!dmi_available)
return;
 
if (dmi_walk_early(count_mem_devices) == 0 && dmi_memdev_nr) {
dmi_memdev = dmi_alloc(sizeof(*dmi_memdev) * dmi_memdev_nr);
if (dmi_memdev)
dmi_walk_early(save_mem_devices);
}
}
 
/*
* Process a DMI table entry. Right now all we care about are the BIOS
* and machine entries. For 2.5 we should pull the smbus controller info
* out of here.
*/
static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
{
switch (dm->type) {
case 0: /* BIOS Information */
dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
dmi_save_ident(dm, DMI_BIOS_DATE, 8);
break;
case 1: /* System Information */
dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
break;
case 2: /* Base Board Information */
dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
dmi_save_ident(dm, DMI_BOARD_NAME, 5);
dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
dmi_save_ident(dm, DMI_BOARD_SERIAL, 7);
dmi_save_ident(dm, DMI_BOARD_ASSET_TAG, 8);
break;
case 3: /* Chassis Information */
dmi_save_ident(dm, DMI_CHASSIS_VENDOR, 4);
dmi_save_type(dm, DMI_CHASSIS_TYPE, 5);
dmi_save_ident(dm, DMI_CHASSIS_VERSION, 6);
dmi_save_ident(dm, DMI_CHASSIS_SERIAL, 7);
dmi_save_ident(dm, DMI_CHASSIS_ASSET_TAG, 8);
break;
case 9: /* System Slots */
dmi_save_system_slot(dm);
break;
case 10: /* Onboard Devices Information */
dmi_save_devices(dm);
break;
case 11: /* OEM Strings */
dmi_save_oem_strings_devices(dm);
break;
case 38: /* IPMI Device Information */
dmi_save_ipmi_device(dm);
break;
case 41: /* Onboard Devices Extended Information */
dmi_save_extended_devices(dm);
}
}
 
static int __init print_filtered(char *buf, size_t len, const char *info)
{
int c = 0;
const char *p;
 
if (!info)
return c;
 
for (p = info; *p; p++)
if (isprint(*p))
c += scnprintf(buf + c, len - c, "%c", *p);
else
c += scnprintf(buf + c, len - c, "\\x%02x", *p & 0xff);
return c;
}
 
static void __init dmi_format_ids(char *buf, size_t len)
{
int c = 0;
const char *board; /* Board Name is optional */
 
c += print_filtered(buf + c, len - c,
dmi_get_system_info(DMI_SYS_VENDOR));
c += scnprintf(buf + c, len - c, " ");
c += print_filtered(buf + c, len - c,
dmi_get_system_info(DMI_PRODUCT_NAME));
 
board = dmi_get_system_info(DMI_BOARD_NAME);
if (board) {
c += scnprintf(buf + c, len - c, "/");
c += print_filtered(buf + c, len - c, board);
}
c += scnprintf(buf + c, len - c, ", BIOS ");
c += print_filtered(buf + c, len - c,
dmi_get_system_info(DMI_BIOS_VERSION));
c += scnprintf(buf + c, len - c, " ");
c += print_filtered(buf + c, len - c,
dmi_get_system_info(DMI_BIOS_DATE));
}
 
/*
* Check for DMI/SMBIOS headers in the system firmware image. Any
* SMBIOS header must start 16 bytes before the DMI header, so take a
* 32 byte buffer and check for DMI at offset 16 and SMBIOS at offset
* 0. If the DMI header is present, set dmi_ver accordingly (SMBIOS
* takes precedence) and return 0. Otherwise return 1.
*/
static int __init dmi_present(const u8 *buf)
{
u32 smbios_ver;
 
if (memcmp(buf, "_SM_", 4) == 0 &&
buf[5] < 32 && dmi_checksum(buf, buf[5])) {
smbios_ver = get_unaligned_be16(buf + 6);
smbios_entry_point_size = buf[5];
memcpy(smbios_entry_point, buf, smbios_entry_point_size);
 
/* Some BIOS report weird SMBIOS version, fix that up */
switch (smbios_ver) {
case 0x021F:
case 0x0221:
pr_debug("SMBIOS version fixup (2.%d->2.%d)\n",
smbios_ver & 0xFF, 3);
smbios_ver = 0x0203;
break;
case 0x0233:
pr_debug("SMBIOS version fixup (2.%d->2.%d)\n", 51, 6);
smbios_ver = 0x0206;
break;
}
} else {
smbios_ver = 0;
}
 
buf += 16;
 
if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) {
if (smbios_ver)
dmi_ver = smbios_ver;
else
dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F);
dmi_ver <<= 8;
dmi_num = get_unaligned_le16(buf + 12);
dmi_len = get_unaligned_le16(buf + 6);
dmi_base = get_unaligned_le32(buf + 8);
 
if (dmi_walk_early(dmi_decode) == 0) {
if (smbios_ver) {
pr_info("SMBIOS %d.%d present.\n",
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
} else {
smbios_entry_point_size = 15;
memcpy(smbios_entry_point, buf,
smbios_entry_point_size);
pr_info("Legacy DMI %d.%d present.\n",
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
}
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
printk(KERN_DEBUG "DMI: %s\n", dmi_ids_string);
return 0;
}
}
 
return 1;
}
 
/*
* Check for the SMBIOS 3.0 64-bit entry point signature. Unlike the legacy
* 32-bit entry point, there is no embedded DMI header (_DMI_) in here.
*/
static int __init dmi_smbios3_present(const u8 *buf)
{
if (memcmp(buf, "_SM3_", 5) == 0 &&
buf[6] < 32 && dmi_checksum(buf, buf[6])) {
dmi_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF;
dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
dmi_base = get_unaligned_le64(buf + 16);
smbios_entry_point_size = buf[6];
memcpy(smbios_entry_point, buf, smbios_entry_point_size);
 
if (dmi_walk_early(dmi_decode) == 0) {
pr_info("SMBIOS %d.%d.%d present.\n",
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF,
dmi_ver & 0xFF);
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
pr_debug("DMI: %s\n", dmi_ids_string);
return 0;
}
}
return 1;
}
 
void __init dmi_scan_machine(void)
{
char __iomem *p, *q;
char buf[32];
 
if (efi_enabled(EFI_CONFIG_TABLES)) {
/*
* According to the DMTF SMBIOS reference spec v3.0.0, it is
* allowed to define both the 64-bit entry point (smbios3) and
* the 32-bit entry point (smbios), in which case they should
* either both point to the same SMBIOS structure table, or the
* table pointed to by the 64-bit entry point should contain a
* superset of the table contents pointed to by the 32-bit entry
* point (section 5.2)
* This implies that the 64-bit entry point should have
* precedence if it is defined and supported by the OS. If we
* have the 64-bit entry point, but fail to decode it, fall
* back to the legacy one (if available)
*/
if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) {
p = dmi_early_remap(efi.smbios3, 32);
if (p == NULL)
goto error;
memcpy_fromio(buf, p, 32);
dmi_early_unmap(p, 32);
 
if (!dmi_smbios3_present(buf)) {
dmi_available = 1;
goto out;
}
}
if (efi.smbios == EFI_INVALID_TABLE_ADDR)
goto error;
 
/* This is called as a core_initcall() because it isn't
* needed during early boot. This also means we can
* iounmap the space when we're done with it.
*/
p = dmi_early_remap(efi.smbios, 32);
if (p == NULL)
goto error;
memcpy_fromio(buf, p, 32);
dmi_early_unmap(p, 32);
 
if (!dmi_present(buf)) {
dmi_available = 1;
goto out;
}
} else if (IS_ENABLED(CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK)) {
p = dmi_early_remap(0xF0000, 0x10000);
if (p == NULL)
goto error;
 
/*
* Iterate over all possible DMI header addresses q.
* Maintain the 32 bytes around q in buf. On the
* first iteration, substitute zero for the
* out-of-range bytes so there is no chance of falsely
* detecting an SMBIOS header.
*/
memset(buf, 0, 16);
for (q = p; q < p + 0x10000; q += 16) {
memcpy_fromio(buf + 16, q, 16);
if (!dmi_smbios3_present(buf) || !dmi_present(buf)) {
dmi_available = 1;
dmi_early_unmap(p, 0x10000);
goto out;
}
memcpy(buf, buf + 16, 16);
}
dmi_early_unmap(p, 0x10000);
}
error:
pr_info("DMI not present or invalid.\n");
out:
dmi_initialized = 1;
}
 
static ssize_t raw_table_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
memcpy(buf, attr->private + pos, count);
return count;
}
 
static BIN_ATTR(smbios_entry_point, S_IRUSR, raw_table_read, NULL, 0);
static BIN_ATTR(DMI, S_IRUSR, raw_table_read, NULL, 0);
 
static int __init dmi_init(void)
{
struct kobject *tables_kobj;
u8 *dmi_table;
int ret = -ENOMEM;
 
if (!dmi_available) {
ret = -ENODATA;
goto err;
}
 
/*
* Set up dmi directory at /sys/firmware/dmi. This entry should stay
* even after a subsequent error, as it can be used by other modules like
* dmi-sysfs.
*/
dmi_kobj = kobject_create_and_add("dmi", firmware_kobj);
if (!dmi_kobj)
goto err;
 
tables_kobj = kobject_create_and_add("tables", dmi_kobj);
if (!tables_kobj)
goto err;
 
dmi_table = dmi_remap(dmi_base, dmi_len);
if (!dmi_table)
goto err_tables;
 
bin_attr_smbios_entry_point.size = smbios_entry_point_size;
bin_attr_smbios_entry_point.private = smbios_entry_point;
ret = sysfs_create_bin_file(tables_kobj, &bin_attr_smbios_entry_point);
if (ret)
goto err_unmap;
 
bin_attr_DMI.size = dmi_len;
bin_attr_DMI.private = dmi_table;
ret = sysfs_create_bin_file(tables_kobj, &bin_attr_DMI);
if (!ret)
return 0;
 
sysfs_remove_bin_file(tables_kobj,
&bin_attr_smbios_entry_point);
err_unmap:
dmi_unmap(dmi_table);
err_tables:
kobject_del(tables_kobj);
kobject_put(tables_kobj);
err:
pr_err("dmi: Firmware registration failed.\n");
 
return ret;
}
subsys_initcall(dmi_init);
 
/**
* dmi_set_dump_stack_arch_desc - set arch description for dump_stack()
*
* Invoke dump_stack_set_arch_desc() with DMI system information so that
* DMI identifiers are printed out on task dumps. Arch boot code should
* call this function after dmi_scan_machine() if it wants to print out DMI
* identifiers on task dumps.
*/
void __init dmi_set_dump_stack_arch_desc(void)
{
dump_stack_set_arch_desc("%s", dmi_ids_string);
}
 
/**
* dmi_matches - check if dmi_system_id structure matches system DMI data
* @dmi: pointer to the dmi_system_id structure to check
*/
static bool dmi_matches(const struct dmi_system_id *dmi)
{
int i;
 
WARN(!dmi_initialized, KERN_ERR "dmi check: not initialized yet.\n");
 
for (i = 0; i < ARRAY_SIZE(dmi->matches); i++) {
int s = dmi->matches[i].slot;
if (s == DMI_NONE)
break;
if (dmi_ident[s]) {
if (!dmi->matches[i].exact_match &&
strstr(dmi_ident[s], dmi->matches[i].substr))
continue;
else if (dmi->matches[i].exact_match &&
!strcmp(dmi_ident[s], dmi->matches[i].substr))
continue;
}
 
/* No match */
return false;
}
return true;
}
 
/**
* dmi_is_end_of_table - check for end-of-table marker
* @dmi: pointer to the dmi_system_id structure to check
*/
static bool dmi_is_end_of_table(const struct dmi_system_id *dmi)
{
return dmi->matches[0].slot == DMI_NONE;
}
 
/**
* dmi_check_system - check system DMI data
* @list: array of dmi_system_id structures to match against
* All non-null elements of the list must match
* their slot's (field index's) data (i.e., each
* list string must be a substring of the specified
* DMI slot's string data) to be considered a
* successful match.
*
* Walk the blacklist table running matching functions until someone
* returns non-zero or we hit the end. The callback function is called for
* each successful match. Returns the number of matches.
*/
int dmi_check_system(const struct dmi_system_id *list)
{
int count = 0;
const struct dmi_system_id *d;
 
for (d = list; !dmi_is_end_of_table(d); d++)
if (dmi_matches(d)) {
count++;
if (d->callback && d->callback(d))
break;
}
 
return count;
}
EXPORT_SYMBOL(dmi_check_system);
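 
For context, a minimal sketch of how a driver typically consumes this interface (the table, callback and match strings are hypothetical, not part of this revision): a sentinel-terminated array of struct dmi_system_id is built with DMI_MATCH() entries, and dmi_check_system() invokes the callback for every entry whose matches all hit.
 
static int __init example_quirk(const struct dmi_system_id *id)
{
	pr_info("DMI quirk: %s\n", id->ident);
	return 0;	/* returning non-zero would stop the scan early */
}
 
static const struct dmi_system_id example_quirk_table[] __initconst = {
	{
		.callback = example_quirk,
		.ident = "Example Board",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
		},
	},
	{ }	/* terminator: matches[0].slot == DMI_NONE */
};
 
/* after dmi_scan_machine() has populated dmi_ident[]: */
dmi_check_system(example_quirk_table);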
 
/**
* dmi_first_match - find dmi_system_id structure matching system DMI data
* @list: array of dmi_system_id structures to match against
* All non-null elements of the list must match
* their slot's (field index's) data (i.e., each
* list string must be a substring of the specified
* DMI slot's string data) to be considered a
* successful match.
*
* Walk the blacklist table until the first match is found. Return the
* pointer to the matching entry or NULL if there's no match.
*/
const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list)
{
const struct dmi_system_id *d;
 
for (d = list; !dmi_is_end_of_table(d); d++)
if (dmi_matches(d))
return d;
 
return NULL;
}
EXPORT_SYMBOL(dmi_first_match);
 
/**
* dmi_get_system_info - return DMI data value
* @field: data index (see enum dmi_field)
*
* Returns one DMI data value, can be used to perform
* complex DMI data checks.
*/
const char *dmi_get_system_info(int field)
{
return dmi_ident[field];
}
EXPORT_SYMBOL(dmi_get_system_info);
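 
Usage is a plain table lookup; a hedged example (the log message is illustrative only):
 
const char *vendor = dmi_get_system_info(DMI_SYS_VENDOR);
 
if (vendor)
	pr_info("system vendor: %s\n", vendor);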
 
/**
* dmi_name_in_serial - Check if string is in the DMI product serial information
* @str: string to check for
*/
int dmi_name_in_serial(const char *str)
{
int f = DMI_PRODUCT_SERIAL;
if (dmi_ident[f] && strstr(dmi_ident[f], str))
return 1;
return 0;
}
 
/**
* dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
* @str: Case sensitive Name
*/
int dmi_name_in_vendors(const char *str)
{
static int fields[] = { DMI_SYS_VENDOR, DMI_BOARD_VENDOR, DMI_NONE };
int i;
for (i = 0; fields[i] != DMI_NONE; i++) {
int f = fields[i];
if (dmi_ident[f] && strstr(dmi_ident[f], str))
return 1;
}
return 0;
}
EXPORT_SYMBOL(dmi_name_in_vendors);
 
/**
* dmi_find_device - find onboard device by type/name
* @type: device type or %DMI_DEV_TYPE_ANY to match all device types
* @name: device name string or %NULL to match all
* @from: previous device found in search, or %NULL for new search.
*
* Iterates through the list of known onboard devices. If a device is
* found with a matching @type and @name, a pointer to its device
* structure is returned. Otherwise, %NULL is returned.
* A new search is initiated by passing %NULL as the @from argument.
* If @from is not %NULL, the search continues from the next device.
*/
const struct dmi_device *dmi_find_device(int type, const char *name,
const struct dmi_device *from)
{
const struct list_head *head = from ? &from->list : &dmi_devices;
struct list_head *d;
 
for (d = head->next; d != &dmi_devices; d = d->next) {
const struct dmi_device *dev =
list_entry(d, struct dmi_device, list);
 
if (((type == DMI_DEV_TYPE_ANY) || (dev->type == type)) &&
((name == NULL) || (strcmp(dev->name, name) == 0)))
return dev;
}
 
return NULL;
}
EXPORT_SYMBOL(dmi_find_device);
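 
A sketch of the usual iteration pattern, assuming IPMI records were saved by dmi_save_ipmi_device() above; for those, device_data points at the copied SMBIOS record:
 
const struct dmi_device *dev = NULL;
 
while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
	const struct dmi_header *dm = dev->device_data;
 
	pr_info("%s: type 38 record, length %u\n", dev->name, dm->length);
}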
 
/**
* dmi_get_date - parse a DMI date
* @field: data index (see enum dmi_field)
* @yearp: optional out parameter for the year
* @monthp: optional out parameter for the month
* @dayp: optional out parameter for the day
*
* The date field is assumed to be in the form resembling
* [mm[/dd]]/yy[yy] and the result is stored in the out
* parameters, any or all of which can be omitted.
*
* If the field doesn't exist, all out parameters are set to zero
* and false is returned. Otherwise, true is returned with any
* invalid part of date set to zero.
*
* On return, year, month and day are guaranteed to be in the
* range of [0,9999], [0,12] and [0,31] respectively.
*/
bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
{
int year = 0, month = 0, day = 0;
bool exists;
const char *s, *y;
char *e;
 
s = dmi_get_system_info(field);
exists = s;
if (!exists)
goto out;
 
/*
* Determine year first. We assume the date string resembles
* mm/dd/yy[yy] but the original code extracted only the year
* from the end. Keep the behavior in the spirit of no
* surprises.
*/
y = strrchr(s, '/');
if (!y)
goto out;
 
y++;
year = simple_strtoul(y, &e, 10);
if (y != e && year < 100) { /* 2-digit year */
year += 1900;
if (year < 1996) /* no dates < spec 1.0 */
year += 100;
}
if (year > 9999) /* year should fit in %04d */
year = 0;
 
/* parse the mm and dd */
month = simple_strtoul(s, &e, 10);
if (s == e || *e != '/' || !month || month > 12) {
month = 0;
goto out;
}
 
s = e + 1;
day = simple_strtoul(s, &e, 10);
if (s == y || s == e || *e != '/' || day > 31)
day = 0;
out:
if (yearp)
*yearp = year;
if (monthp)
*monthp = month;
if (dayp)
*dayp = day;
return exists;
}
EXPORT_SYMBOL(dmi_get_date);
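 
A minimal caller sketch; per the comment above, out parameters that the date string cannot fill are reported as zero:
 
int year, month, day;
 
if (dmi_get_date(DMI_BIOS_DATE, &year, &month, &day))
	pr_info("BIOS date: %04d-%02d-%02d\n", year, month, day);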
 
/**
* dmi_walk - Walk the DMI table and get called back for every record
* @decode: Callback function
* @private_data: Private data to be passed to the callback function
*
* Returns -1 when the DMI table can't be reached, 0 on success.
*/
int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data)
{
u8 *buf;
 
if (!dmi_available)
return -1;
 
buf = dmi_remap(dmi_base, dmi_len);
if (buf == NULL)
return -1;
 
dmi_decode_table(buf, decode, private_data);
 
dmi_unmap(buf);
return 0;
}
EXPORT_SYMBOL_GPL(dmi_walk);
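 
A hedged usage sketch: the callback sees every record, so counting type-17 (memory device) entries only needs a private counter passed through @private_data; the callback name below is hypothetical.
 
static void count_mem_records(const struct dmi_header *dm, void *priv)
{
	if (dm->type == DMI_ENTRY_MEM_DEVICE)
		(*(int *)priv)++;
}
 
/* in some probe/init path: */
int dimms = 0;
 
if (dmi_walk(count_mem_records, &dimms) == 0)
	pr_info("SMBIOS reports %d memory device records\n", dimms);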
 
/**
* dmi_match - compare a string to the dmi field (if exists)
* @f: DMI field identifier
* @str: string to compare the DMI field to
*
* Returns true if the requested field equals @str (both being NULL also counts as a match).
*/
bool dmi_match(enum dmi_field f, const char *str)
{
const char *info = dmi_get_system_info(f);
 
if (info == NULL || str == NULL)
return info == str;
 
return !strcmp(info, str);
}
EXPORT_SYMBOL_GPL(dmi_match);
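 
Typical use is gating a quirk on a single identifier; the vendor string and helper below are hypothetical:
 
if (dmi_match(DMI_SYS_VENDOR, "Example Vendor"))
	apply_example_quirk();	/* hypothetical helper */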
 
void dmi_memdev_name(u16 handle, const char **bank, const char **device)
{
int n;
 
if (dmi_memdev == NULL)
return;
 
for (n = 0; n < dmi_memdev_nr; n++) {
if (handle == dmi_memdev[n].handle) {
*bank = dmi_memdev[n].bank;
*device = dmi_memdev[n].device;
break;
}
}
}
EXPORT_SYMBOL_GPL(dmi_memdev_name);
/drivers/ddk/linux/find_bit.c
0,0 → 1,193
/* bit search implementation
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* Copyright (C) 2008 IBM Corporation
* 'find_last_bit' is written by Rusty Russell <rusty@rustcorp.com.au>
* (Inspired by David Howells' find_next_bit implementation)
*
* Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
* size and improve performance, 2015.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
 
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>
 
#if !defined(find_next_bit) || !defined(find_next_zero_bit)
 
/*
* This is a common helper function for find_next_bit and
* find_next_zero_bit. The difference is the "invert" argument, which
* is XORed with each fetched word before searching it for one bits.
*/
static unsigned long _find_next_bit(const unsigned long *addr,
unsigned long nbits, unsigned long start, unsigned long invert)
{
unsigned long tmp;
 
if (!nbits || start >= nbits)
return nbits;
 
tmp = addr[start / BITS_PER_LONG] ^ invert;
 
/* Handle 1st word. */
tmp &= BITMAP_FIRST_WORD_MASK(start);
start = round_down(start, BITS_PER_LONG);
 
while (!tmp) {
start += BITS_PER_LONG;
if (start >= nbits)
return nbits;
 
tmp = addr[start / BITS_PER_LONG] ^ invert;
}
 
return min(start + __ffs(tmp), nbits);
}
#endif
 
#ifndef find_next_bit
/*
* Find the next set bit in a memory region.
*/
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
return _find_next_bit(addr, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_bit);
#endif
 
#ifndef find_next_zero_bit
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
return _find_next_bit(addr, size, offset, ~0UL);
}
EXPORT_SYMBOL(find_next_zero_bit);
#endif
 
#ifndef find_first_bit
/*
* Find the first set bit in a memory region.
*/
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
unsigned long idx;
 
for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
if (addr[idx])
return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
}
 
return size;
}
EXPORT_SYMBOL(find_first_bit);
#endif
 
#ifndef find_first_zero_bit
/*
* Find the first cleared bit in a memory region.
*/
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
unsigned long idx;
 
for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
if (addr[idx] != ~0UL)
return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
}
 
return size;
}
EXPORT_SYMBOL(find_first_zero_bit);
#endif
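 
A small sketch of how these primitives are normally combined to walk all set bits of a bitmap (the size and bit positions are arbitrary examples):
 
DECLARE_BITMAP(mask, 128);
unsigned long bit;
 
bitmap_zero(mask, 128);
__set_bit(3, mask);
__set_bit(70, mask);
 
for (bit = find_first_bit(mask, 128);
     bit < 128;
     bit = find_next_bit(mask, 128, bit + 1))
	pr_info("bit %lu is set\n", bit);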
 
#ifndef find_last_bit
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
if (size) {
unsigned long val = BITMAP_LAST_WORD_MASK(size);
unsigned long idx = (size-1) / BITS_PER_LONG;
 
do {
val &= addr[idx];
if (val)
return idx * BITS_PER_LONG + __fls(val);
 
val = ~0ul;
} while (idx--);
}
return size;
}
EXPORT_SYMBOL(find_last_bit);
#endif
 
#ifdef __BIG_ENDIAN
 
/* include/linux/byteorder does not support "unsigned long" type */
static inline unsigned long ext2_swab(const unsigned long y)
{
#if BITS_PER_LONG == 64
return (unsigned long) __swab64((u64) y);
#elif BITS_PER_LONG == 32
return (unsigned long) __swab32((u32) y);
#else
#error BITS_PER_LONG not defined
#endif
}
 
#if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
static unsigned long _find_next_bit_le(const unsigned long *addr,
unsigned long nbits, unsigned long start, unsigned long invert)
{
unsigned long tmp;
 
if (!nbits || start >= nbits)
return nbits;
 
tmp = addr[start / BITS_PER_LONG] ^ invert;
 
/* Handle 1st word. */
tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
start = round_down(start, BITS_PER_LONG);
 
while (!tmp) {
start += BITS_PER_LONG;
if (start >= nbits)
return nbits;
 
tmp = addr[start / BITS_PER_LONG] ^ invert;
}
 
return min(start + __ffs(ext2_swab(tmp)), nbits);
}
#endif
 
#ifndef find_next_zero_bit_le
unsigned long find_next_zero_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
return _find_next_bit_le(addr, size, offset, ~0UL);
}
EXPORT_SYMBOL(find_next_zero_bit_le);
#endif
 
#ifndef find_next_bit_le
unsigned long find_next_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
return _find_next_bit_le(addr, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_bit_le);
#endif
 
#endif /* __BIG_ENDIAN */
/drivers/ddk/linux/list_sort.c
145,3 → 145,149
merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
}
EXPORT_SYMBOL(list_sort);
 
#ifdef CONFIG_TEST_LIST_SORT
 
#include <linux/slab.h>
#include <linux/random.h>
 
/*
* The pattern of set bits in the list length determines which cases
* are hit in list_sort().
*/
#define TEST_LIST_LEN (512+128+2) /* not including head */
 
#define TEST_POISON1 0xDEADBEEF
#define TEST_POISON2 0xA324354C
 
struct debug_el {
unsigned int poison1;
struct list_head list;
unsigned int poison2;
int value;
unsigned serial;
};
 
/* Array containing pointers to all elements in the test list */
static struct debug_el **elts __initdata;
 
static int __init check(struct debug_el *ela, struct debug_el *elb)
{
if (ela->serial >= TEST_LIST_LEN) {
pr_err("error: incorrect serial %d\n", ela->serial);
return -EINVAL;
}
if (elb->serial >= TEST_LIST_LEN) {
pr_err("error: incorrect serial %d\n", elb->serial);
return -EINVAL;
}
if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
pr_err("error: phantom element\n");
return -EINVAL;
}
if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
pr_err("error: bad poison: %#x/%#x\n",
ela->poison1, ela->poison2);
return -EINVAL;
}
if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
pr_err("error: bad poison: %#x/%#x\n",
elb->poison1, elb->poison2);
return -EINVAL;
}
return 0;
}
 
static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct debug_el *ela, *elb;
 
ela = container_of(a, struct debug_el, list);
elb = container_of(b, struct debug_el, list);
 
check(ela, elb);
return ela->value - elb->value;
}
 
static int __init list_sort_test(void)
{
int i, count = 1, err = -ENOMEM;
struct debug_el *el;
struct list_head *cur;
LIST_HEAD(head);
 
pr_debug("start testing list_sort()\n");
 
elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
if (!elts) {
pr_err("error: cannot allocate memory\n");
return err;
}
 
for (i = 0; i < TEST_LIST_LEN; i++) {
el = kmalloc(sizeof(*el), GFP_KERNEL);
if (!el) {
pr_err("error: cannot allocate memory\n");
goto exit;
}
/* force some equivalencies */
el->value = prandom_u32() % (TEST_LIST_LEN / 3);
el->serial = i;
el->poison1 = TEST_POISON1;
el->poison2 = TEST_POISON2;
elts[i] = el;
list_add_tail(&el->list, &head);
}
 
list_sort(NULL, &head, cmp);
 
err = -EINVAL;
for (cur = head.next; cur->next != &head; cur = cur->next) {
struct debug_el *el1;
int cmp_result;
 
if (cur->next->prev != cur) {
pr_err("error: list is corrupted\n");
goto exit;
}
 
cmp_result = cmp(NULL, cur, cur->next);
if (cmp_result > 0) {
pr_err("error: list is not sorted\n");
goto exit;
}
 
el = container_of(cur, struct debug_el, list);
el1 = container_of(cur->next, struct debug_el, list);
if (cmp_result == 0 && el->serial >= el1->serial) {
pr_err("error: order of equivalent elements not "
"preserved\n");
goto exit;
}
 
if (check(el, el1)) {
pr_err("error: element check failed\n");
goto exit;
}
count++;
}
if (head.prev != cur) {
pr_err("error: list is corrupted\n");
goto exit;
}
 
 
if (count != TEST_LIST_LEN) {
pr_err("error: bad list length %d", count);
goto exit;
}
 
err = 0;
exit:
for (i = 0; i < TEST_LIST_LEN; i++)
kfree(elts[i]);
kfree(elts);
return err;
}
late_initcall(list_sort_test);
#endif /* CONFIG_TEST_LIST_SORT */
/drivers/ddk/linux/workqueue.c
117,7 → 117,7
queue_work(wq, &dwork->work);
}
 
int queue_delayed_work(struct workqueue_struct *wq,
bool queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay)
{
struct work_struct *work = &dwork->work;
138,12 → 138,12
return queue_delayed_work(system_wq, dwork, delay);
}
 
bool mod_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work(wq, dwork, delay);
}
//bool mod_delayed_work(struct workqueue_struct *wq,
// struct delayed_work *dwork,
// unsigned long delay)
//{
// return queue_delayed_work(wq, dwork, delay);
//}
 
int del_timer(struct timer_list *timer)
{
/drivers/ddk/malloc/malloc.c
649,9 → 649,9
#define NO_SEGMENT_TRAVERSAL 1
#define MALLOC_ALIGNMENT ((size_t)8U)
#define CHUNK_OVERHEAD (SIZE_T_SIZE)
#define DEFAULT_GRANULARITY ((size_t)128U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD ((size_t)512U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD ((size_t)1024U * (size_t)1024U)
#define DEFAULT_GRANULARITY ((size_t)256U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD ((size_t)1024U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD ((size_t)2048U * (size_t)1024U)
 
/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
/drivers/include/acpi/acbuffer.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/acconfig.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/acexcep.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
126,8 → 126,9
#define AE_OWNER_ID_LIMIT EXCEP_ENV (0x001B)
#define AE_NOT_CONFIGURED EXCEP_ENV (0x001C)
#define AE_ACCESS EXCEP_ENV (0x001D)
#define AE_IO_ERROR EXCEP_ENV (0x001E)
 
#define AE_CODE_ENV_MAX 0x001D
#define AE_CODE_ENV_MAX 0x001E
 
/*
* Programmer exceptions
263,7 → 264,8
"There are no more Owner IDs available for ACPI tables or control methods"),
EXCEP_TXT("AE_NOT_CONFIGURED",
"The interface is not part of the current subsystem configuration"),
EXCEP_TXT("AE_ACCESS", "Permission denied for the requested operation")
EXCEP_TXT("AE_ACCESS", "Permission denied for the requested operation"),
EXCEP_TXT("AE_IO_ERROR", "An I/O error occurred")
};
 
static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = {
/drivers/include/acpi/acnames.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/acoutput.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
262,7 → 262,7
#define ACPI_GET_FUNCTION_NAME _acpi_function_name
 
/*
* The Name parameter should be the procedure name as a quoted string.
* The Name parameter should be the procedure name as a non-quoted string.
* The function name is also used by the function exit macros below.
* Note: (const char) is used to be compatible with the debug interfaces
* and macros such as __func__.
/drivers/include/acpi/acpi.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/acpi_bus.h
87,6 → 87,8
.package.elements = (eles) \
}
 
bool acpi_dev_present(const char *hid);
 
#ifdef CONFIG_ACPI
 
#define ACPI_BUS_FILE_ROOT "acpi"
389,13 → 391,13
 
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return fwnode && (fwnode->type == FWNODE_ACPI
return !IS_ERR_OR_NULL(fwnode) && (fwnode->type == FWNODE_ACPI
|| fwnode->type == FWNODE_ACPI_DATA);
}
 
static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
{
return fwnode && fwnode->type == FWNODE_ACPI;
return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_ACPI;
}
 
static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
626,7 → 628,9
 
static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
{
return adev->power.states[ACPI_STATE_D3_COLD].flags.valid;
return adev->power.states[ACPI_STATE_D3_COLD].flags.valid ||
((acpi_gbl_FADT.header.revision < 6) &&
adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set);
}
 
#else /* CONFIG_ACPI */
/drivers/include/acpi/acpi_io.h
6,9 → 6,9
#include <asm/acpi.h>
 
 
void acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
void acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
void *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
void __iomem *__ref acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
 
int acpi_os_map_generic_address(struct acpi_generic_address *addr);
void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
/drivers/include/acpi/acpiosxf.h
7,7 → 7,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
349,12 → 349,28
#endif
 
/*
* Debug input
* Debug IO
*/
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_line
acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read);
#endif
 
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize_command_signals
acpi_status acpi_os_initialize_command_signals(void);
#endif
 
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate_command_signals
void acpi_os_terminate_command_signals(void);
#endif
 
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_wait_command_ready
acpi_status acpi_os_wait_command_ready(void);
#endif
 
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_notify_command_complete
acpi_status acpi_os_notify_command_complete(void);
#endif
 
/*
* Obtain ACPI table(s)
*/
/drivers/include/acpi/acpixf.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
46,7 → 46,7
 
/* Current ACPICA subsystem version in YYYYMMDD format */
 
#define ACPI_CA_VERSION 0x20150930
#define ACPI_CA_VERSION 0x20160108
 
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
190,6 → 190,11
ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
 
/*
* Optionally support group module level code.
*/
ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE);
 
/*
* Optionally use 32-bit FADT addresses if and when there is a conflict
* (address mismatch) between the 32-bit and 64-bit versions of the
* address. Although ACPICA adheres to the ACPI specification which
263,7 → 268,20
ACPI_INIT_GLOBAL(u32, acpi_dbg_level, ACPI_DEBUG_DEFAULT);
ACPI_INIT_GLOBAL(u32, acpi_dbg_layer, 0);
 
/* Optionally enable timer output with Debug Object output */
 
ACPI_INIT_GLOBAL(u8, acpi_gbl_display_debug_timer, FALSE);
 
/*
* Debugger command handshake globals. Host OSes need to access these
* variables to implement their own command handshake mechanism.
*/
#ifdef ACPI_DEBUGGER
ACPI_INIT_GLOBAL(u8, acpi_gbl_method_executing, FALSE);
ACPI_GLOBAL(char, acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]);
#endif
 
/*
* Other miscellaneous globals
*/
ACPI_GLOBAL(struct acpi_table_fadt, acpi_gbl_FADT);
366,6 → 384,29
 
#endif /* ACPI_APPLICATION */
 
/*
* Debugger prototypes
*
* All interfaces used by debugger will be configured
* out of the ACPICA build unless the ACPI_DEBUGGER
* flag is defined.
*/
#ifdef ACPI_DEBUGGER
#define ACPI_DBR_DEPENDENT_RETURN_OK(prototype) \
ACPI_EXTERNAL_RETURN_OK(prototype)
 
#define ACPI_DBR_DEPENDENT_RETURN_VOID(prototype) \
ACPI_EXTERNAL_RETURN_VOID(prototype)
 
#else
#define ACPI_DBR_DEPENDENT_RETURN_OK(prototype) \
static ACPI_INLINE prototype {return(AE_OK);}
 
#define ACPI_DBR_DEPENDENT_RETURN_VOID(prototype) \
static ACPI_INLINE prototype {return;}
 
#endif /* ACPI_DEBUGGER */
 
/*****************************************************************************
*
* ACPICA public interface prototypes
822,17 → 863,9
ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_leave_sleep_state(u8 sleep_state))
 
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_set_firmware_waking_vectors
acpi_set_firmware_waking_vector
(acpi_physical_address physical_address,
acpi_physical_address physical_address64))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_set_firmware_waking_vector(u32
physical_address))
#if ACPI_MACHINE_WIDTH == 64
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_set_firmware_waking_vector64(u64
physical_address))
#endif
/*
* ACPI Timer interfaces
*/
864,11 → 897,9
acpi_warning(const char *module_name,
u32 line_number,
const char *format, ...))
ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3)
ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
void ACPI_INTERNAL_VAR_XFACE
acpi_info(const char *module_name,
u32 line_number,
const char *format, ...))
acpi_info(const char *format, ...))
ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3)
void ACPI_INTERNAL_VAR_XFACE
acpi_bios_error(const char *module_name,
929,6 → 960,8
void **data,
void (*callback)(void *)))
 
void acpi_run_debugger(char *batch_buffer);
 
void acpi_set_debugger_thread_id(acpi_thread_id thread_id);
 
#endif /* __ACXFACE_H__ */
/drivers/include/acpi/acrestyp.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/actbl.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/actbl1.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/actbl2.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/actbl3.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/actypes.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
1148,7 → 1148,7
 
#define ACPI_PCICLS_STRING_SIZE 7 /* Includes null terminator */
 
/* Structures used for device/processor HID, UID, CID, and SUB */
/* Structures used for device/processor HID, UID, CID */
 
struct acpi_pnp_device_id {
u32 length; /* Length of string + null */
1178,7 → 1178,6
u64 address; /* _ADR value */
struct acpi_pnp_device_id hardware_id; /* _HID value */
struct acpi_pnp_device_id unique_id; /* _UID value */
struct acpi_pnp_device_id subsystem_id; /* _SUB value */
struct acpi_pnp_device_id class_code; /* _CLS value */
struct acpi_pnp_device_id_list compatible_id_list; /* _CID list <must be last> */
};
1193,13 → 1192,12
#define ACPI_VALID_ADR 0x0002
#define ACPI_VALID_HID 0x0004
#define ACPI_VALID_UID 0x0008
#define ACPI_VALID_SUB 0x0010
#define ACPI_VALID_CID 0x0020
#define ACPI_VALID_CLS 0x0040
#define ACPI_VALID_SXDS 0x0100
#define ACPI_VALID_SXWS 0x0200
 
/* Flags for _STA return value (current_status above) */
/* Flags for _STA method */
 
#define ACPI_STA_DEVICE_PRESENT 0x01
#define ACPI_STA_DEVICE_ENABLED 0x02
/drivers/include/acpi/platform/acenv.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/platform/acenvex.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/platform/acgcc.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/platform/aclinux.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
150,6 → 150,8
*/
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_readable
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_writable
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize_command_signals
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate_command_signals
 
/*
* OSL interfaces used by utilities
/drivers/include/acpi/platform/aclinuxex.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* Copyright (C) 2000 - 2016, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
127,6 → 127,16
return TRUE;
}
 
static inline acpi_status acpi_os_initialize_command_signals(void)
{
return AE_OK;
}
 
static inline void acpi_os_terminate_command_signals(void)
{
return;
}
 
/*
* OSL interfaces added by Linux
*/
/drivers/include/asm/bitsperlong.h
File deleted
/drivers/include/asm/types.h
File deleted
/drivers/include/asm/atomic_32.h
File deleted
/drivers/include/asm/swab.h
File deleted
/drivers/include/asm/scatterlist.h
File deleted
/drivers/include/asm/posix_types_32.h
File deleted
/drivers/include/asm/byteorder.h
File deleted
/drivers/include/asm/alternative.h
152,12 → 152,6
".popsection"
 
/*
* This must be included *after* the definition of ALTERNATIVE due to
* <asm/arch_hweight.h>
*/
#include <asm/cpufeature.h>
 
/*
* Alternative instructions for different CPU types or capabilities.
*
* This allows to use optimized instructions even on generic binary
/drivers/include/asm/arch_hweight.h
1,6 → 1,8
#ifndef _ASM_X86_HWEIGHT_H
#define _ASM_X86_HWEIGHT_H
 
#include <asm/cpufeatures.h>
 
#ifdef CONFIG_64BIT
/* popcnt %edi, %eax -- redundant REX prefix for alignment */
#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
/drivers/include/asm/asm.h
44,19 → 44,22
 
/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE(from,to) \
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
.pushsection "__ex_table","a" ; \
.balign 8 ; \
.balign 4 ; \
.long (from) - . ; \
.long (to) - . ; \
.long (handler) - . ; \
.popsection
 
# define _ASM_EXTABLE(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
 
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
 
# define _ASM_EXTABLE_EX(from,to) \
.pushsection "__ex_table","a" ; \
.balign 8 ; \
.long (from) - . ; \
.long (to) - . + 0x7ffffff0 ; \
.popsection
_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
 
# define _ASM_NOKPROBE(entry) \
.pushsection "_kprobe_blacklist","aw" ; \
89,19 → 92,24
.endm
 
#else
# define _ASM_EXTABLE(from,to) \
# define _EXPAND_EXTABLE_HANDLE(x) #x
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
" .pushsection \"__ex_table\",\"a\"\n" \
" .balign 8\n" \
" .balign 4\n" \
" .long (" #from ") - .\n" \
" .long (" #to ") - .\n" \
" .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n" \
" .popsection\n"
 
# define _ASM_EXTABLE(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
 
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
 
# define _ASM_EXTABLE_EX(from,to) \
" .pushsection \"__ex_table\",\"a\"\n" \
" .balign 8\n" \
" .long (" #from ") - .\n" \
" .long (" #to ") - . + 0x7ffffff0\n" \
" .popsection\n"
_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
 
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif
 
/drivers/include/asm/barrier.h
6,7 → 6,7
 
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* And yes, this might be required on UP too when we're talking
* to devices.
*/
 
31,21 → 31,11
#endif
#define dma_wmb() barrier()
 
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() dma_rmb()
#define smp_wmb() barrier()
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */
#define __smp_mb() mb()
#define __smp_rmb() dma_rmb()
#define __smp_wmb() barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
 
#if defined(CONFIG_X86_PPRO_FENCE)
 
/*
/drivers/include/asm/bitops.h
91,7 → 91,7
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __set_bit(long nr, volatile unsigned long *addr)
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
128,13 → 128,13
* clear_bit() is atomic and implies release semantics before the memory
* operation. It can be used for an unlock.
*/
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
clear_bit(nr, addr);
}
 
static inline void __clear_bit(long nr, volatile unsigned long *addr)
static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
151,7 → 151,7
* No memory barrier is required here, because x86 cannot reorder stores past
* older loads. Same principle as spin_unlock.
*/
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
166,7 → 166,7
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __change_bit(long nr, volatile unsigned long *addr)
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
180,7 → 180,7
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void change_bit(long nr, volatile unsigned long *addr)
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "xorb %1,%0"
201,7 → 201,7
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
}
228,7 → 228,7
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
 
247,7 → 247,7
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
}
268,7 → 268,7
* accessed from a hypervisor on the same CPU if running in a VM: don't change
* this without also updating arch/x86/kernel/kvm.c
*/
static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
 
280,7 → 280,7
}
 
/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
 
300,7 → 300,7
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
static __always_inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
}
311,7 → 311,7
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
 
static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr)
{
int oldbit;
 
343,7 → 343,7
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static inline unsigned long __ffs(unsigned long word)
static __always_inline unsigned long __ffs(unsigned long word)
{
asm("rep; bsf %1,%0"
: "=r" (word)
357,7 → 357,7
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
static inline unsigned long ffz(unsigned long word)
static __always_inline unsigned long ffz(unsigned long word)
{
asm("rep; bsf %1,%0"
: "=r" (word)
371,7 → 371,7
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static inline unsigned long __fls(unsigned long word)
static __always_inline unsigned long __fls(unsigned long word)
{
asm("bsr %1,%0"
: "=r" (word)
393,7 → 393,7
* set bit if value is nonzero. The first (least significant) bit
* is at position 1.
*/
static inline int ffs(int x)
static __always_inline int ffs(int x)
{
int r;
 
434,7 → 434,7
* set bit if value is nonzero. The last (most significant) bit is
* at position 32.
*/
static inline int fls(int x)
static __always_inline int fls(int x)
{
int r;
 
/drivers/include/asm/cacheflush.h
4,6 → 4,7
/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>
#include <asm/uaccess.h>
 
/*
* The set_memory_* API can be used to change various attributes of a virtual
113,16 → 114,10
 
#define mmio_flush_range(addr, size) clflush_cache_range(addr, size)
 
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif
 
#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
/drivers/include/asm/cmpxchg.h
2,6 → 2,7
#define ASM_X86_CMPXCHG_H
 
#include <linux/compiler.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
#define __HAVE_ARCH_CMPXCHG 1
/drivers/include/asm/cpufeature.h
1,289 → 1,8
/*
* Defines x86 CPU feature bits
*/
#ifndef _ASM_X86_CPUFEATURE_H
#define _ASM_X86_CPUFEATURE_H
 
#ifndef _ASM_X86_REQUIRED_FEATURES_H
#include <asm/required-features.h>
#endif
#include <asm/processor.h>
 
#ifndef _ASM_X86_DISABLED_FEATURES_H
#include <asm/disabled-features.h>
#endif
 
#define NCAPINTS 16 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
 
/*
* Note: If the comment begins with a quoted string, that string is used
* in /proc/cpuinfo instead of the macro name. If the string is "",
* this feature bit is not displayed in /proc/cpuinfo at all.
*/
 
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
/* (plus FCMOVcc, FCOMI with FPU) */
#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
 
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
 
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
 
/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
/* cpu types for specific tunings: */
#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
 
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
 
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
 
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
 
/*
* Auxiliary flags: Linux defined - For features scattered in various
* CPUID levels like 0x6, 0xA etc, word 7.
*
* Reuse free bits when adding new feature flags!
*/
 
#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
 
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
 
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
 
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
 
 
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
 
/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
 
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
 
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
 
/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
 
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
 
/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
 
/*
* BUG word(s)
*/
#define X86_BUG(x) (NCAPINTS*32 + (x))
 
#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
 
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
#include <asm/asm.h>
307,6 → 26,7
CPUID_8000_0008_EBX,
CPUID_6_EAX,
CPUID_8000_000A_EDX,
CPUID_7_ECX,
};
 
#ifdef CONFIG_X86_FEATURE_NAMES
338,7 → 58,14
(((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \
(((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \
(((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )
(((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9 )) || \
(((bit)>>5)==10 && (1UL<<((bit)&31) & REQUIRED_MASK10)) || \
(((bit)>>5)==11 && (1UL<<((bit)&31) & REQUIRED_MASK11)) || \
(((bit)>>5)==12 && (1UL<<((bit)&31) & REQUIRED_MASK12)) || \
(((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK13)) || \
(((bit)>>5)==14 && (1UL<<((bit)&31) & REQUIRED_MASK14)) || \
(((bit)>>5)==15 && (1UL<<((bit)&31) & REQUIRED_MASK15)) || \
(((bit)>>5)==16 && (1UL<<((bit)&31) & REQUIRED_MASK16)) )
 
#define DISABLED_MASK_BIT_SET(bit) \
( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0)) || \
350,7 → 77,14
(((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6)) || \
(((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7)) || \
(((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8)) || \
(((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) )
(((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9 )) || \
(((bit)>>5)==10 && (1UL<<((bit)&31) & DISABLED_MASK10)) || \
(((bit)>>5)==11 && (1UL<<((bit)&31) & DISABLED_MASK11)) || \
(((bit)>>5)==12 && (1UL<<((bit)&31) & DISABLED_MASK12)) || \
(((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK13)) || \
(((bit)>>5)==14 && (1UL<<((bit)&31) & DISABLED_MASK14)) || \
(((bit)>>5)==15 && (1UL<<((bit)&31) & DISABLED_MASK15)) || \
(((bit)>>5)==16 && (1UL<<((bit)&31) & DISABLED_MASK16)) )
 
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
369,8 → 103,7
* is not relevant.
*/
#define cpu_feature_enabled(bit) \
(__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : \
cpu_has(&boot_cpu_data, bit))
(__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : static_cpu_has(bit))
 
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
 
406,107 → 139,20
#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
/*
* Do not add any more of those clumsy macros - use static_cpu_has_safe() for
* Do not add any more of those clumsy macros - use static_cpu_has() for
* fast paths and boot_cpu_has() otherwise!
*/
 
#if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS)
extern void warn_pre_alternatives(void);
extern bool __static_cpu_has_safe(u16 bit);
 
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
/*
* Static testing of CPU features. Used the same as boot_cpu_has().
* These are only valid after alternatives have run, but will statically
* patch the target code for additional performance.
* These will statically patch the target code for additional
* performance.
*/
static __always_inline __pure bool __static_cpu_has(u16 bit)
static __always_inline __pure bool _static_cpu_has(u16 bit)
{
#ifdef CC_HAVE_ASM_GOTO
 
#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
 
/*
* Catch too early usage of this before alternatives
* have run.
*/
asm_volatile_goto("1: jmp %l[t_warn]\n"
asm_volatile_goto("1: jmp 6f\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n"
" .long 0\n" /* no replacement */
" .word %P0\n" /* 1: do replace */
" .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
/* skipping size check since replacement size = 0 */
: : "i" (X86_FEATURE_ALWAYS) : : t_warn);
 
#endif
 
asm_volatile_goto("1: jmp %l[t_no]\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n"
" .long 0\n" /* no replacement */
" .word %P0\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
/* skipping size check since replacement size = 0 */
: : "i" (bit) : : t_no);
return true;
t_no:
return false;
 
#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
t_warn:
warn_pre_alternatives();
return false;
#endif
 
#else /* CC_HAVE_ASM_GOTO */
 
u8 flag;
/* Open-coded due to __stringify() in ALTERNATIVE() */
asm volatile("1: movb $0,%0\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n"
" .long 3f - .\n"
" .word %P1\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
".previous\n"
".section .altinstr_replacement,\"ax\"\n"
"3: movb $1,%0\n"
"4:\n"
".previous\n"
: "=qm" (flag) : "i" (bit));
return flag;
 
#endif /* CC_HAVE_ASM_GOTO */
}
 
#define static_cpu_has(bit) \
( \
__builtin_constant_p(boot_cpu_has(bit)) ? \
boot_cpu_has(bit) : \
__builtin_constant_p(bit) ? \
__static_cpu_has(bit) : \
boot_cpu_has(bit) \
)
 
static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
{
#ifdef CC_HAVE_ASM_GOTO
asm_volatile_goto("1: jmp %l[t_dynamic]\n"
"2:\n"
".skip -(((5f-4f) - (2b-1b)) > 0) * "
"((5f-4f) - (2b-1b)),0x90\n"
"3:\n"
530,66 → 176,34
" .byte 0\n" /* repl len */
" .byte 0\n" /* pad len */
".previous\n"
: : "i" (bit), "i" (X86_FEATURE_ALWAYS)
: : t_dynamic, t_no);
".section .altinstr_aux,\"ax\"\n"
"6:\n"
" testb %[bitnum],%[cap_byte]\n"
" jnz %l[t_yes]\n"
" jmp %l[t_no]\n"
".previous\n"
: : "i" (bit), "i" (X86_FEATURE_ALWAYS),
[bitnum] "i" (1 << (bit & 7)),
[cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
: : t_yes, t_no);
t_yes:
return true;
t_no:
return false;
t_dynamic:
return __static_cpu_has_safe(bit);
#else
u8 flag;
/* Open-coded due to __stringify() in ALTERNATIVE() */
asm volatile("1: movb $2,%0\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 3f - .\n" /* repl offset */
" .word %P2\n" /* always replace */
" .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
".previous\n"
".section .altinstr_replacement,\"ax\"\n"
"3: movb $0,%0\n"
"4:\n"
".previous\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 5f - .\n" /* repl offset */
" .word %P1\n" /* feature bit */
" .byte 4b - 3b\n" /* src len */
" .byte 6f - 5f\n" /* repl len */
" .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
".previous\n"
".section .altinstr_replacement,\"ax\"\n"
"5: movb $1,%0\n"
"6:\n"
".previous\n"
: "=qm" (flag)
: "i" (bit), "i" (X86_FEATURE_ALWAYS));
return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
#endif /* CC_HAVE_ASM_GOTO */
}
 
#define static_cpu_has_safe(bit) \
#define static_cpu_has(bit) \
( \
__builtin_constant_p(boot_cpu_has(bit)) ? \
boot_cpu_has(bit) : \
_static_cpu_has_safe(bit) \
_static_cpu_has(bit) \
)
#else
/*
* gcc 3.x is too stupid to do the static test; fall back to dynamic.
* Fall back to dynamic for gcc versions which don't support asm goto. Should be
* a minority now anyway.
*/
#define static_cpu_has(bit) boot_cpu_has(bit)
#define static_cpu_has_safe(bit) boot_cpu_has(bit)
#endif
 
#define cpu_has_bug(c, bit) cpu_has(c, (bit))
597,7 → 211,6
#define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit))
 
#define static_cpu_has_bug(bit) static_cpu_has((bit))
#define static_cpu_has_bug_safe(bit) static_cpu_has_safe((bit))
#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit))
 
#define MAX_CPU_FEATURES (NCAPINTS * 32)
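 
This revision folds the old static_cpu_has()/static_cpu_has_safe() pair into a single static_cpu_has() that is always safe to call: before alternatives have run it branches through the new .altinstr_aux stub, which tests the bit in boot_cpu_data.x86_capability, and afterwards it becomes a statically patched jump. A hedged usage sketch (the function names are illustrative; the feature bits are the ones defined in this header):
 
#include <linux/types.h>
#include <asm/cpufeature.h>
 
/* Hot path: after alternatives run this compiles down to a patched jmp. */
static bool use_clflushopt(void)
{
	return static_cpu_has(X86_FEATURE_CLFLUSHOPT);
}
 
/* Cold path (probe/init code): the plain capability-table lookup is fine. */
static bool have_avx2(void)
{
	return boot_cpu_has(X86_FEATURE_AVX2);
}
 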
/drivers/include/asm/cpufeatures.h
0,0 → 1,306
#ifndef _ASM_X86_CPUFEATURES_H
#define _ASM_X86_CPUFEATURES_H
 
#ifndef _ASM_X86_REQUIRED_FEATURES_H
#include <asm/required-features.h>
#endif
 
#ifndef _ASM_X86_DISABLED_FEATURES_H
#include <asm/disabled-features.h>
#endif
 
/*
* Defines x86 CPU feature bits
*/
#define NCAPINTS 17 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
 
/*
* Note: If the comment begins with a quoted string, that string is used
* in /proc/cpuinfo instead of the macro name. If the string is "",
* this feature bit is not displayed in /proc/cpuinfo at all.
*/
 
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
/* (plus FCMOVcc, FCOMI with FPU) */
#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
 
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
 
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
 
/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
/* cpu types for specific tunings: */
#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */
#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
#define X86_FEATURE_MCE_RECOVERY ( 3*32+31) /* cpu has recoverable machine checks */
 
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
 
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
 
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
 
/*
* Auxiliary flags: Linux defined - For features scattered in various
* CPUID levels like 0x6, 0xA etc, word 7.
*
* Reuse free bits when adding new feature flags!
*/
 
#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
 
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
 
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
 
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
 
 
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
 
/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
 
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
 
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
 
/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
 
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
 
/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
 
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
 
/*
* BUG word(s)
*/
#define X86_BUG(x) (NCAPINTS*32 + (x))
 
#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
 
#ifdef CONFIG_X86_32
/*
* 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
* to avoid confusion.
*/
#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
#endif
 
#endif /* _ASM_X86_CPUFEATURES_H */
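 
Every X86_FEATURE_* constant in the new cpufeatures.h encodes word*32 + bit into the x86_capability[] array, which is why adding word 16 (CPUID 0x7:0 ECX — PKU/OSPKE) requires NCAPINTS to go from 16 to 17 and the REQUIRED_MASK/DISABLED_MASK checks in cpufeature.h to grow matching ...MASK16 terms. A small decomposition sketch, assuming only the definitions in this header (FEATURE_WORD, FEATURE_BIT and raw_cpu_has are illustrative names, not kernel API):
 
#include <asm/cpufeatures.h>
 
#define FEATURE_WORD(f)	((f) >> 5)	/* f / 32: index into x86_capability[] */
#define FEATURE_BIT(f)	((f) & 31)	/* f % 32: bit inside that word */
 
/*
 * X86_FEATURE_PKU is (16*32 + 3), so:
 *   FEATURE_WORD(X86_FEATURE_PKU) == 16  ->  capability word 16
 *   FEATURE_BIT(X86_FEATURE_PKU)  ==  3  ->  mask (1u << 3)
 */
static int raw_cpu_has(const unsigned int *caps, unsigned int feature)
{
	return (caps[FEATURE_WORD(feature)] >> FEATURE_BIT(feature)) & 1;
}
 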
/drivers/include/asm/desc_defs.h
98,4 → 98,27
 
#endif /* !__ASSEMBLY__ */
 
/* Access rights as returned by LAR */
#define AR_TYPE_RODATA (0 * (1 << 9))
#define AR_TYPE_RWDATA (1 * (1 << 9))
#define AR_TYPE_RODATA_EXPDOWN (2 * (1 << 9))
#define AR_TYPE_RWDATA_EXPDOWN (3 * (1 << 9))
#define AR_TYPE_XOCODE (4 * (1 << 9))
#define AR_TYPE_XRCODE (5 * (1 << 9))
#define AR_TYPE_XOCODE_CONF (6 * (1 << 9))
#define AR_TYPE_XRCODE_CONF (7 * (1 << 9))
#define AR_TYPE_MASK (7 * (1 << 9))
 
#define AR_DPL0 (0 * (1 << 13))
#define AR_DPL3 (3 * (1 << 13))
#define AR_DPL_MASK (3 * (1 << 13))
 
#define AR_A (1 << 8) /* "Accessed" */
#define AR_S (1 << 12) /* If clear, "System" segment */
#define AR_P (1 << 15) /* "Present" */
#define AR_AVL (1 << 20) /* "AVaiLable" (no HW effect) */
#define AR_L (1 << 21) /* "Long mode" for code segments */
#define AR_DB (1 << 22) /* D/B, effect depends on type */
#define AR_G (1 << 23) /* "Granularity" (limit in pages) */
 
#endif /* _ASM_X86_DESC_DEFS_H */
/drivers/include/asm/disabled-features.h
28,6 → 28,14
# define DISABLE_CENTAUR_MCR 0
#endif /* CONFIG_X86_64 */
 
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
# define DISABLE_PKU 0
# define DISABLE_OSPKE 0
#else
# define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31))
# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31))
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 
/*
* Make sure to add features to the correct mask
*/
41,5 → 49,12
#define DISABLED_MASK7 0
#define DISABLED_MASK8 0
#define DISABLED_MASK9 (DISABLE_MPX)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 0
#define DISABLED_MASK12 0
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
#define DISABLED_MASK15 0
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE)
 
#endif /* _ASM_X86_DISABLED_FEATURES_H */
/drivers/include/asm/dma-mapping.h
46,8 → 46,6
#define HAVE_ARCH_DMA_SUPPORTED 1
extern int dma_supported(struct device *hwdev, u64 mask);
 
#include <asm-generic/dma-mapping-common.h>
 
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs);
/drivers/include/asm/fixmap.h
138,7 → 138,7
extern int fixmaps_set;
 
extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
#define kmap_prot PAGE_KERNEL
extern pte_t *pkmap_page_table;
 
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
/drivers/include/asm/fpu/types.h
108,6 → 108,8
XFEATURE_OPMASK,
XFEATURE_ZMM_Hi256,
XFEATURE_Hi16_ZMM,
XFEATURE_PT_UNIMPLEMENTED_SO_FAR,
XFEATURE_PKRU,
 
XFEATURE_MAX,
};
120,6 → 122,7
#define XFEATURE_MASK_OPMASK (1 << XFEATURE_OPMASK)
#define XFEATURE_MASK_ZMM_Hi256 (1 << XFEATURE_ZMM_Hi256)
#define XFEATURE_MASK_Hi16_ZMM (1 << XFEATURE_Hi16_ZMM)
#define XFEATURE_MASK_PKRU (1 << XFEATURE_PKRU)
 
#define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
#define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK \
212,6 → 215,15
struct reg_512_bit hi16_zmm[16];
} __packed;
 
/*
* State component 9: 32-bit PKRU register. The state is
* 8 bytes long but only 4 bytes is used currently.
*/
struct pkru_state {
u32 pkru;
u32 pad;
} __packed;
 
struct xstate_header {
u64 xfeatures;
u64 xcomp_bv;
/drivers/include/asm/io.h
152,7 → 152,7
* If the area you are trying to map is a PCI BAR you should have a
* look at pci_iomap().
*/
//extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
#define ioremap_uc ioremap_uc
 
163,12 → 163,12
/*
* The default ioremap() behavior is non-cached:
*/
//static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
//{
// return ioremap_nocache(offset, size);
//}
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
 
//extern void iounmap(volatile void __iomem *addr);
extern void iounmap(volatile void __iomem *addr);
 
extern void set_iounmap_nonlazy(void);
 
296,7 → 296,7
 
extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
enum page_cache_mode pcm);
//extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
 
extern bool is_early_ioremap_ptep(pte_t *ptep);
/drivers/include/asm/iomap.h
0,0 → 1,40
#ifndef _ASM_X86_IOMAP_H
#define _ASM_X86_IOMAP_H
 
/*
* Copyright © 2008 Ingo Molnar
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*/
 
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
 
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 
void
iounmap_atomic(void __iomem *kvaddr);
 
int
iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
 
void
iomap_free(resource_size_t base, unsigned long size);
 
#endif /* _ASM_X86_IOMAP_H */
/drivers/include/asm/msr-index.h
1,7 → 1,12
#ifndef _ASM_X86_MSR_INDEX_H
#define _ASM_X86_MSR_INDEX_H
 
/* CPU model specific register (MSR) numbers */
/*
* CPU model specific register (MSR) numbers.
*
* Do not add new entries to this file unless the definitions are shared
* between multiple compilation units.
*/
 
/* x86-64 specific MSRs */
#define MSR_EFER 0xc0000080 /* extended feature register */
162,6 → 167,14
#define MSR_PKG_C9_RESIDENCY 0x00000631
#define MSR_PKG_C10_RESIDENCY 0x00000632
 
/* Interrupt Response Limit */
#define MSR_PKGC3_IRTL 0x0000060a
#define MSR_PKGC6_IRTL 0x0000060b
#define MSR_PKGC7_IRTL 0x0000060c
#define MSR_PKGC8_IRTL 0x00000633
#define MSR_PKGC9_IRTL 0x00000634
#define MSR_PKGC10_IRTL 0x00000635
 
/* Run Time Average Power Limiting (RAPL) Interface */
 
#define MSR_RAPL_POWER_UNIT 0x00000606
185,6 → 198,7
#define MSR_PP1_ENERGY_STATUS 0x00000641
#define MSR_PP1_POLICY 0x00000642
 
/* Config TDP MSRs */
#define MSR_CONFIG_TDP_NOMINAL 0x00000648
#define MSR_CONFIG_TDP_LEVEL_1 0x00000649
#define MSR_CONFIG_TDP_LEVEL_2 0x0000064A
205,13 → 219,6
#define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0
#define MSR_RING_PERF_LIMIT_REASONS 0x000006B1
 
/* Config TDP MSRs */
#define MSR_CONFIG_TDP_NOMINAL 0x00000648
#define MSR_CONFIG_TDP_LEVEL1 0x00000649
#define MSR_CONFIG_TDP_LEVEL2 0x0000064A
#define MSR_CONFIG_TDP_CONTROL 0x0000064B
#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C
 
/* Hardware P state interface */
#define MSR_PPERF 0x0000064e
#define MSR_PERF_LIMIT_REASONS 0x0000064f
230,10 → 237,10
#define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11)
 
/* IA32_HWP_CAPABILITIES */
#define HWP_HIGHEST_PERF(x) (x & 0xff)
#define HWP_GUARANTEED_PERF(x) ((x & (0xff << 8)) >>8)
#define HWP_MOSTEFFICIENT_PERF(x) ((x & (0xff << 16)) >>16)
#define HWP_LOWEST_PERF(x) ((x & (0xff << 24)) >>24)
#define HWP_HIGHEST_PERF(x) (((x) >> 0) & 0xff)
#define HWP_GUARANTEED_PERF(x) (((x) >> 8) & 0xff)
#define HWP_MOSTEFFICIENT_PERF(x) (((x) >> 16) & 0xff)
#define HWP_LOWEST_PERF(x) (((x) >> 24) & 0xff)
 
/* IA32_HWP_REQUEST */
#define HWP_MIN_PERF(x) (x & 0xff)
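 
The IA32_HWP_CAPABILITIES accessors above are rewritten from mask-then-shift to shift-then-mask; both forms extract the same byte, the new form simply parenthesizes x and reads uniformly across the four fields. A hedged sketch of how they are typically consumed (rdmsrl() is the usual kernel accessor and MSR_HWP_CAPABILITIES is defined elsewhere in this header; neither is added by this diff):
 
#include <asm/msr.h>
#include <asm/msr-index.h>
 
/* Read the HWP performance range advertised by the CPU. */
static void read_hwp_range(unsigned int *lowest, unsigned int *highest)
{
	u64 cap;
 
	rdmsrl(MSR_HWP_CAPABILITIES, cap);
 
	*highest = HWP_HIGHEST_PERF(cap);	/* bits  7:0  */
	*lowest  = HWP_LOWEST_PERF(cap);	/* bits 31:24 */
}
 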
/drivers/include/asm/msr.h
42,14 → 42,6
struct saved_msr *array;
};
 
static inline unsigned long long native_read_tscp(unsigned int *aux)
{
unsigned long low, high;
asm volatile(".byte 0x0f,0x01,0xf9"
: "=a" (low), "=d" (high), "=c" (*aux));
return low | ((u64)high << 32);
}
 
/*
* both i386 and x86_64 returns 64-bit value in edx:eax, but gcc's "A"
* constraint has different meanings. For i386, "A" means exactly
67,11 → 59,34
#define EAX_EDX_RET(val, low, high) "=A" (val)
#endif
 
#ifdef CONFIG_TRACEPOINTS
/*
* Be very careful with includes. This header is prone to include loops.
*/
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>
 
extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
#endif
 
static inline unsigned long long native_read_msr(unsigned int msr)
{
DECLARE_ARGS(val, low, high);
 
asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
if (msr_tracepoint_active(__tracepoint_read_msr))
do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
return EAX_EDX_VAL(val, low, high);
}
 
88,6 → 103,8
_ASM_EXTABLE(2b, 3b)
: [err] "=r" (*err), EAX_EDX_RET(val, low, high)
: "c" (msr), [fault] "i" (-EIO));
if (msr_tracepoint_active(__tracepoint_read_msr))
do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
return EAX_EDX_VAL(val, low, high);
}
 
95,6 → 112,8
unsigned low, unsigned high)
{
asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}
 
/* Can be uninlined because referenced by paravirt */
112,6 → 131,8
: "c" (msr), "0" (low), "d" (high),
[fault] "i" (-EIO)
: "memory");
if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), err);
return err;
}
 
136,11 → 157,42
return EAX_EDX_VAL(val, low, high);
}
 
/**
* rdtsc_ordered() - read the current TSC in program order
*
* rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
* It is ordered like a load to a global in-memory counter. It should
* be impossible to observe non-monotonic rdtsc_unordered() behavior
* across multiple CPUs as long as the TSC is synced.
*/
static __always_inline unsigned long long rdtsc_ordered(void)
{
/*
* The RDTSC instruction is not ordered relative to memory
* access. The Intel SDM and the AMD APM are both vague on this
* point, but empirically an RDTSC instruction can be
* speculatively executed before prior loads. An RDTSC
* immediately after an appropriate barrier appears to be
* ordered as a normal load, that is, it provides the same
* ordering guarantees as reading from a global memory location
* that some other imaginary CPU is updating continuously with a
* time stamp.
*/
alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
"lfence", X86_FEATURE_LFENCE_RDTSC);
return rdtsc();
}
 
/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now) do { (now) = rdtsc_ordered(); } while (0)
 
static inline unsigned long long native_read_pmc(int counter)
{
DECLARE_ARGS(val, low, high);
 
asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
if (msr_tracepoint_active(__tracepoint_rdpmc))
do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
return EAX_EDX_VAL(val, low, high);
}
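 
rdtsc_ordered(), added above, pairs RDTSC with an MFENCE or LFENCE alternative so the read cannot be speculated ahead of earlier loads, which makes it suitable for simple interval timing. A minimal sketch, assuming a synchronized TSC and a caller that stays on one CPU:
 
#include <asm/msr.h>
 
/* Return the TSC ticks spent inside fn(). */
static unsigned long long tsc_ticks_for(void (*fn)(void))
{
	unsigned long long t0, t1;
 
	t0 = rdtsc_ordered();
	fn();
	t1 = rdtsc_ordered();
 
	return t1 - t0;
}
 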
 
/drivers/include/asm/pci.h
20,6 → 20,9
#ifdef CONFIG_X86_64
void *iommu; /* IOMMU private data */
#endif
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void *fwnode; /* IRQ domain for MSI assignment */
#endif
};
 
extern int pci_routeirq;
32,6 → 35,7
static inline int pci_domain_nr(struct pci_bus *bus)
{
struct pci_sysdata *sd = bus->sysdata;
 
return sd->domain;
}
 
41,6 → 45,17
}
#endif
 
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
static inline void *_pci_root_bus_fwnode(struct pci_bus *bus)
{
struct pci_sysdata *sd = bus->sysdata;
 
return sd->fwnode;
}
 
#define pci_root_bus_fwnode _pci_root_bus_fwnode
#endif
 
/* Can be used to override the logic in pci_scan_bus for skipping
already-configured bus numbers - to be used for buggy BIOSes
or architectures with incomplete PCI setup by the loader */
105,9 → 120,6
#include <asm/pci_64.h>
#endif
 
/* implement the pci_ DMA API in terms of the generic device dma_ one */
#include <asm-generic/pci-dma-compat.h>
 
/* generic pci stuff */
#include <asm-generic/pci.h>
 
/drivers/include/asm/pgtable.h
487,18 → 487,7
#endif
 
#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
if (pte_flags(a) & _PAGE_PRESENT)
return true;
 
if ((pte_flags(a) & _PAGE_PROTNONE) &&
mm_tlb_flush_pending(mm))
return true;
 
return false;
}
 
static inline int pte_hidden(pte_t pte)
{
return pte_flags(pte) & _PAGE_HIDDEN;
/drivers/include/asm/pgtable_32.h
14,6 → 14,7
*/
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
 
#include <linux/bitops.h>
/drivers/include/asm/pgtable_types.h
20,13 → 20,18
#define _PAGE_BIT_SOFTW2 10 /* " */
#define _PAGE_BIT_SOFTW3 11 /* " */
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_SOFTW4 58 /* available for programmer */
#define _PAGE_BIT_PKEY_BIT0 59 /* Protection Keys, bit 1/4 */
#define _PAGE_BIT_PKEY_BIT1 60 /* Protection Keys, bit 2/4 */
#define _PAGE_BIT_PKEY_BIT2 61 /* Protection Keys, bit 3/4 */
#define _PAGE_BIT_PKEY_BIT3 62 /* Protection Keys, bit 4/4 */
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
 
#define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1
#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_SOFTW4 58 /* available for programmer */
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
 
/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
47,8 → 52,24
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0)
#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1)
#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT2)
#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT3)
#else
#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 0))
#endif
#define __HAVE_ARCH_PTE_SPECIAL
 
#define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \
_PAGE_PKEY_BIT1 | \
_PAGE_PKEY_BIT2 | \
_PAGE_PKEY_BIT3)
 
#ifdef CONFIG_KMEMCHECK
#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
99,7 → 120,12
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
_PAGE_DIRTY)
 
/* Set of bits not changed in pte_modify */
/*
* Set of bits not changed in pte_modify. The pte's
* protection key is treated like _PAGE_RW, for
* instance, and is *not* included in this mask since
* pte_modify() does modify it.
*/
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_SOFT_DIRTY)
215,7 → 241,10
/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
 
/* Extracts the flags from a (pte|pmd|pud|pgd)val_t of a 4KB page */
/*
* Extracts the flags from a (pte|pmd|pud|pgd)val_t
* This includes the protection key value.
*/
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
 
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
/drivers/include/asm/processor.h
13,7 → 13,7
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
24,7 → 24,6
#include <asm/fpu/types.h>
 
#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
129,10 → 128,10
u16 booted_cores;
/* Physical processor id: */
u16 phys_proc_id;
/* Logical processor id: */
u16 logical_proc_id;
/* Core id: */
u16 cpu_core_id;
/* Compute unit id */
u8 compute_unit_id;
/* Index into per_cpu list: */
u16 cpu_index;
u32 microcode;
296,10 → 295,13
*/
unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
 
#ifdef CONFIG_X86_32
/*
* Space for the temporary SYSENTER stack:
* Space for the temporary SYSENTER stack.
*/
unsigned long SYSENTER_stack_canary;
unsigned long SYSENTER_stack[64];
#endif
 
} ____cacheline_aligned;
 
660,10 → 662,9
*/
static inline void prefetch(const void *x)
{
alternative_input(BASE_PREFETCH,
"prefetchnta (%1)",
alternative_input(BASE_PREFETCH, "prefetchnta %P1",
X86_FEATURE_XMM,
"r" (x));
"m" (*(const char *)x));
}
 
/*
673,10 → 674,9
*/
static inline void prefetchw(const void *x)
{
alternative_input(BASE_PREFETCH,
"prefetchw (%1)",
X86_FEATURE_3DNOW,
"r" (x));
alternative_input(BASE_PREFETCH, "prefetchw %P1",
X86_FEATURE_3DNOWPREFETCH,
"m" (*(const char *)x));
}
 
static inline void spin_lock_prefetch(const void *x)
757,7 → 757,7
* Return saved PC of a blocked thread.
* What is this good for? It will always be the scheduler or ret_from_fork.
*/
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
#define thread_saved_pc(t) READ_ONCE_NOCHECK(*(unsigned long *)((t)->thread.sp - 8))
 
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);
/drivers/include/asm/pvclock.h
65,10 → 65,5
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
#define PVCLOCK_VSYSCALL_NR_PAGES (((NR_CPUS-1)/(PAGE_SIZE/PVTI_SIZE))+1)
 
int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
int size);
struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu);
 
#endif /* _ASM_X86_PVCLOCK_H */
/drivers/include/asm/required-features.h
92,5 → 92,12
#define REQUIRED_MASK7 0
#define REQUIRED_MASK8 0
#define REQUIRED_MASK9 0
#define REQUIRED_MASK10 0
#define REQUIRED_MASK11 0
#define REQUIRED_MASK12 0
#define REQUIRED_MASK13 0
#define REQUIRED_MASK14 0
#define REQUIRED_MASK15 0
#define REQUIRED_MASK16 0
 
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
/drivers/include/asm/rwsem.h
25,7 → 25,7
* This should be totally fair - if anything is waiting, a process that wants a
* lock will go to the back of the queue. When the currently active lock is
* released, if there's a writer at the front of the queue, then that and only
* that will be woken up; if there's a bunch of consequtive readers at the
* that will be woken up; if there's a bunch of consecutive readers at the
* front, then they'll all be woken up, but no other readers will be.
*/
 
/drivers/include/asm/smap.h
0,0 → 1,79
/*
* Supervisor Mode Access Prevention support
*
* Copyright (C) 2012 Intel Corporation
* Author: H. Peter Anvin <hpa@linux.intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*/
 
#ifndef _ASM_X86_SMAP_H
#define _ASM_X86_SMAP_H
 
#include <linux/stringify.h>
#include <asm/nops.h>
#include <asm/cpufeatures.h>
 
/* "Raw" instruction opcodes */
#define __ASM_CLAC .byte 0x0f,0x01,0xca
#define __ASM_STAC .byte 0x0f,0x01,0xcb
 
#ifdef __ASSEMBLY__
 
#include <asm/alternative-asm.h>
 
#ifdef CONFIG_X86_SMAP
 
#define ASM_CLAC \
ALTERNATIVE "", __stringify(__ASM_CLAC), X86_FEATURE_SMAP
 
#define ASM_STAC \
ALTERNATIVE "", __stringify(__ASM_STAC), X86_FEATURE_SMAP
 
#else /* CONFIG_X86_SMAP */
 
#define ASM_CLAC
#define ASM_STAC
 
#endif /* CONFIG_X86_SMAP */
 
#else /* __ASSEMBLY__ */
 
#include <asm/alternative.h>
 
#ifdef CONFIG_X86_SMAP
 
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
}
 
static __always_inline void stac(void)
{
/* Note: a barrier is implicit in alternative() */
alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
}
 
/* These macros can be used in asm() statements */
#define ASM_CLAC \
ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
#define ASM_STAC \
ALTERNATIVE("", __stringify(__ASM_STAC), X86_FEATURE_SMAP)
 
#else /* CONFIG_X86_SMAP */
 
static inline void clac(void) { }
static inline void stac(void) { }
 
#define ASM_CLAC
#define ASM_STAC
 
#endif /* CONFIG_X86_SMAP */
 
#endif /* __ASSEMBLY__ */
 
#endif /* _ASM_X86_SMAP_H */
/drivers/include/asm/special_insns.h
4,6 → 4,8
 
#ifdef __KERNEL__
 
#include <asm/nops.h>
 
static inline void native_clts(void)
{
asm volatile("clts");
96,6 → 98,44
}
#endif
 
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline u32 __read_pkru(void)
{
u32 ecx = 0;
u32 edx, pkru;
 
/*
* "rdpkru" instruction. Places PKRU contents in to EAX,
* clears EDX and requires that ecx=0.
*/
asm volatile(".byte 0x0f,0x01,0xee\n\t"
: "=a" (pkru), "=d" (edx)
: "c" (ecx));
return pkru;
}
 
static inline void __write_pkru(u32 pkru)
{
u32 ecx = 0, edx = 0;
 
/*
* "wrpkru" instruction. Loads contents in EAX to PKRU,
* requires that ecx = edx = 0.
*/
asm volatile(".byte 0x0f,0x01,0xef\n\t"
: : "a" (pkru), "c"(ecx), "d"(edx));
}
#else
static inline u32 __read_pkru(void)
{
return 0;
}
 
static inline void __write_pkru(u32 pkru)
{
}
#endif
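/*
 * Illustrative sketch only: PKRU holds an access-disable and a write-disable
 * bit for each protection key (bits 2*key and 2*key + 1), so revoking all
 * rights for one key means setting both bits before writing the register
 * back. The helper name is hypothetical.
 */
static inline void __pkru_deny_all(unsigned int pkey)
{
u32 pkru = __read_pkru();

pkru |= 3U << (2 * pkey); /* AD | WD for this key */
__write_pkru(pkru);
}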
 
static inline void native_wbinvd(void)
{
asm volatile("wbinvd": : :"memory");
/drivers/include/asm/timex.h
1,7 → 1,7
#ifndef _ASM_X86_TIMEX_H
#define _ASM_X86_TIMEX_H
 
//#include <asm/processor.h>
#include <asm/processor.h>
//#include <asm/tsc.h>
 
/* Assume we use the PIT time source for the clock tick */
/drivers/include/asm/topology.h
119,6 → 119,7
 
extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
#define topology_logical_package_id(cpu) (cpu_data(cpu).logical_proc_id)
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
 
125,6 → 126,16
#ifdef ENABLE_TOPO_DEFINES
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
 
extern unsigned int __max_logical_packages;
#define topology_max_packages() (__max_logical_packages)
int topology_update_package_map(unsigned int apicid, unsigned int cpu);
extern int topology_phys_to_logical_pkg(unsigned int pkg);
#else
#define topology_max_packages() (1)
static inline int
topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
#endif
 
static inline void arch_fix_phys_package_id(int num, u32 slot)
/drivers/include/asm/uaccess.h
0,0 → 1,808
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
* User space memory access functions
*/
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
 
#define VERIFY_READ 0
#define VERIFY_WRITE 1
 
/*
* The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed; with
* get_fs() == KERNEL_DS, checking is bypassed.
*
* For historical reasons, these macros are grossly misnamed.
*/
 
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
 
#define KERNEL_DS MAKE_MM_SEG(-1UL)
#define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX)
 
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
 
#define segment_eq(a, b) ((a).seg == (b).seg)
 
#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr) \
((unsigned long __force)(addr) < user_addr_max())
 
/*
* Test whether a block of memory is a valid user space address.
* Returns 0 if the range is valid, nonzero otherwise.
*/
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
/*
* If we have used "sizeof()" for the size,
* we know it won't overflow the limit (but
* it might overflow the 'addr', so it's
* important to subtract the size from the
* limit, not add it to the address).
*/
if (__builtin_constant_p(size))
return unlikely(addr > limit - size);
 
/* Arbitrary sizes? Be careful about overflow */
addr += size;
if (unlikely(addr < size))
return true;
return unlikely(addr > limit);
}
 
#define __range_not_ok(addr, size, limit) \
({ \
__chk_user_ptr(addr); \
__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
 
/**
* access_ok: - Checks if a user space pointer is valid
* @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
* %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
* to write to a block, it is always safe to read from it.
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Checks if a pointer to a block of memory in user space is valid.
*
* Returns true (nonzero) if the memory block may be valid, false (zero)
* if it is definitely invalid.
*
* Note that, depending on architecture, this function probably just
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
#define access_ok(type, addr, size) \
likely(!__range_not_ok(addr, size, user_addr_max()))
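/*
 * Illustrative sketch only: a typical caller validates the whole user range
 * once with access_ok() and then uses the unchecked __-prefixed accessors
 * defined further down for the individual transfers. The helper name is
 * hypothetical.
 */
static inline int __example_read_two(int __user *uptr, int *a, int *b)
{
if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(*uptr)))
return -EFAULT;
if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
return -EFAULT;
return 0;
}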
 
/*
* The exception table consists of triples of addresses relative to the
* exception table entry itself. The first address is of an instruction
* that is allowed to fault, the second is the target at which the program
* should continue. The third is a handler function to deal with the fault
* caused by the instruction in the first field.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
 
struct exception_table_entry {
int insn, fixup, handler;
};
 
#define ARCH_HAS_RELATIVE_EXTABLE
 
#define swap_ex_entry_fixup(a, b, tmp, delta) \
do { \
(a)->fixup = (b)->fixup + (delta); \
(b)->fixup = (tmp).fixup - (delta); \
(a)->handler = (b)->handler + (delta); \
(b)->handler = (tmp).handler - (delta); \
} while (0)
 
extern int fixup_exception(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern int early_fixup_exception(unsigned long *ip);
 
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*
* This gets kind of ugly. We want to return _two_ values in "get_user()"
* and yet we don't want to do any pointers, because that is too much
* of a performance impact. Thus we have a few rather ugly macros here,
* and hide all the ugliness from the user.
*
* The "__xxx" versions of the user access functions are versions that
* do not verify the address space, that must have been done previously
* with a separate "access_ok()" call (this is used when we do multiple
* accesses to the same area of user memory).
*/
 
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);
 
#define __uaccess_begin() stac()
#define __uaccess_end() clac()
 
/*
* This is a type: either unsigned long, if the argument fits into
* that type, or otherwise unsigned long long.
*/
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 
/**
* get_user: - Get a simple variable from user space.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and the result of
* dereferencing @ptr must be assignable to @x without a cast.
*
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
/*
* Careful: we have to cast the result to the type of the pointer
* for sign reasons.
*
* The use of _ASM_DX as the register specifier is a bit of a
* simplification, as gcc only cares about it as the starting point
* and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
* (%ecx being the next register in gcc's x86 register sequence), and
* %rdx on 64 bits.
*
* Clang/LLVM cares about the size of the register, but still wants
* the base register for something that ends up being a pair.
*/
#define get_user(x, ptr) \
({ \
int __ret_gu; \
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
register void *__sp asm(_ASM_SP); \
__chk_user_ptr(ptr); \
might_fault(); \
asm volatile("call __get_user_%P4" \
: "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp) \
: "0" (ptr), "i" (sizeof(*(ptr)))); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
__builtin_expect(__ret_gu, 0); \
})
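/*
 * Illustrative sketch only: get_user() carries its own range check, so a
 * single checked read from user space is one call. The helper name is
 * hypothetical.
 */
static inline int __example_fetch_flags(unsigned int __user *uflags,
unsigned int *flags)
{
return get_user(*flags, uflags);
}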
 
#define __put_user_x(size, x, ptr, __ret_pu) \
asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
: "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 
 
 
#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret) \
asm volatile("\n" \
"1: movl %%eax,0(%2)\n" \
"2: movl %%edx,4(%2)\n" \
"3:" \
".section .fixup,\"ax\"\n" \
"4: movl %3,%0\n" \
" jmp 3b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
: "=r" (err) \
: "A" (x), "r" (addr), "i" (errret), "0" (err))
 
#define __put_user_asm_ex_u64(x, addr) \
asm volatile("\n" \
"1: movl %%eax,0(%1)\n" \
"2: movl %%edx,4(%1)\n" \
"3:" \
_ASM_EXTABLE_EX(1b, 2b) \
_ASM_EXTABLE_EX(2b, 3b) \
: : "A" (x), "r" (addr))
 
#define __put_user_x8(x, ptr, __ret_pu) \
asm volatile("call __put_user_8" : "=a" (__ret_pu) \
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr) \
__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif
 
extern void __put_user_bad(void);
 
/*
* Strange magic calling convention: pointer in %ecx,
* value in %eax(:%edx), return value in %eax. clobbers %rbx
*/
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
 
/**
* put_user: - Write a simple value into user space.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
* to the result of dereferencing @ptr.
*
* Returns zero on success, or -EFAULT on error.
*/
#define put_user(x, ptr) \
({ \
int __ret_pu; \
__typeof__(*(ptr)) __pu_val; \
__chk_user_ptr(ptr); \
might_fault(); \
__pu_val = x; \
switch (sizeof(*(ptr))) { \
case 1: \
__put_user_x(1, __pu_val, ptr, __ret_pu); \
break; \
case 2: \
__put_user_x(2, __pu_val, ptr, __ret_pu); \
break; \
case 4: \
__put_user_x(4, __pu_val, ptr, __ret_pu); \
break; \
case 8: \
__put_user_x8(__pu_val, ptr, __ret_pu); \
break; \
default: \
__put_user_x(X, __pu_val, ptr, __ret_pu); \
break; \
} \
__builtin_expect(__ret_pu, 0); \
})
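/*
 * Illustrative sketch only, mirroring the get_user() example above: one
 * checked write of a simple value back to user space. The helper name is
 * hypothetical.
 */
static inline int __example_store_status(int __user *ustatus, int status)
{
return put_user(status, ustatus);
}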
 
#define __put_user_size(x, ptr, size, retval, errret) \
do { \
retval = 0; \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
break; \
case 2: \
__put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
break; \
case 4: \
__put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
break; \
case 8: \
__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
errret); \
break; \
default: \
__put_user_bad(); \
} \
} while (0)
 
/*
* This doesn't do __uaccess_begin/end - the exception handling
* around it must do that.
*/
#define __put_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__put_user_asm_ex(x, ptr, "b", "b", "iq"); \
break; \
case 2: \
__put_user_asm_ex(x, ptr, "w", "w", "ir"); \
break; \
case 4: \
__put_user_asm_ex(x, ptr, "l", "k", "ir"); \
break; \
case 8: \
__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
break; \
default: \
__put_user_bad(); \
} \
} while (0)
 
#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif
 
#define __get_user_size(x, ptr, size, retval, errret) \
do { \
retval = 0; \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
break; \
case 2: \
__get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
break; \
case 4: \
__get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
break; \
case 8: \
__get_user_asm_u64(x, ptr, retval, errret); \
break; \
default: \
(x) = __get_user_bad(); \
} \
} while (0)
 
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
asm volatile("\n" \
"1: mov"itype" %2,%"rtype"1\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" xor"itype" %"rtype"1,%"rtype"1\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
 
/*
* This doesn't do __uaccess_begin/end - the exception handling
* around it must do that.
*/
#define __get_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__get_user_asm_ex(x, ptr, "b", "b", "=q"); \
break; \
case 2: \
__get_user_asm_ex(x, ptr, "w", "w", "=r"); \
break; \
case 4: \
__get_user_asm_ex(x, ptr, "l", "k", "=r"); \
break; \
case 8: \
__get_user_asm_ex_u64(x, ptr); \
break; \
default: \
(x) = __get_user_bad(); \
} \
} while (0)
 
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %1,%"rtype"0\n" \
"2:\n" \
_ASM_EXTABLE_EX(1b, 2b) \
: ltype(x) : "m" (__m(addr)))
 
#define __put_user_nocheck(x, ptr, size) \
({ \
int __pu_err; \
__uaccess_begin(); \
__put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
__uaccess_end(); \
__builtin_expect(__pu_err, 0); \
})
 
#define __get_user_nocheck(x, ptr, size) \
({ \
int __gu_err; \
unsigned long __gu_val; \
__uaccess_begin(); \
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
__uaccess_end(); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__builtin_expect(__gu_err, 0); \
})
 
/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
 
/*
* Tell gcc we read from memory instead of writing: this is because
* we do not write to any memory gcc knows about, so there are no
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
asm volatile("\n" \
"1: mov"itype" %"rtype"1,%2\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r"(err) \
: ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
 
#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %"rtype"0,%1\n" \
"2:\n" \
_ASM_EXTABLE_EX(1b, 2b) \
: : ltype(x), "m" (__m(addr)))
 
/*
* uaccess_try and catch
*/
#define uaccess_try do { \
current_thread_info()->uaccess_err = 0; \
__uaccess_begin(); \
barrier();
 
#define uaccess_catch(err) \
__uaccess_end(); \
(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
} while (0)
 
/**
* __get_user: - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and the result of
* dereferencing @ptr must be assignable to @x without a cast.
*
* Caller must check the pointer with access_ok() before calling this
* function.
*
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
 
#define __get_user(x, ptr) \
__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
 
/**
* __put_user: - Write a simple value into user space, with less checking.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
* to the result of dereferencing @ptr.
*
* Caller must check the pointer with access_ok() before calling this
* function.
*
* Returns zero on success, or -EFAULT on error.
*/
 
#define __put_user(x, ptr) \
__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
 
/*
* {get|put}_user_try and catch
*
* get_user_try {
* get_user_ex(...);
* } get_user_catch(err)
*/
#define get_user_try uaccess_try
#define get_user_catch(err) uaccess_catch(err)
 
#define get_user_ex(x, ptr) do { \
unsigned long __gue_val; \
__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
(x) = (__force __typeof__(*(ptr)))__gue_val; \
} while (0)
 
#define put_user_try uaccess_try
#define put_user_catch(err) uaccess_catch(err)
 
#define put_user_ex(x, ptr) \
__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
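/*
 * Illustrative sketch only, following the get_user_try pattern described
 * above: several reads share one exception-handling region and the error is
 * collected once at the end. The struct and helper are hypothetical.
 */
struct __example_uarg {
int a;
int b;
};

static inline int __example_read_uarg(struct __example_uarg __user *u,
struct __example_uarg *k)
{
int err = 0;

if (!access_ok(VERIFY_READ, u, sizeof(*u)))
return -EFAULT;

get_user_try {
get_user_ex(k->a, &u->a);
get_user_ex(k->b, &u->b);
} get_user_catch(err);

return err;
}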
 
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
 
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
 
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
 
extern void __cmpxchg_wrong_size(void)
__compiletime_error("Bad argument size for cmpxchg");
 
#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
({ \
int __ret = 0; \
__typeof__(ptr) __uval = (uval); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
__uaccess_begin(); \
switch (size) { \
case 1: \
{ \
asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
"2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
"\t.previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
: "i" (-EFAULT), "q" (__new), "1" (__old) \
: "memory" \
); \
break; \
} \
case 2: \
{ \
asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
"2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
"\t.previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
); \
break; \
} \
case 4: \
{ \
asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
"2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
"\t.previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
); \
break; \
} \
case 8: \
{ \
if (!IS_ENABLED(CONFIG_X86_64)) \
__cmpxchg_wrong_size(); \
\
asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
"2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
"\t.previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
); \
break; \
} \
default: \
__cmpxchg_wrong_size(); \
} \
__uaccess_end(); \
*__uval = __old; \
__ret; \
})
 
#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
({ \
access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
__user_atomic_cmpxchg_inatomic((uval), (ptr), \
(old), (new), sizeof(*(ptr))) : \
-EFAULT; \
})
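/*
 * Illustrative sketch only: a futex-style compare-and-exchange on a word in
 * user space; 'curval' receives the value that was observed at *uaddr. The
 * helper name is hypothetical.
 */
static inline int __example_cmpxchg_user(u32 __user *uaddr, u32 old,
u32 new, u32 *curval)
{
return user_atomic_cmpxchg_inatomic(curval, uaddr, old, new);
}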
 
/*
* movsl can be slow when source and dest are not both 8-byte aligned
*/
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif
 
#define ARCH_HAS_NOCACHE_UACCESS 1
 
#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif
 
unsigned long __must_check _copy_from_user(void *to, const void __user *from,
unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
unsigned n);
 
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
# define copy_user_diag __compiletime_warning
#endif
 
extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
 
#undef copy_user_diag
 
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
 
extern void
__compiletime_warning("copy_from_user() buffer size is not provably correct")
__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
 
extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
 
#else
 
static inline void
__copy_from_user_overflow(int size, unsigned long count)
{
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
 
#define __copy_to_user_overflow __copy_from_user_overflow
 
#endif
 
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
int sz = __compiletime_object_size(to);
 
might_fault();
 
/*
* While we would like to have the compiler do the checking for us
* even in the non-constant size case, any false positives there are
* a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
* without - the [hopefully] dangerous looking nature of the warning
* would make people go look at the respective call sites over and
* over again just to find that there's no problem).
*
* And there are cases where it's just not realistic for the compiler
* to prove the count to be in range. For example when multiple call
* sites of a helper function - perhaps in different source files -
* all doing proper range checking, yet the helper function not doing
* so again.
*
* Therefore limit the compile time checking to the constant size
* case, and do only runtime checking for non-constant sizes.
*/
 
if (likely(sz < 0 || sz >= n))
n = _copy_from_user(to, from, n);
else if (__builtin_constant_p(n))
copy_from_user_overflow();
else
__copy_from_user_overflow(sz, n);
 
return n;
}
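/*
 * Illustrative sketch only: the common case is copying a fixed-size
 * structure, so the object size is a compile-time constant and the overflow
 * checks above are resolved entirely at build time. The struct and helper
 * are hypothetical.
 */
struct __example_params {
u32 width;
u32 height;
};

static inline int __example_get_params(struct __example_params *p,
const void __user *arg)
{
if (copy_from_user(p, arg, sizeof(*p)))
return -EFAULT;
return 0;
}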
 
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
int sz = __compiletime_object_size(from);
 
might_fault();
 
/* See the comment in copy_from_user() above. */
if (likely(sz < 0 || sz >= n))
n = _copy_to_user(to, from, n);
else if (__builtin_constant_p(n))
copy_to_user_overflow();
else
__copy_to_user_overflow(sz, n);
 
return n;
}
 
#undef __copy_from_user_overflow
#undef __copy_to_user_overflow
 
/*
* We rely on the nested NMI work to allow atomic faults from the NMI path; the
* nested NMI paths are careful to preserve CR2.
*
* Caller must use pagefault_enable/disable, or run in interrupt context,
* and also do an access_ok() check
*/
#define __copy_from_user_nmi __copy_from_user_inatomic
 
/*
* The "unsafe" user accesses aren't really "unsafe", but the naming
* is a big fat warning: you have to not only do the access_ok()
* checking before using them, but you have to surround them with the
* user_access_begin/end() pair.
*/
#define user_access_begin() __uaccess_begin()
#define user_access_end() __uaccess_end()
 
#define unsafe_put_user(x, ptr) \
({ \
int __pu_err; \
__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
__builtin_expect(__pu_err, 0); \
})
 
#define unsafe_get_user(x, ptr) \
({ \
int __gu_err; \
unsigned long __gu_val; \
__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__builtin_expect(__gu_err, 0); \
})
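/*
 * Illustrative sketch only: the unsafe_*() accessors amortise one STAC/CLAC
 * pair over several transfers, but only inside an explicit
 * user_access_begin()/user_access_end() region and only after the ranges
 * have been checked with access_ok(). The helper name is hypothetical.
 */
static inline int __example_copy_two_words(u32 __user *dst, u32 __user *src)
{
u32 v0, v1;

if (!access_ok(VERIFY_READ, src, 2 * sizeof(u32)) ||
!access_ok(VERIFY_WRITE, dst, 2 * sizeof(u32)))
return -EFAULT;

user_access_begin();
if (unsafe_get_user(v0, src) || unsafe_get_user(v1, src + 1) ||
unsafe_put_user(v0, dst) || unsafe_put_user(v1, dst + 1)) {
user_access_end();
return -EFAULT;
}
user_access_end();
return 0;
}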
 
#endif /* _ASM_X86_UACCESS_H */
 
/drivers/include/asm/uaccess_32.h
0,0 → 1,208
#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H
 
/*
* User space memory access functions
*/
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
 
unsigned long __must_check __copy_to_user_ll
(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
(void *to, const void __user *from, unsigned long n);
 
/**
* __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
* The caller should also make sure the user space address is pinned
* so that we don't take a page fault and sleep.
*
* Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
* we return the initial request size (1, 2 or 4), as copy_*_user should do.
* If a store crosses a page boundary and gets a fault, the x86 will not write
* anything, so this is accurate.
*/
 
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
*(u16 __force *)to = *(u16 *)from;
return 0;
case 4:
*(u32 __force *)to = *(u32 *)from;
return 0;
 
case 8:
*(u64 __force *)to = *(u64 *)from;
return 0;
 
default:
break;
}
}
 
__builtin_memcpy((void __force *)to, from, n);
return 0;
}
 
/**
* __copy_to_user: - Copy a block of data into user space, with less checking.
* @to: Destination address, in user space.
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
return __copy_to_user_inatomic(to, from, n);
}
 
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
/* Avoid zeroing the tail if the copy fails..
* If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
* but as the zeroing behaviour is only significant when n is not
* constant, that shouldn't be a problem.
*/
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 *)to = *(u8 __force *)from;
return 0;
case 2:
*(u16 *)to = *(u16 __force *)from;
return 0;
case 4:
*(u32 *)to = *(u32 __force *)from;
return 0;
 
case 8:
*(u64 *)to = *(u64 __force *)from;
return 0;
 
default:
break;
}
}
 
__builtin_memcpy(to, (const void __force *)from, n);
return 0;
}
 
/**
* __copy_from_user: - Copy a block of data from user space, with less checking.
* @to: Destination address, in kernel space.
* @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep if pagefaults are
* enabled.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*
* An alternate version - __copy_from_user_inatomic() - may be called from
* atomic context and will fail rather than sleep. In this case the
* uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
* for explanation of why this is needed.
*/
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 *)to = *(u8 __force *)from;
return 0;
case 2:
*(u16 *)to = *(u16 __force *)from;
return 0;
case 4:
*(u32 *)to = *(u32 __force *)from;
return 0;
 
case 8:
*(u64 *)to = *(u64 __force *)from;
return 0;
 
default:
break;
}
}
 
__builtin_memcpy(to, (const void __force *)from, n);
return 0;
}
 
static __always_inline unsigned long __copy_from_user_nocache(void *to,
const void __user *from, unsigned long n)
{
might_fault();
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 *)to = *(u8 __force *)from;
return 0;
case 2:
*(u16 *)to = *(u16 __force *)from;
return 0;
case 4:
*(u32 *)to = *(u32 __force *)from;
return 0;
default:
break;
}
}
__builtin_memcpy(to, (const void __force *)from, n);
return 0;
}
 
static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
unsigned long n)
{
return __copy_from_user_inatomic(to, from, n);
}
 
#endif /* _ASM_X86_UACCESS_32_H */
/drivers/include/asm-generic/atomic-long.h
98,7 → 98,7
#define atomic_long_xchg(v, new) \
(ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
 
static inline void atomic_long_inc(atomic_long_t *l)
static __always_inline void atomic_long_inc(atomic_long_t *l)
{
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
105,7 → 105,7
ATOMIC_LONG_PFX(_inc)(v);
}
 
static inline void atomic_long_dec(atomic_long_t *l)
static __always_inline void atomic_long_dec(atomic_long_t *l)
{
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
113,7 → 113,7
}
 
#define ATOMIC_LONG_OP(op) \
static inline void \
static __always_inline void \
atomic_long_##op(long i, atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
/drivers/include/asm-generic/bug.h
81,6 → 81,12
do { printk(arg); __WARN_TAINT(taint); } while (0)
#endif
 
/* used internally by panic.c */
struct warn_args;
 
void __warn(const char *file, int line, void *caller, unsigned taint,
struct pt_regs *regs, struct warn_args *args);
 
#ifndef WARN_ON
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
110,9 → 116,10
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN_ON(!__warned)) \
if (unlikely(__ret_warn_once && !__warned)) { \
__warned = true; \
WARN_ON(1); \
} \
unlikely(__ret_warn_once); \
})
 
120,9 → 127,10
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN(!__warned, format)) \
if (unlikely(__ret_warn_once && !__warned)) { \
__warned = true; \
WARN(1, format); \
} \
unlikely(__ret_warn_once); \
})
 
130,9 → 138,10
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN_TAINT(!__warned, taint, format)) \
if (unlikely(__ret_warn_once && !__warned)) { \
__warned = true; \
WARN_TAINT(1, taint, format); \
} \
unlikely(__ret_warn_once); \
})
 
142,7 → 151,7
#endif
 
#ifndef HAVE_ARCH_BUG_ON
#define BUG_ON(condition) do { if (condition) ; } while (0)
#define BUG_ON(condition) do { if (condition) BUG(); } while (0)
#endif
 
#ifndef HAVE_ARCH_WARN_ON
/drivers/include/asm-generic/fixmap.h
72,10 → 72,10
/* Return a pointer with offset calculated */
#define __set_fixmap_offset(idx, phys, flags) \
({ \
unsigned long addr; \
unsigned long ________addr; \
__set_fixmap(idx, phys, flags); \
addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
addr; \
________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
________addr; \
})
 
#define set_fixmap_offset(idx, phys) \
/drivers/include/asm-generic/pci-dma-compat.h
54,11 → 54,7
{
 
}
#define pci_map_page(dev, page, offset, size, direction) \
(dma_addr_t)( (offset)+page_to_phys(page))
 
#define pci_unmap_page(dev, dma_address, size, direction)
 
static inline int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
/drivers/include/drm/i915_powerwell.h
File deleted
/drivers/include/drm/drmP.h
45,8 → 45,6
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
 
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/sched.h>
62,6 → 60,8
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
 
#include <asm/uaccess.h>
 
#include <uapi/drm/drm.h>
#include <uapi/drm/drm_mode.h>
 
190,6 → 190,8
drm_err(fmt, ##__VA_ARGS__); \
})
 
#if DRM_DEBUG_CODE
 
#define DRM_INFO(fmt, ...) \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
 
202,7 → 204,6
* \param fmt printf() like format string.
* \param arg arguments
*/
#if DRM_DEBUG_CODE
#define DRM_DEBUG(fmt, args...) \
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \
230,6 → 231,7
} while (0)
 
#else
#define DRM_INFO(fmt, ...) do { } while (0)
#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
#define DRM_DEBUG_KMS(fmt, args...) do { } while (0)
#define DRM_DEBUG_PRIME(fmt, args...) do { } while (0)
293,6 → 295,7
struct drm_pending_event {
struct drm_event *event;
struct list_head link;
struct list_head pending_link;
struct drm_file *file_priv;
pid_t pid; /* pid of requester, no guarantee it's valid by the time
we deliver the event, for tracing only */
351,8 → 354,11
struct list_head blobs;
 
wait_queue_head_t event_wait;
struct list_head pending_event_list;
struct list_head event_list;
int event_space;
 
struct mutex event_read_lock;
};
 
/**
801,16 → 807,26
unsigned int cmd, unsigned long arg);
extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
 
/* Device support (drm_fops.h) */
extern int drm_open(struct inode *inode, struct file *filp);
extern ssize_t drm_read(struct file *filp, char __user *buffer,
/* File Operations (drm_fops.c) */
int drm_open(struct inode *inode, struct file *filp);
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
extern int drm_release(struct inode *inode, struct file *filp);
extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
int drm_release(struct inode *inode, struct file *filp);
int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
int drm_event_reserve_init_locked(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_pending_event *p,
struct drm_event *e);
int drm_event_reserve_init(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_pending_event *p,
struct drm_event *e);
void drm_event_cancel_free(struct drm_device *dev,
struct drm_pending_event *p);
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
 
/* Mapping support (drm_vm.h) */
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
 
/* Misc. IOCTL support (drm_ioctl.c) */
int drm_noop(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/drivers/include/drm/drm_atomic_helper.h
146,6 → 146,9
struct drm_connector_state *state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
uint32_t start, uint32_t size);
 
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
/drivers/include/drm/drm_crtc.h
305,12 → 305,20
* @mode_changed: crtc_state->mode or crtc_state->enable has been changed
* @active_changed: crtc_state->active has been toggled.
* @connectors_changed: connectors to this crtc have been updated
* @color_mgmt_changed: color management properties have changed (degamma or
* gamma LUT or CSC matrix)
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
* @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
* @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders
* @last_vblank_count: for helpers and drivers to capture the vblank of the
* update to ensure framebuffer cleanup isn't done too early
* @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
* @mode: current mode timings
* @degamma_lut: Lookup table for converting framebuffer pixel data
* before applying the conversion matrix
* @ctm: Transformation matrix
* @gamma_lut: Lookup table for converting pixel data after the
* conversion matrix
* @event: optional pointer to a DRM event to signal upon completion of the
* state update
* @state: backpointer to global drm_atomic_state
332,6 → 340,7
bool mode_changed : 1;
bool active_changed : 1;
bool connectors_changed : 1;
bool color_mgmt_changed : 1;
 
/* attached planes bitmask:
* WARNING: transitional helpers do not maintain plane_mask so
341,6 → 350,7
u32 plane_mask;
 
u32 connector_mask;
u32 encoder_mask;
 
/* last_vblank_count: for vblank waits before cleanup */
u32 last_vblank_count;
353,6 → 363,11
/* blob property to expose current mode to atomic userspace */
struct drm_property_blob *mode_blob;
 
/* blob property to expose color management to userspace */
struct drm_property_blob *degamma_lut;
struct drm_property_blob *ctm;
struct drm_property_blob *gamma_lut;
 
struct drm_pending_vblank_event *event;
 
struct drm_atomic_state *state;
755,7 → 770,7
int x, y;
const struct drm_crtc_funcs *funcs;
 
/* CRTC gamma size for reporting to userspace */
/* Legacy FB CRTC gamma size for reporting to userspace */
uint32_t gamma_size;
uint16_t *gamma_store;
 
1582,6 → 1597,8
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
* The disable callback is optional.
*/
void (*disable)(struct drm_bridge *bridge);
 
1598,6 → 1615,8
* The bridge must assume that the display pipe (i.e. clocks and timing
* singals) feeding it is no longer running when this callback is
* called.
*
* The post_disable callback is optional.
*/
void (*post_disable)(struct drm_bridge *bridge);
 
1626,6 → 1645,8
* will not yet be running when this callback is called. The bridge must
* not enable the display link feeding the next bridge in the chain (if
* there is one) when this callback is called.
*
* The pre_enable callback is optional.
*/
void (*pre_enable)(struct drm_bridge *bridge);
 
1643,6 → 1664,8
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
* The enable callback is optional.
*/
void (*enable)(struct drm_bridge *bridge);
};
1675,6 → 1698,7
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
* @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
* @planes: pointer to array of plane pointers
* @plane_states: pointer to array of plane states pointers
* @crtcs: pointer to array of CRTC pointers
1688,6 → 1712,7
struct drm_device *dev;
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
bool legacy_set_config : 1;
struct drm_plane **planes;
struct drm_plane_state **plane_states;
struct drm_crtc **crtcs;
2024,6 → 2049,15
* @property_blob_list: list of all the blob property objects
* @blob_lock: mutex for blob property allocation and management
* @*_property: core property tracking
* @degamma_lut_property: LUT used to convert the framebuffer's colors to linear
* gamma
* @degamma_lut_size_property: size of the degamma LUT as supported by the
* driver (read-only)
* @ctm_property: Matrix used to convert colors after the lookup in the
* degamma LUT
* @gamma_lut_property: LUT used to convert the colors, after the CSC matrix, to
* the gamma space of the connected screen (read-only)
* @gamma_lut_size_property: size of the gamma LUT as supported by the driver
* @preferred_depth: preferred RGB pixel depth, used by fb helpers
* @prefer_shadow: hint to userspace to prefer shadow-fb rendering
* @async_page_flip: does this device support async flips on the primary plane?
2126,6 → 2160,13
struct drm_property *aspect_ratio_property;
struct drm_property *dirty_info_property;
 
/* Optional color correction properties */
struct drm_property *degamma_lut_property;
struct drm_property *degamma_lut_size_property;
struct drm_property *ctm_property;
struct drm_property *gamma_lut_property;
struct drm_property *gamma_lut_size_property;
 
/* properties for virtual machine layout */
struct drm_property *suggested_x_property;
struct drm_property *suggested_y_property;
2155,6 → 2196,17
list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
 
/**
* drm_for_each_encoder_mask - iterate over encoders specified by bitmask
* @encoder: the loop cursor
* @dev: the DRM device
* @encoder_mask: bitmask of encoder indices
*
* Iterate over all encoders specified by bitmask.
*/
#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
 
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
2231,6 → 2283,7
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...);
extern unsigned int drm_encoder_index(struct drm_encoder *encoder);
 
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
2288,6 → 2341,8
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
extern struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter);
extern struct edid *drm_edid_duplicate(const struct edid *edid);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_config_init(struct drm_device *dev);
2488,6 → 2543,8
extern int drm_format_plane_cpp(uint32_t format, int plane);
extern int drm_format_horz_chroma_subsampling(uint32_t format);
extern int drm_format_vert_chroma_subsampling(uint32_t format);
extern int drm_format_plane_width(int width, uint32_t format, int plane);
extern int drm_format_plane_height(int height, uint32_t format, int plane);
extern const char *drm_get_format_name(uint32_t format);
extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
unsigned int supported_rotations);
2536,6 → 2593,21
return mo ? obj_to_property(mo) : NULL;
}
 
/*
* Extract a degamma/gamma LUT value provided by user and round it to the
* precision supported by the hardware.
*/
static inline uint32_t drm_color_lut_extract(uint32_t user_input,
uint32_t bit_precision)
{
uint32_t val = user_input + (1 << (16 - bit_precision - 1));
uint32_t max = 0xffff >> (16 - bit_precision);
 
val >>= 16 - bit_precision;
 
return clamp_val(val, 0, max);
}
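/*
 * Illustrative worked example only: for hardware with 10-bit LUT entries,
 * drm_color_lut_extract(0x8000, 10) computes (0x8000 + (1 << 5)) >> 6 =
 * 0x200, while drm_color_lut_extract(0xffff, 10) rounds past the range and
 * is clamped to the 10-bit maximum 0x3ff.
 */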
 
/* Plane list iterator for legacy (overlay only) planes. */
#define drm_for_each_legacy_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
/drivers/include/drm/drm_crtc_helper.h
48,6 → 48,9
struct drm_display_mode *mode,
int x, int y,
struct drm_framebuffer *old_fb);
extern void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
int degamma_lut_size,
int gamma_lut_size);
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
/drivers/include/drm/drm_dp_aux_dev.h
0,0 → 1,62
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Rafael Antognolli <rafael.antognolli@intel.com>
*
*/
 
#ifndef DRM_DP_AUX_DEV
#define DRM_DP_AUX_DEV
 
#include <drm/drm_dp_helper.h>
 
#ifdef CONFIG_DRM_DP_AUX_CHARDEV
 
int drm_dp_aux_dev_init(void);
void drm_dp_aux_dev_exit(void);
int drm_dp_aux_register_devnode(struct drm_dp_aux *aux);
void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux);
 
#else
 
static inline int drm_dp_aux_dev_init(void)
{
return 0;
}
 
static inline void drm_dp_aux_dev_exit(void)
{
}
 
static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux)
{
return 0;
}
 
static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
{
}
 
#endif
 
#endif
/drivers/include/drm/drm_dp_dual_mode_helper.h
0,0 → 1,92
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
 
#ifndef DRM_DP_DUAL_MODE_HELPER_H
#define DRM_DP_DUAL_MODE_HELPER_H
 
#include <linux/types.h>
 
/*
* Optional for type 1 DVI adaptors
* Mandatory for type 1 HDMI and type 2 adaptors
*/
#define DP_DUAL_MODE_HDMI_ID 0x00 /* 00-0f */
#define DP_DUAL_MODE_HDMI_ID_LEN 16
/*
* Optional for type 1 adaptors
* Mandatory for type 2 adaptors
*/
#define DP_DUAL_MODE_ADAPTOR_ID 0x10
#define DP_DUAL_MODE_REV_MASK 0x07
#define DP_DUAL_MODE_REV_TYPE2 0x00
#define DP_DUAL_MODE_TYPE_MASK 0xf0
#define DP_DUAL_MODE_TYPE_TYPE2 0xa0
#define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/
#define DP_DUAL_IEEE_OUI_LEN 3
#define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */
#define DP_DUAL_DEVICE_ID_LEN 6
#define DP_DUAL_MODE_HARDWARE_REV 0x1a
#define DP_DUAL_MODE_FIRMWARE_MAJOR_REV 0x1b
#define DP_DUAL_MODE_FIRMWARE_MINOR_REV 0x1c
#define DP_DUAL_MODE_MAX_TMDS_CLOCK 0x1d
#define DP_DUAL_MODE_I2C_SPEED_CAP 0x1e
#define DP_DUAL_MODE_TMDS_OEN 0x20
#define DP_DUAL_MODE_TMDS_DISABLE 0x01
#define DP_DUAL_MODE_HDMI_PIN_CTRL 0x21
#define DP_DUAL_MODE_CEC_ENABLE 0x01
#define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22
 
struct i2c_adapter;
 
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
u8 offset, void *buffer, size_t size);
ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
u8 offset, const void *buffer, size_t size);
 
/**
* enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor
* @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor
* @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor
* @DRM_DP_DUAL_MODE_TYPE1_DVI: Type 1 DVI adaptor
* @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor
* @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor
* @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor
*/
enum drm_dp_dual_mode_type {
DRM_DP_DUAL_MODE_NONE,
DRM_DP_DUAL_MODE_UNKNOWN,
DRM_DP_DUAL_MODE_TYPE1_DVI,
DRM_DP_DUAL_MODE_TYPE1_HDMI,
DRM_DP_DUAL_MODE_TYPE2_DVI,
DRM_DP_DUAL_MODE_TYPE2_HDMI,
};
 
enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter);
int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter);
int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool *enabled);
int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool enable);
const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
 
#endif
/drivers/include/drm/drm_edid.h
403,6 → 403,18
return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
}
 
/**
* drm_eld_get_conn_type - Get device type hdmi/dp connected
* @eld: pointer to an ELD memory structure
*
* The caller needs to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to
* identify the display type connected.
*/
static inline u8 drm_eld_get_conn_type(const uint8_t *eld)
{
return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
}
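
A minimal sketch of how a caller might branch on the returned value (the helper name is invented for illustration):

static bool example_eld_is_hdmi(const uint8_t *eld)
{
    /* DRM_ELD_CONN_TYPE_HDMI and DRM_ELD_CONN_TYPE_DP are the defined types. */
    return drm_eld_get_conn_type(eld) == DRM_ELD_CONN_TYPE_HDMI;
}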
 
struct edid *drm_do_get_edid(struct drm_connector *connector,
int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
size_t len),
/drivers/include/drm/drm_fb_helper.h
219,6 → 219,7
};
 
#ifdef CONFIG_DRM_FBDEV_EMULATION
int drm_fb_helper_modinit(void);
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs);
int drm_fb_helper_init(struct drm_device *dev,
283,6 → 284,11
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector);
#else
static inline int drm_fb_helper_modinit(void)
{
return 0;
}
 
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs)
/drivers/include/drm/drm_mipi_dsi.h
96,14 → 96,17
* struct mipi_dsi_host - DSI host device
* @dev: driver model device node for this DSI host
* @ops: DSI host operations
* @list: list management
*/
struct mipi_dsi_host {
struct device *dev;
const struct mipi_dsi_host_ops *ops;
struct list_head list;
};
 
int mipi_dsi_host_register(struct mipi_dsi_host *host);
void mipi_dsi_host_unregister(struct mipi_dsi_host *host);
struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
 
/* DSI mode flags */
 
139,10 → 142,28
MIPI_DSI_FMT_RGB565,
};
 
#define DSI_DEV_NAME_SIZE 20
 
/**
* struct mipi_dsi_device_info - template for creating a mipi_dsi_device
* @type: DSI peripheral chip type
* @channel: DSI virtual channel assigned to peripheral
* @node: pointer to OF device node or NULL
*
* This is populated and passed to mipi_dsi_device_register_full() to create
* a new DSI device.
*/
struct mipi_dsi_device_info {
char type[DSI_DEV_NAME_SIZE];
u32 channel;
struct device_node *node;
};
 
/**
* struct mipi_dsi_device - DSI peripheral device
* @host: DSI host for this peripheral
* @dev: driver model device node for this peripheral
* @name: DSI peripheral chip type
* @channel: virtual channel assigned to the peripheral
* @format: pixel format for video mode
* @lanes: number of active data lanes
152,6 → 173,7
struct mipi_dsi_host *host;
struct device dev;
 
char name[DSI_DEV_NAME_SIZE];
unsigned int channel;
unsigned int lanes;
enum mipi_dsi_pixel_format format;
188,6 → 210,10
return -EINVAL;
}
 
struct mipi_dsi_device *
mipi_dsi_device_register_full(struct mipi_dsi_host *host,
const struct mipi_dsi_device_info *info);
void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi);
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
/drivers/include/drm/drm_modeset_helper_vtables.h
439,7 → 439,7
* can be modified by this callback and does not need to match mode.
*
* This function is used by both legacy CRTC helpers and atomic helpers.
* With atomic helpers it is optional.
* This hook is optional.
*
* NOTE:
*
/drivers/include/drm/i915_pciids.h
277,7 → 277,9
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
 
#define INTEL_SKL_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */
 
296,7 → 298,9
#define INTEL_BXT_IDS(info) \
INTEL_VGA_DEVICE(0x0A84, info), \
INTEL_VGA_DEVICE(0x1A84, info), \
INTEL_VGA_DEVICE(0x5A84, info)
INTEL_VGA_DEVICE(0x1A85, info), \
INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */
 
#define INTEL_KBL_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
/drivers/include/drm/ttm/ttm_bo_api.h
316,7 → 316,21
*/
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
bool interruptible, bool no_wait);
 
/**
* ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
*
* @placement: The proposed placement to check against
* @mem: The struct ttm_mem_reg indicating the region where the bo resides
* @new_flags: Describes compatible placement found
*
* Returns true if the placement is compatible
*/
extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem,
uint32_t *new_flags);
 
/**
* ttm_bo_validate
*
* @bo: The buffer object.
/drivers/include/linux/atomic.h
34,7 → 34,12
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*
* Besides, if an arch has a special barrier for acquire/release, it could
* implement its own __atomic_op_* and use the same framework for building
* variants
*/
#ifndef __atomic_op_acquire
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
41,13 → 46,17
smp_mb__after_atomic(); \
__ret; \
})
#endif
 
#ifndef __atomic_op_release
#define __atomic_op_release(op, args...) \
({ \
smp_mb__before_atomic(); \
op##_relaxed(args); \
})
#endif
 
#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
56,6 → 65,7
smp_mb__after_atomic(); \
__ret; \
})
#endif
 
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
548,6 → 558,27
}
#endif
 
/**
* atomic_fetch_or - perform *p |= mask and return old value of *p
* @p: pointer to atomic_t
* @mask: mask to OR on the atomic_t
*/
#ifndef atomic_fetch_or
static inline int atomic_fetch_or(atomic_t *p, int mask)
{
int old, val = atomic_read(p);
 
for (;;) {
old = atomic_cmpxchg(p, val, val | mask);
if (old == val)
break;
val = old;
}
 
return old;
}
#endif
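
A small usage sketch of the fallback above: setting a flag bit while learning whether it was already set. The flag value and helper are made up for the example:

#define EXAMPLE_FLAG_PENDING 0x1    /* illustrative bit */

static bool example_mark_pending(atomic_t *flags)
{
    /* atomic_fetch_or() returns the old value, so a set bit in the result
     * means some other path marked the flag first. */
    return atomic_fetch_or(flags, EXAMPLE_FLAG_PENDING) & EXAMPLE_FLAG_PENDING;
}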
 
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
/drivers/include/linux/bitmap.h
59,6 → 59,8
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
* bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
* bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
*/
 
/*
163,6 → 165,14
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
extern unsigned int bitmap_from_u32array(unsigned long *bitmap,
unsigned int nbits,
const u32 *buf,
unsigned int nwords);
extern unsigned int bitmap_to_u32array(u32 *buf,
unsigned int nwords,
const unsigned long *bitmap,
unsigned int nbits);
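
A brief sketch of the two new conversion helpers round-tripping a 64-bit map through a u32 array (the test pattern is arbitrary):

static void example_bitmap_u32_roundtrip(void)
{
    DECLARE_BITMAP(map, 64);
    u32 words[2] = { 0xdeadbeef, 0x1 };

    bitmap_from_u32array(map, 64, words, 2);    /* words -> bitmap */
    bitmap_to_u32array(words, 2, map, 64);      /* bitmap -> words */
}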
#ifdef __BIG_ENDIAN
extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
#else
/drivers/include/linux/bug.h
20,6 → 20,7
#define BUILD_BUG_ON_MSG(cond, msg) (0)
#define BUILD_BUG_ON(condition) (0)
#define BUILD_BUG() (0)
#define MAYBE_BUILD_BUG_ON(cond) (0)
#else /* __CHECKER__ */
 
/* Force a compilation error if a constant expression is not a power of 2 */
83,6 → 84,14
*/
#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
 
#define MAYBE_BUILD_BUG_ON(cond) \
do { \
if (__builtin_constant_p((cond))) \
BUILD_BUG_ON(cond); \
else \
BUG_ON(cond); \
} while (0)
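
To illustrate the new macro: a constant condition is rejected at compile time via BUILD_BUG_ON(), anything else degrades to a runtime BUG_ON(). The struct and limit below are invented:

struct example_hdr { u32 magic; u32 len; }; /* hypothetical on-wire header */

static void example_checks(size_t runtime_len)
{
    /* Compile-time constant: caught by the compiler, no runtime cost. */
    MAYBE_BUILD_BUG_ON(sizeof(struct example_hdr) != 8);
    /* Not a constant expression: becomes an ordinary BUG_ON(). */
    MAYBE_BUILD_BUG_ON(runtime_len > PAGE_SIZE);
}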
 
#endif /* __CHECKER__ */
 
#ifdef CONFIG_GENERIC_BUG
/drivers/include/linux/cache.h
12,10 → 12,24
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
 
/*
* __read_mostly is used to keep rarely changing variables out of frequently
* updated cachelines. If an architecture doesn't support it, ignore the
* hint.
*/
#ifndef __read_mostly
#define __read_mostly
#endif
 
/*
* __ro_after_init is used to mark things that are read-only after init (i.e.
* after mark_rodata_ro() has been called). These are effectively read-only,
* but may get written to during init, so can't live in .rodata (via "const").
*/
#ifndef __ro_after_init
#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
#endif
 
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif
/drivers/include/linux/clocksource.h
118,6 → 118,23
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
 
static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
{
/* freq = cyc/from
* mult/2^shift = ns/cyc
* mult = ns/cyc * 2^shift
* mult = from/freq * 2^shift
* mult = from * 2^shift / freq
* mult = (from<<shift) / freq
*/
u64 tmp = ((u64)from) << shift_constant;
 
tmp += freq/2; /* round for do_div */
do_div(tmp, freq);
 
return (u32)tmp;
}
 
/**
* clocksource_khz2mult - calculates mult from khz and shift
* @khz: Clocksource frequency in KHz
128,19 → 145,7
*/
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
/* khz = cyc/(Million ns)
* mult/2^shift = ns/cyc
* mult = ns/cyc * 2^shift
* mult = 1Million/khz * 2^shift
* mult = 1000000 * 2^shift / khz
* mult = (1000000<<shift) / khz
*/
u64 tmp = ((u64)1000000) << shift_constant;
 
tmp += khz/2; /* round for do_div */
do_div(tmp, khz);
 
return (u32)tmp;
return clocksource_freq2mult(khz, shift_constant, NSEC_PER_MSEC);
}
 
/**
154,19 → 159,7
*/
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
/* hz = cyc/(Billion ns)
* mult/2^shift = ns/cyc
* mult = ns/cyc * 2^shift
* mult = 1Billion/hz * 2^shift
* mult = 1000000000 * 2^shift / hz
* mult = (1000000000<<shift) / hz
*/
u64 tmp = ((u64)1000000000) << shift_constant;
 
tmp += hz/2; /* round for do_div */
do_div(tmp, hz);
 
return (u32)tmp;
return clocksource_freq2mult(hz, shift_constant, NSEC_PER_SEC);
}
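
A rough worked example of the shared helper, with numbers chosen purely for illustration:

/*
 * For a 1 MHz clocksource (khz = 1000) and shift = 20:
 *
 *   mult = clocksource_freq2mult(1000, 20, NSEC_PER_MSEC)
 *        = (1000000 << 20) / 1000
 *        = 1000 << 20
 *
 * so ns = (cycles * mult) >> 20 = cycles * 1000, i.e. 1000 ns per cycle,
 * which is exactly what a 1 MHz clock should report.
 */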
 
/**
/drivers/include/linux/compiler-gcc.h
246,7 → 246,7
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#endif
#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
#if GCC_VERSION >= 40800
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
/drivers/include/linux/compiler.h
20,12 → 20,14
# define __pmem __attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
#else
#else /* CONFIG_SPARSE_RCU_POINTER */
# define __rcu
#endif
#endif /* CONFIG_SPARSE_RCU_POINTER */
# define __private __attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
# define __user
# define __kernel
# define __safe
44,7 → 46,9
# define __percpu
# define __rcu
# define __pmem
#endif
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif /* __CHECKER__ */
 
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
#define ___PASTE(a,b) a##b
263,8 → 267,9
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
* READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
* compile-time warning.
* READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There are at
* least two memcpy()s: one for the __builtin_memcpy() and one for the
* macro doing the copy of the '__u' variable allocated on the stack.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
/drivers/include/linux/cpumask.h
607,8 → 607,6
 
/**
* cpumask_size - size to allocate for a 'struct cpumask' in bytes
*
* This will eventually be a runtime variable, depending on nr_cpu_ids.
*/
static inline size_t cpumask_size(void)
{
/drivers/include/linux/device.h
122,6 → 122,9
dev->driver_data = data;
}
 
static inline __printf(2, 3)
void dev_notice(const struct device *dev, const char *fmt, ...)
{}
 
 
#endif /* _DEVICE_H_ */
/drivers/include/linux/dma-attrs.h
18,6 → 18,7
DMA_ATTR_NO_KERNEL_MAPPING,
DMA_ATTR_SKIP_CPU_SYNC,
DMA_ATTR_FORCE_CONTIGUOUS,
DMA_ATTR_ALLOC_SINGLE_PAGES,
DMA_ATTR_MAX,
};
 
/drivers/include/linux/dma-buf.h
54,7 → 54,7
* @release: release this buffer; to be called after the last dma_buf_put.
* @begin_cpu_access: [optional] called before cpu access to invalidate cpu
* caches and allocate backing storage (if not yet done)
* respectively pin the objet into memory.
* respectively pin the object into memory.
* @end_cpu_access: [optional] called after cpu access to flush caches.
* @kmap_atomic: maps a page from the buffer into kernel address
* space, users may not block until the subsequent unmap call.
93,10 → 93,8
/* after final dma_buf_put() */
void (*release)(struct dma_buf *);
 
int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
enum dma_data_direction);
void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
enum dma_data_direction);
int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
void *(*kmap_atomic)(struct dma_buf *, unsigned long);
void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*kmap)(struct dma_buf *, unsigned long);
192,10 → 190,6
* kernel side. For example, an exporter that needs to keep a dmabuf ptr
* so that subsequent exports don't create a new dmabuf.
*/
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
get_file(dmabuf->file);
}
 
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
212,9 → 206,9
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
/drivers/include/linux/fb.h
296,9 → 296,6
/* Draws cursor */
int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor);
 
/* Rotates the display */
void (*fb_rotate)(struct fb_info *info, int angle);
 
/* wait for blit idle, optional */
int (*fb_sync)(struct fb_info *info);
 
/drivers/include/linux/fence.h
79,6 → 79,8
unsigned long flags;
ktime_t timestamp;
int status;
struct list_head child_list;
struct list_head active_list;
};
 
enum fence_flag_bits {
292,7 → 294,7
if (WARN_ON(f1->context != f2->context))
return false;
 
return f1->seqno - f2->seqno < INT_MAX;
return (int)(f1->seqno - f2->seqno) > 0;
}
 
/**
/drivers/include/linux/file.h
12,6 → 12,11
struct file;
 
extern void fput(struct file *);
 
struct file_operations;
struct vfsmount;
struct dentry;
struct path;
struct fd {
struct file *file;
unsigned int flags;
/drivers/include/linux/firmware.h
13,6 → 13,10
struct firmware {
size_t size;
const u8 *data;
struct page **pages;
 
/* firmware loader private fields */
void *priv;
};
 
struct module;
/drivers/include/linux/gfp.h
8,6 → 8,11
 
struct vm_area_struct;
 
/*
* In case of changes, please don't forget to update
* include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
*/
 
/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA 0x01u
#define ___GFP_HIGHMEM 0x02u
47,7 → 52,6
#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 
100,8 → 104,6
*
* __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
* This takes precedence over the __GFP_MEMALLOC flag if both are set.
*
* __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement.
*/
#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
254,8 → 256,9
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
~__GFP_KSWAPD_RECLAIM)
~__GFP_RECLAIM)
 
 
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
309,7 → 312,7
* 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
*
* ZONES_SHIFT must be <= 2 on 32 bit platforms.
* GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
*/
 
#if 16 * ZONES_SHIFT > BITS_PER_LONG
/drivers/include/linux/intel-iommu.h
0,0 → 1,506
/*
* Copyright © 2006-2015, Intel Corporation.
*
* Authors: Ashok Raj <ashok.raj@intel.com>
* Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
* David Woodhouse <David.Woodhouse@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
 
#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_
 
#include <linux/types.h>
//#include <linux/iova.h>
#include <linux/io.h>
#include <linux/idr.h>
#include <linux/dma_remapping.h>
//#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <asm/cacheflush.h>
//#include <asm/iommu.h>
 
/*
* Intel IOMMU register specification per version 1.0 public spec.
*/
 
#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
#define DMAR_GCMD_REG 0x18 /* Global command register */
#define DMAR_GSTS_REG 0x1c /* Global status register */
#define DMAR_RTADDR_REG 0x20 /* Root entry table */
#define DMAR_CCMD_REG 0x28 /* Context command reg */
#define DMAR_FSTS_REG 0x34 /* Fault Status register */
#define DMAR_FECTL_REG 0x38 /* Fault control register */
#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
#define DMAR_PQH_REG 0xc0 /* Page request queue head register */
#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */
#define DMAR_PQA_REG 0xd0 /* Page request queue address register */
#define DMAR_PRS_REG 0xdc /* Page request status register */
#define DMAR_PECTL_REG 0xe0 /* Page request event control register */
#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
 
#define OFFSET_STRIDE (9)
 
#ifdef CONFIG_64BIT
#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)
#else
static inline u64 dmar_readq(void __iomem *addr)
{
u32 lo, hi;
lo = readl(addr);
hi = readl(addr + 4);
return (((u64) hi) << 32) + lo;
}
 
static inline void dmar_writeq(void __iomem *addr, u64 val)
{
writel((u32)val, addr);
writel((u32)(val >> 32), addr + 4);
}
#endif
 
#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v) ((v) & 0x0f)
 
/*
* Decoding Capability Register
*/
#define cap_pi_support(c) (((c) >> 59) & 1)
#define cap_read_drain(c) (((c) >> 55) & 1)
#define cap_write_drain(c) (((c) >> 54) & 1)
#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c) (((c) >> 39) & 1)
 
#define cap_super_page_val(c) (((c) >> 34) & 0xf)
#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
* OFFSET_STRIDE) + 21)
 
#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
(cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
 
#define cap_zlr(c) (((c) >> 22) & 1)
#define cap_isoch(c) (((c) >> 23) & 1)
#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c) (((c) >> 8) & 0x1f)
#define cap_caching_mode(c) (((c) >> 7) & 1)
#define cap_phmr(c) (((c) >> 6) & 1)
#define cap_plmr(c) (((c) >> 5) & 1)
#define cap_rwbf(c) (((c) >> 4) & 1)
#define cap_afl(c) (((c) >> 3) & 1)
#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
/*
* Extended Capability Register
*/
 
#define ecap_pasid(e) ((e >> 40) & 0x1)
#define ecap_pss(e) ((e >> 35) & 0x1f)
#define ecap_eafs(e) ((e >> 34) & 0x1)
#define ecap_nwfs(e) ((e >> 33) & 0x1)
#define ecap_srs(e) ((e >> 31) & 0x1)
#define ecap_ers(e) ((e >> 30) & 0x1)
#define ecap_prs(e) ((e >> 29) & 0x1)
#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
#define ecap_dis(e) ((e >> 27) & 0x1)
#define ecap_nest(e) ((e >> 26) & 0x1)
#define ecap_mts(e) ((e >> 25) & 0x1)
#define ecap_ecs(e) ((e >> 24) & 0x1)
#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e) ((e) & 0x1)
#define ecap_qis(e) ((e) & 0x2)
#define ecap_pass_through(e) ((e >> 6) & 0x1)
#define ecap_eim_support(e) ((e >> 4) & 0x1)
#define ecap_ir_support(e) ((e >> 3) & 0x1)
#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
 
/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET 60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT (((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_TLB_MAX_SIZE (0x3f)
 
/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET 61
#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3)
#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3)
#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_ID_TLB_ADDR(addr) (addr)
#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
 
/* PMEN_REG */
#define DMA_PMEN_EPM (((u32)1)<<31)
#define DMA_PMEN_PRS (((u32)1)<<0)
 
/* GCMD_REG */
#define DMA_GCMD_TE (((u32)1) << 31)
#define DMA_GCMD_SRTP (((u32)1) << 30)
#define DMA_GCMD_SFL (((u32)1) << 29)
#define DMA_GCMD_EAFL (((u32)1) << 28)
#define DMA_GCMD_WBF (((u32)1) << 27)
#define DMA_GCMD_QIE (((u32)1) << 26)
#define DMA_GCMD_SIRTP (((u32)1) << 24)
#define DMA_GCMD_IRE (((u32) 1) << 25)
#define DMA_GCMD_CFI (((u32) 1) << 23)
 
/* GSTS_REG */
#define DMA_GSTS_TES (((u32)1) << 31)
#define DMA_GSTS_RTPS (((u32)1) << 30)
#define DMA_GSTS_FLS (((u32)1) << 29)
#define DMA_GSTS_AFLS (((u32)1) << 28)
#define DMA_GSTS_WBFS (((u32)1) << 27)
#define DMA_GSTS_QIES (((u32)1) << 26)
#define DMA_GSTS_IRTPS (((u32)1) << 24)
#define DMA_GSTS_IRES (((u32)1) << 25)
#define DMA_GSTS_CFIS (((u32)1) << 23)
 
/* DMA_RTADDR_REG */
#define DMA_RTADDR_RTT (((u64)1) << 11)
 
/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
 
/* FECTL_REG */
#define DMA_FECTL_IM (((u32)1) << 31)
 
/* FSTS_REG */
#define DMA_FSTS_PPF ((u32)2)
#define DMA_FSTS_PFO ((u32)1)
#define DMA_FSTS_IQE (1 << 4)
#define DMA_FSTS_ICE (1 << 5)
#define DMA_FSTS_ITE (1 << 6)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
 
/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F (((u32)1) << 31)
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
 
/* PRS_REG */
#define DMA_PRS_PPR ((u32)1)
 
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
do { \
cycles_t start_time = get_cycles(); \
while (1) { \
sts = op(iommu->reg + offset); \
if (cond) \
break; \
if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
panic("DMAR hardware is malfunctioning\n"); \
cpu_relax(); \
} \
} while (0)
 
#define QI_LENGTH 256 /* queue length */
 
enum {
QI_FREE,
QI_IN_USE,
QI_DONE,
QI_ABORT
};
 
#define QI_CC_TYPE 0x1
#define QI_IOTLB_TYPE 0x2
#define QI_DIOTLB_TYPE 0x3
#define QI_IEC_TYPE 0x4
#define QI_IWD_TYPE 0x5
#define QI_EIOTLB_TYPE 0x6
#define QI_PC_TYPE 0x7
#define QI_DEIOTLB_TYPE 0x8
#define QI_PGRP_RESP_TYPE 0x9
#define QI_PSTRM_RESP_TYPE 0xa
 
#define QI_IEC_SELECTIVE (((u64)1) << 4)
#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
 
#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
 
#define QI_IOTLB_DID(did) (((u64)did) << 16)
#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
#define QI_IOTLB_AM(am) (((u8)am))
 
#define QI_CC_FM(fm) (((u64)fm) << 48)
#define QI_CC_SID(sid) (((u64)sid) << 32)
#define QI_CC_DID(did) (((u64)did) << 16)
#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
 
#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
#define QI_DEV_IOTLB_SIZE 1
#define QI_DEV_IOTLB_MAX_INVS 32
 
#define QI_PC_PASID(pasid) (((u64)pasid) << 32)
#define QI_PC_DID(did) (((u64)did) << 16)
#define QI_PC_GRAN(gran) (((u64)gran) << 4)
 
#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0))
#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
 
#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_GL(gl) (((u64)gl) << 7)
#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
#define QI_EIOTLB_AM(am) (((u64)am))
#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
#define QI_EIOTLB_DID(did) (((u64)did) << 16)
#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
 
#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16)
#define QI_DEV_EIOTLB_MAX_INVS 32
 
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
#define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32)
#define QI_PGRP_RESP_CODE(res) ((u64)(res))
#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
#define QI_PGRP_DID(did) (((u64)(did)) << 16)
#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
 
#define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK)
#define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4)
#define QI_PSTRM_RESP_CODE(res) ((u64)(res))
#define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55)
#define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32)
#define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24)
#define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4)
 
#define QI_RESP_SUCCESS 0x0
#define QI_RESP_INVALID 0x1
#define QI_RESP_FAILURE 0xf
 
#define QI_GRAN_ALL_ALL 0
#define QI_GRAN_NONG_ALL 1
#define QI_GRAN_NONG_PASID 2
#define QI_GRAN_PSI_PASID 3
 
struct qi_desc {
u64 low, high;
};
 
struct q_inval {
raw_spinlock_t q_lock;
struct qi_desc *desc; /* invalidation queue */
int *desc_status; /* desc status */
int free_head; /* first free entry */
int free_tail; /* last free entry */
int free_cnt;
};
 
#ifdef CONFIG_IRQ_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER 8
#define INTR_REMAP_TABLE_REG_SIZE 0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
 
#define INTR_REMAP_TABLE_ENTRIES 65536
 
struct irq_domain;
 
struct ir_table {
struct irte *base;
unsigned long *bitmap;
};
#endif
 
struct iommu_flush {
void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type);
void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
};
 
enum {
SR_DMAR_FECTL_REG,
SR_DMAR_FEDATA_REG,
SR_DMAR_FEADDR_REG,
SR_DMAR_FEUADDR_REG,
MAX_SR_DMAR_REGS
};
 
#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
 
struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;
 
struct intel_iommu {
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
u64 reg_phys; /* physical address of hw register set */
u64 reg_size; /* size of hw register set */
u64 cap;
u64 ecap;
u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
raw_spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
int agaw; /* agaw of this iommu */
int msagaw; /* max sagaw of this iommu */
unsigned int irq, pr_irq;
u16 segment; /* PCI segment# */
unsigned char name[13]; /* Device Name */
 
#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
struct dmar_domain ***domains; /* ptr to domains */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
 
struct iommu_flush flush;
#endif
#ifdef CONFIG_INTEL_IOMMU_SVM
/* These are large and need to be contiguous, so we allocate just
* one for now. We'll maybe want to rethink that if we truly give
* devices away to userspace processes (e.g. for DPDK) and don't
* want to trust that userspace will use *only* the PASID it was
* told to. But while it's all driver-arbitrated, we're fine. */
struct pasid_entry *pasid_table;
struct pasid_state_entry *pasid_state_table;
struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */
struct idr pasid_idr;
#endif
struct q_inval *qi; /* Queued invalidation info */
u32 *iommu_state; /* Store iommu states between suspend and resume.*/
 
#ifdef CONFIG_IRQ_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
struct irq_domain *ir_domain;
struct irq_domain *ir_msi_domain;
#endif
struct device *iommu_dev; /* IOMMU-sysfs device */
int node;
u32 flags; /* Software defined flags */
};
 
static inline void __iommu_flush_cache(
struct intel_iommu *iommu, void *addr, int size)
{
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(addr, size);
}
 
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
 
extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void dmar_disable_qi(struct intel_iommu *iommu);
extern int dmar_reenable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);
 
extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
u64 addr, unsigned mask);
 
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
extern int dmar_ir_support(void);
 
#ifdef CONFIG_INTEL_IOMMU_SVM
extern int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu);
extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);
 
struct svm_dev_ops;
 
struct intel_svm_dev {
struct list_head list;
struct rcu_head rcu;
struct device *dev;
struct svm_dev_ops *ops;
int users;
u16 did;
u16 dev_iotlb:1;
u16 sid, qdep;
};
 
struct intel_svm {
struct mmu_notifier notifier;
struct mm_struct *mm;
struct intel_iommu *iommu;
int flags;
int pasid;
struct list_head devs;
};
 
extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
#endif
 
extern const struct attribute_group *intel_iommu_groups[];
 
#endif
/drivers/include/linux/io-mapping.h
0,0 → 1,171
/*
* Copyright © 2008 Keith Packard <keithp@keithp.com>
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
 
#ifndef _LINUX_IO_MAPPING_H
#define _LINUX_IO_MAPPING_H
 
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <asm/page.h>
 
/*
* The io_mapping mechanism provides an abstraction for mapping
* individual pages from an io device to the CPU in an efficient fashion.
*
* See Documentation/io-mapping.txt
*/
 
#ifdef CONFIG_HAVE_ATOMIC_IOMAP
 
#include <asm/iomap.h>
 
struct io_mapping {
void *vaddr;
resource_size_t base;
unsigned long size;
};
 
/*
* For small address space machines, mapping large objects
* into the kernel virtual space isn't practical. Where
* available, use fixmap support to dynamically map pages
* of the object at run time.
*/
 
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base, unsigned long size)
{
struct io_mapping *iomap;
 
iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
if (!iomap)
goto out_err;
 
iomap->vaddr = AllocKernelSpace(4096);
if (iomap->vaddr == NULL)
goto out_free;
 
iomap->base = base;
iomap->size = size;
 
return iomap;
 
out_free:
kfree(iomap);
out_err:
return NULL;
}
 
static inline void
io_mapping_free(struct io_mapping *mapping)
{
FreeKernelSpace(mapping->vaddr);
kfree(mapping);
}
 
/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
addr_t phys_addr;
 
BUG_ON(offset >= mapping->size);
phys_addr = (mapping->base + offset) & PAGE_MASK;
 
MapPage(mapping->vaddr, phys_addr, PG_WRITEC|PG_SW);
return mapping->vaddr;
}
 
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
MapPage(vaddr, 0, 0);
}
 
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
addr_t phys_addr;
 
BUG_ON(offset >= mapping->size);
phys_addr = (mapping->base + offset) & PAGE_MASK;
 
MapPage(mapping->vaddr, phys_addr, PG_WRITEC|PG_SW);
return mapping->vaddr;
}
 
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
MapPage(vaddr, 0, 0);
}
 
#else
 
#include <linux/uaccess.h>
 
/* this struct isn't actually defined anywhere */
struct io_mapping;
 
/* Create the io_mapping object*/
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base, unsigned long size)
{
return (struct io_mapping __force *) ioremap_wc(base, size);
}
 
static inline void
io_mapping_free(struct io_mapping *mapping)
{
iounmap((void __force __iomem *) mapping);
}
 
/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
preempt_disable();
pagefault_disable();
return ((char __force __iomem *) mapping) + offset;
}
 
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
pagefault_enable();
preempt_enable();
}
 
/* Non-atomic map/unmap */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
return ((char __force __iomem *) mapping) + offset;
}
 
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
}
 
#endif /* HAVE_ATOMIC_IOMAP */
 
#endif /* _LINUX_IO_MAPPING_H */
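
A usage sketch for either implementation above, writing one register-sized value through a short-lived write-combining mapping. The base, size and offset are placeholders, and offset is assumed page-aligned so both variants behave the same:

static int example_poke_page(resource_size_t base, unsigned long size,
                             unsigned long offset, u32 value)
{
    struct io_mapping *map;
    void __iomem *vaddr;

    map = io_mapping_create_wc(base, size);
    if (!map)
        return -ENOMEM;

    vaddr = io_mapping_map_atomic_wc(map, offset);  /* short-lived mapping */
    writel(value, vaddr);
    io_mapping_unmap_atomic(vaddr);

    io_mapping_free(map);
    return 0;
}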
/drivers/include/linux/ioport.h
20,6 → 20,7
resource_size_t end;
const char *name;
unsigned long flags;
unsigned long desc;
struct resource *parent, *sibling, *child;
};
 
49,12 → 50,19
#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */
#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */
 
#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */
#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */
 
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
 
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
#define IORESOURCE_AUTO 0x40000000
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
 
/* I/O resource extended types */
#define IORESOURCE_SYSTEM_RAM (IORESOURCE_MEM|IORESOURCE_SYSRAM)
 
/* PnP IRQ specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
#define IORESOURCE_IRQ_LOWEDGE (1<<1)
98,13 → 106,27
 
/* PCI ROM control bits (IORESOURCE_BITS) */
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */
#define IORESOURCE_ROM_SHADOW (1<<1) /* Use RAM image, not ROM BAR */
 
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
 
/*
* I/O Resource Descriptors
*
* Descriptors are used by walk_iomem_res_desc() and region_intersects()
* for searching a specific resource range in the iomem table. Assign
* a new descriptor when a resource range supports the search interfaces.
* Otherwise, resource.desc must be set to IORES_DESC_NONE (0).
*/
enum {
IORES_DESC_NONE = 0,
IORES_DESC_CRASH_KERNEL = 1,
IORES_DESC_ACPI_TABLES = 2,
IORES_DESC_ACPI_NV_STORAGE = 3,
IORES_DESC_PERSISTENT_MEMORY = 4,
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
};
 
/* helpers to define resources */
#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
113,6 → 135,7
.end = (_start) + (_size) - 1, \
.name = (_name), \
.flags = (_flags), \
.desc = IORES_DESC_NONE, \
}
 
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
149,6 → 172,7
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern int remove_resource(struct resource *old);
extern void arch_remove_reservations(struct resource *avail);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
170,6 → 194,10
{
return res->flags & IORESOURCE_TYPE_BITS;
}
static inline unsigned long resource_ext_type(const struct resource *res)
{
return res->flags & IORESOURCE_EXT_TYPE_BITS;
}
/* True iff r1 completely contains r2 */
static inline bool resource_contains(struct resource *r1, struct resource *r2)
{
/drivers/include/linux/kernel.h
63,7 → 63,7
#define round_down(x, y) ((x) & ~__round_mask(x, y))
 
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_UP_ULL(ll,d) \
({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
 
788,64 → 788,6
})
 
 
static inline __must_check long __copy_to_user(void __user *to,
const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
*(u16 __force *)to = *(u16 *)from;
return 0;
case 4:
*(u32 __force *)to = *(u32 *)from;
return 0;
default:
break;
}
}
 
__builtin_memcpy((void __force *)to, from, n);
return 0;
}
 
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
unsigned long ret;
 
switch (n) {
case 1:
*(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
*(u16 __force *)to = *(u16 *)from;
return 0;
case 4:
*(u32 __force *)to = *(u32 *)from;
return 0;
default:
break;
}
}
__builtin_memcpy((void __force *)to, from, n);
}
 
static inline long copy_from_user(void *to,
const void __user * from, unsigned long n)
{
return __copy_from_user(to, from, n);
}
 
static inline long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
return __copy_to_user(to, from, n);
}
 
#define CAP_SYS_ADMIN 21
 
static inline bool capable(int cap)
861,14 → 803,8
 
typedef u64 async_cookie_t;
 
//#define iowrite32(v, addr) writel((v), (addr))
 
#define __init
 
#define CONFIG_PAGE_OFFSET 0
 
typedef long long __kernel_long_t;
typedef unsigned long long __kernel_ulong_t;
#define __kernel_long_t __kernel_long_t
 
#endif
/drivers/include/linux/kernfs.h
0,0 → 1,140
/*
* kernfs.h - pseudo filesystem decoupled from vfs locking
*
* This file is released under the GPLv2.
*/
 
#ifndef __LINUX_KERNFS_H
#define __LINUX_KERNFS_H
 
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/lockdep.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/wait.h>
 
struct file;
struct dentry;
struct iattr;
struct seq_file;
struct vm_area_struct;
struct super_block;
struct file_system_type;
 
struct kernfs_open_node;
struct kernfs_iattrs;
 
enum kernfs_node_type {
KERNFS_DIR = 0x0001,
KERNFS_FILE = 0x0002,
KERNFS_LINK = 0x0004,
};
 
#define KERNFS_TYPE_MASK 0x000f
#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK
 
enum kernfs_node_flag {
KERNFS_ACTIVATED = 0x0010,
KERNFS_NS = 0x0020,
KERNFS_HAS_SEQ_SHOW = 0x0040,
KERNFS_HAS_MMAP = 0x0080,
KERNFS_LOCKDEP = 0x0100,
KERNFS_SUICIDAL = 0x0400,
KERNFS_SUICIDED = 0x0800,
KERNFS_EMPTY_DIR = 0x1000,
};
 
/* @flags for kernfs_create_root() */
enum kernfs_root_flag {
/*
* kernfs_nodes are created in the deactivated state and invisible.
* They require explicit kernfs_activate() to become visible. This
* can be used to make related nodes become visible atomically
* after all nodes are created successfully.
*/
KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001,
 
/*
* For regular files, if the opener has CAP_DAC_OVERRIDE, open(2)
* succeeds regardless of the RW permissions. sysfs had an extra
* layer of enforcement where open(2) fails with -EACCES regardless
* of CAP_DAC_OVERRIDE if the permission doesn't have the
* respective read or write access at all (none of S_IRUGO or
* S_IWUGO) or the respective operation isn't implemented. The
* following flag enables that behavior.
*/
KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 0x0002,
};
 
/* type-specific structures for kernfs_node union members */
struct kernfs_elem_dir {
unsigned long subdirs;
/* children rbtree starts here and goes through kn->rb */
struct rb_root children;
 
/*
* The kernfs hierarchy this directory belongs to. This fits
* better directly in kernfs_node but is here to save space.
*/
struct kernfs_root *root;
};
 
struct kernfs_elem_symlink {
struct kernfs_node *target_kn;
};
 
struct kernfs_elem_attr {
const struct kernfs_ops *ops;
struct kernfs_open_node *open;
loff_t size;
struct kernfs_node *notify_next; /* for kernfs_notify() */
};
 
/*
* kernfs_node - the building block of kernfs hierarchy. Each and every
* kernfs node is represented by a single kernfs_node. Most fields are
* private to kernfs and shouldn't be accessed directly by kernfs users.
*
* As long as s_count reference is held, the kernfs_node itself is
* accessible. Dereferencing elem or any other outer entity requires
* active reference.
*/
struct kernfs_node {
atomic_t count;
atomic_t active;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
/*
* Use kernfs_get_parent() and kernfs_name/path() instead of
* accessing the following two fields directly. If the node is
* never moved to a different parent, it is safe to access the
* parent directly.
*/
struct kernfs_node *parent;
const char *name;
 
struct rb_node rb;
 
const void *ns; /* namespace tag */
unsigned int hash; /* ns + name hash */
union {
struct kernfs_elem_dir dir;
struct kernfs_elem_symlink symlink;
struct kernfs_elem_attr attr;
};
 
void *priv;
 
unsigned short flags;
umode_t mode;
unsigned int ino;
struct kernfs_iattrs *iattr;
};
 
 
#endif /* __LINUX_KERNFS_H */
/drivers/include/linux/lockdep.h
196,9 → 196,11
* We record lock dependency chains, so that we can cache them:
*/
struct lock_chain {
u8 irq_context;
u8 depth;
u16 base;
/* see BUILD_BUG_ON()s in lookup_chain_cache() */
unsigned int irq_context : 2,
depth : 6,
base : 24;
/* 4 byte hole */
struct hlist_node entry;
u64 chain_key;
};
261,7 → 263,6
/*
* Initialization, self-test and debugging-output methods:
*/
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
392,7 → 393,6
# define lockdep_set_current_reclaim_state(g) do { } while (0)
# define lockdep_clear_current_reclaim_state() do { } while (0)
# define lockdep_trace_alloc(g) do { } while (0)
# define lockdep_init() do { } while (0)
# define lockdep_info() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
/drivers/include/linux/mmdebug.h
9,8 → 9,7
struct mm_struct;
 
extern void dump_page(struct page *page, const char *reason);
extern void dump_page_badflags(struct page *page, const char *reason,
unsigned long badflags);
extern void __dump_page(struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
 
/drivers/include/linux/pci-dma-compat.h
0,0 → 1,131
/* include this file if the platform implements the dma_ DMA Mapping API
* and wants to provide the pci_ DMA Mapping API in terms of it */
 
#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
#define _ASM_GENERIC_PCI_DMA_COMPAT_H
 
#include <linux/dma-mapping.h>
 
/* This defines the direction arg to the DMA mapping routines. */
#define PCI_DMA_BIDIRECTIONAL 0
#define PCI_DMA_TODEVICE 1
#define PCI_DMA_FROMDEVICE 2
#define PCI_DMA_NONE 3
 
static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
}
 
static inline void *
pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
size, dma_handle, GFP_ATOMIC);
}
 
static inline void
pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
}
 
static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
}
 
static inline void
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction)
{
dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
}
 
static inline dma_addr_t
pci_map_page(struct pci_dev *hwdev, struct page *page,
unsigned long offset, size_t size, int direction)
{
return (dma_addr_t)( (offset)+page_to_phys(page));
}
 
static inline void
pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
size_t size, int direction)
{
 
}
 
static inline int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
}
 
static inline void
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
}
 
static inline void
pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
}
 
static inline void
pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
}
 
static inline void
pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
}
 
static inline void
pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
}
 
static inline int
pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
{
return dma_mapping_error(&pdev->dev, dma_addr);
}
 
#ifdef CONFIG_PCI
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
return 0;
}
 
#else
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
unsigned int size)
{ return -EIO; }
static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
unsigned long mask)
{ return -EIO; }
#endif
 
#endif
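
A short sketch of the compat layer in use, allocating and freeing a coherent buffer through the legacy pci_ names (the 4096-byte size is arbitrary):

static void *example_alloc_ring(struct pci_dev *pdev, dma_addr_t *dma)
{
    /* Thin wrapper: this ends up in dma_alloc_coherent(&pdev->dev, ...). */
    return pci_alloc_consistent(pdev, 4096, dma);
}

static void example_free_ring(struct pci_dev *pdev, void *ring, dma_addr_t dma)
{
    pci_free_consistent(pdev, 4096, ring, dma);
}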
/drivers/include/linux/pci.h
742,9 → 742,26
.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
 
enum {
PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */
PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */
PCI_PROBE_ONLY = 0x00000004, /* use existing setup */
PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */
PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */
PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */
};
 
/* these external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI
 
extern unsigned int pci_flags;
 
static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }
 
void pcie_bus_configure_settings(struct pci_bus *bus);
 
enum pcie_bus_config_types {
766,6 → 783,7
int no_pci_devices(void);
 
void pcibios_resource_survey_bus(struct pci_bus *bus);
void pcibios_bus_add_device(struct pci_dev *pdev);
void pcibios_add_bus(struct pci_bus *bus);
void pcibios_remove_bus(struct pci_bus *bus);
void pcibios_fixup_bus(struct pci_bus *);
1006,8 → 1024,6
bool pci_intx_mask_supported(struct pci_dev *dev);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
int pci_wait_for_pending_transaction(struct pci_dev *dev);
int pcix_get_max_mmrbc(struct pci_dev *dev);
1100,6 → 1116,7
/* Vital product data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
int pci_set_vpd_size(struct pci_dev *dev, size_t len);
 
/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1231,6 → 1248,7
 
int pci_set_vga_state(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
 
/* kmem_cache style wrapper around pci_alloc_consistent() */
 
#include <linux/pci-dma.h>
1398,6 → 1416,11
 
#else /* CONFIG_PCI is not enabled */
 
static inline void pci_set_flags(int flags) { }
static inline void pci_add_flags(int flags) { }
static inline void pci_clear_flags(int flags) { }
static inline int pci_has_flag(int flag) { return 0; }
 
/*
* If the system does not have PCI, clearly these return errors. Define
* these as simple inline functions to avoid hair in drivers.
1437,16 → 1460,6
static inline void pci_set_master(struct pci_dev *dev) { }
static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
static inline void pci_disable_device(struct pci_dev *dev) { }
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
unsigned int size)
{ return -EIO; }
static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
unsigned long mask)
{ return -EIO; }
static inline int pci_assign_resource(struct pci_dev *dev, int i)
{ return -EBUSY; }
static inline int __pci_register_driver(struct pci_driver *drv,
1508,6 → 1521,10
 
#include <asm/pci.h>
 
#ifndef pci_root_bus_fwnode
#define pci_root_bus_fwnode(bus) NULL
#endif
 
/* these helpers provide future and backwards compatibility
* for accessing popular PCI BAR info */
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
1731,6 → 1748,8
 
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset);
void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset);
int pci_num_vf(struct pci_dev *dev);
int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
1747,6 → 1766,12
}
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
{
return -ENOSYS;
}
static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
int id, int reset) { }
static inline void pci_disable_sriov(struct pci_dev *dev) { }
static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
static inline int pci_vfs_assigned(struct pci_dev *dev)
1827,12 → 1852,13
#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
 
/* Small Resource Data Type Tag Item Names */
#define PCI_VPD_STIN_END 0x78 /* End */
#define PCI_VPD_STIN_END 0x0f /* End */
 
#define PCI_VPD_SRDT_END PCI_VPD_STIN_END
#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
 
#define PCI_VPD_SRDT_TIN_MASK 0x78
#define PCI_VPD_SRDT_LEN_MASK 0x07
#define PCI_VPD_LRDT_TIN_MASK 0x7f
 
#define PCI_VPD_LRDT_TAG_SIZE 3
#define PCI_VPD_SRDT_TAG_SIZE 1
1856,6 → 1882,17
}
 
/**
* pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
* @lrdt: Pointer to the beginning of the Large Resource Data Type tag
*
* Returns the extracted Large Resource Data Type Tag item.
*/
static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
{
return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
}
 
/**
* pci_vpd_srdt_size - Extracts the Small Resource Data Type length
* @srdt: Pointer to the beginning of the Small Resource Data Type tag
*
1867,6 → 1904,17
}
 
/**
* pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
* @srdt: Pointer to the beginning of the Small Resource Data Type tag
*
* Returns the extracted Small Resource Data Type Tag Item.
*/
static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
{
return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
}
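
A short, illustrative sketch of how the LRDT/SRDT helpers above fit together when walking a VPD image. The wrapper name is invented; it assumes the usual PCI_VPD_LRDT (0x80) marker and the pci_vpd_lrdt_size()/pci_vpd_srdt_size() helpers defined elsewhere in this header.

/* Hypothetical helper: report the tag of the resource starting at 'vpd'
 * and return how many bytes (header + data) it occupies. */
static inline unsigned int vpd_first_tag_span(const u8 *vpd, u16 *tag)
{
	if (vpd[0] & PCI_VPD_LRDT) {
		/* Large resource: 1 tag byte + 2 length bytes + data. */
		*tag = pci_vpd_lrdt_tag(vpd);	/* e.g. PCI_VPD_LRDT_RO_DATA */
		return PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(vpd);
	}
	/* Small resource: tag and length packed into a single byte. */
	*tag = pci_vpd_srdt_tag(vpd);		/* e.g. PCI_VPD_STIN_END */
	if (*tag == PCI_VPD_STIN_END)
		return PCI_VPD_SRDT_TAG_SIZE;	/* end-of-VPD marker, no data */
	return PCI_VPD_SRDT_TAG_SIZE + pci_vpd_srdt_size(vpd);
}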
 
/**
* pci_vpd_info_field_size - Extracts the information field length
* @info_field: Pointer to the beginning of an information field header
*
1983,6 → 2031,9
return bus->self && bus->self->ari_enabled;
}
 
/* provide the legacy pci_dma_* API */
#include <linux/pci-dma-compat.h>
 
typedef struct
{
struct list_head link;
/drivers/include/linux/pci_ids.h
110,6 → 110,7
#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
#define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330
#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe
#define PCI_CLASS_SERIAL_FIBER 0x0c04
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
 
2506,6 → 2507,10
 
#define PCI_VENDOR_ID_AZWAVE 0x1a3b
 
#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_SUBDEVICE_ID_QEMU 0x1100
 
#define PCI_VENDOR_ID_ASMEDIA 0x1b21
 
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
/drivers/include/linux/poison.h
30,7 → 30,11
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
 
/********** mm/debug-pagealloc.c **********/
#ifdef CONFIG_PAGE_POISONING_ZERO
#define PAGE_POISON 0x00
#else
#define PAGE_POISON 0xaa
#endif
 
/********** mm/page_alloc.c ************/
 
/drivers/include/linux/printk.h
242,10 → 242,10
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
__ratelimit(&_rs)) \
__dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
#define pr_debug_ratelimited(fmt, ...) \
/drivers/include/linux/pwm.h
6,6 → 6,7
//#include <linux/of.h>
 
struct device;
struct device_node;
struct pwm_device;
struct seq_file;
 
223,6 → 224,11
return ERR_PTR(-ENODEV);
}
 
static inline struct pwm_device *of_pwm_get(struct device_node *np,
const char *con_id)
{
return ERR_PTR(-ENODEV);
}
 
static inline void pwm_put(struct pwm_device *pwm)
{
234,6 → 240,12
return ERR_PTR(-ENODEV);
}
 
static inline struct pwm_device *devm_of_pwm_get(struct device *dev,
struct device_node *np,
const char *con_id)
{
return ERR_PTR(-ENODEV);
}
 
static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
{
/drivers/include/linux/rculist.h
319,6 → 319,27
})
 
/**
* list_next_or_null_rcu - get the next element from a list
* @head: the head for the list.
* @ptr: the list head to take the next element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* Note that if the ptr is at the end of the list, NULL is returned.
*
* This primitive may safely run concurrently with the _rcu list-mutation
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
struct list_head *__head = (head); \
struct list_head *__ptr = (ptr); \
struct list_head *__next = READ_ONCE(__ptr->next); \
likely(__next != __head) ? list_entry_rcu(__next, type, \
member) : NULL; \
})
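
A minimal usage sketch for the macro above; struct item, its node member and the caller are hypothetical, and as with any _rcu traversal the result is only stable under rcu_read_lock().

struct item {
	struct list_head node;
	int value;
};

/* Return the element that follows 'cur' on 'head', or NULL if 'cur'
 * is the last one. Caller must hold rcu_read_lock(). */
static inline struct item *item_after(struct list_head *head, struct item *cur)
{
	return list_next_or_null_rcu(head, &cur->node, struct item, node);
}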
 
/**
* list_for_each_entry_rcu - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
/drivers/include/linux/rcupdate.h
294,9 → 294,7
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
struct notifier_block;
int rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
void rcu_report_dead(unsigned int cpu);
 
#ifndef CONFIG_TINY_RCU
void rcu_end_inkernel_boot(void);
322,8 → 320,6
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
#endif /* CONFIG_NO_HZ_FULL */
 
#ifdef CONFIG_RCU_NOCB_CPU
/drivers/include/linux/slab.h
20,7 → 20,7
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
#define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
92,6 → 92,12
# define SLAB_ACCOUNT 0x00000000UL
#endif
 
#ifdef CONFIG_KASAN
#define SLAB_KASAN 0x08000000UL
#else
#define SLAB_KASAN 0x00000000UL
#endif
 
/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/drivers/include/linux/spinlock.h
51,6 → 51,7
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
/drivers/include/linux/stat.h
0,0 → 1,37
#ifndef _LINUX_STAT_H
#define _LINUX_STAT_H
 
 
#include <asm/stat.h>
#include <uapi/linux/stat.h>
 
#define S_IRWXUGO (S_IRWXU|S_IRWXG|S_IRWXO)
#define S_IALLUGO (S_ISUID|S_ISGID|S_ISVTX|S_IRWXUGO)
#define S_IRUGO (S_IRUSR|S_IRGRP|S_IROTH)
#define S_IWUGO (S_IWUSR|S_IWGRP|S_IWOTH)
#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH)
 
#define UTIME_NOW ((1l << 30) - 1l)
#define UTIME_OMIT ((1l << 30) - 2l)
 
#include <linux/types.h>
#include <linux/time.h>
#include <linux/uidgid.h>
 
struct kstat {
u64 ino;
dev_t dev;
umode_t mode;
unsigned int nlink;
kuid_t uid;
kgid_t gid;
dev_t rdev;
loff_t size;
struct timespec atime;
struct timespec mtime;
struct timespec ctime;
unsigned long blksize;
unsigned long long blocks;
};
 
#endif
/drivers/include/linux/string.h
128,8 → 128,14
extern void argv_free(char **argv);
 
extern bool sysfs_streq(const char *s1, const char *s2);
extern int strtobool(const char *s, bool *res);
extern int kstrtobool(const char *s, bool *res);
static inline int strtobool(const char *s, bool *res)
{
return kstrtobool(s, res);
}
 
int match_string(const char * const *array, size_t n, const char *string);
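
For context, a sketch of how these two parsers are typically used; the mode table and wrapper are invented, and ARRAY_SIZE() from kernel.h is assumed to be available.

static const char * const mode_names[] = { "off", "low", "high" };

/* Accept either a boolean string ("0/1", "y/n", "on/off") or one of the
 * named modes; returns 0 on success or a negative errno. */
static inline int parse_mode(const char *s, bool *enabled, int *mode)
{
	int idx;

	if (kstrtobool(s, enabled) == 0)
		return 0;

	idx = match_string(mode_names, ARRAY_SIZE(mode_names), s);
	if (idx < 0)
		return idx;		/* -EINVAL: not in the table */
	*mode = idx;
	return 0;
}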
 
#ifdef CONFIG_BINARY_PRINTF
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
/drivers/include/linux/sysfs.h
202,7 → 202,11
{
}
 
#define sysfs_create_link(kobj,target, name) (0)
static inline int sysfs_create_link(struct kobject *kobj,
struct kobject *target, const char *name)
{
return 0;
}
 
static inline int sysfs_create_link_nowarn(struct kobject *kobj,
struct kobject *target,
211,7 → 215,9
return 0;
}
 
#define sysfs_remove_link(kobj, name)
static inline void sysfs_remove_link(struct kobject *kobj, const char *name)
{
}
 
static inline int sysfs_rename_link_ns(struct kobject *k, struct kobject *t,
const char *old_name,
/drivers/include/linux/thread_info.h
0,0 → 1,17
/* thread_info.h: common low-level thread information accessors
*
* Copyright (C) 2002 David Howells (dhowells@redhat.com)
* - Incorporating suggestions made by Linus Torvalds
*/
 
#ifndef _LINUX_THREAD_INFO_H
#define _LINUX_THREAD_INFO_H
 
#include <linux/types.h>
#include <linux/bug.h>
 
struct timespec;
struct compat_timespec;
 
 
#endif /* _LINUX_THREAD_INFO_H */
/drivers/include/linux/uaccess.h
2,6 → 2,7
#define __LINUX_UACCESS_H__
 
#include <linux/sched.h>
#include <asm/uaccess.h>
/*
* These routines enable/disable the pagefault handler. If disabled, it will
* not take any locks and go straight to the fixup table.
16,5 → 17,20
static inline void pagefault_enable(void)
{
}
#ifndef ARCH_HAS_NOCACHE_UACCESS
 
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
const void __user *from, unsigned long n)
{
return __copy_from_user_inatomic(to, from, n);
}
 
static inline unsigned long __copy_from_user_nocache(void *to,
const void __user *from, unsigned long n)
{
return __copy_from_user(to, from, n);
}
 
#endif /* ARCH_HAS_NOCACHE_UACCESS */
 
#endif /* __LINUX_UACCESS_H__ */
/drivers/include/linux/uidgid.h
0,0 → 1,27
#ifndef _LINUX_UIDGID_H
#define _LINUX_UIDGID_H
 
/*
* A set of types for the internal kernel types representing uids and gids.
*
* The types defined in this header allow distinguishing which uids and gids in
* the kernel are values used by userspace and which uid and gid values are
* the internal kernel values. With the addition of user namespaces the values
* can be different. Using the type system makes it possible for the compiler
* to detect when we overlook these differences.
*
*/
#include <linux/types.h>
typedef struct {
uid_t val;
} kuid_t;
 
 
typedef struct {
gid_t val;
} kgid_t;
 
#define KUIDT_INIT(value) (kuid_t){ value }
#define KGIDT_INIT(value) (kgid_t){ value }
 
#endif /* _LINUX_UIDGID_H */
/drivers/include/linux/unaligned/access_ok.h
4,62 → 4,62
#include <linux/kernel.h>
#include <asm/byteorder.h>
 
static inline u16 get_unaligned_le16(const void *p)
static __always_inline u16 get_unaligned_le16(const void *p)
{
return le16_to_cpup((__le16 *)p);
}
 
static inline u32 get_unaligned_le32(const void *p)
static __always_inline u32 get_unaligned_le32(const void *p)
{
return le32_to_cpup((__le32 *)p);
}
 
static inline u64 get_unaligned_le64(const void *p)
static __always_inline u64 get_unaligned_le64(const void *p)
{
return le64_to_cpup((__le64 *)p);
}
 
static inline u16 get_unaligned_be16(const void *p)
static __always_inline u16 get_unaligned_be16(const void *p)
{
return be16_to_cpup((__be16 *)p);
}
 
static inline u32 get_unaligned_be32(const void *p)
static __always_inline u32 get_unaligned_be32(const void *p)
{
return be32_to_cpup((__be32 *)p);
}
 
static inline u64 get_unaligned_be64(const void *p)
static __always_inline u64 get_unaligned_be64(const void *p)
{
return be64_to_cpup((__be64 *)p);
}
 
static inline void put_unaligned_le16(u16 val, void *p)
static __always_inline void put_unaligned_le16(u16 val, void *p)
{
*((__le16 *)p) = cpu_to_le16(val);
}
 
static inline void put_unaligned_le32(u32 val, void *p)
static __always_inline void put_unaligned_le32(u32 val, void *p)
{
*((__le32 *)p) = cpu_to_le32(val);
}
 
static inline void put_unaligned_le64(u64 val, void *p)
static __always_inline void put_unaligned_le64(u64 val, void *p)
{
*((__le64 *)p) = cpu_to_le64(val);
}
 
static inline void put_unaligned_be16(u16 val, void *p)
static __always_inline void put_unaligned_be16(u16 val, void *p)
{
*((__be16 *)p) = cpu_to_be16(val);
}
 
static inline void put_unaligned_be32(u32 val, void *p)
static __always_inline void put_unaligned_be32(u32 val, void *p)
{
*((__be32 *)p) = cpu_to_be32(val);
}
 
static inline void put_unaligned_be64(u64 val, void *p)
static __always_inline void put_unaligned_be64(u64 val, void *p)
{
*((__be64 *)p) = cpu_to_be64(val);
}
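
A small sketch of how these accessors are used on a raw, possibly misaligned byte buffer; the packed-header layout here is invented for illustration.

/* Read a little-endian 32-bit length at offset 4 and store a big-endian
 * 16-bit checksum at offset 8, regardless of the buffer's alignment. */
static inline u32 hdr_payload_len(const u8 *buf)
{
	return get_unaligned_le32(buf + 4);
}

static inline void hdr_set_csum(u8 *buf, u16 csum)
{
	put_unaligned_be16(csum, buf + 8);
}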
/drivers/include/linux/vga_switcheroo.h
0,0 → 1,200
/*
* vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs
*
* Copyright (c) 2010 Red Hat Inc.
* Author : Dave Airlie <airlied@redhat.com>
*
* Copyright (c) 2015 Lukas Wunner <lukas@wunner.de>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
 
#ifndef _LINUX_VGA_SWITCHEROO_H_
#define _LINUX_VGA_SWITCHEROO_H_
 
#include <linux/fb.h>
 
struct pci_dev;
 
/**
* enum vga_switcheroo_handler_flags_t - handler flags bitmask
* @VGA_SWITCHEROO_CAN_SWITCH_DDC: whether the handler is able to switch the
* DDC lines separately. This signals to clients that they should call
* drm_get_edid_switcheroo() to probe the EDID
* @VGA_SWITCHEROO_NEEDS_EDP_CONFIG: whether the handler is unable to switch
* the AUX channel separately. This signals to clients that the active
* GPU needs to train the link and communicate the link parameters to the
* inactive GPU (mediated by vga_switcheroo). The inactive GPU may then
* skip the AUX handshake and set up its output with these pre-calibrated
* values (DisplayPort specification v1.1a, section 2.5.3.3)
*
* Handler flags bitmask. Used by handlers to declare their capabilities upon
* registering with vga_switcheroo.
*/
enum vga_switcheroo_handler_flags_t {
VGA_SWITCHEROO_CAN_SWITCH_DDC = (1 << 0),
VGA_SWITCHEROO_NEEDS_EDP_CONFIG = (1 << 1),
};
 
/**
* enum vga_switcheroo_state - client power state
* @VGA_SWITCHEROO_OFF: off
* @VGA_SWITCHEROO_ON: on
* @VGA_SWITCHEROO_NOT_FOUND: client has not registered with vga_switcheroo.
* Only used in vga_switcheroo_get_client_state() which in turn is only
* called from hda_intel.c
*
* Client power state.
*/
enum vga_switcheroo_state {
VGA_SWITCHEROO_OFF,
VGA_SWITCHEROO_ON,
/* below are referred only from vga_switcheroo_get_client_state() */
VGA_SWITCHEROO_NOT_FOUND,
};
 
/**
* enum vga_switcheroo_client_id - client identifier
* @VGA_SWITCHEROO_UNKNOWN_ID: initial identifier assigned to vga clients.
* Determining the id requires the handler, so GPUs are given their
* true id in a delayed fashion in vga_switcheroo_enable()
* @VGA_SWITCHEROO_IGD: integrated graphics device
* @VGA_SWITCHEROO_DIS: discrete graphics device
* @VGA_SWITCHEROO_MAX_CLIENTS: currently no more than two GPUs are supported
*
* Client identifier. Audio clients use the same identifier with bit 0x100 set.
*/
enum vga_switcheroo_client_id {
VGA_SWITCHEROO_UNKNOWN_ID = -1,
VGA_SWITCHEROO_IGD,
VGA_SWITCHEROO_DIS,
VGA_SWITCHEROO_MAX_CLIENTS,
};
 
/**
* struct vga_switcheroo_handler - handler callbacks
* @init: initialize handler.
* Optional. This gets called when vga_switcheroo is enabled, i.e. when
* two vga clients have registered. It allows the handler to perform
* some delayed initialization that depends on the existence of the
* vga clients. Currently only the radeon and amdgpu drivers use this.
* The return value is ignored
* @switchto: switch outputs to given client.
* Mandatory. For muxless machines this should be a no-op. Returning 0
* denotes success, anything else failure (in which case the switch is
* aborted)
* @switch_ddc: switch DDC lines to given client.
* Optional. Should return the previous DDC owner on success or a
* negative int on failure
* @power_state: cut or reinstate power of given client.
* Optional. The return value is ignored
* @get_client_id: determine if given pci device is integrated or discrete GPU.
* Mandatory
*
* Handler callbacks. The multiplexer itself. The @switchto and @get_client_id
* methods are mandatory, all others may be set to NULL.
*/
struct vga_switcheroo_handler {
int (*init)(void);
int (*switchto)(enum vga_switcheroo_client_id id);
int (*switch_ddc)(enum vga_switcheroo_client_id id);
int (*power_state)(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state);
enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev);
};
 
/**
* struct vga_switcheroo_client_ops - client callbacks
* @set_gpu_state: do the equivalent of suspend/resume for the card.
* Mandatory. This should not cut power to the discrete GPU,
* which is the job of the handler
* @reprobe: poll outputs.
* Optional. This gets called after waking the GPU and switching
* the outputs to it
* @can_switch: check if the device is in a position to switch now.
* Mandatory. The client should return false if a user space process
* has one of its device files open
*
* Client callbacks. A client can be either a GPU or an audio device on a GPU.
* The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be
* set to NULL. For audio clients, the @reprobe member is bogus.
*/
struct vga_switcheroo_client_ops {
void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
void (*reprobe)(struct pci_dev *dev);
bool (*can_switch)(struct pci_dev *dev);
};
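
A skeletal client registration, purely illustrative: the mydrv_* callbacks are placeholders, and only the ops layout and vga_switcheroo_register_client() come from this header.

static void mydrv_set_gpu_state(struct pci_dev *pdev,
				enum vga_switcheroo_state state)
{
	/* Suspend on VGA_SWITCHEROO_OFF, resume on VGA_SWITCHEROO_ON.
	 * Cutting power to a discrete GPU is the handler's job, not ours. */
}

static bool mydrv_can_switch(struct pci_dev *pdev)
{
	/* Return false while user space still has our device files open. */
	return true;
}

static const struct vga_switcheroo_client_ops mydrv_switcheroo_ops = {
	.set_gpu_state = mydrv_set_gpu_state,
	.can_switch = mydrv_can_switch,
	/* .reprobe is optional and left NULL here */
};

static int mydrv_register_switcheroo(struct pci_dev *pdev)
{
	return vga_switcheroo_register_client(pdev, &mydrv_switcheroo_ops, false);
}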
 
#if defined(CONFIG_VGA_SWITCHEROO)
void vga_switcheroo_unregister_client(struct pci_dev *dev);
int vga_switcheroo_register_client(struct pci_dev *dev,
const struct vga_switcheroo_client_ops *ops,
bool driver_power_control);
int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
enum vga_switcheroo_client_id id);
 
void vga_switcheroo_client_fb_set(struct pci_dev *dev,
struct fb_info *info);
 
int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
enum vga_switcheroo_handler_flags_t handler_flags);
void vga_switcheroo_unregister_handler(void);
enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void);
int vga_switcheroo_lock_ddc(struct pci_dev *pdev);
int vga_switcheroo_unlock_ddc(struct pci_dev *pdev);
 
int vga_switcheroo_process_delayed_switch(void);
 
enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
 
void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
 
int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
#else
 
static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
static inline int vga_switcheroo_register_client(struct pci_dev *dev,
const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; }
static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
enum vga_switcheroo_handler_flags_t handler_flags) { return 0; }
static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
enum vga_switcheroo_client_id id) { return 0; }
static inline void vga_switcheroo_unregister_handler(void) {}
static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; }
static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; }
static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
 
static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
 
static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
 
#endif
#endif /* _LINUX_VGA_SWITCHEROO_H_ */
/drivers/include/linux/vmalloc.h
4,6 → 4,7
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <asm/page.h> /* pgprot_t */
#include <linux/rbtree.h>
 
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
/drivers/include/linux/wait.h
312,6 → 312,8
// wait_queue_head_t wait;
//};
 
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 
 
/drivers/include/linux/workqueue.h
11,7 → 11,7
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
 
struct workqueue_struct;
 
239,10 → 239,20
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
 
bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
int queue_delayed_work(struct workqueue_struct *wq,
bool queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
extern bool cancel_work_sync(struct work_struct *work);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 
bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
static inline bool mod_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work(wq, dwork, delay);
}
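
A brief sketch of the delayed-work pattern this fallback supports; the work item and handler are hypothetical, and INIT_DELAYED_WORK plus HZ are assumed to be available in this port as in mainline.

static struct delayed_work poll_work;

static void poll_fn(struct work_struct *work)
{
	/* ... periodic body ... */
	schedule_delayed_work(&poll_work, HZ);	/* re-arm in about one second */
}

static void poll_start(void)
{
	INIT_DELAYED_WORK(&poll_work, poll_fn);
	schedule_delayed_work(&poll_work, 0);	/* first run as soon as possible */
}

static void poll_stop(void)
{
	cancel_delayed_work_sync(&poll_work);
}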
 
 
#define INIT_WORK(_work, _func) \
/drivers/include/syscall.h
488,27 → 488,7
 
void FASTCALL sysSetFramebuffer(void *fb)__asm__("SetFramebuffer");
 
static inline void __iomem *ioremap(u32 offset, size_t size)
{
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100);
}
 
static inline void __iomem *ioremap_nocache(u32 offset, size_t size)
{
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100);
}
 
static inline void __iomem *ioremap_wc(u32 offset, size_t size)
{
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_WRITEC|0x100);
}
 
 
static inline void iounmap(void *addr)
{
FreeKernelSpace(addr);
}
 
static inline void __SysMsgBoardStr(char *text)
{
__asm__ __volatile__(
/drivers/include/uapi/asm/bitsperlong.h
0,0 → 1,13
#ifndef __ASM_X86_BITSPERLONG_H
#define __ASM_X86_BITSPERLONG_H
 
#if defined(__x86_64__) && !defined(__ILP32__)
# define __BITS_PER_LONG 64
#else
# define __BITS_PER_LONG 32
#endif
 
#include <asm-generic/bitsperlong.h>
 
#endif /* __ASM_X86_BITSPERLONG_H */
 
/drivers/include/uapi/asm/byteorder.h
0,0 → 1,6
#ifndef _ASM_X86_BYTEORDER_H
#define _ASM_X86_BYTEORDER_H
 
#include <linux/byteorder/little_endian.h>
 
#endif /* _ASM_X86_BYTEORDER_H */
/drivers/include/uapi/asm/posix_types_32.h
0,0 → 1,25
#ifndef _ASM_X86_POSIX_TYPES_32_H
#define _ASM_X86_POSIX_TYPES_32_H
 
/*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*/
 
typedef unsigned short __kernel_mode_t;
#define __kernel_mode_t __kernel_mode_t
 
typedef unsigned short __kernel_ipc_pid_t;
#define __kernel_ipc_pid_t __kernel_ipc_pid_t
 
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
#define __kernel_uid_t __kernel_uid_t
 
typedef unsigned short __kernel_old_dev_t;
#define __kernel_old_dev_t __kernel_old_dev_t
 
#include <asm-generic/posix_types.h>
 
#endif /* _ASM_X86_POSIX_TYPES_32_H */
/drivers/include/uapi/asm/posix_types_64.h
0,0 → 1,19
#ifndef _ASM_X86_POSIX_TYPES_64_H
#define _ASM_X86_POSIX_TYPES_64_H
 
/*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*/
 
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
#define __kernel_old_uid_t __kernel_old_uid_t
 
typedef unsigned long __kernel_old_dev_t;
#define __kernel_old_dev_t __kernel_old_dev_t
 
#include <asm-generic/posix_types.h>
 
#endif /* _ASM_X86_POSIX_TYPES_64_H */
/drivers/include/uapi/asm/posix_types_x32.h
0,0 → 1,19
#ifndef _ASM_X86_POSIX_TYPES_X32_H
#define _ASM_X86_POSIX_TYPES_X32_H
 
/*
* This file is only used by user-level software, so you need to
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*
* These types should generally match the ones used by the 64-bit kernel,
*
*/
 
typedef long long __kernel_long_t;
typedef unsigned long long __kernel_ulong_t;
#define __kernel_long_t __kernel_long_t
 
#include <asm/posix_types_64.h>
 
#endif /* _ASM_X86_POSIX_TYPES_X32_H */
/drivers/include/uapi/asm/processor-flags.h
118,6 → 118,8
#define X86_CR4_SMEP _BITUL(X86_CR4_SMEP_BIT)
#define X86_CR4_SMAP_BIT 21 /* enable SMAP support */
#define X86_CR4_SMAP _BITUL(X86_CR4_SMAP_BIT)
#define X86_CR4_PKE_BIT 22 /* enable Protection Keys support */
#define X86_CR4_PKE _BITUL(X86_CR4_PKE_BIT)
 
/*
* x86-64 Task Priority Register, CR8
/drivers/include/uapi/asm/sigcontext.h
256,7 → 256,7
__u16 cs;
__u16 gs;
__u16 fs;
__u16 __pad0;
__u16 ss;
__u64 err;
__u64 trapno;
__u64 oldmask;
341,9 → 341,37
__u64 rip;
__u64 eflags; /* RFLAGS */
__u16 cs;
 
/*
* Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
* Linux saved and restored fs and gs in these slots. This
* was counterproductive, as fsbase and gsbase were never
* saved, so arch_prctl was presumably unreliable.
*
* These slots should never be reused without extreme caution:
*
* - Some DOSEMU versions stash fs and gs in these slots manually,
* thus overwriting anything the kernel expects to be preserved
* in these slots.
*
* - If these slots are ever needed for any other purpose,
* there is some risk that very old 64-bit binaries could get
* confused. I doubt that many such binaries still work,
* though, since the same patch in 2.5.64 also removed the
* 64-bit set_thread_area syscall, so it appears that there
* is no TLS API beyond modify_ldt that works in both pre-
* and post-2.5.64 kernels.
*
* If the kernel ever adds explicit fs, gs, fsbase, and gsbase
* save/restore, it will most likely need to be opt-in and use
* different context slots.
*/
__u16 gs;
__u16 fs;
__u16 __pad0;
union {
__u16 ss; /* If UC_SIGCONTEXT_SS */
__u16 __pad0; /* Alias name for old (!UC_SIGCONTEXT_SS) user-space */
};
__u64 err;
__u64 trapno;
__u64 oldmask;
/drivers/include/uapi/asm/stat.h
0,0 → 1,137
#ifndef _ASM_X86_STAT_H
#define _ASM_X86_STAT_H
 
#include <asm/posix_types.h>
 
#define STAT_HAVE_NSEC 1
 
#ifdef __i386__
struct stat {
unsigned long st_dev;
unsigned long st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
unsigned long st_rdev;
unsigned long st_size;
unsigned long st_blksize;
unsigned long st_blocks;
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
unsigned long __unused4;
unsigned long __unused5;
};
 
/* We don't need to memset the whole thing just to initialize the padding */
#define INIT_STRUCT_STAT_PADDING(st) do { \
st.__unused4 = 0; \
st.__unused5 = 0; \
} while (0)
 
#define STAT64_HAS_BROKEN_ST_INO 1
 
/* This matches struct stat64 in glibc2.1, hence the absolutely
* insane amounts of padding around dev_t's.
*/
struct stat64 {
unsigned long long st_dev;
unsigned char __pad0[4];
 
unsigned long __st_ino;
 
unsigned int st_mode;
unsigned int st_nlink;
 
unsigned long st_uid;
unsigned long st_gid;
 
unsigned long long st_rdev;
unsigned char __pad3[4];
 
long long st_size;
unsigned long st_blksize;
 
/* Number of 512-byte blocks allocated. */
unsigned long long st_blocks;
 
unsigned long st_atime;
unsigned long st_atime_nsec;
 
unsigned long st_mtime;
unsigned int st_mtime_nsec;
 
unsigned long st_ctime;
unsigned long st_ctime_nsec;
 
unsigned long long st_ino;
};
 
/* We don't need to memset the whole thing just to initialize the padding */
#define INIT_STRUCT_STAT64_PADDING(st) do { \
memset(&st.__pad0, 0, sizeof(st.__pad0)); \
memset(&st.__pad3, 0, sizeof(st.__pad3)); \
} while (0)
 
#else /* __i386__ */
 
struct stat {
__kernel_ulong_t st_dev;
__kernel_ulong_t st_ino;
__kernel_ulong_t st_nlink;
 
unsigned int st_mode;
unsigned int st_uid;
unsigned int st_gid;
unsigned int __pad0;
__kernel_ulong_t st_rdev;
__kernel_long_t st_size;
__kernel_long_t st_blksize;
__kernel_long_t st_blocks; /* Number of 512-byte blocks allocated. */
 
__kernel_ulong_t st_atime;
__kernel_ulong_t st_atime_nsec;
__kernel_ulong_t st_mtime;
__kernel_ulong_t st_mtime_nsec;
__kernel_ulong_t st_ctime;
__kernel_ulong_t st_ctime_nsec;
__kernel_long_t __unused[3];
};
 
/* We don't need to memset the whole thing just to initialize the padding */
#define INIT_STRUCT_STAT_PADDING(st) do { \
st.__pad0 = 0; \
st.__unused[0] = 0; \
st.__unused[1] = 0; \
st.__unused[2] = 0; \
} while (0)
 
#endif
 
/* for 32bit emulation and 32 bit kernels */
struct __old_kernel_stat {
unsigned short st_dev;
unsigned short st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
unsigned short st_rdev;
#ifdef __i386__
unsigned long st_size;
unsigned long st_atime;
unsigned long st_mtime;
unsigned long st_ctime;
#else
unsigned int st_size;
unsigned int st_atime;
unsigned int st_mtime;
unsigned int st_ctime;
#endif
};
 
#endif /* _ASM_X86_STAT_H */
/drivers/include/uapi/asm/swab.h
0,0 → 1,36
#ifndef _ASM_X86_SWAB_H
#define _ASM_X86_SWAB_H
 
#include <linux/types.h>
#include <linux/compiler.h>
 
static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
{
asm("bswapl %0" : "=r" (val) : "0" (val));
return val;
}
#define __arch_swab32 __arch_swab32
 
static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
{
#ifdef __i386__
union {
struct {
__u32 a;
__u32 b;
} s;
__u64 u;
} v;
v.u = val;
asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b));
return v.u;
#else /* __i386__ */
asm("bswapq %0" : "=r" (val) : "0" (val));
return val;
#endif
}
#define __arch_swab64 __arch_swab64
 
#endif /* _ASM_X86_SWAB_H */
/drivers/include/uapi/asm/types.h
0,0 → 1,6
#ifndef _ASM_X86_TYPES_H
#define _ASM_X86_TYPES_H
 
#include <asm-generic/types.h>
 
#endif /* _ASM_X86_TYPES_H */
/drivers/include/uapi/drm/drm.h
669,6 → 669,7
__u64 value;
};
 
#define DRM_RDWR O_RDWR
#define DRM_CLOEXEC O_CLOEXEC
struct drm_prime_handle {
__u32 handle;
/drivers/include/uapi/drm/drm_mode.h
487,6 → 487,21
__u64 blue;
};
 
struct drm_color_ctm {
/* Conversion matrix in S31.32 format. */
__s64 matrix[9];
};
 
struct drm_color_lut {
/*
* Data is U0.16 fixed point format.
*/
__u16 red;
__u16 green;
__u16 blue;
__u16 reserved;
};
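
To make the fixed-point formats above concrete, a user-space style sketch of converting ordinary values into them; the helper names are invented.

/* S31.32: CTM matrix entries carry 32 fractional bits, so an identity
 * matrix has 1ULL << 32 on its diagonal. */
static inline __s64 ctm_from_double(double v)
{
	return (__s64)(v * 4294967296.0);	/* v * 2^32 */
}

/* U0.16: LUT samples span [0.0, 1.0]; 1.0 maps to 0xffff here. */
static inline __u16 lut_from_double(double v)
{
	if (v <= 0.0)
		return 0;
	if (v >= 1.0)
		return 0xffff;
	return (__u16)(v * 65535.0 + 0.5);
}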
 
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
/drivers/include/uapi/drm/i915_drm.h
772,10 → 772,12
#define I915_EXEC_HANDLE_LUT (1<<12)
 
/** Used for switching BSD rings on the platforms with two BSD rings */
#define I915_EXEC_BSD_MASK (3<<13)
#define I915_EXEC_BSD_DEFAULT (0<<13) /* default ping-pong mode */
#define I915_EXEC_BSD_RING1 (1<<13)
#define I915_EXEC_BSD_RING2 (2<<13)
#define I915_EXEC_BSD_SHIFT (13)
#define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)
/* default ping-pong mode */
#define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)
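
A hedged sketch of how these flags are combined in an execbuffer2 request; the wrapper is invented, while I915_EXEC_RING_MASK and I915_EXEC_BSD are the pre-existing ring-selection definitions from this header.

/* Pin a batch to the second video (BSD) engine instead of letting the
 * kernel ping-pong between the two rings. */
static inline void pick_bsd_ring2(struct drm_i915_gem_execbuffer2 *execbuf)
{
	execbuf->flags &= ~(__u64)(I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK);
	execbuf->flags |= I915_EXEC_BSD | I915_EXEC_BSD_RING2;
}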
 
/** Tell the kernel that the batchbuffer is processed by
* the resource streamer.
812,10 → 814,35
/** Handle of the buffer to check for busy */
__u32 handle;
 
/** Return busy status (1 if busy, 0 if idle).
* The high word is used to indicate on which rings the object
* currently resides:
* 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
/** Return busy status
*
* A return of 0 implies that the object is idle (after
* having flushed any pending activity), and a non-zero return that
* the object is still in-flight on the GPU. (The GPU has not yet
* signaled completion for all pending requests that reference the
* object.)
*
* The returned dword is split into two fields to indicate both
* the engines on which the object is being read, and the
* engine on which it is currently being written (if any).
*
* The low word (bits 0:15) indicates if the object is being written
* to by any engine (there can only be one, as the GEM implicit
* synchronisation rules force writes to be serialised). Only the
* engine for the last write is reported.
*
* The high word (bits 16:31) is a bitmask of which engines are
* currently reading from the object. Multiple engines may be
* reading from the object simultaneously.
*
* The value of each engine is the same as specified in the
* EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
* Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
* the I915_EXEC_RENDER engine for execution, and so it is never
* reported as active itself. Some hardware may have parallel
* execution engines, e.g. multiple media engines, which are
* mapped to the same identifier in the EXECBUFFER2 ioctl and
* so are not separately reported for busyness.
*/
__u32 busy;
};
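
Following the layout described in the comment above, a small decoding sketch; the helper names are invented.

/* Low word: the one engine (I915_EXEC_* value) currently writing the
 * object, or 0 if there is no writer. */
static inline unsigned int busy_write_engine(__u32 busy)
{
	return busy & 0xffff;
}

/* High word: bitmask of reading engines, indexed by I915_EXEC_* value. */
static inline int busy_is_read_by(__u32 busy, unsigned int engine)
{
	return (busy >> 16) & (1u << engine);
}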
/drivers/include/uapi/linux/byteorder/little_endian.h
40,51 → 40,51
#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
 
static inline __le64 __cpu_to_le64p(const __u64 *p)
static __always_inline __le64 __cpu_to_le64p(const __u64 *p)
{
return (__force __le64)*p;
}
static inline __u64 __le64_to_cpup(const __le64 *p)
static __always_inline __u64 __le64_to_cpup(const __le64 *p)
{
return (__force __u64)*p;
}
static inline __le32 __cpu_to_le32p(const __u32 *p)
static __always_inline __le32 __cpu_to_le32p(const __u32 *p)
{
return (__force __le32)*p;
}
static inline __u32 __le32_to_cpup(const __le32 *p)
static __always_inline __u32 __le32_to_cpup(const __le32 *p)
{
return (__force __u32)*p;
}
static inline __le16 __cpu_to_le16p(const __u16 *p)
static __always_inline __le16 __cpu_to_le16p(const __u16 *p)
{
return (__force __le16)*p;
}
static inline __u16 __le16_to_cpup(const __le16 *p)
static __always_inline __u16 __le16_to_cpup(const __le16 *p)
{
return (__force __u16)*p;
}
static inline __be64 __cpu_to_be64p(const __u64 *p)
static __always_inline __be64 __cpu_to_be64p(const __u64 *p)
{
return (__force __be64)__swab64p(p);
}
static inline __u64 __be64_to_cpup(const __be64 *p)
static __always_inline __u64 __be64_to_cpup(const __be64 *p)
{
return __swab64p((__u64 *)p);
}
static inline __be32 __cpu_to_be32p(const __u32 *p)
static __always_inline __be32 __cpu_to_be32p(const __u32 *p)
{
return (__force __be32)__swab32p(p);
}
static inline __u32 __be32_to_cpup(const __be32 *p)
static __always_inline __u32 __be32_to_cpup(const __be32 *p)
{
return __swab32p((__u32 *)p);
}
static inline __be16 __cpu_to_be16p(const __u16 *p)
static __always_inline __be16 __cpu_to_be16p(const __u16 *p)
{
return (__force __be16)__swab16p(p);
}
static inline __u16 __be16_to_cpup(const __be16 *p)
static __always_inline __u16 __be16_to_cpup(const __be16 *p)
{
return __swab16p((__u16 *)p);
}
/drivers/include/uapi/linux/kernel.h
1,7 → 1,7
#ifndef _UAPI_LINUX_KERNEL_H
#define _UAPI_LINUX_KERNEL_H
 
//#include <linux/sysinfo.h>
#include <linux/sysinfo.h>
 
/*
* 'kernel.h' contains some often-used function prototypes etc
9,5 → 9,6
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
 
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
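
For example, rounding a byte count up to whole 512-byte sectors: __KERNEL_DIV_ROUND_UP(1000, 512) evaluates to 2.

#define SECTORS_NEEDED(bytes)	__KERNEL_DIV_ROUND_UP((bytes), 512)	/* illustrative helper */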
 
#endif /* _UAPI_LINUX_KERNEL_H */
/drivers/include/uapi/linux/stat.h
0,0 → 1,45
#ifndef _UAPI_LINUX_STAT_H
#define _UAPI_LINUX_STAT_H
 
 
#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
 
#define S_IFMT 00170000
#define S_IFSOCK 0140000
#define S_IFLNK 0120000
#define S_IFREG 0100000
#define S_IFBLK 0060000
#define S_IFDIR 0040000
#define S_IFCHR 0020000
#define S_IFIFO 0010000
#define S_ISUID 0004000
#define S_ISGID 0002000
#define S_ISVTX 0001000
 
#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK)
#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR)
#define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK)
#define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO)
#define S_ISSOCK(m) (((m) & S_IFMT) == S_IFSOCK)
 
#define S_IRWXU 00700
#define S_IRUSR 00400
#define S_IWUSR 00200
#define S_IXUSR 00100
 
#define S_IRWXG 00070
#define S_IRGRP 00040
#define S_IWGRP 00020
#define S_IXGRP 00010
 
#define S_IRWXO 00007
#define S_IROTH 00004
#define S_IWOTH 00002
#define S_IXOTH 00001
 
#endif
 
 
#endif /* _UAPI_LINUX_STAT_H */
/drivers/include/uapi/linux/stddef.h
1,0 → 0,0
#include <linux/compiler.h>
 
#ifndef __always_inline
#define __always_inline inline
#endif
/drivers/include/uapi/linux/swab.h
45,9 → 45,7
 
static inline __attribute_const__ __u16 __fswab16(__u16 val)
{
#ifdef __HAVE_BUILTIN_BSWAP16__
return __builtin_bswap16(val);
#elif defined (__arch_swab16)
#if defined (__arch_swab16)
return __arch_swab16(val);
#else
return ___constant_swab16(val);
56,9 → 54,7
 
static inline __attribute_const__ __u32 __fswab32(__u32 val)
{
#ifdef __HAVE_BUILTIN_BSWAP32__
return __builtin_bswap32(val);
#elif defined(__arch_swab32)
#if defined(__arch_swab32)
return __arch_swab32(val);
#else
return ___constant_swab32(val);
67,9 → 63,7
 
static inline __attribute_const__ __u64 __fswab64(__u64 val)
{
#ifdef __HAVE_BUILTIN_BSWAP64__
return __builtin_bswap64(val);
#elif defined (__arch_swab64)
#if defined (__arch_swab64)
return __arch_swab64(val);
#elif defined(__SWAB_64_THRU_32__)
__u32 h = val >> 32;
102,28 → 96,40
* __swab16 - return a byteswapped 16-bit value
* @x: value to byteswap
*/
#ifdef __HAVE_BUILTIN_BSWAP16__
#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
#else
#define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
___constant_swab16(x) : \
__fswab16(x))
#endif
 
/**
* __swab32 - return a byteswapped 32-bit value
* @x: value to byteswap
*/
#ifdef __HAVE_BUILTIN_BSWAP32__
#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
#else
#define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swab32(x) : \
__fswab32(x))
#endif
 
/**
* __swab64 - return a byteswapped 64-bit value
* @x: value to byteswap
*/
#ifdef __HAVE_BUILTIN_BSWAP64__
#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
#else
#define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
___constant_swab64(x) : \
__fswab64(x))
#endif
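
A purely illustrative use of the macros above: on a little-endian host, swapping converts big-endian wire values to host order, e.g. __swab32(0x12345678) is 0x78563412. Real code should prefer the be32_to_cpu()/cpu_to_be32() wrappers, which pick the right direction for the build's endianness.

static inline __u32 example_be32_to_host_le(__u32 wire)
{
	/* Only valid on little-endian builds; shown just to demonstrate
	 * that constant arguments fold at compile time via the
	 * ___constant_swab32()/builtin paths selected above. */
	return __swab32(wire);
}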
 
/**
* __swahw32 - return a word-swapped 32-bit value
151,7 → 157,7
* __swab16p - return a byteswapped 16-bit value from a pointer
* @p: pointer to a naturally-aligned 16-bit value
*/
static inline __u16 __swab16p(const __u16 *p)
static __always_inline __u16 __swab16p(const __u16 *p)
{
#ifdef __arch_swab16p
return __arch_swab16p(p);
164,7 → 170,7
* __swab32p - return a byteswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*/
static inline __u32 __swab32p(const __u32 *p)
static __always_inline __u32 __swab32p(const __u32 *p)
{
#ifdef __arch_swab32p
return __arch_swab32p(p);
177,7 → 183,7
* __swab64p - return a byteswapped 64-bit value from a pointer
* @p: pointer to a naturally-aligned 64-bit value
*/
static inline __u64 __swab64p(const __u64 *p)
static __always_inline __u64 __swab64p(const __u64 *p)
{
#ifdef __arch_swab64p
return __arch_swab64p(p);
232,7 → 238,7
* __swab32s - byteswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*/
static inline void __swab32s(__u32 *p)
static __always_inline void __swab32s(__u32 *p)
{
#ifdef __arch_swab32s
__arch_swab32s(p);
245,7 → 251,7
* __swab64s - byteswap a 64-bit value in-place
* @p: pointer to a naturally-aligned 64-bit value
*/
static inline void __swab64s(__u64 *p)
static __always_inline void __swab64s(__u64 *p)
{
#ifdef __arch_swab64s
__arch_swab64s(p);