Subversion Repositories: Kolibri OS

Compare Revisions

Rev 7143 → Rev 7142

/drivers/ddk/linux/find_bit.c
File deleted
/drivers/ddk/linux/dmi_scan.c
File deleted
/drivers/ddk/linux/list_sort.c
145,149 → 145,3
merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
}
EXPORT_SYMBOL(list_sort);
 
#ifdef CONFIG_TEST_LIST_SORT
 
#include <linux/slab.h>
#include <linux/random.h>
 
/*
* The pattern of set bits in the list length determines which cases
* are hit in list_sort().
*/
#define TEST_LIST_LEN (512+128+2) /* not including head */
 
#define TEST_POISON1 0xDEADBEEF
#define TEST_POISON2 0xA324354C
 
struct debug_el {
unsigned int poison1;
struct list_head list;
unsigned int poison2;
int value;
unsigned serial;
};
 
/* Array, containing pointers to all elements in the test list */
static struct debug_el **elts __initdata;
 
static int __init check(struct debug_el *ela, struct debug_el *elb)
{
if (ela->serial >= TEST_LIST_LEN) {
pr_err("error: incorrect serial %d\n", ela->serial);
return -EINVAL;
}
if (elb->serial >= TEST_LIST_LEN) {
pr_err("error: incorrect serial %d\n", elb->serial);
return -EINVAL;
}
if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
pr_err("error: phantom element\n");
return -EINVAL;
}
if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
pr_err("error: bad poison: %#x/%#x\n",
ela->poison1, ela->poison2);
return -EINVAL;
}
if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
pr_err("error: bad poison: %#x/%#x\n",
elb->poison1, elb->poison2);
return -EINVAL;
}
return 0;
}
 
static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct debug_el *ela, *elb;
 
ela = container_of(a, struct debug_el, list);
elb = container_of(b, struct debug_el, list);
 
check(ela, elb);
return ela->value - elb->value;
}
 
static int __init list_sort_test(void)
{
int i, count = 1, err = -ENOMEM;
struct debug_el *el;
struct list_head *cur;
LIST_HEAD(head);
 
pr_debug("start testing list_sort()\n");
 
elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
if (!elts) {
pr_err("error: cannot allocate memory\n");
return err;
}
 
for (i = 0; i < TEST_LIST_LEN; i++) {
el = kmalloc(sizeof(*el), GFP_KERNEL);
if (!el) {
pr_err("error: cannot allocate memory\n");
goto exit;
}
/* force some equivalencies */
el->value = prandom_u32() % (TEST_LIST_LEN / 3);
el->serial = i;
el->poison1 = TEST_POISON1;
el->poison2 = TEST_POISON2;
elts[i] = el;
list_add_tail(&el->list, &head);
}
 
list_sort(NULL, &head, cmp);
 
err = -EINVAL;
for (cur = head.next; cur->next != &head; cur = cur->next) {
struct debug_el *el1;
int cmp_result;
 
if (cur->next->prev != cur) {
pr_err("error: list is corrupted\n");
goto exit;
}
 
cmp_result = cmp(NULL, cur, cur->next);
if (cmp_result > 0) {
pr_err("error: list is not sorted\n");
goto exit;
}
 
el = container_of(cur, struct debug_el, list);
el1 = container_of(cur->next, struct debug_el, list);
if (cmp_result == 0 && el->serial >= el1->serial) {
pr_err("error: order of equivalent elements not "
"preserved\n");
goto exit;
}
 
if (check(el, el1)) {
pr_err("error: element check failed\n");
goto exit;
}
count++;
}
if (head.prev != cur) {
pr_err("error: list is corrupted\n");
goto exit;
}
 
 
if (count != TEST_LIST_LEN) {
pr_err("error: bad list length %d", count);
goto exit;
}
 
err = 0;
exit:
for (i = 0; i < TEST_LIST_LEN; i++)
kfree(elts[i]);
kfree(elts);
return err;
}
late_initcall(list_sort_test);
#endif /* CONFIG_TEST_LIST_SORT */
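The test block above drives list_sort() with 642 (512+128+2) elements so that several different merge depths are exercised, and it checks both ordering and stability (elements that compare equal must keep their original order). For orientation, a minimal caller-side sketch of the same API, assuming the three-argument list_sort(priv, head, cmp) signature used by the test; the element type and field names are illustrative:

#include <linux/list.h>
#include <linux/list_sort.h>

struct item {
    int key;                  /* sort key */
    struct list_head node;    /* linkage into the list being sorted */
};

/* Comparison callback, same contract as cmp() in the test above:
 * negative, zero or positive depending on the relative order of a and b. */
static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
{
    struct item *ia = container_of(a, struct item, node);
    struct item *ib = container_of(b, struct item, node);

    return ia->key - ib->key;
}

static void sort_items(struct list_head *items)
{
    /* stable merge sort over the list_head chain; the priv argument is unused here */
    list_sort(NULL, items, item_cmp);
}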
/drivers/ddk/linux/workqueue.c
117,7 → 117,7
queue_work(wq, &dwork->work);
}
 
bool queue_delayed_work(struct workqueue_struct *wq,
int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay)
{
struct work_struct *work = &dwork->work;
138,12 → 138,12
return queue_delayed_work(system_wq, dwork, delay);
}
 
//bool mod_delayed_work(struct workqueue_struct *wq,
// struct delayed_work *dwork,
// unsigned long delay)
//{
// return queue_delayed_work(wq, dwork, delay);
//}
bool mod_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work(wq, dwork, delay);
}
 
int del_timer(struct timer_list *timer)
{
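This hunk swaps between a commented-out mod_delayed_work() and a simple version that just forwards to queue_delayed_work(). For context, a small sketch of how this delayed-work API is normally driven, assuming the usual INIT_DELAYED_WORK() initializer and msecs_to_jiffies() helper; the handler name and delay are illustrative, and the dwork->work member access matches the code above:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work hw_poll;

static void hw_poll_handler(struct work_struct *work)
{
    /* recover the delayed_work; the member name 'work' matches dwork->work above */
    struct delayed_work *dwork = container_of(work, struct delayed_work, work);

    /* ... poll the hardware (hypothetical body) ... */

    /* re-arm, so the handler runs again in roughly one second */
    queue_delayed_work(system_wq, dwork, msecs_to_jiffies(1000));
}

static void start_polling(void)
{
    INIT_DELAYED_WORK(&hw_poll, hw_poll_handler);
    queue_delayed_work(system_wq, &hw_poll, msecs_to_jiffies(1000));
}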
/drivers/ddk/linux/bitmap.c
7,16 → 7,12
*/
#include <syscall.h>
#include <linux/export.h>
#include <linux/thread_info.h>
//#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
 
#include <asm/page.h>
//#include <asm/uaccess.h>
 
/*
/drivers/ddk/malloc/malloc.c
649,9 → 649,9
#define NO_SEGMENT_TRAVERSAL 1
#define MALLOC_ALIGNMENT ((size_t)8U)
#define CHUNK_OVERHEAD (SIZE_T_SIZE)
#define DEFAULT_GRANULARITY ((size_t)256U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD ((size_t)1024U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD ((size_t)2048U * (size_t)1024U)
#define DEFAULT_GRANULARITY ((size_t)128U * (size_t)1024U)
#define DEFAULT_MMAP_THRESHOLD ((size_t)512U * (size_t)1024U)
#define DEFAULT_TRIM_THRESHOLD ((size_t)1024U * (size_t)1024U)
 
/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
/drivers/include/asm/iomap.h
File deleted
/drivers/include/asm/uaccess_32.h
File deleted
/drivers/include/asm/uaccess.h
File deleted
/drivers/include/asm/cpufeatures.h
File deleted
/drivers/include/asm/smap.h
File deleted
/drivers/include/asm/barrier.h
6,7 → 6,7
 
/*
* Force strict CPU ordering.
* And yes, this might be required on UP too when we're talking
* And yes, this is required on UP too when we're talking
* to devices.
*/
 
31,11 → 31,21
#endif
#define dma_wmb() barrier()
 
#define __smp_mb() mb()
#define __smp_rmb() dma_rmb()
#define __smp_wmb() barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() dma_rmb()
#define smp_wmb() barrier()
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */
 
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
 
#if defined(CONFIG_X86_PPRO_FENCE)
 
/*
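In this hunk the smp_*() barriers either map to real fence instructions or, in the !SMP branch, collapse to compiler barriers, because on a uniprocessor only the compiler can reorder these accesses while device-visible ordering still needs the mandatory mb()/wmb() family. A small producer/consumer sketch of the pairing these macros are meant for; the flag and payload variables are illustrative:

static int payload;
static int ready;

/* Producer: publish the data, then set the flag. */
static void publish(void)
{
    payload = 42;
    smp_wmb();              /* order the payload store before the flag store */
    WRITE_ONCE(ready, 1);
}

/* Consumer: once the flag is seen, the payload read is ordered after it. */
static int consume(void)
{
    if (!READ_ONCE(ready))
        return -1;
    smp_rmb();              /* order the flag load before the payload load */
    return payload;
}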
/drivers/include/asm/cpufeature.h
1,8 → 1,289
/*
* Defines x86 CPU feature bits
*/
#ifndef _ASM_X86_CPUFEATURE_H
#define _ASM_X86_CPUFEATURE_H
 
#include <asm/processor.h>
#ifndef _ASM_X86_REQUIRED_FEATURES_H
#include <asm/required-features.h>
#endif
 
#ifndef _ASM_X86_DISABLED_FEATURES_H
#include <asm/disabled-features.h>
#endif
 
#define NCAPINTS 16 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
 
/*
* Note: If the comment begins with a quoted string, that string is used
* in /proc/cpuinfo instead of the macro name. If the string is "",
* this feature bit is not displayed in /proc/cpuinfo at all.
*/
 
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
/* (plus FCMOVcc, FCOMI with FPU) */
#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
 
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
 
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
 
/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
/* cpu types for specific tunings: */
#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
 
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
 
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
 
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
 
/*
* Auxiliary flags: Linux defined - For features scattered in various
* CPUID levels like 0x6, 0xA etc, word 7.
*
* Reuse free bits when adding new feature flags!
*/
 
#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
 
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
 
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
 
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
 
 
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
 
/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
 
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
 
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
 
/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
 
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
 
/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
 
/*
* BUG word(s)
*/
#define X86_BUG(x) (NCAPINTS*32 + (x))
 
#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
 
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
#include <asm/asm.h>
26,7 → 307,6
CPUID_8000_0008_EBX,
CPUID_6_EAX,
CPUID_8000_000A_EDX,
CPUID_7_ECX,
};
 
#ifdef CONFIG_X86_FEATURE_NAMES
58,14 → 338,7
(((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6 )) || \
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7 )) || \
(((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8 )) || \
(((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9 )) || \
(((bit)>>5)==10 && (1UL<<((bit)&31) & REQUIRED_MASK10)) || \
(((bit)>>5)==11 && (1UL<<((bit)&31) & REQUIRED_MASK11)) || \
(((bit)>>5)==12 && (1UL<<((bit)&31) & REQUIRED_MASK12)) || \
(((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK13)) || \
(((bit)>>5)==14 && (1UL<<((bit)&31) & REQUIRED_MASK14)) || \
(((bit)>>5)==15 && (1UL<<((bit)&31) & REQUIRED_MASK15)) || \
(((bit)>>5)==16 && (1UL<<((bit)&31) & REQUIRED_MASK16)) )
(((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )
 
#define DISABLED_MASK_BIT_SET(bit) \
( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0 )) || \
77,14 → 350,7
(((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6 )) || \
(((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7 )) || \
(((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8 )) || \
(((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9 )) || \
(((bit)>>5)==10 && (1UL<<((bit)&31) & DISABLED_MASK10)) || \
(((bit)>>5)==11 && (1UL<<((bit)&31) & DISABLED_MASK11)) || \
(((bit)>>5)==12 && (1UL<<((bit)&31) & DISABLED_MASK12)) || \
(((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK13)) || \
(((bit)>>5)==14 && (1UL<<((bit)&31) & DISABLED_MASK14)) || \
(((bit)>>5)==15 && (1UL<<((bit)&31) & DISABLED_MASK15)) || \
(((bit)>>5)==16 && (1UL<<((bit)&31) & DISABLED_MASK16)) )
(((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) )
 
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
103,7 → 369,8
* is not relevant.
*/
#define cpu_feature_enabled(bit) \
(__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : static_cpu_has(bit))
(__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : \
cpu_has(&boot_cpu_data, bit))
 
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
 
139,20 → 406,107
#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
/*
* Do not add any more of those clumsy macros - use static_cpu_has() for
* Do not add any more of those clumsy macros - use static_cpu_has_safe() for
* fast paths and boot_cpu_has() otherwise!
*/
 
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
#if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS)
extern void warn_pre_alternatives(void);
extern bool __static_cpu_has_safe(u16 bit);
 
/*
* Static testing of CPU features. Used the same as boot_cpu_has().
* These will statically patch the target code for additional
* performance.
* These are only valid after alternatives have run, but will statically
* patch the target code for additional performance.
*/
static __always_inline __pure bool _static_cpu_has(u16 bit)
static __always_inline __pure bool __static_cpu_has(u16 bit)
{
asm_volatile_goto("1: jmp 6f\n"
#ifdef CC_HAVE_ASM_GOTO
 
#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
 
/*
* Catch too early usage of this before alternatives
* have run.
*/
asm_volatile_goto("1: jmp %l[t_warn]\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n"
" .long 0\n" /* no replacement */
" .word %P0\n" /* 1: do replace */
" .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
/* skipping size check since replacement size = 0 */
: : "i" (X86_FEATURE_ALWAYS) : : t_warn);
 
#endif
 
asm_volatile_goto("1: jmp %l[t_no]\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n"
" .long 0\n" /* no replacement */
" .word %P0\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
/* skipping size check since replacement size = 0 */
: : "i" (bit) : : t_no);
return true;
t_no:
return false;
 
#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
t_warn:
warn_pre_alternatives();
return false;
#endif
 
#else /* CC_HAVE_ASM_GOTO */
 
u8 flag;
/* Open-coded due to __stringify() in ALTERNATIVE() */
asm volatile("1: movb $0,%0\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n"
" .long 3f - .\n"
" .word %P1\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
".previous\n"
".section .altinstr_replacement,\"ax\"\n"
"3: movb $1,%0\n"
"4:\n"
".previous\n"
: "=qm" (flag) : "i" (bit));
return flag;
 
#endif /* CC_HAVE_ASM_GOTO */
}
 
#define static_cpu_has(bit) \
( \
__builtin_constant_p(boot_cpu_has(bit)) ? \
boot_cpu_has(bit) : \
__builtin_constant_p(bit) ? \
__static_cpu_has(bit) : \
boot_cpu_has(bit) \
)
 
static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
{
#ifdef CC_HAVE_ASM_GOTO
asm_volatile_goto("1: jmp %l[t_dynamic]\n"
"2:\n"
".skip -(((5f-4f) - (2b-1b)) > 0) * "
"((5f-4f) - (2b-1b)),0x90\n"
"3:\n"
176,34 → 530,66
" .byte 0\n" /* repl len */
" .byte 0\n" /* pad len */
".previous\n"
".section .altinstr_aux,\"ax\"\n"
"6:\n"
" testb %[bitnum],%[cap_byte]\n"
" jnz %l[t_yes]\n"
" jmp %l[t_no]\n"
".previous\n"
: : "i" (bit), "i" (X86_FEATURE_ALWAYS),
[bitnum] "i" (1 << (bit & 7)),
[cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
: : t_yes, t_no);
t_yes:
: : "i" (bit), "i" (X86_FEATURE_ALWAYS)
: : t_dynamic, t_no);
return true;
t_no:
return false;
t_dynamic:
return __static_cpu_has_safe(bit);
#else
u8 flag;
/* Open-coded due to __stringify() in ALTERNATIVE() */
asm volatile("1: movb $2,%0\n"
"2:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 3f - .\n" /* repl offset */
" .word %P2\n" /* always replace */
" .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
".previous\n"
".section .altinstr_replacement,\"ax\"\n"
"3: movb $0,%0\n"
"4:\n"
".previous\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 5f - .\n" /* repl offset */
" .word %P1\n" /* feature bit */
" .byte 4b - 3b\n" /* src len */
" .byte 6f - 5f\n" /* repl len */
" .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
".previous\n"
".section .altinstr_replacement,\"ax\"\n"
"5: movb $1,%0\n"
"6:\n"
".previous\n"
: "=qm" (flag)
: "i" (bit), "i" (X86_FEATURE_ALWAYS));
return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
#endif /* CC_HAVE_ASM_GOTO */
}
 
#define static_cpu_has(bit) \
#define static_cpu_has_safe(bit) \
( \
__builtin_constant_p(boot_cpu_has(bit)) ? \
boot_cpu_has(bit) : \
_static_cpu_has(bit) \
_static_cpu_has_safe(bit) \
)
#else
/*
* Fall back to dynamic for gcc versions which don't support asm goto. Should be
* a minority now anyway.
* gcc 3.x is too stupid to do the static test; fall back to dynamic.
*/
#define static_cpu_has(bit) boot_cpu_has(bit)
#define static_cpu_has_safe(bit) boot_cpu_has(bit)
#endif
 
#define cpu_has_bug(c, bit) cpu_has(c, (bit))
211,6 → 597,7
#define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit))
 
#define static_cpu_has_bug(bit) static_cpu_has((bit))
#define static_cpu_has_bug_safe(bit) static_cpu_has_safe((bit))
#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit))
 
#define MAX_CPU_FEATURES (NCAPINTS * 32)
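Whichever variant of this header is used, callers see the same interface: boot_cpu_has() for ordinary code and static_cpu_has()/static_cpu_has_safe() for hot paths, where the alternatives machinery patches the test down to a single branch. A minimal caller-side sketch, assuming an SSE2 feature check; sse2_copy() is a hypothetical optimized routine:

static void copy_block(void *dst, const void *src, size_t len)
{
    if (static_cpu_has(X86_FEATURE_XMM2)) {
        /* patched to a direct jump once alternatives have run */
        sse2_copy(dst, src, len);       /* hypothetical SSE2 helper */
    } else {
        memcpy(dst, src, len);
    }
}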
/drivers/include/asm/fixmap.h
138,7 → 138,7
extern int fixmaps_set;
 
extern pte_t *kmap_pte;
#define kmap_prot PAGE_KERNEL
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
 
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
/drivers/include/asm/io.h
152,7 → 152,7
* If the area you are trying to map is a PCI BAR you should have a
* look at pci_iomap().
*/
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
//extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
#define ioremap_uc ioremap_uc
 
163,12 → 163,12
/*
* The default ioremap() behavior is non-cached:
*/
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
//static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
//{
// return ioremap_nocache(offset, size);
//}
 
extern void iounmap(volatile void __iomem *addr);
//extern void iounmap(volatile void __iomem *addr);
 
extern void set_iounmap_nonlazy(void);
 
296,7 → 296,7
 
extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
enum page_cache_mode pcm);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
//extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
 
extern bool is_early_ioremap_ptep(pte_t *ptep);
/drivers/include/asm/msr-index.h
1,12 → 1,7
#ifndef _ASM_X86_MSR_INDEX_H
#define _ASM_X86_MSR_INDEX_H
 
/*
* CPU model specific register (MSR) numbers.
*
* Do not add new entries to this file unless the definitions are shared
* between multiple compilation units.
*/
/* CPU model specific register (MSR) numbers */
 
/* x86-64 specific MSRs */
#define MSR_EFER 0xc0000080 /* extended feature register */
167,14 → 162,6
#define MSR_PKG_C9_RESIDENCY 0x00000631
#define MSR_PKG_C10_RESIDENCY 0x00000632
 
/* Interrupt Response Limit */
#define MSR_PKGC3_IRTL 0x0000060a
#define MSR_PKGC6_IRTL 0x0000060b
#define MSR_PKGC7_IRTL 0x0000060c
#define MSR_PKGC8_IRTL 0x00000633
#define MSR_PKGC9_IRTL 0x00000634
#define MSR_PKGC10_IRTL 0x00000635
 
/* Run Time Average Power Limiting (RAPL) Interface */
 
#define MSR_RAPL_POWER_UNIT 0x00000606
198,7 → 185,6
#define MSR_PP1_ENERGY_STATUS 0x00000641
#define MSR_PP1_POLICY 0x00000642
 
/* Config TDP MSRs */
#define MSR_CONFIG_TDP_NOMINAL 0x00000648
#define MSR_CONFIG_TDP_LEVEL_1 0x00000649
#define MSR_CONFIG_TDP_LEVEL_2 0x0000064A
219,6 → 205,13
#define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0
#define MSR_RING_PERF_LIMIT_REASONS 0x000006B1
 
/* Config TDP MSRs */
#define MSR_CONFIG_TDP_NOMINAL 0x00000648
#define MSR_CONFIG_TDP_LEVEL1 0x00000649
#define MSR_CONFIG_TDP_LEVEL2 0x0000064A
#define MSR_CONFIG_TDP_CONTROL 0x0000064B
#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C
 
/* Hardware P state interface */
#define MSR_PPERF 0x0000064e
#define MSR_PERF_LIMIT_REASONS 0x0000064f
237,10 → 230,10
#define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11)
 
/* IA32_HWP_CAPABILITIES */
#define HWP_HIGHEST_PERF(x) (((x) >> 0) & 0xff)
#define HWP_GUARANTEED_PERF(x) (((x) >> 8) & 0xff)
#define HWP_MOSTEFFICIENT_PERF(x) (((x) >> 16) & 0xff)
#define HWP_LOWEST_PERF(x) (((x) >> 24) & 0xff)
#define HWP_HIGHEST_PERF(x) (x & 0xff)
#define HWP_GUARANTEED_PERF(x) ((x & (0xff << 8)) >>8)
#define HWP_MOSTEFFICIENT_PERF(x) ((x & (0xff << 16)) >>16)
#define HWP_LOWEST_PERF(x) ((x & (0xff << 24)) >>24)
 
/* IA32_HWP_REQUEST */
#define HWP_MIN_PERF(x) (x & 0xff)
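The two formulations of the HWP_*_PERF() macros above extract the same byte-wide fields from IA32_HWP_CAPABILITIES; only the mask/shift order differs. A worked example with a hypothetical raw MSR value, runnable as plain user-space C:

#include <stdio.h>
#include <stdint.h>

#define HWP_HIGHEST_PERF(x)        (((x) >>  0) & 0xff)
#define HWP_GUARANTEED_PERF(x)     (((x) >>  8) & 0xff)
#define HWP_MOSTEFFICIENT_PERF(x)  (((x) >> 16) & 0xff)
#define HWP_LOWEST_PERF(x)         (((x) >> 24) & 0xff)

int main(void)
{
    uint32_t caps = 0x0a10c0e0;   /* hypothetical IA32_HWP_CAPABILITIES contents */

    /* prints: highest 224, guaranteed 192, most efficient 16, lowest 10 */
    printf("highest %u, guaranteed %u, most efficient %u, lowest %u\n",
           HWP_HIGHEST_PERF(caps), HWP_GUARANTEED_PERF(caps),
           HWP_MOSTEFFICIENT_PERF(caps), HWP_LOWEST_PERF(caps));
    return 0;
}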
/drivers/include/asm/msr.h
42,6 → 42,14
struct saved_msr *array;
};
 
static inline unsigned long long native_read_tscp(unsigned int *aux)
{
unsigned long low, high;
asm volatile(".byte 0x0f,0x01,0xf9"
: "=a" (low), "=d" (high), "=c" (*aux));
return low | ((u64)high << 32);
}
 
/*
* both i386 and x86_64 returns 64-bit value in edx:eax, but gcc's "A"
* constraint has different meanings. For i386, "A" means exactly
59,34 → 67,11
#define EAX_EDX_RET(val, low, high) "=A" (val)
#endif
 
#ifdef CONFIG_TRACEPOINTS
/*
* Be very careful with includes. This header is prone to include loops.
*/
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>
 
extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
#endif
 
static inline unsigned long long native_read_msr(unsigned int msr)
{
DECLARE_ARGS(val, low, high);
 
asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
if (msr_tracepoint_active(__tracepoint_read_msr))
do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
return EAX_EDX_VAL(val, low, high);
}
 
103,8 → 88,6
_ASM_EXTABLE(2b, 3b)
: [err] "=r" (*err), EAX_EDX_RET(val, low, high)
: "c" (msr), [fault] "i" (-EIO));
if (msr_tracepoint_active(__tracepoint_read_msr))
do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
return EAX_EDX_VAL(val, low, high);
}
 
112,8 → 95,6
unsigned low, unsigned high)
{
asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}
 
/* Can be uninlined because referenced by paravirt */
131,8 → 112,6
: "c" (msr), "0" (low), "d" (high),
[fault] "i" (-EIO)
: "memory");
if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), err);
return err;
}
 
157,42 → 136,11
return EAX_EDX_VAL(val, low, high);
}
 
/**
* rdtsc_ordered() - read the current TSC in program order
*
* rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
* It is ordered like a load to a global in-memory counter. It should
* be impossible to observe non-monotonic rdtsc_unordered() behavior
* across multiple CPUs as long as the TSC is synced.
*/
static __always_inline unsigned long long rdtsc_ordered(void)
{
/*
* The RDTSC instruction is not ordered relative to memory
* access. The Intel SDM and the AMD APM are both vague on this
* point, but empirically an RDTSC instruction can be
* speculatively executed before prior loads. An RDTSC
* immediately after an appropriate barrier appears to be
* ordered as a normal load, that is, it provides the same
* ordering guarantees as reading from a global memory location
* that some other imaginary CPU is updating continuously with a
* time stamp.
*/
alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
"lfence", X86_FEATURE_LFENCE_RDTSC);
return rdtsc();
}
 
/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now) do { (now) = rdtsc_ordered(); } while (0)
 
static inline unsigned long long native_read_pmc(int counter)
{
DECLARE_ARGS(val, low, high);
 
asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
if (msr_tracepoint_active(__tracepoint_rdpmc))
do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
return EAX_EDX_VAL(val, low, high);
}
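rdtsc_ordered(), present only on one side of this hunk, wraps RDTSC with an MFENCE/LFENCE alternative so the timestamp read cannot be speculated ahead of earlier loads. A minimal timing sketch using it; do_the_work() is a hypothetical function being measured:

static u64 time_one_call(void)
{
    u64 t0, t1;

    t0 = rdtsc_ordered();     /* ordered like a load, per the comment above */
    do_the_work();            /* hypothetical workload */
    t1 = rdtsc_ordered();

    return t1 - t0;           /* elapsed TSC ticks; meaningful while the TSC is synced */
}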
 
/drivers/include/asm/pgtable.h
487,7 → 487,18
#endif
 
#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
if (pte_flags(a) & _PAGE_PRESENT)
return true;
 
if ((pte_flags(a) & _PAGE_PROTNONE) &&
mm_tlb_flush_pending(mm))
return true;
 
return false;
}
 
static inline int pte_hidden(pte_t pte)
{
return pte_flags(pte) & _PAGE_HIDDEN;
/drivers/include/asm/pgtable_types.h
20,18 → 20,13
#define _PAGE_BIT_SOFTW2 10 /* " */
#define _PAGE_BIT_SOFTW3 11 /* " */
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_SOFTW4 58 /* available for programmer */
#define _PAGE_BIT_PKEY_BIT0 59 /* Protection Keys, bit 1/4 */
#define _PAGE_BIT_PKEY_BIT1 60 /* Protection Keys, bit 2/4 */
#define _PAGE_BIT_PKEY_BIT2 61 /* Protection Keys, bit 3/4 */
#define _PAGE_BIT_PKEY_BIT3 62 /* Protection Keys, bit 4/4 */
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
 
#define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1
#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_SOFTW4 58 /* available for programmer */
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
 
/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
52,24 → 47,8
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0)
#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1)
#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT2)
#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT3)
#else
#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 0))
#endif
#define __HAVE_ARCH_PTE_SPECIAL
 
#define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \
_PAGE_PKEY_BIT1 | \
_PAGE_PKEY_BIT2 | \
_PAGE_PKEY_BIT3)
 
#ifdef CONFIG_KMEMCHECK
#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
120,12 → 99,7
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
_PAGE_DIRTY)
 
/*
* Set of bits not changed in pte_modify. The pte's
* protection key is treated like _PAGE_RW, for
* instance, and is *not* included in this mask since
* pte_modify() does modify it.
*/
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_SOFT_DIRTY)
241,10 → 215,7
/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
 
/*
* Extracts the flags from a (pte|pmd|pud|pgd)val_t
* This includes the protection key value.
*/
/* Extracts the flags from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
 
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
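PTE_PFN_MASK and PTE_FLAGS_MASK split a (pte|pmd|pud|pgd)val_t into the physical frame number and the attribute bits (in the longer variant of this header the flags half also covers the protection-key bits). A small sketch of that split, assuming the usual 4 KiB pages (PAGE_SHIFT == 12); the printk and sample usage are illustrative:

static void decode_pte(pteval_t val)
{
    unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;   /* physical frame number */
    pteval_t flags    = val & PTE_FLAGS_MASK;                 /* everything except the PFN */

    if (flags & _PAGE_PRESENT)
        printk("pfn %lx flags %llx\n", pfn, (unsigned long long)flags);
}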
/drivers/include/asm/timex.h
1,7 → 1,7
#ifndef _ASM_X86_TIMEX_H
#define _ASM_X86_TIMEX_H
 
#include <asm/processor.h>
//#include <asm/processor.h>
//#include <asm/tsc.h>
 
/* Assume we use the PIT time source for the clock tick */
/drivers/include/asm/pvclock.h
65,5 → 65,10
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
#define PVCLOCK_VSYSCALL_NR_PAGES (((NR_CPUS-1)/(PAGE_SIZE/PVTI_SIZE))+1)
 
int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
int size);
struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu);
 
#endif /* _ASM_X86_PVCLOCK_H */
/drivers/include/asm/scatterlist.h
0,0 → 1,39
#ifndef __ASM_GENERIC_SCATTERLIST_H
#define __ASM_GENERIC_SCATTERLIST_H
 
#include <linux/types.h>
 
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
unsigned long sg_magic;
#endif
unsigned long page_link;
unsigned int offset;
unsigned int length;
dma_addr_t dma_address;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
unsigned int dma_length;
#endif
};
 
/*
* These macros should be used after a dma_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries pci_map_sg
* returns, or alternatively stop on the first sg_dma_len(sg) which
* is 0.
*/
#define sg_dma_address(sg) ((sg)->dma_address)
 
#ifdef CONFIG_NEED_SG_DMA_LENGTH
#define sg_dma_len(sg) ((sg)->dma_length)
#else
#define sg_dma_len(sg) ((sg)->length)
#endif
 
#define ARCH_HAS_SG_CHAIN
 
int dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, int dir);
 
#endif /* __ASM_GENERIC_SCATTERLIST_H */
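The comment in this header states the contract: after dma_map_sg() only the entry count it returned is valid (or stop at the first sg_dma_len() of 0), and the device should be programmed from sg_dma_address()/sg_dma_len(). A small sketch of that pattern, assuming the for_each_sg() iterator and DMA_TO_DEVICE constant from the generic scatterlist API; hw_queue_segment() is a hypothetical device-specific helper:

static void program_dma(struct device *dev, struct scatterlist *sgl, int nents)
{
    struct scatterlist *sg;
    int i, mapped;

    mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
    if (mapped <= 0)
        return;

    /* only the 'mapped' leading entries are valid after mapping */
    for_each_sg(sgl, sg, mapped, i) {
        dma_addr_t bus = sg_dma_address(sg);
        unsigned int len = sg_dma_len(sg);

        hw_queue_segment(bus, len);   /* hypothetical: hand one segment to the device */
    }
}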
/drivers/include/asm/topology.h
119,7 → 119,6
 
extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
#define topology_logical_package_id(cpu) (cpu_data(cpu).logical_proc_id)
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
 
126,16 → 125,6
#ifdef ENABLE_TOPO_DEFINES
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
 
extern unsigned int __max_logical_packages;
#define topology_max_packages() (__max_logical_packages)
int topology_update_package_map(unsigned int apicid, unsigned int cpu);
extern int topology_phys_to_logical_pkg(unsigned int pkg);
#else
#define topology_max_packages() (1)
static inline int
topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
#endif
 
static inline void arch_fix_phys_package_id(int num, u32 slot)
/drivers/include/asm/dma-mapping.h
46,6 → 46,8
#define HAVE_ARCH_DMA_SUPPORTED 1
extern int dma_supported(struct device *hwdev, u64 mask);
 
#include <asm-generic/dma-mapping-common.h>
 
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs);
/drivers/include/asm/pci.h
20,9 → 20,6
#ifdef CONFIG_X86_64
void *iommu; /* IOMMU private data */
#endif
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void *fwnode; /* IRQ domain for MSI assignment */
#endif
};
 
extern int pci_routeirq;
35,7 → 32,6
static inline int pci_domain_nr(struct pci_bus *bus)
{
struct pci_sysdata *sd = bus->sysdata;
 
return sd->domain;
}
 
45,17 → 41,6
}
#endif
 
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
static inline void *_pci_root_bus_fwnode(struct pci_bus *bus)
{
struct pci_sysdata *sd = bus->sysdata;
 
return sd->fwnode;
}
 
#define pci_root_bus_fwnode _pci_root_bus_fwnode
#endif
 
/* Can be used to override the logic in pci_scan_bus for skipping
already-configured bus numbers - to be used for buggy BIOSes
or architectures with incomplete PCI setup by the loader */
120,6 → 105,9
#include <asm/pci_64.h>
#endif
 
/* implement the pci_ DMA API in terms of the generic device dma_ one */
#include <asm-generic/pci-dma-compat.h>
 
/* generic pci stuff */
#include <asm-generic/pci.h>
 
/drivers/include/asm/rwsem.h
25,7 → 25,7
* This should be totally fair - if anything is waiting, a process that wants a
* lock will go to the back of the queue. When the currently active lock is
* released, if there's a writer at the front of the queue, then that and only
* that will be woken up; if there's a bunch of consecutive readers at the
* that will be woken up; if there's a bunch of consequtive readers at the
* front, then they'll all be woken up, but no other readers will be.
*/
 
/drivers/include/asm/alternative.h
152,6 → 152,12
".popsection"
 
/*
* This must be included *after* the definition of ALTERNATIVE due to
* <asm/arch_hweight.h>
*/
#include <asm/cpufeature.h>
 
/*
* Alternative instructions for different CPU types or capabilities.
*
* This allows to use optimized instructions even on generic binary
/drivers/include/asm/arch_hweight.h
1,8 → 1,6
#ifndef _ASM_X86_HWEIGHT_H
#define _ASM_X86_HWEIGHT_H
 
#include <asm/cpufeatures.h>
 
#ifdef CONFIG_64BIT
/* popcnt %edi, %eax -- redundant REX prefix for alignment */
#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
/drivers/include/asm/asm.h
44,22 → 44,19
 
/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
# define _ASM_EXTABLE(from,to) \
.pushsection "__ex_table","a" ; \
.balign 4 ; \
.balign 8 ; \
.long (from) - . ; \
.long (to) - . ; \
.long (handler) - . ; \
.popsection
 
# define _ASM_EXTABLE(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
 
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
 
# define _ASM_EXTABLE_EX(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
.pushsection "__ex_table","a" ; \
.balign 8 ; \
.long (from) - . ; \
.long (to) - . + 0x7ffffff0 ; \
.popsection
 
# define _ASM_NOKPROBE(entry) \
.pushsection "_kprobe_blacklist","aw" ; \
92,24 → 89,19
.endm
 
#else
# define _EXPAND_EXTABLE_HANDLE(x) #x
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
# define _ASM_EXTABLE(from,to) \
" .pushsection \"__ex_table\",\"a\"\n" \
" .balign 4\n" \
" .balign 8\n" \
" .long (" #from ") - .\n" \
" .long (" #to ") - .\n" \
" .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n" \
" .popsection\n"
 
# define _ASM_EXTABLE(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
 
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
 
# define _ASM_EXTABLE_EX(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
 
" .pushsection \"__ex_table\",\"a\"\n" \
" .balign 8\n" \
" .long (" #from ") - .\n" \
" .long (" #to ") - . + 0x7ffffff0\n" \
" .popsection\n"
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif
 
/drivers/include/asm/cacheflush.h
4,7 → 4,6
/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>
#include <asm/uaccess.h>
 
/*
* The set_memory_* API can be used to change various attributes of a virtual
114,10 → 113,16
 
#define mmio_flush_range(addr, size) clflush_cache_range(addr, size)
 
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif
 
#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
/drivers/include/asm/fpu/types.h
108,8 → 108,6
XFEATURE_OPMASK,
XFEATURE_ZMM_Hi256,
XFEATURE_Hi16_ZMM,
XFEATURE_PT_UNIMPLEMENTED_SO_FAR,
XFEATURE_PKRU,
 
XFEATURE_MAX,
};
122,7 → 120,6
#define XFEATURE_MASK_OPMASK (1 << XFEATURE_OPMASK)
#define XFEATURE_MASK_ZMM_Hi256 (1 << XFEATURE_ZMM_Hi256)
#define XFEATURE_MASK_Hi16_ZMM (1 << XFEATURE_Hi16_ZMM)
#define XFEATURE_MASK_PKRU (1 << XFEATURE_PKRU)
 
#define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
#define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK \
215,15 → 212,6
struct reg_512_bit hi16_zmm[16];
} __packed;
 
/*
* State component 9: 32-bit PKRU register. The state is
* 8 bytes long but only 4 bytes is used currently.
*/
struct pkru_state {
u32 pkru;
u32 pad;
} __packed;
 
struct xstate_header {
u64 xfeatures;
u64 xcomp_bv;
/drivers/include/asm/processor.h
13,7 → 13,7
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
24,6 → 24,7
#include <asm/fpu/types.h>
 
#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
128,10 → 129,10
u16 booted_cores;
/* Physical processor id: */
u16 phys_proc_id;
/* Logical processor id: */
u16 logical_proc_id;
/* Core id: */
u16 cpu_core_id;
/* Compute unit id */
u8 compute_unit_id;
/* Index into per_cpu list: */
u16 cpu_index;
u32 microcode;
295,13 → 296,10
*/
unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
 
#ifdef CONFIG_X86_32
/*
* Space for the temporary SYSENTER stack.
* Space for the temporary SYSENTER stack:
*/
unsigned long SYSENTER_stack_canary;
unsigned long SYSENTER_stack[64];
#endif
 
} ____cacheline_aligned;
 
662,9 → 660,10
*/
static inline void prefetch(const void *x)
{
alternative_input(BASE_PREFETCH, "prefetchnta %P1",
alternative_input(BASE_PREFETCH,
"prefetchnta (%1)",
X86_FEATURE_XMM,
"m" (*(const char *)x));
"r" (x));
}
 
/*
674,9 → 673,10
*/
static inline void prefetchw(const void *x)
{
alternative_input(BASE_PREFETCH, "prefetchw %P1",
X86_FEATURE_3DNOWPREFETCH,
"m" (*(const char *)x));
alternative_input(BASE_PREFETCH,
"prefetchw (%1)",
X86_FEATURE_3DNOW,
"r" (x));
}
 
static inline void spin_lock_prefetch(const void *x)
757,7 → 757,7
* Return saved PC of a blocked thread.
* What is this good for? it will be always the scheduler or ret_from_fork.
*/
#define thread_saved_pc(t) READ_ONCE_NOCHECK(*(unsigned long *)((t)->thread.sp - 8))
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
 
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);
/drivers/include/asm/special_insns.h
4,8 → 4,6
 
#ifdef __KERNEL__
 
#include <asm/nops.h>
 
static inline void native_clts(void)
{
asm volatile("clts");
98,44 → 96,6
}
#endif
 
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline u32 __read_pkru(void)
{
u32 ecx = 0;
u32 edx, pkru;
 
/*
* "rdpkru" instruction. Places PKRU contents in to EAX,
* clears EDX and requires that ecx=0.
*/
asm volatile(".byte 0x0f,0x01,0xee\n\t"
: "=a" (pkru), "=d" (edx)
: "c" (ecx));
return pkru;
}
 
static inline void __write_pkru(u32 pkru)
{
u32 ecx = 0, edx = 0;
 
/*
* "wrpkru" instruction. Loads contents in EAX to PKRU,
* requires that ecx = edx = 0.
*/
asm volatile(".byte 0x0f,0x01,0xef\n\t"
: : "a" (pkru), "c"(ecx), "d"(edx));
}
#else
static inline u32 __read_pkru(void)
{
return 0;
}
 
static inline void __write_pkru(u32 pkru)
{
}
#endif
 
static inline void native_wbinvd(void)
{
asm volatile("wbinvd": : :"memory");
/drivers/include/asm/atomic_32.h
0,0 → 1,441
#ifndef _ASM_X86_ATOMIC_32_H
#define _ASM_X86_ATOMIC_32_H
 
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/cmpxchg.h>
 
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
 
#define ATOMIC_INIT(i) { (i) }
 
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
static inline int atomic_read(const atomic_t *v)
{
return v->counter;
}
 
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic_set(atomic_t *v, int i)
{
v->counter = i;
}
 
/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v.
*/
static inline void atomic_add(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}
 
/**
* atomic_sub - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v.
*/
static inline void atomic_sub(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}
 
/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;
 
asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
: "+m" (v->counter), "=qm" (c)
: "ir" (i) : "memory");
return c;
}
 
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
static inline void atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter));
}
 
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
static inline void atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
: "+m" (v->counter));
}
 
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
 
asm volatile(LOCK_PREFIX "decl %0; sete %1"
: "+m" (v->counter), "=qm" (c)
: : "memory");
return c != 0;
}
 
/**
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static inline int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;
 
asm volatile(LOCK_PREFIX "incl %0; sete %1"
: "+m" (v->counter), "=qm" (c)
: : "memory");
return c != 0;
}
 
/**
* atomic_add_negative - add and test if negative
* @v: pointer of type atomic_t
* @i: integer value to add
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;
 
asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
: "+m" (v->counter), "=qm" (c)
: "ir" (i) : "memory");
return c;
}
 
/**
* atomic_add_return - add integer and return
* @v: pointer of type atomic_t
* @i: integer value to add
*
* Atomically adds @i to @v and returns @i + @v
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
int __i;
#ifdef CONFIG_M386
unsigned long flags;
if (unlikely(boot_cpu_data.x86 <= 3))
goto no_xadd;
#endif
/* Modern 486+ processor */
__i = i;
asm volatile(LOCK_PREFIX "xaddl %0, %1"
: "+r" (i), "+m" (v->counter)
: : "memory");
return i + __i;
 
#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
local_irq_save(flags);
__i = atomic_read(v);
atomic_set(v, i + __i);
local_irq_restore(flags);
return i + __i;
#endif
}
 
/**
* atomic_sub_return - subtract integer and return
* @v: pointer of type atomic_t
* @i: integer value to subtract
*
* Atomically subtracts @i from @v and returns @v - @i
*/
static inline int atomic_sub_return(int i, atomic_t *v)
{
return atomic_add_return(-i, v);
}
 
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
return cmpxchg(&v->counter, old, new);
}
 
static inline int atomic_xchg(atomic_t *v, int new)
{
return xchg(&v->counter, new);
}
 
/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
 
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
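/*
 * Illustrative sketch (not part of the original header): taking a reference
 * only while an object is still live, the typical use of
 * atomic_inc_not_zero(). The entry structure is hypothetical.
 */
struct demo_entry {
	atomic_t refs;
};

static inline int demo_entry_tryget(struct demo_entry *e)
{
	/* Fails once the count has already dropped to zero (object is dying). */
	return atomic_inc_not_zero(&e->refs);
}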
 
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
 
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
asm volatile(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)), "m" (*(addr)) : "memory")
 
#define atomic_set_mask(mask, addr) \
asm volatile(LOCK_PREFIX "orl %0,%1" \
: : "r" (mask), "m" (*(addr)) : "memory")
 
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
 
/* A 64-bit atomic type */
 
typedef struct {
u64 __aligned(8) counter;
} atomic64_t;
 
 
 
extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
 
/**
* atomic64_xchg - xchg atomic64 variable
* @v: pointer to type atomic64_t
* @n: value to assign
*
* Atomically exchanges the value of @v with @n and returns
* the old value.
*/
static inline long long atomic64_xchg(atomic64_t *v, long long n)
{
long long o;
unsigned high = (unsigned)(n >> 32);
unsigned low = (unsigned)n;
 
asm volatile(
"1: \n\t"
"cmpxchg8b (%%esi) \n\t"
"jnz 1b \n\t"
:"=&A" (o)
:"S" (v), "b" (low), "c" (high)
: "memory", "cc");
return o;
}
 
/**
* atomic64_set - set atomic64 variable
* @v: pointer to type atomic64_t
* @i: value to assign
*
* Atomically sets the value of @v to @i.
*/
 
static inline void atomic64_set(atomic64_t *v, long long i)
{
unsigned high = (unsigned)(i >> 32);
unsigned low = (unsigned)i;
asm volatile (
"1: \n\t"
"cmpxchg8b (%%esi) \n\t"
"jnz 1b \n\t"
:
:"S" (v), "b" (low), "c" (high)
: "eax", "edx", "memory", "cc");
}
 
 
/**
* atomic64_read - read atomic64 variable
* @ptr: pointer to type atomic64_t
*
* Atomically reads the value of @ptr and returns it.
*/
static inline u64 atomic64_read(atomic64_t *ptr)
{
u64 res;
 
/*
* Note, we inline this atomic64_t primitive because
* it only clobbers EAX/EDX and leaves the others
* untouched. We also (somewhat subtly) rely on the
* fact that cmpxchg8b returns the current 64-bit value
* of the memory location we are touching:
*/
asm volatile(
"mov %%ebx, %%eax\n\t"
"mov %%ecx, %%edx\n\t"
LOCK_PREFIX "cmpxchg8b %1\n"
: "=&A" (res)
: "m" (*ptr)
);
 
return res;
}
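/*
 * Illustrative sketch (not part of the original header): a 64-bit statistics
 * counter that can be reset and read without tearing on 32-bit x86. The
 * counter name is hypothetical.
 */
static atomic64_t demo_bytes_transferred;

static inline void demo_reset_bytes(void)
{
	atomic64_set(&demo_bytes_transferred, 0);
}

static inline u64 demo_read_bytes(void)
{
	/* cmpxchg8b in atomic64_read() returns a consistent 64-bit snapshot. */
	return atomic64_read(&demo_bytes_transferred);
}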
 
 
/**
* atomic64_add_return - add and return
* @delta: integer value to add
* @ptr: pointer to type atomic64_t
*
* Atomically adds @delta to @ptr and returns @delta + *@ptr
*/
extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
 
/*
* Other variants with different arithmetic operators:
*/
extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
extern u64 atomic64_inc_return(atomic64_t *ptr);
extern u64 atomic64_dec_return(atomic64_t *ptr);
 
/**
* atomic64_add - add integer to atomic64 variable
* @delta: integer value to add
* @ptr: pointer to type atomic64_t
*
* Atomically adds @delta to @ptr.
*/
extern void atomic64_add(u64 delta, atomic64_t *ptr);
 
/**
* atomic64_sub - subtract the atomic64 variable
* @delta: integer value to subtract
* @ptr: pointer to type atomic64_t
*
* Atomically subtracts @delta from @ptr.
*/
extern void atomic64_sub(u64 delta, atomic64_t *ptr);
 
/**
* atomic64_sub_and_test - subtract value from variable and test result
* @delta: integer value to subtract
* @ptr: pointer to type atomic64_t
*
* Atomically subtracts @delta from @ptr and returns
* true if the result is zero, or false for all
* other cases.
*/
extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
 
/**
* atomic64_inc - increment atomic64 variable
* @ptr: pointer to type atomic64_t
*
* Atomically increments @ptr by 1.
*/
extern void atomic64_inc(atomic64_t *ptr);
 
/**
* atomic64_dec - decrement atomic64 variable
* @ptr: pointer to type atomic64_t
*
* Atomically decrements @ptr by 1.
*/
extern void atomic64_dec(atomic64_t *ptr);
 
/**
* atomic64_dec_and_test - decrement and test
* @ptr: pointer to type atomic64_t
*
* Atomically decrements @ptr by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
extern int atomic64_dec_and_test(atomic64_t *ptr);
 
/**
* atomic64_inc_and_test - increment and test
* @ptr: pointer to type atomic64_t
*
* Atomically increments @ptr by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
extern int atomic64_inc_and_test(atomic64_t *ptr);
 
/**
* atomic64_add_negative - add and test if negative
* @delta: integer value to add
* @ptr: pointer to type atomic64_t
*
* Atomically adds @delta to @ptr and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
extern int atomic64_add_negative(u64 delta, atomic64_t *ptr);
 
#include <asm-generic/atomic-long.h>
#endif /* _ASM_X86_ATOMIC_32_H */
/drivers/include/asm/bitops.h
91,7 → 91,7
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
128,13 → 128,13
* clear_bit() is atomic and implies release semantics before the memory
* operation. It can be used for an unlock.
*/
static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
clear_bit(nr, addr);
}
 
static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
151,7 → 151,7
* No memory barrier is required here, because x86 cannot reorder stores past
* older loads. Same principle as spin_unlock.
*/
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
166,7 → 166,7
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
static inline void __change_bit(long nr, volatile unsigned long *addr)
{
asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
180,7 → 180,7
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
static inline void change_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "xorb %1,%0"
201,7 → 201,7
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
}
228,7 → 228,7
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
 
247,7 → 247,7
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
}
268,7 → 268,7
* accessed from a hypervisor on the same CPU if running in a VM: don't change
* this without also updating arch/x86/kernel/kvm.c
*/
static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
 
280,7 → 280,7
}
 
/* WARNING: non atomic and it can be reordered! */
static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
 
300,7 → 300,7
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static __always_inline int test_and_change_bit(long nr, volatile unsigned long *addr)
static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
}
311,7 → 311,7
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
 
static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr)
static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
{
int oldbit;
 
343,7 → 343,7
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static __always_inline unsigned long __ffs(unsigned long word)
static inline unsigned long __ffs(unsigned long word)
{
asm("rep; bsf %1,%0"
: "=r" (word)
357,7 → 357,7
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
static __always_inline unsigned long ffz(unsigned long word)
static inline unsigned long ffz(unsigned long word)
{
asm("rep; bsf %1,%0"
: "=r" (word)
371,7 → 371,7
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static __always_inline unsigned long __fls(unsigned long word)
static inline unsigned long __fls(unsigned long word)
{
asm("bsr %1,%0"
: "=r" (word)
393,7 → 393,7
* set bit if value is nonzero. The first (least significant) bit
* is at position 1.
*/
static __always_inline int ffs(int x)
static inline int ffs(int x)
{
int r;
 
434,7 → 434,7
* set bit if value is nonzero. The last (most significant) bit is
* at position 32.
*/
static __always_inline int fls(int x)
static inline int fls(int x)
{
int r;
 
/drivers/include/asm/bitsperlong.h
0,0 → 1,13
#ifndef __ASM_X86_BITSPERLONG_H
#define __ASM_X86_BITSPERLONG_H
 
#ifdef __x86_64__
# define __BITS_PER_LONG 64
#else
# define __BITS_PER_LONG 32
#endif
 
#include <asm-generic/bitsperlong.h>
 
#endif /* __ASM_X86_BITSPERLONG_H */
 
/drivers/include/asm/byteorder.h
0,0 → 1,6
#ifndef _ASM_X86_BYTEORDER_H
#define _ASM_X86_BYTEORDER_H
 
#include <linux/byteorder/little_endian.h>
 
#endif /* _ASM_X86_BYTEORDER_H */
/drivers/include/asm/cmpxchg.h
2,7 → 2,6
#define ASM_X86_CMPXCHG_H
 
#include <linux/compiler.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
#define __HAVE_ARCH_CMPXCHG 1
/drivers/include/asm/desc_defs.h
98,27 → 98,4
 
#endif /* !__ASSEMBLY__ */
 
/* Access rights as returned by LAR */
#define AR_TYPE_RODATA (0 * (1 << 9))
#define AR_TYPE_RWDATA (1 * (1 << 9))
#define AR_TYPE_RODATA_EXPDOWN (2 * (1 << 9))
#define AR_TYPE_RWDATA_EXPDOWN (3 * (1 << 9))
#define AR_TYPE_XOCODE (4 * (1 << 9))
#define AR_TYPE_XRCODE (5 * (1 << 9))
#define AR_TYPE_XOCODE_CONF (6 * (1 << 9))
#define AR_TYPE_XRCODE_CONF (7 * (1 << 9))
#define AR_TYPE_MASK (7 * (1 << 9))
 
#define AR_DPL0 (0 * (1 << 13))
#define AR_DPL3 (3 * (1 << 13))
#define AR_DPL_MASK (3 * (1 << 13))
 
#define AR_A (1 << 8) /* "Accessed" */
#define AR_S (1 << 12) /* If clear, "System" segment */
#define AR_P (1 << 15) /* "Present" */
#define AR_AVL (1 << 20) /* "AVaiLable" (no HW effect) */
#define AR_L (1 << 21) /* "Long mode" for code segments */
#define AR_DB (1 << 22) /* D/B, effect depends on type */
#define AR_G (1 << 23) /* "Granularity" (limit in pages) */
 
#endif /* _ASM_X86_DESC_DEFS_H */
/drivers/include/asm/disabled-features.h
28,14 → 28,6
# define DISABLE_CENTAUR_MCR 0
#endif /* CONFIG_X86_64 */
 
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
# define DISABLE_PKU 0
# define DISABLE_OSPKE 0
#else
# define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31))
# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31))
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 
/*
* Make sure to add features to the correct mask
*/
49,12 → 41,5
#define DISABLED_MASK7 0
#define DISABLED_MASK8 0
#define DISABLED_MASK9 (DISABLE_MPX)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 0
#define DISABLED_MASK12 0
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
#define DISABLED_MASK15 0
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE)
 
#endif /* _ASM_X86_DISABLED_FEATURES_H */
/drivers/include/asm/pgtable_32.h
14,7 → 14,6
*/
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
 
#include <linux/bitops.h>
/drivers/include/asm/posix_types_32.h
0,0 → 1,85
#ifndef _ASM_X86_POSIX_TYPES_32_H
#define _ASM_X86_POSIX_TYPES_32_H
 
/*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*/
 
typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_suseconds_t;
typedef long __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
typedef unsigned int __kernel_uid32_t;
typedef unsigned int __kernel_gid32_t;
 
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
typedef unsigned short __kernel_old_dev_t;
 
#ifdef __GNUC__
typedef long long __kernel_loff_t;
#endif
 
typedef struct {
int val[2];
} __kernel_fsid_t;
 
#if defined(__KERNEL__)
 
#undef __FD_SET
#define __FD_SET(fd,fdsetp) \
asm volatile("btsl %1,%0": \
"+m" (*(__kernel_fd_set *)(fdsetp)) \
: "r" ((int)(fd)))
 
#undef __FD_CLR
#define __FD_CLR(fd,fdsetp) \
asm volatile("btrl %1,%0": \
"+m" (*(__kernel_fd_set *)(fdsetp)) \
: "r" ((int) (fd)))
 
#undef __FD_ISSET
#define __FD_ISSET(fd,fdsetp) \
(__extension__ \
({ \
unsigned char __result; \
asm volatile("btl %1,%2 ; setb %0" \
: "=q" (__result) \
: "r" ((int)(fd)), \
"m" (*(__kernel_fd_set *)(fdsetp))); \
__result; \
}))
 
#undef __FD_ZERO
#define __FD_ZERO(fdsetp) \
do { \
int __d0, __d1; \
asm volatile("cld ; rep ; stosl" \
: "=m" (*(__kernel_fd_set *)(fdsetp)), \
"=&c" (__d0), "=&D" (__d1) \
: "a" (0), "1" (__FDSET_LONGS), \
"2" ((__kernel_fd_set *)(fdsetp)) \
: "memory"); \
} while (0)
 
#endif /* defined(__KERNEL__) */
 
#endif /* _ASM_X86_POSIX_TYPES_32_H */
/drivers/include/asm/required-features.h
92,12 → 92,5
#define REQUIRED_MASK7 0
#define REQUIRED_MASK8 0
#define REQUIRED_MASK9 0
#define REQUIRED_MASK10 0
#define REQUIRED_MASK11 0
#define REQUIRED_MASK12 0
#define REQUIRED_MASK13 0
#define REQUIRED_MASK14 0
#define REQUIRED_MASK15 0
#define REQUIRED_MASK16 0
 
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
/drivers/include/asm/swab.h
0,0 → 1,61
#ifndef _ASM_X86_SWAB_H
#define _ASM_X86_SWAB_H
 
#include <linux/types.h>
#include <linux/compiler.h>
 
static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
{
#ifdef __i386__
# ifdef CONFIG_X86_BSWAP
asm("bswap %0" : "=r" (val) : "0" (val));
# else
asm("xchgb %b0,%h0\n\t" /* swap lower bytes */
"rorl $16,%0\n\t" /* swap words */
"xchgb %b0,%h0" /* swap higher bytes */
: "=q" (val)
: "0" (val));
# endif
 
#else /* __i386__ */
asm("bswapl %0"
: "=r" (val)
: "0" (val));
#endif
return val;
}
#define __arch_swab32 __arch_swab32
 
static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
{
#ifdef __i386__
union {
struct {
__u32 a;
__u32 b;
} s;
__u64 u;
} v;
v.u = val;
# ifdef CONFIG_X86_BSWAP
asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b));
# else
v.s.a = __arch_swab32(v.s.a);
v.s.b = __arch_swab32(v.s.b);
asm("xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b));
# endif
return v.u;
#else /* __i386__ */
asm("bswapq %0"
: "=r" (val)
: "0" (val));
return val;
#endif
}
#define __arch_swab64 __arch_swab64
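/*
 * Illustrative sketch (not part of the original header): byte-order
 * conversion with the arch swab helpers. The test values are examples only.
 */
static inline int demo_swab_selftest(void)
{
	/* 0x12345678 byte-swapped is 0x78563412; the 64-bit case mirrors it. */
	return __arch_swab32(0x12345678) == 0x78563412 &&
	       __arch_swab64(0x1122334455667788ULL) == 0x8877665544332211ULL;
}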
 
#endif /* _ASM_X86_SWAB_H */
/drivers/include/asm/types.h
0,0 → 1,16
#ifndef _ASM_X86_TYPES_H
#define _ASM_X86_TYPES_H
 
#define dma_addr_t dma_addr_t
 
#include <asm-generic/types.h>
 
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
 
typedef u64 dma64_addr_t;
 
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
 
#endif /* _ASM_X86_TYPES_H */
/drivers/include/asm-generic/bug.h
81,12 → 81,6
do { printk(arg); __WARN_TAINT(taint); } while (0)
#endif
 
/* used internally by panic.c */
struct warn_args;
 
void __warn(const char *file, int line, void *caller, unsigned taint,
struct pt_regs *regs, struct warn_args *args);
 
#ifndef WARN_ON
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
116,10 → 110,9
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
if (unlikely(__ret_warn_once)) \
if (WARN_ON(!__warned)) \
__warned = true; \
WARN_ON(1); \
} \
unlikely(__ret_warn_once); \
})
 
127,10 → 120,9
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
if (unlikely(__ret_warn_once)) \
if (WARN(!__warned, format)) \
__warned = true; \
WARN(1, format); \
} \
unlikely(__ret_warn_once); \
})
 
138,10 → 130,9
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
if (unlikely(__ret_warn_once)) \
if (WARN_TAINT(!__warned, taint, format)) \
__warned = true; \
WARN_TAINT(1, taint, format); \
} \
unlikely(__ret_warn_once); \
})
 
151,7 → 142,7
#endif
 
#ifndef HAVE_ARCH_BUG_ON
#define BUG_ON(condition) do { if (condition) BUG(); } while (0)
#define BUG_ON(condition) do { if (condition) ; } while (0)
#endif
 
#ifndef HAVE_ARCH_WARN_ON
/drivers/include/asm-generic/fixmap.h
72,10 → 72,10
/* Return a pointer with offset calculated */
#define __set_fixmap_offset(idx, phys, flags) \
({ \
unsigned long ________addr; \
unsigned long addr; \
__set_fixmap(idx, phys, flags); \
________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
________addr; \
addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
addr; \
})
 
#define set_fixmap_offset(idx, phys) \
/drivers/include/asm-generic/pci-dma-compat.h
54,7 → 54,11
{
 
}
#define pci_map_page(dev, page, offset, size, direction) \
(dma_addr_t)( (offset)+page_to_phys(page))
 
#define pci_unmap_page(dev, dma_address, size, direction)
 
static inline int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
/drivers/include/asm-generic/atomic-long.h
98,7 → 98,7
#define atomic_long_xchg(v, new) \
(ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
 
static __always_inline void atomic_long_inc(atomic_long_t *l)
static inline void atomic_long_inc(atomic_long_t *l)
{
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
105,7 → 105,7
ATOMIC_LONG_PFX(_inc)(v);
}
 
static __always_inline void atomic_long_dec(atomic_long_t *l)
static inline void atomic_long_dec(atomic_long_t *l)
{
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
113,7 → 113,7
}
 
#define ATOMIC_LONG_OP(op) \
static __always_inline void \
static inline void \
atomic_long_##op(long i, atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
/drivers/include/drm/drm_dp_aux_dev.h
File deleted
/drivers/include/drm/drm_dp_dual_mode_helper.h
File deleted
/drivers/include/drm/drmP.h
45,6 → 45,8
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
 
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/sched.h>
60,8 → 62,6
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
 
#include <asm/uaccess.h>
 
#include <uapi/drm/drm.h>
#include <uapi/drm/drm_mode.h>
 
190,8 → 190,6
drm_err(fmt, ##__VA_ARGS__); \
})
 
#if DRM_DEBUG_CODE
 
#define DRM_INFO(fmt, ...) \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
 
204,6 → 202,7
* \param fmt printf() like format string.
* \param arg arguments
*/
#if DRM_DEBUG_CODE
#define DRM_DEBUG(fmt, args...) \
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \
231,7 → 230,6
} while (0)
 
#else
#define DRM_INFO(fmt, ...) do { } while (0)
#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
#define DRM_DEBUG_KMS(fmt, args...) do { } while (0)
#define DRM_DEBUG_PRIME(fmt, args...) do { } while (0)
295,7 → 293,6
struct drm_pending_event {
struct drm_event *event;
struct list_head link;
struct list_head pending_link;
struct drm_file *file_priv;
pid_t pid; /* pid of requester, no guarantee it's valid by the time
we deliver the event, for tracing only */
354,11 → 351,8
struct list_head blobs;
 
wait_queue_head_t event_wait;
struct list_head pending_event_list;
struct list_head event_list;
int event_space;
 
struct mutex event_read_lock;
};
 
/**
807,26 → 801,16
unsigned int cmd, unsigned long arg);
extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
 
/* File Operations (drm_fops.c) */
int drm_open(struct inode *inode, struct file *filp);
ssize_t drm_read(struct file *filp, char __user *buffer,
/* Device support (drm_fops.h) */
extern int drm_open(struct inode *inode, struct file *filp);
extern ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
int drm_release(struct inode *inode, struct file *filp);
int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
int drm_event_reserve_init_locked(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_pending_event *p,
struct drm_event *e);
int drm_event_reserve_init(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_pending_event *p,
struct drm_event *e);
void drm_event_cancel_free(struct drm_device *dev,
struct drm_pending_event *p);
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
extern int drm_release(struct inode *inode, struct file *filp);
extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
 
/* Mapping support (drm_vm.h) */
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
 
/* Misc. IOCTL support (drm_ioctl.c) */
int drm_noop(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/drivers/include/drm/drm_atomic_helper.h
146,9 → 146,6
struct drm_connector_state *state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
uint32_t start, uint32_t size);
 
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
/drivers/include/drm/drm_crtc.h
305,20 → 305,12
* @mode_changed: crtc_state->mode or crtc_state->enable has been changed
* @active_changed: crtc_state->active has been toggled.
* @connectors_changed: connectors to this crtc have been updated
* @color_mgmt_changed: color management properties have changed (degamma or
* gamma LUT or CSC matrix)
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
* @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
* @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders
* @last_vblank_count: for helpers and drivers to capture the vblank of the
* update to ensure framebuffer cleanup isn't done too early
* @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
* @mode: current mode timings
* @degamma_lut: Lookup table for converting framebuffer pixel data
* before apply the conversion matrix
* @ctm: Transformation matrix
* @gamma_lut: Lookup table for converting pixel data after the
* conversion matrix
* @event: optional pointer to a DRM event to signal upon completion of the
* state update
* @state: backpointer to global drm_atomic_state
340,7 → 332,6
bool mode_changed : 1;
bool active_changed : 1;
bool connectors_changed : 1;
bool color_mgmt_changed : 1;
 
/* attached planes bitmask:
* WARNING: transitional helpers do not maintain plane_mask so
350,7 → 341,6
u32 plane_mask;
 
u32 connector_mask;
u32 encoder_mask;
 
/* last_vblank_count: for vblank waits before cleanup */
u32 last_vblank_count;
363,11 → 353,6
/* blob property to expose current mode to atomic userspace */
struct drm_property_blob *mode_blob;
 
/* blob property to expose color management to userspace */
struct drm_property_blob *degamma_lut;
struct drm_property_blob *ctm;
struct drm_property_blob *gamma_lut;
 
struct drm_pending_vblank_event *event;
 
struct drm_atomic_state *state;
770,7 → 755,7
int x, y;
const struct drm_crtc_funcs *funcs;
 
/* Legacy FB CRTC gamma size for reporting to userspace */
/* CRTC gamma size for reporting to userspace */
uint32_t gamma_size;
uint16_t *gamma_store;
 
1597,8 → 1582,6
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
* The disable callback is optional.
*/
void (*disable)(struct drm_bridge *bridge);
 
1615,8 → 1598,6
* The bridge must assume that the display pipe (i.e. clocks and timing
* signals) feeding it is no longer running when this callback is
* called.
*
* The post_disable callback is optional.
*/
void (*post_disable)(struct drm_bridge *bridge);
 
1645,8 → 1626,6
* will not yet be running when this callback is called. The bridge must
* not enable the display link feeding the next bridge in the chain (if
* there is one) when this callback is called.
*
* The pre_enable callback is optional.
*/
void (*pre_enable)(struct drm_bridge *bridge);
 
1664,8 → 1643,6
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
* The enable callback is optional.
*/
void (*enable)(struct drm_bridge *bridge);
};
1698,7 → 1675,6
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
* @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
* @planes: pointer to array of plane pointers
* @plane_states: pointer to array of plane states pointers
* @crtcs: pointer to array of CRTC pointers
1712,7 → 1688,6
struct drm_device *dev;
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
bool legacy_set_config : 1;
struct drm_plane **planes;
struct drm_plane_state **plane_states;
struct drm_crtc **crtcs;
2049,15 → 2024,6
* @property_blob_list: list of all the blob property objects
* @blob_lock: mutex for blob property allocation and management
* @*_property: core property tracking
* @degamma_lut_property: LUT used to convert the framebuffer's colors to linear
* gamma
* @degamma_lut_size_property: size of the degamma LUT as supported by the
* driver (read-only)
* @ctm_property: Matrix used to convert colors after the lookup in the
* degamma LUT
* @gamma_lut_property: LUT used to convert the colors, after the CSC matrix, to
* the gamma space of the connected screen (read-only)
* @gamma_lut_size_property: size of the gamma LUT as supported by the driver
* @preferred_depth: preferred RGB pixel depth, used by fb helpers
* @prefer_shadow: hint to userspace to prefer shadow-fb rendering
* @async_page_flip: does this device support async flips on the primary plane?
2160,13 → 2126,6
struct drm_property *aspect_ratio_property;
struct drm_property *dirty_info_property;
 
/* Optional color correction properties */
struct drm_property *degamma_lut_property;
struct drm_property *degamma_lut_size_property;
struct drm_property *ctm_property;
struct drm_property *gamma_lut_property;
struct drm_property *gamma_lut_size_property;
 
/* properties for virtual machine layout */
struct drm_property *suggested_x_property;
struct drm_property *suggested_y_property;
2196,17 → 2155,6
list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
 
/**
* drm_for_each_encoder_mask - iterate over encoders specified by bitmask
* @encoder: the loop cursor
* @dev: the DRM device
* @encoder_mask: bitmask of encoder indices
*
* Iterate over all encoders specified by bitmask.
*/
#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
 
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
2283,7 → 2231,6
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...);
extern unsigned int drm_encoder_index(struct drm_encoder *encoder);
 
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
2341,8 → 2288,6
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
extern struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter);
extern struct edid *drm_edid_duplicate(const struct edid *edid);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_config_init(struct drm_device *dev);
2543,8 → 2488,6
extern int drm_format_plane_cpp(uint32_t format, int plane);
extern int drm_format_horz_chroma_subsampling(uint32_t format);
extern int drm_format_vert_chroma_subsampling(uint32_t format);
extern int drm_format_plane_width(int width, uint32_t format, int plane);
extern int drm_format_plane_height(int height, uint32_t format, int plane);
extern const char *drm_get_format_name(uint32_t format);
extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
unsigned int supported_rotations);
2593,21 → 2536,6
return mo ? obj_to_property(mo) : NULL;
}
 
/*
* Extract a degamma/gamma LUT value provided by user and round it to the
* precision supported by the hardware.
*/
static inline uint32_t drm_color_lut_extract(uint32_t user_input,
uint32_t bit_precision)
{
uint32_t val = user_input + (1 << (16 - bit_precision - 1));
uint32_t max = 0xffff >> (16 - bit_precision);
 
val >>= 16 - bit_precision;
 
return clamp_val(val, 0, max);
}
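/*
 * Illustrative sketch (not part of the original header): rounding a 16-bit
 * LUT entry supplied by userspace down to 10-bit hardware precision.
 */
static inline uint32_t demo_lut_to_10bit(uint32_t user_value)
{
	/* 0xffff maps to 0x3ff, 0x0000 to 0x0, with round-to-nearest in between. */
	return drm_color_lut_extract(user_value, 10);
}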
 
/* Plane list iterator for legacy (overlay only) planes. */
#define drm_for_each_legacy_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
/drivers/include/drm/drm_crtc_helper.h
48,9 → 48,6
struct drm_display_mode *mode,
int x, int y,
struct drm_framebuffer *old_fb);
extern void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
int degamma_lut_size,
int gamma_lut_size);
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
/drivers/include/drm/drm_fb_helper.h
219,7 → 219,6
};
 
#ifdef CONFIG_DRM_FBDEV_EMULATION
int drm_fb_helper_modinit(void);
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs);
int drm_fb_helper_init(struct drm_device *dev,
284,11 → 283,6
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector);
#else
static inline int drm_fb_helper_modinit(void)
{
return 0;
}
 
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs)
/drivers/include/drm/drm_mipi_dsi.h
96,17 → 96,14
* struct mipi_dsi_host - DSI host device
* @dev: driver model device node for this DSI host
* @ops: DSI host operations
* @list: list management
*/
struct mipi_dsi_host {
struct device *dev;
const struct mipi_dsi_host_ops *ops;
struct list_head list;
};
 
int mipi_dsi_host_register(struct mipi_dsi_host *host);
void mipi_dsi_host_unregister(struct mipi_dsi_host *host);
struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
 
/* DSI mode flags */
 
142,28 → 139,10
MIPI_DSI_FMT_RGB565,
};
 
#define DSI_DEV_NAME_SIZE 20
 
/**
* struct mipi_dsi_device_info - template for creating a mipi_dsi_device
* @type: DSI peripheral chip type
* @channel: DSI virtual channel assigned to peripheral
* @node: pointer to OF device node or NULL
*
* This is populated and passed to mipi_dsi_device_new to create a new
* DSI device
*/
struct mipi_dsi_device_info {
char type[DSI_DEV_NAME_SIZE];
u32 channel;
struct device_node *node;
};
 
/**
* struct mipi_dsi_device - DSI peripheral device
* @host: DSI host for this peripheral
* @dev: driver model device node for this peripheral
* @name: DSI peripheral chip type
* @channel: virtual channel assigned to the peripheral
* @format: pixel format for video mode
* @lanes: number of active data lanes
173,7 → 152,6
struct mipi_dsi_host *host;
struct device dev;
 
char name[DSI_DEV_NAME_SIZE];
unsigned int channel;
unsigned int lanes;
enum mipi_dsi_pixel_format format;
210,10 → 188,6
return -EINVAL;
}
 
struct mipi_dsi_device *
mipi_dsi_device_register_full(struct mipi_dsi_host *host,
const struct mipi_dsi_device_info *info);
void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi);
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
/drivers/include/drm/drm_modeset_helper_vtables.h
439,7 → 439,7
* can be modified by this callback and does not need to match mode.
*
* This function is used by both legacy CRTC helpers and atomic helpers.
* This hook is optional.
* With atomic helpers it is optional.
*
* NOTE:
*
/drivers/include/drm/i915_pciids.h
277,9 → 277,7
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
 
#define INTEL_SKL_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */
 
298,9 → 296,7
#define INTEL_BXT_IDS(info) \
INTEL_VGA_DEVICE(0x0A84, info), \
INTEL_VGA_DEVICE(0x1A84, info), \
INTEL_VGA_DEVICE(0x1A85, info), \
INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */
INTEL_VGA_DEVICE(0x5A84, info)
 
#define INTEL_KBL_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
/drivers/include/drm/ttm/ttm_bo_api.h
316,21 → 316,7
*/
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
bool interruptible, bool no_wait);
 
/**
* ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
*
* @placement: The placement to check @mem against
* @mem: The struct ttm_mem_reg indicating the region where the bo resides
* @new_flags: Describes compatible placement found
*
* Returns true if the placement is compatible
*/
extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem,
uint32_t *new_flags);
 
/**
* ttm_bo_validate
*
* @bo: The buffer object.
/drivers/include/drm/drm_edid.h
403,18 → 403,6
return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
}
 
/**
* drm_eld_get_conn_type - Get device type hdmi/dp connected
* @eld: pointer to an ELD memory structure
*
* The caller needs to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to
* identify the display type connected.
*/
static inline u8 drm_eld_get_conn_type(const uint8_t *eld)
{
return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
}
 
struct edid *drm_do_get_edid(struct drm_connector *connector,
int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
size_t len),
/drivers/include/drm/i915_powerwell.h
0,0 → 1,37
/**************************************************************************
*
* Copyright 2013 Intel Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
 
#ifndef _I915_POWERWELL_H_
#define _I915_POWERWELL_H_
 
/* For use by hda_i915 driver */
extern int i915_request_power_well(void);
extern int i915_release_power_well(void);
extern int i915_get_cdclk_freq(void);
 
#endif /* _I915_POWERWELL_H_ */
/drivers/include/linux/io-mapping.h
File deleted
/drivers/include/linux/kernfs.h
File deleted
/drivers/include/linux/uidgid.h
File deleted
/drivers/include/linux/vga_switcheroo.h
File deleted
/drivers/include/linux/thread_info.h
File deleted
/drivers/include/linux/intel-iommu.h
File deleted
/drivers/include/linux/stat.h
File deleted
/drivers/include/linux/pci-dma-compat.h
File deleted
/drivers/include/linux/bug.h
20,7 → 20,6
#define BUILD_BUG_ON_MSG(cond, msg) (0)
#define BUILD_BUG_ON(condition) (0)
#define BUILD_BUG() (0)
#define MAYBE_BUILD_BUG_ON(cond) (0)
#else /* __CHECKER__ */
 
/* Force a compilation error if a constant expression is not a power of 2 */
84,14 → 83,6
*/
#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
 
#define MAYBE_BUILD_BUG_ON(cond) \
do { \
if (__builtin_constant_p((cond))) \
BUILD_BUG_ON(cond); \
else \
BUG_ON(cond); \
} while (0)
 
#endif /* __CHECKER__ */
 
#ifdef CONFIG_GENERIC_BUG
/drivers/include/linux/clocksource.h
118,23 → 118,6
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
 
static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
{
/* freq = cyc/from
* mult/2^shift = ns/cyc
* mult = ns/cyc * 2^shift
* mult = from/freq * 2^shift
* mult = from * 2^shift / freq
* mult = (from<<shift) / freq
*/
u64 tmp = ((u64)from) << shift_constant;
 
tmp += freq/2; /* round for do_div */
do_div(tmp, freq);
 
return (u32)tmp;
}
 
/**
* clocksource_khz2mult - calculates mult from khz and shift
* @khz: Clocksource frequency in KHz
145,7 → 128,19
*/
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
return clocksource_freq2mult(khz, shift_constant, NSEC_PER_MSEC);
/* khz = cyc/(Million ns)
* mult/2^shift = ns/cyc
* mult = ns/cyc * 2^shift
* mult = 1Million/khz * 2^shift
* mult = 1000000 * 2^shift / khz
* mult = (1000000<<shift) / khz
*/
u64 tmp = ((u64)1000000) << shift_constant;
 
tmp += khz/2; /* round for do_div */
do_div(tmp, khz);
 
return (u32)tmp;
}
 
/**
159,7 → 154,19
*/
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
return clocksource_freq2mult(hz, shift_constant, NSEC_PER_SEC);
/* hz = cyc/(Billion ns)
* mult/2^shift = ns/cyc
* mult = ns/cyc * 2^shift
* mult = 1Billion/hz * 2^shift
* mult = 1000000000 * 2^shift / hz
* mult = (1000000000<<shift) / hz
*/
u64 tmp = ((u64)1000000000) << shift_constant;
 
tmp += hz/2; /* round for do_div */
do_div(tmp, hz);
 
return (u32)tmp;
}
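/*
 * Illustrative sketch (not part of the original header): converting a cycle
 * delta to nanoseconds with a mult/shift pair computed by the helper above.
 * The 19.2 MHz clock frequency is an arbitrary example.
 */
static inline u64 demo_cycles_to_ns(u64 cycles)
{
	u32 shift = 20;
	u32 mult = clocksource_hz2mult(19200000, shift);

	/* ns = cycles * mult >> shift, the same form clocksource readers use. */
	return (cycles * mult) >> shift;
}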
 
/**
/drivers/include/linux/compiler-gcc.h
246,7 → 246,7
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#endif
#if GCC_VERSION >= 40800
#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
/drivers/include/linux/compiler.h
20,14 → 20,12
# define __pmem __attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
#else /* CONFIG_SPARSE_RCU_POINTER */
#else
# define __rcu
#endif /* CONFIG_SPARSE_RCU_POINTER */
# define __private __attribute__((noderef))
#endif
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
#else
# define __user
# define __kernel
# define __safe
46,9 → 44,7
# define __percpu
# define __rcu
# define __pmem
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif /* __CHECKER__ */
#endif
 
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
#define ___PASTE(a,b) a##b
267,9 → 263,8
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
* READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
* least two memcpy()s: one for the __builtin_memcpy() and then one for
* the macro doing the copy of variable - '__u' allocated on the stack.
* READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
* compile-time warning.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
/drivers/include/linux/cpumask.h
607,6 → 607,8
 
/**
* cpumask_size - size to allocate for a 'struct cpumask' in bytes
*
* This will eventually be a runtime variable, depending on nr_cpu_ids.
*/
static inline size_t cpumask_size(void)
{
/drivers/include/linux/dma-attrs.h
18,7 → 18,6
DMA_ATTR_NO_KERNEL_MAPPING,
DMA_ATTR_SKIP_CPU_SYNC,
DMA_ATTR_FORCE_CONTIGUOUS,
DMA_ATTR_ALLOC_SINGLE_PAGES,
DMA_ATTR_MAX,
};
 
/drivers/include/linux/fb.h
296,6 → 296,9
/* Draws cursor */
int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor);
 
/* Rotates the display */
void (*fb_rotate)(struct fb_info *info, int angle);
 
/* wait for blit idle, optional */
int (*fb_sync)(struct fb_info *info);
 
/drivers/include/linux/gfp.h
8,11 → 8,6
 
struct vm_area_struct;
 
/*
* In case of changes, please don't forget to update
* include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
*/
 
/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA 0x01u
#define ___GFP_HIGHMEM 0x02u
52,6 → 47,7
#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 
104,6 → 100,8
*
* __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
* This takes precedence over the __GFP_MEMALLOC flag if both are set.
*
* __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement.
*/
#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
256,9 → 254,8
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
~__GFP_RECLAIM)
~__GFP_KSWAPD_RECLAIM)
 
 
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
312,7 → 309,7
* 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
*
* GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
* ZONES_SHIFT must be <= 2 on 32 bit platforms.
*/
 
#if 16 * ZONES_SHIFT > BITS_PER_LONG
/drivers/include/linux/ioport.h
20,7 → 20,6
resource_size_t end;
const char *name;
unsigned long flags;
unsigned long desc;
struct resource *parent, *sibling, *child;
};
 
50,19 → 49,12
#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */
#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */
 
#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */
#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */
 
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
 
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
#define IORESOURCE_AUTO 0x40000000
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
 
/* I/O resource extended types */
#define IORESOURCE_SYSTEM_RAM (IORESOURCE_MEM|IORESOURCE_SYSRAM)
 
/* PnP IRQ specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
#define IORESOURCE_IRQ_LOWEDGE (1<<1)
106,27 → 98,13
 
/* PCI ROM control bits (IORESOURCE_BITS) */
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
#define IORESOURCE_ROM_SHADOW (1<<1) /* Use RAM image, not ROM BAR */
#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */
 
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
 
/*
* I/O Resource Descriptors
*
* Descriptors are used by walk_iomem_res_desc() and region_intersects()
* for searching a specific resource range in the iomem table. Assign
* a new descriptor when a resource range supports the search interfaces.
* Otherwise, resource.desc must be set to IORES_DESC_NONE (0).
*/
enum {
IORES_DESC_NONE = 0,
IORES_DESC_CRASH_KERNEL = 1,
IORES_DESC_ACPI_TABLES = 2,
IORES_DESC_ACPI_NV_STORAGE = 3,
IORES_DESC_PERSISTENT_MEMORY = 4,
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
};
 
/* helpers to define resources */
#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
135,7 → 113,6
.end = (_start) + (_size) - 1, \
.name = (_name), \
.flags = (_flags), \
.desc = IORES_DESC_NONE, \
}
 
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
172,7 → 149,6
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern int remove_resource(struct resource *old);
extern void arch_remove_reservations(struct resource *avail);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
194,10 → 170,6
{
return res->flags & IORESOURCE_TYPE_BITS;
}
static inline unsigned long resource_ext_type(const struct resource *res)
{
return res->flags & IORESOURCE_EXT_TYPE_BITS;
}
/* True iff r1 completely contains r2 */
static inline bool resource_contains(struct resource *r1, struct resource *r2)
{
/drivers/include/linux/kernel.h
63,7 → 63,7
#define round_down(x, y) ((x) & ~__round_mask(x, y))
 
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#define DIV_ROUND_UP_ULL(ll,d) \
({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
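/*
 * Illustrative sketch (not part of the original header): DIV_ROUND_UP is the
 * usual way to compute how many fixed-size blocks cover a buffer.
 */
static inline unsigned long demo_pages_needed(unsigned long bytes)
{
	/* e.g. 4097 bytes with 4096-byte pages -> 2 pages */
	return DIV_ROUND_UP(bytes, 4096);
}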
 
788,6 → 788,64
})
 
 
static inline __must_check long __copy_to_user(void __user *to,
const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
*(u16 __force *)to = *(u16 *)from;
return 0;
case 4:
*(u32 __force *)to = *(u32 *)from;
return 0;
default:
break;
}
}
 
__builtin_memcpy((void __force *)to, from, n);
return 0;
}
 
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
unsigned long ret;
 
switch (n) {
case 1:
*(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
*(u16 __force *)to = *(u16 *)from;
return 0;
case 4:
*(u32 __force *)to = *(u32 *)from;
return 0;
default:
break;
}
}
__builtin_memcpy((void __force *)to, from, n);
return 0;
}
 
static inline long copy_from_user(void *to,
const void __user * from, unsigned long n)
{
return __copy_from_user(to, from, n);
}
 
static inline long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
return __copy_to_user(to, from, n);
}
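/*
 * Illustrative sketch (not part of the original header): in this port the
 * copy_*_user() helpers degenerate to plain memory copies, so copying a
 * small, hypothetical configuration structure out looks like this.
 */
struct demo_cfg {
	u32 width;
	u32 height;
};

static inline long demo_copy_cfg(void __user *dst, const struct demo_cfg *src)
{
	/* Returns 0 on success, matching the kernel copy_to_user() convention. */
	return copy_to_user(dst, src, sizeof(*src));
}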
 
#define CAP_SYS_ADMIN 21
 
static inline bool capable(int cap)
803,8 → 861,14
 
typedef u64 async_cookie_t;
 
//#define iowrite32(v, addr) writel((v), (addr))
 
#define __init
 
#define CONFIG_PAGE_OFFSET 0
 
typedef long long __kernel_long_t;
typedef unsigned long long __kernel_ulong_t;
#define __kernel_long_t __kernel_long_t
 
#endif
/drivers/include/linux/lockdep.h
196,11 → 196,9
* We record lock dependency chains, so that we can cache them:
*/
struct lock_chain {
/* see BUILD_BUG_ON()s in lookup_chain_cache() */
unsigned int irq_context : 2,
depth : 6,
base : 24;
/* 4 byte hole */
u8 irq_context;
u8 depth;
u16 base;
struct hlist_node entry;
u64 chain_key;
};
263,6 → 261,7
/*
* Initialization, self-test and debugging-output methods:
*/
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
393,6 → 392,7
# define lockdep_set_current_reclaim_state(g) do { } while (0)
# define lockdep_clear_current_reclaim_state() do { } while (0)
# define lockdep_trace_alloc(g) do { } while (0)
# define lockdep_init() do { } while (0)
# define lockdep_info() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
/drivers/include/linux/mmdebug.h
9,7 → 9,8
struct mm_struct;
 
extern void dump_page(struct page *page, const char *reason);
extern void __dump_page(struct page *page, const char *reason);
extern void dump_page_badflags(struct page *page, const char *reason,
unsigned long badflags);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
 
/drivers/include/linux/pci.h
742,26 → 742,9
.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
 
enum {
PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */
PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */
PCI_PROBE_ONLY = 0x00000004, /* use existing setup */
PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */
PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */
PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */
};
 
/* these external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI
 
extern unsigned int pci_flags;
 
static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }
 
void pcie_bus_configure_settings(struct pci_bus *bus);
 
enum pcie_bus_config_types {
783,7 → 766,6
int no_pci_devices(void);
 
void pcibios_resource_survey_bus(struct pci_bus *bus);
void pcibios_bus_add_device(struct pci_dev *pdev);
void pcibios_add_bus(struct pci_bus *bus);
void pcibios_remove_bus(struct pci_bus *bus);
void pcibios_fixup_bus(struct pci_bus *);
1024,6 → 1006,8
bool pci_intx_mask_supported(struct pci_dev *dev);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
int pci_wait_for_pending_transaction(struct pci_dev *dev);
int pcix_get_max_mmrbc(struct pci_dev *dev);
1116,7 → 1100,6
/* Vital product data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
int pci_set_vpd_size(struct pci_dev *dev, size_t len);
 
/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1248,7 → 1231,6
 
int pci_set_vga_state(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
 
/* kmem_cache style wrapper around pci_alloc_consistent() */
 
#include <linux/pci-dma.h>
1416,11 → 1398,6
 
#else /* CONFIG_PCI is not enabled */
 
static inline void pci_set_flags(int flags) { }
static inline void pci_add_flags(int flags) { }
static inline void pci_clear_flags(int flags) { }
static inline int pci_has_flag(int flag) { return 0; }
 
/*
* If the system does not have PCI, clearly these return errors. Define
* these as simple inline functions to avoid hair in drivers.
1460,6 → 1437,16
static inline void pci_set_master(struct pci_dev *dev) { }
static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
static inline void pci_disable_device(struct pci_dev *dev) { }
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
unsigned int size)
{ return -EIO; }
static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
unsigned long mask)
{ return -EIO; }
static inline int pci_assign_resource(struct pci_dev *dev, int i)
{ return -EBUSY; }
static inline int __pci_register_driver(struct pci_driver *drv,
1521,10 → 1508,6
 
#include <asm/pci.h>
 
#ifndef pci_root_bus_fwnode
#define pci_root_bus_fwnode(bus) NULL
#endif
 
/* these helpers provide future and backwards compatibility
* for accessing popular PCI BAR info */
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
1748,8 → 1731,6
 
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset);
void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset);
int pci_num_vf(struct pci_dev *dev);
int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
1766,12 → 1747,6
}
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
{
return -ENOSYS;
}
static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
int id, int reset) { }
static inline void pci_disable_sriov(struct pci_dev *dev) { }
static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
static inline int pci_vfs_assigned(struct pci_dev *dev)
1852,13 → 1827,12
#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
 
/* Small Resource Data Type Tag Item Names */
#define PCI_VPD_STIN_END 0x0f /* End */
#define PCI_VPD_STIN_END 0x78 /* End */
 
#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
#define PCI_VPD_SRDT_END PCI_VPD_STIN_END
 
#define PCI_VPD_SRDT_TIN_MASK 0x78
#define PCI_VPD_SRDT_LEN_MASK 0x07
#define PCI_VPD_LRDT_TIN_MASK 0x7f
 
#define PCI_VPD_LRDT_TAG_SIZE 3
#define PCI_VPD_SRDT_TAG_SIZE 1
1882,17 → 1856,6
}
 
/**
* pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
* @lrdt: Pointer to the beginning of the Large Resource Data Type tag
*
* Returns the extracted Large Resource Data Type Tag item.
*/
static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
{
return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
}
 
/**
* pci_vpd_srdt_size - Extracts the Small Resource Data Type length
* @srdt: Pointer to the beginning of the Small Resource Data Type tag
*
1904,17 → 1867,6
}
 
/**
* pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
* @srdt: Pointer to the beginning of the Small Resource Data Type tag
*
* Returns the extracted Small Resource Data Type Tag Item.
*/
static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
{
return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
}
 
/**
* pci_vpd_info_field_size - Extracts the information field length
* @info_field: Pointer to the beginning of an information field header
*
2031,9 → 1983,6
return bus->self && bus->self->ari_enabled;
}
 
/* provide the legacy pci_dma_* API */
#include <linux/pci-dma-compat.h>
 
typedef struct
{
struct list_head link;
/drivers/include/linux/poison.h
30,11 → 30,7
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
 
/********** mm/debug-pagealloc.c **********/
#ifdef CONFIG_PAGE_POISONING_ZERO
#define PAGE_POISON 0x00
#else
#define PAGE_POISON 0xaa
#endif
 
/********** mm/page_alloc.c ************/
 
/drivers/include/linux/printk.h
242,10 → 242,10
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
__ratelimit(&_rs)) \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \
__dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
#define pr_debug_ratelimited(fmt, ...) \
/drivers/include/linux/pwm.h
6,7 → 6,6
//#include <linux/of.h>
 
struct device;
struct device_node;
struct pwm_device;
struct seq_file;
 
224,11 → 223,6
return ERR_PTR(-ENODEV);
}
 
static inline struct pwm_device *of_pwm_get(struct device_node *np,
const char *con_id)
{
return ERR_PTR(-ENODEV);
}
 
static inline void pwm_put(struct pwm_device *pwm)
{
240,12 → 234,6
return ERR_PTR(-ENODEV);
}
 
static inline struct pwm_device *devm_of_pwm_get(struct device *dev,
struct device_node *np,
const char *con_id)
{
return ERR_PTR(-ENODEV);
}
 
static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
{
/drivers/include/linux/rculist.h
319,27 → 319,6
})
 
/**
* list_next_or_null_rcu - get the first element from a list
* @head: the head for the list.
* @ptr: the list head to take the next element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* Note that if the ptr is at the end of the list, NULL is returned.
*
* This primitive may safely run concurrently with the _rcu list-mutation
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
struct list_head *__head = (head); \
struct list_head *__ptr = (ptr); \
struct list_head *__next = READ_ONCE(__ptr->next); \
likely(__next != __head) ? list_entry_rcu(__next, type, \
member) : NULL; \
})
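A minimal usage sketch, assuming a caller-defined struct foo that embeds a list_head named node; the next element's value is copied out while rcu_read_lock() is held.

struct foo {
	int val;
	struct list_head node;
};

static int foo_peek_next_val(struct list_head *head, struct foo *pos)
{
	struct foo *next;
	int val = -1;

	rcu_read_lock();
	next = list_next_or_null_rcu(head, &pos->node, struct foo, node);
	if (next)
		val = next->val;
	rcu_read_unlock();
	return val;
}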
 
/**
* list_for_each_entry_rcu - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
/drivers/include/linux/rcupdate.h
294,7 → 294,9
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);
struct notifier_block;
int rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
 
#ifndef CONFIG_TINY_RCU
void rcu_end_inkernel_boot(void);
320,6 → 322,8
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
#endif /* CONFIG_NO_HZ_FULL */
 
#ifdef CONFIG_RCU_NOCB_CPU
/drivers/include/linux/slab.h
20,7 → 20,7
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/
#define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
92,12 → 92,6
# define SLAB_ACCOUNT 0x00000000UL
#endif
 
#ifdef CONFIG_KASAN
#define SLAB_KASAN 0x08000000UL
#else
#define SLAB_KASAN 0x00000000UL
#endif
 
/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/drivers/include/linux/string.h
128,14 → 128,8
extern void argv_free(char **argv);
 
extern bool sysfs_streq(const char *s1, const char *s2);
extern int kstrtobool(const char *s, bool *res);
static inline int strtobool(const char *s, bool *res)
{
return kstrtobool(s, res);
}
extern int strtobool(const char *s, bool *res);
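An illustrative caller, assuming a hypothetical parse_enable() helper; strtobool() accepts strings such as "y", "n", "1" and "0" and returns 0 on success.

static bool parse_enable(const char *arg)
{
	bool val = false;

	if (strtobool(arg, &val))
		return false;	/* unrecognised input: keep the feature off */
	return val;
}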
 
int match_string(const char * const *array, size_t n, const char *string);
 
#ifdef CONFIG_BINARY_PRINTF
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
/drivers/include/linux/sysfs.h
202,11 → 202,7
{
}
 
static inline int sysfs_create_link(struct kobject *kobj,
struct kobject *target, const char *name)
{
return 0;
}
#define sysfs_create_link(kobj,target, name) (0)
 
static inline int sysfs_create_link_nowarn(struct kobject *kobj,
struct kobject *target,
215,9 → 211,7
return 0;
}
 
static inline void sysfs_remove_link(struct kobject *kobj, const char *name)
{
}
#define sysfs_remove_link(kobj, name)
 
static inline int sysfs_rename_link_ns(struct kobject *k, struct kobject *t,
const char *old_name,
/drivers/include/linux/vmalloc.h
4,7 → 4,6
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <asm/page.h> /* pgprot_t */
#include <linux/rbtree.h>
 
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
/drivers/include/linux/file.h
12,11 → 12,6
struct file;
 
extern void fput(struct file *);
 
struct file_operations;
struct vfsmount;
struct dentry;
struct path;
struct fd {
struct file *file;
unsigned int flags;
/drivers/include/linux/pci_ids.h
110,7 → 110,6
#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
#define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330
#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe
#define PCI_CLASS_SERIAL_FIBER 0x0c04
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
 
2507,10 → 2506,6
 
#define PCI_VENDOR_ID_AZWAVE 0x1a3b
 
#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_SUBDEVICE_ID_QEMU 0x1100
 
#define PCI_VENDOR_ID_ASMEDIA 0x1b21
 
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
/drivers/include/linux/uaccess.h
2,7 → 2,6
#define __LINUX_UACCESS_H__
 
#include <linux/sched.h>
#include <asm/uaccess.h>
/*
* These routines enable/disable the pagefault handler. If disabled, it will
* not take any locks and go straight to the fixup table.
17,20 → 16,5
static inline void pagefault_enable(void)
{
}
#ifndef ARCH_HAS_NOCACHE_UACCESS
 
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
const void __user *from, unsigned long n)
{
return __copy_from_user_inatomic(to, from, n);
}
 
static inline unsigned long __copy_from_user_nocache(void *to,
const void __user *from, unsigned long n)
{
return __copy_from_user(to, from, n);
}
 
#endif /* ARCH_HAS_NOCACHE_UACCESS */
 
#endif /* __LINUX_UACCESS_H__ */
/drivers/include/linux/workqueue.h
11,7 → 11,7
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
 
struct workqueue_struct;
 
239,20 → 239,10
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
 
bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
bool queue_delayed_work(struct workqueue_struct *wq,
int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
extern bool cancel_work_sync(struct work_struct *work);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 
bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
static inline bool mod_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work(wq, dwork, delay);
}
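A small sketch of the wrapper above; 'wq', 'dwork' and the 10-tick delay are illustrative placeholders, not values taken from this tree.

static void kick_poller(struct workqueue_struct *wq, struct delayed_work *dwork)
{
	/* (re)arm the work item roughly 10 timer ticks from now */
	mod_delayed_work(wq, dwork, 10);
}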
 
 
#define INIT_WORK(_work, _func) \
/drivers/include/linux/dma-buf.h
54,7 → 54,7
* @release: release this buffer; to be called after the last dma_buf_put.
* @begin_cpu_access: [optional] called before cpu access to invalidate cpu
* caches and allocate backing storage (if not yet done)
* respectively pin the object into memory.
* respectively pin the objet into memory.
* @end_cpu_access: [optional] called after cpu access to flush caches.
* @kmap_atomic: maps a page from the buffer into kernel address
* space, users may not block until the subsequent unmap call.
93,8 → 93,10
/* after final dma_buf_put() */
void (*release)(struct dma_buf *);
 
int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
enum dma_data_direction);
void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
enum dma_data_direction);
void *(*kmap_atomic)(struct dma_buf *, unsigned long);
void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*kmap)(struct dma_buf *, unsigned long);
190,6 → 192,10
* kernel side. For example, an exporter that needs to keep a dmabuf ptr
* so that subsequent exports don't create a new dmabuf.
*/
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
get_file(dmabuf->file);
}
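A hedged sketch of the pattern described in the comment above: an exporter caching its dma_buf pointer takes an extra file reference so the cached pointer stays valid; cache_slot is an assumed name.

static void cache_exported_buf(struct dma_buf *dmabuf, struct dma_buf **cache_slot)
{
	get_dma_buf(dmabuf);	/* extra reference for the cached pointer */
	*cache_slot = dmabuf;
}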
 
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
206,9 → 212,9
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
/drivers/include/linux/wait.h
312,8 → 312,6
// wait_queue_head_t wait;
//};
 
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 
 
/drivers/include/linux/device.h
122,9 → 122,6
dev->driver_data = data;
}
 
static inline __printf(2, 3)
void dev_notice(const struct device *dev, const char *fmt, ...)
{}
 
 
#endif /* _DEVICE_H_ */
/drivers/include/linux/firmware.h
13,10 → 13,6
struct firmware {
size_t size;
const u8 *data;
struct page **pages;
 
/* firmware loader private fields */
void *priv;
};
 
struct module;
/drivers/include/linux/atomic.h
34,12 → 34,7
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*
* Besides, if an arch has a special barrier for acquire/release, it could
* implement its own __atomic_op_* and use the same framework for building
* variants
*/
#ifndef __atomic_op_acquire
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
46,17 → 41,13
smp_mb__after_atomic(); \
__ret; \
})
#endif
 
#ifndef __atomic_op_release
#define __atomic_op_release(op, args...) \
({ \
smp_mb__before_atomic(); \
op##_relaxed(args); \
})
#endif
 
#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
65,7 → 56,6
smp_mb__after_atomic(); \
__ret; \
})
#endif
 
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
558,27 → 548,6
}
#endif
 
/**
* atomic_fetch_or - perform *p |= mask and return old value of *p
* @p: pointer to atomic_t
* @mask: mask to OR on the atomic_t
*/
#ifndef atomic_fetch_or
static inline int atomic_fetch_or(atomic_t *p, int mask)
{
int old, val = atomic_read(p);
 
for (;;) {
old = atomic_cmpxchg(p, val, val | mask);
if (old == val)
break;
val = old;
}
 
return old;
}
#endif
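A usage sketch for the fallback above: set a flag bit and learn whether another path had already set it; FLAG_PENDING and the state variable are invented for the example.

#define FLAG_PENDING	0x1

static bool mark_pending(atomic_t *state)
{
	int old = atomic_fetch_or(state, FLAG_PENDING);

	return old & FLAG_PENDING;	/* true if it was already pending */
}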
 
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
/drivers/include/linux/bitmap.h
59,8 → 59,6
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
* bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
* bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
*/
 
/*
165,14 → 163,6
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
extern unsigned int bitmap_from_u32array(unsigned long *bitmap,
unsigned int nbits,
const u32 *buf,
unsigned int nwords);
extern unsigned int bitmap_to_u32array(u32 *buf,
unsigned int nwords,
const unsigned long *bitmap,
unsigned int nbits);
#ifdef __BIG_ENDIAN
extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
#else
/drivers/include/linux/fence.h
79,8 → 79,6
unsigned long flags;
ktime_t timestamp;
int status;
struct list_head child_list;
struct list_head active_list;
};
 
enum fence_flag_bits {
294,7 → 292,7
if (WARN_ON(f1->context != f2->context))
return false;
 
return (int)(f1->seqno - f2->seqno) > 0;
return f1->seqno - f2->seqno < INT_MAX;
}
 
/**
/drivers/include/linux/spinlock.h
51,7 → 51,6
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
/drivers/include/linux/cache.h
12,24 → 12,10
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
 
/*
* __read_mostly is used to keep rarely changing variables out of frequently
* updated cachelines. If an architecture doesn't support it, ignore the
* hint.
*/
#ifndef __read_mostly
#define __read_mostly
#endif
 
/*
* __ro_after_init is used to mark things that are read-only after init (i.e.
* after mark_rodata_ro() has been called). These are effectively read-only,
* but may get written to during init, so can't live in .rodata (via "const").
*/
#ifndef __ro_after_init
#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
#endif
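A short sketch of how the two annotations above are typically applied; the variable names are illustrative.

static int poll_interval __read_mostly = 100;		/* read on hot paths, rarely written */
static unsigned long boot_features __ro_after_init;	/* written only during early init */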
 
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif
/drivers/include/linux/unaligned/access_ok.h
4,62 → 4,62
#include <linux/kernel.h>
#include <asm/byteorder.h>
 
static __always_inline u16 get_unaligned_le16(const void *p)
static inline u16 get_unaligned_le16(const void *p)
{
return le16_to_cpup((__le16 *)p);
}
 
static __always_inline u32 get_unaligned_le32(const void *p)
static inline u32 get_unaligned_le32(const void *p)
{
return le32_to_cpup((__le32 *)p);
}
 
static __always_inline u64 get_unaligned_le64(const void *p)
static inline u64 get_unaligned_le64(const void *p)
{
return le64_to_cpup((__le64 *)p);
}
 
static __always_inline u16 get_unaligned_be16(const void *p)
static inline u16 get_unaligned_be16(const void *p)
{
return be16_to_cpup((__be16 *)p);
}
 
static __always_inline u32 get_unaligned_be32(const void *p)
static inline u32 get_unaligned_be32(const void *p)
{
return be32_to_cpup((__be32 *)p);
}
 
static __always_inline u64 get_unaligned_be64(const void *p)
static inline u64 get_unaligned_be64(const void *p)
{
return be64_to_cpup((__be64 *)p);
}
 
static __always_inline void put_unaligned_le16(u16 val, void *p)
static inline void put_unaligned_le16(u16 val, void *p)
{
*((__le16 *)p) = cpu_to_le16(val);
}
 
static __always_inline void put_unaligned_le32(u32 val, void *p)
static inline void put_unaligned_le32(u32 val, void *p)
{
*((__le32 *)p) = cpu_to_le32(val);
}
 
static __always_inline void put_unaligned_le64(u64 val, void *p)
static inline void put_unaligned_le64(u64 val, void *p)
{
*((__le64 *)p) = cpu_to_le64(val);
}
 
static __always_inline void put_unaligned_be16(u16 val, void *p)
static inline void put_unaligned_be16(u16 val, void *p)
{
*((__be16 *)p) = cpu_to_be16(val);
}
 
static __always_inline void put_unaligned_be32(u32 val, void *p)
static inline void put_unaligned_be32(u32 val, void *p)
{
*((__be32 *)p) = cpu_to_be32(val);
}
 
static __always_inline void put_unaligned_be64(u64 val, void *p)
static inline void put_unaligned_be64(u64 val, void *p)
{
*((__be64 *)p) = cpu_to_be64(val);
}
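A small sketch using the helpers above to pull a little-endian length field out of a raw byte buffer and emit it back in big-endian wire order; buf and len are illustrative.

static u32 read_le32_field(const u8 *buf)
{
	return get_unaligned_le32(buf);
}

static void write_be32_field(u8 *buf, u32 len)
{
	put_unaligned_be32(len, buf);
}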
/drivers/include/uapi/drm/drm.h
669,7 → 669,6
__u64 value;
};
 
#define DRM_RDWR O_RDWR
#define DRM_CLOEXEC O_CLOEXEC
struct drm_prime_handle {
__u32 handle;
/drivers/include/uapi/drm/drm_mode.h
487,21 → 487,6
__u64 blue;
};
 
struct drm_color_ctm {
/* Conversion matrix in S31.32 format. */
__s64 matrix[9];
};
 
struct drm_color_lut {
/*
* Data is U0.16 fixed point format.
*/
__u16 red;
__u16 green;
__u16 blue;
__u16 reserved;
};
 
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
/drivers/include/uapi/drm/i915_drm.h
772,12 → 772,10
#define I915_EXEC_HANDLE_LUT (1<<12)
 
/** Used for switching BSD rings on the platforms with two BSD rings */
#define I915_EXEC_BSD_SHIFT (13)
#define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)
/* default ping-pong mode */
#define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_MASK (3<<13)
#define I915_EXEC_BSD_DEFAULT (0<<13) /* default ping-pong mode */
#define I915_EXEC_BSD_RING1 (1<<13)
#define I915_EXEC_BSD_RING2 (2<<13)
 
/** Tell the kernel that the batchbuffer is processed by
* the resource streamer.
814,35 → 812,10
/** Handle of the buffer to check for busy */
__u32 handle;
 
/** Return busy status
*
* A return of 0 implies that the object is idle (after
* having flushed any pending activity), and a non-zero return that
* the object is still in-flight on the GPU. (The GPU has not yet
* signaled completion for all pending requests that reference the
* object.)
*
* The returned dword is split into two fields to indicate both
* the engines on which the object is being read, and the
* engine on which it is currently being written (if any).
*
* The low word (bits 0:15) indicate if the object is being written
* to by any engine (there can only be one, as the GEM implicit
* synchronisation rules force writes to be serialised). Only the
* engine for the last write is reported.
*
* The high word (bits 16:31) are a bitmask of which engines are
* currently reading from the object. Multiple engines may be
* reading from the object simultaneously.
*
* The value of each engine is the same as specified in the
* EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
* Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
* the I915_EXEC_RENDER engine for execution, and so it is never
* reported as active itself. Some hardware may have parallel
* execution engines, e.g. multiple media engines, which are
* mapped to the same identifier in the EXECBUFFER2 ioctl and
* so are not separately reported for busyness.
/** Return busy status (1 if busy, 0 if idle).
* The high word is used to indicate on which rings the object
* currently resides:
* 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
*/
__u32 busy;
};
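An illustrative decode of the busy dword as documented above; the helper names are invented and are not part of the uapi header.

static inline __u16 busy_write_engine(__u32 busy)
{
	return busy & 0xffff;	/* bits 0:15 - engine with an outstanding write */
}

static inline __u16 busy_read_engines(__u32 busy)
{
	return busy >> 16;	/* bits 16:31 - bitmask of reading engines */
}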
/drivers/include/uapi/linux/stat.h
File deleted
/drivers/include/uapi/linux/byteorder/little_endian.h
40,51 → 40,51
#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
 
static __always_inline __le64 __cpu_to_le64p(const __u64 *p)
static inline __le64 __cpu_to_le64p(const __u64 *p)
{
return (__force __le64)*p;
}
static __always_inline __u64 __le64_to_cpup(const __le64 *p)
static inline __u64 __le64_to_cpup(const __le64 *p)
{
return (__force __u64)*p;
}
static __always_inline __le32 __cpu_to_le32p(const __u32 *p)
static inline __le32 __cpu_to_le32p(const __u32 *p)
{
return (__force __le32)*p;
}
static __always_inline __u32 __le32_to_cpup(const __le32 *p)
static inline __u32 __le32_to_cpup(const __le32 *p)
{
return (__force __u32)*p;
}
static __always_inline __le16 __cpu_to_le16p(const __u16 *p)
static inline __le16 __cpu_to_le16p(const __u16 *p)
{
return (__force __le16)*p;
}
static __always_inline __u16 __le16_to_cpup(const __le16 *p)
static inline __u16 __le16_to_cpup(const __le16 *p)
{
return (__force __u16)*p;
}
static __always_inline __be64 __cpu_to_be64p(const __u64 *p)
static inline __be64 __cpu_to_be64p(const __u64 *p)
{
return (__force __be64)__swab64p(p);
}
static __always_inline __u64 __be64_to_cpup(const __be64 *p)
static inline __u64 __be64_to_cpup(const __be64 *p)
{
return __swab64p((__u64 *)p);
}
static __always_inline __be32 __cpu_to_be32p(const __u32 *p)
static inline __be32 __cpu_to_be32p(const __u32 *p)
{
return (__force __be32)__swab32p(p);
}
static __always_inline __u32 __be32_to_cpup(const __be32 *p)
static inline __u32 __be32_to_cpup(const __be32 *p)
{
return __swab32p((__u32 *)p);
}
static __always_inline __be16 __cpu_to_be16p(const __u16 *p)
static inline __be16 __cpu_to_be16p(const __u16 *p)
{
return (__force __be16)__swab16p(p);
}
static __always_inline __u16 __be16_to_cpup(const __be16 *p)
static inline __u16 __be16_to_cpup(const __be16 *p)
{
return __swab16p((__u16 *)p);
}
/drivers/include/uapi/linux/swab.h
45,7 → 45,9
 
static inline __attribute_const__ __u16 __fswab16(__u16 val)
{
#if defined (__arch_swab16)
#ifdef __HAVE_BUILTIN_BSWAP16__
return __builtin_bswap16(val);
#elif defined (__arch_swab16)
return __arch_swab16(val);
#else
return ___constant_swab16(val);
54,7 → 56,9
 
static inline __attribute_const__ __u32 __fswab32(__u32 val)
{
#if defined(__arch_swab32)
#ifdef __HAVE_BUILTIN_BSWAP32__
return __builtin_bswap32(val);
#elif defined(__arch_swab32)
return __arch_swab32(val);
#else
return ___constant_swab32(val);
63,7 → 67,9
 
static inline __attribute_const__ __u64 __fswab64(__u64 val)
{
#if defined (__arch_swab64)
#ifdef __HAVE_BUILTIN_BSWAP64__
return __builtin_bswap64(val);
#elif defined (__arch_swab64)
return __arch_swab64(val);
#elif defined(__SWAB_64_THRU_32__)
__u32 h = val >> 32;
96,40 → 102,28
* __swab16 - return a byteswapped 16-bit value
* @x: value to byteswap
*/
#ifdef __HAVE_BUILTIN_BSWAP16__
#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
#else
#define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
___constant_swab16(x) : \
__fswab16(x))
#endif
 
/**
* __swab32 - return a byteswapped 32-bit value
* @x: value to byteswap
*/
#ifdef __HAVE_BUILTIN_BSWAP32__
#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
#else
#define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swab32(x) : \
__fswab32(x))
#endif
 
/**
* __swab64 - return a byteswapped 64-bit value
* @x: value to byteswap
*/
#ifdef __HAVE_BUILTIN_BSWAP64__
#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
#else
#define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
___constant_swab64(x) : \
__fswab64(x))
#endif
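A short sketch of the constant-folding behaviour these macros aim for; the values are arbitrary.

static u32 swab_example(u32 runtime_val)
{
	u32 a = __swab32(0x12345678);	/* constant: folds to 0x78563412 */
	u32 b = __swab32(runtime_val);	/* runtime: __fswab32()/bswap builtin */

	return a ^ b;
}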
 
/**
* __swahw32 - return a word-swapped 32-bit value
157,7 → 151,7
* __swab16p - return a byteswapped 16-bit value from a pointer
* @p: pointer to a naturally-aligned 16-bit value
*/
static __always_inline __u16 __swab16p(const __u16 *p)
static inline __u16 __swab16p(const __u16 *p)
{
#ifdef __arch_swab16p
return __arch_swab16p(p);
170,7 → 164,7
* __swab32p - return a byteswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*/
static __always_inline __u32 __swab32p(const __u32 *p)
static inline __u32 __swab32p(const __u32 *p)
{
#ifdef __arch_swab32p
return __arch_swab32p(p);
183,7 → 177,7
* __swab64p - return a byteswapped 64-bit value from a pointer
* @p: pointer to a naturally-aligned 64-bit value
*/
static __always_inline __u64 __swab64p(const __u64 *p)
static inline __u64 __swab64p(const __u64 *p)
{
#ifdef __arch_swab64p
return __arch_swab64p(p);
238,7 → 232,7
* __swab32s - byteswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*/
static __always_inline void __swab32s(__u32 *p)
static inline void __swab32s(__u32 *p)
{
#ifdef __arch_swab32s
__arch_swab32s(p);
251,7 → 245,7
* __swab64s - byteswap a 64-bit value in-place
* @p: pointer to a naturally-aligned 64-bit value
*/
static __always_inline void __swab64s(__u64 *p)
static inline void __swab64s(__u64 *p)
{
#ifdef __arch_swab64s
__arch_swab64s(p);
/drivers/include/uapi/linux/kernel.h
1,7 → 1,7
#ifndef _UAPI_LINUX_KERNEL_H
#define _UAPI_LINUX_KERNEL_H
 
#include <linux/sysinfo.h>
//#include <linux/sysinfo.h>
 
/*
* 'kernel.h' contains some often-used function prototypes etc
9,6 → 9,5
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
 
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
 
#endif /* _UAPI_LINUX_KERNEL_H */
/drivers/include/uapi/linux/stddef.h
1,5 → 1,0
#include <linux/compiler.h>
 
#ifndef __always_inline
#define __always_inline inline
#endif
/drivers/include/uapi/asm/byteorder.h
File deleted
/drivers/include/uapi/asm/posix_types_32.h
File deleted
/drivers/include/uapi/asm/bitsperlong.h
File deleted
/drivers/include/uapi/asm/posix_types_64.h
File deleted
/drivers/include/uapi/asm/types.h
File deleted
/drivers/include/uapi/asm/stat.h
File deleted
/drivers/include/uapi/asm/swab.h
File deleted
/drivers/include/uapi/asm/posix_types_x32.h
File deleted
/drivers/include/uapi/asm/processor-flags.h
118,8 → 118,6
#define X86_CR4_SMEP _BITUL(X86_CR4_SMEP_BIT)
#define X86_CR4_SMAP_BIT 21 /* enable SMAP support */
#define X86_CR4_SMAP _BITUL(X86_CR4_SMAP_BIT)
#define X86_CR4_PKE_BIT 22 /* enable Protection Keys support */
#define X86_CR4_PKE _BITUL(X86_CR4_PKE_BIT)
 
/*
* x86-64 Task Priority Register, CR8
/drivers/include/uapi/asm/sigcontext.h
256,7 → 256,7
__u16 cs;
__u16 gs;
__u16 fs;
__u16 ss;
__u16 __pad0;
__u64 err;
__u64 trapno;
__u64 oldmask;
341,37 → 341,9
__u64 rip;
__u64 eflags; /* RFLAGS */
__u16 cs;
 
/*
* Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
* Linux saved and restored fs and gs in these slots. This
* was counterproductive, as fsbase and gsbase were never
* saved, so arch_prctl was presumably unreliable.
*
* These slots should never be reused without extreme caution:
*
* - Some DOSEMU versions stash fs and gs in these slots manually,
* thus overwriting anything the kernel expects to be preserved
* in these slots.
*
* - If these slots are ever needed for any other purpose,
* there is some risk that very old 64-bit binaries could get
* confused. I doubt that many such binaries still work,
* though, since the same patch in 2.5.64 also removed the
* 64-bit set_thread_area syscall, so it appears that there
* is no TLS API beyond modify_ldt that works in both pre-
* and post-2.5.64 kernels.
*
* If the kernel ever adds explicit fs, gs, fsbase, and gsbase
* save/restore, it will most likely need to be opt-in and use
* different context slots.
*/
__u16 gs;
__u16 fs;
union {
__u16 ss; /* If UC_SIGCONTEXT_SS */
__u16 __pad0; /* Alias name for old (!UC_SIGCONTEXT_SS) user-space */
};
__u16 __pad0;
__u64 err;
__u64 trapno;
__u64 oldmask;
/drivers/include/acpi/platform/acenv.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/platform/aclinux.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
150,8 → 150,6
*/
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_readable
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_writable
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize_command_signals
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate_command_signals
 
/*
* OSL interfaces used by utilities
/drivers/include/acpi/platform/aclinuxex.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
127,16 → 127,6
return TRUE;
}
 
static inline acpi_status acpi_os_initialize_command_signals(void)
{
return AE_OK;
}
 
static inline void acpi_os_terminate_command_signals(void)
{
return;
}
 
/*
* OSL interfaces added by Linux
*/
/drivers/include/acpi/platform/acenvex.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/platform/acgcc.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/actbl3.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/acbuffer.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/acconfig.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/acexcep.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
126,9 → 126,8
#define AE_OWNER_ID_LIMIT EXCEP_ENV (0x001B)
#define AE_NOT_CONFIGURED EXCEP_ENV (0x001C)
#define AE_ACCESS EXCEP_ENV (0x001D)
#define AE_IO_ERROR EXCEP_ENV (0x001E)
 
#define AE_CODE_ENV_MAX 0x001E
#define AE_CODE_ENV_MAX 0x001D
 
/*
* Programmer exceptions
264,8 → 263,7
"There are no more Owner IDs available for ACPI tables or control methods"),
EXCEP_TXT("AE_NOT_CONFIGURED",
"The interface is not part of the current subsystem configuration"),
EXCEP_TXT("AE_ACCESS", "Permission denied for the requested operation"),
EXCEP_TXT("AE_IO_ERROR", "An I/O error occurred")
EXCEP_TXT("AE_ACCESS", "Permission denied for the requested operation")
};
 
static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = {
/drivers/include/acpi/acnames.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/acoutput.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
262,7 → 262,7
#define ACPI_GET_FUNCTION_NAME _acpi_function_name
 
/*
* The Name parameter should be the procedure name as a non-quoted string.
* The Name parameter should be the procedure name as a quoted string.
* The function name is also used by the function exit macros below.
* Note: (const char) is used to be compatible with the debug interfaces
* and macros such as __func__.
/drivers/include/acpi/acpi.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/acpi_bus.h
87,8 → 87,6
.package.elements = (eles) \
}
 
bool acpi_dev_present(const char *hid);
 
#ifdef CONFIG_ACPI
 
#define ACPI_BUS_FILE_ROOT "acpi"
391,13 → 389,13
 
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return !IS_ERR_OR_NULL(fwnode) && (fwnode->type == FWNODE_ACPI
return fwnode && (fwnode->type == FWNODE_ACPI
|| fwnode->type == FWNODE_ACPI_DATA);
}
 
static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
{
return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_ACPI;
return fwnode && fwnode->type == FWNODE_ACPI;
}
 
static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
628,9 → 626,7
 
static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
{
return adev->power.states[ACPI_STATE_D3_COLD].flags.valid ||
((acpi_gbl_FADT.header.revision < 6) &&
adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set);
return adev->power.states[ACPI_STATE_D3_COLD].flags.valid;
}
 
#else /* CONFIG_ACPI */
/drivers/include/acpi/acpi_io.h
6,9 → 6,9
#include <asm/acpi.h>
 
 
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
void acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
void acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
void *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
 
int acpi_os_map_generic_address(struct acpi_generic_address *addr);
void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
/drivers/include/acpi/acpiosxf.h
7,7 → 7,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
349,28 → 349,12
#endif
 
/*
* Debug IO
* Debug input
*/
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_line
acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read);
#endif
 
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_initialize_command_signals
acpi_status acpi_os_initialize_command_signals(void);
#endif
 
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_terminate_command_signals
void acpi_os_terminate_command_signals(void);
#endif
 
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_wait_command_ready
acpi_status acpi_os_wait_command_ready(void);
#endif
 
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_notify_command_complete
acpi_status acpi_os_notify_command_complete(void);
#endif
 
/*
* Obtain ACPI table(s)
*/
/drivers/include/acpi/acpixf.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
46,7 → 46,7
 
/* Current ACPICA subsystem version in YYYYMMDD format */
 
#define ACPI_CA_VERSION 0x20160108
#define ACPI_CA_VERSION 0x20150930
 
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
190,11 → 190,6
ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
 
/*
* Optionally support group module level code.
*/
ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE);
 
/*
* Optionally use 32-bit FADT addresses if and when there is a conflict
* (address mismatch) between the 32-bit and 64-bit versions of the
* address. Although ACPICA adheres to the ACPI specification which
268,20 → 263,7
ACPI_INIT_GLOBAL(u32, acpi_dbg_level, ACPI_DEBUG_DEFAULT);
ACPI_INIT_GLOBAL(u32, acpi_dbg_layer, 0);
 
/* Optionally enable timer output with Debug Object output */
 
ACPI_INIT_GLOBAL(u8, acpi_gbl_display_debug_timer, FALSE);
 
/*
* Debugger command handshake globals. Host OSes need to access these
* variables to implement their own command handshake mechanism.
*/
#ifdef ACPI_DEBUGGER
ACPI_INIT_GLOBAL(u8, acpi_gbl_method_executing, FALSE);
ACPI_GLOBAL(char, acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]);
#endif
 
/*
* Other miscellaneous globals
*/
ACPI_GLOBAL(struct acpi_table_fadt, acpi_gbl_FADT);
384,29 → 366,6
 
#endif /* ACPI_APPLICATION */
 
/*
* Debugger prototypes
*
* All interfaces used by debugger will be configured
* out of the ACPICA build unless the ACPI_DEBUGGER
* flag is defined.
*/
#ifdef ACPI_DEBUGGER
#define ACPI_DBR_DEPENDENT_RETURN_OK(prototype) \
ACPI_EXTERNAL_RETURN_OK(prototype)
 
#define ACPI_DBR_DEPENDENT_RETURN_VOID(prototype) \
ACPI_EXTERNAL_RETURN_VOID(prototype)
 
#else
#define ACPI_DBR_DEPENDENT_RETURN_OK(prototype) \
static ACPI_INLINE prototype {return(AE_OK);}
 
#define ACPI_DBR_DEPENDENT_RETURN_VOID(prototype) \
static ACPI_INLINE prototype {return;}
 
#endif /* ACPI_DEBUGGER */
 
/*****************************************************************************
*
* ACPICA public interface prototypes
863,9 → 822,17
ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_leave_sleep_state(u8 sleep_state))
 
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_set_firmware_waking_vector
acpi_set_firmware_waking_vectors
(acpi_physical_address physical_address,
acpi_physical_address physical_address64))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_set_firmware_waking_vector(u32
physical_address))
#if ACPI_MACHINE_WIDTH == 64
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_set_firmware_waking_vector64(u64
physical_address))
#endif
/*
* ACPI Timer interfaces
*/
897,9 → 864,11
acpi_warning(const char *module_name,
u32 line_number,
const char *format, ...))
ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3)
void ACPI_INTERNAL_VAR_XFACE
acpi_info(const char *format, ...))
acpi_info(const char *module_name,
u32 line_number,
const char *format, ...))
ACPI_MSG_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(3)
void ACPI_INTERNAL_VAR_XFACE
acpi_bios_error(const char *module_name,
960,8 → 929,6
void **data,
void (*callback)(void *)))
 
void acpi_run_debugger(char *batch_buffer);
 
void acpi_set_debugger_thread_id(acpi_thread_id thread_id);
 
#endif /* __ACXFACE_H__ */
/drivers/include/acpi/acrestyp.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/actbl.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/actbl1.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/actbl2.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/drivers/include/acpi/actypes.h
5,7 → 5,7
*****************************************************************************/
 
/*
* Copyright (C) 2000 - 2016, Intel Corp.
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
1148,7 → 1148,7
 
#define ACPI_PCICLS_STRING_SIZE 7 /* Includes null terminator */
 
/* Structures used for device/processor HID, UID, CID */
/* Structures used for device/processor HID, UID, CID, and SUB */
 
struct acpi_pnp_device_id {
u32 length; /* Length of string + null */
1178,6 → 1178,7
u64 address; /* _ADR value */
struct acpi_pnp_device_id hardware_id; /* _HID value */
struct acpi_pnp_device_id unique_id; /* _UID value */
struct acpi_pnp_device_id subsystem_id; /* _SUB value */
struct acpi_pnp_device_id class_code; /* _CLS value */
struct acpi_pnp_device_id_list compatible_id_list; /* _CID list <must be last> */
};
1192,12 → 1193,13
#define ACPI_VALID_ADR 0x0002
#define ACPI_VALID_HID 0x0004
#define ACPI_VALID_UID 0x0008
#define ACPI_VALID_SUB 0x0010
#define ACPI_VALID_CID 0x0020
#define ACPI_VALID_CLS 0x0040
#define ACPI_VALID_SXDS 0x0100
#define ACPI_VALID_SXWS 0x0200
 
/* Flags for _STA method */
/* Flags for _STA return value (current_status above) */
 
#define ACPI_STA_DEVICE_PRESENT 0x01
#define ACPI_STA_DEVICE_ENABLED 0x02
/drivers/include/syscall.h
488,7 → 488,27
 
void FASTCALL sysSetFramebuffer(void *fb)__asm__("SetFramebuffer");
 
static inline void __iomem *ioremap(u32 offset, size_t size)
{
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100);
}
 
static inline void __iomem *ioremap_nocache(u32 offset, size_t size)
{
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100);
}
 
static inline void __iomem *ioremap_wc(u32 offset, size_t size)
{
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_WRITEC|0x100);
}
 
 
static inline void iounmap(void *addr)
{
FreeKernelSpace(addr);
}
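A minimal sketch of the map/use/unmap pattern with the wrappers above; the BAR address, size, register offset and readl() accessor are assumptions for illustration.

static u32 probe_device_rev(u32 bar_base, size_t bar_size)
{
	void __iomem *regs = ioremap_nocache(bar_base, bar_size);
	u32 rev = 0;

	if (regs) {
		rev = readl(regs + 0x08);	/* hypothetical revision register */
		iounmap((void *)regs);
	}
	return rev;
}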
 
static inline void __SysMsgBoardStr(char *text)
{
__asm__ __volatile__(