Subversion Repositories: KolibriOS

Compare Revisions

Rev 6082 → Rev 5352

/drivers/include/asm/fpu/types.h
File deleted
/drivers/include/asm/intel-mid.h
File deleted
/drivers/include/asm/msr-index.h
File deleted
/drivers/include/asm/page_types.h
File deleted
/drivers/include/asm/preempt.h
30,9 → 30,12
/*
* must be macros to avoid header recursion hell
*/
#define init_task_preempt_count(p) do { } while (0)
#define init_task_preempt_count(p) do { \
task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
} while (0)
 
#define init_idle_preempt_count(p, cpu) do { \
task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
} while (0)
 
87,9 → 90,9
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
static __always_inline bool should_resched(int preempt_offset)
static __always_inline bool should_resched(void)
{
return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
return unlikely(!raw_cpu_read_4(__preempt_count));
}
 
#ifdef CONFIG_PREEMPT
96,9 → 99,11
extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() asm ("call ___preempt_schedule")
extern asmlinkage void preempt_schedule(void);
extern asmlinkage void ___preempt_schedule_notrace(void);
# define __preempt_schedule_notrace() asm ("call ___preempt_schedule_notrace")
extern asmlinkage void preempt_schedule_notrace(void);
# ifdef CONFIG_CONTEXT_TRACKING
extern asmlinkage void ___preempt_schedule_context(void);
# define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
extern asmlinkage void preempt_schedule_context(void);
#endif
#endif
 
#endif /* __ASM_PREEMPT_H */
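
A note on the should_resched() hunk above: the newer revision compares the per-CPU preempt count against a caller-supplied offset instead of testing it for zero, so a caller that still holds one level of preempt-disable can ask whether rescheduling would be allowed once it drops its own reference. A minimal user-space sketch of the idea, with a plain variable standing in for the per-CPU __preempt_count:

    #include <stdbool.h>

    static int preempt_count;   /* stand-in for raw_cpu_read_4(__preempt_count) */

    /* Older form: reschedule only when the count has already reached zero. */
    static bool should_resched_old(void)
    {
        return preempt_count == 0;
    }

    /* Newer form: reschedule once the count drops to the caller's offset.
     * In Linux, cond_resched_lock() passes PREEMPT_LOCK_OFFSET because it
     * still holds one preempt reference of its own. */
    static bool should_resched_new(int preempt_offset)
    {
        return preempt_count == preempt_offset;
    }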
/drivers/include/asm/alternative.h
5,7 → 5,6
#include <linux/stddef.h>
#include <linux/stringify.h>
#include <asm/asm.h>
#include <asm/ptrace.h>
 
/*
* Alternative inline assembly for SMP.
48,16 → 47,9
s32 repl_offset; /* offset to replacement instruction */
u16 cpuid; /* cpuid bit set for replacement */
u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction */
u8 padlen; /* length of build-time padding */
} __packed;
u8 replacementlen; /* length of new instruction, <= instrlen */
};
 
/*
* Debug flag that can be tested to see whether alternative
* instructions were patched in already:
*/
extern int alternatives_patched;
 
extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 
83,69 → 75,50
}
#endif /* CONFIG_SMP */
 
#define b_replacement(num) "664"#num
#define e_replacement(num) "665"#num
#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n"
 
#define alt_end_marker "663"
#define b_replacement(number) "663"#number
#define e_replacement(number) "664"#number
 
#define alt_slen "662b-661b"
#define alt_pad_len alt_end_marker"b-662b"
#define alt_total_slen alt_end_marker"b-661b"
#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f"
#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f"
 
#define __OLDINSTR(oldinstr, num) \
"661:\n\t" oldinstr "\n662:\n" \
".skip -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \
"((" alt_rlen(num) ")-(" alt_slen ")),0x90\n"
 
#define OLDINSTR(oldinstr, num) \
__OLDINSTR(oldinstr, num) \
alt_end_marker ":\n"
 
/*
* max without conditionals. Idea adapted from:
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
*
* The additional "-" is needed because gas works with s32s.
*/
#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))"
 
/*
* Pad the second replacement alternative with additional NOPs if it is
* additionally longer than the first replacement alternative.
*/
#define OLDINSTR_2(oldinstr, num1, num2) \
"661:\n\t" oldinstr "\n662:\n" \
".skip -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \
"(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")), 0x90\n" \
alt_end_marker ":\n"
 
#define ALTINSTR_ENTRY(feature, num) \
#define ALTINSTR_ENTRY(feature, number) \
" .long 661b - .\n" /* label */ \
" .long " b_replacement(num)"f - .\n" /* new instruction */ \
" .long " b_replacement(number)"f - .\n" /* new instruction */ \
" .word " __stringify(feature) "\n" /* feature bit */ \
" .byte " alt_total_slen "\n" /* source len */ \
" .byte " alt_rlen(num) "\n" /* replacement len */ \
" .byte " alt_pad_len "\n" /* pad len */
" .byte " alt_slen "\n" /* source len */ \
" .byte " alt_rlen(number) "\n" /* replacement len */
 
#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \
b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t"
#define DISCARD_ENTRY(number) /* rlen <= slen */ \
" .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"
 
#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \
b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t"
 
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
OLDINSTR(oldinstr, 1) \
OLDINSTR(oldinstr) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature, 1) \
".popsection\n" \
".pushsection .discard,\"aw\",@progbits\n" \
DISCARD_ENTRY(1) \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
".popsection"
 
#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
OLDINSTR_2(oldinstr, 1, 2) \
OLDINSTR(oldinstr) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature1, 1) \
ALTINSTR_ENTRY(feature2, 2) \
".popsection\n" \
".pushsection .discard,\"aw\",@progbits\n" \
DISCARD_ENTRY(1) \
DISCARD_ENTRY(2) \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
172,9 → 145,6
#define alternative(oldinstr, newinstr, feature) \
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
 
#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory")
 
/*
* Alternative inline assembly with input.
*
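
The alt_max_short() macro above needs a max() that gas can evaluate as a constant expression, hence the branchless form. The underlying bithack, in its usual C shape (a sketch of the technique, not kernel code; in C, where a comparison yields 1 rather than gas's -1, a single negation builds the mask):

    /* Branchless max from the Stanford bithacks page: when a < b the mask
     * -(a < b) is all ones, so a ^ (a ^ b) yields b; otherwise the mask is
     * zero and the result stays a. */
    static int max_branchless(int a, int b)
    {
        return a ^ ((a ^ b) & -(a < b));
    }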
/drivers/include/asm/arch_hweight.h
21,13 → 21,15
* ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
* compiler switches.
*/
static __always_inline unsigned int __arch_hweight32(unsigned int w)
static inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x55555555);
res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
res = (res + (res >> 4)) & 0x0F0F0F0F;
res = res + (res >> 8);
return (res + (res >> 16)) & 0x000000FF;
unsigned int res = 0;
 
asm ("call __sw_hweight32"
: "="REG_OUT (res)
: REG_IN (w));
 
return res;
}
 
static inline unsigned int __arch_hweight16(unsigned int w)
40,23 → 42,20
return __arch_hweight32(w & 0xff);
}
 
#ifdef CONFIG_X86_32
static inline unsigned long __arch_hweight64(__u64 w)
{
unsigned long res = 0;
 
#ifdef CONFIG_X86_32
return __arch_hweight32((u32)w) +
__arch_hweight32((u32)(w >> 32));
}
#else
static __always_inline unsigned long __arch_hweight64(__u64 w)
{
unsigned long res = 0;
 
asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
: "="REG_OUT (res)
: REG_IN (w));
#endif /* CONFIG_X86_32 */
 
return res;
}
#endif /* CONFIG_X86_32 */
 
#endif
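
The open-coded fallback that the newer __arch_hweight32() uses is the classic SWAR population count: 2-bit partial sums, then 4-bit, then byte-wide folding. A standalone version with the steps annotated (a sketch; the __sw_hweight32 helper the older revision calls implements essentially the same algorithm):

    #include <stdint.h>

    static unsigned int hweight32(uint32_t w)
    {
        w = w - ((w >> 1) & 0x55555555);                /* 2-bit sums  */
        w = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* 4-bit sums  */
        w = (w + (w >> 4)) & 0x0F0F0F0F;                /* 8-bit sums  */
        w = w + (w >> 8);                               /* fold bytes  */
        return (w + (w >> 16)) & 0xFF;                  /* final count */
    }
    /* e.g. hweight32(0xF0F0F0F0) == 16 */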
/drivers/include/asm/asm.h
63,31 → 63,6
_ASM_ALIGN ; \
_ASM_PTR (entry); \
.popsection
 
.macro ALIGN_DESTINATION
/* check for bad alignment of destination */
movl %edi,%ecx
andl $7,%ecx
jz 102f /* already aligned */
subl $8,%ecx
negl %ecx
subl %ecx,%edx
100: movb (%rsi),%al
101: movb %al,(%rdi)
incq %rsi
incq %rdi
decl %ecx
jnz 100b
102:
.section .fixup,"ax"
103: addl %ecx,%edx /* ecx is zerorest also */
jmp copy_user_handle_tail
.previous
 
_ASM_EXTABLE(100b,103b)
_ASM_EXTABLE(101b,103b)
.endm
 
#else
# define _ASM_EXTABLE(from,to) \
" .pushsection \"__ex_table\",\"a\"\n" \
/drivers/include/asm/atomic.h
22,9 → 22,9
*
* Atomically reads the value of @v.
*/
static __always_inline int atomic_read(const atomic_t *v)
static inline int atomic_read(const atomic_t *v)
{
return READ_ONCE((v)->counter);
return ACCESS_ONCE((v)->counter);
}
 
/**
34,9 → 34,9
*
* Atomically sets the value of @v to @i.
*/
static __always_inline void atomic_set(atomic_t *v, int i)
static inline void atomic_set(atomic_t *v, int i)
{
WRITE_ONCE(v->counter, i);
v->counter = i;
}
 
/**
46,7 → 46,7
*
* Atomically adds @i to @v.
*/
static __always_inline void atomic_add(int i, atomic_t *v)
static inline void atomic_add(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter)
60,7 → 60,7
*
* Atomically subtracts @i from @v.
*/
static __always_inline void atomic_sub(int i, atomic_t *v)
static inline void atomic_sub(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0"
: "+m" (v->counter)
76,7 → 76,7
* true if the result is zero, or false for all
* other cases.
*/
static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
}
87,7 → 87,7
*
* Atomically increments @v by 1.
*/
static __always_inline void atomic_inc(atomic_t *v)
static inline void atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter));
99,7 → 99,7
*
* Atomically decrements @v by 1.
*/
static __always_inline void atomic_dec(atomic_t *v)
static inline void atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
: "+m" (v->counter));
113,7 → 113,7
* returns true if the result is 0, or false for all other
* cases.
*/
static __always_inline int atomic_dec_and_test(atomic_t *v)
static inline int atomic_dec_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
}
126,7 → 126,7
* and returns true if the result is zero, or false for all
* other cases.
*/
static __always_inline int atomic_inc_and_test(atomic_t *v)
static inline int atomic_inc_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
}
140,7 → 140,7
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static __always_inline int atomic_add_negative(int i, atomic_t *v)
static inline int atomic_add_negative(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
}
152,7 → 152,7
*
* Atomically adds @i to @v and returns @i + @v
*/
static __always_inline int atomic_add_return(int i, atomic_t *v)
static inline int atomic_add_return(int i, atomic_t *v)
{
return i + xadd(&v->counter, i);
}
164,7 → 164,7
*
* Atomically subtracts @i from @v and returns @v - @i
*/
static __always_inline int atomic_sub_return(int i, atomic_t *v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
return atomic_add_return(-i, v);
}
172,7 → 172,7
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
 
static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
return cmpxchg(&v->counter, old, new);
}
182,21 → 182,6
return xchg(&v->counter, new);
}
 
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
asm volatile(LOCK_PREFIX #op"l %1,%0" \
: "+m" (v->counter) \
: "ir" (i) \
: "memory"); \
}
 
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
 
#undef ATOMIC_OP
 
/**
* __atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
206,7 → 191,7
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns the old value of @v.
*/
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
228,12 → 213,22
* Atomically adds 1 to @v
* Returns the new value of @u
*/
static __always_inline short int atomic_inc_short(short int *v)
static inline short int atomic_inc_short(short int *v)
{
asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
return *v;
}
 
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
asm volatile(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)), "m" (*(addr)) : "memory")
 
#define atomic_set_mask(mask, addr) \
asm volatile(LOCK_PREFIX "orl %0,%1" \
: : "r" ((unsigned)(mask)), "m" (*(addr)) \
: "memory")
 
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
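
__atomic_add_unless() above is the standard compare-and-swap retry loop: read the counter, bail out on the forbidden value, try to publish the new value, and retry if another CPU raced in between. The same pattern in portable form (a sketch using GCC's __sync_val_compare_and_swap in place of atomic_cmpxchg(); like the kernel version, it returns the old value):

    /* Add a to *v unless *v == u; return the value seen before the update. */
    static int add_unless(int *v, int a, int u)
    {
        int c = *v;                     /* atomic_read() */
        for (;;) {
            int old;
            if (c == u)                 /* forbidden value: leave it alone */
                break;
            old = __sync_val_compare_and_swap(v, c, c + a);
            if (old == c)               /* our update won the race */
                break;
            c = old;                    /* someone else changed *v: retry */
        }
        return c;
    }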
/drivers/include/asm/atomic64_32.h
4,7 → 4,7
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/cmpxchg.h>
//#include <asm/cmpxchg.h>
 
/* An 64bit atomic type */
 
/drivers/include/asm/barrier.h
35,12 → 35,12
#define smp_mb() mb()
#define smp_rmb() dma_rmb()
#define smp_wmb() barrier()
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif /* SMP */
 
#define read_barrier_depends() do { } while (0)
57,12 → 57,12
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
WRITE_ONCE(*p, v); \
ACCESS_ONCE(*p) = (v); \
} while (0)
 
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
74,12 → 74,12
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
WRITE_ONCE(*p, v); \
ACCESS_ONCE(*p) = (v); \
} while (0)
 
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
___p1; \
91,4 → 91,17
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()
 
/*
* Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined
* code region.
*
* (Could use an alternative three way for this if there was one.)
*/
static __always_inline void rdtsc_barrier(void)
{
alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
 
#endif /* _ASM_X86_BARRIER_H */
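
The smp_store_release()/smp_load_acquire() pair above encodes the usual message-passing contract: everything written before the release store is visible to any thread that observes that store with an acquire load. The same contract in portable C11 atomics (a sketch, not the kernel macros):

    #include <stdatomic.h>

    int payload;            /* ordinary data, no atomics needed */
    atomic_int ready;       /* the flag that carries the ordering */

    void producer(void)
    {
        payload = 42;                                           /* plain store */
        atomic_store_explicit(&ready, 1, memory_order_release); /* publish */
    }

    void consumer(void)
    {
        if (atomic_load_explicit(&ready, memory_order_acquire))
            (void)payload;  /* guaranteed to read 42 here */
    }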
/drivers/include/asm/cacheflush.h
8,7 → 8,7
/*
* The set_memory_* API can be used to change various attributes of a virtual
* address range. The attributes include:
* Cachability : UnCached, WriteCombining, WriteThrough, WriteBack
* Cachability : UnCached, WriteCombining, WriteBack
* Executability : eXeutable, NoteXecutable
* Read/Write : ReadOnly, ReadWrite
* Presence : NotPresent
35,11 → 35,9
 
int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wt(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wt(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
50,12 → 48,10
 
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wt(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);
 
int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wt(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
 
/*
109,10 → 105,9
};
 
 
 
void clflush_cache_range(void *addr, unsigned int size);
 
#define mmio_flush_range(addr, size) clflush_cache_range(addr, size)
 
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
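
All of the set_memory_*() calls above take a kernel virtual address plus a page count, so typical driver usage brackets a buffer the device will touch: switch it to uncached while the hardware owns it, then back to write-back. A hedged sketch (buf is assumed to be page-aligned kernel memory; error handling elided):

    void with_uncached(void *buf, int numpages)
    {
        set_memory_uc((unsigned long)buf, numpages);
        /* ... device reads/writes buf without CPU caching in the way ... */
        set_memory_wb((unsigned long)buf, numpages);
    }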
/drivers/include/asm/cpufeature.h
12,7 → 12,7
#include <asm/disabled-features.h>
#endif
 
#define NCAPINTS 14 /* N 32-bit words worth of info */
#define NCAPINTS 11 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
 
/*
119,7 → 119,6
#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
175,9 → 174,7
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
 
/*
* Auxiliary flags: Linux defined - For features scattered in various
193,11 → 190,10
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */
#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
 
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
216,7 → 212,6
#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
 
 
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
230,19 → 225,15
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
 
/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
250,15 → 241,6
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
 
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
 
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
 
/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
 
/*
* BUG word(s)
*/
272,7 → 254,6
#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
 
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
407,7 → 388,6
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
 
#if __GNUC__ >= 4
extern void warn_pre_alternatives(void);
436,7 → 416,6
" .word %P0\n" /* 1: do replace */
" .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
/* skipping size check since replacement size = 0 */
: : "i" (X86_FEATURE_ALWAYS) : : t_warn);
451,7 → 430,6
" .word %P0\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
/* skipping size check since replacement size = 0 */
: : "i" (bit) : : t_no);
477,7 → 455,6
" .word %P1\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
504,30 → 481,31
static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
{
#ifdef CC_HAVE_ASM_GOTO
asm_volatile_goto("1: jmp %l[t_dynamic]\n"
/*
* We need to spell the jumps to the compiler because, depending on the offset,
* the replacement jump can be bigger than the original jump, and this we cannot
* have. Thus, we force the jump to the widest, 4-byte, signed relative
* offset even though the last would often fit in less bytes.
*/
asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
"2:\n"
".skip -(((5f-4f) - (2b-1b)) > 0) * "
"((5f-4f) - (2b-1b)),0x90\n"
"3:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 4f - .\n" /* repl offset */
" .long 3f - .\n" /* repl offset */
" .word %P1\n" /* always replace */
" .byte 3b - 1b\n" /* src len */
" .byte 5f - 4f\n" /* repl len */
" .byte 3b - 2b\n" /* pad len */
" .byte 2b - 1b\n" /* src len */
" .byte 4f - 3f\n" /* repl len */
".previous\n"
".section .altinstr_replacement,\"ax\"\n"
"4: jmp %l[t_no]\n"
"5:\n"
"3: .byte 0xe9\n .long %l[t_no] - 2b\n"
"4:\n"
".previous\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 0\n" /* no replacement */
" .word %P0\n" /* feature bit */
" .byte 3b - 1b\n" /* src len */
" .byte 2b - 1b\n" /* src len */
" .byte 0\n" /* repl len */
" .byte 0\n" /* pad len */
".previous\n"
: : "i" (bit), "i" (X86_FEATURE_ALWAYS)
: : t_dynamic, t_no);
547,7 → 525,6
" .word %P2\n" /* always replace */
" .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
562,7 → 539,6
" .word %P1\n" /* feature bit */
" .byte 4b - 3b\n" /* src len */
" .byte 6f - 5f\n" /* repl len */
" .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
/drivers/include/asm/delay.h
4,6 → 4,5
#include <asm-generic/delay.h>
 
void use_tsc_delay(void);
void use_mwaitx_delay(void);
 
#endif /* _ASM_X86_DELAY_H */
/drivers/include/asm/e820.h
40,6 → 40,14
}
#endif
 
#ifdef CONFIG_MEMTEST
extern void early_memtest(unsigned long start, unsigned long end);
#else
static inline void early_memtest(unsigned long start, unsigned long end)
{
}
#endif
 
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
extern u64 early_reserve_e820(u64 sizet, u64 align);
/drivers/include/asm/irqflags.h
136,6 → 136,10
#define USERGS_SYSRET32 \
swapgs; \
sysretl
#define ENABLE_INTERRUPTS_SYSEXIT32 \
swapgs; \
sti; \
sysexit
 
#else
#define INTERRUPT_RETURN iret
159,27 → 163,22
 
return arch_irqs_disabled_flags(flags);
}
#endif /* !__ASSEMBLY__ */
 
#ifdef __ASSEMBLY__
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
 
# ifdef CONFIG_X86_64
# define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
# define LOCKDEP_SYS_EXIT_IRQ \
#define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
#define ARCH_LOCKDEP_SYS_EXIT_IRQ \
TRACE_IRQS_ON; \
sti; \
call lockdep_sys_exit_thunk; \
SAVE_REST; \
LOCKDEP_SYS_EXIT; \
RESTORE_REST; \
cli; \
TRACE_IRQS_OFF;
 
# else
# define LOCKDEP_SYS_EXIT \
#define ARCH_LOCKDEP_SYS_EXIT \
pushl %eax; \
pushl %ecx; \
pushl %edx; \
187,12 → 186,24
popl %edx; \
popl %ecx; \
popl %eax;
# define LOCKDEP_SYS_EXIT_IRQ
 
#define ARCH_LOCKDEP_SYS_EXIT_IRQ
# endif
 
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCKDEP_SYS_EXIT ARCH_LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
#endif
 
#endif /* __ASSEMBLY__ */
 
#endif
/drivers/include/asm/math_emu.h
2,6 → 2,7
#define _ASM_X86_MATH_EMU_H
 
#include <asm/ptrace.h>
#include <asm/vm86.h>
 
/* This structure matches the layout of the data saved to the stack
following a device-not-present interrupt, part of it saved
9,6 → 10,9
*/
struct math_emu_info {
long ___orig_eip;
union {
struct pt_regs *regs;
struct kernel_vm86_regs *vm86;
};
};
#endif /* _ASM_X86_MATH_EMU_H */
/drivers/include/asm/msr.h
1,7 → 1,7
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H
 
#include "msr-index.h"
#include <uapi/asm/msr.h>
 
#ifndef __ASSEMBLY__
 
8,7 → 8,6
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>
 
struct msr {
union {
47,13 → 46,14
* it means rax *or* rdx.
*/
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high) unsigned long low, high
#define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32)
#define DECLARE_ARGS(val, low, high) unsigned low, high
#define EAX_EDX_VAL(val, low, high) ((low) | ((u64)(high) << 32))
#define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high)
#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high) unsigned long long val
#define EAX_EDX_VAL(val, low, high) (val)
#define EAX_EDX_ARGS(val, low, high) "A" (val)
#define EAX_EDX_RET(val, low, high) "=A" (val)
#endif
 
105,19 → 105,12
return err;
}
 
extern unsigned long long native_read_tsc(void);
 
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);
 
/**
* rdtsc() - returns the current TSC without ordering constraints
*
* rdtsc() returns the result of RDTSC as a 64-bit integer. The
* only ordering constraint it supplies is the ordering implied by
* "asm volatile": it will put the RDTSC in the place you expect. The
* CPU can and will speculatively execute that RDTSC, though, so the
* results can be non-monotonic if compared on different CPUs.
*/
static __always_inline unsigned long long rdtsc(void)
static __always_inline unsigned long long __native_read_tsc(void)
{
DECLARE_ARGS(val, low, high);
 
159,10 → 152,8
#define rdmsrl(msr, val) \
((val) = native_read_msr((msr)))
 
static inline void wrmsrl(unsigned msr, u64 val)
{
native_write_msr(msr, (u32)val, (u32)(val >> 32));
}
#define wrmsrl(msr, val) \
native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32))
 
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
188,6 → 179,12
return err;
}
 
#define rdtscl(low) \
((low) = (u32)__native_read_tsc())
 
#define rdtscll(val) \
((val) = __native_read_tsc())
 
#define rdpmc(counter, low, high) \
do { \
u64 _l = native_read_pmc((counter)); \
197,15 → 194,19
 
#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
 
#define rdtscp(low, high, aux) \
do { \
unsigned long long _val = native_read_tscp(&(aux)); \
(low) = (u32)_val; \
(high) = (u32)(_val >> 32); \
} while (0)
 
#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))
 
#endif /* !CONFIG_PARAVIRT */
 
/*
* 64-bit version of wrmsr_safe():
*/
static inline int wrmsrl_safe(u32 msr, u64 val)
{
return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}
#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
(u32)((val) >> 32))
 
#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
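
The DECLARE_ARGS()/EAX_EDX_VAL() rework at the top of msr.h is about how RDMSR, RDTSC and RDPMC return a 64-bit result split across the EDX:EAX register pair. Keeping low and high in 64-bit variables, as the newer revision does, lets (low) | (high) << 32 compose the value without an extra instruction to clear the upper half of low. A minimal sketch of the same composition around RDTSC (GCC inline asm; x86-64 assumed, where writes to a 32-bit register already zero-extend):

    #include <stdint.h>

    static inline uint64_t rdtsc64(void)
    {
        uint64_t low, high;
        /* RDTSC: low half in EAX, high half in EDX. */
        asm volatile("rdtsc" : "=a"(low), "=d"(high));
        return low | (high << 32);
    }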
 
/drivers/include/asm/pgtable-2level.h
62,8 → 62,44
return ((value >> rightshift) & mask) << leftshift;
}
 
/*
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
* split up the 29 bits of offset into this range.
*/
#define PTE_FILE_MAX_BITS 29
#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1)
#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1)
#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
 
#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1)
#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1)
 
#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1)
#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2)
 
static __always_inline pgoff_t pte_to_pgoff(pte_t pte)
{
return (pgoff_t)
(pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) +
pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) +
pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3));
}
 
static __always_inline pte_t pgoff_to_pte(pgoff_t off)
{
return (pte_t){
.pte_low =
pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) +
pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) +
pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) +
_PAGE_FILE,
};
}
 
/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS 5
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
 
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
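
pte_to_pgoff()/pgoff_to_pte(), which the older revision still carries, scatter a 29-bit file offset around the PTE bits that must stay reserved (_PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_PROTNONE). Each pte_bitop() call relocates one contiguous chunk: shift the field down, mask it to width, shift it up to its new position. A toy round trip with made-up field positions, skipping one reserved bit at position 4:

    #include <stdint.h>

    /* Same shape as the kernel's pte_bitop(). */
    static uint32_t bitop(uint32_t v, unsigned rshift, uint32_t mask, unsigned lshift)
    {
        return ((v >> rshift) & mask) << lshift;
    }

    /* Pack an 8-bit value: low nibble to bits 0..3, high nibble to 5..8. */
    static uint32_t pack(uint32_t off)
    {
        return bitop(off, 0, 0xF, 0) | bitop(off, 4, 0xF, 5);
    }

    static uint32_t unpack(uint32_t pte)
    {
        return bitop(pte, 0, 0xF, 0) | bitop(pte, 5, 0xF, 4);
    }
    /* unpack(pack(x)) == x for any 8-bit x, and bit 4 is never set. */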
/drivers/include/asm/pgtable-2level_types.h
17,6 → 17,7
#endif /* !__ASSEMBLY__ */
 
#define SHARED_KERNEL_PMD 0
#define PAGETABLE_LEVELS 2
 
/*
* traditional i386 two-level paging structure:
/drivers/include/asm/pgtable.h
19,14 → 19,7
#include <asm/x86_init.h>
 
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);
 
#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif
 
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
122,6 → 115,11
return pte_flags(pte) & _PAGE_RW;
}
 
static inline int pte_file(pte_t pte)
{
return pte_flags(pte) & _PAGE_FILE;
}
 
static inline int pte_huge(pte_t pte)
{
return pte_flags(pte) & _PAGE_PSE;
139,7 → 137,13
 
static inline int pte_special(pte_t pte)
{
return pte_flags(pte) & _PAGE_SPECIAL;
/*
* See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h.
* On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 ==
* __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL.
*/
return (pte_flags(pte) & _PAGE_SPECIAL) &&
(pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE));
}
 
static inline unsigned long pte_pfn(pte_t pte)
149,12 → 153,12
 
static inline unsigned long pmd_pfn(pmd_t pmd)
{
return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
 
static inline unsigned long pud_pfn(pud_t pud)
{
return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
 
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
301,7 → 305,7
 
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
return pmd_clear_flags(pmd, _PAGE_PRESENT);
}
 
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
325,16 → 329,21
return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}
 
static inline pte_t pte_clear_soft_dirty(pte_t pte)
static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}
 
static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
static inline pte_t pte_file_mksoft_dirty(pte_t pte)
{
return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}
 
static inline int pte_file_soft_dirty(pte_t pte)
{
return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}
 
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
/*
396,9 → 405,7
return __pgprot(preservebits | addbits);
}
 
#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)
 
#define canon_pgprot(p) __pgprot(massage_pgprot(p))
 
417,17 → 424,11
* requested memtype:
* - request is uncached, return cannot be write-back
* - request is write-combine, return cannot be write-back
* - request is write-through, return cannot be write-back
* - request is write-through, return cannot be write-combine
*/
if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
new_pcm == _PAGE_CACHE_MODE_WB) ||
(pcm == _PAGE_CACHE_MODE_WC &&
new_pcm == _PAGE_CACHE_MODE_WB) ||
(pcm == _PAGE_CACHE_MODE_WT &&
new_pcm == _PAGE_CACHE_MODE_WB) ||
(pcm == _PAGE_CACHE_MODE_WT &&
new_pcm == _PAGE_CACHE_MODE_WC)) {
new_pcm == _PAGE_CACHE_MODE_WB)) {
return 0;
}
 
462,6 → 463,13
 
static inline int pte_present(pte_t a)
{
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
_PAGE_NUMA);
}
 
#define pte_present_nonuma pte_present_nonuma
static inline int pte_present_nonuma(pte_t a)
{
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
 
471,7 → 479,7
if (pte_flags(a) & _PAGE_PRESENT)
return true;
 
if ((pte_flags(a) & _PAGE_PROTNONE) &&
if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
mm_tlb_flush_pending(mm))
return true;
 
491,27 → 499,10
* the _PAGE_PSE flag will remain set at all times while the
* _PAGE_PRESENT bit is clear).
*/
return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
_PAGE_NUMA);
}
 
#ifdef CONFIG_NUMA_BALANCING
/*
* These work without NUMA balancing but the kernel does not care. See the
* comment in include/asm-generic/pgtable.h
*/
static inline int pte_protnone(pte_t pte)
{
return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
== _PAGE_PROTNONE;
}
 
static inline int pmd_protnone(pmd_t pmd)
{
return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */
 
static inline int pmd_none(pmd_t pmd)
{
/* Only check low word on 32-bit platforms, since it might be
521,7 → 512,7
 
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}
 
/*
528,8 → 519,7
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pmd_page(pmd) \
pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)
#define pmd_page(pmd) pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
 
/*
* the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
569,6 → 559,11
 
static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
/* pmd_numa check */
if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
return 0;
#endif
return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
 
577,7 → 572,7
return npg >> (20 - PAGE_SHIFT);
}
 
#if CONFIG_PGTABLE_LEVELS > 2
#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
return native_pud_val(pud) == 0;
590,7 → 585,7
 
static inline unsigned long pud_page_vaddr(pud_t pud)
{
return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}
 
/*
597,8 → 592,7
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pud_page(pud) \
pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)
#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
 
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
621,9 → 615,9
{
return 0;
}
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#endif /* PAGETABLE_LEVELS > 2 */
 
#if CONFIG_PGTABLE_LEVELS > 3
#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
return pgd_flags(pgd) & _PAGE_PRESENT;
660,7 → 654,7
{
return !native_pgd_val(pgd);
}
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
#endif /* PAGETABLE_LEVELS > 3 */
 
#endif /* __ASSEMBLY__ */
 
826,8 → 820,8
return pmd_flags(pmd) & _PAGE_RW;
}
 
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
pmd_t pmd = native_pmdp_get_and_clear(pmdp);
888,16 → 882,19
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
VM_BUG_ON(pte_present_nonuma(pte));
return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
 
static inline int pte_swp_soft_dirty(pte_t pte)
{
VM_BUG_ON(pte_present_nonuma(pte));
return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}
 
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
VM_BUG_ON(pte_present_nonuma(pte));
return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif
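
Most helpers in the pgtable.h hunks reduce to one idea: a pte/pmd/pud value is a physical frame number packed next to a set of flag bits, and PTE_PFN_MASK/PTE_FLAGS_MASK (or, in the newer revision, the huge-page-aware pmd_pfn_mask()/pud_pfn_mask() variants) pick the two halves apart. A sketch using 32-bit non-PAE constants:

    #include <stdint.h>

    #define PAGE_SHIFT     12
    #define PTE_PFN_MASK   0xFFFFF000u       /* bits 12..31: frame number */
    #define PTE_FLAGS_MASK (~PTE_PFN_MASK)   /* bits 0..11: flags */
    #define _PAGE_PRESENT  0x001u
    #define _PAGE_RW       0x002u

    static uint32_t make_pte(uint32_t pfn, uint32_t flags)
    {
        return (pfn << PAGE_SHIFT) | flags;
    }

    static uint32_t pte_pfn(uint32_t pte)   { return (pte & PTE_PFN_MASK) >> PAGE_SHIFT; }
    static uint32_t pte_flags(uint32_t pte) { return pte & PTE_FLAGS_MASK; }

    /* pte_pfn(make_pte(0x1234, _PAGE_PRESENT | _PAGE_RW)) == 0x1234 */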
/drivers/include/asm/pgtable_types.h
4,7 → 4,7
#include <linux/const.h>
#include <asm/page_types.h>
 
#define FIRST_USER_ADDRESS 0UL
#define FIRST_USER_ADDRESS 0
 
#define _PAGE_BIT_PRESENT 0 /* is present */
#define _PAGE_BIT_RW 1 /* writeable */
27,9 → 27,19
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
 
/*
* Swap offsets on configurations that allow automatic NUMA balancing use the
* bits after _PAGE_BIT_GLOBAL. To uniquely distinguish NUMA hinting PTEs from
* swap entries, we use the first bit after _PAGE_BIT_GLOBAL and shrink the
* maximum possible swap space from 16TB to 8TB.
*/
#define _PAGE_BIT_NUMA (_PAGE_BIT_GLOBAL+1)
 
/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
/* - set: nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY
 
#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
68,6 → 78,21
#endif
 
/*
* _PAGE_NUMA distinguishes between a numa hinting minor fault and a page
* that is not present. The hinting fault gathers numa placement statistics
* (see pte_numa()). The bit is always zero when the PTE is not present.
*
* The bit picked must be always zero when the pmd is present and not
* present, so that we don't lose information when we set it while
* atomically clearing the present bit.
*/
#ifdef CONFIG_NUMA_BALANCING
#define _PAGE_NUMA (_AT(pteval_t, 1) << _PAGE_BIT_NUMA)
#else
#define _PAGE_NUMA (_AT(pteval_t, 0))
#endif
 
/*
* Tracking soft dirty bit when a page goes to a swap is tricky.
* We need a bit which can be stored in pte _and_ not conflict
* with swap entry format. On x86 bits 6 and 7 are *not* involved
89,6 → 114,7
#define _PAGE_NX (_AT(pteval_t, 0))
#endif
 
#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
 
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
99,8 → 125,8
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_SOFT_DIRTY)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
_PAGE_SOFT_DIRTY | _PAGE_NUMA)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_NUMA)
 
/*
* The cache modes defined here are used to translate between pure SW usage
209,10 → 235,10
 
#include <linux/types.h>
 
/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
 
/* Extracts the flags from a (pte|pmd|pud|pgd)val_t of a 4KB page */
/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
 
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
234,7 → 260,7
return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}
 
#if CONFIG_PGTABLE_LEVELS > 3
#if PAGETABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;
 
static inline pud_t native_make_pud(pmdval_t val)
255,7 → 281,7
}
#endif
 
#if CONFIG_PGTABLE_LEVELS > 2
#if PAGETABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;
 
static inline pmd_t native_make_pmd(pmdval_t val)
276,40 → 302,14
}
#endif
 
static inline pudval_t pud_pfn_mask(pud_t pud)
{
if (native_pud_val(pud) & _PAGE_PSE)
return PHYSICAL_PUD_PAGE_MASK;
else
return PTE_PFN_MASK;
}
 
static inline pudval_t pud_flags_mask(pud_t pud)
{
return ~pud_pfn_mask(pud);
}
 
static inline pudval_t pud_flags(pud_t pud)
{
return native_pud_val(pud) & pud_flags_mask(pud);
return native_pud_val(pud) & PTE_FLAGS_MASK;
}
 
static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
if (native_pmd_val(pmd) & _PAGE_PSE)
return PHYSICAL_PMD_PAGE_MASK;
else
return PTE_PFN_MASK;
}
 
static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
return ~pmd_pfn_mask(pmd);
}
 
static inline pmdval_t pmd_flags(pmd_t pmd)
{
return native_pmd_val(pmd) & pmd_flags_mask(pmd);
return native_pmd_val(pmd) & PTE_FLAGS_MASK;
}
 
static inline pte_t native_make_pte(pteval_t val)
327,6 → 327,20
return native_pte_val(pte) & PTE_FLAGS_MASK;
}
 
#ifdef CONFIG_NUMA_BALANCING
/* Set of bits that distinguishes present, prot_none and numa ptes */
#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)
static inline pteval_t ptenuma_flags(pte_t pte)
{
return pte_flags(pte) & _PAGE_NUMA_MASK;
}
 
static inline pmdval_t pmdnuma_flags(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_NUMA_MASK;
}
#endif /* CONFIG_NUMA_BALANCING */
 
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
 
393,9 → 407,6
#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);
 
#define pgprot_writethrough pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);
 
/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING
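
The CONFIG_NUMA_BALANCING machinery above works because a NUMA-hinting entry deliberately clears _PAGE_PRESENT, so three states have to stay distinguishable from the (_PAGE_NUMA | _PAGE_PROTNONE | _PAGE_PRESENT) triple alone: present, PROT_NONE, and NUMA hint. A sketch of the test that ptenuma_flags() enables (bit values assume _PAGE_BIT_GLOBAL == 8, per the definitions above):

    #include <stdint.h>

    #define _PAGE_PRESENT   0x001u
    #define _PAGE_PROTNONE  0x100u  /* reuses the global bit when not present */
    #define _PAGE_NUMA      0x200u  /* first bit after _PAGE_BIT_GLOBAL */
    #define _PAGE_NUMA_MASK (_PAGE_NUMA | _PAGE_PROTNONE | _PAGE_PRESENT)

    /* A pte is a NUMA hint only when _PAGE_NUMA is set and the other two
     * bits of the triple are clear. */
    static int pte_numa(uint32_t flags)
    {
        return (flags & _PAGE_NUMA_MASK) == _PAGE_NUMA;
    }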
 
/drivers/include/asm/processor.h
6,12 → 6,12
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct vm86;
 
#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
21,7 → 21,6
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
 
#include <linux/personality.h>
#include <linux/cpumask.h>
53,11 → 52,6
return pc;
}
 
/*
* These alignment constraints are for performance in the vSMP case,
* but in the task_struct case we must also meet hardware imposed
* alignment requirements of the FPU state:
*/
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
115,9 → 109,6
/* in KB - valid for CPUS which support this call: */
int x86_cache_size;
int x86_cache_alignment; /* In bytes */
/* Cache QoS architectural values: */
int x86_cache_max_rmid; /* max index */
int x86_cache_occ_scale; /* scale to bytes */
int x86_power;
unsigned long loops_per_jiffy;
/* cpuid returned max cores value: */
169,7 → 160,10
 
extern const struct seq_operations cpuinfo_op;
 
#define cache_line_size() (x86_cache_alignment)
 
extern void cpu_detect(struct cpuinfo_x86 *c);
extern void fpu_detect(struct cpuinfo_x86 *c);
 
extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
216,23 → 210,8
unsigned long sp0;
unsigned short ss0, __ss0h;
unsigned long sp1;
 
/*
* We don't use ring 1, so ss1 is a convenient scratch space in
* the same cacheline as sp0. We use ss1 to cache the value in
* MSR_IA32_SYSENTER_CS. When we context switch
* MSR_IA32_SYSENTER_CS, we first check if the new value being
* written matches ss1, and, if it's not, then we wrmsr the new
* value and update ss1.
*
* The only reason we context switch MSR_IA32_SYSENTER_CS is
* that we set it to zero in vm86 tasks to avoid corrupting the
* stack if we were to go through the sysenter path from vm86
* mode.
*/
unsigned short ss1; /* MSR_IA32_SYSENTER_CS */
 
unsigned short __ss1h;
/* ss1 caches MSR_IA32_SYSENTER_CS: */
unsigned short ss1, __ss1h;
unsigned long sp2;
unsigned short ss2, __ss2h;
unsigned long __cr3;
297,18 → 276,14
unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
 
/*
* Space for the temporary SYSENTER stack:
* .. and then another 0x100 bytes for the emergency kernel stack:
*/
unsigned long SYSENTER_stack[64];
unsigned long stack[64];
 
} ____cacheline_aligned;
 
DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
 
#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#endif
 
/*
* Save the original ist values for checking stack pointers during debugging
*/
316,6 → 291,128
unsigned long ist[7];
};
 
#define MXCSR_DEFAULT 0x1f80
 
struct i387_fsave_struct {
u32 cwd; /* FPU Control Word */
u32 swd; /* FPU Status Word */
u32 twd; /* FPU Tag Word */
u32 fip; /* FPU IP Offset */
u32 fcs; /* FPU IP Selector */
u32 foo; /* FPU Operand Pointer Offset */
u32 fos; /* FPU Operand Pointer Selector */
 
/* 8*10 bytes for each FP-reg = 80 bytes: */
u32 st_space[20];
 
/* Software status information [not touched by FSAVE ]: */
u32 status;
};
 
struct i387_fxsave_struct {
u16 cwd; /* Control Word */
u16 swd; /* Status Word */
u16 twd; /* Tag Word */
u16 fop; /* Last Instruction Opcode */
union {
struct {
u64 rip; /* Instruction Pointer */
u64 rdp; /* Data Pointer */
};
struct {
u32 fip; /* FPU IP Offset */
u32 fcs; /* FPU IP Selector */
u32 foo; /* FPU Operand Offset */
u32 fos; /* FPU Operand Selector */
};
};
u32 mxcsr; /* MXCSR Register State */
u32 mxcsr_mask; /* MXCSR Mask */
 
/* 8*16 bytes for each FP-reg = 128 bytes: */
u32 st_space[32];
 
/* 16*16 bytes for each XMM-reg = 256 bytes: */
u32 xmm_space[64];
 
u32 padding[12];
 
union {
u32 padding1[12];
u32 sw_reserved[12];
};
 
} __attribute__((aligned(16)));
 
struct i387_soft_struct {
u32 cwd;
u32 swd;
u32 twd;
u32 fip;
u32 fcs;
u32 foo;
u32 fos;
/* 8*10 bytes for each FP-reg = 80 bytes: */
u32 st_space[20];
u8 ftop;
u8 changed;
u8 lookahead;
u8 no_update;
u8 rm;
u8 alimit;
struct math_emu_info *info;
u32 entry_eip;
};
 
struct ymmh_struct {
/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
u32 ymmh_space[64];
};
 
/* We don't support LWP yet: */
struct lwp_struct {
u8 reserved[128];
};
 
struct bndreg {
u64 lower_bound;
u64 upper_bound;
} __packed;
 
struct bndcsr {
u64 bndcfgu;
u64 bndstatus;
} __packed;
 
struct xsave_hdr_struct {
u64 xstate_bv;
u64 xcomp_bv;
u64 reserved[6];
} __attribute__((packed));
 
struct xsave_struct {
struct i387_fxsave_struct i387;
struct xsave_hdr_struct xsave_hdr;
struct ymmh_struct ymmh;
struct lwp_struct lwp;
struct bndreg bndreg[4];
struct bndcsr bndcsr;
/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));
 
union thread_xstate {
struct i387_fsave_struct fsave;
struct i387_fxsave_struct fxsave;
struct i387_soft_struct soft;
struct xsave_struct xsave;
};
 
struct fpu {
unsigned int last_cpu;
unsigned int has_fpu;
union thread_xstate *state;
};
 
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);
 
364,6 → 461,8
#endif /* X86_64 */
 
extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;
 
struct perf_event;
 
375,6 → 474,7
#ifdef CONFIG_X86_32
unsigned long sysenter_cs;
#else
unsigned long usersp; /* Copy from PDA */
unsigned short es;
unsigned short ds;
unsigned short fsindex;
387,7 → 487,6
unsigned long fs;
#endif
unsigned long gs;
 
/* Save middle states of ptrace breakpoints */
struct perf_event *ptrace_bps[HBP_NUM];
/* Debug status used for traps, single steps, etc... */
398,9 → 497,17
unsigned long cr2;
unsigned long trap_nr;
unsigned long error_code;
#ifdef CONFIG_VM86
/* floating point and extended processor state */
struct fpu fpu;
#ifdef CONFIG_X86_32
/* Virtual 86 mode info */
struct vm86 *vm86;
struct vm86_struct __user *vm86_info;
unsigned long screen_bitmap;
unsigned long v86flags;
unsigned long v86mask;
unsigned long saved_sp0;
unsigned int saved_fs;
unsigned int saved_gs;
#endif
/* IO permissions: */
unsigned long *io_bitmap_ptr;
407,13 → 514,15
unsigned long iopl;
/* Max allowed port in the bitmap, in bytes: */
unsigned io_bitmap_max;
 
/* Floating point and extended processor state */
struct fpu fpu;
/*
* WARNING: 'fpu' is dynamically-sized. It *MUST* be at
* the end.
* fpu_counter contains the number of consecutive context switches
* that the FPU is used. If this is over a threshold, the lazy fpu
* saving becomes unlazy to save the trap. This is an unsigned char
* so that after 256 times the counter wraps and the behavior turns
* lazy again; this to deal with bursty apps that only use FPU for
* a short time
*/
unsigned char fpu_counter;
};
 
/*
455,13 → 564,11
#endif
}
 
 
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid native_cpuid
#define paravirt_enabled() 0
#define paravirt_has(x) 0
 
static inline void load_sp0(struct tss_struct *tss,
struct thread_struct *thread)
472,6 → 579,39
#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */
 
/*
* Save the cr4 feature set we're using (ie
* Pentium 4MB enable and PPro Global page
* enable), so that any CPU's that boot up
* after us can get the correct flags.
*/
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;
 
static inline void set_in_cr4(unsigned long mask)
{
unsigned long cr4;
 
mmu_cr4_features |= mask;
if (trampoline_cr4_features)
*trampoline_cr4_features = mmu_cr4_features;
cr4 = read_cr4();
cr4 |= mask;
write_cr4(cr4);
}
 
static inline void clear_in_cr4(unsigned long mask)
{
unsigned long cr4;
 
mmu_cr4_features &= ~mask;
if (trampoline_cr4_features)
*trampoline_cr4_features = mmu_cr4_features;
cr4 = read_cr4();
cr4 &= ~mask;
write_cr4(cr4);
}
 
typedef struct {
unsigned long seg;
} mm_segment_t;
546,12 → 686,12
}
 
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static __always_inline void rep_nop(void)
static inline void rep_nop(void)
{
asm volatile("rep; nop" ::: "memory");
}
 
static __always_inline void cpu_relax(void)
static inline void cpu_relax(void)
{
rep_nop();
}
635,6 → 775,14
 
extern void set_task_blockstep(struct task_struct *task, bool on);
 
/*
* from system description table in BIOS. Mostly for MCA use, but
* others may find it useful:
*/
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
 
/* Boot loader type from the setup header: */
extern int bootloader_type;
extern int bootloader_version;
646,10 → 794,10
#define ARCH_HAS_SPINLOCK_PREFETCH
 
#ifdef CONFIG_X86_32
# define BASE_PREFETCH ""
# define BASE_PREFETCH ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH "prefetcht0 %P1"
# define BASE_PREFETCH "prefetcht0 (%1)"
#endif
 
/*
684,9 → 832,6
prefetchw(x);
}
 
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
TOP_OF_KERNEL_STACK_PADDING)
 
#ifdef CONFIG_X86_32
/*
* User space process size: 3GB (default).
697,15 → 842,39
#define STACK_TOP_MAX STACK_TOP
 
#define INIT_THREAD { \
.sp0 = TOP_OF_INIT_STACK, \
.sp0 = sizeof(init_stack) + (long)&init_stack, \
.vm86_info = NULL, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
}
 
/*
* Note that the .io_bitmap member must be extra-big. This is because
* the CPU will access an additional byte beyond the end of the IO
* permission bitmap. The extra byte must be all 1 bits, and must
* be within the limit.
*/
#define INIT_TSS { \
.x86_tss = { \
.sp0 = sizeof(init_stack) + (long)&init_stack, \
.ss0 = __KERNEL_DS, \
.ss1 = __KERNEL_CS, \
.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
}, \
.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \
}
 
extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info) \
({ \
unsigned long *__ptr = (unsigned long *)(info); \
(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
})
 
/*
* TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
* The below -8 is to reserve 8 bytes on top of the ring0 stack.
* This is necessary to guarantee that the entire "struct pt_regs"
* is accessible even if the CPU haven't stored the SS/ESP registers
* on the stack (interrupt gate does not save these registers
716,9 → 885,9
*/
#define task_pt_regs(task) \
({ \
unsigned long __ptr = (unsigned long)task_stack_page(task); \
__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \
((struct pt_regs *)__ptr) - 1; \
struct pt_regs *__regs__; \
__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
__regs__ - 1; \
})
 
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
750,9 → 919,13
#define STACK_TOP_MAX TASK_SIZE_MAX
 
#define INIT_THREAD { \
.sp0 = TOP_OF_INIT_STACK \
.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}
 
#define INIT_TSS { \
.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}
 
/*
* Return saved PC of a blocked thread.
* What is this good for? it will be always the scheduler or ret_from_fork.
762,6 → 935,11
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);
 
/*
* User space RSP while inside the SYSCALL fast path
*/
DECLARE_PER_CPU(unsigned long, old_rsp);
 
#endif /* CONFIG_X86_64 */
 
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
783,18 → 961,18
extern int set_tsc_mode(unsigned int val);
 
/* Register/unregister a process' MPX related resource */
#define MPX_ENABLE_MANAGEMENT() mpx_enable_management()
#define MPX_DISABLE_MANAGEMENT() mpx_disable_management()
#define MPX_ENABLE_MANAGEMENT(tsk) mpx_enable_management((tsk))
#define MPX_DISABLE_MANAGEMENT(tsk) mpx_disable_management((tsk))
 
#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(void);
extern int mpx_disable_management(void);
extern int mpx_enable_management(struct task_struct *tsk);
extern int mpx_disable_management(struct task_struct *tsk);
#else
static inline int mpx_enable_management(void)
static inline int mpx_enable_management(struct task_struct *tsk)
{
return -EINVAL;
}
static inline int mpx_disable_management(void)
static inline int mpx_disable_management(struct task_struct *tsk)
{
return -EINVAL;
}
801,7 → 979,6
#endif /* CONFIG_X86_INTEL_MPX */
 
extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);
 
static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
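
Both task_pt_regs() definitions in the processor.h hunks do the same pointer arithmetic: start at the top of the task's kernel stack, step down past the reserved padding, and treat the memory just below as the saved register frame. Spelled out (a sketch; THREAD_SIZE == 8192 and 8 bytes of padding are assumptions matching the 32-bit comments above, and the pt_regs layout is abridged):

    #include <stdint.h>

    struct pt_regs {        /* abridged x86-32 register frame */
        unsigned long bx, cx, dx, si, di, bp, ax;
        unsigned long ds, es, fs, gs, orig_ax, ip, cs, flags, sp, ss;
    };

    #define THREAD_SIZE                 8192u
    #define TOP_OF_KERNEL_STACK_PADDING 8u   /* room for SS/ESP the CPU may skip */

    static struct pt_regs *task_pt_regs_sketch(void *stack_page)
    {
        uintptr_t top = (uintptr_t)stack_page + THREAD_SIZE
                        - TOP_OF_KERNEL_STACK_PADDING;
        return (struct pt_regs *)top - 1;   /* frame sits just below the padding */
    }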
/drivers/include/asm/sigcontext.h
1,8 → 1,79
#ifndef _ASM_X86_SIGCONTEXT_H
#define _ASM_X86_SIGCONTEXT_H
 
/* This is a legacy header - all kernel code includes <uapi/asm/sigcontext.h> directly. */
 
#include <uapi/asm/sigcontext.h>
 
#ifdef __i386__
struct sigcontext {
unsigned short gs, __gsh;
unsigned short fs, __fsh;
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned long di;
unsigned long si;
unsigned long bp;
unsigned long sp;
unsigned long bx;
unsigned long dx;
unsigned long cx;
unsigned long ax;
unsigned long trapno;
unsigned long err;
unsigned long ip;
unsigned short cs, __csh;
unsigned long flags;
unsigned long sp_at_signal;
unsigned short ss, __ssh;
 
/*
* fpstate is really (struct _fpstate *) or (struct _xstate *)
* depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
* bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
* of extended memory layout. See comments at the definition of
* (struct _fpx_sw_bytes)
*/
void __user *fpstate; /* zero when no FPU/extended context */
unsigned long oldmask;
unsigned long cr2;
};
#else /* __i386__ */
struct sigcontext {
unsigned long r8;
unsigned long r9;
unsigned long r10;
unsigned long r11;
unsigned long r12;
unsigned long r13;
unsigned long r14;
unsigned long r15;
unsigned long di;
unsigned long si;
unsigned long bp;
unsigned long bx;
unsigned long dx;
unsigned long ax;
unsigned long cx;
unsigned long sp;
unsigned long ip;
unsigned long flags;
unsigned short cs;
unsigned short gs;
unsigned short fs;
unsigned short __pad0;
unsigned long err;
unsigned long trapno;
unsigned long oldmask;
unsigned long cr2;
 
/*
* fpstate is really (struct _fpstate *) or (struct _xstate *)
* depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
* bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
* of extended memory layout. See comments at the definition of
* (struct _fpx_sw_bytes)
*/
void __user *fpstate; /* zero when no FPU/extended context */
unsigned long reserved1[8];
};
#endif /* !__i386__ */
#endif /* _ASM_X86_SIGCONTEXT_H */
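
For orientation, this saved-register layout is what a userspace SA_SIGINFO handler reaches through glibc's ucontext_t. A minimal sketch using the glibc API rather than this header (printing from a signal handler is not async-signal-safe; it is done here only for illustration):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <ucontext.h>

static void handler(int sig, siginfo_t *si, void *ctx)
{
	ucontext_t *uc = ctx;
#ifdef __x86_64__
	printf("fault ip=%llx\n", (unsigned long long)uc->uc_mcontext.gregs[REG_RIP]);
#else
	printf("fault ip=%lx\n", (unsigned long)uc->uc_mcontext.gregs[REG_EIP]);
#endif
	(void)sig; (void)si;
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };

	sigaction(SIGSEGV, &sa, NULL); /* register the context-inspecting handler */
	return 0;
}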
/drivers/include/asm/special_insns.h
137,17 → 137,17
native_write_cr3(x);
}
 
static inline unsigned long __read_cr4(void)
static inline unsigned long read_cr4(void)
{
return native_read_cr4();
}
 
static inline unsigned long __read_cr4_safe(void)
static inline unsigned long read_cr4_safe(void)
{
return native_read_cr4_safe();
}
 
static inline void __write_cr4(unsigned long x)
static inline void write_cr4(unsigned long x)
{
native_write_cr4(x);
}
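
A kernel-style sketch of the usual read-modify-write pattern with these wrappers, written against the rev-5352 names; X86_CR4_OSFXSR is a real CR4 bit, but the function itself is illustrative, not part of this header:

/* Set CR4.OSFXSR if it is not already set. */
static void cr4_set_osfxsr(void)
{
	unsigned long cr4 = read_cr4();

	if (!(cr4 & X86_CR4_OSFXSR))
		write_cr4(cr4 | X86_CR4_OSFXSR);
}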
/drivers/include/asm/x86_init.h
1,6 → 1,7
#ifndef _ASM_X86_PLATFORM_H
#define _ASM_X86_PLATFORM_H
 
#include <asm/pgtable_types.h>
//#include <asm/bootparam.h>
 
struct mpc_bus;
170,17 → 171,38
};
 
struct pci_dev;
struct msi_msg;
 
struct x86_msi_ops {
int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq,
unsigned int dest, struct msi_msg *msg,
u8 hpet_id);
void (*teardown_msi_irq)(unsigned int irq);
void (*teardown_msi_irqs)(struct pci_dev *dev);
void (*restore_msi_irqs)(struct pci_dev *dev);
int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
};
 
struct IO_APIC_route_entry;
struct io_apic_irq_attr;
struct irq_data;
struct cpumask;
 
struct x86_io_apic_ops {
void (*init) (void);
unsigned int (*read) (unsigned int apic, unsigned int reg);
void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
void (*modify) (unsigned int apic, unsigned int reg, unsigned int value);
void (*disable)(void);
void (*print_entries)(unsigned int apic, unsigned int nr_entries);
int (*set_affinity)(struct irq_data *data,
const struct cpumask *mask,
bool force);
int (*setup_entry)(int irq, struct IO_APIC_route_entry *entry,
unsigned int destination, int vector,
struct io_apic_irq_attr *attr);
void (*eoi_ioapic_pin)(int apic, int pin, int vector);
};
 
extern struct x86_init_ops x86_init;
/drivers/include/ddk.h
15,7 → 15,6
 
#define PG_SW 0x003
#define PG_UW 0x007
#define PG_WRITEC 0x008
#define PG_NOCACHE 0x018
#define PG_SHARED 0x200
 
64,4 → 63,7
u32 drvEntry(int, char *)__asm__("_drvEntry");
 
 
 
 
 
#endif /* DDK_H */
/drivers/include/linux/pm.h
File deleted
/drivers/include/linux/device.h
File deleted
/drivers/include/linux/gpio/consumer.h
File deleted
/drivers/include/linux/ktime.h
File deleted
/drivers/include/linux/component.h
File deleted
/drivers/include/linux/circ_buf.h
File deleted
/drivers/include/linux/pwm.h
File deleted
/drivers/include/linux/pm_runtime.h
File deleted
/drivers/include/linux/sfi.h
File deleted
/drivers/include/linux/bottom_half.h
2,6 → 2,7
#define _LINUX_BH_H
 
#include <linux/preempt.h>
#include <linux/preempt_mask.h>
 
#ifdef CONFIG_TRACE_IRQFLAGS
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
/drivers/include/linux/kernel.h
102,18 → 102,6
(((__x) - ((__d) / 2)) / (__d)); \
} \
)
/*
* Same as above, but for u64 dividends. The divisor must be a 32-bit
* number.
*/
#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
{ \
typeof(divisor) __d = divisor; \
unsigned long long _tmp = (x) + (__d) / 2; \
do_div(_tmp, __d); \
_tmp; \
} \
)
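
A standalone sketch of the rounding the macro performs. In the kernel, do_div() divides a u64 by a 32-bit divisor in place; plain 64-bit division stands in for it here:

#include <assert.h>

static unsigned long long div_round_closest_ull(unsigned long long x,
						unsigned int divisor)
{
	return (x + divisor / 2) / divisor; /* bias by half, then truncate */
}

int main(void)
{
	assert(div_round_closest_ull(17, 5) == 3); /* 3.4 rounds down */
	assert(div_round_closest_ull(18, 5) == 4); /* 3.6 rounds up   */
	return 0;
}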
 
/*
* Multiplies an integer by a fraction, while avoiding unnecessary
161,52 → 149,14
*/
#define lower_32_bits(n) ((u32)(n))
 
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif
 
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
void ___might_sleep(const char *file, int line, int preempt_offset);
void __might_sleep(const char *file, int line, int preempt_offset);
/**
* might_sleep - annotation for functions that can sleep
*
* This macro will print a stack trace if it is executed in an atomic
* context (spinlock, irq handler, ...).
*
* It is a useful debugging aid for catching problems early rather than
* being bitten later, when the calling function happens to sleep where it
* is not supposed to.
/*
* abs() handles unsigned and signed longs, ints, shorts and chars. For all
* input types abs() returns a signed long.
* abs() should not be used for 64-bit types (s64, u64, long long) - use abs64()
* for those.
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
static inline void ___might_sleep(const char *file, int line,
int preempt_offset) { }
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif
 
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
 
/**
* abs - return absolute value of an argument
* @x: the value. If it is unsigned type, it is converted to signed type first
* (s64, long or int depending on its size).
*
* Return: an absolute value of x. If x is 64-bit, macro's return type is s64,
* otherwise it is signed long.
*/
#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \
s64 __x = (x); \
(__x < 0) ? -__x : __x; \
}), ({ \
#define abs(x) ({ \
long ret; \
if (sizeof(x) == sizeof(long)) { \
long __x = (x); \
216,35 → 166,13
ret = (__x < 0) ? -__x : __x; \
} \
ret; \
}))
})
 
/**
* reciprocal_scale - "scale" a value into range [0, ep_ro)
* @val: value
* @ep_ro: right open interval endpoint
*
* Perform a "reciprocal multiplication" in order to "scale" a value into
* range [0, ep_ro), where the upper interval endpoint is right-open.
* This is useful, e.g., for accessing an index of an array containing
* ep_ro elements. Think of it as a sort of modulus, only that
* the result isn't that of modulo. ;) Note that if the initial input is a
* small value, the result will be 0.
*
* Return: a result based on val in interval [0, ep_ro).
*/
static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
return (u32)(((u64) val * ep_ro) >> 32);
}
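
A standalone check of that identity: multiplying a full-range u32 by ep_ro and keeping the high 32 bits lands in [0, ep_ro) without a division:

#include <assert.h>
#include <stdint.h>

static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	assert(reciprocal_scale(0, 100) == 0);
	assert(reciprocal_scale(0x80000000u, 100) == 50); /* midpoint maps to half */
	assert(reciprocal_scale(0xffffffffu, 100) == 99); /* never reaches ep_ro   */
	return 0;
}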
#define abs64(x) ({ \
s64 __x = (x); \
(__x < 0) ? -__x : __x; \
})
 
#if defined(CONFIG_MMU) && \
(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() __might_fault(__FILE__, __LINE__)
void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif
 
#define KERN_EMERG "<0>" /* system is unusable */
#define KERN_ALERT "<1>" /* action must be taken immediately */
#define KERN_CRIT "<2>" /* critical conditions */
253,15 → 181,6
#define KERN_NOTICE "<5>" /* normal but significant condition */
#define KERN_INFO "<6>" /* informational */
#define KERN_DEBUG "<7>" /* debug-level messages */
extern unsigned long simple_strtoul(const char *,char **,unsigned int);
extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);
 
extern int num_to_str(char *buf, int size, unsigned long long num);
 
/* lib/printf utilities */
 
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
extern __printf(3, 4)
274,16 → 193,7
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __printf(2, 3)
char *kasprintf(gfp_t gfp, const char *fmt, ...);
extern __printf(2, 0)
char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
extern __printf(2, 0)
const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
 
extern __scanf(2, 3)
int sscanf(const char *, const char *, ...);
extern __scanf(2, 0)
int vsscanf(const char *, const char *, va_list);
extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
enum lockdep_ok {
LOCKDEP_STILL_OK,
LOCKDEP_NOW_UNRELIABLE
319,7 → 229,6
#define TAINT_OOT_MODULE 12
#define TAINT_UNSIGNED_MODULE 13
#define TAINT_SOFTLOCKUP 14
#define TAINT_LIVEPATCH 15
 
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
368,6 → 277,12
*
* Most likely, you want to use tracing_on/tracing_off.
*/
#ifdef CONFIG_RING_BUFFER
/* trace_off_permanent stops recording with no way to bring it back */
void tracing_off_permanent(void);
#else
static inline void tracing_off_permanent(void) { }
#endif
 
enum ftrace_dump_mode {
DUMP_NONE,
511,10 → 426,10
__ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)
 
extern __printf(2, 0) int
extern int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
 
extern __printf(2, 0) int
extern int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
534,7 → 449,7
{
return 0;
}
static __printf(1, 0) inline int
static inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
return 0;
651,12 → 566,10
#define VERIFY_OCTAL_PERMISSIONS(perms) \
(BUILD_BUG_ON_ZERO((perms) < 0) + \
BUILD_BUG_ON_ZERO((perms) > 0777) + \
/* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
/* USER_WRITABLE >= GROUP_WRITABLE */ \
BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
/* OTHER_WRITABLE? Generally considered a bad idea. */ \
/* User perms >= group perms >= other perms */ \
BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \
BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \
/* Other writable? Generally considered a bad idea. */ \
BUILD_BUG_ON_ZERO((perms) & 2) + \
(perms))
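
The trick is that BUILD_BUG_ON_ZERO() takes sizeof over a struct whose bitfield width goes negative when the check fails, turning a bad mode into a compile error. A minimal sketch, assuming the usual GNU C kernel definition of BUILD_BUG_ON_ZERO and reproducing only the other-writable check:

/* usual kernel definition (an assumption here; GNU C only) */
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))

#define CHECK_NOT_OTHER_WRITABLE(perms) \
	(BUILD_BUG_ON_ZERO((perms) & 2) + (perms))

static const int mode_ok = CHECK_NOT_OTHER_WRITABLE(0644); /* compiles */
/* static const int mode_bad = CHECK_NOT_OTHER_WRITABLE(0666); -- bitfield
 * width becomes -1, so this line would fail to compile. */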
 
678,7 → 591,23
struct vm_area_struct {};
struct address_space {};
 
struct device
{
struct device *parent;
void *driver_data;
};
 
static inline void dev_set_drvdata(struct device *dev, void *data)
{
dev->driver_data = data;
}
 
static inline void *dev_get_drvdata(struct device *dev)
{
return dev->driver_data;
}
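
A usage sketch for the stub above, with hypothetical names: stash a driver-private structure on the device at probe time and fetch it back later:

struct my_priv { int irq; };

static struct my_priv priv;
static struct device mydev;

static void my_probe(void)
{
	priv.irq = 5;                    /* hypothetical resource */
	dev_set_drvdata(&mydev, &priv);  /* attach to the device  */
}

static int my_get_irq(void)
{
	struct my_priv *p = dev_get_drvdata(&mydev);

	return p->irq;
}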
 
 
#define in_dbg_master() (0)
 
#define HZ 100
799,7 → 728,10
#define IS_ENABLED(a) 0
 
 
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
 
 
#define cpufreq_quick_get_max(x) GetCpuFreq()
 
extern unsigned int tsc_khz;
839,6 → 771,8
return 0;
}
 
struct seq_file;
 
void *kmap(struct page *page);
void *kmap_atomic(struct page *page);
void kunmap(struct page *page);
853,4 → 787,5
 
#define CONFIG_PAGE_OFFSET 0
 
 
#endif
/drivers/include/linux/preempt.h
10,124 → 10,17
#include <linux/list.h>
 
/*
* We put the hardirq and softirq counter into the preemption
* counter. The bitmask has the following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
*
* The hardirq count could in theory be the same as the number of
* interrupts in the system, but we run all interrupt handlers with
* interrupts disabled, so we cannot have nesting interrupts. Though
* there are a few palaeontologic drivers which reenable interrupts in
* the handler, so we need more than one bit here.
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0x000f0000
* NMI_MASK: 0x00100000
* PREEMPT_NEED_RESCHED: 0x80000000
* We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
* the other bits -- can't include that header due to inclusion hell.
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 4
#define NMI_BITS 1
 
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
 
#define __IRQ_MASK(x) ((1UL << (x))-1)
 
#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
 
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
 
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
 
/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED 0x80000000
 
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
 
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
| NMI_MASK))
 
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context? Interrupt context?
* in_softirq - Are we currently processing softirq or have bh disabled?
* in_serving_softirq - Are we currently processing softirq?
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
 
/*
* Are we in NMI context?
*/
#define in_nmi() (preempt_count() & NMI_MASK)
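
A standalone sketch decoding a preempt_count value with the layout above (constants reproduced locally for illustration):

#include <assert.h>

#define PREEMPT_OFFSET (1UL << 0)
#define SOFTIRQ_OFFSET (1UL << 8)
#define HARDIRQ_OFFSET (1UL << 16)
#define PREEMPT_MASK   0x000000ffUL
#define SOFTIRQ_MASK   0x0000ff00UL
#define HARDIRQ_MASK   0x000f0000UL

int main(void)
{
	/* one hardirq, bh disabled once (2 * SOFTIRQ_OFFSET), one preempt_disable() */
	unsigned long count = HARDIRQ_OFFSET + 2 * SOFTIRQ_OFFSET + PREEMPT_OFFSET;

	assert(count & HARDIRQ_MASK);        /* in_irq()           */
	assert(count & SOFTIRQ_MASK);        /* in_softirq()       */
	assert((count & PREEMPT_MASK) == 1); /* preemption depth 1 */
	return 0;
}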
 
/*
* The preempt_count offset after preempt_disable();
*/
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET 0
#endif
 
/*
* The preempt_count offset after spin_lock()
*/
#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
 
/*
* The preempt_count offset needed for things like:
*
* spin_lock_bh()
*
* Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
* softirqs, such that unlock sequences of:
*
* spin_unlock();
* local_bh_enable();
*
* Work as expected.
*/
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
 
/*
* Are we running in atomic context? WARNING: this macro cannot
* always detect atomic context; in particular, it cannot know about
* held spinlocks in non-preemptible kernels. Thus it should not be
* used in the general case to determine whether sleeping is possible.
* Do not use in_atomic() in driver code.
*/
#define in_atomic() (preempt_count() != 0)
 
/*
* Check whether we were atomic before we did preempt_disable():
* (used by the scheduler)
*/
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
 
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
({ preempt_count_sub(1); should_resched(0); })
#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#else
#define preempt_count_add(val) __preempt_count_add(val)
#define preempt_count_sub(val) __preempt_count_sub(val)
156,8 → 49,6
 
#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
 
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
166,46 → 57,52
__preempt_schedule(); \
} while (0)
 
#define preempt_enable_notrace() \
do { \
barrier(); \
if (unlikely(__preempt_count_dec_and_test())) \
__preempt_schedule_notrace(); \
} while (0)
 
#define preempt_check_resched() \
do { \
if (should_resched(0)) \
if (should_resched()) \
__preempt_schedule(); \
} while (0)
 
#else /* !CONFIG_PREEMPT */
#else
#define preempt_enable() \
do { \
barrier(); \
preempt_count_dec(); \
} while (0)
#define preempt_check_resched() do { } while (0)
#endif
 
#define preempt_enable_notrace() \
#define preempt_disable_notrace() \
do { \
__preempt_count_inc(); \
barrier(); \
} while (0)
 
#define preempt_enable_no_resched_notrace() \
do { \
barrier(); \
__preempt_count_dec(); \
} while (0)
 
#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPT */
#ifdef CONFIG_PREEMPT
 
#define preempt_disable_notrace() \
#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif
 
#define preempt_enable_notrace() \
do { \
__preempt_count_inc(); \
barrier(); \
if (unlikely(__preempt_count_dec_and_test())) \
__preempt_schedule_context(); \
} while (0)
 
#define preempt_enable_no_resched_notrace() \
#else
#define preempt_enable_notrace() \
do { \
barrier(); \
__preempt_count_dec(); \
} while (0)
#endif
 
#else /* !CONFIG_PREEMPT_COUNT */
 
224,7 → 121,6
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
#define preemptible() 0
 
#endif /* CONFIG_PREEMPT_COUNT */
 
284,8 → 180,6
struct preempt_ops *ops;
};
 
void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);
 
/drivers/include/linux/spinlock.h
119,7 → 119,7
/*
* Despite its name, it doesn't necessarily have to be a full barrier.
* It should only guarantee that a STORE before the critical section
* can not be reordered with LOADs and STOREs inside this section.
* can not be reordered with a LOAD inside this section.
* spin_lock() is the one-way barrier, this LOAD can not escape out
* of the region. So the default implementation simply ensures that
* a STORE can not move into the critical section, smp_wmb() should
129,6 → 129,16
#define smp_mb__before_spinlock() smp_wmb()
#endif
 
/*
* Place this after a lock-acquisition primitive to guarantee that
* an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
* if the UNLOCK and LOCK are executed by the same CPU or if the
* UNLOCK and LOCK operate on the same lock variable.
*/
#ifndef smp_mb__after_unlock_lock
#define smp_mb__after_unlock_lock() do { } while (0)
#endif
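
A kernel-style sketch (hypothetical locks and data) of where the barrier goes: directly after the second acquisition, so the UNLOCK of one lock plus the LOCK of another behaves as a full barrier:

static DEFINE_SPINLOCK(a_lock);
static DEFINE_SPINLOCK(b_lock);
static int x, y;

static int unlock_lock_pair(void)
{
	int r;

	spin_lock(&a_lock);
	x = 1;
	spin_unlock(&a_lock);

	spin_lock(&b_lock);
	smp_mb__after_unlock_lock(); /* UNLOCK(a) + LOCK(b) now acts as smp_mb() */
	r = y;
	spin_unlock(&b_lock);
	return r;
}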
 
/**
* raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
179,8 → 189,6
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock_nested(lock, subclass)
# define raw_spin_lock_bh_nested(lock, subclass) \
_raw_spin_lock_bh_nested(lock, subclass)
 
# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
196,7 → 204,6
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
#endif
 
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
285,7 → 292,7
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
 
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
return &lock->rlock;
}
296,17 → 303,17
raw_spin_lock_init(&(_lock)->rlock); \
} while (0)
 
static __always_inline void spin_lock(spinlock_t *lock)
static inline void spin_lock(spinlock_t *lock)
{
raw_spin_lock(&lock->rlock);
}
 
static __always_inline void spin_lock_bh(spinlock_t *lock)
static inline void spin_lock_bh(spinlock_t *lock)
{
raw_spin_lock_bh(&lock->rlock);
}
 
static __always_inline int spin_trylock(spinlock_t *lock)
static inline int spin_trylock(spinlock_t *lock)
{
return raw_spin_trylock(&lock->rlock);
}
316,17 → 323,12
raw_spin_lock_nested(spinlock_check(lock), subclass); \
} while (0)
 
#define spin_lock_bh_nested(lock, subclass) \
do { \
raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
} while (0)
 
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
} while (0)
 
static __always_inline void spin_lock_irq(spinlock_t *lock)
static inline void spin_lock_irq(spinlock_t *lock)
{
raw_spin_lock_irq(&lock->rlock);
}
341,32 → 343,32
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
 
static __always_inline void spin_unlock(spinlock_t *lock)
static inline void spin_unlock(spinlock_t *lock)
{
raw_spin_unlock(&lock->rlock);
}
 
static __always_inline void spin_unlock_bh(spinlock_t *lock)
static inline void spin_unlock_bh(spinlock_t *lock)
{
raw_spin_unlock_bh(&lock->rlock);
}
 
static __always_inline void spin_unlock_irq(spinlock_t *lock)
static inline void spin_unlock_irq(spinlock_t *lock)
{
raw_spin_unlock_irq(&lock->rlock);
}
 
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
 
static __always_inline int spin_trylock_bh(spinlock_t *lock)
static inline int spin_trylock_bh(spinlock_t *lock)
{
return raw_spin_trylock_bh(&lock->rlock);
}
 
static __always_inline int spin_trylock_irq(spinlock_t *lock)
static inline int spin_trylock_irq(spinlock_t *lock)
{
return raw_spin_trylock_irq(&lock->rlock);
}
376,22 → 378,22
raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
 
static __always_inline void spin_unlock_wait(spinlock_t *lock)
static inline void spin_unlock_wait(spinlock_t *lock)
{
raw_spin_unlock_wait(&lock->rlock);
}
 
static __always_inline int spin_is_locked(spinlock_t *lock)
static inline int spin_is_locked(spinlock_t *lock)
{
return raw_spin_is_locked(&lock->rlock);
}
 
static __always_inline int spin_is_contended(spinlock_t *lock)
static inline int spin_is_contended(spinlock_t *lock)
{
return raw_spin_is_contended(&lock->rlock);
}
 
static __always_inline int spin_can_lock(spinlock_t *lock)
static inline int spin_can_lock(spinlock_t *lock)
{
return raw_spin_can_lock(&lock->rlock);
}
/drivers/include/linux/spinlock_api_up.h
57,7 → 57,6
 
#define _raw_spin_lock(lock) __LOCK(lock)
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
#define _raw_read_lock(lock) __LOCK(lock)
#define _raw_write_lock(lock) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
/drivers/include/linux/atomic.h
2,427 → 2,7
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>
 
/*
* Relaxed variants of xchg, cmpxchg and some atomic operations.
*
* We support four variants:
*
* - Fully ordered: The default implementation, no suffix required.
* - Acquire: Provides ACQUIRE semantics, _acquire suffix.
* - Release: Provides RELEASE semantics, _release suffix.
* - Relaxed: No ordering guarantees, _relaxed suffix.
*
* For compound atomics performing both a load and a store, ACQUIRE
* semantics apply only to the load and RELEASE semantics only to the
* store portion of the operation. Note that a failed cmpxchg_acquire
* does -not- imply any memory ordering constraints.
*
* See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
*/
 
#ifndef atomic_read_acquire
#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
#endif
 
#ifndef atomic_set_release
#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
#endif
 
/*
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*/
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
smp_mb__after_atomic(); \
__ret; \
})
 
#define __atomic_op_release(op, args...) \
({ \
smp_mb__before_atomic(); \
op##_relaxed(args); \
})
 
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
smp_mb__before_atomic(); \
__ret = op##_relaxed(args); \
smp_mb__after_atomic(); \
__ret; \
})
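
A worked expansion, assuming an architecture that supplies only the _relaxed form, of how the helpers above build the stronger variants:

/*
 * atomic_add_return_acquire(i, v)
 *   => ({ int __ret = atomic_add_return_relaxed(i, v);
 *         smp_mb__after_atomic();          // the load half gains ACQUIRE
 *         __ret; })
 *
 * atomic_add_return_release(i, v)
 *   => ({ smp_mb__before_atomic();         // the store half gains RELEASE
 *         atomic_add_return_relaxed(i, v); })
 */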
 
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed atomic_add_return
#define atomic_add_return_acquire atomic_add_return
#define atomic_add_return_release atomic_add_return
 
#else /* atomic_add_return_relaxed */
 
#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...) \
__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif
 
#ifndef atomic_add_return_release
#define atomic_add_return_release(...) \
__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif
 
#ifndef atomic_add_return
#define atomic_add_return(...) \
__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */
 
/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
#define atomic_inc_return_relaxed atomic_inc_return
#define atomic_inc_return_acquire atomic_inc_return
#define atomic_inc_return_release atomic_inc_return
 
#else /* atomic_inc_return_relaxed */
 
#ifndef atomic_inc_return_acquire
#define atomic_inc_return_acquire(...) \
__atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
#endif
 
#ifndef atomic_inc_return_release
#define atomic_inc_return_release(...) \
__atomic_op_release(atomic_inc_return, __VA_ARGS__)
#endif
 
#ifndef atomic_inc_return
#define atomic_inc_return(...) \
__atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif
#endif /* atomic_inc_return_relaxed */
 
/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return
#define atomic_sub_return_acquire atomic_sub_return
#define atomic_sub_return_release atomic_sub_return
 
#else /* atomic_sub_return_relaxed */
 
#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...) \
__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif
 
#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...) \
__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif
 
#ifndef atomic_sub_return
#define atomic_sub_return(...) \
__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */
 
/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return
#define atomic_dec_return_acquire atomic_dec_return
#define atomic_dec_return_release atomic_dec_return
 
#else /* atomic_dec_return_relaxed */
 
#ifndef atomic_dec_return_acquire
#define atomic_dec_return_acquire(...) \
__atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
#endif
 
#ifndef atomic_dec_return_release
#define atomic_dec_return_release(...) \
__atomic_op_release(atomic_dec_return, __VA_ARGS__)
#endif
 
#ifndef atomic_dec_return
#define atomic_dec_return(...) \
__atomic_op_fence(atomic_dec_return, __VA_ARGS__)
#endif
#endif /* atomic_dec_return_relaxed */
 
/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed atomic_xchg
#define atomic_xchg_acquire atomic_xchg
#define atomic_xchg_release atomic_xchg
 
#else /* atomic_xchg_relaxed */
 
#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...) \
__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif
 
#ifndef atomic_xchg_release
#define atomic_xchg_release(...) \
__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif
 
#ifndef atomic_xchg
#define atomic_xchg(...) \
__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */
 
/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed atomic_cmpxchg
#define atomic_cmpxchg_acquire atomic_cmpxchg
#define atomic_cmpxchg_release atomic_cmpxchg
 
#else /* atomic_cmpxchg_relaxed */
 
#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...) \
__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif
 
#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...) \
__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif
 
#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...) \
__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */
 
#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
#endif
 
#ifndef atomic64_set_release
#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
#endif
 
/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed atomic64_add_return
#define atomic64_add_return_acquire atomic64_add_return
#define atomic64_add_return_release atomic64_add_return
 
#else /* atomic64_add_return_relaxed */
 
#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...) \
__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...) \
__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_add_return
#define atomic64_add_return(...) \
__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */
 
/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
#define atomic64_inc_return_relaxed atomic64_inc_return
#define atomic64_inc_return_acquire atomic64_inc_return
#define atomic64_inc_return_release atomic64_inc_return
 
#else /* atomic64_inc_return_relaxed */
 
#ifndef atomic64_inc_return_acquire
#define atomic64_inc_return_acquire(...) \
__atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_inc_return_release
#define atomic64_inc_return_release(...) \
__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_inc_return
#define atomic64_inc_return(...) \
__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
#endif /* atomic64_inc_return_relaxed */
 
 
/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return
#define atomic64_sub_return_acquire atomic64_sub_return
#define atomic64_sub_return_release atomic64_sub_return
 
#else /* atomic64_sub_return_relaxed */
 
#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...) \
__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...) \
__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_sub_return
#define atomic64_sub_return(...) \
__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */
 
/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return
#define atomic64_dec_return_acquire atomic64_dec_return
#define atomic64_dec_return_release atomic64_dec_return
 
#else /* atomic64_dec_return_relaxed */
 
#ifndef atomic64_dec_return_acquire
#define atomic64_dec_return_acquire(...) \
__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_dec_return_release
#define atomic64_dec_return_release(...) \
__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_dec_return
#define atomic64_dec_return(...) \
__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
#endif
#endif /* atomic64_dec_return_relaxed */
 
/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed atomic64_xchg
#define atomic64_xchg_acquire atomic64_xchg
#define atomic64_xchg_release atomic64_xchg
 
#else /* atomic64_xchg_relaxed */
 
#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...) \
__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif
 
#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...) \
__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif
 
#ifndef atomic64_xchg
#define atomic64_xchg(...) \
__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */
 
/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
#define atomic64_cmpxchg_acquire atomic64_cmpxchg
#define atomic64_cmpxchg_release atomic64_cmpxchg
 
#else /* atomic64_cmpxchg_relaxed */
 
#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...) \
__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif
 
#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...) \
__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif
 
#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...) \
__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */
 
/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed cmpxchg
#define cmpxchg_acquire cmpxchg
#define cmpxchg_release cmpxchg
 
#else /* cmpxchg_relaxed */
 
#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...) \
__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif
 
#ifndef cmpxchg_release
#define cmpxchg_release(...) \
__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif
 
#ifndef cmpxchg
#define cmpxchg(...) \
__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */
 
/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed cmpxchg64
#define cmpxchg64_acquire cmpxchg64
#define cmpxchg64_release cmpxchg64
 
#else /* cmpxchg64_relaxed */
 
#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...) \
__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif
 
#ifndef cmpxchg64_release
#define cmpxchg64_release(...) \
__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif
 
#ifndef cmpxchg64
#define cmpxchg64(...) \
__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */
 
/* xchg_relaxed */
#ifndef xchg_relaxed
#define xchg_relaxed xchg
#define xchg_acquire xchg
#define xchg_release xchg
 
#else /* xchg_relaxed */
 
#ifndef xchg_acquire
#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__)
#endif
 
#ifndef xchg_release
#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__)
#endif
 
#ifndef xchg
#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */
 
/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
448,23 → 28,6
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
 
#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
atomic_and(~i, v);
}
#endif
 
static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
atomic_andnot(mask, v);
}
 
static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
atomic_or(mask, v);
}
 
/**
* atomic_inc_not_zero_hint - increment if not null
* @v: pointer of type atomic_t
548,17 → 111,21
}
#endif
 
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
static inline void atomic_or(int i, atomic_t *v)
{
int old;
int new;
 
#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
atomic64_and(~i, v);
do {
old = atomic_read(v);
new = old | i;
} while (atomic_cmpxchg(v, old, new) != old);
}
#endif
#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
 
#include <asm-generic/atomic-long.h>
 
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
#endif /* _LINUX_ATOMIC_H */
/drivers/include/linux/bitmap.h
52,13 → 52,16
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
* bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
* bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
* bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf
* bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
* bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf
* bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
* bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex
*/
 
/*
93,10 → 96,10
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
unsigned int nbits);
extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
extern void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
144,55 → 147,59
align_mask, 0);
}
 
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
extern int bitmap_scnlistprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int bitmap_parselist(const char *buf, unsigned long *maskp,
int nmaskbits);
extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, unsigned int nbits);
const unsigned long *old, const unsigned long *new, int bits);
extern int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, unsigned int bits);
const unsigned long *relmap, int bits);
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
unsigned int sz, unsigned int nbits);
int sz, int bits);
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
#ifdef __BIG_ENDIAN
extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
#else
#define bitmap_copy_le bitmap_copy
#endif
extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
extern int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
 
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
#define BITMAP_LAST_WORD_MASK(nbits) \
( \
((nbits) % BITS_PER_LONG) ? \
(1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
)
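
A standalone check that the two formulations agree on a 64-bit long; BITMAP_LAST_WORD_MASK(nbits) keeps the low nbits % BITS_PER_LONG bits, or the whole word when nbits is a multiple of BITS_PER_LONG:

#include <assert.h>

#define BITS_PER_LONG 64
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits)  (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

int main(void)
{
	assert(BITMAP_LAST_WORD_MASK(70) == 0x3fUL); /* 70 % 64 = 6 low bits */
	assert(BITMAP_LAST_WORD_MASK(64) == ~0UL);   /* full word            */
	assert(BITMAP_FIRST_WORD_MASK(4) == ~0xfUL); /* drops 4 low bits     */
	return 0;
}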
 
#define small_const_nbits(nbits) \
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
 
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
static inline void bitmap_zero(unsigned long *dst, int nbits)
{
if (small_const_nbits(nbits))
*dst = 0UL;
else {
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0, len);
}
}
 
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
static inline void bitmap_fill(unsigned long *dst, int nbits)
{
unsigned int nlongs = BITS_TO_LONGS(nbits);
size_t nlongs = BITS_TO_LONGS(nbits);
if (!small_const_nbits(nbits)) {
unsigned int len = (nlongs - 1) * sizeof(unsigned long);
int len = (nlongs - 1) * sizeof(unsigned long);
memset(dst, 0xff, len);
}
dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
199,12 → 206,12
}
 
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
int nbits)
{
if (small_const_nbits(nbits))
*dst = *src;
else {
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memcpy(dst, src, len);
}
}
283,8 → 290,8
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
 
return find_first_bit(src, nbits) == nbits;
else
return __bitmap_empty(src, nbits);
}
 
static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
291,11 → 298,11
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
 
return find_first_zero_bit(src, nbits) == nbits;
else
return __bitmap_full(src, nbits);
}
 
static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
302,22 → 309,22
return __bitmap_weight(src, nbits);
}
 
static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, int nbits)
static inline void bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int n, int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n;
else
__bitmap_shift_right(dst, src, shift, nbits);
__bitmap_shift_right(dst, src, n, nbits);
}
 
static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits)
static inline void bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int n, int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
*dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
else
__bitmap_shift_left(dst, src, shift, nbits);
__bitmap_shift_left(dst, src, n, nbits);
}
 
static inline int bitmap_parse(const char *buf, unsigned int buflen,
/drivers/include/linux/bitops.h
57,7 → 57,7
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
 
static inline int get_bitmask_order(unsigned int count)
static __inline__ int get_bitmask_order(unsigned int count)
{
int order;
 
65,7 → 65,7
return order; /* We could be slightly more clever with -1 here... */
}
 
static inline int get_count_order(unsigned int count)
static __inline__ int get_count_order(unsigned int count)
{
int order;
 
75,7 → 75,7
return order;
}
 
static __always_inline unsigned long hweight_long(unsigned long w)
static inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
164,8 → 164,6
* sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
* @value: value to sign extend
* @index: 0 based bit index (0<=index<32) to sign bit
*
* This is safe to use for 16- and 8-bit types as well.
*/
static inline __s32 sign_extend32(__u32 value, int index)
{
173,17 → 171,6
return (__s32)(value << shift) >> shift;
}
 
/**
* sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
* @value: value to sign extend
* @index: 0 based bit index (0<=index<64) to sign bit
*/
static inline __s64 sign_extend64(__u64 value, int index)
{
__u8 shift = 63 - index;
return (__s64)(value << shift) >> shift;
}
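
A standalone check of the shift trick; like the kernel, it relies on arithmetic right shift of signed values:

#include <assert.h>
#include <stdint.h>

static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift; /* shift sign bit up, then back */
}

int main(void)
{
	assert(sign_extend32(0x80, 7) == -128); /* bit 7 set: negative    */
	assert(sign_extend32(0x7f, 7) == 127);  /* bit 7 clear: unchanged */
	return 0;
}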
 
static inline unsigned fls_long(unsigned long l)
{
if (sizeof(l) == 4)
231,9 → 218,9
/**
* find_last_bit - find the last set bit in a memory region
* @addr: The address to start the search at
* @size: The number of bits to search
* @size: The maximum size to search
*
* Returns the bit number of the last set bit, or size.
* Returns the bit number of the first set bit, or size.
*/
extern unsigned long find_last_bit(const unsigned long *addr,
unsigned long size);
/drivers/include/linux/compiler.h
17,7 → 17,6
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
# define __pmem __attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
#else
43,7 → 42,6
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __pmem
#endif
 
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
56,11 → 54,7
#include <linux/compiler-gcc.h>
#endif
 
#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif
 
/* Intel compiler defines __GNUC__. So we will overwrite implementations
* coming from above header files here
171,10 → 165,6
# define barrier() __memory_barrier()
#endif
 
#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
 
/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
198,56 → 188,46
 
#include <uapi/linux/types.h>
 
#define __READ_ONCE_SIZE \
({ \
switch (size) { \
case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
default: \
barrier(); \
__builtin_memcpy((void *)res, (const void *)p, size); \
barrier(); \
} \
})
static __always_inline void data_access_exceeds_word_size(void)
#ifdef __compiletime_warning
__compiletime_warning("data access exceeds word size and won't be atomic")
#endif
;
 
static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
static __always_inline void data_access_exceeds_word_size(void)
{
__READ_ONCE_SIZE;
}
 
#ifdef CONFIG_KASAN
/*
* This function is not 'inline' because __no_sanitize_address confilcts
* with inlining. Attempt to inline it may cause a build failure.
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
static __always_inline void __read_once_size(volatile void *p, void *res, int size)
{
__READ_ONCE_SIZE;
switch (size) {
case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
#ifdef CONFIG_64BIT
case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
#endif
default:
barrier();
__builtin_memcpy((void *)res, (const void *)p, size);
data_access_exceeds_word_size();
barrier();
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
__READ_ONCE_SIZE;
}
#endif
 
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
{
switch (size) {
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
#ifdef CONFIG_64BIT
case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
#endif
default:
barrier();
__builtin_memcpy((void *)p, (const void *)res, size);
data_access_exceeds_word_size();
barrier();
}
}
255,15 → 235,15
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
* READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
* READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
* compiler is aware of some particular ordering. One way to make the
* compiler aware of ordering is to put the two invocations of READ_ONCE,
* WRITE_ONCE or ACCESS_ONCE() in different C statements.
* ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
*
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
* READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
* READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
* compile-time warning.
*
* Their two major use cases are: (1) Mediating communication between
274,31 → 254,12
* required ordering.
*/
 
#define __READ_ONCE(x, check) \
({ \
union { typeof(x) __val; char __c[1]; } __u; \
if (check) \
__read_once_size(&(x), __u.__c, sizeof(x)); \
else \
__read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
__u.__val; \
})
#define READ_ONCE(x) __READ_ONCE(x, 1)
#define READ_ONCE(x) \
({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
 
/*
* Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
* to hide memory access from KASAN.
*/
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
#define ASSIGN_ONCE(val, x) \
({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
 
#define WRITE_ONCE(x, val) \
({ \
union { typeof(x) __val; char __c[1]; } __u = \
{ .__val = (__force typeof(x)) (val) }; \
__write_once_size(&(x), __u.__c, sizeof(x)); \
__u.__val; \
})
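
A kernel-style sketch (hypothetical flag) of the intended pairing: the writer publishes with WRITE_ONCE() and the reader polls with READ_ONCE(), so the compiler can neither cache nor tear either access:

static int data_ready;

static void producer(void)
{
	/* ... fill in the payload ... */
	WRITE_ONCE(data_ready, 1);
}

static void consumer(void)
{
	while (!READ_ONCE(data_ready))
		cpu_relax();
	/* note: ordering against the payload itself still needs barriers */
}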
 
#endif /* __KERNEL__ */
 
#endif /* __ASSEMBLY__ */
417,14 → 378,6
#define __visible
#endif
 
/*
* Assume alignment of return value.
*/
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif
 
 
/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
432,7 → 385,7
 
/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
 
/* Compile time object size, -1 for unknown */
494,39 → 447,13
* to make the compiler aware of ordering is to put the two invocations of
* ACCESS_ONCE() in different C statements.
*
* ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
* on a union member will work as long as the size of the member matches the
* size of the union and the size is smaller than word size.
*
* The major use cases of ACCESS_ONCE used to be (1) Mediating communication
* between process-level code and irq/NMI handlers, all running on the same CPU,
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* mutilate accesses that either do not require ordering or that interact
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*
* If possible use READ_ONCE()/WRITE_ONCE() instead.
* This macro does absolutely -nothing- to prevent the CPU from reordering,
* merging, or refetching absolutely anything at any time. Its main intended
* use is to mediate communication between process-level code and irq/NMI
* handlers, all running on the same CPU.
*/
#define __ACCESS_ONCE(x) ({ \
__maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
/**
* lockless_dereference() - safely load a pointer for later dereference
* @p: The pointer to load
*
* Similar to rcu_dereference(), but for situations where the pointed-to
* object's lifetime is managed by something other than RCU. That
* "something other" might be reference counting or simple immortality.
*/
#define lockless_dereference(p) \
({ \
typeof(p) _________p1 = READ_ONCE(p); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
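
A kernel-style sketch (hypothetical gp pointer): publish with smp_store_release() and consume with lockless_dereference(), so initialisation is ordered before publication and the dependent read stays after the pointer load:

struct foo { int a; };
static struct foo *gp;

static void publish(struct foo *p)
{
	p->a = 1;
	smp_store_release(&gp, p); /* init happens-before publication */
}

static int consume(void)
{
	struct foo *p = lockless_dereference(gp);

	return p ? p->a : 0; /* dependency-ordered read */
}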
 
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes __attribute__((__section__(".kprobes.text")))
/drivers/include/linux/cpumask.h
11,7 → 11,6
#include <linux/bitmap.h>
#include <linux/bug.h>
 
/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
 
/**
23,14 → 22,6
*/
#define cpumask_bits(maskp) ((maskp)->bits)
 
/**
* cpumask_pr_args - printf args to output a cpumask
* @maskp: cpumask to be printed
*
* Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
*/
#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
 
#if NR_CPUS == 1
#define nr_cpu_ids 1
#else
151,8 → 142,10
return 1;
}
 
static inline unsigned int cpumask_local_spread(unsigned int i, int node)
static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
{
set_bit(0, cpumask_bits(dstp));
 
return 0;
}
 
206,7 → 199,7
 
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
unsigned int cpumask_local_spread(unsigned int i, int node);
int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
 
/**
* for_each_cpu - iterate over every cpu in a mask
288,11 → 281,11
* @cpumask: the cpumask pointer
*
* Returns 1 if @cpu is set in @cpumask, else returns 0
*
* No static inline type checking - see Subtlety (1) above.
*/
static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
#define cpumask_test_cpu(cpu, cpumask) \
test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))
 
/**
* cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
546,6 → 539,21
#define cpumask_of(cpu) (get_cpu_mask(cpu))
 
/**
* cpumask_scnprintf - print a cpumask into a string as comma-separated hex
* @buf: the buffer to sprintf into
* @len: the length of the buffer
* @srcp: the cpumask to print
*
* If len is zero, returns zero. Otherwise returns the length of the
* (nul-terminated) @buf string.
*/
static inline int cpumask_scnprintf(char *buf, int len,
const struct cpumask *srcp)
{
return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits);
}
 
/**
* cpumask_parse_user - extract a cpumask from a user string
* @buf: the buffer to extract from
* @len: the length of the buffer
556,7 → 564,7
static inline int cpumask_parse_user(const char __user *buf, int len,
struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
 
/**
571,10 → 579,26
struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
nr_cpu_ids);
nr_cpumask_bits);
}
 
/**
* cpulist_scnprintf - print a cpumask into a string as comma-separated list
* @buf: the buffer to sprintf into
* @len: the length of the buffer
* @srcp: the cpumask to print
*
* If len is zero, returns zero. Otherwise returns the length of the
* (nul-terminated) @buf string.
*/
static inline int cpulist_scnprintf(char *buf, int len,
const struct cpumask *srcp)
{
return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp),
nr_cpumask_bits);
}
 
/**
* cpumask_parse - extract a cpumask from from a string
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
586,7 → 610,7
char *nl = strchr(buf, '\n');
unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
 
return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
 
/**
598,7 → 622,7
*/
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
 
/**
608,7 → 632,9
*/
static inline size_t cpumask_size(void)
{
return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
/* FIXME: Once all cpumask assignments are eliminated, this
* can be nr_cpumask_bits */
return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
}
 
/*
765,7 → 791,7
#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL \
{ \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
}
 
#else /* NR_CPUS > BITS_PER_LONG */
773,7 → 799,7
#define CPU_BITS_ALL \
{ \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
}
#endif /* NR_CPUS > BITS_PER_LONG */
 
791,22 → 817,36
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
nr_cpu_ids);
nr_cpumask_bits);
}
 
/*
*
* From here down, all obsolete. Use cpumask_ variants!
*
*/
#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
 
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
 
#if NR_CPUS <= BITS_PER_LONG
 
#define CPU_MASK_ALL \
(cpumask_t) { { \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
} }
 
#else
 
#define CPU_MASK_ALL \
(cpumask_t) { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
} }
#endif /* NR_CPUS > BITS_PER_LONG */
 
#endif
 
#define CPU_MASK_NONE \
(cpumask_t) { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
817,4 → 857,143
[0] = 1UL \
} }
 
#if NR_CPUS == 1
#define first_cpu(src) ({ (void)(src); 0; })
#define next_cpu(n, src) ({ (void)(src); 1; })
#define any_online_cpu(mask) 0
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#else /* NR_CPUS > 1 */
int __first_cpu(const cpumask_t *srcp);
int __next_cpu(int n, const cpumask_t *srcp);
 
#define first_cpu(src) __first_cpu(&(src))
#define next_cpu(n, src) __next_cpu((n), &(src))
#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask)
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = -1; \
(cpu) = next_cpu((cpu), (mask)), \
(cpu) < NR_CPUS; )
#endif /* NR_CPUS > 1 */
 
#if NR_CPUS <= 64
 
#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
 
#else /* NR_CPUS > 64 */
 
int __next_cpu_nr(int n, const cpumask_t *srcp);
#define for_each_cpu_mask_nr(cpu, mask) \
for ((cpu) = -1; \
(cpu) = __next_cpu_nr((cpu), &(mask)), \
(cpu) < nr_cpu_ids; )
 
#endif /* NR_CPUS > 64 */
 
#define cpus_addr(src) ((src).bits)
 
#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
set_bit(cpu, dstp->bits);
}
 
#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
clear_bit(cpu, dstp->bits);
}
 
#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
static inline void __cpus_setall(cpumask_t *dstp, int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
 
#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
static inline void __cpus_clear(cpumask_t *dstp, int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
 
/* No static inline type checking - see Subtlety (1) above. */
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
 
#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
return test_and_set_bit(cpu, addr->bits);
}
 
#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
 
#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
 
#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
 
#define cpus_andnot(dst, src1, src2) \
__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
 
#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
static inline int __cpus_equal(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
 
#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
static inline int __cpus_intersects(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
 
#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
static inline int __cpus_subset(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
 
#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
 
#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
 
#define cpus_shift_left(dst, src, n) \
__cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_left(cpumask_t *dstp,
const cpumask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
 
#endif /* __LINUX_CPUMASK_H */
/drivers/include/linux/dma-buf.h
115,8 → 115,6
* @attachments: list of dma_buf_attachment that denotes all devices attached.
* @ops: dma_buf_ops associated with this buffer object.
* @exp_name: name of the exporter; useful for debugging.
* @owner: pointer to exporter module; used for refcounting when exporter is a
* kernel module.
* @list_node: node for dma_buf accounting and debugging.
* @priv: exporter specific private data for this buffer object.
* @resv: reservation object linked to this dma-buf
172,8 → 170,13
void dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *dmabuf_attach);
 
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
size_t size, int flags, const char *,
struct reservation_object *);
 
#define dma_buf_export(priv, ops, size, flags, resv) \
dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv)
 
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);
/drivers/include/linux/fence.h
77,7 → 77,7
spinlock_t *lock;
unsigned context, seqno;
unsigned long flags;
ktime_t timestamp;
// ktime_t timestamp;
int status;
};
 
280,22 → 280,6
}
 
/**
* fence_is_later - return if f1 is chronologically later than f2
* @f1: [in] the first fence from the same context
* @f2: [in] the second fence from the same context
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not re-used across contexts.
*/
static inline bool fence_is_later(struct fence *f1, struct fence *f2)
{
if (WARN_ON(f1->context != f2->context))
return false;
 
return f1->seqno - f2->seqno < INT_MAX;
}
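 
A sketch using the helper above (example_pick_newer is illustrative; both fences must come from the same context, as documented):
 
static struct fence *example_pick_newer(struct fence *a, struct fence *b)
{
	/* Returns the chronologically later of two same-context fences. */
	return fence_is_later(a, b) ? a : b;
}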
 
/**
* fence_later - return the chronologically later fence
* @f1: [in] the first fence from the same context
* @f2: [in] the second fence from the same context
314,16 → 298,15
* set if enable_signaling wasn't called, and enabling that here is
* overkill.
*/
if (fence_is_later(f1, f2))
if (f2->seqno - f1->seqno <= INT_MAX)
return fence_is_signaled(f2) ? NULL : f2;
else
return fence_is_signaled(f1) ? NULL : f1;
else
return fence_is_signaled(f2) ? NULL : f2;
}
 
signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
bool intr, signed long timeout);
 
 
/**
* fence_wait - sleep until the fence gets signaled
* @fence: [in] the fence to wait on
/drivers/include/linux/gfp.h
13,7 → 13,7
#define ___GFP_HIGHMEM 0x02u
#define ___GFP_DMA32 0x04u
#define ___GFP_MOVABLE 0x08u
#define ___GFP_RECLAIMABLE 0x10u
#define ___GFP_WAIT 0x10u
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
28,18 → 28,18
#define ___GFP_NOMEMALLOC 0x10000u
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_ATOMIC 0x80000u
#define ___GFP_NOACCOUNT 0x100000u
#define ___GFP_RECLAIMABLE 0x80000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_NO_KSWAPD 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
#define ___GFP_KSWAPD_RECLAIM 0x2000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
/*
* Physical address zone modifiers (see linux/mmzone.h - low four bits)
* GFP bitmasks..
*
* Zone modifiers (see linux/mmzone.h - low three bits)
*
* Do not put any conditional on these. If necessary modify the definitions
* without the underscores and use them consistently. The definitions here may
* be used in bit comparisons.
48,219 → 48,110
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 
/*
* Page mobility and placement hints
* Action modifiers - doesn't change the zoning
*
* These flags provide hints about how mobile the page is. Pages with similar
* mobility are placed within the same pageblocks to minimise problems due
* to external fragmentation.
*
* __GFP_MOVABLE (also a zone modifier) indicates that the page can be
* moved by page migration during memory compaction or can be reclaimed.
*
* __GFP_RECLAIMABLE is used for slab allocations that specify
* SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
*
* __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
* these pages will be spread between local zones to avoid all the dirty
* pages being in one zone (fair zone allocation policy).
*
* __GFP_HARDWALL enforces the cpuset memory allocation policy.
*
* __GFP_THISNODE forces the allocation to be satisfied from the requested
* node with no fallbacks or placement policy enforcements.
*/
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
 
/*
* Watermark modifiers -- controls access to emergency reserves
*
* __GFP_HIGH indicates that the caller is high-priority and that granting
* the request is necessary before the system can make forward progress.
* For example, creating an IO context to clean pages.
*
* __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
* high priority. Users are typically interrupt handlers. This may be
* used in conjunction with __GFP_HIGH
*
* __GFP_MEMALLOC allows access to all memory. This should only be used when
* the caller guarantees the allocation will allow more memory to be freed
* very shortly e.g. process exiting or swapping. Users either should
* be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
*
* __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
* This takes precedence over the __GFP_MEMALLOC flag if both are set.
*
* __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement.
*/
#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT)
 
/*
* Reclaim modifiers
*
* __GFP_IO can start physical IO.
*
* __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
* allocator recursing into the filesystem which might already be holding
* locks.
*
* __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
* This flag can be cleared to avoid unnecessary delays when a fallback
* option is available.
*
* __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
* the low watermark is reached and have it reclaim pages until the high
* watermark is reached. A caller may wish to clear this flag when fallback
* options are available and the reclaim is likely to disrupt the system. The
* canonical example is THP allocation where a fallback is cheap but
* reclaim/compaction may cause indirect stalls.
*
* __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
*
* __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
* _might_ fail. This depends upon the particular VM implementation.
*
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
* cannot handle allocation failures. New users should be evaluated carefully
* (and the flag should be used only when there is no reasonable failure
* policy) but it is definitely preferable to use the flag rather than
* open-coding an endless loop around the allocator.
* cannot handle allocation failures. This modifier is deprecated and no new
* users should be added.
*
* __GFP_NORETRY: The VM implementation must not retry indefinitely and will
* return NULL when direct reclaim and memory compaction have failed to allow
* the allocation to succeed. The OOM killer is not called with the current
* implementation.
* __GFP_NORETRY: The VM implementation must not retry indefinitely.
*
* __GFP_MOVABLE: Flag that this page will be movable by the page migration
* mechanism or reclaimed
*/
#define __GFP_IO ((__force gfp_t)___GFP_IO)
#define __GFP_FS ((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT)
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */
#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */
#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */
#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
* This takes precedence over the
* __GFP_MEMALLOC flag if both are
* set
*/
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
 
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
 
/*
* Action modifiers
*
* __GFP_COLD indicates that the caller does not expect the page to be used in
* the near future. Where possible, a cache-cold page will be returned.
*
* __GFP_NOWARN suppresses allocation failure reports.
*
* __GFP_COMP adds compound page metadata.
*
* __GFP_ZERO returns a zeroed page on success.
*
* __GFP_NOTRACK avoids tracking with kmemcheck.
*
* __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
* distinguishing in the source between false positives and allocations that
* cannot be supported (e.g. page tables).
*
* __GFP_OTHER_NODE is for allocations that are on a remote node but that
* should not be accounted for as a remote allocation in vmstat. A
* typical user would be khugepaged collapsing a huge page on a remote
* node.
* This may seem redundant, but it's a way of annotating false positives vs.
* allocations that simply cannot be supported (e.g. page tables).
*/
#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
 
/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
/*
* Useful GFP flag combinations that are commonly used. It is recommended
* that subsystems start with one of these combinations and then set/clear
* __GFP_FOO flags as necessary.
*
* GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
* watermark is applied to allow access to "atomic reserves"
*
* GFP_KERNEL is typical for kernel-internal allocations. The caller requires
* ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
*
* GFP_NOWAIT is for kernel allocations that should not stall for direct
* reclaim, start physical IO or use any filesystem callback.
*
* GFP_NOIO will use direct reclaim to discard clean pages or slab pages
* that do not require the starting of any physical IO.
*
* GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
*
* GFP_USER is for userspace allocations that also need to be directly
* accessible by the kernel or hardware. It is typically used by hardware
* for buffers that are mapped to userspace (e.g. graphics) that hardware
* still must DMA to. cpuset limits are enforced for these allocations.
*
* GFP_DMA exists for historical reasons and should be avoided where possible.
* The flag indicates that the caller requires that the lowest zone be
* used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
* it would require careful auditing as some users really require it and
* others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
* lowest zone as a type of emergency reserve.
*
* GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
* address.
*
* GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
* do not need to be directly accessible by the kernel but that cannot
* move once in use. An example may be a hardware allocation that maps
* data directly into userspace but has no addressing limitations.
*
* GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
* need direct access to but can use kmap() when access is required. They
* are expected to be movable via page reclaim or page migration. Typically,
* pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
*
* GFP_TRANSHUGE is used for THP allocations. They are compound allocations
* that will fail quickly if memory is not available and will not wake
* kswapd on failure.
*/
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO (__GFP_RECLAIM)
#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
#define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_NOIO (__GFP_WAIT)
#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
__GFP_RECLAIMABLE)
#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA __GFP_DMA
#define GFP_DMA32 __GFP_DMA32
#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
~__GFP_KSWAPD_RECLAIM)
#define GFP_IOFS (__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
__GFP_NO_KSWAPD)
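 
A sketch of the usual selection rule, valid on either side of this diff (the size and helper name are illustrative):
 
static void *example_buf(bool in_irq_context)
{
	/* Atomic contexts must not sleep; everyone else prefers GFP_KERNEL. */
	gfp_t gfp = in_irq_context ? GFP_ATOMIC : GFP_KERNEL;
 
	return kmalloc(64, gfp | __GFP_ZERO); /* zeroed on success */
}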
 
/* Convert GFP flags to their corresponding migrate type */
/*
* GFP_THISNODE does not perform any reclaim, you most likely want to
* use __GFP_THISNODE to allocate from a given node without fallback!
*/
#ifdef CONFIG_NUMA
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE ((__force gfp_t)0)
#endif
 
/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3
 
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
 
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
return gfp_flags & __GFP_DIRECT_RECLAIM;
}
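 
A sketch of the intended call pattern for the new helper (example_alloc is illustrative):
 
static void *example_alloc(size_t len, gfp_t gfp)
{
	/* Only sleepable callers may block in direct reclaim. */
	if (gfpflags_allow_blocking(gfp))
		might_sleep();
	return kmalloc(len, gfp);
}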
/* Control slab gfp mask during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
 
/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
 
/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
 
/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
platforms, used as appropriate on others */
 
#define GFP_DMA __GFP_DMA
 
/* 4GB DMA on some platforms */
#define GFP_DMA32 __GFP_DMA32
 
 
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
/drivers/include/linux/hdmi.h
25,7 → 25,6
#define __LINUX_HDMI_H_
 
#include <linux/types.h>
#include <linux/device.h>
 
enum hdmi_infoframe_type {
HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
53,11 → 52,6
HDMI_COLORSPACE_RGB,
HDMI_COLORSPACE_YUV422,
HDMI_COLORSPACE_YUV444,
HDMI_COLORSPACE_YUV420,
HDMI_COLORSPACE_RESERVED4,
HDMI_COLORSPACE_RESERVED5,
HDMI_COLORSPACE_RESERVED6,
HDMI_COLORSPACE_IDO_DEFINED,
};
 
enum hdmi_scan_mode {
64,7 → 58,6
HDMI_SCAN_MODE_NONE,
HDMI_SCAN_MODE_OVERSCAN,
HDMI_SCAN_MODE_UNDERSCAN,
HDMI_SCAN_MODE_RESERVED,
};
 
enum hdmi_colorimetry {
78,7 → 71,6
HDMI_PICTURE_ASPECT_NONE,
HDMI_PICTURE_ASPECT_4_3,
HDMI_PICTURE_ASPECT_16_9,
HDMI_PICTURE_ASPECT_RESERVED,
};
 
enum hdmi_active_aspect {
100,11 → 92,6
HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
 
/* The following EC values are only defined in CEA-861-F. */
HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,
HDMI_EXTENDED_COLORIMETRY_BT2020,
HDMI_EXTENDED_COLORIMETRY_RESERVED,
};
 
enum hdmi_quantization_range {
111,7 → 98,6
HDMI_QUANTIZATION_RANGE_DEFAULT,
HDMI_QUANTIZATION_RANGE_LIMITED,
HDMI_QUANTIZATION_RANGE_FULL,
HDMI_QUANTIZATION_RANGE_RESERVED,
};
 
/* non-uniform picture scaling */
128,7 → 114,7
};
 
enum hdmi_content_type {
HDMI_CONTENT_TYPE_GRAPHICS,
HDMI_CONTENT_TYPE_NONE,
HDMI_CONTENT_TYPE_PHOTO,
HDMI_CONTENT_TYPE_CINEMA,
HDMI_CONTENT_TYPE_GAME,
208,7 → 194,6
HDMI_AUDIO_CODING_TYPE_MLP,
HDMI_AUDIO_CODING_TYPE_DST,
HDMI_AUDIO_CODING_TYPE_WMA_PRO,
HDMI_AUDIO_CODING_TYPE_CXT,
};
 
enum hdmi_audio_sample_size {
230,25 → 215,10
};
 
enum hdmi_audio_coding_type_ext {
/* Refer to Audio Coding Type (CT) field in Data Byte 1 */
HDMI_AUDIO_CODING_TYPE_EXT_CT,
 
/*
* The next three CXT values are defined in CEA-861-E only.
* They do not exist in older versions, and in CEA-861-F they are
* defined as 'Not in use'.
*/
HDMI_AUDIO_CODING_TYPE_EXT_STREAM,
HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC,
HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND,
 
/* The following CXT values are only defined in CEA-861-F. */
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC,
HDMI_AUDIO_CODING_TYPE_EXT_DRA,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10,
};
 
struct hdmi_audio_infoframe {
329,8 → 299,5
 
ssize_t
hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
void hdmi_infoframe_log(const char *level, struct device *dev,
union hdmi_infoframe *frame);
 
#endif /* _DRM_HDMI_H */
/drivers/include/linux/i2c.h
26,8 → 26,11
#ifndef _LINUX_I2C_H
#define _LINUX_I2C_H
 
#include <linux/types.h>
#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/i2c-id.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
#include <linux/mutex.h>
#include <linux/jiffies.h>
58,6 → 61,8
* @probe: Callback for device binding
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
* @suspend: Callback for device suspend
* @resume: Callback for device resume
* @alert: Alert callback, for example for the SMBus alert protocol
* @command: Callback for bus-wide signaling (optional)
* @driver: Device driver model driver
100,6 → 105,8
 
/* driver model interfaces that don't relate to enumeration */
void (*shutdown)(struct i2c_client *);
// int (*suspend)(struct i2c_client *, pm_message_t mesg);
int (*resume)(struct i2c_client *);
 
/* Alert callback, for example for the SMBus alert protocol.
* The format and meaning of the data value depends on the protocol.
159,10 → 166,10
extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
 
enum i2c_slave_event {
I2C_SLAVE_READ_REQUESTED,
I2C_SLAVE_WRITE_REQUESTED,
I2C_SLAVE_READ_PROCESSED,
I2C_SLAVE_WRITE_RECEIVED,
I2C_SLAVE_REQ_READ_START,
I2C_SLAVE_REQ_READ_END,
I2C_SLAVE_REQ_WRITE_START,
I2C_SLAVE_REQ_WRITE_END,
I2C_SLAVE_STOP,
};
/**
173,7 → 180,7
* @platform_data: stored in i2c_client.dev.platform_data
* @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
* @fwnode: device node supplied by the platform firmware
* @acpi_node: ACPI device node
* @irq: stored in i2c_client.irq
*
* I2C doesn't actually support hardware probing, although controllers and
299,8 → 306,8
((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END })
 
 
#endif /* __KERNEL__ */
 
 
/**
* struct i2c_msg - an I2C transaction segment beginning with START
* @addr: Slave address, either seven or ten bits. When this is a ten
/drivers/include/linux/irqflags.h
85,7 → 85,7
* The local_irq_*() APIs are equal to the raw_local_irq*()
* if !TRACE_IRQFLAGS.
*/
#ifdef CONFIG_TRACE_IRQFLAGS
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
#define local_irq_enable() \
do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
#define local_irq_disable() \
107,7 → 107,23
raw_local_irq_restore(flags); \
} \
} while (0)
#define local_save_flags(flags) \
do { \
raw_local_save_flags(flags); \
} while (0)
 
#define irqs_disabled_flags(flags) \
({ \
raw_irqs_disabled_flags(flags); \
})
 
#define irqs_disabled() \
({ \
unsigned long _flags; \
raw_local_save_flags(_flags); \
raw_irqs_disabled_flags(_flags); \
})
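 
A sketch tying these macros together (the per-CPU touch in the middle is illustrative):
 
static void example_critical(void)
{
	unsigned long flags;
 
	local_irq_save(flags);
	WARN_ON(!irqs_disabled());
	/* ... touch state shared with an interrupt handler ... */
	local_irq_restore(flags);
}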
 
#define safe_halt() \
do { \
trace_hardirqs_on(); \
115,7 → 131,7
} while (0)
 
 
#else /* !CONFIG_TRACE_IRQFLAGS */
#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
 
#define local_irq_enable() do { raw_local_irq_enable(); } while (0)
#define local_irq_disable() do { raw_local_irq_disable(); } while (0)
124,28 → 140,11
raw_local_irq_save(flags); \
} while (0)
#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
#define local_save_flags(flags) do { raw_local_save_flags(flags); } while (0)
#define irqs_disabled() (raw_irqs_disabled())
#define irqs_disabled_flags(flags) (raw_irqs_disabled_flags(flags))
#define safe_halt() do { raw_safe_halt(); } while (0)
 
#endif /* CONFIG_TRACE_IRQFLAGS */
 
#define local_save_flags(flags) raw_local_save_flags(flags)
 
/*
* Some architectures don't define arch_irqs_disabled(), so even if either
* definition would be fine we need to use different ones for the time being
* to avoid build issues.
*/
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
#define irqs_disabled() \
({ \
unsigned long _flags; \
raw_local_save_flags(_flags); \
raw_irqs_disabled_flags(_flags); \
})
#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
#define irqs_disabled() raw_irqs_disabled()
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
 
#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
 
#endif
/drivers/include/linux/jiffies.h
292,145 → 292,11
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
}
 
extern unsigned long __msecs_to_jiffies(const unsigned int m);
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
/*
* HZ is equal to or smaller than 1000, and 1000 is a nice round
* multiple of HZ, divide with the factor between them, but round
* upwards:
*/
static inline unsigned long _msecs_to_jiffies(const unsigned int m)
{
return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
}
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
/*
* HZ is larger than 1000, and HZ is a nice round multiple of 1000 -
* simply multiply with the factor between them.
*
* But first make sure the multiplication result cannot overflow:
*/
static inline unsigned long _msecs_to_jiffies(const unsigned int m)
{
if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
return m * (HZ / MSEC_PER_SEC);
}
#else
/*
* Generic case - multiply, round and divide. But first check that,
* if we are doing a net multiplication, we wouldn't overflow:
*/
static inline unsigned long _msecs_to_jiffies(const unsigned int m)
{
if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
 
return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32;
}
#endif
/**
* msecs_to_jiffies: - convert milliseconds to jiffies
* @m: time in milliseconds
*
* conversion is done as follows:
*
* - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
*
* - 'too large' values [that would result in larger than
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
*
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor and
* handling any 32-bit overflows.
* for the details see __msecs_to_jiffies()
*
* msecs_to_jiffies() checks whether the passed-in value is a constant
* via __builtin_constant_p(), allowing gcc to eliminate most of the
* code; __msecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
* The HZ-range-specific helper _msecs_to_jiffies() is called both
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
*/
static inline unsigned long msecs_to_jiffies(const unsigned int m)
{
if (__builtin_constant_p(m)) {
if ((int)m < 0)
return MAX_JIFFY_OFFSET;
return _msecs_to_jiffies(m);
} else {
return __msecs_to_jiffies(m);
}
}
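 
A sketch of the constant vs. runtime split described above (timeout_ms is an illustrative variable):
 
static void example_sleep(unsigned int timeout_ms)
{
	unsigned long t1 = msecs_to_jiffies(100);        /* folds at compile time */
	unsigned long t2 = msecs_to_jiffies(timeout_ms); /* calls __msecs_to_jiffies() */
 
	schedule_timeout_interruptible(t1 + t2);
}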
 
extern unsigned long __usecs_to_jiffies(const unsigned int u);
#if !(USEC_PER_SEC % HZ)
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
}
#else
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
>> USEC_TO_HZ_SHR32;
}
#endif
 
/**
* usecs_to_jiffies: - convert microseconds to jiffies
* @u: time in microseconds
*
* conversion is done as follows:
*
* - 'too large' values [that would result in larger than
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
*
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor and
* handling any 32-bit overflows as for msecs_to_jiffies.
*
* usecs_to_jiffies() checks whether the passed-in value is a constant
* via __builtin_constant_p(), allowing gcc to eliminate most of the
* code; __usecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
* The HZ-range-specific helper _usecs_to_jiffies() is called both
* directly here and from __usecs_to_jiffies() in the case where
* constant folding is not possible.
*/
static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
{
if (__builtin_constant_p(u)) {
if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
return _usecs_to_jiffies(u);
} else {
return __usecs_to_jiffies(u);
}
}
 
extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
extern void jiffies_to_timespec64(const unsigned long jiffies,
struct timespec64 *value);
static inline unsigned long timespec_to_jiffies(const struct timespec *value)
{
struct timespec64 ts = timespec_to_timespec64(*value);
 
return timespec64_to_jiffies(&ts);
}
 
static inline void jiffies_to_timespec(const unsigned long jiffies,
struct timespec *value)
{
struct timespec64 ts;
 
jiffies_to_timespec64(jiffies, &ts);
*value = timespec64_to_timespec(ts);
}
 
extern unsigned long msecs_to_jiffies(const unsigned int m);
extern unsigned long usecs_to_jiffies(const unsigned int u);
extern unsigned long timespec_to_jiffies(const struct timespec *value);
extern void jiffies_to_timespec(const unsigned long jiffies,
struct timespec *value);
extern unsigned long timeval_to_jiffies(const struct timeval *value);
extern void jiffies_to_timeval(const unsigned long jiffies,
struct timeval *value);
/drivers/include/linux/kobject.h
80,8 → 80,7
 
extern __printf(2, 3)
int kobject_set_name(struct kobject *kobj, const char *name, ...);
extern __printf(2, 0)
int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
va_list vargs);
 
static inline const char *kobject_name(const struct kobject *kobj)
122,7 → 121,6
};
 
struct kobj_uevent_env {
char *argv[3];
char *envp[UEVENT_NUM_ENVP];
int envp_idx;
char buf[UEVENT_BUFFER_SIZE];
/drivers/include/linux/kref.h
25,147 → 25,10
atomic_t refcount;
};
 
/**
* kref_init - initialize object.
* @kref: object in question.
*/
static inline void kref_init(struct kref *kref)
{
atomic_set(&kref->refcount, 1);
}
void kref_init(struct kref *kref);
void kref_get(struct kref *kref);
int kref_put(struct kref *kref, void (*release) (struct kref *kref));
int kref_sub(struct kref *kref, unsigned int count,
void (*release) (struct kref *kref));
 
/**
* kref_get - increment refcount for object.
* @kref: object.
*/
static inline void kref_get(struct kref *kref)
{
/* If refcount was 0 before incrementing then we have a race
* condition when this kref is being freed by some other thread right now.
* In this case one should use kref_get_unless_zero()
*/
WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
}
 
/**
* kref_sub - subtract a number of refcounts for object.
* @kref: object.
* @count: Number of refcounts to subtract.
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
* in as this function. If the caller does pass kfree to this
* function, you will be publicly mocked mercilessly by the kref
* maintainer, and anyone else who happens to notice it. You have
* been warned.
*
* Subtract @count from the refcount, and if 0, call release().
* Return 1 if the object was removed, otherwise return 0. Beware, if this
* function returns 0, you still cannot count on the kref remaining in
* memory. Only use the return value if you want to see if the kref is now
* gone, not present.
*/
static inline int kref_sub(struct kref *kref, unsigned int count,
void (*release)(struct kref *kref))
{
WARN_ON(release == NULL);
 
if (atomic_sub_and_test((int) count, &kref->refcount)) {
release(kref);
return 1;
}
return 0;
}
 
/**
* kref_put - decrement refcount for object.
* @kref: object.
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
* in as this function. If the caller does pass kfree to this
* function, you will be publicly mocked mercilessly by the kref
* maintainer, and anyone else who happens to notice it. You have
* been warned.
*
* Decrement the refcount, and if 0, call release().
* Return 1 if the object was removed, otherwise return 0. Beware, if this
* function returns 0, you still cannot count on the kref remaining in
* memory. Only use the return value if you want to see if the kref is now
* gone, not present.
*/
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
return kref_sub(kref, 1, release);
}
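 
A sketch of the canonical embed/init/put pattern these helpers assume (struct example_obj and its fields are illustrative):
 
struct example_obj {
	struct kref ref;
	/* payload ... */
};
 
static void example_release(struct kref *ref)
{
	struct example_obj *obj = container_of(ref, struct example_obj, ref);
 
	kfree(obj); /* never pass kfree itself as the release callback */
}
 
/* create: kref_init(&obj->ref); share: kref_get(&obj->ref);
 * done:   kref_put(&obj->ref, example_release); */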
 
/**
* kref_put_spinlock_irqsave - decrement refcount for object.
* @kref: object.
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
* in as this function.
* @lock: lock to take in release case
*
* Behaves identically to kref_put() with one exception. If the reference count
* drops to zero, the lock will be taken atomically wrt dropping the reference
* count. The release function has to call spin_unlock() without _irqrestore.
*/
static inline int kref_put_spinlock_irqsave(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
{
unsigned long flags;
 
WARN_ON(release == NULL);
if (atomic_add_unless(&kref->refcount, -1, 1))
return 0;
spin_lock_irqsave(lock, flags);
if (atomic_dec_and_test(&kref->refcount)) {
release(kref);
local_irq_restore(flags);
return 1;
}
spin_unlock_irqrestore(lock, flags);
return 0;
}
 
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
{
WARN_ON(release == NULL);
if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
mutex_lock(lock);
if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
mutex_unlock(lock);
return 0;
}
release(kref);
return 1;
}
return 0;
}
 
/**
* kref_get_unless_zero - Increment refcount for object unless it is zero.
* @kref: object.
*
* Return non-zero if the increment succeeded. Otherwise return 0.
*
* This function is intended to simplify locking around refcounting for
* objects that can be looked up from a lookup structure, and which are
* removed from that lookup structure in the object destructor.
* Operations on such objects require at least a read lock around
* lookup + kref_get, and a write lock around kref_put + remove from lookup
* structure. Furthermore, RCU implementations become extremely tricky.
* With a lookup followed by a kref_get_unless_zero *with return value check*,
* locking in the kref_put path can be deferred to the actual removal from
* the lookup structure and RCU lookups become trivial.
*/
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
return atomic_add_unless(&kref->refcount, 1, 0);
}
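 
A sketch of the lookup pattern the comment describes (example_lock and example_table_find are hypothetical, standing in for the caller's lookup structure):
 
static struct example_obj *example_lookup(int key)
{
	struct example_obj *obj;
 
	spin_lock(&example_lock);
	obj = example_table_find(key); /* hypothetical lookup */
	if (obj && !kref_get_unless_zero(&obj->ref))
		obj = NULL; /* object was already being torn down */
	spin_unlock(&example_lock);
	return obj;
}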
#endif /* _KREF_H_ */
/drivers/include/linux/list.h
672,11 → 672,6
n->pprev = &n->next;
}
 
static inline bool hlist_fake(struct hlist_node *h)
{
return h->pprev == &h->next;
}
 
/*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
/drivers/include/linux/lockdep.h
130,8 → 130,8
};
 
struct lock_class_stats {
unsigned long contention_point[LOCKSTAT_POINTS];
unsigned long contending_point[LOCKSTAT_POINTS];
unsigned long contention_point[4];
unsigned long contending_point[4];
struct lock_time read_waittime;
struct lock_time write_waittime;
struct lock_time read_holdtime;
255,7 → 255,6
unsigned int check:1; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
unsigned int references:12; /* 32 bits */
unsigned int pin_count;
};
 
/*
355,9 → 354,6
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
 
extern void lock_pin_lock(struct lockdep_map *lock);
extern void lock_unpin_lock(struct lockdep_map *lock);
 
# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
372,9 → 368,6
 
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
 
#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
#define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map)
 
#else /* !CONFIG_LOCKDEP */
 
static inline void lockdep_off(void)
427,9 → 420,6
 
#define lockdep_recursing(tsk) (0)
 
#define lockdep_pin_lock(l) do { (void)(l); } while (0)
#define lockdep_unpin_lock(l) do { (void)(l); } while (0)
 
#endif /* !LOCKDEP */
 
#ifdef CONFIG_LOCK_STAT
541,13 → 531,8
# define might_lock_read(lock) do { } while (0)
#endif
 
#ifdef CONFIG_LOCKDEP
#ifdef CONFIG_PROVE_RCU
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif
 
#endif /* __LINUX_LOCKDEP_H */
/drivers/include/linux/mod_devicetable.h
53,9 → 53,9
 
/**
* struct usb_device_id - identifies USB devices for probing and hotplugging
* @match_flags: Bit mask controlling which of the other fields are used to
* match against new devices. Any field except for driver_info may be
* used, although some only make sense in conjunction with other fields.
* @match_flags: Bit mask controlling of the other fields are used to match
* against new devices. Any field except for driver_info may be used,
* although some only make sense in conjunction with other fields.
* This is usually set by a USB_DEVICE_*() macro, which sets all
* other fields in this structure except for driver_info.
* @idVendor: USB vendor ID for a device; numbers are assigned
189,8 → 189,6
struct acpi_device_id {
__u8 id[ACPI_ID_LEN];
kernel_ulong_t driver_data;
__u32 cls;
__u32 cls_msk;
};
 
#define PNP_ID_LEN 8
219,18 → 217,11
__u8 proto;
};
 
struct hda_device_id {
__u32 vendor_id;
__u32 rev_id;
__u8 api_version;
const char *name;
unsigned long driver_data;
};
 
/*
* Struct used for matching a device
*/
struct of_device_id {
struct of_device_id
{
char name[32];
char type[32];
char compatible[128];
374,6 → 365,8
} __attribute__((packed, aligned(2)));
#define SSB_DEVICE(_vendor, _coreid, _revision) \
{ .vendor = _vendor, .coreid = _coreid, .revision = _revision, }
#define SSB_DEVTABLE_END \
{ 0, },
 
#define SSB_ANY_VENDOR 0xFFFF
#define SSB_ANY_ID 0xFFFF
388,6 → 381,8
} __attribute__((packed,aligned(2)));
#define BCMA_CORE(_manuf, _id, _rev, _class) \
{ .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, }
#define BCMA_CORETABLE_END \
{ 0, },
 
#define BCMA_ANY_MANUF 0xFFFF
#define BCMA_ANY_ID 0xFFFF
556,14 → 551,6
void *data;
};
 
/**
* struct mips_cdmm_device_id - identifies devices in MIPS CDMM bus
* @type: Device type identifier.
*/
struct mips_cdmm_device_id {
__u8 type;
};
 
/*
* Match x86 CPUs for CPU specific drivers.
* See documentation of "x86_match_cpu" for details.
609,21 → 596,9
 
#define MEI_CL_MODULE_PREFIX "mei:"
#define MEI_CL_NAME_SIZE 32
#define MEI_CL_VERSION_ANY 0xff
 
/**
* struct mei_cl_device_id - MEI client device identifier
* @name: helper name
* @uuid: client uuid
* @version: client protocol version
* @driver_info: information used by the driver.
*
* identifies mei client device by uuid and name
*/
struct mei_cl_device_id {
char name[MEI_CL_NAME_SIZE];
uuid_le uuid;
__u8 version;
kernel_ulong_t driver_info;
};
 
651,10 → 626,4
kernel_ulong_t driver_data;
};
 
struct ulpi_device_id {
__u16 vendor;
__u16 product;
kernel_ulong_t driver_data;
};
 
#endif /* LINUX_MOD_DEVICETABLE_H */
/drivers/include/linux/module.h
9,8 → 9,9
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/cache.h>
#include <linux/compiler.h>
 
#include <linux/kobject.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/printk.h>
/drivers/include/linux/moduleparam.h
2,69 → 2,9
#define _LINUX_MODULE_PARAMS_H
/* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */
#include <linux/kernel.h>
/**
* module_param - typesafe helper for a module/cmdline parameter
* @value: the variable to alter, and exposed parameter name.
* @type: the type of the parameter
* @perm: visibility in sysfs.
*
* @value becomes the module parameter, or (prefixed by KBUILD_MODNAME and a
* ".") the kernel commandline parameter. Note that - is changed to _, so
* the user can use "foo-bar=1" even for variable "foo_bar".
*
* @perm is 0 if the variable is not to appear in sysfs, or 0444
* for world-readable, 0644 for root-writable, etc. Note that if it
* is writable, you may need to use kernel_param_lock() around
* accesses (esp. charp, which can be kfreed when it changes).
*
* The @type is simply pasted to refer to a param_ops_##type and a
* param_check_##type: for convenience many standard types are provided but
* you can create your own by defining those variables.
*
* Standard types are:
* byte, short, ushort, int, uint, long, ulong
* charp: a character pointer
* bool: a bool, values 0/1, y/n, Y/N.
* invbool: the above, only sense-reversed (N = true).
*/
#define module_param(name, type, perm) \
module_param_named(name, name, type, perm)
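 
A sketch of the documented usage (debug_level is an illustrative parameter name):
 
static int debug_level;
module_param(debug_level, int, 0444); /* world-readable in sysfs */
MODULE_PARM_DESC(debug_level, "verbosity, 0-3");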
 
/**
* module_param_unsafe - same as module_param but taints kernel
*/
#define module_param_unsafe(name, type, perm) \
module_param_named_unsafe(name, name, type, perm)
 
/**
* module_param_named - typesafe helper for a renamed module/cmdline parameter
* @name: a valid C identifier which is the parameter name.
* @value: the actual lvalue to alter.
* @type: the type of the parameter
* @perm: visibility in sysfs.
*
* Usually it's a good idea to have variable names and user-exposed names the
* same, but that's harder if the variable must be non-static or is inside a
* structure. This allows exposure under a different name.
*/
#define MODULE_PARM_DESC(_parm, desc)
#define module_param_named(name, value, type, perm)
/**
* module_param_named_unsafe - same as module_param_named but taints kernel
*/
 
#define module_param_named_unsafe(name, value, type, perm)
 
#define MODULE_PARM_DESC(_parm, desc)
 
#ifdef CONFIG_SYSFS
extern void kernel_param_lock(struct module *mod);
extern void kernel_param_unlock(struct module *mod);
#else
static inline void kernel_param_lock(struct module *mod)
{
}
static inline void kernel_param_unlock(struct module *mod)
{
}
#endif
#endif
/drivers/include/linux/pci.h
25,17 → 25,7
#include <linux/pci_regs.h> /* The pci register defines */
#include <linux/ioport.h>
 
#include <linux/device.h>
 
#if 0
struct device
{
struct device *parent;
void *driver_data;
};
 
#endif
 
#define PCI_CFG_SPACE_SIZE 256
#define PCI_CFG_SPACE_EXP_SIZE 4096
 
214,10 → 204,6
*
* 7:3 = slot
* 2:0 = function
*
* PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
* In the interest of not exposing interfaces to user-space unnecessarily,
* the following kernel-only defines are being added here.
*/
#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
441,12 → 427,11
D3cold, not set for devices
powered on/off by the
corresponding bridge */
unsigned int ignore_hotplug:1; /* Ignore hotplug events */
unsigned int d3_delay; /* D3->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
 
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state */
struct pcie_link_state *link_state; /* ASPM link state. */
#endif
 
pci_channel_state_t error_state; /* current connectivity state */
461,15 → 446,13
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
 
bool match_driver; /* Skip attaching driver */
/* These fields are used by common fixups */
unsigned int transparent:1; /* Subtractive decode PCI bridge */
unsigned int transparent:1; /* Transparent PCI bridge */
unsigned int multifunction:1;/* Part of multi-function device */
/* keep track of device state */
unsigned int is_added:1;
unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */
unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
unsigned int block_cfg_access:1; /* config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
477,6 → 460,8
unsigned int msix_enabled:1;
unsigned int ari_enabled:1; /* ARI forwarding */
unsigned int is_managed:1;
unsigned int is_pcie:1; /* Obsolete. Will be removed.
Use pci_is_pcie() instead */
unsigned int needs_freset:1; /* Dev requires fundamental reset */
unsigned int state_saved:1;
unsigned int is_physfn:1;
/drivers/include/linux/percpu-defs.h
488,8 → 488,10
#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
 
/*
* Operations with implied preemption/interrupt protection. These
* operations can be used without worrying about preemption or interrupt.
* Operations with implied preemption protection. These operations can be
* used without worrying about preemption. Note that interrupts may still
* occur while an operation is in progress and if the interrupt modifies
* the variable too then RMW actions may not be reliable.
*/
#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp)
#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val)
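 
A sketch of the preemption-safe usage (the example_hits counter is illustrative; this_cpu_inc() comes from the same family of wrappers):
 
static DEFINE_PER_CPU(unsigned long, example_hits);
 
static void example_count(void)
{
	/* No explicit preempt_disable() needed around the operation. */
	this_cpu_inc(example_hits);
	pr_debug("hits on this cpu: %lu\n", this_cpu_read(example_hits));
}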
/drivers/include/linux/personality.h
44,9 → 44,11
*/
#define personality(pers) (pers & PER_MASK)
 
 
/*
* Change personality of the currently running process.
*/
#define set_personality(pers) (current->personality = (pers))
#define set_personality(pers) \
((current->personality == (pers)) ? 0 : __set_personality(pers))
 
#endif /* _LINUX_PERSONALITY_H */
/drivers/include/linux/printk.h
65,10 → 65,6
*/
#define DEPRECATED "[Deprecated]: "
 
/*
* Dummy printk for disabled debugging statements to use whilst maintaining
* gcc's format and side-effect checking.
*/
static inline __printf(1, 2)
int no_printk(const char *fmt, ...)
{
107,11 → 103,6
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) \
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
/*
* Like KERN_CONT, pr_cont() should only be used when continuing
* a line with no newline ('\n') enclosed. Otherwise it defaults
* back to KERN_DEFAULT.
*/
#define pr_cont(fmt, ...) \
printk(KERN_CONT fmt, ##__VA_ARGS__)
 
259,9 → 250,9
DUMP_PREFIX_ADDRESS,
DUMP_PREFIX_OFFSET
};
extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
int groupsize, char *linebuf, size_t linebuflen,
bool ascii);
extern void hex_dump_to_buffer(const void *buf, size_t len,
int rowsize, int groupsize,
char *linebuf, size_t linebuflen, bool ascii);
 
extern void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
/drivers/include/linux/rbtree_augmented.h
123,11 → 123,11
{
if (parent) {
if (parent->rb_left == old)
WRITE_ONCE(parent->rb_left, new);
parent->rb_left = new;
else
WRITE_ONCE(parent->rb_right, new);
parent->rb_right = new;
} else
WRITE_ONCE(root->rb_node, new);
root->rb_node = new;
}
 
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
137,8 → 137,7
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
struct rb_node *child = node->rb_right;
struct rb_node *tmp = node->rb_left;
struct rb_node *child = node->rb_right, *tmp = node->rb_left;
struct rb_node *parent, *rebalance;
unsigned long pc;
 
168,7 → 167,6
tmp = parent;
} else {
struct rb_node *successor = child, *child2;
 
tmp = child->rb_left;
if (!tmp) {
/*
182,7 → 180,6
*/
parent = successor;
child2 = successor->rb_right;
 
augment->copy(node, successor);
} else {
/*
204,23 → 201,19
successor = tmp;
tmp = tmp->rb_left;
} while (tmp);
child2 = successor->rb_right;
WRITE_ONCE(parent->rb_left, child2);
WRITE_ONCE(successor->rb_right, child);
parent->rb_left = child2 = successor->rb_right;
successor->rb_right = child;
rb_set_parent(child, successor);
 
augment->copy(node, successor);
augment->propagate(parent, successor);
}
 
tmp = node->rb_left;
WRITE_ONCE(successor->rb_left, tmp);
successor->rb_left = tmp = node->rb_left;
rb_set_parent(tmp, successor);
 
pc = node->__rb_parent_color;
tmp = __rb_parent(pc);
__rb_change_child(node, successor, tmp, root);
 
if (child2) {
successor->__rb_parent_color = pc;
rb_set_parent_color(child2, parent, RB_BLACK);
/drivers/include/linux/rculist.h
29,8 → 29,8
*/
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
WRITE_ONCE(list->next, list);
WRITE_ONCE(list->prev, list);
ACCESS_ONCE(list->next) = list;
ACCESS_ONCE(list->prev) = list;
}
 
/*
247,7 → 247,10
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_entry_rcu(ptr, type, member) \
container_of(lockless_dereference(ptr), type, member)
({ \
typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
})
 
/**
* Where are list_empty_rcu() and list_first_entry_rcu()?
285,7 → 288,7
#define list_first_or_null_rcu(ptr, type, member) \
({ \
struct list_head *__ptr = (ptr); \
struct list_head *__next = READ_ONCE(__ptr->next); \
struct list_head *__next = ACCESS_ONCE(__ptr->next); \
likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
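 
A sketch of peeking at an RCU-protected list head with the macro above (struct example_item and example_list are illustrative):
 
struct example_item {
	struct list_head node;
	int val;
};
static LIST_HEAD(example_list);
 
static void example_peek(void)
{
	struct example_item *first;
 
	rcu_read_lock();
	first = list_first_or_null_rcu(&example_list, struct example_item, node);
	if (first)
		pr_info("head val=%d\n", first->val);
	rcu_read_unlock();
}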
 
521,11 → 524,11
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue_rcu(pos, member) \
for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member); \
for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member))
pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
typeof(*(pos)), member))
 
/**
* hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
533,11 → 536,11
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member); \
for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member))
pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
typeof(*(pos)), member))
 
/**
* hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
546,8 → 549,8
*/
#define hlist_for_each_entry_from_rcu(pos, member) \
for (; pos; \
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member))
pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
typeof(*(pos)), member))
 
#endif /* __KERNEL__ */
#endif
/drivers/include/linux/rcupdate.h
44,32 → 44,10
//#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
 
#include <asm/barrier.h>
 
extern int rcu_expedited; /* for sysctl */
 
#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_expedited(void) /* Internal RCU use. */
{
return false;
}
 
static inline void rcu_expedite_gp(void)
{
}
 
static inline void rcu_unexpedite_gp(void)
{
}
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_expedited(void); /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */
 
enum rcutorture_type {
RCU_FLAVOR,
RCU_BH_FLAVOR,
160,7 → 138,7
* more than one CPU).
*/
void call_rcu(struct rcu_head *head,
rcu_callback_t func);
void (*func)(struct rcu_head *head));
 
#else /* #ifdef CONFIG_PREEMPT_RCU */
 
191,7 → 169,7
* memory ordering guarantees.
*/
void call_rcu_bh(struct rcu_head *head,
rcu_callback_t func);
void (*func)(struct rcu_head *head));
 
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
213,7 → 191,7
* memory ordering guarantees.
*/
void call_rcu_sched(struct rcu_head *head,
rcu_callback_t func);
void (*func)(struct rcu_head *rcu));
 
void synchronize_sched(void);
 
235,7 → 213,7
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
*/
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);
 
258,13 → 236,11
 
static inline void __rcu_read_lock(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
preempt_disable();
}
 
static inline void __rcu_read_unlock(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
preempt_enable();
}
 
282,13 → 258,14
 
/* Internal to kernel */
void rcu_init(void);
void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
struct notifier_block;
int rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);
 
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
302,7 → 279,7
}
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
 
#ifdef CONFIG_NO_HZ_FULL
#ifdef CONFIG_RCU_USER_QS
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
310,7 → 287,7
static inline void rcu_user_exit(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_RCU_USER_QS */
 
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
354,13 → 331,12
extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
do { \
rcu_all_qs(); \
if (READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
} while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#define rcu_note_voluntary_context_switch(t) do { } while (0)
#endif /* #else #ifdef CONFIG_TASKS_RCU */
 
/**
385,6 → 361,10
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
 
typedef void call_rcu_func_t(struct rcu_head *head,
void (*func)(struct rcu_head *head));
void wait_rcu_gp(call_rcu_func_t crf);
 
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
596,17 → 576,17
 
#define __rcu_access_pointer(p, space) \
({ \
typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_check(p, c, space) \
({ \
/* Dependency order vs. p above. */ \
typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(________p1)); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
615,6 → 595,21
((typeof(*p) __force __kernel *)(p)); \
})
 
#define __rcu_access_index(p, space) \
({ \
typeof(p) _________p1 = ACCESS_ONCE(p); \
rcu_dereference_sparse(p, space); \
(_________p1); \
})
#define __rcu_dereference_index_check(p, c) \
({ \
typeof(p) _________p1 = ACCESS_ONCE(p); \
rcu_lockdep_assert(c, \
"suspicious rcu_dereference_index_check() usage"); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
 
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
* @v: The value to statically initialize with.
622,6 → 617,21
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
 
/**
* lockless_dereference() - safely load a pointer for later dereference
* @p: The pointer to load
*
* Similar to rcu_dereference(), but for situations where the pointed-to
* object's lifetime is managed by something other than RCU. That
* "something other" might be reference counting or simple immortality.
*/
#define lockless_dereference(p) \
({ \
typeof(p) _________p1 = ACCESS_ONCE(p); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
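A short sketch of where lockless_dereference() fits (struct cfg, global_cfg, and read_mtu() are hypothetical): the object is assumed never freed, so no RCU read-side section is needed, only the dependency ordering.

struct cfg {
	int mtu;
};

static struct cfg *global_cfg;	/* published once, never freed */

static int read_mtu(void)
{
	struct cfg *c = lockless_dereference(global_cfg);

	return c ? c->mtu : 0;	/* field reads are ordered after the load */
}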
 
/**
* rcu_assign_pointer() - assign to RCU-protected pointer
* @p: pointer to assign to
* @v: value to assign (publish)
659,7 → 669,7
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
* smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
* smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
* when the value of this pointer is accessed, but the pointer is not
* dereferenced, for example, when testing an RCU-protected pointer against
* NULL. Although rcu_access_pointer() may also be used in cases where
709,7 → 719,7
* annotated as __rcu.
*/
#define rcu_dereference_check(p, c) \
__rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
__rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
 
/**
* rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
719,7 → 729,7
* This is the RCU-bh counterpart to rcu_dereference_check().
*/
#define rcu_dereference_bh_check(p, c) \
__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
__rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
 
/**
* rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
729,7 → 739,7
* This is the RCU-sched counterpart to rcu_dereference_check().
*/
#define rcu_dereference_sched_check(p, c) \
__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
__rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
__rcu)
 
#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
744,12 → 754,47
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
 
/**
* rcu_access_index() - fetch RCU index with no dereferencing
* @p: The index to read
*
* Return the value of the specified RCU-protected index, but omit the
* smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
* when the value of this index is accessed, but the index is not
* dereferenced, for example, when testing an RCU-protected index against
* -1. Although rcu_access_index() may also be used in cases where
* update-side locks prevent the value of the index from changing, you
* should instead use rcu_dereference_index_protected() for this use case.
*/
#define rcu_access_index(p) __rcu_access_index((p), __rcu)
 
/**
* rcu_dereference_index_check() - rcu_dereference for indices with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Similar to rcu_dereference_check(), but omits the sparse checking.
* This allows rcu_dereference_index_check() to be used on integers,
* which can then be used as array indices. Attempting to use
* rcu_dereference_check() on an integer will give compiler warnings
* because the sparse address-space mechanism relies on dereferencing
* the RCU-protected pointer. Dereferencing integers is not something
* that even gcc will put up with.
*
* Note that this function does not implicitly check for RCU read-side
* critical sections. If this function gains lots of uses, it might
* make sense to provide versions for each flavor of RCU, but it does
* not make sense as of early 2010.
*/
#define rcu_dereference_index_check(p, c) \
__rcu_dereference_index_check((p), (c))
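A sketch of the index flavor in use (slots[], cur_idx, and current_slot() are hypothetical): the caller supplies the protection condition, here an RCU read-side section.

static int slots[8];
static int cur_idx;	/* updated by the writer, read under RCU */

static int current_slot(void)
{
	int idx, val;

	rcu_read_lock();
	idx = rcu_dereference_index_check(cur_idx, rcu_read_lock_held());
	val = slots[idx];
	rcu_read_unlock();
	return val;
}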
 
/**
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
* both the smp_read_barrier_depends() and the READ_ONCE(). This
* both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
* is useful in cases where update-side locks prevent the value of the
* pointer from changing. Please note that this primitive does -not-
* prevent the compiler from repeating this reference or combining it
887,9 → 932,9
{
rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
rcu_lock_release(&rcu_lock_map);
__release(RCU);
__rcu_read_unlock();
rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}
 
/**
1075,13 → 1120,13
#define kfree_rcu(ptr, rcu_head) \
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
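A sketch of the intended use (struct blob and drop_blob() are hypothetical): the second argument names the rcu_head member, which is how __kfree_rcu() computes the offset above.

struct blob {
	int data;
	struct rcu_head rcu;
};

static void drop_blob(struct blob *b)
{
	kfree_rcu(b, rcu);	/* kfree(b) after a grace period, no callback needed */
}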
 
#ifdef CONFIG_TINY_RCU
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline int rcu_needs_cpu(unsigned long *delta_jiffies)
{
*nextevt = KTIME_MAX;
*delta_jiffies = ULONG_MAX;
return 0;
}
#endif /* #ifdef CONFIG_TINY_RCU */
#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
 
#if defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
/drivers/include/linux/rcutiny.h
92,53 → 92,21
}
 
/*
* Return the number of grace periods started.
* Return the number of grace periods.
*/
static inline unsigned long rcu_batches_started(void)
static inline long rcu_batches_completed(void)
{
return 0;
}
 
/*
* Return the number of bottom-half grace periods started.
* Return the number of bottom-half grace periods.
*/
static inline unsigned long rcu_batches_started_bh(void)
static inline long rcu_batches_completed_bh(void)
{
return 0;
}
 
/*
* Return the number of sched grace periods started.
*/
static inline unsigned long rcu_batches_started_sched(void)
{
return 0;
}
 
/*
* Return the number of grace periods completed.
*/
static inline unsigned long rcu_batches_completed(void)
{
return 0;
}
 
/*
* Return the number of bottom-half grace periods completed.
*/
static inline unsigned long rcu_batches_completed_bh(void)
{
return 0;
}
 
/*
* Return the number of sched grace periods completed.
*/
static inline unsigned long rcu_batches_completed_sched(void)
{
return 0;
}
 
static inline void rcu_force_quiescent_state(void)
{
}
159,22 → 127,6
{
}
 
static inline void rcu_idle_enter(void)
{
}
 
static inline void rcu_idle_exit(void)
{
}
 
static inline void rcu_irq_enter(void)
{
}
 
static inline void rcu_irq_exit(void)
{
}
 
static inline void exit_rcu(void)
{
}
202,10 → 154,7
return true;
}
 
 
#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
static inline void rcu_all_qs(void)
{
}
 
#endif /* __LINUX_RCUTINY_H */
/drivers/include/linux/scatterlist.h
2,38 → 2,13
#define _LINUX_SCATTERLIST_H
 
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mm.h>
 
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
unsigned long sg_magic;
#endif
unsigned long page_link;
unsigned int offset;
unsigned int length;
dma_addr_t dma_address;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
unsigned int dma_length;
#endif
};
#include <asm/types.h>
#include <asm/scatterlist.h>
//#include <asm/io.h>
 
/*
* These macros should be used after a dma_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries dma_map_sg
* returns, or alternatively stop on the first sg_dma_len(sg) which
* is 0.
*/
#define sg_dma_address(sg) ((sg)->dma_address)
 
#ifdef CONFIG_NEED_SG_DMA_LENGTH
#define sg_dma_len(sg) ((sg)->dma_length)
#else
#define sg_dma_len(sg) ((sg)->length)
#endif
 
struct sg_table {
struct scatterlist *sgl; /* the list */
unsigned int nents; /* number of mapped entries */
43,9 → 18,10
/*
* Notes on SG table design.
*
* We use the unsigned long page_link field in the scatterlist struct to place
* the page pointer AND encode information about the sg table as well. The two
* lower bits are reserved for this information.
* Architectures must provide an unsigned long page_link field in the
* scatterlist struct. We use that to place the page pointer AND encode
* information about the sg table as well. The two lower bits are reserved
* for this information.
*
* If bit 0 is set, then the page_link contains a pointer to the next sg
* table list. Otherwise the next entry is at sg + 1.
132,14 → 108,14
* @buflen: Data length
*
**/
static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
unsigned int buflen)
{
#ifdef CONFIG_DEBUG_SG
BUG_ON(!virt_addr_valid(buf));
#endif
sg_set_page(sg, (struct page*)((unsigned)buf&0xFFFFF000), buflen, offset_in_page(buf));
}
//static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
// unsigned int buflen)
//{
//#ifdef CONFIG_DEBUG_SG
// BUG_ON(!virt_addr_valid(buf));
//#endif
// sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
//}
 
/*
* Loop over each sg element, following the pointer to a new list if necessary
160,6 → 136,10
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
struct scatterlist *sgl)
{
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
BUG();
#endif
 
/*
* offset and length are unused for chain entry. Clear them.
*/
241,16 → 221,10
//}
 
int sg_nents(struct scatterlist *sg);
int sg_nents_for_len(struct scatterlist *sg, u64 len);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
int sg_split(struct scatterlist *in, const int in_mapped_nents,
const off_t skip, const int nb_splits,
const size_t *split_sizes,
struct scatterlist **out, int *out_mapped_nents,
gfp_t gfp_mask);
 
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
265,16 → 239,13
unsigned long offset, unsigned long size,
gfp_t gfp_mask);
 
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
size_t buflen, off_t skip, bool to_buffer);
 
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
const void *buf, size_t buflen);
void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
 
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
const void *buf, size_t buflen, off_t skip);
void *buf, size_t buflen, off_t skip);
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
 
378,6 → 349,4
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
 
#define dma_unmap_sg(d, s, n, r)
 
#endif /* _LINUX_SCATTERLIST_H */
/drivers/include/linux/sched.h
2,47 → 2,7
#define _LINUX_SCHED_H
 
 
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
*
* We have two separate sets of flags: task->state
* is about runnability, while task->exit_state are
* about the task exiting. Confusing, but this way
* modifying one set can't modify the other one by
* mistake.
*/
#define TASK_RUNNING 0
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
#define __TASK_STOPPED 4
#define __TASK_TRACED 8
/* in tsk->exit_state */
#define EXIT_DEAD 16
#define EXIT_ZOMBIE 32
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_PARKED 512
#define TASK_NOLOAD 1024
#define TASK_STATE_MAX 2048
/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
 
#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
 
/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
 
/* get_task_state() */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
/* Task command name length */
#define TASK_COMM_LEN 16
 
/drivers/include/linux/seq_file.h
1,26 → 1,9
/* stub */
#ifndef _LINUX_SEQ_FILE_H
#define _LINUX_SEQ_FILE_H
 
#include <linux/types.h>
#include <linux/string.h>
#include <linux/bug.h>
#include <linux/mutex.h>
 
struct seq_file {
char *buf;
size_t size;
size_t from;
size_t count;
size_t pad_until;
loff_t index;
loff_t read_pos;
u64 version;
void *private;
};
 
int seq_puts(struct seq_file *m, const char *s);
 
__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
 
 
#endif
/drivers/include/linux/seqlock.h
35,7 → 35,6
#include <linux/spinlock.h>
//#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <asm/processor.h>
 
/*
109,7 → 108,7
unsigned ret;
 
repeat:
ret = READ_ONCE(s->sequence);
ret = ACCESS_ONCE(s->sequence);
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
128,7 → 127,7
*/
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
unsigned ret = ACCESS_ONCE(s->sequence);
smp_rmb();
return ret;
}
180,7 → 179,7
*/
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
unsigned ret = ACCESS_ONCE(s->sequence);
smp_rmb();
return ret & ~1;
}
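For context, a sketch of the usual reader loop these primitives back (the xy structure and read_pair() are hypothetical; read_seqcount_begin()/read_seqcount_retry() are the lockdep-aware wrappers around the raw helpers above):

static struct {
	seqcount_t seq;
	int x, y;
} xy;

static void read_pair(int *x, int *y)
{
	unsigned start;

	do {
		start = read_seqcount_begin(&xy.seq);
		*x = xy.x;	/* speculative reads */
		*y = xy.y;
	} while (read_seqcount_retry(&xy.seq, start));	/* retry if a writer interleaved */
}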
237,79 → 236,6
/*
* raw_write_seqcount_latch - redirect readers to even/odd copy
* @s: pointer to seqcount_t
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
* interrupt the modification -- e.g. the concurrency is strictly between CPUs
* -- you most likely do not need this.
*
* Where the traditional RCU/lockless data structures rely on atomic
* modifications to ensure queries observe either the old or the new state the
* latch allows the same for non-atomic updates. The trade-off is doubling the
* cost of storage; we have to maintain two copies of the entire data
* structure.
*
* Very simply put: we first modify one copy and then the other. This ensures
* there is always one copy in a stable state, ready to give us an answer.
*
* The basic form is a data structure like:
*
* struct latch_struct {
* seqcount_t seq;
* struct data_struct data[2];
* };
*
* Where a modification, which is assumed to be externally serialized, does the
* following:
*
* void latch_modify(struct latch_struct *latch, ...)
* {
* smp_wmb(); <- Ensure that the last data[1] update is visible
* latch->seq++;
* smp_wmb(); <- Ensure that the seqcount update is visible
*
* modify(latch->data[0], ...);
*
* smp_wmb(); <- Ensure that the data[0] update is visible
* latch->seq++;
* smp_wmb(); <- Ensure that the seqcount update is visible
*
* modify(latch->data[1], ...);
* }
*
* The query will have a form like:
*
* struct entry *latch_query(struct latch_struct *latch, ...)
* {
* struct entry *entry;
* unsigned seq, idx;
*
* do {
* seq = lockless_dereference(latch->seq);
*
* idx = seq & 0x01;
* entry = data_query(latch->data[idx], ...);
*
* smp_rmb();
* } while (seq != latch->seq);
*
* return entry;
* }
*
* So during the modification, queries are first redirected to data[1]. Then we
* modify data[0]. When that is complete, we redirect queries back to data[0]
* and we can modify data[1].
*
* NOTE: The non-requirement for atomic modifications does _NOT_ include
* the publishing of new entries in the case where data is a dynamic
* data structure.
*
* An iteration might start in data[0] and get suspended long enough
* to miss an entire modification sequence; once it resumes, it might
* observe the new entry.
*
* NOTE: When data is a dynamic data structure, one should use regular RCU
* patterns to manage the lifetimes of the objects within.
*/
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
340,13 → 266,13
}
 
/**
* write_seqcount_invalidate - invalidate in-progress read-side seq operations
* write_seqcount_barrier - invalidate in-progress read-side seq operations
* @s: pointer to seqcount_t
*
* After write_seqcount_invalidate, no read-side seq operations will complete
* After write_seqcount_barrier, no read-side seq operations will complete
* successfully and see data older than this.
*/
static inline void write_seqcount_invalidate(seqcount_t *s)
static inline void write_seqcount_barrier(seqcount_t *s)
{
smp_wmb();
s->sequence+=2;
/drivers/include/linux/slab.h
18,7 → 18,7
 
/*
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
* The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
*/
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
104,11 → 104,7
(unsigned long)ZERO_SIZE_PTR)
 
void __init kmem_cache_init(void);
bool slab_is_available(void);
 
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(void *));
int slab_is_available(void);
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
124,9 → 120,7
}
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
void *ret = __builtin_malloc(size);
memset(ret, 0, size);
return ret;
return __builtin_malloc(size);
}
 
/**
/drivers/include/linux/stddef.h
3,6 → 3,7
 
#include <uapi/linux/stddef.h>
 
 
#undef NULL
#define NULL ((void *)0)
 
17,14 → 18,4
#else
#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
#endif
 
/**
* offsetofend(TYPE, MEMBER)
*
* @TYPE: The type of the structure
* @MEMBER: The member within the structure to get the end offset of
*/
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
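A small sketch of what offsetofend() buys (struct hdr is hypothetical): it names the first byte past a member, e.g. the size of a structure's leading version-1 portion.

struct hdr {
	u32 magic;	/* bytes 0..3 */
	u16 version;	/* bytes 4..5 */
	u8 payload[64];
};

#define HDR_V1_SIZE offsetofend(struct hdr, version)	/* == 6 */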
 
#endif
/drivers/include/linux/string.h
25,9 → 25,6
#ifndef __HAVE_ARCH_STRLCPY
size_t strlcpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRSCPY
ssize_t __must_check strscpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
#endif
43,6 → 40,9
#ifndef __HAVE_ARCH_STRNCMP
extern int strncmp(const char *,const char *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRNICMP
#define strnicmp strncasecmp
#endif
#ifndef __HAVE_ARCH_STRCASECMP
extern int strcasecmp(const char *s1, const char *s2);
#endif
114,12 → 114,8
extern void * memchr(const void *,int,__kernel_size_t);
#endif
void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *s, char old, char new);
 
extern void kfree_const(const void *x);
 
extern char *kstrdup(const char *s, gfp_t gfp);
extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
 
/drivers/include/linux/time.h
110,19 → 110,6
return true;
}
 
static inline bool timeval_valid(const struct timeval *tv)
{
/* Dates before 1970 are bogus */
if (tv->tv_sec < 0)
return false;
 
/* Can't have more microseconds than a second */
if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
return false;
 
return true;
}
 
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
 
#define CURRENT_TIME (current_kernel_time())
/drivers/include/linux/time64.h
2,7 → 2,6
#define _LINUX_TIME64_H
 
#include <uapi/linux/time.h>
#include <linux/math64.h>
 
typedef __s64 time64_t;
 
12,18 → 11,11
*/
#if __BITS_PER_LONG == 64
# define timespec64 timespec
#define itimerspec64 itimerspec
#else
struct timespec64 {
time64_t tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
};
 
struct itimerspec64 {
struct timespec64 it_interval;
struct timespec64 it_value;
};
 
#endif
 
/* Parameters used to convert the timespec values: */
36,7 → 28,6
#define FSEC_PER_SEC 1000000000000000LL
 
/* Located here for timespec[64]_valid_strict */
#define TIME64_MAX ((s64)~((u64)1 << 63))
#define KTIME_MAX ((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
 
52,16 → 43,6
return ts;
}
 
static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
{
return *its64;
}
 
static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
{
return *its;
}
 
# define timespec64_equal timespec_equal
# define timespec64_compare timespec_compare
# define set_normalized_timespec64 set_normalized_timespec
94,24 → 75,6
return ret;
}
 
static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
{
struct itimerspec ret;
 
ret.it_interval = timespec64_to_timespec(its64->it_interval);
ret.it_value = timespec64_to_timespec(its64->it_value);
return ret;
}
 
static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
{
struct itimerspec64 ret;
 
ret.it_interval = timespec_to_timespec64(its->it_interval);
ret.it_value = timespec_to_timespec64(its->it_value);
return ret;
}
 
static inline int timespec64_equal(const struct timespec64 *a,
const struct timespec64 *b)
{
/drivers/include/linux/types.h
135,25 → 135,26
#endif
 
/*
* The type of an index into the pagecache.
* The type of an index into the pagecache. Use a #define so asm/types.h
* can override it.
*/
#ifndef pgoff_t
#define pgoff_t unsigned long
#endif
 
/*
* A dma_addr_t can hold any valid DMA address, i.e., any address returned
* by the DMA API.
*
* If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32
* bits wide. Bus addresses, e.g., PCI BARs, may be wider than 32 bits,
* but drivers do memory-mapped I/O to ioremapped kernel virtual addresses,
* so they don't care about the size of the actual bus addresses.
*/
/* A dma_addr_t can hold any valid DMA or bus address for the platform */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;
#endif /* dma_addr_t */
 
#ifdef __CHECKER__
#else
#endif
 
#ifdef __CHECK_ENDIAN__
#else
#endif
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
typedef unsigned __bitwise__ oom_flags_t;
205,32 → 206,12
* struct callback_head - callback structure for use with RCU and task_work
* @next: next update requests in a list
* @func: actual update function to call after the grace period.
*
* The struct is aligned to the size of a pointer. On most architectures this
* happens naturally due to ABI requirements, but some architectures (like
* CRIS) have a weird ABI and we need to ask for it explicitly.
*
* The alignment is required to guarantee that bits 0 and 1 of @next will be
* clear under normal conditions -- as long as we use call_rcu(),
* call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue the callback.
*
* This guarantee is important for a few reasons:
* - a future call_rcu_lazy() will make use of lower bits in the pointer;
* - the structure shares storage space in struct page with @compound_head,
* which encodes PageTail() in bit 0. The guarantee is needed to avoid
* false-positive PageTail().
*/
struct callback_head {
struct callback_head *next;
void (*func)(struct callback_head *head);
} __attribute__((aligned(sizeof(void *))));
};
#define rcu_head callback_head
 
typedef void (*rcu_callback_t)(struct rcu_head *head);
typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
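A sketch of a callback with the rcu_callback_t signature (struct foo, foo_free_cb(), and foo_drop() are hypothetical): container_of() recovers the enclosing object from the embedded rcu_head.

struct foo {
	int payload;
	struct rcu_head rcu;
};

static void foo_free_cb(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	kfree(f);
}

static void foo_drop(struct foo *f)
{
	call_rcu(&f->rcu, foo_free_cb);	/* foo_free_cb runs after a grace period */
}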
 
/* clocksource cycle base type */
typedef u64 cycle_t;
 
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
/drivers/include/linux/vgaarb.h
65,13 → 65,8
* out of the arbitration process (and can be safe to take
* interrupts at any time).
*/
#if defined(CONFIG_VGA_ARB)
extern void vga_set_legacy_decoding(struct pci_dev *pdev,
unsigned int decodes);
#else
static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
unsigned int decodes) { };
#endif
 
/**
* vga_get - acquire & lock VGA resources
/drivers/include/linux/dmi.h
74,7 → 74,7
u8 type;
u8 length;
u16 handle;
} __packed;
};
 
struct dmi_device {
struct list_head list;
/drivers/include/linux/compiler-gcc.h
9,24 → 9,10
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
 
 
/* Optimization barrier */
 
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
/*
* This version exists e.g. to prevent dead-store elimination on @ptr,
* where gcc and llvm may behave differently when otherwise using
* normal barrier(): while gcc behavior gets along with a normal
* barrier(), llvm needs an explicit input variable to be assumed
* clobbered. The issue is as follows: while the inline asm might
* access any memory it wants, the compiler could have fit all of
* @ptr into memory registers instead, and since @ptr never escaped
* from that, it proved that the inline asm wasn't touching any of
* it. This version works well with both compilers, i.e. we're telling
* the compiler that the inline asm absolutely may see the contents
* of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
*/
#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
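A sketch of the intended use, in the spirit of memzero_explicit() (handle_secret() is hypothetical): without barrier_data(), the final memset() of a dying local is a dead store the compiler may delete.

static void handle_secret(void)
{
	char key[32];

	/* ... derive and use key ... */
	memset(key, 0, sizeof(key));
	barrier_data(key);	/* the asm may see *key, so the memset must stay */
}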
 
/*
* This macro obfuscates arithmetic on a variable address so that gcc
47,18 → 33,15
* case either is valid.
*/
#define RELOC_HIDE(ptr, off) \
({ \
unsigned long __ptr; \
({ unsigned long __ptr; \
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
(typeof(ptr)) (__ptr + (off)); \
})
(typeof(ptr)) (__ptr + (off)); })
 
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) \
__asm__ ("" : "=r" (var) : "0" (var))
#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
 
#ifdef __CHECKER__
#define __must_be_array(a) 0
#define __must_be_array(arr) 0
#else
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
80,25 → 63,19
#define __inline __inline notrace
#endif
 
#define __always_inline inline __attribute__((always_inline))
#define noinline __attribute__((noinline))
 
#define __deprecated __attribute__((deprecated))
#define __packed __attribute__((packed))
#define __weak __attribute__((weak))
#define __alias(symbol) __attribute__((alias(#symbol)))
 
/*
* it doesn't make sense on ARM (currently the only user of __naked)
* to trace naked functions because then mcount is called without
* stack and frame pointer being set up and there is no chance to
* restore the lr register to the value before mcount was called.
* it doesn't make sense on ARM (currently the only user of __naked) to trace
* naked functions because then mcount is called without stack and frame pointer
* being set up and there is no chance to restore the lr register to the value
* before mcount was called.
*
* The asm() bodies of naked functions often depend on standard calling
* conventions, therefore they must be noinline and noclone.
*
* GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
* See GCC PR44290.
* The asm() bodies of naked functions often depend on standard calling conventions,
* therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce
* this, so we must do so ourselves. See GCC PR44290.
*/
#define __naked __attribute__((naked)) noinline __noclone notrace
 
118,166 → 95,24
#define __aligned(x) __attribute__((aligned(x)))
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __scanf(a, b) __attribute__((format(scanf, a, b)))
#define noinline __attribute__((noinline))
#define __attribute_const__ __attribute__((__const__))
#define __maybe_unused __attribute__((unused))
#define __always_unused __attribute__((unused))
 
/* gcc version specific checks */
#define __gcc_header(x) #x
#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
#define gcc_header(x) _gcc_header(x)
#include gcc_header(__GNUC__)
 
#if GCC_VERSION < 30200
# error Sorry, your compiler is too old - please upgrade it.
#endif
 
#if GCC_VERSION < 30300
# define __used __attribute__((__unused__))
#else
# define __used __attribute__((__used__))
#endif
 
#ifdef CONFIG_GCOV_KERNEL
# if GCC_VERSION < 30400
# error "GCOV profiling support for gcc versions below 3.4 not included"
# endif /* __GNUC_MINOR__ */
#endif /* CONFIG_GCOV_KERNEL */
 
#if GCC_VERSION >= 30400
#define __must_check __attribute__((warn_unused_result))
#endif
 
#if GCC_VERSION >= 40000
 
/* GCC 4.1.[01] miscompiles __weak */
#ifdef __KERNEL__
# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
# error Your version of gcc miscompiles the __weak directive
# endif
#endif
 
#define __used __attribute__((__used__))
#define __compiler_offsetof(a, b) \
__builtin_offsetof(a, b)
 
#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif
 
#if GCC_VERSION >= 40300
/* Mark functions as cold. gcc will assume any path leading to a call
* to them will be unlikely. This means a lot of manual unlikely()s
* are unnecessary now for any paths leading to the usual suspects
* like BUG(), printk(), panic() etc. [but let's keep them for now for
* older compilers]
*
* Early snapshots of gcc 4.3 don't support this and we can't detect this
* in the preprocessor, but we can live with this because they're unreleased.
* Make-time probing would be overkill here.
*
* gcc also has a __attribute__((__hot__)) to move hot functions into
* a special section, but I don't see any sense in this right now in
* the kernel context
*/
#define __cold __attribute__((__cold__))
 
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
 
#ifndef __CHECKER__
# define __compiletime_warning(message) __attribute__((warning(message)))
# define __compiletime_error(message) __attribute__((error(message)))
#endif /* __CHECKER__ */
#endif /* GCC_VERSION >= 40300 */
 
#if GCC_VERSION >= 40500
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
* control elsewhere.
*
* Early snapshots of gcc 4.5 don't support this and we can't detect
* this in the preprocessor, but we can live with this because they're
* unreleased. Really, we need to have autoconf for the kernel.
*/
#define unreachable() __builtin_unreachable()
 
/* Mark a function definition as prohibited from being cloned. */
#define __noclone __attribute__((__noclone__))
 
#endif /* GCC_VERSION >= 40500 */
 
#if GCC_VERSION >= 40600
/*
* When used with Link Time Optimization, gcc can optimize away C functions or
* variables which are referenced only from assembly code. __visible tells the
* optimizer that something else uses this function or variable, thus preventing
* this.
*/
#define __visible __attribute__((externally_visible))
#endif
 
 
#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
/*
* __assume_aligned(n, k): Tell the optimizer that the returned
* pointer can be assumed to be k modulo n. The second argument is
* optional (default 0), so we use a variadic macro to make the
* shorthand.
*
* Beware: Do not apply this to functions which may return
* ERR_PTRs. Also, it is probably unwise to apply it to functions
* returning extra information in the low bits (but in that case the
* compiler should see some alignment anyway, when the return value is
* massaged by 'flags = ptr & 3; ptr &= ~3;').
*/
#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
#endif
 
/*
* GCC 'asm goto' miscompiles certain code sequences:
*
* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
*
* Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
*
* (asm goto is automatically volatile - the naming reflects this.)
*/
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
 
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if GCC_VERSION >= 40400
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#endif
#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
 
#if GCC_VERSION >= 50000
#define KASAN_ABI_VERSION 4
#elif GCC_VERSION >= 40902
#define KASAN_ABI_VERSION 3
#endif
 
#if GCC_VERSION >= 40902
/*
* Tell the compiler that address safety instrumentation (KASAN)
* should not be applied to that function.
* Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
*/
#define __no_sanitize_address __attribute__((no_sanitize_address))
#endif
 
#endif /* gcc version >= 40000 specific checks */
 
#if !defined(__noclone)
#define __noclone /* not needed */
#endif
 
#if !defined(__no_sanitize_address)
#define __no_sanitize_address
#endif
 
/*
* A trick to suppress uninitialized variable warning without generating any
* code
*/
#define uninitialized_var(x) x = x
 
#define __always_inline inline __attribute__((always_inline))
/drivers/include/linux/fb.h
1111,9 → 1111,7
struct fb_videomode *fbmode);
 
/* drivers/video/modedb.c */
#define VESA_MODEDB_SIZE 43
#define DMT_SIZE 0x50
 
#define VESA_MODEDB_SIZE 34
extern void fb_var_to_videomode(struct fb_videomode *mode,
const struct fb_var_screeninfo *var);
extern void fb_videomode_to_var(struct fb_var_screeninfo *var,
1164,17 → 1162,9
u32 flag;
};
 
struct dmt_videomode {
u32 dmt_id;
u32 std_2byte_code;
u32 cvt_3byte_code;
const struct fb_videomode *mode;
};
 
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
extern const struct fb_videomode cea_modes[64];
extern const struct dmt_videomode dmt_modes[];
 
struct fb_modelist {
struct list_head list;
1188,16 → 1178,4
const struct fb_videomode *default_mode,
unsigned int default_bpp);
 
/* Convenience logging macros */
#define fb_err(fb_info, fmt, ...) \
pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_notice(fb_info, fmt, ...) \
	pr_notice("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_warn(fb_info, fmt, ...) \
pr_warn("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_info(fb_info, fmt, ...) \
pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_dbg(fb_info, fmt, ...) \
pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
 
#endif /* _LINUX_FB_H */
/drivers/include/linux/firmware.h
1,11 → 1,10
#ifndef _LINUX_FIRMWARE_H
#define _LINUX_FIRMWARE_H
 
#include <linux/module.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/module.h>
 
#define FW_ACTION_NOHOTPLUG 0
#define FW_ACTION_HOTPLUG 1
/drivers/include/linux/ioport.h
100,7 → 100,6
/* PnP I/O specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IO_16BIT_ADDR (1<<0)
#define IORESOURCE_IO_FIXED (1<<1)
#define IORESOURCE_IO_SPARSE (1<<2)
 
/* PCI ROM control bits (IORESOURCE_BITS) */
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
111,63 → 110,10
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
 
 
/* helpers to define resources */
#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
{ \
.start = (_start), \
.end = (_start) + (_size) - 1, \
.name = (_name), \
.flags = (_flags), \
}
 
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)
#define DEFINE_RES_IO(_start, _size) \
DEFINE_RES_IO_NAMED((_start), (_size), NULL)
 
#define DEFINE_RES_MEM_NAMED(_start, _size, _name) \
DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_MEM)
#define DEFINE_RES_MEM(_start, _size) \
DEFINE_RES_MEM_NAMED((_start), (_size), NULL)
 
#define DEFINE_RES_IRQ_NAMED(_irq, _name) \
DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)
#define DEFINE_RES_IRQ(_irq) \
DEFINE_RES_IRQ_NAMED((_irq), NULL)
 
#define DEFINE_RES_DMA_NAMED(_dma, _name) \
DEFINE_RES_NAMED((_dma), 1, (_name), IORESOURCE_DMA)
#define DEFINE_RES_DMA(_dma) \
DEFINE_RES_DMA_NAMED((_dma), NULL)
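A sketch of the helpers in use (the array, addresses, and IRQ number are hypothetical):

static struct resource demo_resources[] = {
	DEFINE_RES_MEM(0xfed00000, 0x1000),	/* register block */
	DEFINE_RES_IRQ_NAMED(5, "demo-irq"),
};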
 
/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
extern struct resource ioport_resource;
extern struct resource iomem_resource;
 
extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
extern int request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
void release_child_resources(struct resource *new);
extern void reserve_region_with_split(struct resource *root,
resource_size_t start, resource_size_t end,
const char *name);
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern void arch_remove_reservations(struct resource *avail);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
resource_size_t max, resource_size_t align,
resource_size_t (*alignf)(void *,
const struct resource *,
resource_size_t,
resource_size_t),
void *alignf_data);
struct resource *lookup_resource(struct resource *root, resource_size_t start);
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
static inline resource_size_t resource_size(const struct resource *res)
{
return res->end - res->start + 1;
/drivers/include/linux/irqreturn.h
3,7 → 3,7
 
/**
* enum irqreturn
* @IRQ_NONE interrupt was not from this device or was not handled
* @IRQ_NONE interrupt was not from this device
* @IRQ_HANDLED interrupt was handled by this device
* @IRQ_WAKE_THREAD handler requests to wake the handler thread
*/
/drivers/include/linux/math64.h
142,13 → 142,6
}
#endif /* mul_u64_u32_shr */
 
#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */
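A sketch of a typical caller (scale_cycles() and the UQ32.32 ratio are hypothetical): the full 128-bit product is formed, then shifted back down.

static u64 scale_cycles(u64 cycles, u64 ratio_uq32)
{
	return mul_u64_u64_shr(cycles, ratio_uq32, 32);	/* (cycles * ratio) >> 32 */
}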
 
#else
 
#ifndef mul_u64_u32_shr
168,79 → 161,6
}
#endif /* mul_u64_u32_shr */
 
#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
union {
u64 ll;
struct {
#ifdef __BIG_ENDIAN
u32 high, low;
#else
u32 low, high;
#endif
} l;
} rl, rm, rn, rh, a0, b0;
u64 c;
 
a0.ll = a;
b0.ll = b;
 
rl.ll = (u64)a0.l.low * b0.l.low;
rm.ll = (u64)a0.l.low * b0.l.high;
rn.ll = (u64)a0.l.high * b0.l.low;
rh.ll = (u64)a0.l.high * b0.l.high;
 
/*
* Each of these lines computes a 64-bit intermediate result into "c",
* starting at bits 32-95. The low 32-bits go into the result of the
* multiplication, the high 32-bits are carried into the next step.
*/
rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
rh.l.high = (c >> 32) + rh.l.high;
 
/*
* The 128-bit result of the multiplication is in rl.ll and rh.ll,
* shift it right and throw away the high part of the result.
*/
if (shift == 0)
return rl.ll;
if (shift < 64)
return (rl.ll >> shift) | (rh.ll << (64 - shift));
return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */
 
#endif
 
#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
union {
u64 ll;
struct {
#ifdef __BIG_ENDIAN
u32 high, low;
#else
u32 low, high;
#endif
} l;
} u, rl, rh;
 
u.ll = a;
rl.ll = (u64)u.l.low * mul;
rh.ll = (u64)u.l.high * mul + rl.l.high;
 
/* Bits 32-63 of the result will be in rh.l.low. */
rl.l.high = do_div(rh.ll, divisor);
 
/* Bits 0-31 of the result will be in rl.l.low. */
do_div(rl.ll, divisor);
 
rl.l.high = rh.l.low;
return rl.ll;
}
#endif /* mul_u64_u32_div */
 
#endif /* _LINUX_MATH64_H */
/drivers/include/linux/rbtree.h
51,7 → 51,7
 
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
 
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
/* 'empty' nodes are nodes that are known not to be inserted in an rbree */
#define RB_EMPTY_NODE(node) \
((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node) \
85,15 → 85,6
*rb_link = node;
}
 
static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
{
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = NULL;
 
rcu_assign_pointer(*rb_link, node);
}
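For reference, the canonical insert walk that feeds rb_link_node() (struct mynode and mynode_insert() are hypothetical; rb_insert_color() then rebalances the tree):

struct mynode {
	int key;
	struct rb_node rb;
};

static void mynode_insert(struct rb_root *root, struct mynode *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct mynode *cur = rb_entry(*link, struct mynode, rb);

		parent = *link;
		link = new->key < cur->key ? &cur->rb.rb_left
					   : &cur->rb.rb_right;
	}
	rb_link_node(&new->rb, parent, link);
	rb_insert_color(&new->rb, root);
}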
 
#define rb_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
____ptr ? rb_entry(____ptr, type, member) : NULL; \
100,21 → 91,13
})
 
/**
* rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
* given type allowing the backing memory of @pos to be invalidated
* rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
* given type safe against removal of rb_node entry
*
* @pos: the 'type *' to use as a loop cursor.
* @n: another 'type *' to use as temporary storage
* @root: 'rb_root *' of the rbtree.
* @field: the name of the rb_node field within 'type'.
*
* rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
* list_for_each_entry_safe() and allows the iteration to continue independent
* of changes to @pos by the body of the loop.
*
* Note, however, that it cannot handle other modifications that re-order the
* rbtree it is iterating over. This includes calling rb_erase() on @pos, as
* rb_erase() may rebalance the tree, causing us to miss some nodes.
*/
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
/drivers/include/linux/dma_remapping.h
20,15 → 20,7
#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
#define CONTEXT_TT_PASS_THROUGH 2
/* Extended context entry types */
#define CONTEXT_TT_PT_PASID 4
#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5
#define CONTEXT_TT_MASK (7ULL << 2)
 
#define CONTEXT_DINVE (1ULL << 8)
#define CONTEXT_PRS (1ULL << 9)
#define CONTEXT_PASIDE (1ULL << 11)
 
struct intel_iommu;
struct dmar_domain;
struct root_entry;
/drivers/include/linux/dmapool.h
19,12 → 19,6
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle);
 
static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle)
{
return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
}
 
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
 
/*
/drivers/include/linux/poison.h
19,8 → 19,8
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
 
/********** include/linux/timer.h **********/
/*
69,6 → 69,10
#define ATM_POISON_FREE 0x12
#define ATM_POISON 0xdeadbeef
 
/********** net/ **********/
#define NEIGHBOR_DEAD 0xdeadbeef
#define NETFILTER_LINK_POISON 0xdead57ac
 
/********** kernel/mutexes **********/
#define MUTEX_DEBUG_INIT 0x11
#define MUTEX_DEBUG_FREE 0x22
79,4 → 83,7
/********** security/ **********/
#define KEY_DESTROY 0xbd
 
/********** sound/oss/ **********/
#define OSS_POISON_FREE 0xAB
 
#endif
/drivers/include/linux/swab.h
1,8 → 1,284
#ifndef _LINUX_SWAB_H
#define _LINUX_SWAB_H
 
#include <uapi/linux/swab.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/swab.h>
 
/*
* casts are necessary for constants, because we never know for sure
* how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
*/
#define ___constant_swab16(x) ((__u16)( \
(((__u16)(x) & (__u16)0x00ffU) << 8) | \
(((__u16)(x) & (__u16)0xff00U) >> 8)))
 
#define ___constant_swab32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
(((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
(((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
(((__u32)(x) & (__u32)0xff000000UL) >> 24)))
 
#define ___constant_swab64(x) ((__u64)( \
(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56)))
 
#define ___constant_swahw32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \
(((__u32)(x) & (__u32)0xffff0000UL) >> 16)))
 
#define ___constant_swahb32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \
(((__u32)(x) & (__u32)0xff00ff00UL) >> 8)))
 
/*
* Implement the following as inlines, but define the interface using
* macros to allow constant folding when possible:
* ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
*/
 
static inline __attribute_const__ __u16 __fswab16(__u16 val)
{
#ifdef __arch_swab16
return __arch_swab16(val);
#else
return ___constant_swab16(val);
#endif
}
 
static inline __attribute_const__ __u32 __fswab32(__u32 val)
{
#ifdef __arch_swab32
return __arch_swab32(val);
#else
return ___constant_swab32(val);
#endif
}
 
static inline __attribute_const__ __u64 __fswab64(__u64 val)
{
#ifdef __arch_swab64
return __arch_swab64(val);
#elif defined(__SWAB_64_THRU_32__)
__u32 h = val >> 32;
__u32 l = val & ((1ULL << 32) - 1);
return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h)));
#else
return ___constant_swab64(val);
#endif
}
 
static inline __attribute_const__ __u32 __fswahw32(__u32 val)
{
#ifdef __arch_swahw32
return __arch_swahw32(val);
#else
return ___constant_swahw32(val);
#endif
}
 
static inline __attribute_const__ __u32 __fswahb32(__u32 val)
{
#ifdef __arch_swahb32
return __arch_swahb32(val);
#else
return ___constant_swahb32(val);
#endif
}
 
/**
* __swab16 - return a byteswapped 16-bit value
* @x: value to byteswap
*/
#define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
___constant_swab16(x) : \
__fswab16(x))
 
/**
* __swab32 - return a byteswapped 32-bit value
* @x: value to byteswap
*/
#define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swab32(x) : \
__fswab32(x))
 
/**
* __swab64 - return a byteswapped 64-bit value
* @x: value to byteswap
*/
#define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
___constant_swab64(x) : \
__fswab64(x))
 
/**
* __swahw32 - return a word-swapped 32-bit value
* @x: value to wordswap
*
* __swahw32(0x12340000) is 0x00001234
*/
#define __swahw32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swahw32(x) : \
__fswahw32(x))
 
/**
* __swahb32 - return a high and low byte-swapped 32-bit value
* @x: value to byteswap
*
* __swahb32(0x12345678) is 0x34127856
*/
#define __swahb32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swahb32(x) : \
__fswahb32(x))
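A sketch of the dispatch these definitions set up (to_wire() is hypothetical): a literal folds at compile time via the ___constant_* form, while a variable takes the __fswab*() path.

static u32 to_wire(u32 host)
{
	u32 tag = __swab32(0x12345678);	/* constant-folded to 0x78563412 */

	return __swab32(host) ^ tag;	/* runtime __fswab32() */
}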
 
/**
* __swab16p - return a byteswapped 16-bit value from a pointer
* @p: pointer to a naturally-aligned 16-bit value
*/
static inline __u16 __swab16p(const __u16 *p)
{
#ifdef __arch_swab16p
return __arch_swab16p(p);
#else
return __swab16(*p);
#endif
}
 
/**
* __swab32p - return a byteswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*/
static inline __u32 __swab32p(const __u32 *p)
{
#ifdef __arch_swab32p
return __arch_swab32p(p);
#else
return __swab32(*p);
#endif
}
 
/**
* __swab64p - return a byteswapped 64-bit value from a pointer
* @p: pointer to a naturally-aligned 64-bit value
*/
static inline __u64 __swab64p(const __u64 *p)
{
#ifdef __arch_swab64p
return __arch_swab64p(p);
#else
return __swab64(*p);
#endif
}
 
/**
* __swahw32p - return a wordswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahw32() for details of wordswapping.
*/
static inline __u32 __swahw32p(const __u32 *p)
{
#ifdef __arch_swahw32p
return __arch_swahw32p(p);
#else
return __swahw32(*p);
#endif
}
 
/**
* __swahb32p - return a high and low byteswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahb32() for details of high/low byteswapping.
*/
static inline __u32 __swahb32p(const __u32 *p)
{
#ifdef __arch_swahb32p
return __arch_swahb32p(p);
#else
return __swahb32(*p);
#endif
}
 
/**
* __swab16s - byteswap a 16-bit value in-place
* @p: pointer to a naturally-aligned 16-bit value
*/
static inline void __swab16s(__u16 *p)
{
#ifdef __arch_swab16s
__arch_swab16s(p);
#else
*p = __swab16p(p);
#endif
}
/**
* __swab32s - byteswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*/
static inline void __swab32s(__u32 *p)
{
#ifdef __arch_swab32s
__arch_swab32s(p);
#else
*p = __swab32p(p);
#endif
}
 
/**
* __swab64s - byteswap a 64-bit value in-place
* @p: pointer to a naturally-aligned 64-bit value
*/
static inline void __swab64s(__u64 *p)
{
#ifdef __arch_swab64s
__arch_swab64s(p);
#else
*p = __swab64p(p);
#endif
}
 
/**
* __swahw32s - wordswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahw32() for details of wordswapping
*/
static inline void __swahw32s(__u32 *p)
{
#ifdef __arch_swahw32s
__arch_swahw32s(p);
#else
*p = __swahw32p(p);
#endif
}
 
/**
* __swahb32s - high and low byteswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahb32() for details of high and low byte swapping
*/
static inline void __swahb32s(__u32 *p)
{
#ifdef __arch_swahb32s
__arch_swahb32s(p);
#else
*p = __swahb32p(p);
#endif
}
 
#ifdef __KERNEL__
# define swab16 __swab16
# define swab32 __swab32
# define swab64 __swab64
18,4 → 294,6
# define swab64s __swab64s
# define swahw32s __swahw32s
# define swahb32s __swahb32s
#endif /* __KERNEL__ */
 
#endif /* _LINUX_SWAB_H */
/drivers/include/syscall.h
6,14 → 6,6
typedef u32 addr_t;
typedef u32 count_t;
 
typedef struct
{
int width;
int height;
int bpp;
int freq;
}videomode_t;
 
///////////////////////////////////////////////////////////////////////////////
 
#define STDCALL __attribute__ ((stdcall)) __attribute__ ((dllimport))
44,7 → 36,6
void* STDCALL GetDisplay(void)__asm__("GetDisplay");
 
u32 IMPORT GetTimerTicks(void)__asm__("GetTimerTicks");
u64 IMPORT GetClockNs(void)__asm__("GetClockNs");
 
addr_t STDCALL AllocPage(void)__asm__("AllocPage");
addr_t STDCALL AllocPages(count_t count)__asm__("AllocPages");
521,14 → 512,9
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100);
}
 
static inline void __iomem *ioremap_nocache(u32 offset, size_t size)
{
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100);
}
 
static inline void __iomem *ioremap_wc(u32 offset, size_t size)
{
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_WRITEC|0x100);
return (void __iomem*) MapIoMem(offset, size, PG_SW|0x100);
}
 
 
560,7 → 546,7
KernelFree(addr);
}
 
static inline int power_supply_is_system_supplied(void) { return -1; };
static inline int power_supply_is_system_supplied(void) { return -1; }
 
 
#endif
/drivers/include/uapi/drm/drm.h
630,7 → 630,6
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
#define DRM_CAP_CURSOR_HEIGHT 0x9
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
 
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
655,13 → 654,6
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
 
/**
* DRM_CLIENT_CAP_ATOMIC
*
* If set to 1, the DRM core will expose atomic properties to userspace
*/
#define DRM_CLIENT_CAP_ATOMIC 3
 
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
785,9 → 777,6
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)
#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic)
#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob)
#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
 
/**
* Device specific ioctls should only be in their respective headers
/drivers/include/uapi/drm/drm_fourcc.h
34,13 → 34,6
/* color index */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
 
/* 8 bpp Red */
#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
 
/* 16 bpp RG */
#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
 
/* 8 bpp RGB */
#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
116,6 → 109,9
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
 
/* special NV12 tiled format */
#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
 
/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
136,97 → 132,4
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
 
 
/*
* Format Modifiers:
*
* Format modifiers describe, typically, a re-ordering or modification
* of the data in a plane of an FB. This can be used to express tiled/
* swizzled formats, or compression, or a combination of the two.
*
* The upper 8 bits of the format modifier are a vendor-id as assigned
 * below. The lower 56 bits are assigned as the vendor sees fit.
*/
 
/* Vendor Ids: */
#define DRM_FORMAT_MOD_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
#define DRM_FORMAT_MOD_VENDOR_NV 0x03
#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
/* add more to the end as needed */
 
#define fourcc_mod_code(vendor, val) \
((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
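 
Read concretely, the vendor id lands in bits 63:56 and the vendor-defined value in bits 55:0, so fourcc_mod_code(INTEL, 1) evaluates to 0x0100000000000001. An illustrative C11 check (not part of the header):
 
_Static_assert(fourcc_mod_code(INTEL, 1) == 0x0100000000000001ULL,
               "vendor id 0x01 in bits 63:56, vendor value 1 in bits 55:0");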
 
/*
* Format Modifier tokens:
*
* When adding a new token please document the layout with a code comment,
* similar to the fourcc codes above. drm_fourcc.h is considered the
* authoritative source for all of these.
*/
 
/* Intel framebuffer modifiers */
 
/*
* Intel X-tiling layout
*
 * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are 2Kb)
* in row-major layout. Within the tile bytes are laid out row-major, with
* a platform-dependent stride. On top of that the memory can apply
* platform-depending swizzling of some higher address bits into bit6.
*
 * This format is highly platform specific and not useful for cross-driver
* sharing. It exists since on a given platform it does uniquely identify the
* layout in a simple way for i915-specific userspace.
*/
#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
 
/*
* Intel Y-tiling layout
*
 * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are 2Kb)
* in row-major layout. Within the tile bytes are laid out in OWORD (16 bytes)
* chunks column-major, with a platform-dependent height. On top of that the
* memory can apply platform-depending swizzling of some higher address bits
* into bit6.
*
 * This format is highly platform specific and not useful for cross-driver
* sharing. It exists since on a given platform it does uniquely identify the
* layout in a simple way for i915-specific userspace.
*/
#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
 
/*
* Intel Yf-tiling layout
*
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
 * Each group therefore consists of four 256 byte units, which are also laid
 * out as 2x2 column-major.
 * 256 byte units are made out of four 64 byte blocks of pixels, producing
 * either a square block or a 2:1 unit.
 * 64 byte blocks of pixels contain four pixel rows of 16 bytes, where the width
 * in pixels depends on the pixel depth.
*/
#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
 
/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
 * Macroblocks are laid out in a Z-shape, and the pixel data follows the
 * standard NV12 style.
* As for NV12, an image is the result of two frame buffers: one for Y,
* one for the interleaved Cb/Cr components (1/2 the height of the Y buffer).
* Alignment requirements are (for each buffer):
* - multiple of 128 pixels for the width
* - multiple of 32 pixels for the height
*
* For more information: see http://linuxtv.org/downloads/v4l-dvb-apis/re32.html
*/
#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
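 
The alignment rules above can be turned into a small helper; the function name is hypothetical and the sketch covers only the Y plane (the interleaved Cb/Cr plane is half the height).
 
/* Hypothetical helper: size of the Y plane of an NV12MT buffer.
 * Width is padded to a multiple of 128 pixels, height to 32 lines,
 * one byte per Y sample. */
static inline __u32 nv12mt_y_size(__u32 width, __u32 height)
{
    __u32 aligned_w = (width  + 127) & ~(__u32)127;
    __u32 aligned_h = (height +  31) & ~(__u32)31;
    return aligned_w * aligned_h;
}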
 
#endif /* DRM_FOURCC_H */
/drivers/include/uapi/drm/drm_mode.h
105,16 → 105,8
 
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay;
__u16 hsync_start;
__u16 hsync_end;
__u16 htotal;
__u16 hskew;
__u16 vdisplay;
__u16 vsync_start;
__u16 vsync_end;
__u16 vtotal;
__u16 vscan;
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
 
__u32 vrefresh;
 
132,10 → 124,8
__u32 count_crtcs;
__u32 count_connectors;
__u32 count_encoders;
__u32 min_width;
__u32 max_width;
__u32 min_height;
__u32 max_height;
__u32 min_width, max_width;
__u32 min_height, max_height;
};
 
struct drm_mode_crtc {
145,8 → 135,7
__u32 crtc_id; /**< Id */
__u32 fb_id; /**< Id of framebuffer */
 
__u32 x; /**< x Position on the framebuffer */
__u32 y; /**< y Position on the framebuffer */
__u32 x, y; /**< Position on the framebuffer */
 
__u32 gamma_size;
__u32 mode_valid;
164,16 → 153,12
__u32 flags; /* see above flags */
 
/* Signed dest location allows it to be partially off screen */
__s32 crtc_x;
__s32 crtc_y;
__u32 crtc_w;
__u32 crtc_h;
__s32 crtc_x, crtc_y;
__u32 crtc_w, crtc_h;
 
/* Source values are 16.16 fixed point */
__u32 src_x;
__u32 src_y;
__u32 src_h;
__u32 src_w;
__u32 src_x, src_y;
__u32 src_h, src_w;
};
 
struct drm_mode_get_plane {
259,8 → 244,7
__u32 connector_type_id;
 
__u32 connection;
__u32 mm_width; /**< width in millimeters */
__u32 mm_height; /**< height in millimeters */
__u32 mm_width, mm_height; /**< width, height in millimeters */
__u32 subpixel;
 
__u32 pad;
288,13 → 272,6
#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
 
/* the PROP_ATOMIC flag is used to hide properties from userspace that
* is not aware of atomic properties. This is mostly to work around
* older userspace (DDX drivers) that read/write each prop they find,
 * without being aware that this could be triggering a lengthy modeset.
*/
#define DRM_MODE_PROP_ATOMIC 0x80000000
 
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
343,8 → 320,7
 
struct drm_mode_fb_cmd {
__u32 fb_id;
__u32 width;
__u32 height;
__u32 width, height;
__u32 pitch;
__u32 bpp;
__u32 depth;
353,18 → 329,16
};
 
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */
 
struct drm_mode_fb_cmd2 {
__u32 fb_id;
__u32 width;
__u32 height;
__u32 width, height;
__u32 pixel_format; /* fourcc code from drm_fourcc.h */
__u32 flags; /* see above flags */
 
/*
* In case of planar formats, this ioctl allows up to 4
* buffer objects with offsets and pitches per plane.
 * buffer objects with offsets and pitches per plane.
* The pitch and offset order is dictated by the fourcc,
* e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
*
372,21 → 346,13
* followed by an interleaved U/V plane containing
* 8 bit 2x2 subsampled colour difference samples.
*
* So it would consist of Y as offsets[0] and UV as
* offsets[1]. Note that offsets[0] will generally
* be 0 (but this is not required).
*
* To accommodate tiled, compressed, etc formats, a per-plane
* modifier can be specified. The default value of zero
* indicates "native" format as specified by the fourcc.
* Vendor specific modifier token. This allows, for example,
* different tiling/swizzling pattern on different planes.
* See discussion above of DRM_FORMAT_MOD_xxx.
 * So it would consist of Y as offsets[0] and UV as
 * offsets[1]. Note that offsets[0] will generally
 * be 0.
*/
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
__u64 modifier[4]; /* ie, tiling, compressed (per plane) */
};
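 
To make the NV12 description above concrete, user space could fill the request like this; the buffer-object handle and geometry are placeholders.
 
/* Sketch: describe a 1920x1080 NV12 framebuffer stored in one BO
 * (handle 1 is a placeholder). Y plane first, then interleaved UV. */
struct drm_mode_fb_cmd2 req = {
    .width        = 1920,
    .height       = 1080,
    .pixel_format = DRM_FORMAT_NV12,
    .handles      = { 1, 1 },           /* same BO backs both planes */
    .pitches      = { 1920, 1920 },     /* bytes per row, per plane  */
    .offsets      = { 0, 1920 * 1080 }, /* UV plane follows Y plane  */
};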
 
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
553,47 → 519,4
uint32_t handle;
};
 
/* page-flip flags are valid, plus: */
#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
 
#define DRM_MODE_ATOMIC_FLAGS (\
DRM_MODE_PAGE_FLIP_EVENT |\
DRM_MODE_PAGE_FLIP_ASYNC |\
DRM_MODE_ATOMIC_TEST_ONLY |\
DRM_MODE_ATOMIC_NONBLOCK |\
DRM_MODE_ATOMIC_ALLOW_MODESET)
 
struct drm_mode_atomic {
__u32 flags;
__u32 count_objs;
__u64 objs_ptr;
__u64 count_props_ptr;
__u64 props_ptr;
__u64 prop_values_ptr;
__u64 reserved;
__u64 user_data;
};
 
/**
* Create a new 'blob' data property, copying length bytes from data pointer,
* and returning new blob ID.
*/
struct drm_mode_create_blob {
/** Pointer to data to copy. */
__u64 data;
/** Length of data to copy. */
__u32 length;
/** Return: new property ID. */
__u32 blob_id;
};
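 
The intended flow is symmetric: copy length bytes in, receive a blob_id, and later destroy by id. A user-space sketch, assuming a libdrm-style drmIoctl() wrapper and an already-open fd:
 
/* Sketch: create a blob from a mode, then destroy it again.
 * drmIoctl() and fd are assumed (libdrm-style user space). */
struct drm_mode_modeinfo mode;  /* filled in elsewhere */
struct drm_mode_create_blob create = {
    .data   = (__u64)(unsigned long)&mode,
    .length = sizeof(mode),
};
if (drmIoctl(fd, DRM_IOCTL_MODE_CREATEPROPBLOB, &create) == 0) {
    struct drm_mode_destroy_blob destroy = { .blob_id = create.blob_id };
    drmIoctl(fd, DRM_IOCTL_MODE_DESTROYPROPBLOB, &destroy);
}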
 
/**
* Destroy a user-created blob property.
*/
struct drm_mode_destroy_blob {
__u32 blob_id;
};
 
#endif
/drivers/include/uapi/drm/i915_drm.h
171,12 → 171,8
#define I915_BOX_TEXTURE_LOAD 0x8
#define I915_BOX_LOST_CONTEXT 0x10
 
/*
* i915 specific ioctls.
*
* The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
* [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
* against DRM_COMMAND_BASE and should be between [0x0, 0x60).
/* I915 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_I915_INIT 0x00
#define DRM_I915_FLUSH 0x01
228,8 → 224,6
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32
#define DRM_I915_GEM_USERPTR 0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
 
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
274,7 → 268,7
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
281,8 → 275,6
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
 
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
349,20 → 341,9
#define I915_PARAM_HAS_WT 27
#define I915_PARAM_CMD_PARSER_VERSION 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION 30
#define I915_PARAM_HAS_BSD2 31
#define I915_PARAM_REVISION 32
#define I915_PARAM_SUBSLICE_TOTAL 33
#define I915_PARAM_EU_TOTAL 34
#define I915_PARAM_HAS_GPU_RESET 35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
 
typedef struct drm_i915_getparam {
__s32 param;
/*
* WARNING: Using pointers instead of fixed-size u64 means we need to write
* compat32 code. Don't repeat this mistake.
*/
int param;
int __user *value;
} drm_i915_getparam_t;
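 
A user-space sketch of reading one parameter, again assuming a libdrm-style drmIoctl() and an open fd; I915_PARAM_HAS_BLT comes from the same header (not shown in this hunk).
 
/* Sketch: ask the kernel whether the BLT (blitter) ring exists. */
int has_blt = 0;
drm_i915_getparam_t gp = {
    .param = I915_PARAM_HAS_BLT,
    .value = &has_blt,
};
if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && has_blt)
    ;   /* blitter ring available */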
 
507,14 → 488,6
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 addr_ptr;
 
/**
* Flags for extended behaviour.
*
* Added in version 2.
*/
__u64 flags;
#define I915_MMAP_WC 0x1
};
 
struct drm_i915_gem_mmap_gtt {
690,8 → 663,7
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
#define EXEC_OBJECT_NEEDS_GTT (1<<1)
#define EXEC_OBJECT_WRITE (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_SUPPORTS_48B_ADDRESS<<1)
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
__u64 flags;
 
__u64 rsvd1;
765,19 → 737,8
*/
#define I915_EXEC_HANDLE_LUT (1<<12)
 
/** Used for switching BSD rings on the platforms with two BSD rings */
#define I915_EXEC_BSD_MASK (3<<13)
#define I915_EXEC_BSD_DEFAULT (0<<13) /* default ping-pong mode */
#define I915_EXEC_BSD_RING1 (1<<13)
#define I915_EXEC_BSD_RING2 (2<<13)
#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1)
 
/** Tell the kernel that the batchbuffer is processed by
* the resource streamer.
*/
#define I915_EXEC_RESOURCE_STREAMER (1<<15)
 
#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)
 
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
1012,7 → 973,6
/* flags */
#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
struct drm_intel_overlay_attrs {
__u32 flags;
__u32 color_key;
1082,14 → 1042,6
__u64 offset;
__u64 val; /* Return value */
};
/* Known registers:
*
* Render engine timestamp - 0x2358 + 64bit - gen7+
* - Note this register returns an invalid value if using the default
 * single instruction 8byte read; to work around that, use
 * offset (0x2358 | 1) instead.
*
*/
 
struct drm_i915_reset_stats {
__u32 ctx_id;
1121,16 → 1073,6
__u32 handle;
};
 
struct drm_i915_gem_context_param {
__u32 ctx_id;
__u32 size;
__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
__u64 value;
};
 
 
struct drm_i915_mask {
__u32 handle;
__u32 width;
1158,7 → 1100,6
__u32 height;
__u32 bo_pitch;
__u32 bo_map;
__u32 forced;
};
 
 
/drivers/include/uapi/drm/radeon_drm.h
33,7 → 33,7
#ifndef __RADEON_DRM_H__
#define __RADEON_DRM_H__
 
#include "drm.h"
#include <drm/drm.h>
 
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (radeon_sarea.h)
1034,12 → 1034,6
#define RADEON_INFO_VRAM_USAGE 0x1e
#define RADEON_INFO_GTT_USAGE 0x1f
#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
#define RADEON_INFO_CURRENT_GPU_TEMP 0x21
#define RADEON_INFO_CURRENT_GPU_SCLK 0x22
#define RADEON_INFO_CURRENT_GPU_MCLK 0x23
#define RADEON_INFO_READ_REG 0x24
#define RADEON_INFO_VA_UNMAP_WORKING 0x25
#define RADEON_INFO_GPU_RESET_COUNTER 0x26
 
struct drm_radeon_info {
uint32_t request;
/drivers/include/uapi/drm/vmwgfx_drm.h
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
64,7 → 64,6
#define DRM_VMW_GB_SURFACE_CREATE 23
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
 
/*************************************************************************/
/**
89,8 → 88,6
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
#define DRM_VMW_PARAM_SCREEN_TARGET 11
#define DRM_VMW_PARAM_DX 12
 
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
299,7 → 296,7
* Argument to the DRM_VMW_EXECBUF Ioctl.
*/
 
#define DRM_VMW_EXECBUF_VERSION 2
#define DRM_VMW_EXECBUF_VERSION 1
 
struct drm_vmw_execbuf_arg {
uint64_t commands;
308,8 → 305,6
uint64_t fence_rep;
uint32_t version;
uint32_t flags;
uint32_t context_handle;
uint32_t pad64;
};
 
/**
830,6 → 825,7
enum drm_vmw_shader_type {
drm_vmw_shader_type_vs = 0,
drm_vmw_shader_type_ps,
drm_vmw_shader_type_gs
};
 
 
911,8 → 907,6
* @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID
* if none.
* @base_size Size of the base mip level for all faces.
* @array_size Must be zero for non-DX hardware, and if non-zero
* svga3d_flags must have proper bind flags setup.
*
* Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
* Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
925,7 → 919,7
uint32_t multisample_count;
uint32_t autogen_filter;
uint32_t buffer_handle;
uint32_t array_size;
uint32_t pad64;
struct drm_vmw_size base_size;
};
 
1065,28 → 1059,4
uint32_t pad64;
};
 
/*************************************************************************/
/**
* DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
*
* Allocates a device unique context id, and queues a create context command
* for the host. Does not wait for host completion.
*/
enum drm_vmw_extended_context {
drm_vmw_context_legacy,
drm_vmw_context_dx
};
 
/**
* union drm_vmw_extended_context_arg
*
* @req: Context type.
* @rep: Context identifier.
*
* Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
*/
union drm_vmw_extended_context_arg {
enum drm_vmw_extended_context req;
struct drm_vmw_context_arg rep;
};
#endif
/drivers/include/uapi/linux/swab.h
File deleted
/drivers/include/uapi/linux/media-bus-format.h
File deleted
/drivers/include/uapi/asm/ptrace-abi.h
File deleted
/drivers/include/uapi/asm/e820.h
32,18 → 32,7
#define E820_ACPI 3
#define E820_NVS 4
#define E820_UNUSABLE 5
#define E820_PMEM 7
 
/*
* This is a non-standardized way to represent ADR or NVDIMM regions that
* persist over a reboot. The kernel will ignore their special capabilities
* unless the CONFIG_X86_PMEM_LEGACY option is set.
*
* ( Note that older platforms also used 6 for the same type of memory,
* but newer versions switched to 12 as 6 was assigned differently. Some
* time they will learn... )
*/
#define E820_PRAM 12
 
/*
* reserved RAM used by kernel itself
/drivers/include/uapi/asm/msr.h
1,6 → 1,8
#ifndef _UAPI_ASM_X86_MSR_H
#define _UAPI_ASM_X86_MSR_H
 
#include <asm/msr-index.h>
 
#ifndef __ASSEMBLY__
 
#include <linux/types.h>
/drivers/include/uapi/asm/posix_types.h
1,9 → 1,5
#ifndef __KERNEL__
# ifdef __i386__
# ifdef CONFIG_X86_32
# include <asm/posix_types_32.h>
# elif defined(__ILP32__)
# include <asm/posix_types_x32.h>
# else
# include <asm/posix_types_64.h>
# endif
#endif
/drivers/include/uapi/asm/processor-flags.h
37,6 → 37,8
#define X86_EFLAGS_VM _BITUL(X86_EFLAGS_VM_BIT)
#define X86_EFLAGS_AC_BIT 18 /* Alignment Check/Access Control */
#define X86_EFLAGS_AC _BITUL(X86_EFLAGS_AC_BIT)
#define X86_EFLAGS_VIF_BIT 19 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIF _BITUL(X86_EFLAGS_VIF_BIT)
#define X86_EFLAGS_VIP_BIT 20 /* Virtual Interrupt Pending */
/drivers/include/uapi/asm/ptrace.h
1,85 → 1,262
#ifndef _UAPI_ASM_X86_PTRACE_H
#define _UAPI_ASM_X86_PTRACE_H
#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H
 
#include <linux/compiler.h> /* For __user */
#include <asm/ptrace-abi.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <uapi/asm/ptrace.h>
 
 
#ifndef __ASSEMBLY__
 
#ifdef __i386__
/* this struct defines the way the registers are stored on the
stack during a system call. */
 
#ifndef __KERNEL__
 
struct pt_regs {
long ebx;
long ecx;
long edx;
long esi;
long edi;
long ebp;
long eax;
int xds;
int xes;
int xfs;
int xgs;
long orig_eax;
long eip;
int xcs;
long eflags;
long esp;
int xss;
unsigned long bx;
unsigned long cx;
unsigned long dx;
unsigned long si;
unsigned long di;
unsigned long bp;
unsigned long ax;
unsigned long ds;
unsigned long es;
unsigned long fs;
unsigned long gs;
unsigned long orig_ax;
unsigned long ip;
unsigned long cs;
unsigned long flags;
unsigned long sp;
unsigned long ss;
};
 
#endif /* __KERNEL__ */
 
#else /* __i386__ */
 
#ifndef __KERNEL__
 
struct pt_regs {
/*
* C ABI says these regs are callee-preserved. They aren't saved on kernel entry
* unless syscall needs a complete, fully filled "struct pt_regs".
*/
unsigned long r15;
unsigned long r14;
unsigned long r13;
unsigned long r12;
unsigned long rbp;
unsigned long rbx;
/* These regs are callee-clobbered. Always saved on kernel entry. */
unsigned long bp;
unsigned long bx;
/* arguments: non interrupts/non tracing syscalls only save up to here*/
unsigned long r11;
unsigned long r10;
unsigned long r9;
unsigned long r8;
unsigned long rax;
unsigned long rcx;
unsigned long rdx;
unsigned long rsi;
unsigned long rdi;
/*
* On syscall entry, this is syscall#. On CPU exception, this is error code.
* On hw interrupt, it's IRQ number:
*/
unsigned long orig_rax;
/* Return frame for iretq */
unsigned long rip;
unsigned long ax;
unsigned long cx;
unsigned long dx;
unsigned long si;
unsigned long di;
unsigned long orig_ax;
/* end of arguments */
/* cpu exception frame or undefined */
unsigned long ip;
unsigned long cs;
unsigned long eflags;
unsigned long rsp;
unsigned long flags;
unsigned long sp;
unsigned long ss;
/* top of stack page */
};
 
#endif /* __KERNEL__ */
#endif /* !__i386__ */
 
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt_types.h>
#endif
 
struct cpuinfo_x86;
struct task_struct;
 
extern unsigned long profile_pc(struct pt_regs *regs);
#define profile_pc profile_pc
 
extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
int error_code, int si_code);
 
 
extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch);
extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch,
unsigned long phase1_result);
 
extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);
 
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->ax;
}
 
/*
* user_mode_vm(regs) determines whether a register set came from user mode.
* This is true if V8086 mode was enabled OR if the register set was from
* protected mode with RPL-3 CS value. This tricky test checks that with
* one comparison. Many places in the kernel can bypass this full check
* if they have already ruled out V8086 mode, so user_mode(regs) can be used.
*/
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
#else
return !!(regs->cs & 3);
#endif
}
 
static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
USER_RPL;
#else
return user_mode(regs);
#endif
}
 
static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return (regs->flags & X86_VM_MASK);
#else
return 0; /* No V86 mode support in long mode */
#endif
}
 
#ifdef CONFIG_X86_64
static inline bool user_64bit_mode(struct pt_regs *regs)
{
#ifndef CONFIG_PARAVIRT
/*
* On non-paravirt systems, this is the only long mode CPL 3
* selector. We do not allow long mode selectors in the LDT.
*/
return regs->cs == __USER_CS;
#else
/* Headers are too twisted for this to go in paravirt.h. */
return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
}
 
#define current_user_stack_pointer() this_cpu_read(old_rsp)
/* ia32 vs. x32 difference */
#define compat_user_stack_pointer() \
(test_thread_flag(TIF_IA32) \
? current_pt_regs()->sp \
: this_cpu_read(old_rsp))
#endif
 
#ifdef CONFIG_X86_32
extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
#else
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
return regs->sp;
}
#endif
 
#define GET_IP(regs) ((regs)->ip)
#define GET_FP(regs) ((regs)->bp)
#define GET_USP(regs) ((regs)->sp)
 
#include <asm-generic/ptrace.h>
 
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss))
 
/**
* regs_get_register() - get register value from its offset
* @regs: pt_regs from which register value is gotten.
* @offset: offset number of the register.
*
* regs_get_register returns the value of a register. The @offset is the
* offset of the register in struct pt_regs address which specified by @regs.
* If @offset is bigger than MAX_REG_OFFSET, this returns 0.
*/
static inline unsigned long regs_get_register(struct pt_regs *regs,
unsigned int offset)
{
if (unlikely(offset > MAX_REG_OFFSET))
return 0;
#ifdef CONFIG_X86_32
/*
* Traps from the kernel do not save sp and ss.
* Use the helper function to retrieve sp.
*/
if (offset == offsetof(struct pt_regs, sp) &&
regs->cs == __KERNEL_CS)
return kernel_stack_pointer(regs);
#endif
return *(unsigned long *)((unsigned long)regs + offset);
}
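 
For example, combined with the name lookup declared above:
 
/* Illustrative use: resolve a register by name, then fetch it. */
static unsigned long saved_ax(struct pt_regs *regs)
{
	int off = regs_query_register_offset("ax"); /* negative if unknown */
	return (off >= 0) ? regs_get_register(regs, off) : 0;
}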
 
/**
* regs_within_kernel_stack() - check the address in the stack
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
* regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
* If @addr is within the kernel stack, it returns true. If not, returns false.
*/
static inline int regs_within_kernel_stack(struct pt_regs *regs,
unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
 
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
* is specified by @regs. If the @n th entry is NOT in the kernel stack,
* this returns 0.
*/
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return *addr;
else
return 0;
}
 
#define arch_has_single_step() (1)
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step() (1)
#else
#define arch_has_block_step() (boot_cpu_data.x86 >= 6)
#endif
 
#define ARCH_HAS_USER_SINGLE_STEP_INFO
 
/*
* When hitting ptrace_stop(), we cannot return using SYSRET because
* that does not restore the full CPU state, only a minimal set. The
* ptracer can change arbitrary register values, which is usually okay
* because the usual ptrace stops run off the signal delivery path which
* forces IRET; however, ptrace_event() stops happen in arbitrary places
* in the kernel and don't force IRET path.
*
* So force IRET path after a ptrace stop.
*/
#define arch_ptrace_stop_needed(code, info) \
({ \
set_thread_flag(TIF_NOTIFY_RESUME); \
false; \
})
 
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info, int can_allocate);
 
#endif /* !__ASSEMBLY__ */
 
#endif /* _UAPI_ASM_X86_PTRACE_H */
#endif /* _ASM_X86_PTRACE_H */
/drivers/include/uapi/asm/sigcontext.h
1,19 → 1,6
#ifndef _UAPI_ASM_X86_SIGCONTEXT_H
#define _UAPI_ASM_X86_SIGCONTEXT_H
 
/*
* Linux signal context definitions. The sigcontext includes a complex
* hierarchy of CPU and FPU state, available to user-space (on the stack) when
* a signal handler is executed.
*
* As over the years this ABI grew from its very simple roots towards
* supporting more and more CPU state organically, some of the details (which
* were rather clever hacks back in the days) became a bit quirky by today.
*
* The current ABI includes flexible provisions for future extensions, so we
* won't have to grow new quirks for quite some time. Promise!
*/
 
#include <linux/compiler.h>
#include <linux/types.h>
 
22,306 → 9,155
#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2)
 
/*
* Bytes 464..511 in the current 512-byte layout of the FXSAVE/FXRSTOR frame
* are reserved for SW usage. On CPUs supporting XSAVE/XRSTOR, these bytes are
* used to extend the fpstate pointer in the sigcontext, which now includes the
* extended state information along with fpstate information.
 * bytes 464..511 in the current 512-byte layout of fxsave/fxrstor frame
 * are reserved for SW usage. On CPUs supporting xsave/xrstor, these bytes
 * are used to extend the fpstate pointer in the sigcontext, which now
 * includes the extended state information along with fpstate information.
*
* If sw_reserved.magic1 == FP_XSTATE_MAGIC1 then there's a
* sw_reserved.extended_size bytes large extended context area present. (The
* last 32-bit word of this extended area (at the
* fpstate+extended_size-FP_XSTATE_MAGIC2_SIZE address) is set to
* FP_XSTATE_MAGIC2 so that you can sanity check your size calculations.)
*
* This extended area typically grows with newer CPUs that have larger and
* larger XSAVE areas.
* Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved
* area and FP_XSTATE_MAGIC2 at the end of memory layout
* (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the
* extended state information in the memory layout pointed by the fpstate
* pointer in sigcontext.
*/
struct _fpx_sw_bytes {
/*
* If set to FP_XSTATE_MAGIC1 then this is an xstate context.
* 0 if a legacy frame.
__u32 magic1; /* FP_XSTATE_MAGIC1 */
__u32 extended_size; /* total size of the layout referred to by
 * the fpstate pointer in the sigcontext.
*/
__u32 magic1;
 
/*
* Total size of the fpstate area:
*
* - if magic1 == 0 then it's sizeof(struct _fpstate)
* - if magic1 == FP_XSTATE_MAGIC1 then it's sizeof(struct _xstate)
* plus extensions (if any)
__u64 xstate_bv;
/* feature bit mask (including fp/sse/extended
* state) that is present in the memory
* layout.
*/
__u32 extended_size;
 
/*
* Feature bit mask (including FP/SSE/extended state) that is present
* in the memory layout:
__u32 xstate_size; /* actual xsave state size, based on the
* features saved in the layout.
* 'extended_size' will be greater than
* 'xstate_size'.
*/
__u64 xfeatures;
 
/*
* Actual XSAVE state size, based on the xfeatures saved in the layout.
* 'extended_size' is greater than 'xstate_size':
*/
__u32 xstate_size;
 
/* For future use: */
__u32 padding[7];
__u32 padding[7]; /* for future use. */
};
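 
The magic1/magic2 protocol described above can be checked like this; the helper name is hypothetical, and the sketch assumes the 'struct _fpstate' layout with a sw_reserved member as defined later in this header.
 
/* Sketch: does a signal frame carry extended (xsave) state? */
static int frame_has_xstate(const struct _fpstate *fp)
{
	const struct _fpx_sw_bytes *sw = &fp->sw_reserved;
	const __u32 *magic2;
 
	if (sw->magic1 != FP_XSTATE_MAGIC1)
		return 0;	/* legacy frame, no extended area */
 
	/* FP_XSTATE_MAGIC2 must terminate the extended area. */
	magic2 = (const __u32 *)((const char *)fp +
		 sw->extended_size - FP_XSTATE_MAGIC2_SIZE);
	return *magic2 == FP_XSTATE_MAGIC2;
}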
 
#ifdef __i386__
/*
* As documented in the iBCS2 standard:
* As documented in the iBCS2 standard..
*
* The first part of "struct _fpstate" is just the normal i387 hardware setup,
* the extra "status" word is used to save the coprocessor status word before
* entering the handler.
* The first part of "struct _fpstate" is just the normal i387
* hardware setup, the extra "status" word is used to save the
* coprocessor status word before entering the handler.
*
* The FPU state data structure has had to grow to accommodate the extended FPU
* state required by the Streaming SIMD Extensions. There is no documented
* standard to accomplish this at the moment.
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
*
* The FPU state data structure has had to grow to accommodate the
* extended FPU state required by the Streaming SIMD Extensions.
* There is no documented standard to accomplish this at the moment.
*/
 
/* 10-byte legacy floating point register: */
struct _fpreg {
__u16 significand[4];
__u16 exponent;
unsigned short significand[4];
unsigned short exponent;
};
 
/* 16-byte floating point register: */
struct _fpxreg {
__u16 significand[4];
__u16 exponent;
__u16 padding[3];
unsigned short significand[4];
unsigned short exponent;
unsigned short padding[3];
};
 
/* 16-byte XMM register: */
struct _xmmreg {
__u32 element[4];
unsigned long element[4];
};
 
#define X86_FXSR_MAGIC 0x0000
 
/*
* The 32-bit FPU frame:
*/
struct _fpstate_32 {
/* Legacy FPU environment: */
__u32 cw;
__u32 sw;
__u32 tag;
__u32 ipoff;
__u32 cssel;
__u32 dataoff;
__u32 datasel;
struct _fpstate {
/* Regular FPU environment */
unsigned long cw;
unsigned long sw;
unsigned long tag;
unsigned long ipoff;
unsigned long cssel;
unsigned long dataoff;
unsigned long datasel;
struct _fpreg _st[8];
__u16 status;
__u16 magic; /* 0xffff: regular FPU data only */
/* 0x0000: FXSR FPU data */
unsigned short status;
unsigned short magic; /* 0xffff = regular FPU data only */
 
/* FXSR FPU environment */
__u32 _fxsr_env[6]; /* FXSR FPU env is ignored */
__u32 mxcsr;
__u32 reserved;
unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
unsigned long mxcsr;
unsigned long reserved;
struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
struct _xmmreg _xmm[8]; /* First 8 XMM registers */
union {
__u32 padding1[44]; /* Second 8 XMM registers plus padding */
__u32 padding[44]; /* Alias name for old user-space */
};
struct _xmmreg _xmm[8];
unsigned long padding1[44];
 
union {
__u32 padding2[12];
struct _fpx_sw_bytes sw_reserved; /* Potential extended state is encoded here */
unsigned long padding2[12];
struct _fpx_sw_bytes sw_reserved; /* represents the extended
* state info */
};
};
 
#define X86_FXSR_MAGIC 0x0000
 
#ifndef __KERNEL__
/*
* The 64-bit FPU frame. (FXSAVE format and later)
*
* Note1: If sw_reserved.magic1 == FP_XSTATE_MAGIC1 then the structure is
 * larger: 'struct _xstate'. Note that 'struct _xstate' embeds
* 'struct _fpstate' so that you can always assume the _fpstate portion
* exists so that you can check the magic value.
*
* Note2: Reserved fields may someday contain valuable data. Always
* save/restore them when you change signal frames.
* User-space might still rely on the old definition:
*/
struct _fpstate_64 {
struct sigcontext {
unsigned short gs, __gsh;
unsigned short fs, __fsh;
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned long edi;
unsigned long esi;
unsigned long ebp;
unsigned long esp;
unsigned long ebx;
unsigned long edx;
unsigned long ecx;
unsigned long eax;
unsigned long trapno;
unsigned long err;
unsigned long eip;
unsigned short cs, __csh;
unsigned long eflags;
unsigned long esp_at_signal;
unsigned short ss, __ssh;
struct _fpstate __user *fpstate;
unsigned long oldmask;
unsigned long cr2;
};
#endif /* !__KERNEL__ */
 
#else /* __i386__ */
 
/* FXSAVE frame */
/* Note: reserved1/2 may someday contain valuable data. Always save/restore
them when you change signal frames. */
struct _fpstate {
__u16 cwd;
__u16 swd;
/* Note this is not the same as the 32-bit/x87/FSAVE twd: */
__u16 twd;
__u16 twd; /* Note this is not the same as the
32bit/x87/FSAVE twd */
__u16 fop;
__u64 rip;
__u64 rdp;
__u32 mxcsr;
__u32 mxcsr_mask;
__u32 st_space[32]; /* 8x FP registers, 16 bytes each */
__u32 xmm_space[64]; /* 16x XMM registers, 16 bytes each */
__u32 st_space[32]; /* 8*16 bytes for each FP-reg */
__u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
__u32 reserved2[12];
union {
__u32 reserved3[12];
struct _fpx_sw_bytes sw_reserved; /* Potential extended state is encoded here */
struct _fpx_sw_bytes sw_reserved; /* represents the extended
* state information */
};
};
 
#ifdef __i386__
# define _fpstate _fpstate_32
#else
# define _fpstate _fpstate_64
#endif
 
struct _header {
__u64 xfeatures;
__u64 reserved1[2];
__u64 reserved2[5];
};
 
struct _ymmh_state {
/* 16x YMM registers, 16 bytes each: */
__u32 ymmh_space[64];
};
 
#ifndef __KERNEL__
/*
* Extended state pointed to by sigcontext::fpstate.
*
* In addition to the fpstate, information encoded in _xstate::xstate_hdr
* indicates the presence of other extended state information supported
* by the CPU and kernel:
* User-space might still rely on the old definition:
*/
struct _xstate {
struct _fpstate fpstate;
struct _header xstate_hdr;
struct _ymmh_state ymmh;
/* New processor state extensions go here: */
};
 
/*
* The 32-bit signal frame:
*/
struct sigcontext_32 {
__u16 gs, __gsh;
__u16 fs, __fsh;
__u16 es, __esh;
__u16 ds, __dsh;
__u32 di;
__u32 si;
__u32 bp;
__u32 sp;
__u32 bx;
__u32 dx;
__u32 cx;
__u32 ax;
__u32 trapno;
__u32 err;
__u32 ip;
__u16 cs, __csh;
__u32 flags;
__u32 sp_at_signal;
__u16 ss, __ssh;
 
/*
* fpstate is really (struct _fpstate *) or (struct _xstate *)
* depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
* bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
* of extended memory layout. See comments at the definition of
* (struct _fpx_sw_bytes)
*/
__u32 fpstate; /* Zero when no FPU/extended context */
__u32 oldmask;
__u32 cr2;
};
 
/*
* The 64-bit signal frame:
*/
struct sigcontext_64 {
__u64 r8;
__u64 r9;
__u64 r10;
__u64 r11;
__u64 r12;
__u64 r13;
__u64 r14;
__u64 r15;
__u64 di;
__u64 si;
__u64 bp;
__u64 bx;
__u64 dx;
__u64 ax;
__u64 cx;
__u64 sp;
__u64 ip;
__u64 flags;
__u16 cs;
__u16 gs;
__u16 fs;
__u16 __pad0;
__u64 err;
__u64 trapno;
__u64 oldmask;
__u64 cr2;
 
/*
* fpstate is really (struct _fpstate *) or (struct _xstate *)
* depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
* bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
* of extended memory layout. See comments at the definition of
* (struct _fpx_sw_bytes)
*/
__u64 fpstate; /* Zero when no FPU/extended context */
__u64 reserved1[8];
};
 
/*
* Create the real 'struct sigcontext' type:
*/
#ifdef __KERNEL__
# ifdef __i386__
# define sigcontext sigcontext_32
# else
# define sigcontext sigcontext_64
# endif
#endif
 
/*
* The old user-space sigcontext definition, just in case user-space still
* relies on it. The kernel definition (in asm/sigcontext.h) has unified
* field names but otherwise the same layout.
*/
#ifndef __KERNEL__
 
#define _fpstate_ia32 _fpstate_32
#define sigcontext_ia32 sigcontext_32
 
 
# ifdef __i386__
struct sigcontext {
__u16 gs, __gsh;
__u16 fs, __fsh;
__u16 es, __esh;
__u16 ds, __dsh;
__u32 edi;
__u32 esi;
__u32 ebp;
__u32 esp;
__u32 ebx;
__u32 edx;
__u32 ecx;
__u32 eax;
__u32 trapno;
__u32 err;
__u32 eip;
__u16 cs, __csh;
__u32 eflags;
__u32 esp_at_signal;
__u16 ss, __ssh;
struct _fpstate __user *fpstate;
__u32 oldmask;
__u32 cr2;
};
# else /* __x86_64__: */
struct sigcontext {
__u64 r8;
__u64 r9;
__u64 r10;
348,13 → 184,38
__u64 trapno;
__u64 oldmask;
__u64 cr2;
struct _fpstate __user *fpstate; /* Zero when no FPU context */
struct _fpstate __user *fpstate; /* zero when no FPU context */
# ifdef __ILP32__
__u32 __fpstate_pad;
# endif
__u64 reserved1[8];
};
# endif /* __x86_64__ */
#endif /* !__KERNEL__ */
 
#endif /* !__i386__ */
 
struct _xsave_hdr {
__u64 xstate_bv;
__u64 reserved1[2];
__u64 reserved2[5];
};
 
struct _ymmh_state {
/* 16 * 16 bytes for each YMMH-reg */
__u32 ymmh_space[64];
};
 
/*
 * Extended state pointed to by the fpstate pointer in the sigcontext.
* In addition to the fpstate, information encoded in the xstate_hdr
* indicates the presence of other extended state information
* supported by the processor and OS.
*/
struct _xstate {
struct _fpstate fpstate;
struct _xsave_hdr xstate_hdr;
struct _ymmh_state ymmh;
/* new processor state extensions go here */
};
 
#endif /* _UAPI_ASM_X86_SIGCONTEXT_H */
/drivers/include/uapi/asm-generic/errno.h
6,16 → 6,7
#define EDEADLK 35 /* Resource deadlock would occur */
#define ENAMETOOLONG 36 /* File name too long */
#define ENOLCK 37 /* No record locks available */
 
/*
* This error code is special: arch syscall entry code will return
* -ENOSYS if users try to call a syscall that doesn't exist. To keep
* failures of syscalls that really do exist distinguishable from
* failures due to attempts to use a nonexistent syscall, syscall
* implementations should refrain from returning -ENOSYS.
*/
#define ENOSYS 38 /* Invalid system call number */
 
#define ENOSYS 38 /* Function not implemented */
#define ENOTEMPTY 39 /* Directory not empty */
#define ELOOP 40 /* Too many symbolic links encountered */
#define EWOULDBLOCK EAGAIN /* Operation would block */
/drivers/include/asm-generic/atomic-long.h
23,168 → 23,236
typedef atomic64_t atomic_long_t;
 
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
#define ATOMIC_LONG_PFX(x) atomic64 ## x
 
#else
static inline long atomic_long_read(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
typedef atomic_t atomic_long_t;
return (long)atomic64_read(v);
}
 
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
#define ATOMIC_LONG_PFX(x) atomic ## x
static inline void atomic_long_set(atomic_long_t *l, long i)
{
atomic64_t *v = (atomic64_t *)l;
 
#endif
atomic64_set(v, i);
}
 
#define ATOMIC_LONG_READ_OP(mo) \
static inline long atomic_long_read##mo(const atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
return (long)ATOMIC_LONG_PFX(_read##mo)(v); \
static inline void atomic_long_inc(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
atomic64_inc(v);
}
ATOMIC_LONG_READ_OP()
ATOMIC_LONG_READ_OP(_acquire)
 
#undef ATOMIC_LONG_READ_OP
static inline void atomic_long_dec(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
#define ATOMIC_LONG_SET_OP(mo) \
static inline void atomic_long_set##mo(atomic_long_t *l, long i) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
ATOMIC_LONG_PFX(_set##mo)(v, i); \
atomic64_dec(v);
}
ATOMIC_LONG_SET_OP()
ATOMIC_LONG_SET_OP(_release)
 
#undef ATOMIC_LONG_SET_OP
static inline void atomic_long_add(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
#define ATOMIC_LONG_ADD_SUB_OP(op, mo) \
static inline long \
atomic_long_##op##_return##mo(long i, atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v); \
atomic64_add(i, v);
}
ATOMIC_LONG_ADD_SUB_OP(add,)
ATOMIC_LONG_ADD_SUB_OP(add, _relaxed)
ATOMIC_LONG_ADD_SUB_OP(add, _acquire)
ATOMIC_LONG_ADD_SUB_OP(add, _release)
ATOMIC_LONG_ADD_SUB_OP(sub,)
ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed)
ATOMIC_LONG_ADD_SUB_OP(sub, _acquire)
ATOMIC_LONG_ADD_SUB_OP(sub, _release)
 
#undef ATOMIC_LONG_ADD_SUB_OP
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
#define atomic_long_cmpxchg_relaxed(l, old, new) \
(ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
(old), (new)))
#define atomic_long_cmpxchg_acquire(l, old, new) \
(ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
(old), (new)))
#define atomic_long_cmpxchg_release(l, old, new) \
(ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
(old), (new)))
atomic64_sub(i, v);
}
 
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return atomic64_sub_and_test(i, v);
}
 
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return atomic64_dec_and_test(v);
}
 
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return atomic64_inc_and_test(v);
}
 
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return atomic64_add_negative(i, v);
}
 
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return (long)atomic64_add_return(i, v);
}
 
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return (long)atomic64_sub_return(i, v);
}
 
static inline long atomic_long_inc_return(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return (long)atomic64_inc_return(v);
}
 
static inline long atomic_long_dec_return(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return (long)atomic64_dec_return(v);
}
 
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
atomic64_t *v = (atomic64_t *)l;
 
return (long)atomic64_add_unless(v, a, u);
}
 
#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
 
#define atomic_long_cmpxchg(l, old, new) \
(ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
 
#define atomic_long_xchg_relaxed(v, new) \
(ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
#define atomic_long_xchg_acquire(v, new) \
(ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
#define atomic_long_xchg_release(v, new) \
(ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
(atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
(ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
(atomic64_xchg((atomic64_t *)(v), (new)))
 
#else /* BITS_PER_LONG == 64 */
 
typedef atomic_t atomic_long_t;
 
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
static inline long atomic_long_read(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
 
return (long)atomic_read(v);
}
 
static inline void atomic_long_set(atomic_long_t *l, long i)
{
atomic_t *v = (atomic_t *)l;
 
atomic_set(v, i);
}
 
static inline void atomic_long_inc(atomic_long_t *l)
{
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
atomic_t *v = (atomic_t *)l;
 
ATOMIC_LONG_PFX(_inc)(v);
atomic_inc(v);
}
 
static inline void atomic_long_dec(atomic_long_t *l)
{
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
atomic_t *v = (atomic_t *)l;
 
ATOMIC_LONG_PFX(_dec)(v);
atomic_dec(v);
}
 
#define ATOMIC_LONG_OP(op) \
static inline void \
atomic_long_##op(long i, atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
ATOMIC_LONG_PFX(_##op)(i, v); \
static inline void atomic_long_add(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
 
atomic_add(i, v);
}
 
ATOMIC_LONG_OP(add)
ATOMIC_LONG_OP(sub)
ATOMIC_LONG_OP(and)
ATOMIC_LONG_OP(or)
ATOMIC_LONG_OP(xor)
ATOMIC_LONG_OP(andnot)
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
 
#undef ATOMIC_LONG_OP
atomic_sub(i, v);
}
 
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
atomic_t *v = (atomic_t *)l;
 
return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
return atomic_sub_and_test(i, v);
}
 
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
atomic_t *v = (atomic_t *)l;
 
return ATOMIC_LONG_PFX(_dec_and_test)(v);
return atomic_dec_and_test(v);
}
 
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
atomic_t *v = (atomic_t *)l;
 
return ATOMIC_LONG_PFX(_inc_and_test)(v);
return atomic_inc_and_test(v);
}
 
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
atomic_t *v = (atomic_t *)l;
 
return ATOMIC_LONG_PFX(_add_negative)(i, v);
return atomic_add_negative(i, v);
}
 
#define ATOMIC_LONG_INC_DEC_OP(op, mo) \
static inline long \
atomic_long_##op##_return##mo(atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v); \
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
 
return (long)atomic_add_return(i, v);
}
ATOMIC_LONG_INC_DEC_OP(inc,)
ATOMIC_LONG_INC_DEC_OP(inc, _relaxed)
ATOMIC_LONG_INC_DEC_OP(inc, _acquire)
ATOMIC_LONG_INC_DEC_OP(inc, _release)
ATOMIC_LONG_INC_DEC_OP(dec,)
ATOMIC_LONG_INC_DEC_OP(dec, _relaxed)
ATOMIC_LONG_INC_DEC_OP(dec, _acquire)
ATOMIC_LONG_INC_DEC_OP(dec, _release)
 
#undef ATOMIC_LONG_INC_DEC_OP
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
 
return (long)atomic_sub_return(i, v);
}
 
static inline long atomic_long_inc_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
 
return (long)atomic_inc_return(v);
}
 
static inline long atomic_long_dec_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
 
return (long)atomic_dec_return(v);
}
 
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
atomic_t *v = (atomic_t *)l;
 
return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u);
return (long)atomic_add_unless(v, a, u);
}
 
#define atomic_long_inc_not_zero(l) \
ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
 
#define atomic_long_cmpxchg(l, old, new) \
(atomic_cmpxchg((atomic_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
(atomic_xchg((atomic_t *)(v), (new)))
 
#endif /* BITS_PER_LONG == 64 */
 
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
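 
Either branch yields the same API, so callers stay word-size agnostic; a minimal usage sketch:
 
/* Sketch: a word-sized reference count using the wrappers above. */
static atomic_long_t refs = ATOMIC_LONG_INIT(1);
 
static void ref_get(void) { atomic_long_inc(&refs); }
static int  ref_put(void) { return atomic_long_dec_and_test(&refs); }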
/drivers/include/asm-generic/memory_model.h
69,12 → 69,6
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
 
/*
* Convert a physical address to a Page Frame Number and back
*/
#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
#define __pfn_to_phys(pfn) PFN_PHYS(pfn)
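 
For example, with 4 KiB pages the round trip drops and restores the low 12 bits:
 
/* Illustration with PAGE_SHIFT == 12:
 *   __phys_to_pfn(0x12345678) == 0x12345
 *   __pfn_to_phys(0x12345)    == 0x12345000
 */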
 
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
 
/drivers/include/drm/i915_component.h
File deleted
/drivers/include/drm/drm_panel.h
File deleted
/drivers/include/drm/drm_mipi_dsi.h
File deleted
/drivers/include/drm/drmP.h
45,6 → 45,8
#include <linux/mm.h>
 
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/sched.h>
108,12 → 110,6
* PRIME: used in the prime code.
* This is the category used by the DRM_DEBUG_PRIME() macro.
*
* ATOMIC: used in the atomic code.
* This is the category used by the DRM_DEBUG_ATOMIC() macro.
*
* VBL: used for verbose debug message in the vblank code
* This is the category used by the DRM_DEBUG_VBL() macro.
*
* Enabling verbose debug messages is done through the drm.debug parameter,
* each category being enabled by a bit.
*
121,7 → 117,7
* drm.debug=0x2 will enable DRIVER messages
* drm.debug=0x3 will enable CORE and DRIVER messages
* ...
* drm.debug=0x3f will enable all messages
* drm.debug=0xf will enable all messages
*
* An interesting feature is that it's possible to enable verbose logging at
* run-time by echoing the debug value in its sysfs node:
131,8 → 127,6
#define DRM_UT_DRIVER 0x02
#define DRM_UT_KMS 0x04
#define DRM_UT_PRIME 0x08
#define DRM_UT_ATOMIC 0x10
#define DRM_UT_VBL 0x20
 
extern __printf(2, 3)
void drm_ut_debug_printk(const char *function_name,
155,8 → 149,6
#define DRIVER_MODESET 0x2000
#define DRIVER_PRIME 0x4000
#define DRIVER_RENDER 0x8000
#define DRIVER_ATOMIC 0x10000
#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
 
/***********************************************************************/
/** \name Macros to make printk easier */
169,7 → 161,7
* \param arg arguments
*/
#define DRM_ERROR(fmt, ...) \
printk("DRM Error" fmt, ##__VA_ARGS__)
drm_err(fmt, ##__VA_ARGS__)
 
/**
* Rate limited error output. Like DRM_ERROR() but won't flood the log.
217,23 → 209,13
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \
} while (0)
#define DRM_DEBUG_ATOMIC(fmt, args...) \
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \
} while (0)
#define DRM_DEBUG_VBL(fmt, args...) \
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \
} while (0)
 
#else
#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
#define DRM_DEBUG_KMS(fmt, args...) do { } while (0)
#define DRM_DEBUG_PRIME(fmt, args...) do { } while (0)
#define DRM_DEBUG(fmt, arg...) do { } while (0)
#define DRM_DEBUG_ATOMIC(fmt, args...) do { } while (0)
#define DRM_DEBUG_VBL(fmt, args...) do { } while (0)
#endif
 
/*@}*/
 
/***********************************************************************/
270,6 → 252,7
unsigned int cmd;
int flags;
drm_ioctl_t *func;
unsigned int cmd_drv;
const char *name;
};
 
279,12 → 262,7
*/
 
#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \
.cmd = DRM_IOCTL_##ioctl, \
.func = _func, \
.flags = _flags, \
.name = #ioctl \
}
[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
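 
Drivers use the macro to build their ioctl tables; a sketch with a hypothetical handler name:
 
/* Sketch: one driver ioctl table entry (my_getparam is hypothetical). */
static const struct drm_ioctl_desc my_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, my_getparam,
			  DRM_AUTH | DRM_RENDER_ALLOW),
};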
 
/* Event queued up for userspace to read */
struct drm_pending_event {
314,10 → 292,7
* in the plane list
*/
unsigned universal_planes:1;
/* true if client understands atomic properties */
unsigned atomic:1;
struct list_head lhead;
struct drm_minor *minor;
unsigned long lock_count;
 
/** Mapping of mm object handles to object pointers. */
338,10 → 313,6
struct list_head fbs;
struct mutex fbs_lock;
 
/** User-created blob properties; this retains a reference on the
* property. */
struct list_head blobs;
 
wait_queue_head_t event_wait;
struct list_head event_list;
int event_space;
369,7 → 340,8
* @minor: Link back to minor char device we are master for. Immutable.
* @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
* @unique_len: Length of unique field. Protected by drm_global_mutex.
* @magic_map: Map of used authentication tokens. Protected by struct_mutex.
* @magiclist: Hash of used authentication tokens. Protected by struct_mutex.
* @magicfree: List of used authentication tokens. Protected by struct_mutex.
* @lock: DRI lock information.
* @driver_priv: Pointer to driver-private information.
*/
378,7 → 350,8
struct drm_minor *minor;
char *unique;
int unique_len;
struct idr magic_map;
struct drm_open_hash magiclist;
struct list_head magicfree;
struct drm_lock_data lock;
void *driver_priv;
};
410,7 → 383,7
/**
* get_vblank_counter - get raw hardware vblank counter
* @dev: DRM device
* @pipe: counter to fetch
* @crtc: counter to fetch
*
* Driver callback for fetching a raw hardware vblank counter for @crtc.
* If a device doesn't have a hardware counter, the driver can simply
424,12 → 397,12
* RETURNS
* Raw vblank counter value.
*/
u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
 
/**
* enable_vblank - enable vblank interrupt events
* @dev: DRM device
* @pipe: which irq to enable
* @crtc: which irq to enable
*
* Enable vblank interrupts for @crtc. If the device doesn't have
* a hardware vblank counter, this routine should be a no-op, since
439,33 → 412,19
* Zero on success, appropriate errno if the given @crtc's vblank
* interrupt cannot be enabled.
*/
int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
int (*enable_vblank) (struct drm_device *dev, int crtc);
 
/**
* disable_vblank - disable vblank interrupt events
* @dev: DRM device
* @pipe: which irq to enable
* @crtc: which irq to enable
*
* Disable vblank interrupts for @crtc. If the device doesn't have
* a hardware vblank counter, this routine should be a no-op, since
* interrupts will have to stay on to keep the count accurate.
*/
void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
 
void (*disable_vblank) (struct drm_device *dev, int crtc);
/**
* Called by \c drm_device_is_agp. Typically used to determine if a
* card is really attached to AGP or not.
*
* \param dev DRM device handle
*
* \returns
* One of three values is returned depending on whether or not the
* card is absolutely \b not AGP (return of 0), absolutely \b is AGP
* (return of 1), or may or may not be AGP (return of 2).
*/
int (*device_is_agp) (struct drm_device *dev);
 
/**
* Called by vblank timestamping code.
*
* Return the current display scanout position from a crtc, and an
472,7 → 431,7
* optional accurate ktime_get timestamp of when position was measured.
*
* \param dev DRM device.
* \param pipe Id of the crtc to query.
* \param crtc Id of the crtc to query.
* \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
* \param *vpos Target location for current vertical scanout position.
* \param *hpos Target location for current horizontal scanout position.
480,7 → 439,6
* scanout position query. Can be NULL to skip timestamp.
* \param *etime Target location for timestamp taken immediately after
* scanout position query. Can be NULL to skip timestamp.
* \param mode Current display timings.
*
* Returns vpos as a positive number while in active scanout area.
* Returns vpos as a negative number inside vblank, counting the number
496,10 → 454,10
* but unknown small number of scanlines wrt. real scanout position.
*
*/
int (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
unsigned int flags, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
int (*get_scanout_position) (struct drm_device *dev, int crtc,
unsigned int flags,
int *vpos, int *hpos, void *stime,
void *etime);
 
/**
* Called by \c drm_get_last_vbltimestamp. Should return a precise
515,7 → 473,7
* to the OpenML OML_sync_control extension specification.
*
* \param dev dev DRM device handle.
* \param pipe crtc for which timestamp should be returned.
* \param crtc crtc for which timestamp should be returned.
* \param *max_error Maximum allowable timestamp error in nanoseconds.
* Implementation should strive to provide timestamp
* with an error of at most *max_error nanoseconds.
531,7 → 489,7
* negative number on failure. A positive status code on success,
* which describes how the vblank_time timestamp was computed.
*/
int (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
int *max_error,
struct timeval *vblank_time,
unsigned flags);
590,7 → 548,7
struct drm_minor {
int index; /**< Minor device number */
int type; /**< Control or render */
struct device *kdev; /**< Linux device */
// struct device kdev; /**< Linux device */
struct drm_device *dev;
 
struct dentry *debugfs_root;
600,12 → 558,13
 
/* currently active master for this node. Protected by master_mutex */
struct drm_master *master;
struct drm_mode_group mode_group;
};
 
 
struct drm_pending_vblank_event {
struct drm_pending_event base;
unsigned int pipe;
int pipe;
struct drm_event_vblank event;
};
 
612,21 → 571,15
struct drm_vblank_crtc {
struct drm_device *dev; /* pointer to the drm_device */
wait_queue_head_t queue; /**< VBLANK wait queue */
struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */
struct timer_list disable_timer; /* delayed disable timer */
 
/* vblank counter, protected by dev->vblank_time_lock for writes */
u32 count;
/* vblank timestamps, protected by dev->vblank_time_lock for writes */
struct timeval time[DRM_VBLANKTIME_RBSIZE];
 
atomic_t count; /**< number of VBLANK interrupts */
	atomic_t refcount;		/* number of users of vblank interrupts per crtc */
u32 last; /* protected by dev->vbl_lock, used */
/* for wraparound handling */
u32 last_wait; /* Last vblank seqno waited per CRTC */
unsigned int inmodeset; /* Display driver is setting mode */
unsigned int pipe; /* crtc index */
int framedur_ns; /* frame/field duration in ns */
int linedur_ns; /* line duration in ns */
int crtc; /* crtc index */
bool enabled; /* so we don't call enable more than
once per disable */
};
645,9 → 598,7
struct device *dev; /**< Device structure of bus-device */
struct drm_driver *driver; /**< DRM driver managing the device */
void *dev_private; /**< DRM driver private data */
struct drm_minor *control; /**< Control node */
struct drm_minor *primary; /**< Primary node */
struct drm_minor *render; /**< Render node */
atomic_t unplugged; /**< Flag whether dev is dead */
/** \name Locks */
/*@{ */
687,6 → 638,8
 
/** \name Context support */
/*@{ */
bool irq_enabled; /**< True if irq handler is enabled */
int irq;
 
__volatile__ long context_flag; /**< Context swapping flag */
int last_context; /**< Last current context */
694,8 → 647,6
 
/** \name VBLANK IRQ support */
/*@{ */
bool irq_enabled;
int irq;
 
/*
* At load time, disabling the vblank interrupt won't be allowed since
783,7 → 734,6
/*@{*/
 
/* Driver support (drm_drv.h) */
extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
extern long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
795,7 → 745,6
extern ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
extern int drm_release(struct inode *inode, struct file *filp);
extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
 
/* Mapping support (drm_vm.h) */
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
803,8 → 752,6
/* Misc. IOCTL support (drm_ioctl.c) */
int drm_noop(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_invalid_op(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
/* Cache management (drm_cache.c) */
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
820,39 → 767,32
extern int drm_irq_install(struct drm_device *dev, int irq);
extern int drm_irq_uninstall(struct drm_device *dev);
 
extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct timeval *vblanktime);
extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
struct timeval *vblanktime);
extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
struct drm_pending_vblank_event *e);
extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
extern void drm_wait_one_vblank(struct drm_device *dev, int crtc);
extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_on(struct drm_device *dev, int crtc);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
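
/*
 * Hedged usage sketch for the reference counting above: take a vblank
 * reference so the interrupt stays enabled, wait one frame, then drop
 * the reference so the core may disable the interrupt again.
 */
static inline int example_wait_next_vblank(struct drm_device *dev, int crtc)
{
	int ret = drm_vblank_get(dev, crtc);

	if (ret)
		return ret;	/* vblank support not initialized */

	drm_wait_one_vblank(dev, crtc);
	drm_vblank_put(dev, crtc);
	return 0;
}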
 
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
unsigned int pipe, int *max_error,
int crtc, int *max_error,
struct timeval *vblank_time,
unsigned flags,
const struct drm_crtc *refcrtc,
const struct drm_display_mode *mode);
extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
870,8 → 810,8
}
 
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
 
/* Stub support (drm_stub.h) */
extern struct drm_master *drm_master_get(struct drm_master *master);
880,7 → 820,6
extern void drm_put_dev(struct drm_device *dev);
extern void drm_unplug_dev(struct drm_device *dev);
extern unsigned int drm_debug;
extern bool drm_atomic;
 
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
/drivers/include/drm/drm_agpsupport.h
12,6 → 12,9
struct drm_device;
struct drm_file;
 
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && \
defined(MODULE)))
 
struct drm_agp_head {
struct agp_kern_info agp_info;
struct list_head memory;
25,7 → 28,7
unsigned long page_mask;
};
 
#if IS_ENABLED(CONFIG_AGP)
#if __OS_HAS_AGP
 
void drm_free_agp(struct agp_memory * handle, int pages);
int drm_bind_agp(struct agp_memory * handle, unsigned int start);
63,7 → 66,7
int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
#else /* CONFIG_AGP */
#else /* __OS_HAS_AGP */
 
static inline void drm_free_agp(struct agp_memory * handle, int pages)
{
102,11 → 105,23
return -ENODEV;
}
 
static inline int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
static inline int drm_agp_release(struct drm_device *dev)
{
return -ENODEV;
}
 
static inline int drm_agp_release_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
static inline int drm_agp_enable(struct drm_device *dev,
struct drm_agp_mode mode)
{
113,6 → 128,12
return -ENODEV;
}
 
static inline int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
static inline int drm_agp_info(struct drm_device *dev,
struct drm_agp_info *info)
{
119,6 → 140,12
return -ENODEV;
}
 
static inline int drm_agp_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
static inline int drm_agp_alloc(struct drm_device *dev,
struct drm_agp_buffer *request)
{
125,6 → 152,12
return -ENODEV;
}
 
static inline int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
static inline int drm_agp_free(struct drm_device *dev,
struct drm_agp_buffer *request)
{
131,6 → 164,12
return -ENODEV;
}
 
static inline int drm_agp_free_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
static inline int drm_agp_unbind(struct drm_device *dev,
struct drm_agp_binding *request)
{
137,6 → 176,12
return -ENODEV;
}
 
static inline int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
static inline int drm_agp_bind(struct drm_device *dev,
struct drm_agp_binding *request)
{
143,6 → 188,12
return -ENODEV;
}
 
#endif /* CONFIG_AGP */
static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
#endif /* __OS_HAS_AGP */
 
#endif /* _DRM_AGPSUPPORT_H_ */
/drivers/include/drm/drm_atomic.h
35,89 → 35,19
void drm_atomic_state_clear(struct drm_atomic_state *state);
void drm_atomic_state_free(struct drm_atomic_state *state);
 
int __must_check
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state);
void drm_atomic_state_default_clear(struct drm_atomic_state *state);
void drm_atomic_state_default_release(struct drm_atomic_state *state);
 
struct drm_crtc_state * __must_check
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc);
int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *state, struct drm_property *property,
uint64_t val);
struct drm_plane_state * __must_check
drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane);
int drm_atomic_plane_set_property(struct drm_plane *plane,
struct drm_plane_state *state, struct drm_property *property,
uint64_t val);
struct drm_connector_state * __must_check
drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector);
int drm_atomic_connector_set_property(struct drm_connector *connector,
struct drm_connector_state *state, struct drm_property *property,
uint64_t val);
 
/**
* drm_atomic_get_existing_crtc_state - get crtc state, if it exists
* @state: global atomic state object
* @crtc: crtc to grab
*
* This function returns the crtc state for the given crtc, or NULL
* if the crtc is not part of the global atomic state.
*/
static inline struct drm_crtc_state *
drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
return state->crtc_states[drm_crtc_index(crtc)];
}
 
/**
* drm_atomic_get_existing_plane_state - get plane state, if it exists
* @state: global atomic state object
* @plane: plane to grab
*
* This function returns the plane state for the given plane, or NULL
* if the plane is not part of the global atomic state.
*/
static inline struct drm_plane_state *
drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane)
{
return state->plane_states[drm_plane_index(plane)];
}
 
/**
* drm_atomic_get_existing_connector_state - get connector state, if it exists
* @state: global atomic state object
* @connector: connector to grab
*
* This function returns the connector state for the given connector,
* or NULL if the connector is not part of the global atomic state.
*/
static inline struct drm_connector_state *
drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector)
{
int index = drm_connector_index(connector);
 
if (index >= state->num_connector)
return NULL;
 
return state->connector_states[index];
}
 
int __must_check
drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
struct drm_display_mode *mode);
int __must_check
drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
struct drm_property_blob *blob);
int __must_check
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
struct drm_crtc *crtc);
drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
struct drm_plane *plane, struct drm_crtc *crtc);
void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb);
int __must_check
126,10 → 56,6
int __must_check
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
struct drm_crtc *crtc);
int __must_check
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc);
 
int
drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc);
136,42 → 62,8
 
void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
 
void
drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
 
int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
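
/*
 * Hedged sketch of the flow these declarations imply: build a state
 * (drm_atomic_state_alloc() plus an acquire context, omitted here),
 * pull in and modify the object states of interest, then commit.
 */
static inline int example_disable_crtc(struct drm_atomic_state *state,
				       struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	crtc_state->enable = false;	/* gates all other CRTC state */

	return drm_atomic_commit(state);	/* checks, then commits */
}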
 
#define for_each_connector_in_state(state, connector, connector_state, __i) \
for ((__i) = 0; \
(__i) < (state)->num_connector && \
((connector) = (state)->connectors[__i], \
(connector_state) = (state)->connector_states[__i], 1); \
(__i)++) \
if (connector)
 
#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \
for ((__i) = 0; \
(__i) < (state)->dev->mode_config.num_crtc && \
((crtc) = (state)->crtcs[__i], \
(crtc_state) = (state)->crtc_states[__i], 1); \
(__i)++) \
if (crtc_state)
 
#define for_each_plane_in_state(state, plane, plane_state, __i) \
for ((__i) = 0; \
(__i) < (state)->dev->mode_config.num_total_plane && \
((plane) = (state)->planes[__i], \
(plane_state) = (state)->plane_states[__i], 1); \
(__i)++) \
if (plane_state)
static inline bool
drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
{
return state->mode_changed || state->active_changed ||
state->connectors_changed;
}
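
/*
 * Hedged usage sketch for the iterators and the predicate above: count
 * the CRTCs in an atomic state that require a full modeset.
 */
static inline int example_count_modesets(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, n = 0;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			n++;
	}

	return n;
}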
 
 
#endif /* DRM_ATOMIC_H_ */
/drivers/include/drm/drm_atomic_helper.h
30,12 → 30,6
 
#include <drm/drm_crtc.h>
 
struct drm_atomic_state;
 
int drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_commit(struct drm_device *dev,
45,23 → 39,17
void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
struct drm_atomic_state *old_state);
 
void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
struct drm_atomic_state *old_state);
 
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
struct drm_atomic_state *old_state);
 
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_planes(struct drm_device *dev,
struct drm_atomic_state *state,
bool active_only);
struct drm_atomic_state *state);
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
struct drm_atomic_state *old_state);
void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state);
 
void drm_atomic_helper_swap_state(struct drm_device *dev,
struct drm_atomic_state *state);
75,11 → 63,7
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
int drm_atomic_helper_disable_plane(struct drm_plane *plane);
int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
struct drm_plane_state *plane_state);
int drm_atomic_helper_set_config(struct drm_mode_set *set);
int __drm_atomic_helper_set_config(struct drm_mode_set *set,
struct drm_atomic_state *state);
 
int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
struct drm_property *property,
94,42 → 78,23
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags);
int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
int mode);
 
/* default implementations for state handling */
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
struct drm_crtc_state *
drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
 
void drm_atomic_helper_plane_reset(struct drm_plane *plane);
void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
struct drm_plane_state *state);
struct drm_plane_state *
drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
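
/*
 * Hedged sketch: the usual way to route the legacy plane entry points
 * through the atomic helpers is a funcs table like this (the table
 * name is illustrative; drm_plane_cleanup() comes from drm_crtc.h).
 */
static const struct drm_plane_funcs example_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};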
 
void drm_atomic_helper_connector_reset(struct drm_connector *connector);
void
__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
struct drm_connector_state *state);
struct drm_connector_state *
drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
void
__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
 
158,41 → 123,4
#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
 
/*
* drm_atomic_plane_disabling - check whether a plane is being disabled
* @plane: plane object
* @old_state: previous atomic state
*
* Checks the atomic state of a plane to determine whether it's being disabled
* or not. This also WARNs if it detects an invalid state (both CRTC and FB
* need to either both be NULL or both be non-NULL).
*
* RETURNS:
* True if the plane is being disabled, false otherwise.
*/
static inline bool
drm_atomic_plane_disabling(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
/*
* When disabling a plane, CRTC and FB should always be NULL together.
* Anything else should be considered a bug in the atomic core, so we
* gently warn about it.
*/
WARN_ON((plane->state->crtc == NULL && plane->state->fb != NULL) ||
(plane->state->crtc != NULL && plane->state->fb == NULL));
 
/*
* When using the transitional helpers, old_state may be NULL. If so,
* we know nothing about the current state and have to assume that it
* might be enabled.
*
* When using the atomic helpers, old_state won't be NULL. Therefore
* this check assumes that either the driver will have reconstructed
* the correct state in ->reset() or that the driver will have taken
* appropriate measures to disable all planes.
*/
return (!old_state || old_state->crtc) && !plane->state->crtc;
}
 
#endif /* DRM_ATOMIC_HELPER_H_ */
/drivers/include/drm/drm_crtc.h
31,7 → 31,6
#include <linux/idr.h>
#include <linux/fb.h>
#include <linux/hdmi.h>
#include <linux/media-bus-format.h>
#include <uapi/drm/drm_mode.h>
#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
53,6 → 52,7
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd
#define DRM_MODE_OBJECT_ANY 0
 
struct drm_mode_object {
63,16 → 63,8
 
#define DRM_OBJECT_MAX_PROPERTY 24
struct drm_object_properties {
int count, atomic_count;
/* NOTE: if we ever start dynamically destroying properties (ie.
* not at drm_mode_config_cleanup() time), then we'd have to do
* a better job of detaching property from mode objects to avoid
* dangling property pointers:
*/
struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
/* do not read/write values directly, but use drm_object_property_get_value()
* and drm_object_property_set_value():
*/
int count;
uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
uint64_t values[DRM_OBJECT_MAX_PROPERTY];
};
 
86,12 → 78,10
}
 
/* rotation property bits */
#define DRM_ROTATE_MASK 0x0f
#define DRM_ROTATE_0 0
#define DRM_ROTATE_90 1
#define DRM_ROTATE_180 2
#define DRM_ROTATE_270 3
#define DRM_REFLECT_MASK (~DRM_ROTATE_MASK)
#define DRM_REFLECT_X 4
#define DRM_REFLECT_Y 5
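
/*
 * Hedged illustration: rotation property values are bitmasks built
 * from the bit indices above, e.g. a 90 degree rotation combined with
 * a reflection along the X axis.
 */
static inline unsigned int example_rotation_90_reflect_x(void)
{
	return (1 << DRM_ROTATE_90) | (1 << DRM_REFLECT_X);
}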
 
141,9 → 131,6
enum subpixel_order subpixel_order;
u32 color_formats;
 
const u32 *bus_formats;
unsigned int num_bus_formats;
 
/* Mask of supported hdmi deep color modes */
u8 edid_hdmi_dc_modes;
 
203,7 → 190,6
const struct drm_framebuffer_funcs *funcs;
unsigned int pitches[4];
unsigned int offsets[4];
uint64_t modifier[4];
unsigned int width;
unsigned int height;
/* depth can be 15 or 16 */
212,14 → 198,13
int flags;
uint32_t pixel_format; /* fourcc format */
struct list_head filp_head;
/* if you are using the helper */
void *helper_private;
};
 
struct drm_property_blob {
struct drm_mode_object base;
struct drm_device *dev;
struct kref refcount;
struct list_head head_global;
struct list_head head_file;
struct list_head head;
size_t length;
unsigned char data[];
};
252,39 → 237,24
 
/**
* struct drm_crtc_state - mutable CRTC state
* @crtc: backpointer to the CRTC
* @enable: whether the CRTC should be enabled, gates all other state
* @active: whether the CRTC is actively displaying (used for DPMS)
* @planes_changed: planes on this crtc are updated
* @mode_changed: crtc_state->mode or crtc_state->enable has been changed
* @active_changed: crtc_state->active has been toggled.
* @connectors_changed: connectors to this crtc have been updated
* @mode_changed: for use by helpers and drivers when computing state updates
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
* @last_vblank_count: for helpers and drivers to capture the vblank of the
* update to ensure framebuffer cleanup isn't done too early
* @planes_changed: for use by helpers and drivers when computing state updates
* @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
* @mode: current mode timings
* @event: optional pointer to a DRM event to signal upon completion of the
* state update
* @state: backpointer to global drm_atomic_state
*
 * Note that the distinction between @enable and @active is rather subtle:
 * Flipping @active while @enable is set, without changing anything else, must
 * never fail in the ->atomic_check callback. Userspace assumes
* that a DPMS On will always succeed. In other words: @enable controls resource
* assignment, @active controls the actual hardware state.
*/
struct drm_crtc_state {
struct drm_crtc *crtc;
 
bool enable;
bool active;
 
/* computed state bits used by helpers and drivers */
bool planes_changed : 1;
bool mode_changed : 1;
bool active_changed : 1;
bool connectors_changed : 1;
 
/* attached planes bitmask:
* WARNING: transitional helpers do not maintain plane_mask so
301,9 → 271,6
 
struct drm_display_mode mode;
 
/* blob property to expose current mode to atomic userspace */
struct drm_property_blob *mode_blob;
 
struct drm_pending_vblank_event *event;
 
struct drm_atomic_state *state;
325,9 → 292,6
* @atomic_duplicate_state: duplicate the atomic state for this CRTC
* @atomic_destroy_state: destroy an atomic state for this CRTC
* @atomic_set_property: set a property on an atomic state for this CRTC
* (do not call directly, use drm_atomic_crtc_set_property())
* @atomic_get_property: get a property on an atomic state for this CRTC
* (do not call directly, use drm_atomic_crtc_get_property())
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
387,10 → 351,6
struct drm_crtc_state *state,
struct drm_property *property,
uint64_t val);
int (*atomic_get_property)(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property,
uint64_t *val);
};
 
/**
407,11 → 367,17
* @enabled: is this CRTC enabled?
* @mode: current mode timings
* @hwmode: mode timings as programmed to hw regs
* @invert_dimensions: for purposes of error checking crtc vs fb sizes,
* invert the width/height of the crtc. This is used if the driver
* is performing 90 or 270 degree rotated scanout
* @x: x position on screen
* @y: y position on screen
* @funcs: CRTC control functions
* @gamma_size: size of gamma ramp
* @gamma_store: gamma ramp values
* @framedur_ns: precise frame timing
* @linedur_ns: precise line timing
* @pixeldur_ns: precise pixel timing
* @helper_private: mid-layer private data
* @properties: property tracking for this CRTC
* @state: current atomic state for this CRTC
455,6 → 421,8
*/
struct drm_display_mode hwmode;
 
bool invert_dimensions;
 
int x, y;
const struct drm_crtc_funcs *funcs;
 
462,8 → 430,11
uint32_t gamma_size;
uint16_t *gamma_store;
 
/* Constants needed for precise vblank and swap timestamping. */
int framedur_ns, linedur_ns, pixeldur_ns;
 
/* if you are using the helper */
const void *helper_private;
void *helper_private;
 
struct drm_object_properties properties;
 
478,14 → 449,11
 
/**
* struct drm_connector_state - mutable connector state
* @connector: backpointer to the connector
* @crtc: CRTC to connect connector to, NULL if disabled
* @best_encoder: can be used by helpers and drivers to select the encoder
* @state: backpointer to global drm_atomic_state
*/
struct drm_connector_state {
struct drm_connector *connector;
 
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */
 
struct drm_encoder *best_encoder;
495,7 → 463,7
 
/**
* struct drm_connector_funcs - control connectors on a given device
* @dpms: set power state
* @dpms: set power state (see drm_crtc_funcs above)
* @save: save connector state
* @restore: restore connector state
* @reset: reset connector after state has been invalidated (e.g. resume)
507,9 → 475,6
* @atomic_duplicate_state: duplicate the atomic state for this connector
* @atomic_destroy_state: destroy an atomic state for this connector
* @atomic_set_property: set a property on an atomic state for this connector
* (do not call directly, use drm_atomic_connector_set_property())
* @atomic_get_property: get a property on an atomic state for this connector
* (do not call directly, use drm_atomic_connector_get_property())
*
* Each CRTC may have one or more connectors attached to it. The functions
* below allow the core DRM code to control connectors, enumerate available modes,
516,7 → 481,7
* etc.
*/
struct drm_connector_funcs {
int (*dpms)(struct drm_connector *connector, int mode);
void (*dpms)(struct drm_connector *connector, int mode);
void (*save)(struct drm_connector *connector);
void (*restore)(struct drm_connector *connector);
void (*reset)(struct drm_connector *connector);
543,10 → 508,6
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val);
int (*atomic_get_property)(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val);
};
 
/**
593,7 → 554,7
struct drm_crtc *crtc;
struct drm_bridge *bridge;
const struct drm_encoder_funcs *funcs;
const void *helper_private;
void *helper_private;
};
 
/* should we poll this connector for connects and disconnects */
644,7 → 605,6
* @audio_latency: audio latency info from ELD, if found
* @null_edid_counter: track sinks that give us all zeros for the EDID
* @bad_edid_counter: track sinks that give us an EDID with invalid checksum
* @edid_corrupt: indicates whether the last read EDID was corrupt
* @debugfs_entry: debugfs directory for this connector
* @state: current atomic state for this connector
* @has_tile: is this connector connected to a tiled monitor
698,7 → 658,7
/* requested DPMS state */
int dpms;
 
const void *helper_private;
void *helper_private;
 
/* forced on connector */
struct drm_cmdline_mode cmdline_mode;
717,11 → 677,6
int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
unsigned bad_edid_counter;
 
/* Flag for raw EDID header corruption - used in Displayport
* compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
*/
bool edid_corrupt;
 
struct dentry *debugfs_entry;
 
struct drm_connector_state *state;
738,7 → 693,6
 
/**
* struct drm_plane_state - mutable plane state
* @plane: backpointer to the plane
* @crtc: currently bound CRTC, NULL if disabled
* @fb: currently bound framebuffer
* @fence: optional fence to wait for before scanning out @fb
755,8 → 709,6
* @state: backpointer to global drm_atomic_state
*/
struct drm_plane_state {
struct drm_plane *plane;
 
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
struct fence *fence;
769,9 → 721,6
uint32_t src_x, src_y;
uint32_t src_h, src_w;
 
/* Plane rotation */
unsigned int rotation;
 
struct drm_atomic_state *state;
};
 
786,9 → 735,6
* @atomic_duplicate_state: duplicate the atomic state for this plane
* @atomic_destroy_state: destroy an atomic state for this plane
* @atomic_set_property: set a property on an atomic state for this plane
* (do not call directly, use drm_atomic_plane_set_property())
* @atomic_get_property: get a property on an atomic state for this plane
* (do not call directly, use drm_atomic_plane_get_property())
*/
struct drm_plane_funcs {
int (*update_plane)(struct drm_plane *plane,
812,10 → 758,6
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val);
int (*atomic_get_property)(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
uint64_t *val);
};
 
enum drm_plane_type {
832,7 → 774,6
* @possible_crtcs: pipes this plane can be bound to
* @format_types: array of formats supported by this plane
* @format_count: number of formats supported
* @format_default: driver hasn't supplied supported formats for the plane
* @crtc: currently bound CRTC
* @fb: currently bound fb
* @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
852,8 → 793,7
 
uint32_t possible_crtcs;
uint32_t *format_types;
unsigned int format_count;
bool format_default;
uint32_t format_count;
 
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
866,7 → 806,7
 
enum drm_plane_type type;
 
const void *helper_private;
void *helper_private;
 
struct drm_plane_state *state;
};
873,7 → 813,6
 
/**
* struct drm_bridge_funcs - drm_bridge control functions
* @attach: Called during drm_bridge_attach
* @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
* @disable: Called right before encoder prepare, disables the bridge
* @post_disable: Called right after encoder prepare, for lockstepped disable
880,9 → 819,9
* @mode_set: Set this mode to the bridge
* @pre_enable: Called right before encoder commit, for lockstepped commit
* @enable: Called right after encoder commit, enables the bridge
* @destroy: make object go away
*/
struct drm_bridge_funcs {
int (*attach)(struct drm_bridge *bridge);
bool (*mode_fixup)(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
893,36 → 832,31
struct drm_display_mode *adjusted_mode);
void (*pre_enable)(struct drm_bridge *bridge);
void (*enable)(struct drm_bridge *bridge);
void (*destroy)(struct drm_bridge *bridge);
};
 
/**
* struct drm_bridge - central DRM bridge control structure
* @dev: DRM device this bridge belongs to
* @encoder: encoder to which this bridge is connected
* @next: the next bridge in the encoder chain
* @of_node: device node pointer to the bridge
* @list: to keep track of all added bridges
* @head: list management
* @base: base mode object
* @funcs: control functions
* @driver_private: pointer to the bridge driver's internal context
*/
struct drm_bridge {
struct drm_device *dev;
struct drm_encoder *encoder;
struct drm_bridge *next;
#ifdef CONFIG_OF
struct device_node *of_node;
#endif
struct list_head list;
struct list_head head;
 
struct drm_mode_object base;
 
const struct drm_bridge_funcs *funcs;
void *driver_private;
};
 
/**
* struct drm_atomic_state - the global state object for atomic updates
 * struct drm_atomic_state - the global state object for atomic updates
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor ioctl semantics
* @flags: state flags like async update
* @planes: pointer to array of plane pointers
* @plane_states: pointer to array of plane states pointers
* @crtcs: pointer to array of CRTC pointers
934,8 → 868,7
*/
struct drm_atomic_state {
struct drm_device *dev;
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
uint32_t flags;
struct drm_plane **planes;
struct drm_plane_state **plane_states;
struct drm_crtc **crtcs;
979,12 → 912,9
* struct drm_mode_config_funcs - basic driver provided mode setting functions
* @fb_create: create a new framebuffer object
* @output_poll_changed: function to handle output configuration changes
* @atomic_check: check whether a given atomic state update is possible
 * @atomic_check: check whether a given atomic state update is possible
* @atomic_commit: commit an atomic state update previously verified with
* atomic_check()
* @atomic_state_alloc: allocate a new atomic state
* @atomic_state_clear: clear the atomic state
* @atomic_state_free: free the atomic state
*
* Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
* involve drivers.
1000,12 → 930,33
int (*atomic_commit)(struct drm_device *dev,
struct drm_atomic_state *a,
bool async);
struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev);
void (*atomic_state_clear)(struct drm_atomic_state *state);
void (*atomic_state_free)(struct drm_atomic_state *state);
};
 
/**
* struct drm_mode_group - group of mode setting resources for potential sub-grouping
* @num_crtcs: CRTC count
* @num_encoders: encoder count
* @num_connectors: connector count
* @num_bridges: bridge count
* @id_list: list of KMS object IDs in this group
*
* Currently this simply tracks the global mode setting state. But in the
* future it could allow groups of objects to be set aside into independent
* control groups for use by different user level processes (e.g. two X servers
* running simultaneously on different heads, each with their own mode
* configuration and freedom of mode setting).
*/
struct drm_mode_group {
uint32_t num_crtcs;
uint32_t num_encoders;
uint32_t num_connectors;
uint32_t num_bridges;
 
/* list of object IDs for this group */
uint32_t *id_list;
};
 
/**
* struct drm_mode_config - Mode configuration control structure
* @mutex: mutex protecting KMS related lists and structures
* @connection_mutex: ww mutex protecting connector state and routing
1018,6 → 969,8
* @fb_list: list of framebuffers available
* @num_connector: number of connectors on this device
* @connector_list: list of connector objects
* @num_bridge: number of bridges on this device
* @bridge_list: list of bridge objects
* @num_encoder: number of encoders on this device
* @encoder_list: list of encoder objects
* @num_overlay_plane: number of overlay planes on this device
1036,7 → 989,6
* @poll_running: track polling status for this device
* @output_poll_work: delayed work for polling in process context
* @property_blob_list: list of all the blob property objects
* @blob_lock: mutex for blob property allocation and management
* @*_property: core property tracking
* @preferred_depth: preferred RBG pixel depth, used by fb helpers
* @prefer_shadow: hint to userspace to prefer shadow-fb rendering
1063,6 → 1015,8
 
int num_connector;
struct list_head connector_list;
int num_bridge;
struct list_head bridge_list;
int num_encoder;
struct list_head encoder_list;
 
1089,11 → 1043,8
/* output poll support */
bool poll_enabled;
bool poll_running;
bool delayed_event;
struct delayed_work output_poll_work;
 
struct mutex blob_lock;
 
/* pointers to standard properties */
struct list_head property_blob_list;
struct drm_property *edid_property;
1102,18 → 1053,6
struct drm_property *tile_property;
struct drm_property *plane_type_property;
struct drm_property *rotation_property;
struct drm_property *prop_src_x;
struct drm_property *prop_src_y;
struct drm_property *prop_src_w;
struct drm_property *prop_src_h;
struct drm_property *prop_crtc_x;
struct drm_property *prop_crtc_y;
struct drm_property *prop_crtc_w;
struct drm_property *prop_crtc_h;
struct drm_property *prop_fb_id;
struct drm_property *prop_crtc_id;
struct drm_property *prop_active;
struct drm_property *prop_mode_id;
 
/* DVI-I properties */
struct drm_property *dvi_i_subconnector_property;
1149,9 → 1088,6
/* whether async page flip is supported or not */
bool async_page_flip;
 
/* whether the driver supports fb modifiers */
bool allow_fb_modifiers;
 
/* cursor size */
uint32_t cursor_width, cursor_height;
};
1217,22 → 1153,10
/* helper to unplug all connectors from sysfs for device */
extern void drm_connector_unplug_all(struct drm_device *dev);
 
extern int drm_bridge_add(struct drm_bridge *bridge);
extern void drm_bridge_remove(struct drm_bridge *bridge);
extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
extern int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
const struct drm_bridge_funcs *funcs);
extern void drm_bridge_cleanup(struct drm_bridge *bridge);
 
bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void drm_bridge_disable(struct drm_bridge *bridge);
void drm_bridge_post_disable(struct drm_bridge *bridge);
void drm_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void drm_bridge_pre_enable(struct drm_bridge *bridge);
void drm_bridge_enable(struct drm_bridge *bridge);
 
extern int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
1256,22 → 1180,17
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats,
unsigned int format_count,
uint32_t format_count,
enum drm_plane_type type);
extern int drm_plane_init(struct drm_device *dev,
struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
const uint32_t *formats, uint32_t format_count,
bool is_primary);
extern void drm_plane_cleanup(struct drm_plane *plane);
extern unsigned int drm_plane_index(struct drm_plane *plane);
extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
extern void drm_plane_force_disable(struct drm_plane *plane);
extern int drm_plane_check_pixel_format(const struct drm_plane *plane,
u32 format);
extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
int *hdisplay, int *vdisplay);
extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
1287,8 → 1206,9
extern const char *drm_get_tv_subconnector_name(int val);
extern const char *drm_get_tv_select_name(int val);
extern void drm_fb_release(struct drm_file *file_priv);
extern void drm_property_destroy_user_blobs(struct drm_device *dev,
struct drm_file *file_priv);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
extern void drm_mode_group_destroy(struct drm_mode_group *group);
extern void drm_reinit_primary_mode_group(struct drm_device *dev);
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
1304,10 → 1224,6
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
 
extern int drm_display_info_set_bus_formats(struct drm_display_info *info,
const u32 *formats,
unsigned int num_formats);
 
static inline bool drm_property_type_is(struct drm_property *property,
uint32_t type)
{
1363,15 → 1279,6
int64_t min, int64_t max);
struct drm_property *drm_property_create_object(struct drm_device *dev,
int flags, const char *name, uint32_t type);
struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
const char *name);
struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
size_t length,
const void *data);
struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
uint32_t id);
struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob);
void drm_property_unreference_blob(struct drm_property_blob *blob);
extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
extern int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
1378,15 → 1285,11
extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
extern int drm_mode_create_tv_properties(struct drm_device *dev,
unsigned int num_modes,
const char * const modes[]);
char *modes[]);
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
extern bool drm_property_change_valid_get(struct drm_property *property,
uint64_t value, struct drm_mode_object **ref);
extern void drm_property_change_valid_put(struct drm_property *property,
struct drm_mode_object *ref);
 
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
1431,10 → 1334,6
void *data, struct drm_file *file_priv);
extern int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_createblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_destroyblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getencoder(struct drm_device *dev,
1456,8 → 1355,7
int hpref, int vpref);
 
extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
bool *edid_corrupt);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
extern bool drm_edid_is_valid(struct edid *edid);
 
extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
1483,8 → 1381,6
extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
struct drm_property *property,
uint64_t value);
extern int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
 
extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp);
1540,46 → 1436,17
return mo ? obj_to_property(mo) : NULL;
}
 
/* Plane list iterator for legacy (overlay only) planes. */
#define drm_for_each_legacy_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
 
#define drm_for_each_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
 
#define drm_for_each_crtc(crtc, dev) \
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
 
static inline void
assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
static inline struct drm_property_blob *
drm_property_blob_find(struct drm_device *dev, uint32_t id)
{
/*
* The connector hotadd/remove code currently grabs both locks when
* updating lists. Hence readers need only hold either of them to be
* safe and the check amounts to
*
* WARN_ON(not_holding(A) && not_holding(B)).
*/
WARN_ON(!mutex_is_locked(&mode_config->mutex) &&
!drm_modeset_is_locked(&mode_config->connection_mutex));
struct drm_mode_object *mo;
mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
return mo ? obj_to_blob(mo) : NULL;
}
 
#define drm_for_each_connector(connector, dev) \
for (assert_drm_connector_list_read_locked(&(dev)->mode_config), \
connector = list_first_entry(&(dev)->mode_config.connector_list, \
struct drm_connector, head); \
&connector->head != (&(dev)->mode_config.connector_list); \
connector = list_next_entry(connector, head))
/* Plane list iterator for legacy (overlay only) planes. */
#define drm_for_each_legacy_plane(plane, planelist) \
list_for_each_entry(plane, planelist, head) \
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
 
#define drm_for_each_encoder(encoder, dev) \
list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
 
#define drm_for_each_fb(fb, dev) \
for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \
fb = list_first_entry(&(dev)->mode_config.fb_list, \
struct drm_framebuffer, head); \
&fb->head != (&(dev)->mode_config.fb_list); \
fb = list_next_entry(fb, head))
 
#endif /* __DRM_CRTC_H__ */
/drivers/include/drm/drm_crtc_helper.h
39,8 → 39,6
 
#include <linux/fb.h>
 
#include <drm/drm_crtc.h>
 
enum mode_set_atomic {
LEAVE_ATOMIC_MODE_SET,
ENTER_ATOMIC_MODE_SET,
47,30 → 45,11
};
 
/**
* struct drm_crtc_helper_funcs - helper operations for CRTCs
* @dpms: set power state
* @prepare: prepare the CRTC, called before @mode_set
* @commit: commit changes to CRTC, called after @mode_set
* @mode_fixup: try to fixup proposed mode for this CRTC
* drm_crtc_helper_funcs - helper operations for CRTCs
 * @mode_fixup: try to fixup proposed mode for this CRTC
* @mode_set: set this mode
* @mode_set_nofb: set mode only (no scanout buffer attached)
* @mode_set_base: update the scanout buffer
* @mode_set_base_atomic: non-blocking mode set (used for kgdb support)
* @load_lut: load color palette
* @disable: disable CRTC when no longer in use
* @enable: enable CRTC
* @atomic_check: check for validity of an atomic state
* @atomic_begin: begin atomic update
* @atomic_flush: flush atomic update
*
* The helper operations are called by the mid-layer CRTC helper.
*
* Note that with atomic helpers @dpms, @prepare and @commit hooks are
 * deprecated. Use @enable and @disable instead exclusively.
*
* With legacy crtc helpers there's a big semantic difference between @disable
* and the other hooks: @disable also needs to release any resources acquired in
* @mode_set (like shared PLLs).
*/
struct drm_crtc_helper_funcs {
/*
89,7 → 68,6
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb);
/* Actually set the mode for atomic helpers, optional */
void (*mode_set_nofb)(struct drm_crtc *crtc);
 
/* Move the crtc on the current fb to the given position *optional* */
102,41 → 80,22
/* reload the current crtc LUT */
void (*load_lut)(struct drm_crtc *crtc);
 
/* disable crtc when not in use - more explicit than dpms off */
void (*disable)(struct drm_crtc *crtc);
void (*enable)(struct drm_crtc *crtc);
 
/* atomic helpers */
int (*atomic_check)(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void (*atomic_begin)(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
void (*atomic_flush)(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
void (*atomic_begin)(struct drm_crtc *crtc);
void (*atomic_flush)(struct drm_crtc *crtc);
};
 
/**
* struct drm_encoder_helper_funcs - helper operations for encoders
* @dpms: set power state
* @save: save connector state
* @restore: restore connector state
* drm_encoder_helper_funcs - helper operations for encoders
* @mode_fixup: try to fixup proposed mode for this connector
* @prepare: part of the disable sequence, called before the CRTC modeset
* @commit: called after the CRTC modeset
* @mode_set: set this mode, optional for atomic helpers
* @get_crtc: return CRTC that the encoder is currently attached to
* @detect: connection status detection
* @disable: disable encoder when not in use (overrides DPMS off)
* @enable: enable encoder
* @atomic_check: check for validity of an atomic update
* @mode_set: set this mode
*
* The helper operations are called by the mid-layer CRTC helper.
*
* Note that with atomic helpers @dpms, @prepare and @commit hooks are
 * deprecated. Use @enable and @disable instead exclusively.
*
* With legacy crtc helpers there's a big semantic difference between @disable
* and the other hooks: @disable also needs to release any resources acquired in
* @mode_set (like shared PLLs).
*/
struct drm_encoder_helper_funcs {
void (*dpms)(struct drm_encoder *encoder, int mode);
155,22 → 114,14
/* detect for DAC style encoders */
enum drm_connector_status (*detect)(struct drm_encoder *encoder,
struct drm_connector *connector);
/* disable encoder when not in use - more explicit than dpms off */
void (*disable)(struct drm_encoder *encoder);
 
void (*enable)(struct drm_encoder *encoder);
 
/* atomic helpers */
int (*atomic_check)(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
};
 
/**
* struct drm_connector_helper_funcs - helper operations for connectors
* drm_connector_helper_funcs - helper operations for connectors
* @get_modes: get mode list for this connector
* @mode_valid: is this mode valid on the given connector? (optional)
* @best_encoder: return the preferred encoder for this connector
* @atomic_best_encoder: atomic version of @best_encoder
* @mode_valid (optional): is this mode valid on the given connector?
*
* The helper operations are called by the mid-layer CRTC helper.
*/
179,8 → 130,6
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
struct drm_connector_state *connector_state);
};
 
extern void drm_helper_disable_unused_functions(struct drm_device *dev);
192,7 → 141,7
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
 
202,19 → 151,19
static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
const struct drm_crtc_helper_funcs *funcs)
{
crtc->helper_private = funcs;
crtc->helper_private = (void *)funcs;
}
 
static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
const struct drm_encoder_helper_funcs *funcs)
{
encoder->helper_private = funcs;
encoder->helper_private = (void *)funcs;
}
 
static inline void drm_connector_helper_add(struct drm_connector *connector,
const struct drm_connector_helper_funcs *funcs)
{
connector->helper_private = funcs;
connector->helper_private = (void *)funcs;
}
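
/*
 * Hedged usage sketch for the helpers above: a driver fills a funcs
 * table (the callback below is hypothetical) and attaches it while
 * setting up its CRTCs.
 */
static void example_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	/* hypothetical: program the hardware power state for this crtc */
}

static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
	.dpms = example_crtc_dpms,
};

static inline void example_attach_crtc_helpers(struct drm_crtc *crtc)
{
	drm_crtc_helper_add(crtc, &example_crtc_helper_funcs);
}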
 
extern void drm_helper_resume_force_mode(struct drm_device *dev);
240,6 → 189,5
 
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
 
#endif
/drivers/include/drm/drm_dp_helper.h
42,11 → 42,9
* 1.2 formally includes both eDP and DPI definitions.
*/
 
#define DP_AUX_MAX_PAYLOAD_BYTES 16
 
#define DP_AUX_I2C_WRITE 0x0
#define DP_AUX_I2C_READ 0x1
#define DP_AUX_I2C_WRITE_STATUS_UPDATE 0x2
#define DP_AUX_I2C_STATUS 0x2
#define DP_AUX_I2C_MOT 0x4
#define DP_AUX_NATIVE_WRITE 0x8
#define DP_AUX_NATIVE_READ 0x9
94,15 → 92,6
# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
# define DP_OUI_SUPPORT (1 << 7)
 
#define DP_RECEIVE_PORT_0_CAP_0 0x008
# define DP_LOCAL_EDID_PRESENT (1 << 1)
# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2)
 
#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009
 
#define DP_RECEIVE_PORT_1_CAP_0 0x00a
#define DP_RECEIVE_PORT_1_BUFFER_SIZE 0x00b
 
#define DP_I2C_SPEED_CAP 0x00c /* DPI */
# define DP_I2C_SPEED_1K 0x01
# define DP_I2C_SPEED_5K 0x02
112,19 → 101,8
# define DP_I2C_SPEED_1M 0x20
 
#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */
# define DP_ALTERNATE_SCRAMBLER_RESET_CAP (1 << 0)
# define DP_FRAMING_CHANGE_CAP (1 << 1)
# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */
 
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
 
#define DP_ADAPTER_CAP 0x00f /* 1.2 */
# define DP_FORCE_LOAD_SENSE_CAP (1 << 0)
# define DP_ALTERNATE_I2C_PATTERN_CAP (1 << 1)
 
#define DP_SUPPORTED_LINK_RATES 0x010 /* eDP 1.4 */
# define DP_MAX_SUPPORTED_RATES 8 /* 16-bit little-endian */
 
/* Multiple stream transport */
#define DP_FAUX_CAP 0x020 /* 1.2 */
# define DP_FAUX_CAP_1 (1 << 0)
132,56 → 110,10
#define DP_MSTM_CAP 0x021 /* 1.2 */
# define DP_MST_CAP (1 << 0)
 
#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */
 
/* AV_SYNC_DATA_BLOCK 1.2 */
#define DP_AV_GRANULARITY 0x023
# define DP_AG_FACTOR_MASK (0xf << 0)
# define DP_AG_FACTOR_3MS (0 << 0)
# define DP_AG_FACTOR_2MS (1 << 0)
# define DP_AG_FACTOR_1MS (2 << 0)
# define DP_AG_FACTOR_500US (3 << 0)
# define DP_AG_FACTOR_200US (4 << 0)
# define DP_AG_FACTOR_100US (5 << 0)
# define DP_AG_FACTOR_10US (6 << 0)
# define DP_AG_FACTOR_1US (7 << 0)
# define DP_VG_FACTOR_MASK (0xf << 4)
# define DP_VG_FACTOR_3MS (0 << 4)
# define DP_VG_FACTOR_2MS (1 << 4)
# define DP_VG_FACTOR_1MS (2 << 4)
# define DP_VG_FACTOR_500US (3 << 4)
# define DP_VG_FACTOR_200US (4 << 4)
# define DP_VG_FACTOR_100US (5 << 4)
 
#define DP_AUD_DEC_LAT0 0x024
#define DP_AUD_DEC_LAT1 0x025
 
#define DP_AUD_PP_LAT0 0x026
#define DP_AUD_PP_LAT1 0x027
 
#define DP_VID_INTER_LAT 0x028
 
#define DP_VID_PROG_LAT 0x029
 
#define DP_REP_LAT 0x02a
 
#define DP_AUD_DEL_INS0 0x02b
#define DP_AUD_DEL_INS1 0x02c
#define DP_AUD_DEL_INS2 0x02d
/* End of AV_SYNC_DATA_BLOCK */
 
#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */
# define DP_ALPM_CAP (1 << 0)
 
#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_CAP (1 << 0)
 
#define DP_GUID 0x030 /* 1.2 */
 
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
 
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
# define DP_PSR_SETUP_TIME_330 (0 << 1)
221,7 → 153,6
 
/* link configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
237,12 → 168,11
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
# define DP_TRAINING_PATTERN_MASK 0x3
 
/* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */
# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2)
# define DP_LINK_QUAL_PATTERN_11_D10_2 (1 << 2)
# define DP_LINK_QUAL_PATTERN_11_ERROR_RATE (2 << 2)
# define DP_LINK_QUAL_PATTERN_11_PRBS7 (3 << 2)
# define DP_LINK_QUAL_PATTERN_11_MASK (3 << 2)
# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
 
# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
285,63 → 215,17
/* bitmask as for DP_I2C_SPEED_CAP */
 
#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
# define DP_ALTERNATE_SCRAMBLER_RESET_ENABLE (1 << 0)
# define DP_FRAMING_CHANGE_ENABLE (1 << 1)
# define DP_PANEL_SELF_TEST_ENABLE (1 << 7)
 
#define DP_LINK_QUAL_LANE0_SET 0x10b /* DPCD >= 1.2 */
#define DP_LINK_QUAL_LANE1_SET 0x10c
#define DP_LINK_QUAL_LANE2_SET 0x10d
#define DP_LINK_QUAL_LANE3_SET 0x10e
# define DP_LINK_QUAL_PATTERN_DISABLE 0
# define DP_LINK_QUAL_PATTERN_D10_2 1
# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2
# define DP_LINK_QUAL_PATTERN_PRBS7 3
# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4
# define DP_LINK_QUAL_PATTERN_HBR2_EYE 5
# define DP_LINK_QUAL_PATTERN_MASK 7
 
#define DP_TRAINING_LANE0_1_SET2 0x10f
#define DP_TRAINING_LANE2_3_SET2 0x110
# define DP_LANE02_POST_CURSOR2_SET_MASK (3 << 0)
# define DP_LANE02_MAX_POST_CURSOR2_REACHED (1 << 2)
# define DP_LANE13_POST_CURSOR2_SET_MASK (3 << 4)
# define DP_LANE13_MAX_POST_CURSOR2_REACHED (1 << 6)
 
#define DP_MSTM_CTRL 0x111 /* 1.2 */
# define DP_MST_EN (1 << 0)
# define DP_UP_REQ_EN (1 << 1)
# define DP_UPSTREAM_IS_SRC (1 << 2)
 
#define DP_AUDIO_DELAY0 0x112 /* 1.2 */
#define DP_AUDIO_DELAY1 0x113
#define DP_AUDIO_DELAY2 0x114
 
#define DP_LINK_RATE_SET 0x115 /* eDP 1.4 */
# define DP_LINK_RATE_SET_SHIFT 0
# define DP_LINK_RATE_SET_MASK (7 << 0)
 
#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */
# define DP_ALPM_ENABLE (1 << 0)
# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1)
 
#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0)
# define DP_IRQ_HPD_ENABLE (1 << 1)
 
#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */
# define DP_PWR_NOT_NEEDED (1 << 0)
 
#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
 
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE (1 << 0)
# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
# define DP_PSR_CRC_VERIFICATION (1 << 2)
# define DP_PSR_FRAME_CAPTURE (1 << 3)
# define DP_PSR_SELECTIVE_UPDATE (1 << 4)
# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5)
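
As a usage illustration, enabling PSR on the sink side amounts to setting bits in DP_PSR_EN_CFG. A hedged sketch, again assuming drm_dp_dpcd_writeb():

/* Illustrative only: enable PSR with CRC verification so the sink can
 * report corrupted remote frame buffer contents via DP_PSR_ERROR_STATUS. */
static int example_psr_enable(struct drm_dp_aux *aux)
{
        ssize_t ret = drm_dp_dpcd_writeb(aux, DP_PSR_EN_CFG,
                                         DP_PSR_ENABLE |
                                         DP_PSR_CRC_VERIFICATION);
        return ret < 0 ? ret : 0;
}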
 
#define DP_ADAPTER_CTRL 0x1a0
# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
420,7 → 304,7
 
#define DP_TEST_SINK_MISC 0x246
# define DP_TEST_CRC_SUPPORTED (1 << 5)
# define DP_TEST_COUNT_MASK 0xf
# define DP_TEST_COUNT_MASK 0x7
 
#define DP_TEST_RESPONSE 0x260
# define DP_TEST_ACK (1 << 0)
448,49 → 332,6
# define DP_SET_POWER_D3 0x2
# define DP_SET_POWER_MASK 0x3
 
#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */
# define DP_EDP_11 0x00
# define DP_EDP_12 0x01
# define DP_EDP_13 0x02
# define DP_EDP_14 0x03
 
#define DP_EDP_GENERAL_CAP_1 0x701
 
#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702
 
#define DP_EDP_GENERAL_CAP_2 0x703
 
#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
 
#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720
 
#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721
 
#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722
#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723
 
#define DP_EDP_PWMGEN_BIT_COUNT 0x724
#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN 0x725
#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX 0x726
 
#define DP_EDP_BACKLIGHT_CONTROL_STATUS 0x727
 
#define DP_EDP_BACKLIGHT_FREQ_SET 0x728
 
#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB 0x72a
#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID 0x72b
#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_LSB 0x72c
 
#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MSB 0x72d
#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MID 0x72e
#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB 0x72f
 
#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732
#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733
 
#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
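
The 16-bit brightness value is split across the MSB/LSB register pair above. A minimal sketch of a DPCD backlight write, assuming drm_dp_dpcd_writeb() and that the panel has already been switched to DPCD brightness control via DP_EDP_BACKLIGHT_MODE_SET_REGISTER:

/* Illustrative only: program a 16-bit backlight level, MSB first. */
static int example_edp_set_backlight(struct drm_dp_aux *aux, u16 level)
{
        ssize_t ret;

        ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
                                 level >> 8);
        if (ret < 0)
                return ret;

        ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_LSB,
                                 level & 0xff);
        return ret < 0 ? ret : 0;
}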
 
#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
509,7 → 350,6
#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
# define DP_PSR_LINK_CRC_ERROR (1 << 0)
# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
# define DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) /* eDP 1.4 */
 
#define DP_PSR_ESI 0x2007 /* XXX 1.2? */
# define DP_PSR_CAPS_CHANGE (1 << 0)
523,9 → 363,6
# define DP_PSR_SINK_INTERNAL_ERROR 7
# define DP_PSR_SINK_STATE_MASK 0x07
 
#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */
# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0)
 
/* DP 1.2 Sideband message defines */
/* peer device type - DP 1.2a Table 2-92 */
#define DP_PEER_DEVICE_NONE 0x0
568,10 → 405,6
#define MODE_I2C_READ 4
#define MODE_I2C_STOP 8
 
/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */
#define DP_MST_PHYSICAL_PORT_0 0
#define DP_MST_LOGICAL_PORT_0 8
 
#define DP_LINK_STATUS_SIZE 6
bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
582,7 → 415,6
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
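
These helpers operate on a 6-byte snapshot of the lane/link status registers. A sketch of the typical polling step during training; drm_dp_dpcd_read_link_status() and drm_dp_clock_recovery_ok() are assumed here, as they accompany these declarations in the elided parts of the header:

/* Illustrative only: read the status block once and test both
 * clock recovery and channel equalization. */
static bool example_link_trained(struct drm_dp_aux *aux, int lane_count)
{
        u8 status[DP_LINK_STATUS_SIZE];

        if (drm_dp_dpcd_read_link_status(aux, status) < 0)
                return false;

        return drm_dp_clock_recovery_ok(status, lane_count) &&
               drm_dp_channel_eq_ok(status, lane_count);
}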
 
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2
 
638,13 → 470,6
(dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
}
 
static inline bool
drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_DPCD_REV] >= 0x12 &&
dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
}
 
/*
* DisplayPort AUX channel
*/
691,12 → 516,9
* An AUX channel can also be used to transport I2C messages to a sink. A
* typical application of that is to access an EDID that's present in the
* sink device. The .transfer() function can also be used to execute such
* transactions. The drm_dp_aux_register() function registers an I2C
* adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
* should call drm_dp_aux_unregister() to remove the I2C adapter.
* The I2C adapter uses long transfers by default; if a partial response is
* received, the adapter will drop down to the size given by the partial
* response for this transaction only.
* transactions. The drm_dp_aux_register_i2c_bus() function registers an
* I2C adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
* should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter.
*
* Note that the aux helper code assumes that the .transfer() function
* only modifies the reply field of the drm_dp_aux_msg structure. The
764,7 → 586,6
 
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
 
int drm_dp_aux_register(struct drm_dp_aux *aux);
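
The AUX-channel comment above describes the I2C-over-AUX path; the common use case is fetching the sink's EDID. A hedged sketch, assuming the aux->ddc i2c_adapter member of struct drm_dp_aux and the drm_probe_ddc()/drm_get_edid() helpers from the EDID code:

/* Illustrative only: read the EDID through the AUX channel's embedded
 * I2C adapter, after drm_dp_aux_register() has been called. */
static struct edid *example_dp_get_edid(struct drm_connector *connector,
                                        struct drm_dp_aux *aux)
{
        if (!drm_probe_ddc(&aux->ddc))
                return NULL;

        return drm_get_edid(connector, &aux->ddc);
}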
/drivers/include/drm/drm_dp_mst_helper.h
253,7 → 253,6
u8 *bytes;
};
 
#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
struct drm_dp_remote_i2c_read {
u8 num_transactions;
u8 port_number;
263,7 → 262,7
u8 *bytes;
u8 no_stop_bit;
u8 i2c_transaction_delay;
} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
} transactions[4];
u8 read_i2c_device_id;
u8 num_bytes_read;
};
375,7 → 374,6
struct drm_dp_mst_topology_cbs {
/* create a connector for a port */
struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
void (*register_connector)(struct drm_connector *connector);
void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector);
void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
465,10 → 463,6
struct work_struct work;
 
struct work_struct tx_work;
 
struct list_head destroy_connector_list;
struct mutex destroy_connector_lock;
struct work_struct destroy_connector_work;
};
 
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
492,9 → 486,7
 
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
 
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
 
 
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
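
These VCPI helpers manage timeslot bookkeeping for MST streams: reserve slots for a stream's bandwidth (PBN) before enabling it, release them on teardown. A hedged sketch; drm_dp_calc_pbn_mode() is an assumption, living in an elided part of this helper library:

/* Illustrative only: reserve timeslots for a stream, release on teardown. */
static bool example_mst_enable_stream(struct drm_dp_mst_topology_mgr *mgr,
                                      struct drm_dp_mst_port *port,
                                      int clock_khz, int bpp)
{
        int pbn = drm_dp_calc_pbn_mode(clock_khz, bpp); /* assumed helper */
        int slots;

        if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
                return false;

        /* ... program the transcoder with 'slots' timeslots ... */
        return true;
}

static void example_mst_disable_stream(struct drm_dp_mst_topology_mgr *mgr,
                                       struct drm_dp_mst_port *port)
{
        drm_dp_mst_reset_vcpi_slots(mgr, port);
}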
 
 
/drivers/include/drm/drm_edid.h
215,8 → 215,6
#define DRM_ELD_VER 0
# define DRM_ELD_VER_SHIFT 3
# define DRM_ELD_VER_MASK (0x1f << 3)
# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */
# define DRM_ELD_VER_CANNED (0x1f << 3)
 
#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
 
326,8 → 324,9
int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
struct drm_connector *drm_select_eld(struct drm_encoder *encoder);
struct drm_display_mode *mode);
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
int drm_load_edid_firmware(struct drm_connector *connector);
 
int
347,25 → 346,6
}
 
/**
* drm_eld_sad - Get ELD SAD structures.
* @eld: pointer to an eld memory structure with sad_count set
*/
static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
{
unsigned int ver, mnl;
 
ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
if (ver != 2 && ver != 31)
return NULL;
 
mnl = drm_eld_mnl(eld);
if (mnl > 16)
return NULL;
 
return eld + DRM_ELD_CEA_SAD(mnl, 0);
}
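
Each SAD is a 3-byte CEA Short Audio Descriptor. Paired with drm_eld_sad_count(), whose kernel-doc follows just below, the block can be walked like this hedged sketch:

/* Illustrative only: visit every Short Audio Descriptor in an ELD. */
static void example_walk_sads(const uint8_t *eld)
{
        const uint8_t *sad = drm_eld_sad(eld);
        int i, count = drm_eld_sad_count(eld);

        if (!sad)
                return;

        for (i = 0; i < count; i++, sad += 3) {
                /* sad[0]: format/channels, sad[1]: rates, sad[2]: extras */
        }
}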
 
/**
* drm_eld_sad_count - Get ELD SAD count.
* @eld: pointer to an eld memory structure with sad_count set
*/
/drivers/include/drm/drm_fb_helper.h
44,25 → 44,6
int x, y;
};
 
/**
* struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size
* @fb_width: fbdev width
* @fb_height: fbdev height
* @surface_width: scanout buffer width
* @surface_height: scanout buffer height
* @surface_bpp: scanout buffer bpp
* @surface_depth: scanout buffer depth
*
* Note that the scanout surface width/height may be larger than the fbdev
* width/height. In case of multiple displays, the scanout surface is sized
* according to the largest width/height (so it is large enough for all CRTCs
* to scan out). But the fbdev width/height is sized to the minimum width/
* height of all the displays. This ensures that fbcon fits on the smallest
* of the attached displays.
*
* So what is passed to drm_fb_helper_fill_var() should be fb_width/fb_height,
* rather than the surface size.
*/
struct drm_fb_helper_surface_size {
u32 fb_width;
u32 fb_height;
104,20 → 85,6
struct drm_connector *connector;
};
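
To make the fbdev-versus-surface sizing rule from the drm_fb_helper_surface_size comment concrete: the surface takes the maximum of all display sizes, the fbdev the minimum. A hypothetical helper sketch over per-display desired sizes, using the kernel min()/max() macros:

/* Illustrative only: apply the min/max sizing rule described above. */
static void example_size_fbdev(struct drm_fb_helper_surface_size *sizes,
                               const u32 *widths, const u32 *heights, int n)
{
        int i;

        sizes->fb_width = sizes->surface_width = widths[0];
        sizes->fb_height = sizes->surface_height = heights[0];
        for (i = 1; i < n; i++) {
                sizes->fb_width = min(sizes->fb_width, widths[i]);
                sizes->fb_height = min(sizes->fb_height, heights[i]);
                sizes->surface_width = max(sizes->surface_width, widths[i]);
                sizes->surface_height = max(sizes->surface_height, heights[i]);
        }
}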
 
/**
* struct drm_fb_helper - helper to emulate fbdev on top of kms
* @fb: Scanout framebuffer object
* @dev: DRM device
* @crtc_count: number of possible CRTCs
* @crtc_info: per-CRTC helper state (mode, x/y offset, etc)
* @connector_count: number of connected connectors
* @connector_info_alloc_count: size of connector_info
* @funcs: driver callbacks for fb helper
* @fbdev: emulated fbdev device info struct
* @pseudo_palette: fake palette of 16 colors
* @kernel_fb_list: list_head in kernel_fb_helper_list
* @delayed_hotplug: was there a hotplug while kms master active?
*/
struct drm_fb_helper {
struct drm_framebuffer *fb;
struct drm_device *dev;
134,20 → 101,8
/* we got a hotplug but fbdev wasn't running the console,
delay until next set_par */
bool delayed_hotplug;
 
/**
* @atomic:
*
* Use atomic updates for restore_fbdev_mode(), etc. This defaults to
* true if driver has DRIVER_ATOMIC feature flag, but drivers can
* override it to true after drm_fb_helper_init() if they support atomic
* modeset but do not yet advertise DRIVER_ATOMIC (note that fb-helper
* does not require ASYNC commits).
*/
bool atomic;
};
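
The functions declared below are typically chained during driver load. A hedged sketch of that sequence with a caller-supplied funcs table; note the two revisions in this diff disagree on the return type of drm_fb_helper_initial_config() (int vs bool), and the int-returning variant is assumed:

/* Illustrative only: bring up fbdev emulation for a device with
 * @num_crtc CRTCs, mirroring the usual driver-load sequence. */
static int example_fbdev_setup(struct drm_device *dev,
                               struct drm_fb_helper *helper,
                               const struct drm_fb_helper_funcs *funcs,
                               int num_crtc, int max_conn, int bpp)
{
        int ret;

        drm_fb_helper_prepare(dev, helper, funcs);

        ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn);
        if (ret)
                return ret;

        ret = drm_fb_helper_single_add_all_connectors(helper);
        if (ret)
                goto err;

        ret = drm_fb_helper_initial_config(helper, bpp);
        if (ret)
                goto err;

        return 0;
err:
        drm_fb_helper_fini(helper);
        return ret;
}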
 
#ifdef CONFIG_DRM_FBDEV_EMULATION
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs);
int drm_fb_helper_init(struct drm_device *dev,
161,43 → 116,16
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
 
int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
 
struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper);
bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height);
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth);
 
void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
 
ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos);
ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos);
 
void drm_fb_helper_sys_fillrect(struct fb_info *info,
const struct fb_fillrect *rect);
void drm_fb_helper_sys_copyarea(struct fb_info *info,
const struct fb_copyarea *area);
void drm_fb_helper_sys_imageblit(struct fb_info *info,
const struct fb_image *image);
 
void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect);
void drm_fb_helper_cfb_copyarea(struct fb_info *info,
const struct fb_copyarea *area);
void drm_fb_helper_cfb_imageblit(struct fb_info *info,
const struct fb_image *image);
 
void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state);
 
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
211,188 → 139,4
int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs)
{
}
 
static inline int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *helper, int crtc_count,
int max_conn)
{
return 0;
}
 
static inline void drm_fb_helper_fini(struct drm_fb_helper *helper)
{
}
 
static inline int drm_fb_helper_blank(int blank, struct fb_info *info)
{
return 0;
}
 
static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
return 0;
}
 
static inline int drm_fb_helper_set_par(struct fb_info *info)
{
return 0;
}
 
static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
return 0;
}
 
static inline int
drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
{
return 0;
}
 
static inline struct fb_info *
drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
{
return NULL;
}
 
static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
{
}
static inline void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
{
}
 
static inline void drm_fb_helper_fill_var(struct fb_info *info,
struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
}
 
static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
}
 
static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap,
struct fb_info *info)
{
return 0;
}
 
static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
{
}
 
static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
char __user *buf, size_t count,
loff_t *ppos)
{
return -ENODEV;
}
 
static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info,
const char __user *buf,
size_t count, loff_t *ppos)
{
return -ENODEV;
}
 
static inline void drm_fb_helper_sys_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
}
 
static inline void drm_fb_helper_sys_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
}
 
static inline void drm_fb_helper_sys_imageblit(struct fb_info *info,
const struct fb_image *image)
{
}
 
static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
}
 
static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
}
 
static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
}
 
static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
int state)
{
}
 
static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
return 0;
}
 
static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper,
int bpp_sel)
{
return 0;
}
 
static inline int
drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
return 0;
}
 
static inline int drm_fb_helper_debug_enter(struct fb_info *info)
{
return 0;
}
 
static inline int drm_fb_helper_debug_leave(struct fb_info *info)
{
return 0;
}
 
static inline struct drm_display_mode *
drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
int width, int height)
{
return NULL;
}
 
static inline struct drm_display_mode *
drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
int width, int height)
{
return NULL;
}
 
static inline int
drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
return 0;
}
 
static inline int
drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
return 0;
}
#endif
#endif
/drivers/include/drm/drm_gem.h
119,6 → 119,13
* simply leave it as NULL.
*/
struct dma_buf_attachment *import_attach;
 
/**
* dumb - created as dumb buffer
* Whether the gem object was created using the dumb buffer interface;
* as such it may not be used for GPU rendering.
*/
bool dumb;
};
 
void drm_gem_object_release(struct drm_gem_object *obj);
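
The dumb flag documented above lets a driver reject GPU rendering on buffers that were only ever meant for CPU-filled scanout. A minimal sketch of such a check in a hypothetical submission path:

/* Illustrative only: refuse to use a dumb buffer for rendering. */
static int example_validate_for_render(struct drm_gem_object *obj)
{
        if (obj->dumb)
                return -EINVAL; /* dumb buffers are scanout/CPU only */
        return 0;
}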
/drivers/include/drm/drm_modeset_lock.h
43,7 → 43,7
 
struct ww_acquire_ctx ww_ctx;
 
/*
/**
* Contended lock: if a lock is contended you should only call
* drm_modeset_backoff() which drops locks and slow-locks the
* contended lock.
50,12 → 50,12
*/
struct drm_modeset_lock *contended;
 
/*
/**
* list of held locks (drm_modeset_lock)
*/
struct list_head locked;
 
/*
/**
* Trylock mode, use only for panic handlers!
*/
bool trylock_only;
70,12 → 70,12
* Used for locking CRTCs and other modeset resources.
*/
struct drm_modeset_lock {
/*
/**
* modeset lock
*/
struct ww_mutex mutex;
 
/*
/**
* Resources that are locked as part of an atomic update are added
* to a list (so we know what to unlock at the end).
*/
130,6 → 130,7
struct drm_plane;
 
void drm_modeset_lock_all(struct drm_device *dev);
int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
void drm_modeset_unlock_all(struct drm_device *dev);
void drm_modeset_lock_crtc(struct drm_crtc *crtc,
struct drm_plane *plane);
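
The contended/backoff protocol described in the comments above follows the usual ww_mutex pattern: on -EDEADLK, drop everything, slow-lock the contended lock, and retry. A hedged sketch, assuming the drm_modeset_acquire_init()/drm_modeset_lock()/drm_modeset_drop_locks()/drm_modeset_acquire_fini() helpers that accompany this header:

/* Illustrative only: take one CRTC lock, backing off on contention. */
static int example_lock_crtc(struct drm_crtc *crtc,
                             struct drm_modeset_acquire_ctx *ctx)
{
        int ret;

        drm_modeset_acquire_init(ctx, 0);
retry:
        ret = drm_modeset_lock(&crtc->mutex, ctx);
        if (ret == -EDEADLK) {
                drm_modeset_backoff(ctx);
                goto retry;
        }
        if (ret) {
                drm_modeset_acquire_fini(ctx);
                return ret;
        }

        /* ... touch modeset state ... */

        drm_modeset_drop_locks(ctx);
        drm_modeset_acquire_fini(ctx);
        return 0;
}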
/drivers/include/drm/drm_pciids.h
172,7 → 172,6
{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
187,7 → 186,6
{0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
/drivers/include/drm/drm_plane_helper.h
43,7 → 43,8
* planes.
*/
 
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
extern int drm_crtc_init(struct drm_device *dev,
struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs);
 
/**
51,32 → 52,29
* @prepare_fb: prepare a framebuffer for use by the plane
* @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane
* @atomic_check: check that a given atomic state is valid and can be applied
* @atomic_update: apply an atomic state to the plane (mandatory)
* @atomic_disable: disable the plane
* @atomic_update: apply an atomic state to the plane
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_plane_helper_funcs {
int (*prepare_fb)(struct drm_plane *plane,
const struct drm_plane_state *new_state);
struct drm_framebuffer *fb);
void (*cleanup_fb)(struct drm_plane *plane,
const struct drm_plane_state *old_state);
struct drm_framebuffer *fb);
 
int (*atomic_check)(struct drm_plane *plane,
struct drm_plane_state *state);
void (*atomic_update)(struct drm_plane *plane,
struct drm_plane_state *old_state);
void (*atomic_disable)(struct drm_plane *plane,
struct drm_plane_state *old_state);
};
 
static inline void drm_plane_helper_add(struct drm_plane *plane,
const struct drm_plane_helper_funcs *funcs)
{
plane->helper_private = funcs;
plane->helper_private = (void *)funcs;
}
 
int drm_plane_helper_check_update(struct drm_plane *plane,
extern int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_rect *src,
87,7 → 85,7
bool can_position,
bool can_update_disabled,
bool *visible);
int drm_primary_helper_update(struct drm_plane *plane,
extern int drm_primary_helper_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
94,10 → 92,14
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
int drm_primary_helper_disable(struct drm_plane *plane);
void drm_primary_helper_destroy(struct drm_plane *plane);
extern int drm_primary_helper_disable(struct drm_plane *plane);
extern void drm_primary_helper_destroy(struct drm_plane *plane);
extern const struct drm_plane_funcs drm_primary_helper_funcs;
extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
const uint32_t *formats,
int num_formats);
 
 
int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
/drivers/include/drm/i915_pciids.h
208,41 → 208,40
#define INTEL_VLV_D_IDS(info) \
INTEL_VGA_DEVICE(0x0155, info)
 
#define _INTEL_BDW_M(gt, id, info) \
INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
#define _INTEL_BDW_D(gt, id, info) \
INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
 
#define _INTEL_BDW_M_IDS(gt, info) \
_INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \
_INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
_INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \
_INTEL_BDW_M(gt, 0x160E, info) /* ULX */
 
#define _INTEL_BDW_D_IDS(gt, info) \
_INTEL_BDW_D(gt, 0x160A, info), /* Server */ \
_INTEL_BDW_D(gt, 0x160D, info) /* Workstation */
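
For reference, the refactored macros encode the GT level in bits 4-7 of the device ID, so the expansions reproduce the literal tables they replace, e.g.:

/* _INTEL_BDW_M(2, 0x1606, info)
 *   => INTEL_VGA_DEVICE(((2 - 1) << 4) | 0x1606, info)
 *   => INTEL_VGA_DEVICE(0x1616, info)   -- the GT2 ULT entry below */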
 
#define INTEL_BDW_GT12M_IDS(info) \
INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \
INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */
_INTEL_BDW_M_IDS(1, info), \
_INTEL_BDW_M_IDS(2, info)
 
#define INTEL_BDW_GT12D_IDS(info) \
INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
_INTEL_BDW_D_IDS(1, info), \
_INTEL_BDW_D_IDS(2, info)
 
#define INTEL_BDW_GT3M_IDS(info) \
INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \
INTEL_VGA_DEVICE(0x162E, info) /* ULX */
_INTEL_BDW_M_IDS(3, info)
 
#define INTEL_BDW_GT3D_IDS(info) \
INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
_INTEL_BDW_D_IDS(3, info)
 
#define INTEL_BDW_RSVDM_IDS(info) \
INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \
INTEL_VGA_DEVICE(0x163E, info) /* ULX */
_INTEL_BDW_M_IDS(4, info)
 
#define INTEL_BDW_RSVDD_IDS(info) \
INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
_INTEL_BDW_D_IDS(4, info)
 
#define INTEL_BDW_M_IDS(info) \
INTEL_BDW_GT12M_IDS(info), \
260,35 → 259,21
INTEL_VGA_DEVICE(0x22b2, info), \
INTEL_VGA_DEVICE(0x22b3, info)
 
#define INTEL_SKL_GT1_IDS(info) \
#define INTEL_SKL_IDS(info) \
INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */
 
#define INTEL_SKL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
 
#define INTEL_SKL_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */
 
#define INTEL_SKL_IDS(info) \
INTEL_SKL_GT1_IDS(info), \
INTEL_SKL_GT2_IDS(info), \
INTEL_SKL_GT3_IDS(info)
 
#define INTEL_BXT_IDS(info) \
INTEL_VGA_DEVICE(0x0A84, info), \
INTEL_VGA_DEVICE(0x1A84, info), \
INTEL_VGA_DEVICE(0x5A84, info)
 
#endif /* _I915_PCIIDS_H */
/drivers/include/drm/intel-gtt.h
3,8 → 3,8
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
 
void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
phys_addr_t *mappable_base, u64 *mappable_end);
void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
phys_addr_t *mappable_base, unsigned long *mappable_end);
 
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
/drivers/include/drm/ttm/ttm_bo_api.h
249,7 → 249,7
* either of these locks held.
*/
 
uint64_t offset; /* GPU address space is independent of CPU word size */
unsigned long offset;
uint32_t cur_placement;
 
struct sg_table *sg;
/drivers/include/drm/ttm/ttm_bo_driver.h
277,7 → 277,7
bool has_type;
bool use_type;
uint32_t flags;
uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
unsigned long gpu_offset;
uint64_t size;
uint32_t available_caching;
uint32_t default_caching;
/drivers/include/drm/ttm/ttm_lock.h
51,7 → 51,7
 
#include <ttm/ttm_object.h>
#include <linux/wait.h>
#include <linux/atomic.h>
//#include <linux/atomic.h>
 
/**
* struct ttm_lock
/drivers/include/drm/drm_mm.h
68,8 → 68,8
unsigned scanned_preceeds_hole : 1;
unsigned allocated : 1;
unsigned long color;
u64 start;
u64 size;
unsigned long start;
unsigned long size;
struct drm_mm *mm;
};
 
82,16 → 82,16
unsigned int scan_check_range : 1;
unsigned scan_alignment;
unsigned long scan_color;
u64 scan_size;
u64 scan_hit_start;
u64 scan_hit_end;
unsigned long scan_size;
unsigned long scan_hit_start;
unsigned long scan_hit_end;
unsigned scanned_blocks;
u64 scan_start;
u64 scan_end;
unsigned long scan_start;
unsigned long scan_end;
struct drm_mm_node *prev_scanned_node;
 
void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
u64 *start, u64 *end);
unsigned long *start, unsigned long *end);
};
 
/**
124,7 → 124,7
return mm->hole_stack.next;
}
 
static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
return hole_node->start + hole_node->size;
}
140,13 → 140,13
* Returns:
* Start of the subsequent hole.
*/
static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
BUG_ON(!hole_node->hole_follows);
return __drm_mm_hole_node_start(hole_node);
}
 
static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
return list_entry(hole_node->node_list.next,
struct drm_mm_node, node_list)->start;
163,7 → 163,7
* Returns:
* End of the subsequent hole.
*/
static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
return __drm_mm_hole_node_end(hole_node);
}
222,7 → 222,7
 
int drm_mm_insert_node_generic(struct drm_mm *mm,
struct drm_mm_node *node,
u64 size,
unsigned long size,
unsigned alignment,
unsigned long color,
enum drm_mm_search_flags sflags,
245,7 → 245,7
*/
static inline int drm_mm_insert_node(struct drm_mm *mm,
struct drm_mm_node *node,
u64 size,
unsigned long size,
unsigned alignment,
enum drm_mm_search_flags flags)
{
255,11 → 255,11
 
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
struct drm_mm_node *node,
u64 size,
unsigned long size,
unsigned alignment,
unsigned long color,
u64 start,
u64 end,
unsigned long start,
unsigned long end,
enum drm_mm_search_flags sflags,
enum drm_mm_allocator_flags aflags);
/**
282,10 → 282,10
*/
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
struct drm_mm_node *node,
u64 size,
unsigned long size,
unsigned alignment,
u64 start,
u64 end,
unsigned long start,
unsigned long end,
enum drm_mm_search_flags flags)
{
return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
296,21 → 296,21
void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm,
u64 start,
u64 size);
unsigned long start,
unsigned long size);
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);
 
void drm_mm_init_scan(struct drm_mm *mm,
u64 size,
unsigned long size,
unsigned alignment,
unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
u64 size,
unsigned long size,
unsigned alignment,
unsigned long color,
u64 start,
u64 end);
unsigned long start,
unsigned long end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);
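
A hedged sketch of the allocator's basic life cycle, using the unsigned-long signatures shown on the right-hand side of this diff; DRM_MM_SEARCH_DEFAULT is assumed from the elided enum drm_mm_search_flags:

/* Illustrative only: carve one node out of a fresh range manager.
 * @node must be zero-initialized by the caller. */
static int example_mm_alloc(struct drm_mm *mm, struct drm_mm_node *node,
                            unsigned long range, unsigned long size)
{
        int ret;

        drm_mm_init(mm, 0, range);

        ret = drm_mm_insert_node(mm, node, size, 0, DRM_MM_SEARCH_DEFAULT);
        if (ret) /* -ENOSPC if no hole is large enough */
                return ret;

        /* ... use node->start .. node->start + node->size ... */

        drm_mm_remove_node(node);
        drm_mm_takedown(mm);
        return 0;
}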
 
/drivers/include/drm/drm_modes.h
90,9 → 90,6
 
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
#define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */
#define CRTC_NO_VSCAN (1 << 3) /* don't adjust vscan */
#define CRTC_STEREO_DOUBLE_ONLY (CRTC_STEREO_DOUBLE | CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)
 
#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
 
182,10 → 179,6
 
struct drm_display_mode *drm_mode_create(struct drm_device *dev);
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
const struct drm_display_mode *in);
int drm_mode_convert_umode(struct drm_display_mode *out,
const struct drm_mode_modeinfo *in);
void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
 
204,8 → 197,6
int GTF_K, int GTF_2J);
void drm_display_mode_from_videomode(const struct videomode *vm,
struct drm_display_mode *dmode);
void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
struct videomode *vm);
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode,
int index);
226,8 → 217,8
const struct drm_display_mode *mode2);
 
/* for use by the crtc helper probe functions */
enum drm_mode_status drm_mode_validate_basic(const struct drm_display_mode *mode);
enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode,
void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
int maxX, int maxY);
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
/drivers/include/drm/drm_vma_manager.h
54,6 → 54,9
unsigned long page_offset, unsigned long size);
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
 
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages);
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages);
68,12 → 71,12
struct file *filp);
 
/**
* drm_vma_offset_exact_lookup_locked() - Look up node by exact address
* drm_vma_offset_exact_lookup() - Look up node by exact address
* @mgr: Manager object
* @start: Start address (page-based, not byte-based)
* @pages: Size of object (page-based)
*
* Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node.
* Same as drm_vma_offset_lookup() but does not allow any offset into the node.
* It only returns the exact object with the given start address.
*
* RETURNS:
80,13 → 83,13
* Node at exact start address @start.
*/
static inline struct drm_vma_offset_node *
drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)
{
struct drm_vma_offset_node *node;
 
node = drm_vma_offset_lookup_locked(mgr, start, pages);
node = drm_vma_offset_lookup(mgr, start, pages);
return (node && node->vm_node.start == start) ? node : NULL;
}
 
94,7 → 97,7
* drm_vma_offset_lock_lookup() - Lock lookup for extended private use
* @mgr: Manager object
*
* Lock VMA manager for extended lookups. Only locked VMA function calls
* Lock VMA manager for extended lookups. Only *_locked() VMA function calls
* are allowed while holding this lock. All other contexts are blocked from VMA
* until the lock is released via drm_vma_offset_unlock_lookup().
*
105,6 → 108,13
* not call any other VMA helpers while holding this lock.
*
* Note: You're in atomic-context while holding this lock!
*
* Example:
* drm_vma_offset_lock_lookup(mgr);
* node = drm_vma_offset_lookup_locked(mgr);
* if (node)
* kref_get_unless_zero(container_of(node, sth, entr));
* drm_vma_offset_unlock_lookup(mgr);
*/
static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
{
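
Expanding the kernel-doc example above into compilable form, with a hypothetical object type (my_obj and its ref/vma_node members are inventions for illustration; kref_get_unless_zero() is safe in the atomic context this lock imposes):

/* Illustrative only: resolve an mmap offset to a refcounted object. */
struct my_obj {
        struct kref ref;
        struct drm_vma_offset_node vma_node;
};

static struct my_obj *example_lookup(struct drm_vma_offset_manager *mgr,
                                     unsigned long start, unsigned long pages)
{
        struct drm_vma_offset_node *node;
        struct my_obj *obj = NULL;

        drm_vma_offset_lock_lookup(mgr);
        node = drm_vma_offset_lookup_locked(mgr, start, pages);
        if (node) {
                obj = container_of(node, struct my_obj, vma_node);
                if (!kref_get_unless_zero(&obj->ref))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(mgr);

        return obj;
}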
/drivers/include/video/display_timing.h
File deleted
/drivers/include/video/videomode.h
File deleted
/drivers/include/video/of_videomode.h
File deleted