Subversion Repositories Kolibri OS

Compare Revisions

Rev 5352 → Rev 6082

/drivers/include/asm/alternative.h
5,6 → 5,7
#include <linux/stddef.h>
#include <linux/stringify.h>
#include <asm/asm.h>
#include <asm/ptrace.h>
 
/*
* Alternative inline assembly for SMP.
47,9 → 48,16
s32 repl_offset; /* offset to replacement instruction */
u16 cpuid; /* cpuid bit set for replacement */
u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction, <= instrlen */
};
u8 replacementlen; /* length of new instruction */
u8 padlen; /* length of build-time padding */
} __packed;
 
/*
* Debug flag that can be tested to see whether alternative
* instructions were patched in already:
*/
extern int alternatives_patched;
 
extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 
75,50 → 83,69
}
#endif /* CONFIG_SMP */
 
#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n"
#define b_replacement(num) "664"#num
#define e_replacement(num) "665"#num
 
#define b_replacement(number) "663"#number
#define e_replacement(number) "664"#number
 
#define alt_end_marker "663"
#define alt_slen "662b-661b"
#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f"
#define alt_pad_len alt_end_marker"b-662b"
#define alt_total_slen alt_end_marker"b-661b"
#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f"
 
#define ALTINSTR_ENTRY(feature, number) \
#define __OLDINSTR(oldinstr, num) \
"661:\n\t" oldinstr "\n662:\n" \
".skip -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \
"((" alt_rlen(num) ")-(" alt_slen ")),0x90\n"
 
#define OLDINSTR(oldinstr, num) \
__OLDINSTR(oldinstr, num) \
alt_end_marker ":\n"
 
/*
* max without conditionals. Idea adapted from:
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
*
* The additional "-" is needed because gas works with s32s.
*/
#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
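
A minimal C rendering of the same bithack, for reference (in C the comparison yields 0 or 1, so a single negation builds the mask):

static inline int max_branchless(int a, int b)
{
	return a ^ ((a ^ b) & -(a < b));	/* mask = ~0 when a < b, else 0 */
}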
 
/*
* Pad the second replacement alternative with additional NOPs if it is
* additionally longer than the first replacement alternative.
*/
#define OLDINSTR_2(oldinstr, num1, num2) \
"661:\n\t" oldinstr "\n662:\n" \
".skip -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \
"(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")), 0x90\n" \
alt_end_marker ":\n"
 
#define ALTINSTR_ENTRY(feature, num) \
" .long 661b - .\n" /* label */ \
" .long " b_replacement(number)"f - .\n" /* new instruction */ \
" .long " b_replacement(num)"f - .\n" /* new instruction */ \
" .word " __stringify(feature) "\n" /* feature bit */ \
" .byte " alt_slen "\n" /* source len */ \
" .byte " alt_rlen(number) "\n" /* replacement len */
" .byte " alt_total_slen "\n" /* source len */ \
" .byte " alt_rlen(num) "\n" /* replacement len */ \
" .byte " alt_pad_len "\n" /* pad len */
 
#define DISCARD_ENTRY(number) /* rlen <= slen */ \
" .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"
#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \
b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t"
 
#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \
b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t"
 
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
OLDINSTR(oldinstr) \
OLDINSTR(oldinstr, 1) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature, 1) \
".popsection\n" \
".pushsection .discard,\"aw\",@progbits\n" \
DISCARD_ENTRY(1) \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
".popsection"
 
#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
OLDINSTR(oldinstr) \
OLDINSTR_2(oldinstr, 1, 2) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature1, 1) \
ALTINSTR_ENTRY(feature2, 2) \
".popsection\n" \
".pushsection .discard,\"aw\",@progbits\n" \
DISCARD_ENTRY(1) \
DISCARD_ENTRY(2) \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
145,6 → 172,9
#define alternative(oldinstr, newinstr, feature) \
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
 
#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory")
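
For reference, a typical call site is the rdtsc_barrier() that this same revision removes from barrier.h further down:

static __always_inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}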
 
/*
* Alternative inline assembly with input.
*
/drivers/include/asm/arch_hweight.h
21,15 → 21,13
* ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
* compiler switches.
*/
static inline unsigned int __arch_hweight32(unsigned int w)
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res = 0;
 
asm ("call __sw_hweight32"
: "="REG_OUT (res)
: REG_IN (w));
 
return res;
unsigned int res = w - ((w >> 1) & 0x55555555);
res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
res = (res + (res >> 4)) & 0x0F0F0F0F;
res = res + (res >> 8);
return (res + (res >> 16)) & 0x000000FF;
}
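
The open-coded body is the standard SWAR population count. A stand-alone user-space sketch of the same steps, for verification:

#include <assert.h>

static unsigned int popcount32(unsigned int w)	/* same steps as above */
{
	unsigned int res = w - ((w >> 1) & 0x55555555);		/* 2-bit pair sums */
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);	/* nibble sums */
	res = (res + (res >> 4)) & 0x0F0F0F0F;			/* byte sums */
	res = res + (res >> 8);					/* halfword sums */
	return (res + (res >> 16)) & 0x000000FF;		/* grand total */
}

int main(void)
{
	assert(popcount32(0x00000000u) == 0);
	assert(popcount32(0xF0F0F0F0u) == 16);
	assert(popcount32(0xFFFFFFFFu) == 32);
	return 0;
}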
 
static inline unsigned int __arch_hweight16(unsigned int w)
42,20 → 40,23
return __arch_hweight32(w & 0xff);
}
 
#ifdef CONFIG_X86_32
static inline unsigned long __arch_hweight64(__u64 w)
{
unsigned long res = 0;
 
#ifdef CONFIG_X86_32
return __arch_hweight32((u32)w) +
__arch_hweight32((u32)(w >> 32));
}
#else
static __always_inline unsigned long __arch_hweight64(__u64 w)
{
unsigned long res = 0;
 
asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
: "="REG_OUT (res)
: REG_IN (w));
#endif /* CONFIG_X86_32 */
 
return res;
}
#endif /* CONFIG_X86_32 */
 
#endif
/drivers/include/asm/asm.h
63,6 → 63,31
_ASM_ALIGN ; \
_ASM_PTR (entry); \
.popsection
 
.macro ALIGN_DESTINATION
/* check for bad alignment of destination */
movl %edi,%ecx
andl $7,%ecx
jz 102f /* already aligned */
subl $8,%ecx
negl %ecx
subl %ecx,%edx
100: movb (%rsi),%al
101: movb %al,(%rdi)
incq %rsi
incq %rdi
decl %ecx
jnz 100b
102:
.section .fixup,"ax"
103: addl %ecx,%edx /* ecx is zerorest also */
jmp copy_user_handle_tail
.previous
 
_ASM_EXTABLE(100b,103b)
_ASM_EXTABLE(101b,103b)
.endm
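
In C terms, the new ALIGN_DESTINATION macro byte-copies the unaligned head of the destination before the caller's 8-byte bulk loop. Roughly (a sketch only; dst, src, len stand for %rdi, %rsi, %edx, and the fault fixup is omitted):

unsigned long head = (unsigned long)dst & 7;
if (head) {
	head = 8 - head;		/* bytes to copy to reach alignment */
	len -= head;
	while (head--)
		*dst++ = *src++;
}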
 
#else
# define _ASM_EXTABLE(from,to) \
" .pushsection \"__ex_table\",\"a\"\n" \
/drivers/include/asm/atomic.h
22,9 → 22,9
*
* Atomically reads the value of @v.
*/
static inline int atomic_read(const atomic_t *v)
static __always_inline int atomic_read(const atomic_t *v)
{
return ACCESS_ONCE((v)->counter);
return READ_ONCE((v)->counter);
}
 
/**
34,9 → 34,9
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic_set(atomic_t *v, int i)
static __always_inline void atomic_set(atomic_t *v, int i)
{
v->counter = i;
WRITE_ONCE(v->counter, i);
}
 
/**
46,7 → 46,7
*
* Atomically adds @i to @v.
*/
static inline void atomic_add(int i, atomic_t *v)
static __always_inline void atomic_add(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter)
60,7 → 60,7
*
* Atomically subtracts @i from @v.
*/
static inline void atomic_sub(int i, atomic_t *v)
static __always_inline void atomic_sub(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0"
: "+m" (v->counter)
76,7 → 76,7
* true if the result is zero, or false for all
* other cases.
*/
static inline int atomic_sub_and_test(int i, atomic_t *v)
static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
}
87,7 → 87,7
*
* Atomically increments @v by 1.
*/
static inline void atomic_inc(atomic_t *v)
static __always_inline void atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter));
99,7 → 99,7
*
* Atomically decrements @v by 1.
*/
static inline void atomic_dec(atomic_t *v)
static __always_inline void atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
: "+m" (v->counter));
113,7 → 113,7
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic_dec_and_test(atomic_t *v)
static __always_inline int atomic_dec_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
}
126,7 → 126,7
* and returns true if the result is zero, or false for all
* other cases.
*/
static inline int atomic_inc_and_test(atomic_t *v)
static __always_inline int atomic_inc_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
}
140,7 → 140,7
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline int atomic_add_negative(int i, atomic_t *v)
static __always_inline int atomic_add_negative(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
}
152,7 → 152,7
*
* Atomically adds @i to @v and returns @i + @v
*/
static inline int atomic_add_return(int i, atomic_t *v)
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
return i + xadd(&v->counter, i);
}
164,7 → 164,7
*
* Atomically subtracts @i from @v and returns @v - @i
*/
static inline int atomic_sub_return(int i, atomic_t *v)
static __always_inline int atomic_sub_return(int i, atomic_t *v)
{
return atomic_add_return(-i, v);
}
172,7 → 172,7
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
 
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
return cmpxchg(&v->counter, old, new);
}
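
atomic_cmpxchg() is the building block for conditional updates. A sketch of the usual retry loop — this is the pattern that __atomic_add_unless() below completes, assuming the mainline body, since the hunk cuts it off:

static inline int add_unless_sketch(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;			/* already at the forbidden value */
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;			/* swap succeeded */
		c = old;			/* lost a race: retry with the observed value */
	}
	return c;
}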
182,6 → 182,21
return xchg(&v->counter, new);
}
 
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
asm volatile(LOCK_PREFIX #op"l %1,%0" \
: "+m" (v->counter) \
: "ir" (i) \
: "memory"); \
}
 
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
 
#undef ATOMIC_OP
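
After expansion, three identical lock-prefixed helpers are available; an illustrative use:

static inline void flags_update(atomic_t *flags)
{
	atomic_and(~0x1, flags);	/* atomically clear bit 0 */
	atomic_or(0x2, flags);		/* atomically set bit 1 */
	atomic_xor(0x4, flags);		/* atomically toggle bit 2 */
}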
 
/**
* __atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
191,7 → 206,7
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns the old value of @v.
*/
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
213,22 → 228,12
* Atomically adds 1 to @v
* Returns the new value of @u
*/
static inline short int atomic_inc_short(short int *v)
static __always_inline short int atomic_inc_short(short int *v)
{
asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
return *v;
}
 
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
asm volatile(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)), "m" (*(addr)) : "memory")
 
#define atomic_set_mask(mask, addr) \
asm volatile(LOCK_PREFIX "orl %0,%1" \
: : "r" ((unsigned)(mask)), "m" (*(addr)) \
: "memory")
 
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
/drivers/include/asm/atomic64_32.h
4,7 → 4,7
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/processor.h>
//#include <asm/cmpxchg.h>
#include <asm/cmpxchg.h>
 
/* An 64bit atomic type */
 
/drivers/include/asm/barrier.h
35,12 → 35,12
#define smp_mb() mb()
#define smp_rmb() dma_rmb()
#define smp_wmb() barrier()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */
 
#define read_barrier_depends() do { } while (0)
57,12 → 57,12
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
ACCESS_ONCE(*p) = (v); \
WRITE_ONCE(*p, v); \
} while (0)
 
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
74,12 → 74,12
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
ACCESS_ONCE(*p) = (v); \
WRITE_ONCE(*p, v); \
} while (0)
 
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
___p1; \
91,17 → 91,4
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()
 
/*
* Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined
* code region.
*
* (Could use an alternative three way for this if there was one.)
*/
static __always_inline void rdtsc_barrier(void)
{
alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
 
#endif /* _ASM_X86_BARRIER_H */
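
The smp_store_release()/smp_load_acquire() pair above implements the usual message-passing idiom; a sketch with hypothetical names:

static int payload;
static int ready;

static void producer(void)
{
	payload = 42;			/* ordinary store */
	smp_store_release(&ready, 1);	/* orders the payload store before 'ready' */
}

static int consumer(void)
{
	if (smp_load_acquire(&ready))	/* pairs with the release above */
		return payload;		/* guaranteed to observe 42 */
	return -1;
}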
/drivers/include/asm/cacheflush.h
8,7 → 8,7
/*
* The set_memory_* API can be used to change various attributes of a virtual
* address range. The attributes include:
* Cacheability : UnCached, WriteCombining, WriteBack
* Cacheability : UnCached, WriteCombining, WriteThrough, WriteBack
* Executability : eXecutable, NoteXecutable
* Read/Write : ReadOnly, ReadWrite
* Presence : NotPresent
35,9 → 35,11
 
int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wt(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wt(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
48,10 → 50,12
 
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wt(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);
 
int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wt(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
 
/*
105,9 → 109,10
};
 
 
 
void clflush_cache_range(void *addr, unsigned int size);
 
#define mmio_flush_range(addr, size) clflush_cache_range(addr, size)
 
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
/drivers/include/asm/cpufeature.h
12,7 → 12,7
#include <asm/disabled-features.h>
#endif
 
#define NCAPINTS 11 /* N 32-bit words worth of info */
#define NCAPINTS 14 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
 
/*
119,6 → 119,7
#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
174,7 → 175,9
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
 
/*
* Auxiliary flags: Linux defined - For features scattered in various
190,10 → 193,11
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */
#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
 
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
212,6 → 216,7
#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
 
 
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
225,15 → 230,19
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
 
/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
241,6 → 250,15
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
 
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
 
/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
 
/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
 
/*
* BUG word(s)
*/
254,6 → 272,7
#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
 
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
388,6 → 407,7
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
 
#if __GNUC__ >= 4
extern void warn_pre_alternatives(void);
416,6 → 436,7
" .word %P0\n" /* 1: do replace */
" .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
/* skipping size check since replacement size = 0 */
: : "i" (X86_FEATURE_ALWAYS) : : t_warn);
430,6 → 451,7
" .word %P0\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
/* skipping size check since replacement size = 0 */
: : "i" (bit) : : t_no);
455,6 → 477,7
" .word %P1\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
481,31 → 504,30
static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
{
#ifdef CC_HAVE_ASM_GOTO
/*
* We need to spell the jumps to the compiler because, depending on the offset,
* the replacement jump can be bigger than the original jump, and this we cannot
* have. Thus, we force the jump to the widest, 4-byte, signed relative
* offset even though the last would often fit in less bytes.
*/
asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
asm_volatile_goto("1: jmp %l[t_dynamic]\n"
"2:\n"
".skip -(((5f-4f) - (2b-1b)) > 0) * "
"((5f-4f) - (2b-1b)),0x90\n"
"3:\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 3f - .\n" /* repl offset */
" .long 4f - .\n" /* repl offset */
" .word %P1\n" /* always replace */
" .byte 2b - 1b\n" /* src len */
" .byte 4f - 3f\n" /* repl len */
" .byte 3b - 1b\n" /* src len */
" .byte 5f - 4f\n" /* repl len */
" .byte 3b - 2b\n" /* pad len */
".previous\n"
".section .altinstr_replacement,\"ax\"\n"
"3: .byte 0xe9\n .long %l[t_no] - 2b\n"
"4:\n"
"4: jmp %l[t_no]\n"
"5:\n"
".previous\n"
".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */
" .long 0\n" /* no replacement */
" .word %P0\n" /* feature bit */
" .byte 2b - 1b\n" /* src len */
" .byte 3b - 1b\n" /* src len */
" .byte 0\n" /* repl len */
" .byte 0\n" /* pad len */
".previous\n"
: : "i" (bit), "i" (X86_FEATURE_ALWAYS)
: : t_dynamic, t_no);
525,6 → 547,7
" .word %P2\n" /* always replace */
" .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
539,6 → 562,7
" .word %P1\n" /* feature bit */
" .byte 4b - 3b\n" /* src len */
" .byte 6f - 5f\n" /* repl len */
" .byte 0\n" /* pad len */
".previous\n"
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
/drivers/include/asm/delay.h
4,5 → 4,6
#include <asm-generic/delay.h>
 
void use_tsc_delay(void);
void use_mwaitx_delay(void);
 
#endif /* _ASM_X86_DELAY_H */
/drivers/include/asm/e820.h
40,14 → 40,6
}
#endif
 
#ifdef CONFIG_MEMTEST
extern void early_memtest(unsigned long start, unsigned long end);
#else
static inline void early_memtest(unsigned long start, unsigned long end)
{
}
#endif
 
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
extern u64 early_reserve_e820(u64 sizet, u64 align);
/drivers/include/asm/fpu/types.h
0,0 → 1,355
/*
* FPU data structures:
*/
#ifndef _ASM_X86_FPU_H
#define _ASM_X86_FPU_H
 
/*
* The legacy x87 FPU state format, as saved by FSAVE and
* restored by the FRSTOR instructions:
*/
struct fregs_state {
u32 cwd; /* FPU Control Word */
u32 swd; /* FPU Status Word */
u32 twd; /* FPU Tag Word */
u32 fip; /* FPU IP Offset */
u32 fcs; /* FPU IP Selector */
u32 foo; /* FPU Operand Pointer Offset */
u32 fos; /* FPU Operand Pointer Selector */
 
/* 8*10 bytes for each FP-reg = 80 bytes: */
u32 st_space[20];
 
/* Software status information [not touched by FSAVE]: */
u32 status;
};
 
/*
* The legacy fx SSE/MMX FPU state format, as saved by FXSAVE and
* restored by the FXRSTOR instructions. It's similar to the FSAVE
* format, but differs in some areas, plus has extensions at
* the end for the XMM registers.
*/
struct fxregs_state {
u16 cwd; /* Control Word */
u16 swd; /* Status Word */
u16 twd; /* Tag Word */
u16 fop; /* Last Instruction Opcode */
union {
struct {
u64 rip; /* Instruction Pointer */
u64 rdp; /* Data Pointer */
};
struct {
u32 fip; /* FPU IP Offset */
u32 fcs; /* FPU IP Selector */
u32 foo; /* FPU Operand Offset */
u32 fos; /* FPU Operand Selector */
};
};
u32 mxcsr; /* MXCSR Register State */
u32 mxcsr_mask; /* MXCSR Mask */
 
/* 8*16 bytes for each FP-reg = 128 bytes: */
u32 st_space[32];
 
/* 16*16 bytes for each XMM-reg = 256 bytes: */
u32 xmm_space[64];
 
u32 padding[12];
 
union {
u32 padding1[12];
u32 sw_reserved[12];
};
 
} __attribute__((aligned(16)));
 
/* Default value for fxregs_state.mxcsr: */
#define MXCSR_DEFAULT 0x1f80
 
/*
* Software-based FPU emulation state. The layout is arbitrary, really;
* it matches the x87 format to make it easier to understand:
*/
struct swregs_state {
u32 cwd;
u32 swd;
u32 twd;
u32 fip;
u32 fcs;
u32 foo;
u32 fos;
/* 8*10 bytes for each FP-reg = 80 bytes: */
u32 st_space[20];
u8 ftop;
u8 changed;
u8 lookahead;
u8 no_update;
u8 rm;
u8 alimit;
struct math_emu_info *info;
u32 entry_eip;
};
 
/*
* List of XSAVE features Linux knows about:
*/
enum xfeature {
XFEATURE_FP,
XFEATURE_SSE,
/*
* Values above here are "legacy states".
* Those below are "extended states".
*/
XFEATURE_YMM,
XFEATURE_BNDREGS,
XFEATURE_BNDCSR,
XFEATURE_OPMASK,
XFEATURE_ZMM_Hi256,
XFEATURE_Hi16_ZMM,
 
XFEATURE_MAX,
};
 
#define XFEATURE_MASK_FP (1 << XFEATURE_FP)
#define XFEATURE_MASK_SSE (1 << XFEATURE_SSE)
#define XFEATURE_MASK_YMM (1 << XFEATURE_YMM)
#define XFEATURE_MASK_BNDREGS (1 << XFEATURE_BNDREGS)
#define XFEATURE_MASK_BNDCSR (1 << XFEATURE_BNDCSR)
#define XFEATURE_MASK_OPMASK (1 << XFEATURE_OPMASK)
#define XFEATURE_MASK_ZMM_Hi256 (1 << XFEATURE_ZMM_Hi256)
#define XFEATURE_MASK_Hi16_ZMM (1 << XFEATURE_Hi16_ZMM)
 
#define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
#define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK \
| XFEATURE_MASK_ZMM_Hi256 \
| XFEATURE_MASK_Hi16_ZMM)
 
#define FIRST_EXTENDED_XFEATURE XFEATURE_YMM
 
struct reg_128_bit {
u8 regbytes[128/8];
};
struct reg_256_bit {
u8 regbytes[256/8];
};
struct reg_512_bit {
u8 regbytes[512/8];
};
 
/*
* State component 2:
*
* There are 16x 256-bit AVX registers named YMM0-YMM15.
* The low 128 bits are aliased to the 16 SSE registers (XMM0-XMM15)
* and are stored in 'struct fxregs_state::xmm_space[]' in the
* "legacy" area.
*
* The high 128 bits are stored here.
*/
struct ymmh_struct {
struct reg_128_bit hi_ymm[16];
} __packed;
 
/* Intel MPX support: */
 
struct mpx_bndreg {
u64 lower_bound;
u64 upper_bound;
} __packed;
/*
* State component 3 is used for the 4 128-bit bounds registers
*/
struct mpx_bndreg_state {
struct mpx_bndreg bndreg[4];
} __packed;
 
/*
* State component 4 is used for the 64-bit user-mode MPX
* configuration register BNDCFGU and the 64-bit MPX status
* register BNDSTATUS. We call the pair "BNDCSR".
*/
struct mpx_bndcsr {
u64 bndcfgu;
u64 bndstatus;
} __packed;
 
/*
* The BNDCSR state is padded out to be 64-bytes in size.
*/
struct mpx_bndcsr_state {
union {
struct mpx_bndcsr bndcsr;
u8 pad_to_64_bytes[64];
};
} __packed;
 
/* AVX-512 Components: */
 
/*
* State component 5 is used for the 8 64-bit opmask registers
* k0-k7 (opmask state).
*/
struct avx_512_opmask_state {
u64 opmask_reg[8];
} __packed;
 
/*
* State component 6 is used for the upper 256 bits of the
* registers ZMM0-ZMM15. These 16 256-bit values are denoted
* ZMM0_H-ZMM15_H (ZMM_Hi256 state).
*/
struct avx_512_zmm_uppers_state {
struct reg_256_bit zmm_upper[16];
} __packed;
 
/*
* State component 7 is used for the 16 512-bit registers
* ZMM16-ZMM31 (Hi16_ZMM state).
*/
struct avx_512_hi16_state {
struct reg_512_bit hi16_zmm[16];
} __packed;
 
struct xstate_header {
u64 xfeatures;
u64 xcomp_bv;
u64 reserved[6];
} __attribute__((packed));
 
/*
* This is our most modern FPU state format, as saved by the XSAVE
* and restored by the XRSTOR instructions.
*
* It consists of a legacy fxregs portion, an xstate header and
* subsequent areas as defined by the xstate header. Not all CPUs
* support all the extensions, so the size of the extended area
* can vary quite a bit between CPUs.
*/
struct xregs_state {
struct fxregs_state i387;
struct xstate_header header;
u8 extended_state_area[0];
} __attribute__ ((packed, aligned (64)));
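
A minimal sketch (helper name hypothetical) of how the header bitmap is tested against the masks defined earlier in this file:

/* true if the image carries all three AVX-512 state components */
static inline int xregs_has_avx512(const struct xregs_state *x)
{
	return (x->header.xfeatures & XFEATURE_MASK_AVX512) == XFEATURE_MASK_AVX512;
}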
 
/*
* This is a union of all the possible FPU state formats
* put together, so that we can pick the right one at runtime.
*
* The size of the structure is determined by the largest
* member - which is the xsave area. The padding is there
* to ensure that statically-allocated task_structs (just
* the init_task today) have enough space.
*/
union fpregs_state {
struct fregs_state fsave;
struct fxregs_state fxsave;
struct swregs_state soft;
struct xregs_state xsave;
u8 __padding[PAGE_SIZE];
};
 
/*
* Highest level per task FPU state data structure that
* contains the FPU register state plus various FPU
* state fields:
*/
struct fpu {
/*
* @last_cpu:
*
* Records the last CPU on which this context was loaded into
* FPU registers. (In the lazy-restore case we might be
* able to reuse FPU registers across multiple context switches
* this way, if no intermediate task used the FPU.)
*
* A value of -1 is used to indicate that the FPU state in context
* memory is newer than the FPU state in registers, and that the
* FPU state should be reloaded next time the task is run.
*/
unsigned int last_cpu;
 
/*
* @fpstate_active:
*
* This flag indicates whether this context is active: if the task
* is not running then we can restore from this context, if the task
* is running then we should save into this context.
*/
unsigned char fpstate_active;
 
/*
* @fpregs_active:
*
* This flag determines whether a given context is actively
* loaded into the FPU's registers and that those registers
* represent the task's current FPU state.
*
* Note the interaction with fpstate_active:
*
* # task does not use the FPU:
* fpstate_active == 0
*
* # task uses the FPU and regs are active:
* fpstate_active == 1 && fpregs_active == 1
*
* # the regs are inactive but still match fpstate:
* fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
*
* The third state is what we use for the lazy restore optimization
* on lazy-switching CPUs.
*/
unsigned char fpregs_active;
 
/*
* @counter:
*
* This counter contains the number of consecutive context switches
* during which the FPU stays used. If this is over a threshold, the
* lazy FPU restore logic becomes eager, to save the trap overhead.
* This is an unsigned char so that after 256 iterations the counter
* wraps and the context switch behavior turns lazy again; this is to
* deal with bursty apps that only use the FPU for a short time:
*/
unsigned char counter;
/*
* @state:
*
* In-memory copy of all FPU registers that we save/restore
* over context switches. If the task is using the FPU then
* the registers in the FPU are more recent than this state
* copy. If the task context-switches away then they get
* saved here and represent the FPU state.
*
* After context switches there may be a (short) time period
* during which the in-FPU hardware registers are unchanged
* and still perfectly match this state, if the tasks
* scheduled afterwards are not using the FPU.
*
* This is the 'lazy restore' window of optimization, which
* we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
*
* We detect whether a subsequent task uses the FPU via setting
* CR0::TS to 1, which causes any FPU use to raise a #NM fault.
*
* During this window, if the task gets scheduled again, we
* might be able to skip having to do a restore from this
* memory buffer to the hardware registers - at the cost of
* incurring the overhead of #NM fault traps.
*
* Note that on modern CPUs that support the XSAVEOPT (or other
* optimized XSAVE instructions), we don't use #NM traps anymore,
* as the hardware can track whether FPU registers need saving
* or not. On such CPUs we activate the non-lazy ('eagerfpu')
* logic, which unconditionally saves/restores all FPU state
* across context switches. (if FPU state exists.)
*/
union fpregs_state state;
/*
* WARNING: 'state' is dynamically-sized. Do not put
* anything after it here.
*/
};
 
#endif /* _ASM_X86_FPU_H */
/drivers/include/asm/intel-mid.h
0,0 → 1,151
/*
* intel-mid.h: Intel MID specific setup code
*
* (C) Copyright 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*/
#ifndef _ASM_X86_INTEL_MID_H
#define _ASM_X86_INTEL_MID_H
 
#include <linux/sfi.h>
//#include <linux/platform_device.h>
 
extern int intel_mid_pci_init(void);
extern int get_gpio_by_name(const char *name);
extern void intel_scu_device_register(struct platform_device *pdev);
extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
extern int __init sfi_parse_mtmr(struct sfi_table_header *table);
extern int sfi_mrtc_num;
extern struct sfi_rtc_table_entry sfi_mrtc_array[];
 
/*
* This defines the array of device platform data that IAFW exports
* through the SFI "DEVS" table; name and type are used to match a device
* with its platform data.
*/
struct devs_id {
char name[SFI_NAME_LEN + 1];
u8 type;
u8 delay;
void *(*get_platform_data)(void *info);
/* Custom handler for devices */
void (*device_handler)(struct sfi_device_table_entry *pentry,
struct devs_id *dev);
};
 
#define sfi_device(i) \
static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \
__attribute__((__section__(".x86_intel_mid_dev.init"))) = &i
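
A driver registers itself by filling in a devs_id and passing it to sfi_device(); an illustrative sketch (all names hypothetical, type constant from linux/sfi.h):

static void *sketch_get_platform_data(void *info)
{
	/* build and return board-specific platform data for this entry */
	return NULL;
}

static const struct devs_id sketch_dev_id __initconst = {
	.name = "sketch_dev",		/* matched against the SFI DEVS table */
	.type = SFI_DEV_TYPE_IPC,	/* assumed device type */
	.delay = 1,
	.get_platform_data = &sketch_get_platform_data,
};

sfi_device(sketch_dev_id);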
 
/*
* Medfield is the follow-up to Moorestown: it combines a two-chip solution
* into one, and also adds always-on, constant TSC and LAPIC timers.
* Medfield is the platform name; the chip itself is called Penwell, so
* we treat Medfield/Penwell as a variant of Moorestown. Penwell can be
* identified via MSRs.
*/
enum intel_mid_cpu_type {
/* 1 was Moorestown */
INTEL_MID_CPU_CHIP_PENWELL = 2,
INTEL_MID_CPU_CHIP_CLOVERVIEW,
INTEL_MID_CPU_CHIP_TANGIER,
};
 
extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
 
/**
* struct intel_mid_ops - Interface between intel-mid & sub archs
* @arch_setup: arch_setup function to re-initialize platform
* structures (x86_init, x86_platform_init)
*
* This structure can be extended if any new interface is required
* between intel-mid & its sub arch files.
*/
struct intel_mid_ops {
void (*arch_setup)(void);
};
 
/* Helper API's for INTEL_MID_OPS_INIT */
#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \
[cpuid] = get_##cpuname##_ops
 
/* Maximum number of CPU ops */
#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
 
/*
* For every new cpu addition, a weak get_<cpuname>_ops() function needs to be
* declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
*/
#define INTEL_MID_OPS_INIT {\
DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \
DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \
DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
};
 
#ifdef CONFIG_X86_INTEL_MID
 
static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
{
return __intel_mid_cpu_chip;
}
 
static inline bool intel_mid_has_msic(void)
{
return (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL);
}
 
#else /* !CONFIG_X86_INTEL_MID */
 
#define intel_mid_identify_cpu() (0)
#define intel_mid_has_msic() (0)
 
#endif /* !CONFIG_X86_INTEL_MID */
 
enum intel_mid_timer_options {
INTEL_MID_TIMER_DEFAULT,
INTEL_MID_TIMER_APBT_ONLY,
INTEL_MID_TIMER_LAPIC_APBT,
};
 
extern enum intel_mid_timer_options intel_mid_timer_options;
 
/*
* Penwell uses a spread-spectrum clock, so the frequency number is not
* exactly the same as the value reported by the MSR per the SDM.
*/
#define FSB_FREQ_83SKU 83200
#define FSB_FREQ_100SKU 99840
#define FSB_FREQ_133SKU 133000
 
#define FSB_FREQ_167SKU 167000
#define FSB_FREQ_200SKU 200000
#define FSB_FREQ_267SKU 267000
#define FSB_FREQ_333SKU 333000
#define FSB_FREQ_400SKU 400000
 
/* Bus Select SoC Fuse value */
#define BSEL_SOC_FUSE_MASK 0x7
#define BSEL_SOC_FUSE_001 0x1 /* FSB 133MHz */
#define BSEL_SOC_FUSE_101 0x5 /* FSB 100MHz */
#define BSEL_SOC_FUSE_111 0x7 /* FSB 83MHz */
 
#define SFI_MTMR_MAX_NUM 8
#define SFI_MRTC_MAX 8
 
extern void intel_scu_devices_create(void);
extern void intel_scu_devices_destroy(void);
 
/* VRTC timer */
#define MRST_VRTC_MAP_SZ (1024)
/*#define MRST_VRTC_PGOFFSET (0xc00) */
 
extern void intel_mid_rtc_init(void);
 
/* the offset for the mapping of global gpio pin to irq */
#define INTEL_MID_IRQ_OFFSET 0x100
 
#endif /* _ASM_X86_INTEL_MID_H */
/drivers/include/asm/irqflags.h
136,10 → 136,6
#define USERGS_SYSRET32 \
swapgs; \
sysretl
#define ENABLE_INTERRUPTS_SYSEXIT32 \
swapgs; \
sti; \
sysexit
 
#else
#define INTERRUPT_RETURN iret
163,22 → 159,27
 
return arch_irqs_disabled_flags(flags);
}
#endif /* !__ASSEMBLY__ */
 
#ifdef __ASSEMBLY__
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
#else
 
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#ifdef CONFIG_X86_64
#define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
#define ARCH_LOCKDEP_SYS_EXIT_IRQ \
# define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
# define LOCKDEP_SYS_EXIT_IRQ \
TRACE_IRQS_ON; \
sti; \
SAVE_REST; \
LOCKDEP_SYS_EXIT; \
RESTORE_REST; \
call lockdep_sys_exit_thunk; \
cli; \
TRACE_IRQS_OFF;
 
#else
#define ARCH_LOCKDEP_SYS_EXIT \
# define LOCKDEP_SYS_EXIT \
pushl %eax; \
pushl %ecx; \
pushl %edx; \
186,24 → 187,12
popl %edx; \
popl %ecx; \
popl %eax;
 
#define ARCH_LOCKDEP_SYS_EXIT_IRQ
# define LOCKDEP_SYS_EXIT_IRQ
#endif
 
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCKDEP_SYS_EXIT ARCH_LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
# endif
#endif /* __ASSEMBLY__ */
 
#endif /* __ASSEMBLY__ */
#endif
/drivers/include/asm/math_emu.h
2,7 → 2,6
#define _ASM_X86_MATH_EMU_H
 
#include <asm/ptrace.h>
#include <asm/vm86.h>
 
/* This structure matches the layout of the data saved to the stack
following a device-not-present interrupt, part of it saved
10,9 → 9,6
*/
struct math_emu_info {
long ___orig_eip;
union {
struct pt_regs *regs;
struct kernel_vm86_regs *vm86;
};
};
#endif /* _ASM_X86_MATH_EMU_H */
/drivers/include/asm/msr-index.h
0,0 → 1,694
#ifndef _ASM_X86_MSR_INDEX_H
#define _ASM_X86_MSR_INDEX_H
 
/* CPU model specific register (MSR) numbers */
 
/* x86-64 specific MSRs */
#define MSR_EFER 0xc0000080 /* extended feature register */
#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */
#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */
#define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */
 
/* EFER bits: */
#define _EFER_SCE 0 /* SYSCALL/SYSRET */
#define _EFER_LME 8 /* Long mode enable */
#define _EFER_LMA 10 /* Long mode active (read-only) */
#define _EFER_NX 11 /* No execute enable */
#define _EFER_SVME 12 /* Enable virtualization */
#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
 
#define EFER_SCE (1<<_EFER_SCE)
#define EFER_LME (1<<_EFER_LME)
#define EFER_LMA (1<<_EFER_LMA)
#define EFER_NX (1<<_EFER_NX)
#define EFER_SVME (1<<_EFER_SVME)
#define EFER_LMSLE (1<<_EFER_LMSLE)
#define EFER_FFXSR (1<<_EFER_FFXSR)
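
For illustration: the rdmsr accessors live in msr.h, not here, but a self-contained sketch of reading one of these registers looks like this (helper name hypothetical):

static inline unsigned long long rdmsr_sketch(unsigned int msr)
{
	unsigned int lo, hi;
	asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
	return ((unsigned long long)hi << 32) | lo;
}

/* e.g. test whether long mode is active:
 *	int lma = !!(rdmsr_sketch(MSR_EFER) & EFER_LMA);
 */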
 
/* Intel MSRs. Some also available on other CPUs */
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_PERFCTR1 0x000000c2
#define MSR_FSB_FREQ 0x000000cd
#define MSR_PLATFORM_INFO 0x000000ce
 
#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
#define NHM_C3_AUTO_DEMOTE (1UL << 25)
#define NHM_C1_AUTO_DEMOTE (1UL << 26)
#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25)
#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
 
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
 
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
#define MSR_IA32_SYSENTER_EIP 0x00000176
 
#define MSR_IA32_MCG_CAP 0x00000179
#define MSR_IA32_MCG_STATUS 0x0000017a
#define MSR_IA32_MCG_CTL 0x0000017b
#define MSR_IA32_MCG_EXT_CTL 0x000004d0
 
#define MSR_OFFCORE_RSP_0 0x000001a6
#define MSR_OFFCORE_RSP_1 0x000001a7
#define MSR_NHM_TURBO_RATIO_LIMIT 0x000001ad
#define MSR_IVT_TURBO_RATIO_LIMIT 0x000001ae
#define MSR_TURBO_RATIO_LIMIT 0x000001ad
#define MSR_TURBO_RATIO_LIMIT1 0x000001ae
#define MSR_TURBO_RATIO_LIMIT2 0x000001af
 
#define MSR_LBR_SELECT 0x000001c8
#define MSR_LBR_TOS 0x000001c9
#define MSR_LBR_NHM_FROM 0x00000680
#define MSR_LBR_NHM_TO 0x000006c0
#define MSR_LBR_CORE_FROM 0x00000040
#define MSR_LBR_CORE_TO 0x00000060
 
#define MSR_LBR_INFO_0 0x00000dc0 /* ... 0xddf for _31 */
#define LBR_INFO_MISPRED BIT_ULL(63)
#define LBR_INFO_IN_TX BIT_ULL(62)
#define LBR_INFO_ABORT BIT_ULL(61)
#define LBR_INFO_CYCLES 0xffff
 
#define MSR_IA32_PEBS_ENABLE 0x000003f1
#define MSR_IA32_DS_AREA 0x00000600
#define MSR_IA32_PERF_CAPABILITIES 0x00000345
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
 
#define MSR_IA32_RTIT_CTL 0x00000570
#define RTIT_CTL_TRACEEN BIT(0)
#define RTIT_CTL_CYCLEACC BIT(1)
#define RTIT_CTL_OS BIT(2)
#define RTIT_CTL_USR BIT(3)
#define RTIT_CTL_CR3EN BIT(7)
#define RTIT_CTL_TOPA BIT(8)
#define RTIT_CTL_MTC_EN BIT(9)
#define RTIT_CTL_TSC_EN BIT(10)
#define RTIT_CTL_DISRETC BIT(11)
#define RTIT_CTL_BRANCH_EN BIT(13)
#define RTIT_CTL_MTC_RANGE_OFFSET 14
#define RTIT_CTL_MTC_RANGE (0x0full << RTIT_CTL_MTC_RANGE_OFFSET)
#define RTIT_CTL_CYC_THRESH_OFFSET 19
#define RTIT_CTL_CYC_THRESH (0x0full << RTIT_CTL_CYC_THRESH_OFFSET)
#define RTIT_CTL_PSB_FREQ_OFFSET 24
#define RTIT_CTL_PSB_FREQ (0x0full << RTIT_CTL_PSB_FREQ_OFFSET)
#define MSR_IA32_RTIT_STATUS 0x00000571
#define RTIT_STATUS_CONTEXTEN BIT(1)
#define RTIT_STATUS_TRIGGEREN BIT(2)
#define RTIT_STATUS_ERROR BIT(4)
#define RTIT_STATUS_STOPPED BIT(5)
#define MSR_IA32_RTIT_CR3_MATCH 0x00000572
#define MSR_IA32_RTIT_OUTPUT_BASE 0x00000560
#define MSR_IA32_RTIT_OUTPUT_MASK 0x00000561
 
#define MSR_MTRRfix64K_00000 0x00000250
#define MSR_MTRRfix16K_80000 0x00000258
#define MSR_MTRRfix16K_A0000 0x00000259
#define MSR_MTRRfix4K_C0000 0x00000268
#define MSR_MTRRfix4K_C8000 0x00000269
#define MSR_MTRRfix4K_D0000 0x0000026a
#define MSR_MTRRfix4K_D8000 0x0000026b
#define MSR_MTRRfix4K_E0000 0x0000026c
#define MSR_MTRRfix4K_E8000 0x0000026d
#define MSR_MTRRfix4K_F0000 0x0000026e
#define MSR_MTRRfix4K_F8000 0x0000026f
#define MSR_MTRRdefType 0x000002ff
 
#define MSR_IA32_CR_PAT 0x00000277
 
#define MSR_IA32_DEBUGCTLMSR 0x000001d9
#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
#define MSR_IA32_LASTINTFROMIP 0x000001dd
#define MSR_IA32_LASTINTTOIP 0x000001de
 
/* DEBUGCTLMSR bits (others vary by model): */
#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
#define DEBUGCTLMSR_TR (1UL << 6)
#define DEBUGCTLMSR_BTS (1UL << 7)
#define DEBUGCTLMSR_BTINT (1UL << 8)
#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9)
#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
 
#define MSR_PEBS_FRONTEND 0x000003f7
 
#define MSR_IA32_POWER_CTL 0x000001fc
 
#define MSR_IA32_MC0_CTL 0x00000400
#define MSR_IA32_MC0_STATUS 0x00000401
#define MSR_IA32_MC0_ADDR 0x00000402
#define MSR_IA32_MC0_MISC 0x00000403
 
/* C-state Residency Counters */
#define MSR_PKG_C3_RESIDENCY 0x000003f8
#define MSR_PKG_C6_RESIDENCY 0x000003f9
#define MSR_PKG_C7_RESIDENCY 0x000003fa
#define MSR_CORE_C3_RESIDENCY 0x000003fc
#define MSR_CORE_C6_RESIDENCY 0x000003fd
#define MSR_CORE_C7_RESIDENCY 0x000003fe
#define MSR_KNL_CORE_C6_RESIDENCY 0x000003ff
#define MSR_PKG_C2_RESIDENCY 0x0000060d
#define MSR_PKG_C8_RESIDENCY 0x00000630
#define MSR_PKG_C9_RESIDENCY 0x00000631
#define MSR_PKG_C10_RESIDENCY 0x00000632
 
/* Run Time Average Power Limiting (RAPL) Interface */
 
#define MSR_RAPL_POWER_UNIT 0x00000606
 
#define MSR_PKG_POWER_LIMIT 0x00000610
#define MSR_PKG_ENERGY_STATUS 0x00000611
#define MSR_PKG_PERF_STATUS 0x00000613
#define MSR_PKG_POWER_INFO 0x00000614
 
#define MSR_DRAM_POWER_LIMIT 0x00000618
#define MSR_DRAM_ENERGY_STATUS 0x00000619
#define MSR_DRAM_PERF_STATUS 0x0000061b
#define MSR_DRAM_POWER_INFO 0x0000061c
 
#define MSR_PP0_POWER_LIMIT 0x00000638
#define MSR_PP0_ENERGY_STATUS 0x00000639
#define MSR_PP0_POLICY 0x0000063a
#define MSR_PP0_PERF_STATUS 0x0000063b
 
#define MSR_PP1_POWER_LIMIT 0x00000640
#define MSR_PP1_ENERGY_STATUS 0x00000641
#define MSR_PP1_POLICY 0x00000642
 
#define MSR_CONFIG_TDP_NOMINAL 0x00000648
#define MSR_CONFIG_TDP_LEVEL_1 0x00000649
#define MSR_CONFIG_TDP_LEVEL_2 0x0000064A
#define MSR_CONFIG_TDP_CONTROL 0x0000064B
#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C
 
#define MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658
#define MSR_PKG_ANY_CORE_C0_RES 0x00000659
#define MSR_PKG_ANY_GFXE_C0_RES 0x0000065A
#define MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B
 
#define MSR_CORE_C1_RES 0x00000660
 
#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669
 
#define MSR_CORE_PERF_LIMIT_REASONS 0x00000690
#define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0
#define MSR_RING_PERF_LIMIT_REASONS 0x000006B1
 
/* Config TDP MSRs */
#define MSR_CONFIG_TDP_NOMINAL 0x00000648
#define MSR_CONFIG_TDP_LEVEL1 0x00000649
#define MSR_CONFIG_TDP_LEVEL2 0x0000064A
#define MSR_CONFIG_TDP_CONTROL 0x0000064B
#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C
 
/* Hardware P state interface */
#define MSR_PPERF 0x0000064e
#define MSR_PERF_LIMIT_REASONS 0x0000064f
#define MSR_PM_ENABLE 0x00000770
#define MSR_HWP_CAPABILITIES 0x00000771
#define MSR_HWP_REQUEST_PKG 0x00000772
#define MSR_HWP_INTERRUPT 0x00000773
#define MSR_HWP_REQUEST 0x00000774
#define MSR_HWP_STATUS 0x00000777
 
/* CPUID.6.EAX */
#define HWP_BASE_BIT (1<<7)
#define HWP_NOTIFICATIONS_BIT (1<<8)
#define HWP_ACTIVITY_WINDOW_BIT (1<<9)
#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10)
#define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11)
 
/* IA32_HWP_CAPABILITIES */
#define HWP_HIGHEST_PERF(x) (x & 0xff)
#define HWP_GUARANTEED_PERF(x) ((x & (0xff << 8)) >>8)
#define HWP_MOSTEFFICIENT_PERF(x) ((x & (0xff << 16)) >>16)
#define HWP_LOWEST_PERF(x) ((x & (0xff << 24)) >>24)
 
/* IA32_HWP_REQUEST */
#define HWP_MIN_PERF(x) (x & 0xff)
#define HWP_MAX_PERF(x) ((x & 0xff) << 8)
#define HWP_DESIRED_PERF(x) ((x & 0xff) << 16)
#define HWP_ENERGY_PERF_PREFERENCE(x) ((x & 0xff) << 24)
#define HWP_ACTIVITY_WINDOW(x) ((x & 0xff3) << 32)
#define HWP_PACKAGE_CONTROL(x) ((x & 0x1) << 42)
 
/* IA32_HWP_STATUS */
#define HWP_GUARANTEED_CHANGE(x) (x & 0x1)
#define HWP_EXCURSION_TO_MINIMUM(x) (x & 0x4)
 
/* IA32_HWP_INTERRUPT */
#define HWP_CHANGE_TO_GUARANTEED_INT(x) (x & 0x1)
#define HWP_EXCURSION_TO_MINIMUM_INT(x) (x & 0x2)
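
The capability accessors above just slice byte-wide fields out of the 64-bit MSR value; a sketch, assuming 'cap' was read from MSR_HWP_CAPABILITIES:

static inline void hwp_decode_caps(unsigned long long cap)
{
	unsigned int highest    = HWP_HIGHEST_PERF(cap);	/* bits  7:0  */
	unsigned int guaranteed = HWP_GUARANTEED_PERF(cap);	/* bits 15:8  */
	unsigned int efficient  = HWP_MOSTEFFICIENT_PERF(cap);	/* bits 23:16 */
	unsigned int lowest     = HWP_LOWEST_PERF(cap);		/* bits 31:24 */
	(void)highest; (void)guaranteed; (void)efficient; (void)lowest;
}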
 
#define MSR_AMD64_MC0_MASK 0xc0010044
 
#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x))
#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x))
#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x))
 
#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x))
 
/* These are consecutive and not in the normal 4er MCE bank block */
#define MSR_IA32_MC0_CTL2 0x00000280
#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x))
 
#define MSR_P6_PERFCTR0 0x000000c1
#define MSR_P6_PERFCTR1 0x000000c2
#define MSR_P6_EVNTSEL0 0x00000186
#define MSR_P6_EVNTSEL1 0x00000187
 
#define MSR_KNC_PERFCTR0 0x00000020
#define MSR_KNC_PERFCTR1 0x00000021
#define MSR_KNC_EVNTSEL0 0x00000028
#define MSR_KNC_EVNTSEL1 0x00000029
 
/* Alternative perfctr range with full access. */
#define MSR_IA32_PMC0 0x000004c1
 
/* AMD64 MSRs. Not complete. See the architecture manual for a more
complete list. */
 
#define MSR_AMD64_PATCH_LEVEL 0x0000008b
#define MSR_AMD64_TSC_RATIO 0xc0000104
#define MSR_AMD64_NB_CFG 0xc001001f
#define MSR_AMD64_PATCH_LOADER 0xc0010020
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
#define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
#define MSR_AMD64_BU_CFG2 0xc001102a
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
#define MSR_AMD64_IBSFETCH_REG_COUNT 3
#define MSR_AMD64_IBSFETCH_REG_MASK ((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1)
#define MSR_AMD64_IBSOPCTL 0xc0011033
#define MSR_AMD64_IBSOPRIP 0xc0011034
#define MSR_AMD64_IBSOPDATA 0xc0011035
#define MSR_AMD64_IBSOPDATA2 0xc0011036
#define MSR_AMD64_IBSOPDATA3 0xc0011037
#define MSR_AMD64_IBSDCLINAD 0xc0011038
#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
#define MSR_AMD64_IBSOP_REG_COUNT 7
#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
#define MSR_AMD64_IBSCTL 0xc001103a
#define MSR_AMD64_IBSBRTARGET 0xc001103b
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
 
/* Fam 16h MSRs */
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
#define MSR_F16H_DR1_ADDR_MASK 0xc0011019
#define MSR_F16H_DR2_ADDR_MASK 0xc001101a
#define MSR_F16H_DR3_ADDR_MASK 0xc001101b
#define MSR_F16H_DR0_ADDR_MASK 0xc0011027
 
/* Fam 15h MSRs */
#define MSR_F15H_PERF_CTL 0xc0010200
#define MSR_F15H_PERF_CTR 0xc0010201
#define MSR_F15H_NB_PERF_CTL 0xc0010240
#define MSR_F15H_NB_PERF_CTR 0xc0010241
 
/* Fam 10h MSRs */
#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
#define FAM10H_MMIO_CONF_ENABLE (1<<0)
#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf
#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
#define MSR_FAM10H_NODE_ID 0xc001100c
 
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
#define MSR_K8_TOP_MEM2 0xc001001d
#define MSR_K8_SYSCFG 0xc0010010
#define MSR_K8_INT_PENDING_MSG 0xc0010055
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
#define MSR_K8_TSEG_ADDR 0xc0010112
#define MSR_K8_TSEG_MASK 0xc0010113
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
 
/* K7 MSRs */
#define MSR_K7_EVNTSEL0 0xc0010000
#define MSR_K7_PERFCTR0 0xc0010004
#define MSR_K7_EVNTSEL1 0xc0010001
#define MSR_K7_PERFCTR1 0xc0010005
#define MSR_K7_EVNTSEL2 0xc0010002
#define MSR_K7_PERFCTR2 0xc0010006
#define MSR_K7_EVNTSEL3 0xc0010003
#define MSR_K7_PERFCTR3 0xc0010007
#define MSR_K7_CLK_CTL 0xc001001b
#define MSR_K7_HWCR 0xc0010015
#define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042
 
/* K6 MSRs */
#define MSR_K6_WHCR 0xc0000082
#define MSR_K6_UWCCR 0xc0000085
#define MSR_K6_EPMR 0xc0000086
#define MSR_K6_PSOR 0xc0000087
#define MSR_K6_PFIR 0xc0000088
 
/* Centaur-Hauls/IDT defined MSRs. */
#define MSR_IDT_FCR1 0x00000107
#define MSR_IDT_FCR2 0x00000108
#define MSR_IDT_FCR3 0x00000109
#define MSR_IDT_FCR4 0x0000010a
 
#define MSR_IDT_MCR0 0x00000110
#define MSR_IDT_MCR1 0x00000111
#define MSR_IDT_MCR2 0x00000112
#define MSR_IDT_MCR3 0x00000113
#define MSR_IDT_MCR4 0x00000114
#define MSR_IDT_MCR5 0x00000115
#define MSR_IDT_MCR6 0x00000116
#define MSR_IDT_MCR7 0x00000117
#define MSR_IDT_MCR_CTRL 0x00000120
 
/* VIA Cyrix defined MSRs*/
#define MSR_VIA_FCR 0x00001107
#define MSR_VIA_LONGHAUL 0x0000110a
#define MSR_VIA_RNG 0x0000110b
#define MSR_VIA_BCR2 0x00001147
 
/* Transmeta defined MSRs */
#define MSR_TMTA_LONGRUN_CTRL 0x80868010
#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
#define MSR_TMTA_LRTI_READOUT 0x80868018
#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
 
/* Intel defined MSRs. */
#define MSR_IA32_P5_MC_ADDR 0x00000000
#define MSR_IA32_P5_MC_TYPE 0x00000001
#define MSR_IA32_TSC 0x00000010
#define MSR_IA32_PLATFORM_ID 0x00000017
#define MSR_IA32_EBL_CR_POWERON 0x0000002a
#define MSR_EBC_FREQUENCY_ID 0x0000002c
#define MSR_SMI_COUNT 0x00000034
#define MSR_IA32_FEATURE_CONTROL 0x0000003a
#define MSR_IA32_TSC_ADJUST 0x0000003b
#define MSR_IA32_BNDCFGS 0x00000d90
 
#define MSR_IA32_XSS 0x00000da0
 
#define FEATURE_CONTROL_LOCKED (1<<0)
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
#define FEATURE_CONTROL_LMCE (1<<20)
 
#define MSR_IA32_APICBASE 0x0000001b
#define MSR_IA32_APICBASE_BSP (1<<8)
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
 
#define MSR_IA32_TSCDEADLINE 0x000006e0
 
#define MSR_IA32_UCODE_WRITE 0x00000079
#define MSR_IA32_UCODE_REV 0x0000008b
 
#define MSR_IA32_SMM_MONITOR_CTL 0x0000009b
#define MSR_IA32_SMBASE 0x0000009e
 
#define MSR_IA32_PERF_STATUS 0x00000198
#define MSR_IA32_PERF_CTL 0x00000199
#define INTEL_PERF_CTL_MASK 0xffff
#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064
#define MSR_AMD_PERF_STATUS 0xc0010063
#define MSR_AMD_PERF_CTL 0xc0010062
 
#define MSR_IA32_MPERF 0x000000e7
#define MSR_IA32_APERF 0x000000e8
 
#define MSR_IA32_THERM_CONTROL 0x0000019a
#define MSR_IA32_THERM_INTERRUPT 0x0000019b
 
#define THERM_INT_HIGH_ENABLE (1 << 0)
#define THERM_INT_LOW_ENABLE (1 << 1)
#define THERM_INT_PLN_ENABLE (1 << 24)
 
#define MSR_IA32_THERM_STATUS 0x0000019c
 
#define THERM_STATUS_PROCHOT (1 << 0)
#define THERM_STATUS_POWER_LIMIT (1 << 10)
 
#define MSR_THERM2_CTL 0x0000019d
 
#define MSR_THERM2_CTL_TM_SELECT (1ULL << 16)
 
#define MSR_IA32_MISC_ENABLE 0x000001a0
 
#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2
 
#define MSR_MISC_PWR_MGMT 0x000001aa
 
#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0
#define ENERGY_PERF_BIAS_PERFORMANCE 0
#define ENERGY_PERF_BIAS_NORMAL 6
#define ENERGY_PERF_BIAS_POWERSAVE 15
 
#define MSR_IA32_PACKAGE_THERM_STATUS 0x000001b1
 
#define PACKAGE_THERM_STATUS_PROCHOT (1 << 0)
#define PACKAGE_THERM_STATUS_POWER_LIMIT (1 << 10)
 
#define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x000001b2
 
#define PACKAGE_THERM_INT_HIGH_ENABLE (1 << 0)
#define PACKAGE_THERM_INT_LOW_ENABLE (1 << 1)
#define PACKAGE_THERM_INT_PLN_ENABLE (1 << 24)
 
/* Thermal Thresholds Support */
#define THERM_INT_THRESHOLD0_ENABLE (1 << 15)
#define THERM_SHIFT_THRESHOLD0 8
#define THERM_MASK_THRESHOLD0 (0x7f << THERM_SHIFT_THRESHOLD0)
#define THERM_INT_THRESHOLD1_ENABLE (1 << 23)
#define THERM_SHIFT_THRESHOLD1 16
#define THERM_MASK_THRESHOLD1 (0x7f << THERM_SHIFT_THRESHOLD1)
#define THERM_STATUS_THRESHOLD0 (1 << 6)
#define THERM_LOG_THRESHOLD0 (1 << 7)
#define THERM_STATUS_THRESHOLD1 (1 << 8)
#define THERM_LOG_THRESHOLD1 (1 << 9)
 
/* MISC_ENABLE bits: architectural */
#define MSR_IA32_MISC_ENABLE_FAST_STRING_BIT 0
#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << MSR_IA32_MISC_ENABLE_FAST_STRING_BIT)
#define MSR_IA32_MISC_ENABLE_TCC_BIT 1
#define MSR_IA32_MISC_ENABLE_TCC (1ULL << MSR_IA32_MISC_ENABLE_TCC_BIT)
#define MSR_IA32_MISC_ENABLE_EMON_BIT 7
#define MSR_IA32_MISC_ENABLE_EMON (1ULL << MSR_IA32_MISC_ENABLE_EMON_BIT)
#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT 11
#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT)
#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT 12
#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT)
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT 16
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT)
#define MSR_IA32_MISC_ENABLE_MWAIT_BIT 18
#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT 22
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT 23
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT 34
#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT)
 
/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
#define MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT 2
#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT)
#define MSR_IA32_MISC_ENABLE_TM1_BIT 3
#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << MSR_IA32_MISC_ENABLE_TM1_BIT)
#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT 4
#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT 6
#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT 8
#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT)
#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT 9
#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_FERR_BIT 10
#define MSR_IA32_MISC_ENABLE_FERR (1ULL << MSR_IA32_MISC_ENABLE_FERR_BIT)
#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT 10
#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT)
#define MSR_IA32_MISC_ENABLE_TM2_BIT 13
#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << MSR_IA32_MISC_ENABLE_TM2_BIT)
#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT 19
#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT 20
#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT)
#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT 24
#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT)
#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT 37
#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT 38
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT 39
#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT)
 
#define MSR_IA32_TSC_DEADLINE 0x000006E0
 
/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
#define MSR_IA32_MCG_EBX 0x00000181
#define MSR_IA32_MCG_ECX 0x00000182
#define MSR_IA32_MCG_EDX 0x00000183
#define MSR_IA32_MCG_ESI 0x00000184
#define MSR_IA32_MCG_EDI 0x00000185
#define MSR_IA32_MCG_EBP 0x00000186
#define MSR_IA32_MCG_ESP 0x00000187
#define MSR_IA32_MCG_EFLAGS 0x00000188
#define MSR_IA32_MCG_EIP 0x00000189
#define MSR_IA32_MCG_RESERVED 0x0000018a
 
/* Pentium IV performance counter MSRs */
#define MSR_P4_BPU_PERFCTR0 0x00000300
#define MSR_P4_BPU_PERFCTR1 0x00000301
#define MSR_P4_BPU_PERFCTR2 0x00000302
#define MSR_P4_BPU_PERFCTR3 0x00000303
#define MSR_P4_MS_PERFCTR0 0x00000304
#define MSR_P4_MS_PERFCTR1 0x00000305
#define MSR_P4_MS_PERFCTR2 0x00000306
#define MSR_P4_MS_PERFCTR3 0x00000307
#define MSR_P4_FLAME_PERFCTR0 0x00000308
#define MSR_P4_FLAME_PERFCTR1 0x00000309
#define MSR_P4_FLAME_PERFCTR2 0x0000030a
#define MSR_P4_FLAME_PERFCTR3 0x0000030b
#define MSR_P4_IQ_PERFCTR0 0x0000030c
#define MSR_P4_IQ_PERFCTR1 0x0000030d
#define MSR_P4_IQ_PERFCTR2 0x0000030e
#define MSR_P4_IQ_PERFCTR3 0x0000030f
#define MSR_P4_IQ_PERFCTR4 0x00000310
#define MSR_P4_IQ_PERFCTR5 0x00000311
#define MSR_P4_BPU_CCCR0 0x00000360
#define MSR_P4_BPU_CCCR1 0x00000361
#define MSR_P4_BPU_CCCR2 0x00000362
#define MSR_P4_BPU_CCCR3 0x00000363
#define MSR_P4_MS_CCCR0 0x00000364
#define MSR_P4_MS_CCCR1 0x00000365
#define MSR_P4_MS_CCCR2 0x00000366
#define MSR_P4_MS_CCCR3 0x00000367
#define MSR_P4_FLAME_CCCR0 0x00000368
#define MSR_P4_FLAME_CCCR1 0x00000369
#define MSR_P4_FLAME_CCCR2 0x0000036a
#define MSR_P4_FLAME_CCCR3 0x0000036b
#define MSR_P4_IQ_CCCR0 0x0000036c
#define MSR_P4_IQ_CCCR1 0x0000036d
#define MSR_P4_IQ_CCCR2 0x0000036e
#define MSR_P4_IQ_CCCR3 0x0000036f
#define MSR_P4_IQ_CCCR4 0x00000370
#define MSR_P4_IQ_CCCR5 0x00000371
#define MSR_P4_ALF_ESCR0 0x000003ca
#define MSR_P4_ALF_ESCR1 0x000003cb
#define MSR_P4_BPU_ESCR0 0x000003b2
#define MSR_P4_BPU_ESCR1 0x000003b3
#define MSR_P4_BSU_ESCR0 0x000003a0
#define MSR_P4_BSU_ESCR1 0x000003a1
#define MSR_P4_CRU_ESCR0 0x000003b8
#define MSR_P4_CRU_ESCR1 0x000003b9
#define MSR_P4_CRU_ESCR2 0x000003cc
#define MSR_P4_CRU_ESCR3 0x000003cd
#define MSR_P4_CRU_ESCR4 0x000003e0
#define MSR_P4_CRU_ESCR5 0x000003e1
#define MSR_P4_DAC_ESCR0 0x000003a8
#define MSR_P4_DAC_ESCR1 0x000003a9
#define MSR_P4_FIRM_ESCR0 0x000003a4
#define MSR_P4_FIRM_ESCR1 0x000003a5
#define MSR_P4_FLAME_ESCR0 0x000003a6
#define MSR_P4_FLAME_ESCR1 0x000003a7
#define MSR_P4_FSB_ESCR0 0x000003a2
#define MSR_P4_FSB_ESCR1 0x000003a3
#define MSR_P4_IQ_ESCR0 0x000003ba
#define MSR_P4_IQ_ESCR1 0x000003bb
#define MSR_P4_IS_ESCR0 0x000003b4
#define MSR_P4_IS_ESCR1 0x000003b5
#define MSR_P4_ITLB_ESCR0 0x000003b6
#define MSR_P4_ITLB_ESCR1 0x000003b7
#define MSR_P4_IX_ESCR0 0x000003c8
#define MSR_P4_IX_ESCR1 0x000003c9
#define MSR_P4_MOB_ESCR0 0x000003aa
#define MSR_P4_MOB_ESCR1 0x000003ab
#define MSR_P4_MS_ESCR0 0x000003c0
#define MSR_P4_MS_ESCR1 0x000003c1
#define MSR_P4_PMH_ESCR0 0x000003ac
#define MSR_P4_PMH_ESCR1 0x000003ad
#define MSR_P4_RAT_ESCR0 0x000003bc
#define MSR_P4_RAT_ESCR1 0x000003bd
#define MSR_P4_SAAT_ESCR0 0x000003ae
#define MSR_P4_SAAT_ESCR1 0x000003af
#define MSR_P4_SSU_ESCR0 0x000003be
#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */
 
#define MSR_P4_TBPU_ESCR0 0x000003c2
#define MSR_P4_TBPU_ESCR1 0x000003c3
#define MSR_P4_TC_ESCR0 0x000003c4
#define MSR_P4_TC_ESCR1 0x000003c5
#define MSR_P4_U2L_ESCR0 0x000003b0
#define MSR_P4_U2L_ESCR1 0x000003b1
 
#define MSR_P4_PEBS_MATRIX_VERT 0x000003f2
 
/* Intel Core-based CPU performance counters */
#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
 
/* Geode defined MSRs */
#define MSR_GEODE_BUSCONT_CONF0 0x00001900
 
/* Intel VT MSRs */
#define MSR_IA32_VMX_BASIC 0x00000480
#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
#define MSR_IA32_VMX_MISC 0x00000485
#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
#define MSR_IA32_VMX_VMFUNC 0x00000491
 
/* VMX_BASIC bits and bitmasks */
#define VMX_BASIC_VMCS_SIZE_SHIFT 32
#define VMX_BASIC_TRUE_CTLS (1ULL << 55)
#define VMX_BASIC_64 0x0001000000000000LLU
#define VMX_BASIC_MEM_TYPE_SHIFT 50
#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
#define VMX_BASIC_MEM_TYPE_WB 6LLU
#define VMX_BASIC_INOUT 0x0040000000000000LLU
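/*
 * Illustrative sketch (not part of this header): extracting the VMCS region
 * size from MSR_IA32_VMX_BASIC. The 13-bit width of the size field at bit
 * 32 follows the SDM layout assumed here; the function name is made up.
 */
static inline u32 example_vmcs_size(void)
{
	u64 basic;

	rdmsrl(MSR_IA32_VMX_BASIC, basic);
	return (basic >> VMX_BASIC_VMCS_SIZE_SHIFT) & 0x1fff;
}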
 
/* MSR_IA32_VMX_MISC bits */
#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F
 
/* AMD-V MSRs */
#define MSR_VM_CR 0xc0010114
#define MSR_VM_IGNNE 0xc0010115
#define MSR_VM_HSAVE_PA 0xc0010117
 
#endif /* _ASM_X86_MSR_INDEX_H */
/drivers/include/asm/msr.h
1,7 → 1,7
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H
 
#include <uapi/asm/msr.h>
#include "msr-index.h"
 
#ifndef __ASSEMBLY__
 
8,6 → 8,7
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>
 
struct msr {
union {
46,14 → 47,13
* it means rax *or* rdx.
*/
#ifdef CONFIG_X86_64
#define DECLARE_ARGS(val, low, high) unsigned low, high
#define EAX_EDX_VAL(val, low, high) ((low) | ((u64)(high) << 32))
#define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high)
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high) unsigned long low, high
#define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high) unsigned long long val
#define EAX_EDX_VAL(val, low, high) (val)
#define EAX_EDX_ARGS(val, low, high) "A" (val)
#define EAX_EDX_RET(val, low, high) "=A" (val)
#endif
 
105,12 → 105,19
return err;
}
 
extern unsigned long long native_read_tsc(void);
 
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);
 
static __always_inline unsigned long long __native_read_tsc(void)
/**
* rdtsc() - returns the current TSC without ordering constraints
*
* rdtsc() returns the result of RDTSC as a 64-bit integer. The
* only ordering constraint it supplies is the ordering implied by
* "asm volatile": it will put the RDTSC in the place you expect. The
* CPU can and will speculatively execute that RDTSC, though, so the
* results can be non-monotonic if compared on different CPUs.
*/
static __always_inline unsigned long long rdtsc(void)
{
DECLARE_ARGS(val, low, high);
 
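/*
 * Illustrative sketch (not part of this header): timing a code block with
 * rdtsc(). Because rdtsc() imposes no ordering, a real measurement would
 * need explicit serialization around it; this only shows the calling
 * convention. The function name is made up for the example.
 */
static __always_inline unsigned long long example_tsc_delta(void)
{
	unsigned long long start, end;

	start = rdtsc();
	/* ... code under measurement ... */
	end = rdtsc();
	return end - start;
}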
152,8 → 159,10
#define rdmsrl(msr, val) \
((val) = native_read_msr((msr)))
 
#define wrmsrl(msr, val) \
native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32))
static inline void wrmsrl(unsigned msr, u64 val)
{
native_write_msr(msr, (u32)val, (u32)(val >> 32));
}
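/*
 * Illustrative sketch (not part of this header): the usual MSR
 * read-modify-write pattern built from rdmsrl()/wrmsrl(), here setting the
 * architectural fast-string enable bit. The function name is made up.
 */
static inline void example_enable_fast_strings(void)
{
	u64 misc;

	rdmsrl(MSR_IA32_MISC_ENABLE, misc);
	misc |= MSR_IA32_MISC_ENABLE_FAST_STRING;
	wrmsrl(MSR_IA32_MISC_ENABLE, misc);
}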
 
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
179,12 → 188,6
return err;
}
 
#define rdtscl(low) \
((low) = (u32)__native_read_tsc())
 
#define rdtscll(val) \
((val) = __native_read_tsc())
 
#define rdpmc(counter, low, high) \
do { \
u64 _l = native_read_pmc((counter)); \
194,19 → 197,15
 
#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
 
#define rdtscp(low, high, aux) \
do { \
unsigned long long _val = native_read_tscp(&(aux)); \
(low) = (u32)_val; \
(high) = (u32)(_val >> 32); \
} while (0)
 
#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))
 
#endif /* !CONFIG_PARAVIRT */
 
#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
(u32)((val) >> 32))
/*
* 64-bit version of wrmsr_safe():
*/
static inline int wrmsrl_safe(u32 msr, u64 val)
{
return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}
 
#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
 
/drivers/include/asm/page_types.h
0,0 → 1,75
#ifndef _ASM_X86_PAGE_DEFS_H
#define _ASM_X86_PAGE_DEFS_H
 
#include <linux/const.h>
#include <linux/types.h>
 
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
 
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
 
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
 
#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
 
/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
   virtual addresses are 32 bits but physical addresses are larger
   (i.e., 32-bit PAE). */
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
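/*
 * Worked example of the cast above (illustration only): with 32-bit longs,
 * PAGE_MASK is 0xfffff000. Widened as an unsigned value it would become
 * 0x00000000fffff000 and strip PAE physical-address bits above bit 31;
 * sign-extending first yields 0xfffffffffffff000, which preserves them
 * before the & __PHYSICAL_MASK trims to the real physical width.
 */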
 
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
 
#define HUGE_MAX_HSTATE 2
 
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
 
#define VM_DATA_DEFAULT_FLAGS \
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
#define __PHYSICAL_START ALIGN(CONFIG_PHYSICAL_START, \
CONFIG_PHYSICAL_ALIGN)
 
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
 
#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
#define IOREMAP_MAX_ORDER (PUD_SHIFT)
#else
#include <asm/page_32_types.h>
#define IOREMAP_MAX_ORDER (PMD_SHIFT)
#endif /* CONFIG_X86_64 */
 
#ifndef __ASSEMBLY__
 
extern int devmem_is_allowed(unsigned long pagenr);
 
extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;
 
static inline phys_addr_t get_max_mapped(void)
{
return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
}
 
bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
 
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
 
extern void initmem_init(void);
 
#endif /* !__ASSEMBLY__ */
 
#endif /* _ASM_X86_PAGE_DEFS_H */
/drivers/include/asm/pgtable-2level.h
62,44 → 62,8
return ((value >> rightshift) & mask) << leftshift;
}
 
/*
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
* split up the 29 bits of offset into this range.
*/
#define PTE_FILE_MAX_BITS 29
#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1)
#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1)
#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
 
#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1)
#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1)
 
#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1)
#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2)
 
static __always_inline pgoff_t pte_to_pgoff(pte_t pte)
{
return (pgoff_t)
(pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) +
pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) +
pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3));
}
 
static __always_inline pte_t pgoff_to_pte(pgoff_t off)
{
return (pte_t){
.pte_low =
pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) +
pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) +
pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) +
_PAGE_FILE,
};
}
 
/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#define SWP_TYPE_BITS 5
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
 
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
/drivers/include/asm/pgtable-2level_types.h
17,7 → 17,6
#endif /* !__ASSEMBLY__ */
 
#define SHARED_KERNEL_PMD 0
#define PAGETABLE_LEVELS 2
 
/*
* traditional i386 two-level paging structure:
/drivers/include/asm/pgtable.h
19,7 → 19,14
#include <asm/x86_init.h>
 
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);
 
#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif
 
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
115,11 → 122,6
return pte_flags(pte) & _PAGE_RW;
}
 
static inline int pte_file(pte_t pte)
{
return pte_flags(pte) & _PAGE_FILE;
}
 
static inline int pte_huge(pte_t pte)
{
return pte_flags(pte) & _PAGE_PSE;
137,13 → 139,7
 
static inline int pte_special(pte_t pte)
{
/*
* See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h.
* On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 ==
* __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL.
*/
return (pte_flags(pte) & _PAGE_SPECIAL) &&
(pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE));
return pte_flags(pte) & _PAGE_SPECIAL;
}
 
static inline unsigned long pte_pfn(pte_t pte)
153,12 → 149,12
 
static inline unsigned long pmd_pfn(pmd_t pmd)
{
return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}
 
static inline unsigned long pud_pfn(pud_t pud)
{
return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}
 
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
305,7 → 301,7
 
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
return pmd_clear_flags(pmd, _PAGE_PRESENT);
return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}
 
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
329,21 → 325,16
return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}
 
static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}
 
static inline pte_t pte_file_mksoft_dirty(pte_t pte)
static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}
 
static inline int pte_file_soft_dirty(pte_t pte)
{
return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}
 
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
/*
405,7 → 396,9
return __pgprot(preservebits | addbits);
}
 
#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)
#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
 
#define canon_pgprot(p) __pgprot(massage_pgprot(p))
 
424,11 → 417,17
* requested memtype:
* - request is uncached, return cannot be write-back
* - request is write-combine, return cannot be write-back
* - request is write-through, return cannot be write-back
* - request is write-through, return cannot be write-combine
*/
if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
new_pcm == _PAGE_CACHE_MODE_WB) ||
(pcm == _PAGE_CACHE_MODE_WC &&
new_pcm == _PAGE_CACHE_MODE_WB)) {
new_pcm == _PAGE_CACHE_MODE_WB) ||
(pcm == _PAGE_CACHE_MODE_WT &&
new_pcm == _PAGE_CACHE_MODE_WB) ||
(pcm == _PAGE_CACHE_MODE_WT &&
new_pcm == _PAGE_CACHE_MODE_WC)) {
return 0;
}
 
463,13 → 462,6
 
static inline int pte_present(pte_t a)
{
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
_PAGE_NUMA);
}
 
#define pte_present_nonuma pte_present_nonuma
static inline int pte_present_nonuma(pte_t a)
{
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
 
479,7 → 471,7
if (pte_flags(a) & _PAGE_PRESENT)
return true;
 
if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
if ((pte_flags(a) & _PAGE_PROTNONE) &&
mm_tlb_flush_pending(mm))
return true;
 
499,10 → 491,27
* the _PAGE_PSE flag will remain set at all times while the
* _PAGE_PRESENT bit is clear).
*/
return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
_PAGE_NUMA);
return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}
 
#ifdef CONFIG_NUMA_BALANCING
/*
* These work without NUMA balancing but the kernel does not care. See the
* comment in include/asm-generic/pgtable.h
*/
static inline int pte_protnone(pte_t pte)
{
return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
== _PAGE_PROTNONE;
}
 
static inline int pmd_protnone(pmd_t pmd)
{
return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */
 
static inline int pmd_none(pmd_t pmd)
{
/* Only check low word on 32-bit platforms, since it might be
512,7 → 521,7
 
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}
 
/*
519,7 → 528,8
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pmd_page(pmd) pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
#define pmd_page(pmd) \
pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)
 
/*
* the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
559,11 → 569,6
 
static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
/* pmd_numa check */
if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
return 0;
#endif
return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
 
572,7 → 577,7
return npg >> (20 - PAGE_SHIFT);
}
 
#if PAGETABLE_LEVELS > 2
#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
return native_pud_val(pud) == 0;
585,7 → 590,7
 
static inline unsigned long pud_page_vaddr(pud_t pud)
{
return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}
 
/*
592,7 → 597,8
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
#define pud_page(pud) \
pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)
 
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
615,9 → 621,9
{
return 0;
}
#endif /* PAGETABLE_LEVELS > 2 */
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
 
#if PAGETABLE_LEVELS > 3
#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
return pgd_flags(pgd) & _PAGE_PRESENT;
654,7 → 660,7
{
return !native_pgd_val(pgd);
}
#endif /* PAGETABLE_LEVELS > 3 */
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
 
#endif /* __ASSEMBLY__ */
 
820,8 → 826,8
return pmd_flags(pmd) & _PAGE_RW;
}
 
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
pmd_t pmd = native_pmdp_get_and_clear(pmdp);
882,19 → 888,16
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
VM_BUG_ON(pte_present_nonuma(pte));
return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
 
static inline int pte_swp_soft_dirty(pte_t pte)
{
VM_BUG_ON(pte_present_nonuma(pte));
return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}
 
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
VM_BUG_ON(pte_present_nonuma(pte));
return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif
/drivers/include/asm/pgtable_types.h
4,7 → 4,7
#include <linux/const.h>
#include <asm/page_types.h>
 
#define FIRST_USER_ADDRESS 0
#define FIRST_USER_ADDRESS 0UL
 
#define _PAGE_BIT_PRESENT 0 /* is present */
#define _PAGE_BIT_RW 1 /* writeable */
27,19 → 27,9
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
 
/*
* Swap offsets on configurations that allow automatic NUMA balancing use the
* bits after _PAGE_BIT_GLOBAL. To uniquely distinguish NUMA hinting PTEs from
* swap entries, we use the first bit after _PAGE_BIT_GLOBAL and shrink the
* maximum possible swap space from 16TB to 8TB.
*/
#define _PAGE_BIT_NUMA (_PAGE_BIT_GLOBAL+1)
 
/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
/* - set: nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY
 
#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
78,21 → 68,6
#endif
 
/*
* _PAGE_NUMA distinguishes between a numa hinting minor fault and a page
* that is not present. The hinting fault gathers numa placement statistics
* (see pte_numa()). The bit is always zero when the PTE is not present.
*
* The bit picked must be always zero when the pmd is present and not
* present, so that we don't lose information when we set it while
* atomically clearing the present bit.
*/
#ifdef CONFIG_NUMA_BALANCING
#define _PAGE_NUMA (_AT(pteval_t, 1) << _PAGE_BIT_NUMA)
#else
#define _PAGE_NUMA (_AT(pteval_t, 0))
#endif
 
/*
* Tracking soft dirty bit when a page goes to a swap is tricky.
* We need a bit which can be stored in pte _and_ not conflict
* with swap entry format. On x86 bits 6 and 7 are *not* involved
114,7 → 89,6
#define _PAGE_NX (_AT(pteval_t, 0))
#endif
 
#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
 
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
125,8 → 99,8
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_SOFT_DIRTY | _PAGE_NUMA)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_NUMA)
_PAGE_SOFT_DIRTY)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
 
/*
* The cache modes defined here are used to translate between pure SW usage
235,10 → 209,10
 
#include <linux/types.h>
 
/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
 
/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
/* Extracts the flags from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
 
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
260,7 → 234,7
return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}
 
#if PAGETABLE_LEVELS > 3
#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;
 
static inline pud_t native_make_pud(pmdval_t val)
281,7 → 255,7
}
#endif
 
#if PAGETABLE_LEVELS > 2
#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;
 
static inline pmd_t native_make_pmd(pmdval_t val)
302,14 → 276,40
}
#endif
 
static inline pudval_t pud_pfn_mask(pud_t pud)
{
if (native_pud_val(pud) & _PAGE_PSE)
return PHYSICAL_PUD_PAGE_MASK;
else
return PTE_PFN_MASK;
}
 
static inline pudval_t pud_flags_mask(pud_t pud)
{
return ~pud_pfn_mask(pud);
}
 
static inline pudval_t pud_flags(pud_t pud)
{
return native_pud_val(pud) & PTE_FLAGS_MASK;
return native_pud_val(pud) & pud_flags_mask(pud);
}
 
static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
if (native_pmd_val(pmd) & _PAGE_PSE)
return PHYSICAL_PMD_PAGE_MASK;
else
return PTE_PFN_MASK;
}
 
static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
return ~pmd_pfn_mask(pmd);
}
 
static inline pmdval_t pmd_flags(pmd_t pmd)
{
return native_pmd_val(pmd) & PTE_FLAGS_MASK;
return native_pmd_val(pmd) & pmd_flags_mask(pmd);
}
 
static inline pte_t native_make_pte(pteval_t val)
327,20 → 327,6
return native_pte_val(pte) & PTE_FLAGS_MASK;
}
 
#ifdef CONFIG_NUMA_BALANCING
/* Set of bits that distinguishes present, prot_none and numa ptes */
#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)
static inline pteval_t ptenuma_flags(pte_t pte)
{
return pte_flags(pte) & _PAGE_NUMA_MASK;
}
 
static inline pmdval_t pmdnuma_flags(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_NUMA_MASK;
}
#endif /* CONFIG_NUMA_BALANCING */
 
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
 
407,6 → 393,9
#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);
 
#define pgprot_writethrough pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);
 
/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING
 
/drivers/include/asm/preempt.h
30,12 → 30,9
/*
* must be macros to avoid header recursion hell
*/
#define init_task_preempt_count(p) do { \
task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
} while (0)
#define init_task_preempt_count(p) do { } while (0)
 
#define init_idle_preempt_count(p, cpu) do { \
task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
} while (0)
 
90,9 → 87,9
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
static __always_inline bool should_resched(void)
static __always_inline bool should_resched(int preempt_offset)
{
return unlikely(!raw_cpu_read_4(__preempt_count));
return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}
 
#ifdef CONFIG_PREEMPT
99,11 → 96,9
extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() asm ("call ___preempt_schedule")
extern asmlinkage void preempt_schedule(void);
# ifdef CONFIG_CONTEXT_TRACKING
extern asmlinkage void ___preempt_schedule_context(void);
# define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
extern asmlinkage void preempt_schedule_context(void);
extern asmlinkage void ___preempt_schedule_notrace(void);
# define __preempt_schedule_notrace() asm ("call ___preempt_schedule_notrace")
extern asmlinkage void preempt_schedule_notrace(void);
# endif
#endif
 
#endif /* __ASM_PREEMPT_H */
/drivers/include/asm/processor.h
6,12 → 6,12
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct vm86;
 
#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
21,6 → 21,7
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
 
#include <linux/personality.h>
#include <linux/cpumask.h>
52,6 → 53,11
return pc;
}
 
/*
* These alignment constraints are for performance in the vSMP case,
* but in the task_struct case we must also meet hardware imposed
* alignment requirements of the FPU state:
*/
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
109,6 → 115,9
/* in KB - valid for CPUS which support this call: */
int x86_cache_size;
int x86_cache_alignment; /* In bytes */
/* Cache QoS architectural values: */
int x86_cache_max_rmid; /* max index */
int x86_cache_occ_scale; /* scale to bytes */
int x86_power;
unsigned long loops_per_jiffy;
/* cpuid returned max cores value: */
160,10 → 169,7
 
extern const struct seq_operations cpuinfo_op;
 
#define cache_line_size() (x86_cache_alignment)
 
extern void cpu_detect(struct cpuinfo_x86 *c);
extern void fpu_detect(struct cpuinfo_x86 *c);
 
extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
210,8 → 216,23
unsigned long sp0;
unsigned short ss0, __ss0h;
unsigned long sp1;
/* ss1 caches MSR_IA32_SYSENTER_CS: */
unsigned short ss1, __ss1h;
 
/*
* We don't use ring 1, so ss1 is a convenient scratch space in
* the same cacheline as sp0. We use ss1 to cache the value in
* MSR_IA32_SYSENTER_CS. When we context switch
* MSR_IA32_SYSENTER_CS, we first check if the new value being
* written matches ss1, and, if it's not, then we wrmsr the new
* value and update ss1.
*
* The only reason we context switch MSR_IA32_SYSENTER_CS is
* that we set it to zero in vm86 tasks to avoid corrupting the
* stack if we were to go through the sysenter path from vm86
* mode.
*/
unsigned short ss1; /* MSR_IA32_SYSENTER_CS */
 
unsigned short __ss1h;
unsigned long sp2;
unsigned short ss2, __ss2h;
unsigned long __cr3;
276,14 → 297,18
unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
 
/*
* .. and then another 0x100 bytes for the emergency kernel stack:
* Space for the temporary SYSENTER stack:
*/
unsigned long stack[64];
unsigned long SYSENTER_stack[64];
 
} ____cacheline_aligned;
 
DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
 
#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#endif
 
/*
* Save the original ist values for checking stack pointers during debugging
*/
291,128 → 316,6
unsigned long ist[7];
};
 
#define MXCSR_DEFAULT 0x1f80
 
struct i387_fsave_struct {
u32 cwd; /* FPU Control Word */
u32 swd; /* FPU Status Word */
u32 twd; /* FPU Tag Word */
u32 fip; /* FPU IP Offset */
u32 fcs; /* FPU IP Selector */
u32 foo; /* FPU Operand Pointer Offset */
u32 fos; /* FPU Operand Pointer Selector */
 
/* 8*10 bytes for each FP-reg = 80 bytes: */
u32 st_space[20];
 
/* Software status information [not touched by FSAVE ]: */
u32 status;
};
 
struct i387_fxsave_struct {
u16 cwd; /* Control Word */
u16 swd; /* Status Word */
u16 twd; /* Tag Word */
u16 fop; /* Last Instruction Opcode */
union {
struct {
u64 rip; /* Instruction Pointer */
u64 rdp; /* Data Pointer */
};
struct {
u32 fip; /* FPU IP Offset */
u32 fcs; /* FPU IP Selector */
u32 foo; /* FPU Operand Offset */
u32 fos; /* FPU Operand Selector */
};
};
u32 mxcsr; /* MXCSR Register State */
u32 mxcsr_mask; /* MXCSR Mask */
 
/* 8*16 bytes for each FP-reg = 128 bytes: */
u32 st_space[32];
 
/* 16*16 bytes for each XMM-reg = 256 bytes: */
u32 xmm_space[64];
 
u32 padding[12];
 
union {
u32 padding1[12];
u32 sw_reserved[12];
};
 
} __attribute__((aligned(16)));
 
struct i387_soft_struct {
u32 cwd;
u32 swd;
u32 twd;
u32 fip;
u32 fcs;
u32 foo;
u32 fos;
/* 8*10 bytes for each FP-reg = 80 bytes: */
u32 st_space[20];
u8 ftop;
u8 changed;
u8 lookahead;
u8 no_update;
u8 rm;
u8 alimit;
struct math_emu_info *info;
u32 entry_eip;
};
 
struct ymmh_struct {
/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
u32 ymmh_space[64];
};
 
/* We don't support LWP yet: */
struct lwp_struct {
u8 reserved[128];
};
 
struct bndreg {
u64 lower_bound;
u64 upper_bound;
} __packed;
 
struct bndcsr {
u64 bndcfgu;
u64 bndstatus;
} __packed;
 
struct xsave_hdr_struct {
u64 xstate_bv;
u64 xcomp_bv;
u64 reserved[6];
} __attribute__((packed));
 
struct xsave_struct {
struct i387_fxsave_struct i387;
struct xsave_hdr_struct xsave_hdr;
struct ymmh_struct ymmh;
struct lwp_struct lwp;
struct bndreg bndreg[4];
struct bndcsr bndcsr;
/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));
 
union thread_xstate {
struct i387_fsave_struct fsave;
struct i387_fxsave_struct fxsave;
struct i387_soft_struct soft;
struct xsave_struct xsave;
};
 
struct fpu {
unsigned int last_cpu;
unsigned int has_fpu;
union thread_xstate *state;
};
 
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);
 
461,8 → 364,6
#endif /* X86_64 */
 
extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;
 
struct perf_event;
 
474,7 → 375,6
#ifdef CONFIG_X86_32
unsigned long sysenter_cs;
#else
unsigned long usersp; /* Copy from PDA */
unsigned short es;
unsigned short ds;
unsigned short fsindex;
487,6 → 387,7
unsigned long fs;
#endif
unsigned long gs;
 
/* Save middle states of ptrace breakpoints */
struct perf_event *ptrace_bps[HBP_NUM];
/* Debug status used for traps, single steps, etc... */
497,17 → 398,9
unsigned long cr2;
unsigned long trap_nr;
unsigned long error_code;
/* floating point and extended processor state */
struct fpu fpu;
#ifdef CONFIG_X86_32
#ifdef CONFIG_VM86
/* Virtual 86 mode info */
struct vm86_struct __user *vm86_info;
unsigned long screen_bitmap;
unsigned long v86flags;
unsigned long v86mask;
unsigned long saved_sp0;
unsigned int saved_fs;
unsigned int saved_gs;
struct vm86 *vm86;
#endif
/* IO permissions: */
unsigned long *io_bitmap_ptr;
514,15 → 407,13
unsigned long iopl;
/* Max allowed port in the bitmap, in bytes: */
unsigned io_bitmap_max;
 
/* Floating point and extended processor state */
struct fpu fpu;
/*
* fpu_counter contains the number of consecutive context switches
* that the FPU is used. If this is over a threshold, the lazy fpu
* saving becomes unlazy to save the trap. This is an unsigned char
* so that after 256 times the counter wraps and the behavior turns
* lazy again; this to deal with bursty apps that only use FPU for
* a short time
* WARNING: 'fpu' is dynamically-sized. It *MUST* be at
* the end.
*/
unsigned char fpu_counter;
};
 
/*
564,11 → 455,13
#endif
}
 
 
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid native_cpuid
#define paravirt_enabled() 0
#define paravirt_has(x) 0
 
static inline void load_sp0(struct tss_struct *tss,
struct thread_struct *thread)
579,39 → 472,6
#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */
 
/*
* Save the cr4 feature set we're using (ie
* Pentium 4MB enable and PPro Global page
* enable), so that any CPU's that boot up
* after us can get the correct flags.
*/
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;
 
static inline void set_in_cr4(unsigned long mask)
{
unsigned long cr4;
 
mmu_cr4_features |= mask;
if (trampoline_cr4_features)
*trampoline_cr4_features = mmu_cr4_features;
cr4 = read_cr4();
cr4 |= mask;
write_cr4(cr4);
}
 
static inline void clear_in_cr4(unsigned long mask)
{
unsigned long cr4;
 
mmu_cr4_features &= ~mask;
if (trampoline_cr4_features)
*trampoline_cr4_features = mmu_cr4_features;
cr4 = read_cr4();
cr4 &= ~mask;
write_cr4(cr4);
}
 
typedef struct {
unsigned long seg;
} mm_segment_t;
686,12 → 546,12
}
 
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
static __always_inline void rep_nop(void)
{
asm volatile("rep; nop" ::: "memory");
}
 
static inline void cpu_relax(void)
static __always_inline void cpu_relax(void)
{
rep_nop();
}
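/*
 * Illustrative sketch (not part of this header): the busy-wait shape the
 * comment above has in mind; the function and the 'flag' it polls are made
 * up for the example.
 */
static inline void example_spin_until_set(volatile int *flag)
{
	while (!*flag)
		cpu_relax();	/* PAUSE: cheap hint to the sibling thread */
}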
775,14 → 635,6
 
extern void set_task_blockstep(struct task_struct *task, bool on);
 
/*
* from system description table in BIOS. Mostly for MCA use, but
* others may find it useful:
*/
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
 
/* Boot loader type from the setup header: */
extern int bootloader_type;
extern int bootloader_version;
794,10 → 646,10
#define ARCH_HAS_SPINLOCK_PREFETCH
 
#ifdef CONFIG_X86_32
# define BASE_PREFETCH ASM_NOP4
# define BASE_PREFETCH ""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH "prefetcht0 (%1)"
# define BASE_PREFETCH "prefetcht0 %P1"
#endif
 
/*
832,6 → 684,9
prefetchw(x);
}
 
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
TOP_OF_KERNEL_STACK_PADDING)
 
#ifdef CONFIG_X86_32
/*
* User space process size: 3GB (default).
842,39 → 697,15
#define STACK_TOP_MAX STACK_TOP
 
#define INIT_THREAD { \
.sp0 = sizeof(init_stack) + (long)&init_stack, \
.vm86_info = NULL, \
.sp0 = TOP_OF_INIT_STACK, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
}
 
/*
* Note that the .io_bitmap member must be extra-big. This is because
* the CPU will access an additional byte beyond the end of the IO
* permission bitmap. The extra byte must be all 1 bits, and must
* be within the limit.
*/
#define INIT_TSS { \
.x86_tss = { \
.sp0 = sizeof(init_stack) + (long)&init_stack, \
.ss0 = __KERNEL_DS, \
.ss1 = __KERNEL_CS, \
.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
}, \
.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \
}
 
extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info) \
({ \
unsigned long *__ptr = (unsigned long *)(info); \
(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
})
 
/*
* The below -8 is to reserve 8 bytes on top of the ring0 stack.
* TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
* This is necessary to guarantee that the entire "struct pt_regs"
* is accessible even if the CPU hasn't stored the SS/ESP registers
* on the stack (interrupt gate does not save these registers
885,9 → 716,9
*/
#define task_pt_regs(task) \
({ \
struct pt_regs *__regs__; \
__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
__regs__ - 1; \
unsigned long __ptr = (unsigned long)task_stack_page(task); \
__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \
((struct pt_regs *)__ptr) - 1; \
})
 
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
919,13 → 750,9
#define STACK_TOP_MAX TASK_SIZE_MAX
 
#define INIT_THREAD { \
.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
.sp0 = TOP_OF_INIT_STACK \
}
 
#define INIT_TSS { \
.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}
 
/*
* Return saved PC of a blocked thread.
* What is this good for? It will always be the scheduler or ret_from_fork.
935,11 → 762,6
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);
 
/*
* User space RSP while inside the SYSCALL fast path
*/
DECLARE_PER_CPU(unsigned long, old_rsp);
 
#endif /* CONFIG_X86_64 */
 
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
961,18 → 783,18
extern int set_tsc_mode(unsigned int val);
 
/* Register/unregister a process' MPX related resource */
#define MPX_ENABLE_MANAGEMENT(tsk) mpx_enable_management((tsk))
#define MPX_DISABLE_MANAGEMENT(tsk) mpx_disable_management((tsk))
#define MPX_ENABLE_MANAGEMENT() mpx_enable_management()
#define MPX_DISABLE_MANAGEMENT() mpx_disable_management()
 
#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(struct task_struct *tsk);
extern int mpx_disable_management(struct task_struct *tsk);
extern int mpx_enable_management(void);
extern int mpx_disable_management(void);
#else
static inline int mpx_enable_management(struct task_struct *tsk)
static inline int mpx_enable_management(void)
{
return -EINVAL;
}
static inline int mpx_disable_management(struct task_struct *tsk)
static inline int mpx_disable_management(void)
{
return -EINVAL;
}
979,6 → 801,7
#endif /* CONFIG_X86_INTEL_MPX */
 
extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);
 
static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
/drivers/include/asm/sigcontext.h
1,79 → 1,8
#ifndef _ASM_X86_SIGCONTEXT_H
#define _ASM_X86_SIGCONTEXT_H
 
/* This is a legacy header - all kernel code includes <uapi/asm/sigcontext.h> directly. */
 
#include <uapi/asm/sigcontext.h>
 
#ifdef __i386__
struct sigcontext {
unsigned short gs, __gsh;
unsigned short fs, __fsh;
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned long di;
unsigned long si;
unsigned long bp;
unsigned long sp;
unsigned long bx;
unsigned long dx;
unsigned long cx;
unsigned long ax;
unsigned long trapno;
unsigned long err;
unsigned long ip;
unsigned short cs, __csh;
unsigned long flags;
unsigned long sp_at_signal;
unsigned short ss, __ssh;
 
/*
* fpstate is really (struct _fpstate *) or (struct _xstate *)
* depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
* bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
* of extended memory layout. See comments at the definition of
* (struct _fpx_sw_bytes)
*/
void __user *fpstate; /* zero when no FPU/extended context */
unsigned long oldmask;
unsigned long cr2;
};
#else /* __i386__ */
struct sigcontext {
unsigned long r8;
unsigned long r9;
unsigned long r10;
unsigned long r11;
unsigned long r12;
unsigned long r13;
unsigned long r14;
unsigned long r15;
unsigned long di;
unsigned long si;
unsigned long bp;
unsigned long bx;
unsigned long dx;
unsigned long ax;
unsigned long cx;
unsigned long sp;
unsigned long ip;
unsigned long flags;
unsigned short cs;
unsigned short gs;
unsigned short fs;
unsigned short __pad0;
unsigned long err;
unsigned long trapno;
unsigned long oldmask;
unsigned long cr2;
 
/*
* fpstate is really (struct _fpstate *) or (struct _xstate *)
* depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
* bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
* of extended memory layout. See comments at the definition of
* (struct _fpx_sw_bytes)
*/
void __user *fpstate; /* zero when no FPU/extended context */
unsigned long reserved1[8];
};
#endif /* !__i386__ */
#endif /* _ASM_X86_SIGCONTEXT_H */
/drivers/include/asm/special_insns.h
137,17 → 137,17
native_write_cr3(x);
}
 
static inline unsigned long read_cr4(void)
static inline unsigned long __read_cr4(void)
{
return native_read_cr4();
}
 
static inline unsigned long read_cr4_safe(void)
static inline unsigned long __read_cr4_safe(void)
{
return native_read_cr4_safe();
}
 
static inline void write_cr4(unsigned long x)
static inline void __write_cr4(unsigned long x)
{
native_write_cr4(x);
}
/drivers/include/asm/x86_init.h
1,7 → 1,6
#ifndef _ASM_X86_PLATFORM_H
#define _ASM_X86_PLATFORM_H
 
#include <asm/pgtable_types.h>
//#include <asm/bootparam.h>
 
struct mpc_bus;
171,38 → 170,17
};
 
struct pci_dev;
struct msi_msg;
 
struct x86_msi_ops {
int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq,
unsigned int dest, struct msi_msg *msg,
u8 hpet_id);
void (*teardown_msi_irq)(unsigned int irq);
void (*teardown_msi_irqs)(struct pci_dev *dev);
void (*restore_msi_irqs)(struct pci_dev *dev);
int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
};
 
struct IO_APIC_route_entry;
struct io_apic_irq_attr;
struct irq_data;
struct cpumask;
 
struct x86_io_apic_ops {
void (*init) (void);
unsigned int (*read) (unsigned int apic, unsigned int reg);
void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
void (*modify) (unsigned int apic, unsigned int reg, unsigned int value);
void (*disable)(void);
void (*print_entries)(unsigned int apic, unsigned int nr_entries);
int (*set_affinity)(struct irq_data *data,
const struct cpumask *mask,
bool force);
int (*setup_entry)(int irq, struct IO_APIC_route_entry *entry,
unsigned int destination, int vector,
struct io_apic_irq_attr *attr);
void (*eoi_ioapic_pin)(int apic, int pin, int vector);
};
 
extern struct x86_init_ops x86_init;
/drivers/include/asm-generic/atomic-long.h
23,236 → 23,168
typedef atomic64_t atomic_long_t;
 
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
#define ATOMIC_LONG_PFX(x) atomic64 ## x
 
static inline long atomic_long_read(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
#else
 
return (long)atomic64_read(v);
}
typedef atomic_t atomic_long_t;
 
static inline void atomic_long_set(atomic_long_t *l, long i)
{
atomic64_t *v = (atomic64_t *)l;
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
#define ATOMIC_LONG_PFX(x) atomic ## x
 
atomic64_set(v, i);
}
#endif
 
static inline void atomic_long_inc(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
atomic64_inc(v);
#define ATOMIC_LONG_READ_OP(mo) \
static inline long atomic_long_read##mo(const atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
return (long)ATOMIC_LONG_PFX(_read##mo)(v); \
}
ATOMIC_LONG_READ_OP()
ATOMIC_LONG_READ_OP(_acquire)
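/*
 * Illustration only: with BITS_PER_LONG == 64 the two instantiations above
 * expand to
 *
 *	static inline long atomic_long_read(const atomic_long_t *l)
 *	{ return (long)atomic64_read((atomic64_t *)l); }
 *
 *	static inline long atomic_long_read_acquire(const atomic_long_t *l)
 *	{ return (long)atomic64_read_acquire((atomic64_t *)l); }
 *
 * and to the atomic_read()/atomic_read_acquire() forms otherwise.
 */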
 
static inline void atomic_long_dec(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
#undef ATOMIC_LONG_READ_OP
 
atomic64_dec(v);
#define ATOMIC_LONG_SET_OP(mo) \
static inline void atomic_long_set##mo(atomic_long_t *l, long i) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
ATOMIC_LONG_PFX(_set##mo)(v, i); \
}
ATOMIC_LONG_SET_OP()
ATOMIC_LONG_SET_OP(_release)
 
static inline void atomic_long_add(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
#undef ATOMIC_LONG_SET_OP
 
atomic64_add(i, v);
#define ATOMIC_LONG_ADD_SUB_OP(op, mo) \
static inline long \
atomic_long_##op##_return##mo(long i, atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v); \
}
ATOMIC_LONG_ADD_SUB_OP(add,)
ATOMIC_LONG_ADD_SUB_OP(add, _relaxed)
ATOMIC_LONG_ADD_SUB_OP(add, _acquire)
ATOMIC_LONG_ADD_SUB_OP(add, _release)
ATOMIC_LONG_ADD_SUB_OP(sub,)
ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed)
ATOMIC_LONG_ADD_SUB_OP(sub, _acquire)
ATOMIC_LONG_ADD_SUB_OP(sub, _release)
 
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
#undef ATOMIC_LONG_ADD_SUB_OP
 
atomic64_sub(i, v);
}
#define atomic_long_cmpxchg_relaxed(l, old, new) \
(ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
(old), (new)))
#define atomic_long_cmpxchg_acquire(l, old, new) \
(ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
(old), (new)))
#define atomic_long_cmpxchg_release(l, old, new) \
(ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
(old), (new)))
#define atomic_long_cmpxchg(l, old, new) \
(ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
 
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return atomic64_sub_and_test(i, v);
}
 
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return atomic64_dec_and_test(v);
}
 
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return atomic64_inc_and_test(v);
}
 
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return atomic64_add_negative(i, v);
}
 
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return (long)atomic64_add_return(i, v);
}
 
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return (long)atomic64_sub_return(i, v);
}
 
static inline long atomic_long_inc_return(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return (long)atomic64_inc_return(v);
}
 
static inline long atomic_long_dec_return(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
 
return (long)atomic64_dec_return(v);
}
 
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
atomic64_t *v = (atomic64_t *)l;
 
return (long)atomic64_add_unless(v, a, u);
}
 
#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
 
#define atomic_long_cmpxchg(l, old, new) \
(atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
#define atomic_long_xchg_relaxed(v, new) \
(ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
#define atomic_long_xchg_acquire(v, new) \
(ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
#define atomic_long_xchg_release(v, new) \
(ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
#define atomic_long_xchg(v, new) \
(atomic64_xchg((atomic64_t *)(v), (new)))
(ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
 
#else /* BITS_PER_LONG == 64 */
 
typedef atomic_t atomic_long_t;
 
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
static inline long atomic_long_read(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
 
return (long)atomic_read(v);
}
 
static inline void atomic_long_set(atomic_long_t *l, long i)
{
atomic_t *v = (atomic_t *)l;
 
atomic_set(v, i);
}
 
static inline void atomic_long_inc(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
atomic_inc(v);
ATOMIC_LONG_PFX(_inc)(v);
}
 
static inline void atomic_long_dec(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
atomic_dec(v);
ATOMIC_LONG_PFX(_dec)(v);
}
 
static inline void atomic_long_add(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
 
atomic_add(i, v);
#define ATOMIC_LONG_OP(op) \
static inline void \
atomic_long_##op(long i, atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
ATOMIC_LONG_PFX(_##op)(i, v); \
}
 
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
ATOMIC_LONG_OP(add)
ATOMIC_LONG_OP(sub)
ATOMIC_LONG_OP(and)
ATOMIC_LONG_OP(or)
ATOMIC_LONG_OP(xor)
ATOMIC_LONG_OP(andnot)
 
atomic_sub(i, v);
}
#undef ATOMIC_LONG_OP
 
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
return atomic_sub_and_test(i, v);
return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
}
 
static inline int atomic_long_dec_and_test(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
return atomic_dec_and_test(v);
return ATOMIC_LONG_PFX(_dec_and_test)(v);
}
 
static inline int atomic_long_inc_and_test(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
return atomic_inc_and_test(v);
return ATOMIC_LONG_PFX(_inc_and_test)(v);
}
 
static inline int atomic_long_add_negative(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
return atomic_add_negative(i, v);
return ATOMIC_LONG_PFX(_add_negative)(i, v);
}
 
static inline long atomic_long_add_return(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
 
return (long)atomic_add_return(i, v);
#define ATOMIC_LONG_INC_DEC_OP(op, mo) \
static inline long \
atomic_long_##op##_return##mo(atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v); \
}
ATOMIC_LONG_INC_DEC_OP(inc,)
ATOMIC_LONG_INC_DEC_OP(inc, _relaxed)
ATOMIC_LONG_INC_DEC_OP(inc, _acquire)
ATOMIC_LONG_INC_DEC_OP(inc, _release)
ATOMIC_LONG_INC_DEC_OP(dec,)
ATOMIC_LONG_INC_DEC_OP(dec, _relaxed)
ATOMIC_LONG_INC_DEC_OP(dec, _acquire)
ATOMIC_LONG_INC_DEC_OP(dec, _release)
 
static inline long atomic_long_sub_return(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
#undef ATOMIC_LONG_INC_DEC_OP
 
return (long)atomic_sub_return(i, v);
}
 
static inline long atomic_long_inc_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
 
return (long)atomic_inc_return(v);
}
 
static inline long atomic_long_dec_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
 
return (long)atomic_dec_return(v);
}
 
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
atomic_t *v = (atomic_t *)l;
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
return (long)atomic_add_unless(v, a, u);
return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u);
}
 
#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
#define atomic_long_inc_not_zero(l) \
ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
 
#define atomic_long_cmpxchg(l, old, new) \
(atomic_cmpxchg((atomic_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
(atomic_xchg((atomic_t *)(v), (new)))
 
#endif /* BITS_PER_LONG == 64 */
 
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
/drivers/include/asm-generic/memory_model.h
69,6 → 69,12
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
 
/*
* Convert a physical address to a Page Frame Number and back
*/
#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
#define __pfn_to_phys(pfn) PFN_PHYS(pfn)
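/*
 * Illustration only, with PAGE_SHIFT == 12:
 *	__phys_to_pfn(0x12345678) == 0x12345
 *	__pfn_to_phys(0x12345)    == 0x12345000
 */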
 
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
 
/drivers/include/ddk.h
15,6 → 15,7
 
#define PG_SW 0x003
#define PG_UW 0x007
#define PG_WRITEC 0x008
#define PG_NOCACHE 0x018
#define PG_SHARED 0x200
 
63,7 → 64,4
u32 drvEntry(int, char *)__asm__("_drvEntry");
 
 
 
 
 
#endif /* DDK_H */
/drivers/include/drm/drmP.h
45,8 → 45,6
#include <linux/mm.h>
 
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/sched.h>
110,6 → 108,12
* PRIME: used in the prime code.
* This is the category used by the DRM_DEBUG_PRIME() macro.
*
* ATOMIC: used in the atomic code.
* This is the category used by the DRM_DEBUG_ATOMIC() macro.
*
* VBL: used for verbose debug messages in the vblank code
* This is the category used by the DRM_DEBUG_VBL() macro.
*
* Enabling verbose debug messages is done through the drm.debug parameter,
* each category being enabled by a bit.
*
117,7 → 121,7
* drm.debug=0x2 will enable DRIVER messages
* drm.debug=0x3 will enable CORE and DRIVER messages
* ...
* drm.debug=0xf will enable all messages
* drm.debug=0x3f will enable all messages
*
* An interesting feature is that it's possible to enable verbose logging at
* run-time by echoing the debug value in its sysfs node:
127,6 → 131,8
#define DRM_UT_DRIVER 0x02
#define DRM_UT_KMS 0x04
#define DRM_UT_PRIME 0x08
#define DRM_UT_ATOMIC 0x10
#define DRM_UT_VBL 0x20
 
extern __printf(2, 3)
void drm_ut_debug_printk(const char *function_name,
149,6 → 155,8
#define DRIVER_MODESET 0x2000
#define DRIVER_PRIME 0x4000
#define DRIVER_RENDER 0x8000
#define DRIVER_ATOMIC 0x10000
#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
 
/***********************************************************************/
/** \name Macros to make printk easier */
161,7 → 169,7
* \param arg arguments
*/
#define DRM_ERROR(fmt, ...) \
drm_err(fmt, ##__VA_ARGS__)
printk("DRM Error: " fmt, ##__VA_ARGS__)
 
/**
* Rate limited error output. Like DRM_ERROR() but won't flood the log.
209,13 → 217,23
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \
} while (0)
#define DRM_DEBUG_ATOMIC(fmt, args...) \
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \
} while (0)
#define DRM_DEBUG_VBL(fmt, args...) \
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \
} while (0)
 
#else
#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
#define DRM_DEBUG_KMS(fmt, args...) do { } while (0)
#define DRM_DEBUG_PRIME(fmt, args...) do { } while (0)
#define DRM_DEBUG(fmt, arg...) do { } while (0)
#define DRM_DEBUG_ATOMIC(fmt, args...) do { } while (0)
#define DRM_DEBUG_VBL(fmt, args...) do { } while (0)
#endif
 
/*@}*/
 
/***********************************************************************/
252,7 → 270,6
unsigned int cmd;
int flags;
drm_ioctl_t *func;
unsigned int cmd_drv;
const char *name;
};
 
262,7 → 279,12
*/
 
#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \
.cmd = DRM_IOCTL_##ioctl, \
.func = _func, \
.flags = _flags, \
.name = #ioctl \
}
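/*
 * Illustrative sketch (not part of this header): how a driver ioctl table
 * uses the macro above. The FOO ioctl name and handler are made up, so the
 * snippet is guarded out.
 */
#if 0	/* example only */
static const struct drm_ioctl_desc foo_ioctls[] = {
	DRM_IOCTL_DEF_DRV(FOO_GETPARAM, foo_getparam_ioctl, DRM_AUTH),
};
#endif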
 
/* Event queued up for userspace to read */
struct drm_pending_event {
292,7 → 314,10
* in the plane list
*/
unsigned universal_planes:1;
/* true if client understands atomic properties */
unsigned atomic:1;
struct list_head lhead;
struct drm_minor *minor;
unsigned long lock_count;
 
/** Mapping of mm object handles to object pointers. */
313,6 → 338,10
struct list_head fbs;
struct mutex fbs_lock;
 
/** User-created blob properties; this retains a reference on the
* property. */
struct list_head blobs;
 
wait_queue_head_t event_wait;
struct list_head event_list;
int event_space;
340,8 → 369,7
* @minor: Link back to minor char device we are master for. Immutable.
* @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
* @unique_len: Length of unique field. Protected by drm_global_mutex.
* @magiclist: Hash of used authentication tokens. Protected by struct_mutex.
* @magicfree: List of used authentication tokens. Protected by struct_mutex.
* @magic_map: Map of used authentication tokens. Protected by struct_mutex.
* @lock: DRI lock information.
* @driver_priv: Pointer to driver-private information.
*/
350,8 → 378,7
struct drm_minor *minor;
char *unique;
int unique_len;
struct drm_open_hash magiclist;
struct list_head magicfree;
struct idr magic_map;
struct drm_lock_data lock;
void *driver_priv;
};
383,7 → 410,7
/**
* get_vblank_counter - get raw hardware vblank counter
* @dev: DRM device
* @crtc: counter to fetch
* @pipe: counter to fetch
*
* Driver callback for fetching a raw hardware vblank counter for @pipe.
* If a device doesn't have a hardware counter, the driver can simply
397,12 → 424,12
* RETURNS
* Raw vblank counter value.
*/
u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
 
/**
* enable_vblank - enable vblank interrupt events
* @dev: DRM device
* @crtc: which irq to enable
* @pipe: which irq to enable
*
* Enable vblank interrupts for @pipe. If the device doesn't have
* a hardware vblank counter, this routine should be a no-op, since
412,19 → 439,33
* Zero on success, appropriate errno if the given @pipe's vblank
* interrupt cannot be enabled.
*/
int (*enable_vblank) (struct drm_device *dev, int crtc);
int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
 
/**
* disable_vblank - disable vblank interrupt events
* @dev: DRM device
* @crtc: which irq to enable
* @pipe: which irq to disable
*
* Disable vblank interrupts for @pipe. If the device doesn't have
* a hardware vblank counter, this routine should be a no-op, since
* interrupts will have to stay on to keep the count accurate.
*/
void (*disable_vblank) (struct drm_device *dev, int crtc);
void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
 
/**
* Called by \c drm_device_is_agp. Typically used to determine if a
* card is really attached to AGP or not.
*
* \param dev DRM device handle
*
* \returns
* One of three values is returned depending on whether or not the
* card is absolutely \b not AGP (return of 0), absolutely \b is AGP
* (return of 1), or may or may not be AGP (return of 2).
*/
int (*device_is_agp) (struct drm_device *dev);
 
/**
* Called by vblank timestamping code.
*
* Return the current display scanout position from a crtc, and an
431,7 → 472,7
* optional accurate ktime_get timestamp of when position was measured.
*
* \param dev DRM device.
* \param crtc Id of the crtc to query.
* \param pipe Id of the crtc to query.
* \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
* \param *vpos Target location for current vertical scanout position.
* \param *hpos Target location for current horizontal scanout position.
439,6 → 480,7
* scanout position query. Can be NULL to skip timestamp.
* \param *etime Target location for timestamp taken immediately after
* scanout position query. Can be NULL to skip timestamp.
* \param mode Current display timings.
*
* Returns vpos as a positive number while in active scanout area.
* Returns vpos as a negative number inside vblank, counting the number
454,10 → 496,10
* but unknown small number of scanlines wrt. real scanout position.
*
*/
int (*get_scanout_position) (struct drm_device *dev, int crtc,
unsigned int flags,
int *vpos, int *hpos, void *stime,
void *etime);
int (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
unsigned int flags, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
 
/**
* Called by \c drm_get_last_vbltimestamp. Should return a precise
473,7 → 515,7
* to the OpenML OML_sync_control extension specification.
*
* \param dev dev DRM device handle.
* \param crtc crtc for which timestamp should be returned.
* \param pipe crtc for which timestamp should be returned.
* \param *max_error Maximum allowable timestamp error in nanoseconds.
* Implementation should strive to provide timestamp
* with an error of at most *max_error nanoseconds.
489,7 → 531,7
* negative number on failure. A positive status code on success,
* which describes how the vblank_time timestamp was computed.
*/
int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
int (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
int *max_error,
struct timeval *vblank_time,
unsigned flags);
548,7 → 590,7
struct drm_minor {
int index; /**< Minor device number */
int type; /**< Control or render */
// struct device kdev; /**< Linux device */
struct device *kdev; /**< Linux device */
struct drm_device *dev;
 
struct dentry *debugfs_root;
558,13 → 600,12
 
/* currently active master for this node. Protected by master_mutex */
struct drm_master *master;
struct drm_mode_group mode_group;
};
 
 
struct drm_pending_vblank_event {
struct drm_pending_event base;
int pipe;
unsigned int pipe;
struct drm_event_vblank event;
};
 
571,15 → 612,21
struct drm_vblank_crtc {
struct drm_device *dev; /* pointer to the drm_device */
wait_queue_head_t queue; /**< VBLANK wait queue */
struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */
struct timer_list disable_timer; /* delayed disable timer */
atomic_t count; /**< number of VBLANK interrupts */
 
/* vblank counter, protected by dev->vblank_time_lock for writes */
u32 count;
/* vblank timestamps, protected by dev->vblank_time_lock for writes */
struct timeval time[DRM_VBLANKTIME_RBSIZE];
 
atomic_t refcount; /* number of users of vblank interrupts per crtc */
u32 last; /* protected by dev->vbl_lock, used */
/* for wraparound handling */
u32 last_wait; /* Last vblank seqno waited per CRTC */
unsigned int inmodeset; /* Display driver is setting mode */
int crtc; /* crtc index */
unsigned int pipe; /* crtc index */
int framedur_ns; /* frame/field duration in ns */
int linedur_ns; /* line duration in ns */
bool enabled; /* so we don't call enable more than
once per disable */
};
598,7 → 645,9
struct device *dev; /**< Device structure of bus-device */
struct drm_driver *driver; /**< DRM driver managing the device */
void *dev_private; /**< DRM driver private data */
struct drm_minor *control; /**< Control node */
struct drm_minor *primary; /**< Primary node */
struct drm_minor *render; /**< Render node */
atomic_t unplugged; /**< Flag whether dev is dead */
/** \name Locks */
/*@{ */
638,8 → 687,6
 
/** \name Context support */
/*@{ */
bool irq_enabled; /**< True if irq handler is enabled */
int irq;
 
__volatile__ long context_flag; /**< Context swapping flag */
int last_context; /**< Last current context */
647,6 → 694,8
 
/** \name VBLANK IRQ support */
/*@{ */
bool irq_enabled;
int irq;
 
/*
* At load time, disabling the vblank interrupt won't be allowed since
734,6 → 783,7
/*@{*/
 
/* Driver support (drm_drv.h) */
extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
extern long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
745,6 → 795,7
extern ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
extern int drm_release(struct inode *inode, struct file *filp);
extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
 
/* Mapping support (drm_vm.h) */
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
752,6 → 803,8
/* Misc. IOCTL support (drm_ioctl.c) */
int drm_noop(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_invalid_op(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
/* Cache management (drm_cache.c) */
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
767,32 → 820,39
extern int drm_irq_install(struct drm_device *dev, int irq);
extern int drm_irq_uninstall(struct drm_device *dev);
 
extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
struct timeval *vblanktime);
extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
struct timeval *vblanktime);
extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
struct drm_pending_vblank_event *e);
extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
extern void drm_wait_one_vblank(struct drm_device *dev, int crtc);
extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_on(struct drm_device *dev, int crtc);
extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
 
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
int crtc, int *max_error,
unsigned int pipe, int *max_error,
struct timeval *vblank_time,
unsigned flags,
const struct drm_crtc *refcrtc,
const struct drm_display_mode *mode);
extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
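
/*
 * Sketch of typical driver usage of the declarations above (assumed init
 * path and pipe count; error handling kept minimal):
 */
static inline int example_vblank_setup(struct drm_device *dev)
{
	int ret = drm_vblank_init(dev, 2);	/* two pipes, assumed */

	if (ret)
		return ret;

	/* hold a vblank reference while waiting for one vblank on pipe 0 */
	ret = drm_vblank_get(dev, 0);
	if (ret == 0) {
		drm_wait_one_vblank(dev, 0);
		drm_vblank_put(dev, 0);
	}
	return ret;
}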
810,8 → 870,8
}
 
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
 
/* Stub support (drm_stub.h) */
extern struct drm_master *drm_master_get(struct drm_master *master);
820,6 → 880,7
extern void drm_put_dev(struct drm_device *dev);
extern void drm_unplug_dev(struct drm_device *dev);
extern unsigned int drm_debug;
extern bool drm_atomic;
 
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
/drivers/include/drm/drm_agpsupport.h
12,9 → 12,6
struct drm_device;
struct drm_file;
 
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && \
defined(MODULE)))
 
struct drm_agp_head {
struct agp_kern_info agp_info;
struct list_head memory;
28,7 → 25,7
unsigned long page_mask;
};
 
#if __OS_HAS_AGP
#if IS_ENABLED(CONFIG_AGP)
 
void drm_free_agp(struct agp_memory * handle, int pages);
int drm_bind_agp(struct agp_memory * handle, unsigned int start);
66,7 → 63,7
int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
#else /* __OS_HAS_AGP */
#else /* CONFIG_AGP */
 
static inline void drm_free_agp(struct agp_memory * handle, int pages)
{
105,23 → 102,11
return -ENODEV;
}
 
static inline int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
static inline int drm_agp_release(struct drm_device *dev)
{
return -ENODEV;
}
 
static inline int drm_agp_release_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
static inline int drm_agp_enable(struct drm_device *dev,
struct drm_agp_mode mode)
{
128,12 → 113,6
return -ENODEV;
}
 
static inline int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
static inline int drm_agp_info(struct drm_device *dev,
struct drm_agp_info *info)
{
140,12 → 119,6
return -ENODEV;
}
 
static inline int drm_agp_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
static inline int drm_agp_alloc(struct drm_device *dev,
struct drm_agp_buffer *request)
{
152,12 → 125,6
return -ENODEV;
}
 
static inline int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
static inline int drm_agp_free(struct drm_device *dev,
struct drm_agp_buffer *request)
{
164,12 → 131,6
return -ENODEV;
}
 
static inline int drm_agp_free_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
static inline int drm_agp_unbind(struct drm_device *dev,
struct drm_agp_binding *request)
{
176,12 → 137,6
return -ENODEV;
}
 
static inline int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
 
static inline int drm_agp_bind(struct drm_device *dev,
struct drm_agp_binding *request)
{
188,12 → 143,6
return -ENODEV;
}
 
static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return -ENODEV;
}
#endif /* CONFIG_AGP */
 
#endif /* __OS_HAS_AGP */
 
#endif /* _DRM_AGPSUPPORT_H_ */
/drivers/include/drm/drm_atomic.h
35,19 → 35,89
void drm_atomic_state_clear(struct drm_atomic_state *state);
void drm_atomic_state_free(struct drm_atomic_state *state);
 
int __must_check
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state);
void drm_atomic_state_default_clear(struct drm_atomic_state *state);
void drm_atomic_state_default_release(struct drm_atomic_state *state);
 
struct drm_crtc_state * __must_check
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc);
int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *state, struct drm_property *property,
uint64_t val);
struct drm_plane_state * __must_check
drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane);
int drm_atomic_plane_set_property(struct drm_plane *plane,
struct drm_plane_state *state, struct drm_property *property,
uint64_t val);
struct drm_connector_state * __must_check
drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector);
int drm_atomic_connector_set_property(struct drm_connector *connector,
struct drm_connector_state *state, struct drm_property *property,
uint64_t val);
 
/**
* drm_atomic_get_existing_crtc_state - get crtc state, if it exists
* @state: global atomic state object
* @crtc: crtc to grab
*
* This function returns the crtc state for the given crtc, or NULL
* if the crtc is not part of the global atomic state.
*/
static inline struct drm_crtc_state *
drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
return state->crtc_states[drm_crtc_index(crtc)];
}
 
/**
* drm_atomic_get_existing_plane_state - get plane state, if it exists
* @state: global atomic state object
* @plane: plane to grab
*
* This function returns the plane state for the given plane, or NULL
* if the plane is not part of the global atomic state.
*/
static inline struct drm_plane_state *
drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane)
{
return state->plane_states[drm_plane_index(plane)];
}
 
/**
* drm_atomic_get_existing_connector_state - get connector state, if it exists
* @state: global atomic state object
* @connector: connector to grab
*
* This function returns the connector state for the given connector,
* or NULL if the connector is not part of the global atomic state.
*/
static inline struct drm_connector_state *
drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector)
{
int index = drm_connector_index(connector);
 
if (index >= state->num_connector)
return NULL;
 
return state->connector_states[index];
}
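
/*
 * Sketch (assumed driver check code): the _existing_ variants above allow
 * peeking at an object's state without forcing it into the update, e.g.:
 */
static inline bool example_crtc_in_state(struct drm_atomic_state *state,
					 struct drm_crtc *crtc)
{
	return drm_atomic_get_existing_crtc_state(state, crtc) != NULL;
}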
 
int __must_check
drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
struct drm_plane *plane, struct drm_crtc *crtc);
drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
struct drm_display_mode *mode);
int __must_check
drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
struct drm_property_blob *blob);
int __must_check
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
struct drm_crtc *crtc);
void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb);
int __must_check
56,6 → 126,10
int __must_check
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
struct drm_crtc *crtc);
int __must_check
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc);
 
int
drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc);
62,8 → 136,42
 
void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
 
void
drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
 
int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
 
#define for_each_connector_in_state(state, connector, connector_state, __i) \
for ((__i) = 0; \
(__i) < (state)->num_connector && \
((connector) = (state)->connectors[__i], \
(connector_state) = (state)->connector_states[__i], 1); \
(__i)++) \
if (connector)
 
#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \
for ((__i) = 0; \
(__i) < (state)->dev->mode_config.num_crtc && \
((crtc) = (state)->crtcs[__i], \
(crtc_state) = (state)->crtc_states[__i], 1); \
(__i)++) \
if (crtc_state)
 
#define for_each_plane_in_state(state, plane, plane_state, __i) \
for ((__i) = 0; \
(__i) < (state)->dev->mode_config.num_total_plane && \
((plane) = (state)->planes[__i], \
(plane_state) = (state)->plane_states[__i], 1); \
(__i)++) \
if (plane_state)

static inline bool
drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
{
return state->mode_changed || state->active_changed ||
state->connectors_changed;
}
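
/*
 * Iterator usage sketch (hypothetical helper): count the planes touched by
 * an atomic update with for_each_plane_in_state() from above.
 */
static inline int example_count_planes_in_state(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i, n = 0;

	for_each_plane_in_state(state, plane, plane_state, i)
		n++;

	return n;
}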
 
 
#endif /* DRM_ATOMIC_H_ */
/drivers/include/drm/drm_atomic_helper.h
30,6 → 30,12
 
#include <drm/drm_crtc.h>
 
struct drm_atomic_state;
 
int drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_commit(struct drm_device *dev,
39,17 → 45,23
void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
struct drm_atomic_state *old_state);
 
void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
struct drm_atomic_state *old_state);
 
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *old_state);
 
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_planes(struct drm_device *dev,
struct drm_atomic_state *state);
struct drm_atomic_state *state,
bool active_only);
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
struct drm_atomic_state *old_state);
void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state);
 
void drm_atomic_helper_swap_state(struct drm_device *dev,
struct drm_atomic_state *state);
63,7 → 75,11
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
int drm_atomic_helper_disable_plane(struct drm_plane *plane);
int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
struct drm_plane_state *plane_state);
int drm_atomic_helper_set_config(struct drm_mode_set *set);
int __drm_atomic_helper_set_config(struct drm_mode_set *set,
struct drm_atomic_state *state);
 
int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
struct drm_property *property,
78,23 → 94,42
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags);
int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
int mode);
 
/* default implementations for state handling */
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
struct drm_crtc_state *
drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
 
void drm_atomic_helper_plane_reset(struct drm_plane *plane);
void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
struct drm_plane_state *state);
struct drm_plane_state *
drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
 
void drm_atomic_helper_connector_reset(struct drm_connector *connector);
void
__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
struct drm_connector_state *state);
struct drm_connector_state *
drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
void
__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
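
/*
 * Wiring sketch (names assumed): drivers typically plug the default state
 * helpers above straight into their vtables, e.g. for a CRTC:
 */
static const struct drm_crtc_funcs example_crtc_funcs = {
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	/* .set_config, .page_flip, .destroy etc. elided */
};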
 
123,4 → 158,41
#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
 
/*
* drm_atomic_plane_disabling - check whether a plane is being disabled
* @plane: plane object
* @old_state: previous atomic state
*
* Checks the atomic state of a plane to determine whether it's being disabled
* or not. This also WARNs if it detects an invalid state (CRTC and FB
* must either both be NULL or both be non-NULL).
*
* RETURNS:
* True if the plane is being disabled, false otherwise.
*/
static inline bool
drm_atomic_plane_disabling(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
/*
* When disabling a plane, CRTC and FB should always be NULL together.
* Anything else should be considered a bug in the atomic core, so we
* gently warn about it.
*/
WARN_ON((plane->state->crtc == NULL && plane->state->fb != NULL) ||
(plane->state->crtc != NULL && plane->state->fb == NULL));
 
/*
* When using the transitional helpers, old_state may be NULL. If so,
* we know nothing about the current state and have to assume that it
* might be enabled.
*
* When using the atomic helpers, old_state won't be NULL. Therefore
* this check assumes that either the driver will have reconstructed
* the correct state in ->reset() or that the driver will have taken
* appropriate measures to disable all planes.
*/
return (!old_state || old_state->crtc) && !plane->state->crtc;
}
 
#endif /* DRM_ATOMIC_HELPER_H_ */
/drivers/include/drm/drm_crtc.h
31,6 → 31,7
#include <linux/idr.h>
#include <linux/fb.h>
#include <linux/hdmi.h>
#include <linux/media-bus-format.h>
#include <uapi/drm/drm_mode.h>
#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
52,7 → 53,6
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd
#define DRM_MODE_OBJECT_ANY 0
 
struct drm_mode_object {
63,8 → 63,16
 
#define DRM_OBJECT_MAX_PROPERTY 24
struct drm_object_properties {
int count;
uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
int count, atomic_count;
/* NOTE: if we ever start dynamically destroying properties (ie.
* not at drm_mode_config_cleanup() time), then we'd have to do
* a better job of detaching property from mode objects to avoid
* dangling property pointers:
*/
struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
/* do not read/write values directly, but use drm_object_property_get_value()
* and drm_object_property_set_value():
*/
uint64_t values[DRM_OBJECT_MAX_PROPERTY];
};
 
78,10 → 86,12
}
 
/* rotation property bits */
#define DRM_ROTATE_MASK 0x0f
#define DRM_ROTATE_0 0
#define DRM_ROTATE_90 1
#define DRM_ROTATE_180 2
#define DRM_ROTATE_270 3
#define DRM_REFLECT_MASK (~DRM_ROTATE_MASK)
#define DRM_REFLECT_X 4
#define DRM_REFLECT_Y 5
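
/*
 * The values above are bit positions: a rotation property value is a mask
 * of them, e.g. (sketch, helper name mine):
 */
static inline unsigned int example_rotate_90_reflect_x(void)
{
	return (1 << DRM_ROTATE_90) | (1 << DRM_REFLECT_X);
}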
 
131,6 → 141,9
enum subpixel_order subpixel_order;
u32 color_formats;
 
const u32 *bus_formats;
unsigned int num_bus_formats;
 
/* Mask of supported hdmi deep color modes */
u8 edid_hdmi_dc_modes;
 
190,6 → 203,7
const struct drm_framebuffer_funcs *funcs;
unsigned int pitches[4];
unsigned int offsets[4];
uint64_t modifier[4];
unsigned int width;
unsigned int height;
/* depth can be 15 or 16 */
198,13 → 212,14
int flags;
uint32_t pixel_format; /* fourcc format */
struct list_head filp_head;
/* if you are using the helper */
void *helper_private;
};
 
struct drm_property_blob {
struct drm_mode_object base;
struct list_head head;
struct drm_device *dev;
struct kref refcount;
struct list_head head_global;
struct list_head head_file;
size_t length;
unsigned char data[];
};
237,24 → 252,39
 
/**
* struct drm_crtc_state - mutable CRTC state
* @crtc: backpointer to the CRTC
* @enable: whether the CRTC should be enabled, gates all other state
* @mode_changed: for use by helpers and drivers when computing state updates
* @active: whether the CRTC is actively displaying (used for DPMS)
* @planes_changed: planes on this crtc are updated
* @mode_changed: crtc_state->mode or crtc_state->enable has been changed
* @active_changed: crtc_state->active has been toggled.
* @connectors_changed: connectors to this crtc have been updated
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
* @last_vblank_count: for helpers and drivers to capture the vblank of the
* update to ensure framebuffer cleanup isn't done too early
* @planes_changed: for use by helpers and drivers when computing state updates
* @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
* @mode: current mode timings
* @event: optional pointer to a DRM event to signal upon completion of the
* state update
* @state: backpointer to global drm_atomic_state
*
* Note that the distinction between @enable and @active is rather subtle:
* Flipping @active while @enable is set, without changing anything else, must
* never fail from the ->atomic_check callback. Userspace assumes
* that a DPMS On will always succeed. In other words: @enable controls resource
* assignment, @active controls the actual hardware state.
*/
struct drm_crtc_state {
struct drm_crtc *crtc;
 
bool enable;
bool active;
 
/* computed state bits used by helpers and drivers */
bool planes_changed : 1;
bool mode_changed : 1;
bool active_changed : 1;
bool connectors_changed : 1;
 
/* attached planes bitmask:
* WARNING: transitional helpers do not maintain plane_mask so
271,6 → 301,9
 
struct drm_display_mode mode;
 
/* blob property to expose current mode to atomic userspace */
struct drm_property_blob *mode_blob;
 
struct drm_pending_vblank_event *event;
 
struct drm_atomic_state *state;
292,6 → 325,9
* @atomic_duplicate_state: duplicate the atomic state for this CRTC
* @atomic_destroy_state: destroy an atomic state for this CRTC
* @atomic_set_property: set a property on an atomic state for this CRTC
* (do not call directly, use drm_atomic_crtc_set_property())
* @atomic_get_property: get a property on an atomic state for this CRTC
* (do not call directly, use drm_atomic_crtc_get_property())
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
351,6 → 387,10
struct drm_crtc_state *state,
struct drm_property *property,
uint64_t val);
int (*atomic_get_property)(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property,
uint64_t *val);
};
 
/**
367,17 → 407,11
* @enabled: is this CRTC enabled?
* @mode: current mode timings
* @hwmode: mode timings as programmed to hw regs
* @invert_dimensions: for purposes of error checking crtc vs fb sizes,
* invert the width/height of the crtc. This is used if the driver
* is performing 90 or 270 degree rotated scanout
* @x: x position on screen
* @y: y position on screen
* @funcs: CRTC control functions
* @gamma_size: size of gamma ramp
* @gamma_store: gamma ramp values
* @framedur_ns: precise frame timing
* @linedur_ns: precise line timing
* @pixeldur_ns: precise pixel timing
* @helper_private: mid-layer private data
* @properties: property tracking for this CRTC
* @state: current atomic state for this CRTC
421,8 → 455,6
*/
struct drm_display_mode hwmode;
 
bool invert_dimensions;
 
int x, y;
const struct drm_crtc_funcs *funcs;
 
430,11 → 462,8
uint32_t gamma_size;
uint16_t *gamma_store;
 
/* Constants needed for precise vblank and swap timestamping. */
int framedur_ns, linedur_ns, pixeldur_ns;
 
/* if you are using the helper */
void *helper_private;
const void *helper_private;
 
struct drm_object_properties properties;
 
449,11 → 478,14
 
/**
* struct drm_connector_state - mutable connector state
* @connector: backpointer to the connector
* @crtc: CRTC to connect connector to, NULL if disabled
* @best_encoder: can be used by helpers and drivers to select the encoder
* @state: backpointer to global drm_atomic_state
*/
struct drm_connector_state {
struct drm_connector *connector;
 
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */
 
struct drm_encoder *best_encoder;
463,7 → 495,7
 
/**
* struct drm_connector_funcs - control connectors on a given device
* @dpms: set power state (see drm_crtc_funcs above)
* @dpms: set power state
* @save: save connector state
* @restore: restore connector state
* @reset: reset connector after state has been invalidated (e.g. resume)
475,6 → 507,9
* @atomic_duplicate_state: duplicate the atomic state for this connector
* @atomic_destroy_state: destroy an atomic state for this connector
* @atomic_set_property: set a property on an atomic state for this connector
* (do not call directly, use drm_atomic_connector_set_property())
* @atomic_get_property: get a property on an atomic state for this connector
* (do not call directly, use drm_atomic_connector_get_property())
*
* Each CRTC may have one or more connectors attached to it. The functions
* below allow the core DRM code to control connectors, enumerate available modes,
481,7 → 516,7
* etc.
*/
struct drm_connector_funcs {
void (*dpms)(struct drm_connector *connector, int mode);
int (*dpms)(struct drm_connector *connector, int mode);
void (*save)(struct drm_connector *connector);
void (*restore)(struct drm_connector *connector);
void (*reset)(struct drm_connector *connector);
508,6 → 543,10
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val);
int (*atomic_get_property)(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val);
};
 
/**
554,7 → 593,7
struct drm_crtc *crtc;
struct drm_bridge *bridge;
const struct drm_encoder_funcs *funcs;
void *helper_private;
const void *helper_private;
};
 
/* should we poll this connector for connects and disconnects */
605,6 → 644,7
* @audio_latency: audio latency info from ELD, if found
* @null_edid_counter: track sinks that give us all zeros for the EDID
* @bad_edid_counter: track sinks that give us an EDID with invalid checksum
* @edid_corrupt: indicates whether the last read EDID was corrupt
* @debugfs_entry: debugfs directory for this connector
* @state: current atomic state for this connector
* @has_tile: is this connector connected to a tiled monitor
658,7 → 698,7
/* requested DPMS state */
int dpms;
 
void *helper_private;
const void *helper_private;
 
/* forced on connector */
struct drm_cmdline_mode cmdline_mode;
677,6 → 717,11
int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
unsigned bad_edid_counter;
 
/* Flag for raw EDID header corruption - used in Displayport
* compliance testing - Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
*/
bool edid_corrupt;
 
struct dentry *debugfs_entry;
 
struct drm_connector_state *state;
693,6 → 738,7
 
/**
* struct drm_plane_state - mutable plane state
* @plane: backpointer to the plane
* @crtc: currently bound CRTC, NULL if disabled
* @fb: currently bound framebuffer
* @fence: optional fence to wait for before scanning out @fb
709,6 → 755,8
* @state: backpointer to global drm_atomic_state
*/
struct drm_plane_state {
struct drm_plane *plane;
 
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
struct fence *fence;
721,6 → 769,9
uint32_t src_x, src_y;
uint32_t src_h, src_w;
 
/* Plane rotation */
unsigned int rotation;
 
struct drm_atomic_state *state;
};
 
735,6 → 786,9
* @atomic_duplicate_state: duplicate the atomic state for this plane
* @atomic_destroy_state: destroy an atomic state for this plane
* @atomic_set_property: set a property on an atomic state for this plane
* (do not call directly, use drm_atomic_plane_set_property())
* @atomic_get_property: get a property on an atomic state for this plane
* (do not call directly, use drm_atomic_plane_get_property())
*/
struct drm_plane_funcs {
int (*update_plane)(struct drm_plane *plane,
758,6 → 812,10
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val);
int (*atomic_get_property)(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
uint64_t *val);
};
 
enum drm_plane_type {
774,6 → 832,7
* @possible_crtcs: pipes this plane can be bound to
* @format_types: array of formats supported by this plane
* @format_count: number of formats supported
* @format_default: driver hasn't supplied supported formats for the plane
* @crtc: currently bound CRTC
* @fb: currently bound fb
* @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
793,7 → 852,8
 
uint32_t possible_crtcs;
uint32_t *format_types;
uint32_t format_count;
unsigned int format_count;
bool format_default;
 
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
806,7 → 866,7
 
enum drm_plane_type type;
 
void *helper_private;
const void *helper_private;
 
struct drm_plane_state *state;
};
813,6 → 873,7
 
/**
* struct drm_bridge_funcs - drm_bridge control functions
* @attach: Called during drm_bridge_attach
* @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
* @disable: Called right before encoder prepare, disables the bridge
* @post_disable: Called right after encoder prepare, for lockstepped disable
819,9 → 880,9
* @mode_set: Set this mode to the bridge
* @pre_enable: Called right before encoder commit, for lockstepped commit
* @enable: Called right after encoder commit, enables the bridge
* @destroy: make object go away
*/
struct drm_bridge_funcs {
int (*attach)(struct drm_bridge *bridge);
bool (*mode_fixup)(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
832,31 → 893,36
struct drm_display_mode *adjusted_mode);
void (*pre_enable)(struct drm_bridge *bridge);
void (*enable)(struct drm_bridge *bridge);
void (*destroy)(struct drm_bridge *bridge);
};
 
/**
* struct drm_bridge - central DRM bridge control structure
* @dev: DRM device this bridge belongs to
* @head: list management
* @base: base mode object
* @encoder: encoder to which this bridge is connected
* @next: the next bridge in the encoder chain
* @of_node: device node pointer to the bridge
* @list: to keep track of all added bridges
* @funcs: control functions
* @driver_private: pointer to the bridge driver's internal context
*/
struct drm_bridge {
struct drm_device *dev;
struct list_head head;
struct drm_encoder *encoder;
struct drm_bridge *next;
#ifdef CONFIG_OF
struct device_node *of_node;
#endif
struct list_head list;
 
struct drm_mode_object base;
 
const struct drm_bridge_funcs *funcs;
void *driver_private;
};
 
/**
* struct struct drm_atomic_state - the global state object for atomic updates
* struct drm_atomic_state - the global state object for atomic updates
* @dev: parent DRM device
* @flags: state flags like async update
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor ioctl semantics
* @planes: pointer to array of plane pointers
* @plane_states: pointer to array of plane states pointers
* @crtcs: pointer to array of CRTC pointers
868,7 → 934,8
*/
struct drm_atomic_state {
struct drm_device *dev;
uint32_t flags;
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
struct drm_plane **planes;
struct drm_plane_state **plane_states;
struct drm_crtc **crtcs;
912,9 → 979,12
* struct drm_mode_config_funcs - basic driver provided mode setting functions
* @fb_create: create a new framebuffer object
* @output_poll_changed: function to handle output configuration changes
* @atomic_check: check whether a give atomic state update is possible
* @atomic_check: check whether a given atomic state update is possible
* @atomic_commit: commit an atomic state update previously verified with
* atomic_check()
* @atomic_state_alloc: allocate a new atomic state
* @atomic_state_clear: clear the atomic state
* @atomic_state_free: free the atomic state
*
* Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
* involve drivers.
930,33 → 1000,12
int (*atomic_commit)(struct drm_device *dev,
struct drm_atomic_state *a,
bool async);
struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev);
void (*atomic_state_clear)(struct drm_atomic_state *state);
void (*atomic_state_free)(struct drm_atomic_state *state);
};
 
/**
* struct drm_mode_group - group of mode setting resources for potential sub-grouping
* @num_crtcs: CRTC count
* @num_encoders: encoder count
* @num_connectors: connector count
* @num_bridges: bridge count
* @id_list: list of KMS object IDs in this group
*
* Currently this simply tracks the global mode setting state. But in the
* future it could allow groups of objects to be set aside into independent
* control groups for use by different user level processes (e.g. two X servers
* running simultaneously on different heads, each with their own mode
* configuration and freedom of mode setting).
*/
struct drm_mode_group {
uint32_t num_crtcs;
uint32_t num_encoders;
uint32_t num_connectors;
uint32_t num_bridges;
 
/* list of object IDs for this group */
uint32_t *id_list;
};
 
/**
* struct drm_mode_config - Mode configuration control structure
* @mutex: mutex protecting KMS related lists and structures
* @connection_mutex: ww mutex protecting connector state and routing
969,8 → 1018,6
* @fb_list: list of framebuffers available
* @num_connector: number of connectors on this device
* @connector_list: list of connector objects
* @num_bridge: number of bridges on this device
* @bridge_list: list of bridge objects
* @num_encoder: number of encoders on this device
* @encoder_list: list of encoder objects
* @num_overlay_plane: number of overlay planes on this device
989,6 → 1036,7
* @poll_running: track polling status for this device
* @output_poll_work: delayed work for polling in process context
* @property_blob_list: list of all the blob property objects
* @blob_lock: mutex for blob property allocation and management
* @*_property: core property tracking
* @preferred_depth: preferred RBG pixel depth, used by fb helpers
* @prefer_shadow: hint to userspace to prefer shadow-fb rendering
1015,8 → 1063,6
 
int num_connector;
struct list_head connector_list;
int num_bridge;
struct list_head bridge_list;
int num_encoder;
struct list_head encoder_list;
 
1043,8 → 1089,11
/* output poll support */
bool poll_enabled;
bool poll_running;
bool delayed_event;
struct delayed_work output_poll_work;
 
struct mutex blob_lock;
 
/* pointers to standard properties */
struct list_head property_blob_list;
struct drm_property *edid_property;
1053,6 → 1102,18
struct drm_property *tile_property;
struct drm_property *plane_type_property;
struct drm_property *rotation_property;
struct drm_property *prop_src_x;
struct drm_property *prop_src_y;
struct drm_property *prop_src_w;
struct drm_property *prop_src_h;
struct drm_property *prop_crtc_x;
struct drm_property *prop_crtc_y;
struct drm_property *prop_crtc_w;
struct drm_property *prop_crtc_h;
struct drm_property *prop_fb_id;
struct drm_property *prop_crtc_id;
struct drm_property *prop_active;
struct drm_property *prop_mode_id;
 
/* DVI-I properties */
struct drm_property *dvi_i_subconnector_property;
1088,6 → 1149,9
/* whether async page flip is supported or not */
bool async_page_flip;
 
/* whether the driver supports fb modifiers */
bool allow_fb_modifiers;
 
/* cursor size */
uint32_t cursor_width, cursor_height;
};
1153,10 → 1217,22
/* helper to unplug all connectors from sysfs for device */
extern void drm_connector_unplug_all(struct drm_device *dev);
 
extern int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
const struct drm_bridge_funcs *funcs);
extern void drm_bridge_cleanup(struct drm_bridge *bridge);
extern int drm_bridge_add(struct drm_bridge *bridge);
extern void drm_bridge_remove(struct drm_bridge *bridge);
extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
 
bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void drm_bridge_disable(struct drm_bridge *bridge);
void drm_bridge_post_disable(struct drm_bridge *bridge);
void drm_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void drm_bridge_pre_enable(struct drm_bridge *bridge);
void drm_bridge_enable(struct drm_bridge *bridge);
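
/*
 * Registration sketch (assumed driver probe path): add a bridge to the
 * global list, then bind it to a device; unwind on failure.
 */
static inline int example_bridge_setup(struct drm_device *dev,
				       struct drm_bridge *bridge)
{
	int ret = drm_bridge_add(bridge);

	if (ret)
		return ret;

	ret = drm_bridge_attach(dev, bridge);
	if (ret)
		drm_bridge_remove(bridge);
	return ret;
}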
 
extern int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
1180,17 → 1256,22
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats,
uint32_t format_count,
unsigned int format_count,
enum drm_plane_type type);
extern int drm_plane_init(struct drm_device *dev,
struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, uint32_t format_count,
const uint32_t *formats, unsigned int format_count,
bool is_primary);
extern void drm_plane_cleanup(struct drm_plane *plane);
extern unsigned int drm_plane_index(struct drm_plane *plane);
extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
extern void drm_plane_force_disable(struct drm_plane *plane);
extern int drm_plane_check_pixel_format(const struct drm_plane *plane,
u32 format);
extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
int *hdisplay, int *vdisplay);
extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
int x, int y,
const struct drm_display_mode *mode,
1206,9 → 1287,8
extern const char *drm_get_tv_subconnector_name(int val);
extern const char *drm_get_tv_select_name(int val);
extern void drm_fb_release(struct drm_file *file_priv);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
extern void drm_mode_group_destroy(struct drm_mode_group *group);
extern void drm_reinit_primary_mode_group(struct drm_device *dev);
extern void drm_property_destroy_user_blobs(struct drm_device *dev,
struct drm_file *file_priv);
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
1224,6 → 1304,10
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
 
extern int drm_display_info_set_bus_formats(struct drm_display_info *info,
const u32 *formats,
unsigned int num_formats);
 
static inline bool drm_property_type_is(struct drm_property *property,
uint32_t type)
{
1279,6 → 1363,15
int64_t min, int64_t max);
struct drm_property *drm_property_create_object(struct drm_device *dev,
int flags, const char *name, uint32_t type);
struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
const char *name);
struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
size_t length,
const void *data);
struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
uint32_t id);
struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob);
void drm_property_unreference_blob(struct drm_property_blob *blob);
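
/*
 * Blob lifecycle sketch (payload assumed; the error-pointer return
 * convention checked via IS_ERR_OR_NULL() from linux/err.h is an
 * assumption here):
 */
static inline void example_blob_roundtrip(struct drm_device *dev)
{
	const u32 payload[2] = { 0xcafe, 0xbabe };
	struct drm_property_blob *blob;

	blob = drm_property_create_blob(dev, sizeof(payload), payload);
	if (!IS_ERR_OR_NULL(blob))
		drm_property_unreference_blob(blob);
}
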
extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
extern int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
1285,11 → 1378,15
extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
extern int drm_mode_create_tv_properties(struct drm_device *dev,
unsigned int num_modes,
char *modes[]);
const char * const modes[]);
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
extern bool drm_property_change_valid_get(struct drm_property *property,
uint64_t value, struct drm_mode_object **ref);
extern void drm_property_change_valid_put(struct drm_property *property,
struct drm_mode_object *ref);
 
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
1334,6 → 1431,10
void *data, struct drm_file *file_priv);
extern int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_createblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_destroyblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getencoder(struct drm_device *dev,
1355,7 → 1456,8
int hpref, int vpref);
 
extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
bool *edid_corrupt);
extern bool drm_edid_is_valid(struct edid *edid);
 
extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
1381,6 → 1483,8
extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
struct drm_property *property,
uint64_t value);
extern int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
 
extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp);
1436,17 → 1540,46
return mo ? obj_to_property(mo) : NULL;
}
 
static inline struct drm_property_blob *
drm_property_blob_find(struct drm_device *dev, uint32_t id)
/* Plane list iterator for legacy (overlay only) planes. */
#define drm_for_each_legacy_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
 
#define drm_for_each_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
 
#define drm_for_each_crtc(crtc, dev) \
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
 
static inline void
assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
{
struct drm_mode_object *mo;
mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
return mo ? obj_to_blob(mo) : NULL;
/*
* The connector hotadd/remove code currently grabs both locks when
* updating lists. Hence readers need only hold either of them to be
* safe and the check amounts to
*
* WARN_ON(not_holding(A) && not_holding(B)).
*/
WARN_ON(!mutex_is_locked(&mode_config->mutex) &&
!drm_modeset_is_locked(&mode_config->connection_mutex));
}
 
/* Plane list iterator for legacy (overlay only) planes. */
#define drm_for_each_legacy_plane(plane, planelist) \
list_for_each_entry(plane, planelist, head) \
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
#define drm_for_each_connector(connector, dev) \
for (assert_drm_connector_list_read_locked(&(dev)->mode_config), \
connector = list_first_entry(&(dev)->mode_config.connector_list, \
struct drm_connector, head); \
&connector->head != (&(dev)->mode_config.connector_list); \
connector = list_next_entry(connector, head))
 
#define drm_for_each_encoder(encoder, dev) \
list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
 
#define drm_for_each_fb(fb, dev) \
for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \
fb = list_first_entry(&(dev)->mode_config.fb_list, \
struct drm_framebuffer, head); \
&fb->head != (&(dev)->mode_config.fb_list); \
fb = list_next_entry(fb, head))
 
#endif /* __DRM_CRTC_H__ */
/drivers/include/drm/drm_crtc_helper.h
39,6 → 39,8
 
#include <linux/fb.h>
 
#include <drm/drm_crtc.h>
 
enum mode_set_atomic {
LEAVE_ATOMIC_MODE_SET,
ENTER_ATOMIC_MODE_SET,
45,11 → 47,30
};
 
/**
* drm_crtc_helper_funcs - helper operations for CRTCs
* @mode_fixup: try to fixup proposed mode for this connector
* struct drm_crtc_helper_funcs - helper operations for CRTCs
* @dpms: set power state
* @prepare: prepare the CRTC, called before @mode_set
* @commit: commit changes to CRTC, called after @mode_set
* @mode_fixup: try to fixup proposed mode for this CRTC
* @mode_set: set this mode
* @mode_set_nofb: set mode only (no scanout buffer attached)
* @mode_set_base: update the scanout buffer
* @mode_set_base_atomic: non-blocking mode set (used for kgdb support)
* @load_lut: load color palette
* @disable: disable CRTC when no longer in use
* @enable: enable CRTC
* @atomic_check: check for validity of an atomic state
* @atomic_begin: begin atomic update
* @atomic_flush: flush atomic update
*
* The helper operations are called by the mid-layer CRTC helper.
*
* Note that with atomic helpers @dpms, @prepare and @commit hooks are
* deprecated. Use @enable and @disable instead, exclusively.
*
* With legacy crtc helpers there's a big semantic difference between @disable
* and the other hooks: @disable also needs to release any resources acquired in
* @mode_set (like shared PLLs).
*/
struct drm_crtc_helper_funcs {
/*
68,6 → 89,7
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb);
/* Actually set the mode for atomic helpers, optional */
void (*mode_set_nofb)(struct drm_crtc *crtc);
 
/* Move the crtc on the current fb to the given position *optional* */
80,22 → 102,41
/* reload the current crtc LUT */
void (*load_lut)(struct drm_crtc *crtc);
 
/* disable crtc when not in use - more explicit than dpms off */
void (*disable)(struct drm_crtc *crtc);
void (*enable)(struct drm_crtc *crtc);
 
/* atomic helpers */
int (*atomic_check)(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void (*atomic_begin)(struct drm_crtc *crtc);
void (*atomic_flush)(struct drm_crtc *crtc);
void (*atomic_begin)(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
void (*atomic_flush)(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
};
 
/**
* drm_encoder_helper_funcs - helper operations for encoders
* struct drm_encoder_helper_funcs - helper operations for encoders
* @dpms: set power state
* @save: save connector state
* @restore: restore connector state
* @mode_fixup: try to fixup proposed mode for this connector
* @mode_set: set this mode
* @prepare: part of the disable sequence, called before the CRTC modeset
* @commit: called after the CRTC modeset
* @mode_set: set this mode, optional for atomic helpers
* @get_crtc: return CRTC that the encoder is currently attached to
* @detect: connection status detection
* @disable: disable encoder when not in use (overrides DPMS off)
* @enable: enable encoder
* @atomic_check: check for validity of an atomic update
*
* The helper operations are called by the mid-layer CRTC helper.
*
* Note that with atomic helpers @dpms, @prepare and @commit hooks are
* deprecated. Use @enable and @disable instead, exclusively.
*
* With legacy crtc helpers there's a big semantic difference between @disable
* and the other hooks: @disable also needs to release any resources acquired in
* @mode_set (like shared PLLs).
*/
struct drm_encoder_helper_funcs {
void (*dpms)(struct drm_encoder *encoder, int mode);
114,14 → 155,22
/* detect for DAC style encoders */
enum drm_connector_status (*detect)(struct drm_encoder *encoder,
struct drm_connector *connector);
/* disable encoder when not in use - more explicit than dpms off */
void (*disable)(struct drm_encoder *encoder);
 
void (*enable)(struct drm_encoder *encoder);
 
/* atomic helpers */
int (*atomic_check)(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
};
 
/**
* drm_connector_helper_funcs - helper operations for connectors
* struct drm_connector_helper_funcs - helper operations for connectors
* @get_modes: get mode list for this connector
* @mode_valid (optional): is this mode valid on the given connector?
* @mode_valid: is this mode valid on the given connector? (optional)
* @best_encoder: return the preferred encoder for this connector
* @atomic_best_encoder: atomic version of @best_encoder
*
* The helper operations are called by the mid-layer CRTC helper.
*/
130,6 → 179,8
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
struct drm_connector_state *connector_state);
};
 
extern void drm_helper_disable_unused_functions(struct drm_device *dev);
141,7 → 192,7
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
 
151,19 → 202,19
static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
const struct drm_crtc_helper_funcs *funcs)
{
crtc->helper_private = (void *)funcs;
crtc->helper_private = funcs;
}
 
static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
const struct drm_encoder_helper_funcs *funcs)
{
encoder->helper_private = (void *)funcs;
encoder->helper_private = funcs;
}
 
static inline void drm_connector_helper_add(struct drm_connector *connector,
const struct drm_connector_helper_funcs *funcs)
{
connector->helper_private = (void *)funcs;
connector->helper_private = funcs;
}
 
extern void drm_helper_resume_force_mode(struct drm_device *dev);
189,5 → 240,6
 
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
 
#endif
/drivers/include/drm/drm_dp_helper.h
42,9 → 42,11
* 1.2 formally includes both eDP and DPI definitions.
*/
 
#define DP_AUX_MAX_PAYLOAD_BYTES 16
 
#define DP_AUX_I2C_WRITE 0x0
#define DP_AUX_I2C_READ 0x1
#define DP_AUX_I2C_STATUS 0x2
#define DP_AUX_I2C_WRITE_STATUS_UPDATE 0x2
#define DP_AUX_I2C_MOT 0x4
#define DP_AUX_NATIVE_WRITE 0x8
#define DP_AUX_NATIVE_READ 0x9
92,6 → 94,15
# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
# define DP_OUI_SUPPORT (1 << 7)
 
#define DP_RECEIVE_PORT_0_CAP_0 0x008
# define DP_LOCAL_EDID_PRESENT (1 << 1)
# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2)
 
#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009
 
#define DP_RECEIVE_PORT_1_CAP_0 0x00a
#define DP_RECEIVE_PORT_1_BUFFER_SIZE 0x00b
 
#define DP_I2C_SPEED_CAP 0x00c /* DPI */
# define DP_I2C_SPEED_1K 0x01
# define DP_I2C_SPEED_5K 0x02
101,8 → 112,19
# define DP_I2C_SPEED_1M 0x20
 
#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */
# define DP_ALTERNATE_SCRAMBLER_RESET_CAP (1 << 0)
# define DP_FRAMING_CHANGE_CAP (1 << 1)
# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */
 
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
 
#define DP_ADAPTER_CAP 0x00f /* 1.2 */
# define DP_FORCE_LOAD_SENSE_CAP (1 << 0)
# define DP_ALTERNATE_I2C_PATTERN_CAP (1 << 1)
 
#define DP_SUPPORTED_LINK_RATES 0x010 /* eDP 1.4 */
# define DP_MAX_SUPPORTED_RATES 8 /* 16-bit little-endian */
 
/* Multiple stream transport */
#define DP_FAUX_CAP 0x020 /* 1.2 */
# define DP_FAUX_CAP_1 (1 << 0)
110,10 → 132,56
#define DP_MSTM_CAP 0x021 /* 1.2 */
# define DP_MST_CAP (1 << 0)
 
#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */
 
/* AV_SYNC_DATA_BLOCK 1.2 */
#define DP_AV_GRANULARITY 0x023
# define DP_AG_FACTOR_MASK (0xf << 0)
# define DP_AG_FACTOR_3MS (0 << 0)
# define DP_AG_FACTOR_2MS (1 << 0)
# define DP_AG_FACTOR_1MS (2 << 0)
# define DP_AG_FACTOR_500US (3 << 0)
# define DP_AG_FACTOR_200US (4 << 0)
# define DP_AG_FACTOR_100US (5 << 0)
# define DP_AG_FACTOR_10US (6 << 0)
# define DP_AG_FACTOR_1US (7 << 0)
# define DP_VG_FACTOR_MASK (0xf << 4)
# define DP_VG_FACTOR_3MS (0 << 4)
# define DP_VG_FACTOR_2MS (1 << 4)
# define DP_VG_FACTOR_1MS (2 << 4)
# define DP_VG_FACTOR_500US (3 << 4)
# define DP_VG_FACTOR_200US (4 << 4)
# define DP_VG_FACTOR_100US (5 << 4)
 
#define DP_AUD_DEC_LAT0 0x024
#define DP_AUD_DEC_LAT1 0x025
 
#define DP_AUD_PP_LAT0 0x026
#define DP_AUD_PP_LAT1 0x027
 
#define DP_VID_INTER_LAT 0x028
 
#define DP_VID_PROG_LAT 0x029
 
#define DP_REP_LAT 0x02a
 
#define DP_AUD_DEL_INS0 0x02b
#define DP_AUD_DEL_INS1 0x02c
#define DP_AUD_DEL_INS2 0x02d
/* End of AV_SYNC_DATA_BLOCK */
 
#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */
# define DP_ALPM_CAP (1 << 0)
 
#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_CAP (1 << 0)
 
#define DP_GUID 0x030 /* 1.2 */
 
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
 
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
# define DP_PSR_SETUP_TIME_330 (0 << 1)
153,6 → 221,7
 
/* link configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
168,11 → 237,12
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
# define DP_TRAINING_PATTERN_MASK 0x3
 
# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
/* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */
# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2)
# define DP_LINK_QUAL_PATTERN_11_D10_2 (1 << 2)
# define DP_LINK_QUAL_PATTERN_11_ERROR_RATE (2 << 2)
# define DP_LINK_QUAL_PATTERN_11_PRBS7 (3 << 2)
# define DP_LINK_QUAL_PATTERN_11_MASK (3 << 2)
 
# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
215,17 → 285,63
/* bitmask as for DP_I2C_SPEED_CAP */
 
#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
# define DP_ALTERNATE_SCRAMBLER_RESET_ENABLE (1 << 0)
# define DP_FRAMING_CHANGE_ENABLE (1 << 1)
# define DP_PANEL_SELF_TEST_ENABLE (1 << 7)
 
#define DP_LINK_QUAL_LANE0_SET 0x10b /* DPCD >= 1.2 */
#define DP_LINK_QUAL_LANE1_SET 0x10c
#define DP_LINK_QUAL_LANE2_SET 0x10d
#define DP_LINK_QUAL_LANE3_SET 0x10e
# define DP_LINK_QUAL_PATTERN_DISABLE 0
# define DP_LINK_QUAL_PATTERN_D10_2 1
# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2
# define DP_LINK_QUAL_PATTERN_PRBS7 3
# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4
# define DP_LINK_QUAL_PATTERN_HBR2_EYE 5
# define DP_LINK_QUAL_PATTERN_MASK 7
 
#define DP_TRAINING_LANE0_1_SET2 0x10f
#define DP_TRAINING_LANE2_3_SET2 0x110
# define DP_LANE02_POST_CURSOR2_SET_MASK (3 << 0)
# define DP_LANE02_MAX_POST_CURSOR2_REACHED (1 << 2)
# define DP_LANE13_POST_CURSOR2_SET_MASK (3 << 4)
# define DP_LANE13_MAX_POST_CURSOR2_REACHED (1 << 6)
 
#define DP_MSTM_CTRL 0x111 /* 1.2 */
# define DP_MST_EN (1 << 0)
# define DP_UP_REQ_EN (1 << 1)
# define DP_UPSTREAM_IS_SRC (1 << 2)
 
#define DP_AUDIO_DELAY0 0x112 /* 1.2 */
#define DP_AUDIO_DELAY1 0x113
#define DP_AUDIO_DELAY2 0x114
 
#define DP_LINK_RATE_SET 0x115 /* eDP 1.4 */
# define DP_LINK_RATE_SET_SHIFT 0
# define DP_LINK_RATE_SET_MASK (7 << 0)
 
#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */
# define DP_ALPM_ENABLE (1 << 0)
# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1)
 
#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0)
# define DP_IRQ_HPD_ENABLE (1 << 1)
 
#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */
# define DP_PWR_NOT_NEEDED (1 << 0)
 
#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
 
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE (1 << 0)
# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
# define DP_PSR_CRC_VERIFICATION (1 << 2)
# define DP_PSR_FRAME_CAPTURE (1 << 3)
# define DP_PSR_SELECTIVE_UPDATE (1 << 4)
# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5)
 
#define DP_ADAPTER_CTRL 0x1a0
# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
304,7 → 420,7
 
#define DP_TEST_SINK_MISC 0x246
# define DP_TEST_CRC_SUPPORTED (1 << 5)
# define DP_TEST_COUNT_MASK 0x7
# define DP_TEST_COUNT_MASK 0xf
 
#define DP_TEST_RESPONSE 0x260
# define DP_TEST_ACK (1 << 0)
332,6 → 448,49
# define DP_SET_POWER_D3 0x2
# define DP_SET_POWER_MASK 0x3
 
#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */
# define DP_EDP_11 0x00
# define DP_EDP_12 0x01
# define DP_EDP_13 0x02
# define DP_EDP_14 0x03
 
#define DP_EDP_GENERAL_CAP_1 0x701
 
#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702
 
#define DP_EDP_GENERAL_CAP_2 0x703
 
#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
 
#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720
 
#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721
 
#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722
#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723
 
#define DP_EDP_PWMGEN_BIT_COUNT 0x724
#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN 0x725
#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX 0x726
 
#define DP_EDP_BACKLIGHT_CONTROL_STATUS 0x727
 
#define DP_EDP_BACKLIGHT_FREQ_SET 0x728
 
#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB 0x72a
#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID 0x72b
#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_LSB 0x72c
 
#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MSB 0x72d
#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MID 0x72e
#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB 0x72f
 
#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732
#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733
 
#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
 
#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
350,6 → 509,7
#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
# define DP_PSR_LINK_CRC_ERROR (1 << 0)
# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
# define DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) /* eDP 1.4 */
 
#define DP_PSR_ESI 0x2007 /* XXX 1.2? */
# define DP_PSR_CAPS_CHANGE (1 << 0)
363,6 → 523,9
# define DP_PSR_SINK_INTERNAL_ERROR 7
# define DP_PSR_SINK_STATE_MASK 0x07
 
#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */
# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0)
 
/* DP 1.2 Sideband message defines */
/* peer device type - DP 1.2a Table 2-92 */
#define DP_PEER_DEVICE_NONE 0x0
405,6 → 568,10
#define MODE_I2C_READ 4
#define MODE_I2C_STOP 8
 
/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */
#define DP_MST_PHYSICAL_PORT_0 0
#define DP_MST_LOGICAL_PORT_0 8
 
#define DP_LINK_STATUS_SIZE 6
bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
415,6 → 582,7
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
 
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2
 
470,6 → 638,13
(dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
}
 
static inline bool
drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_DPCD_REV] >= 0x12 &&
dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
}
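 
/*
 * A hedged usage sketch (not part of this header): during link training
 * a source could pick the highest training pattern the sink supports,
 * e.g.
 *
 *	u8 pattern = drm_dp_tps3_supported(dpcd) ?
 *		     DP_TRAINING_PATTERN_3 : DP_TRAINING_PATTERN_2;
 */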
 
/*
* DisplayPort AUX channel
*/
516,9 → 691,12
* An AUX channel can also be used to transport I2C messages to a sink. A
* typical application of that is to access an EDID that's present in the
* sink device. The .transfer() function can also be used to execute such
* transactions. The drm_dp_aux_register_i2c_bus() function registers an
* I2C adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
* should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter.
* transactions. The drm_dp_aux_register() function registers an I2C
* adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
* should call drm_dp_aux_unregister() to remove the I2C adapter.
* The I2C adapter uses long transfers by default; if a partial response is
* received, the adapter will drop down to the size given by the partial
* response for this transaction only.
*
* Note that the aux helper code assumes that the .transfer() function
* only modifies the reply field of the drm_dp_aux_msg structure. The
586,6 → 764,7
 
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
 
int drm_dp_aux_register(struct drm_dp_aux *aux);
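 
/*
 * A hedged usage sketch of the registration flow described above; the
 * my_aux_transfer() hook and the EDID wiring are illustrative
 * assumptions, not guarantees made by this header.
 */
static inline int my_dp_aux_init(struct drm_dp_aux *aux, struct drm_device *dev)
{
	int ret;

	aux->name = "my DP AUX";		/* names the embedded I2C adapter */
	aux->dev = dev->dev;
	aux->transfer = my_aux_transfer;	/* assumed driver-specific hook */

	ret = drm_dp_aux_register(aux);		/* also registers the I2C adapter */
	if (ret < 0)
		return ret;

	/* aux->ddc can now back EDID reads, e.g. drm_get_edid(connector, &aux->ddc) */
	return 0;
}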
/drivers/include/drm/drm_dp_mst_helper.h
253,6 → 253,7
u8 *bytes;
};
 
#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
struct drm_dp_remote_i2c_read {
u8 num_transactions;
u8 port_number;
262,7 → 263,7
u8 *bytes;
u8 no_stop_bit;
u8 i2c_transaction_delay;
} transactions[4];
} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
u8 read_i2c_device_id;
u8 num_bytes_read;
};
374,6 → 375,7
struct drm_dp_mst_topology_cbs {
/* create a connector for a port */
struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
void (*register_connector)(struct drm_connector *connector);
void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector);
void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
463,6 → 465,10
struct work_struct work;
 
struct work_struct tx_work;
 
struct list_head destroy_connector_list;
struct mutex destroy_connector_lock;
struct work_struct destroy_connector_work;
};
 
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
486,7 → 492,9
 
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
 
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
 
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
 
/drivers/include/drm/drm_edid.h
215,6 → 215,8
#define DRM_ELD_VER 0
# define DRM_ELD_VER_SHIFT 3
# define DRM_ELD_VER_MASK (0x1f << 3)
# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */
# define DRM_ELD_VER_CANNED (0x1f << 3)
 
#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
 
324,9 → 326,8
int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
const struct drm_display_mode *mode);
struct drm_connector *drm_select_eld(struct drm_encoder *encoder);
int drm_load_edid_firmware(struct drm_connector *connector);
 
int
346,6 → 347,25
}
 
/**
* drm_eld_sad - Get ELD SAD structures.
* @eld: pointer to an eld memory structure with sad_count set
*/
static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
{
unsigned int ver, mnl;
 
ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
if (ver != 2 && ver != 31)
return NULL;
 
mnl = drm_eld_mnl(eld);
if (mnl > 16)
return NULL;
 
return eld + DRM_ELD_CEA_SAD(mnl, 0);
}
 
/**
* drm_eld_sad_count - Get ELD SAD count.
* @eld: pointer to an eld memory structure with sad_count set
*/
/drivers/include/drm/drm_fb_helper.h
44,6 → 44,25
int x, y;
};
 
/**
* struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size
* @fb_width: fbdev width
* @fb_height: fbdev height
* @surface_width: scanout buffer width
* @surface_height: scanout buffer height
* @surface_bpp: scanout buffer bpp
* @surface_depth: scanout buffer depth
*
* Note that the scanout surface width/height may be larger than the fbdev
* width/height. In case of multiple displays, the scanout surface is sized
* according to the largest width/height (so it is large enough for all CRTCs
* to scanout). But the fbdev width/height is sized to the minimum width/
* height of all the displays. This ensures that fbcon fits on the smallest
* of the attached displays.
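*
* For example (illustrative numbers): with one 1920x1080 and one
* 1024x768 display attached, the scanout surface is sized 1920x1080
* while the fbdev is sized 1024x768, so fbcon fits on both.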
*
* So what is passed to drm_fb_helper_fill_var() should be fb_width/fb_height,
* rather than the surface size.
*/
struct drm_fb_helper_surface_size {
u32 fb_width;
u32 fb_height;
85,6 → 104,20
struct drm_connector *connector;
};
 
/**
* struct drm_fb_helper - helper to emulate fbdev on top of kms
* @fb: Scanout framebuffer object
* @dev: DRM device
* @crtc_count: number of possible CRTCs
* @crtc_info: per-CRTC helper state (mode, x/y offset, etc)
* @connector_count: number of connected connectors
* @connector_info_alloc_count: size of connector_info
* @funcs: driver callbacks for fb helper
* @fbdev: emulated fbdev device info struct
* @pseudo_palette: fake palette of 16 colors
* @kernel_fb_list: list_head in kernel_fb_helper_list
* @delayed_hotplug: was there a hotplug while kms master active?
*/
struct drm_fb_helper {
struct drm_framebuffer *fb;
struct drm_device *dev;
101,8 → 134,20
/* we got a hotplug but fbdev wasn't running the console
delay until next set_par */
bool delayed_hotplug;
 
/**
* @atomic:
*
* Use atomic updates for restore_fbdev_mode(), etc. This defaults to
* true if the driver has the DRIVER_ATOMIC feature flag, but drivers can
* override it to true after drm_fb_helper_init() if they support atomic
* modeset but do not yet advertise DRIVER_ATOMIC (note that fb-helper
* does not require ASYNC commits).
*/
bool atomic;
};
 
#ifdef CONFIG_DRM_FBDEV_EMULATION
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs);
int drm_fb_helper_init(struct drm_device *dev,
116,16 → 161,43
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
 
bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
 
struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height);
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth);
 
void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
 
ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos);
ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos);
 
void drm_fb_helper_sys_fillrect(struct fb_info *info,
const struct fb_fillrect *rect);
void drm_fb_helper_sys_copyarea(struct fb_info *info,
const struct fb_copyarea *area);
void drm_fb_helper_sys_imageblit(struct fb_info *info,
const struct fb_image *image);
 
void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect);
void drm_fb_helper_cfb_copyarea(struct fb_info *info,
const struct fb_copyarea *area);
void drm_fb_helper_cfb_imageblit(struct fb_info *info,
const struct fb_image *image);
 
void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state);
 
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
139,4 → 211,188
int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs)
{
}
 
static inline int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *helper, int crtc_count,
int max_conn)
{
return 0;
}
 
static inline void drm_fb_helper_fini(struct drm_fb_helper *helper)
{
}
 
static inline int drm_fb_helper_blank(int blank, struct fb_info *info)
{
return 0;
}
 
static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
return 0;
}
 
static inline int drm_fb_helper_set_par(struct fb_info *info)
{
return 0;
}
 
static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
return 0;
}
 
static inline int
drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
{
return 0;
}
 
static inline struct fb_info *
drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
{
return NULL;
}
 
static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
{
}
static inline void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
{
}
 
static inline void drm_fb_helper_fill_var(struct fb_info *info,
struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
}
 
static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
}
 
static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap,
struct fb_info *info)
{
return 0;
}
 
static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
{
}
 
static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
char __user *buf, size_t count,
loff_t *ppos)
{
return -ENODEV;
}
 
static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info,
const char __user *buf,
size_t count, loff_t *ppos)
{
return -ENODEV;
}
 
static inline void drm_fb_helper_sys_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
}
 
static inline void drm_fb_helper_sys_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
}
 
static inline void drm_fb_helper_sys_imageblit(struct fb_info *info,
const struct fb_image *image)
{
}
 
static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
}
 
static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
}
 
static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
}
 
static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
int state)
{
}
 
static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
return 0;
}
 
static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper,
int bpp_sel)
{
return 0;
}
 
static inline int
drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
return 0;
}
 
static inline int drm_fb_helper_debug_enter(struct fb_info *info)
{
return 0;
}
 
static inline int drm_fb_helper_debug_leave(struct fb_info *info)
{
return 0;
}
 
static inline struct drm_display_mode *
drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
int width, int height)
{
return NULL;
}
 
static inline struct drm_display_mode *
drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
int width, int height)
{
return NULL;
}
 
static inline int
drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
return 0;
}
 
static inline int
drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
return 0;
}
#endif
#endif
/drivers/include/drm/drm_gem.h
119,13 → 119,6
* simply leave it as NULL.
*/
struct dma_buf_attachment *import_attach;
 
/**
* dumb - created as dumb buffer
* Whether the gem object was created using the dumb buffer interface
* as such it may not be used for GPU rendering.
*/
bool dumb;
};
 
void drm_gem_object_release(struct drm_gem_object *obj);
/drivers/include/drm/drm_mipi_dsi.h
0,0 → 1,259
/*
* MIPI DSI Bus
*
* Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd.
* Andrzej Hajda <a.hajda@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
 
#ifndef __DRM_MIPI_DSI_H__
#define __DRM_MIPI_DSI_H__
 
#include <linux/device.h>
 
struct mipi_dsi_host;
struct mipi_dsi_device;
 
/* request ACK from peripheral */
#define MIPI_DSI_MSG_REQ_ACK BIT(0)
/* use Low Power Mode to transmit message */
#define MIPI_DSI_MSG_USE_LPM BIT(1)
 
/**
* struct mipi_dsi_msg - read/write DSI buffer
* @channel: virtual channel id
* @type: payload data type
* @flags: flags controlling this message transmission
* @tx_len: length of @tx_buf
* @tx_buf: data to be written
* @rx_len: length of @rx_buf
* @rx_buf: data to be read, or NULL
*/
struct mipi_dsi_msg {
u8 channel;
u8 type;
u16 flags;
 
size_t tx_len;
const void *tx_buf;
 
size_t rx_len;
void *rx_buf;
};
 
bool mipi_dsi_packet_format_is_short(u8 type);
bool mipi_dsi_packet_format_is_long(u8 type);
 
/**
* struct mipi_dsi_packet - represents a MIPI DSI packet in protocol format
* @size: size (in bytes) of the packet
* @header: the four bytes that make up the header (Data ID, Word Count or
* Packet Data, and ECC)
* @payload_length: number of bytes in the payload
* @payload: a pointer to a buffer containing the payload, if any
*/
struct mipi_dsi_packet {
size_t size;
u8 header[4];
size_t payload_length;
const u8 *payload;
};
 
int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
const struct mipi_dsi_msg *msg);
 
/**
* struct mipi_dsi_host_ops - DSI bus operations
* @attach: attach DSI device to DSI host
* @detach: detach DSI device from DSI host
* @transfer: transmit a DSI packet
*
* DSI packets transmitted by .transfer() are passed in as mipi_dsi_msg
* structures. This structure contains information about the type of packet
* being transmitted as well as the transmit and receive buffers. When an
* error is encountered during transmission, this function will return a
* negative error code. On success it shall return the number of bytes
* transmitted for write packets or the number of bytes received for read
* packets.
*
* Note that typically DSI packet transmission is atomic, so the .transfer()
* function will seldom return anything other than the number of bytes
* contained in the transmit buffer on success.
*/
struct mipi_dsi_host_ops {
int (*attach)(struct mipi_dsi_host *host,
struct mipi_dsi_device *dsi);
int (*detach)(struct mipi_dsi_host *host,
struct mipi_dsi_device *dsi);
ssize_t (*transfer)(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg);
};
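 
/*
 * A hedged sketch of a write-only .transfer() implementation honouring
 * the contract above; my_hw_tx() is an assumed hardware helper, not a
 * real API. On success it returns the number of bytes transmitted,
 * matching the rule for write packets.
 */
static ssize_t my_dsi_transfer(struct mipi_dsi_host *host,
			       const struct mipi_dsi_msg *msg)
{
	struct mipi_dsi_packet packet;
	int ret;

	ret = mipi_dsi_create_packet(&packet, msg);	/* validate and pack header */
	if (ret < 0)
		return ret;

	ret = my_hw_tx(host, packet.header, sizeof(packet.header));
	if (ret < 0)
		return ret;

	if (packet.payload_length) {
		ret = my_hw_tx(host, packet.payload, packet.payload_length);
		if (ret < 0)
			return ret;
	}

	return msg->tx_len;
}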
 
/**
* struct mipi_dsi_host - DSI host device
* @dev: driver model device node for this DSI host
* @ops: DSI host operations
*/
struct mipi_dsi_host {
struct device *dev;
const struct mipi_dsi_host_ops *ops;
};
 
int mipi_dsi_host_register(struct mipi_dsi_host *host);
void mipi_dsi_host_unregister(struct mipi_dsi_host *host);
 
/* DSI mode flags */
 
/* video mode */
#define MIPI_DSI_MODE_VIDEO BIT(0)
/* video burst mode */
#define MIPI_DSI_MODE_VIDEO_BURST BIT(1)
/* video pulse mode */
#define MIPI_DSI_MODE_VIDEO_SYNC_PULSE BIT(2)
/* enable auto vertical count mode */
#define MIPI_DSI_MODE_VIDEO_AUTO_VERT BIT(3)
/* enable hsync-end packets in vsync-pulse and v-porch area */
#define MIPI_DSI_MODE_VIDEO_HSE BIT(4)
/* disable hfront-porch area */
#define MIPI_DSI_MODE_VIDEO_HFP BIT(5)
/* disable hback-porch area */
#define MIPI_DSI_MODE_VIDEO_HBP BIT(6)
/* disable hsync-active area */
#define MIPI_DSI_MODE_VIDEO_HSA BIT(7)
/* flush display FIFO on vsync pulse */
#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8)
/* disable EoT packets in HS mode */
#define MIPI_DSI_MODE_EOT_PACKET BIT(9)
/* device supports non-continuous clock behavior (DSI spec 5.6.1) */
#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10)
/* transmit data in low power */
#define MIPI_DSI_MODE_LPM BIT(11)
 
enum mipi_dsi_pixel_format {
MIPI_DSI_FMT_RGB888,
MIPI_DSI_FMT_RGB666,
MIPI_DSI_FMT_RGB666_PACKED,
MIPI_DSI_FMT_RGB565,
};
 
/**
* struct mipi_dsi_device - DSI peripheral device
* @host: DSI host for this peripheral
* @dev: driver model device node for this peripheral
* @channel: virtual channel assigned to the peripheral
* @format: pixel format for video mode
* @lanes: number of active data lanes
* @mode_flags: DSI operation mode related flags
*/
struct mipi_dsi_device {
struct mipi_dsi_host *host;
struct device dev;
 
unsigned int channel;
unsigned int lanes;
enum mipi_dsi_pixel_format format;
unsigned long mode_flags;
};
 
static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
{
return container_of(dev, struct mipi_dsi_device, dev);
}
 
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
u16 value);
 
ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
size_t size);
ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
size_t num_params, void *data, size_t size);
 
/**
* enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode
* @MIPI_DSI_DCS_TEAR_MODE_VBLANK: the TE output line consists of V-Blanking
* information only
* @MIPI_DSI_DCS_TEAR_MODE_VHBLANK : the TE output line consists of both
* V-Blanking and H-Blanking information
*/
enum mipi_dsi_dcs_tear_mode {
MIPI_DSI_DCS_TEAR_MODE_VBLANK,
MIPI_DSI_DCS_TEAR_MODE_VHBLANK,
};
 
#define MIPI_DSI_DCS_POWER_MODE_DISPLAY (1 << 2)
#define MIPI_DSI_DCS_POWER_MODE_NORMAL (1 << 3)
#define MIPI_DSI_DCS_POWER_MODE_SLEEP (1 << 4)
#define MIPI_DSI_DCS_POWER_MODE_PARTIAL (1 << 5)
#define MIPI_DSI_DCS_POWER_MODE_IDLE (1 << 6)
 
ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
const void *data, size_t len);
ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
const void *data, size_t len);
ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
size_t len);
int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode);
int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format);
int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
enum mipi_dsi_dcs_tear_mode mode);
int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format);
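 
/*
 * A hedged sketch of a typical panel power-on sequence built from the
 * DCS helpers above; the 120 ms sleep-out delay is illustrative, not
 * taken from any panel datasheet (msleep() needs <linux/delay.h>).
 */
static int my_panel_power_on(struct mipi_dsi_device *dsi)
{
	int ret;

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0)
		return ret;

	msleep(120);	/* illustrative sleep-out settle time */

	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0)
		return ret;

	return mipi_dsi_dcs_set_display_on(dsi);
}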
 
/**
* struct mipi_dsi_driver - DSI driver
* @driver: device driver model driver
* @probe: callback for device binding
* @remove: callback for device unbinding
* @shutdown: called at shutdown time to quiesce the device
*/
struct mipi_dsi_driver {
struct device_driver driver;
int (*probe)(struct mipi_dsi_device *dsi);
int (*remove)(struct mipi_dsi_device *dsi);
void (*shutdown)(struct mipi_dsi_device *dsi);
};
 
static inline struct mipi_dsi_driver *
to_mipi_dsi_driver(struct device_driver *driver)
{
return container_of(driver, struct mipi_dsi_driver, driver);
}
 
static inline void *mipi_dsi_get_drvdata(const struct mipi_dsi_device *dsi)
{
return dev_get_drvdata(&dsi->dev);
}
 
static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data)
{
dev_set_drvdata(&dsi->dev, data);
}
 
int mipi_dsi_driver_register_full(struct mipi_dsi_driver *driver,
struct module *owner);
void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver);
 
#define mipi_dsi_driver_register(driver) \
mipi_dsi_driver_register_full(driver, THIS_MODULE)
 
#define module_mipi_dsi_driver(__mipi_dsi_driver) \
module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \
mipi_dsi_driver_unregister)
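 
/*
 * A hedged registration sketch; the driver name and the my_panel_*
 * callbacks are illustrative assumptions:
 */
static struct mipi_dsi_driver my_panel_driver = {
	.driver = {
		.name = "my-panel",
	},
	.probe = my_panel_probe,
	.remove = my_panel_remove,
};
module_mipi_dsi_driver(my_panel_driver);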
 
#endif /* __DRM_MIPI_DSI_H__ */
/drivers/include/drm/drm_mm.h
68,8 → 68,8
unsigned scanned_preceeds_hole : 1;
unsigned allocated : 1;
unsigned long color;
unsigned long start;
unsigned long size;
u64 start;
u64 size;
struct drm_mm *mm;
};
 
82,16 → 82,16
unsigned int scan_check_range : 1;
unsigned scan_alignment;
unsigned long scan_color;
unsigned long scan_size;
unsigned long scan_hit_start;
unsigned long scan_hit_end;
u64 scan_size;
u64 scan_hit_start;
u64 scan_hit_end;
unsigned scanned_blocks;
unsigned long scan_start;
unsigned long scan_end;
u64 scan_start;
u64 scan_end;
struct drm_mm_node *prev_scanned_node;
 
void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
unsigned long *start, unsigned long *end);
u64 *start, u64 *end);
};
 
/**
124,7 → 124,7
return mm->hole_stack.next;
}
 
static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
return hole_node->start + hole_node->size;
}
140,13 → 140,13
* Returns:
* Start of the subsequent hole.
*/
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
BUG_ON(!hole_node->hole_follows);
return __drm_mm_hole_node_start(hole_node);
}
 
static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
return list_entry(hole_node->node_list.next,
struct drm_mm_node, node_list)->start;
163,7 → 163,7
* Returns:
* End of the subsequent hole.
*/
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
return __drm_mm_hole_node_end(hole_node);
}
222,7 → 222,7
 
int drm_mm_insert_node_generic(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long color,
enum drm_mm_search_flags sflags,
245,7 → 245,7
*/
static inline int drm_mm_insert_node(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size,
u64 size,
unsigned alignment,
enum drm_mm_search_flags flags)
{
255,11 → 255,11
 
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
u64 start,
u64 end,
enum drm_mm_search_flags sflags,
enum drm_mm_allocator_flags aflags);
/**
282,10 → 282,10
*/
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long start,
unsigned long end,
u64 start,
u64 end,
enum drm_mm_search_flags flags)
{
return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
296,21 → 296,21
void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm,
unsigned long start,
unsigned long size);
u64 start,
u64 size);
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);
 
void drm_mm_init_scan(struct drm_mm *mm,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end);
u64 start,
u64 end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);
 
/drivers/include/drm/drm_modes.h
90,6 → 90,9
 
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
#define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */
#define CRTC_NO_VSCAN (1 << 3) /* don't adjust vscan */
#define CRTC_STEREO_DOUBLE_ONLY (CRTC_STEREO_DOUBLE | CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)
 
#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
 
179,6 → 182,10
 
struct drm_display_mode *drm_mode_create(struct drm_device *dev);
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
const struct drm_display_mode *in);
int drm_mode_convert_umode(struct drm_display_mode *out,
const struct drm_mode_modeinfo *in);
void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
 
197,6 → 204,8
int GTF_K, int GTF_2J);
void drm_display_mode_from_videomode(const struct videomode *vm,
struct drm_display_mode *dmode);
void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
struct videomode *vm);
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode,
int index);
217,8 → 226,8
const struct drm_display_mode *mode2);
 
/* for use by the crtc helper probe functions */
void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
enum drm_mode_status drm_mode_validate_basic(const struct drm_display_mode *mode);
enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode,
int maxX, int maxY);
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
/drivers/include/drm/drm_modeset_lock.h
43,7 → 43,7
 
struct ww_acquire_ctx ww_ctx;
 
/**
/*
* Contended lock: if a lock is contended you should only call
* drm_modeset_backoff() which drops locks and slow-locks the
* contended lock.
50,12 → 50,12
*/
struct drm_modeset_lock *contended;
 
/**
/*
* list of held locks (drm_modeset_lock)
*/
struct list_head locked;
 
/**
/*
* Trylock mode, use only for panic handlers!
*/
bool trylock_only;
70,12 → 70,12
* Used for locking CRTCs and other modeset resources.
*/
struct drm_modeset_lock {
/**
/*
* modeset lock
*/
struct ww_mutex mutex;
 
/**
/*
* Resources that are locked as part of an atomic update are added
* to a list (so we know what to unlock at the end).
*/
130,7 → 130,6
struct drm_plane;
 
void drm_modeset_lock_all(struct drm_device *dev);
int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
void drm_modeset_unlock_all(struct drm_device *dev);
void drm_modeset_lock_crtc(struct drm_crtc *crtc,
struct drm_plane *plane);
/drivers/include/drm/drm_panel.h
0,0 → 1,145
/*
* Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#ifndef __DRM_PANEL_H__
#define __DRM_PANEL_H__
 
#include <linux/list.h>
 
struct drm_connector;
struct drm_device;
struct drm_panel;
struct display_timing;
 
/**
* struct drm_panel_funcs - perform operations on a given panel
* @disable: disable panel (turn off back light, etc.)
* @unprepare: turn off panel
* @prepare: turn on panel and perform set up
* @enable: enable panel (turn on back light, etc.)
* @get_modes: add modes to the connector that the panel is attached to and
* return the number of modes added
* @get_timings: copy display timings into the provided array and return
* the number of display timings available
*
* The .prepare() function is typically called before the display controller
* starts to transmit video data. Panel drivers can use this to turn the panel
* on and wait for it to become ready. If additional configuration is required
* (via a control bus such as I2C, SPI or DSI for example) this is a good time
* to do that.
*
* After the display controller has started transmitting video data, it's safe
* to call the .enable() function. This will typically enable the backlight to
* make the image on screen visible. Some panels require a certain amount of
* time or frames before the image is displayed. This function is responsible
* for taking this into account before enabling the backlight to avoid visual
* glitches.
*
* Before stopping video transmission from the display controller it can be
* necessary to turn off the panel to avoid visual glitches. This is done in
* the .disable() function. Analogously to .enable() this typically involves
* turning off the backlight and waiting for some time to make sure no image
* is visible on the panel. It is then safe for the display controller to
* cease transmission of video data.
*
* To save power when no video data is transmitted, a driver can power down
* the panel. This is the job of the .unprepare() function.
*/
struct drm_panel_funcs {
int (*disable)(struct drm_panel *panel);
int (*unprepare)(struct drm_panel *panel);
int (*prepare)(struct drm_panel *panel);
int (*enable)(struct drm_panel *panel);
int (*get_modes)(struct drm_panel *panel);
int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
struct display_timing *timings);
};
 
struct drm_panel {
struct drm_device *drm;
struct drm_connector *connector;
struct device *dev;
 
const struct drm_panel_funcs *funcs;
 
struct list_head list;
};
 
static inline int drm_panel_unprepare(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->unprepare)
return panel->funcs->unprepare(panel);
 
return panel ? -ENOSYS : -EINVAL;
}
 
static inline int drm_panel_disable(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->disable)
return panel->funcs->disable(panel);
 
return panel ? -ENOSYS : -EINVAL;
}
 
static inline int drm_panel_prepare(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->prepare)
return panel->funcs->prepare(panel);
 
return panel ? -ENOSYS : -EINVAL;
}
 
static inline int drm_panel_enable(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->enable)
return panel->funcs->enable(panel);
 
return panel ? -ENOSYS : -EINVAL;
}
 
static inline int drm_panel_get_modes(struct drm_panel *panel)
{
if (panel && panel->funcs && panel->funcs->get_modes)
return panel->funcs->get_modes(panel);
 
return panel ? -ENOSYS : -EINVAL;
}
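 
/*
 * A hedged sketch of the call order described in the drm_panel_funcs
 * comment above, as seen from a display controller driver; the "start/
 * stop video" steps stand for assumed driver-specific operations:
 */
static inline void my_pipe_on(struct drm_panel *panel)
{
	drm_panel_prepare(panel);	/* power up, pre-video setup */
	/* ... start transmitting video data ... */
	drm_panel_enable(panel);	/* backlight on, image visible */
}

static inline void my_pipe_off(struct drm_panel *panel)
{
	drm_panel_disable(panel);	/* backlight off */
	/* ... stop transmitting video data ... */
	drm_panel_unprepare(panel);	/* power down */
}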
 
void drm_panel_init(struct drm_panel *panel);
 
int drm_panel_add(struct drm_panel *panel);
void drm_panel_remove(struct drm_panel *panel);
 
int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
int drm_panel_detach(struct drm_panel *panel);
 
#ifdef CONFIG_OF
struct drm_panel *of_drm_find_panel(struct device_node *np);
#else
static inline struct drm_panel *of_drm_find_panel(struct device_node *np)
{
return NULL;
}
#endif
 
#endif
/drivers/include/drm/drm_pciids.h
172,6 → 172,7
{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
186,6 → 187,7
{0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
/drivers/include/drm/drm_plane_helper.h
43,8 → 43,7
* planes.
*/
 
extern int drm_crtc_init(struct drm_device *dev,
struct drm_crtc *crtc,
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs);
 
/**
52,29 → 51,32
* @prepare_fb: prepare a framebuffer for use by the plane
* @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane
* @atomic_check: check that a given atomic state is valid and can be applied
* @atomic_update: apply an atomic state to the plane
* @atomic_update: apply an atomic state to the plane (mandatory)
* @atomic_disable: disable the plane
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_plane_helper_funcs {
int (*prepare_fb)(struct drm_plane *plane,
struct drm_framebuffer *fb);
const struct drm_plane_state *new_state);
void (*cleanup_fb)(struct drm_plane *plane,
struct drm_framebuffer *fb);
const struct drm_plane_state *old_state);
 
int (*atomic_check)(struct drm_plane *plane,
struct drm_plane_state *state);
void (*atomic_update)(struct drm_plane *plane,
struct drm_plane_state *old_state);
void (*atomic_disable)(struct drm_plane *plane,
struct drm_plane_state *old_state);
};
 
static inline void drm_plane_helper_add(struct drm_plane *plane,
const struct drm_plane_helper_funcs *funcs)
{
plane->helper_private = (void *)funcs;
plane->helper_private = funcs;
}
 
extern int drm_plane_helper_check_update(struct drm_plane *plane,
int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_rect *src,
85,7 → 87,7
bool can_position,
bool can_update_disabled,
bool *visible);
extern int drm_primary_helper_update(struct drm_plane *plane,
int drm_primary_helper_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
92,14 → 94,10
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
extern int drm_primary_helper_disable(struct drm_plane *plane);
extern void drm_primary_helper_destroy(struct drm_plane *plane);
int drm_primary_helper_disable(struct drm_plane *plane);
void drm_primary_helper_destroy(struct drm_plane *plane);
extern const struct drm_plane_funcs drm_primary_helper_funcs;
extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
const uint32_t *formats,
int num_formats);
 
 
int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
/drivers/include/drm/drm_vma_manager.h
54,9 → 54,6
unsigned long page_offset, unsigned long size);
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
 
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages);
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages);
71,12 → 68,12
struct file *filp);
 
/**
* drm_vma_offset_exact_lookup() - Look up node by exact address
* drm_vma_offset_exact_lookup_locked() - Look up node by exact address
* @mgr: Manager object
* @start: Start address (page-based, not byte-based)
* @pages: Size of object (page-based)
*
* Same as drm_vma_offset_lookup() but does not allow any offset into the node.
* Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node.
* It only returns the exact object with the given start address.
*
* RETURNS:
83,13 → 80,13
* Node at exact start address @start.
*/
static inline struct drm_vma_offset_node *
drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)
{
struct drm_vma_offset_node *node;
 
node = drm_vma_offset_lookup(mgr, start, pages);
node = drm_vma_offset_lookup_locked(mgr, start, pages);
return (node && node->vm_node.start == start) ? node : NULL;
}
 
97,7 → 94,7
* drm_vma_offset_lock_lookup() - Lock lookup for extended private use
* @mgr: Manager object
*
* Lock VMA manager for extended lookups. Only *_locked() VMA function calls
* Lock VMA manager for extended lookups. Only locked VMA function calls
* are allowed while holding this lock. All other contexts are blocked from VMA
* until the lock is released via drm_vma_offset_unlock_lookup().
*
108,13 → 105,6
* not call any other VMA helpers while holding this lock.
*
* Note: You're in atomic-context while holding this lock!
*
* Example:
* drm_vma_offset_lock_lookup(mgr);
* node = drm_vma_offset_lookup_locked(mgr);
* if (node)
* kref_get_unless_zero(container_of(node, sth, entr));
* drm_vma_offset_unlock_lookup(mgr);
*/
static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
{
/drivers/include/drm/i915_component.h
0,0 → 1,78
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
 
#ifndef _I915_COMPONENT_H_
#define _I915_COMPONENT_H_
 
/* MAX_PORTS is the number of ports.
* It must be kept in sync with I915_MAX_PORTS defined in i915_drv.h.
* 5 should be enough, as only HSW, BDW and SKL need such a fix.
*/
#define MAX_PORTS 5
 
/**
* struct i915_audio_component_ops - callbacks defined in gfx driver
* @owner: the module owner
* @get_power: get the POWER_DOMAIN_AUDIO power well
* @put_power: put the POWER_DOMAIN_AUDIO power well
* @codec_wake_override: Enable/Disable generating the codec wake signal
* @get_cdclk_freq: get the Core Display Clock in kHz
* @sync_audio_rate: set n/cts based on the sample rate
*/
struct i915_audio_component_ops {
struct module *owner;
void (*get_power)(struct device *);
void (*put_power)(struct device *);
void (*codec_wake_override)(struct device *, bool enable);
int (*get_cdclk_freq)(struct device *);
int (*sync_audio_rate)(struct device *, int port, int rate);
};
 
struct i915_audio_component_audio_ops {
void *audio_ptr;
/**
* Called from the i915 driver to notify the HDA driver that
* pin sense and/or ELD information has changed.
* @audio_ptr: HDA driver object
* @port: Which port has changed (PORTA / PORTB / PORTC etc)
*/
void (*pin_eld_notify)(void *audio_ptr, int port);
};
 
/**
* struct i915_audio_component - used for audio video interaction
* @dev: the device from gfx driver
* @aud_sample_rate: the array of audio sample rates per port
* @ops: callbacks implemented by the gfx driver, called by the audio driver
* @audio_ops: callbacks implemented by the audio driver, called from i915
*/
struct i915_audio_component {
struct device *dev;
int aud_sample_rate[MAX_PORTS];
 
const struct i915_audio_component_ops *ops;
 
const struct i915_audio_component_audio_ops *audio_ops;
};
 
#endif /* _I915_COMPONENT_H_ */
/drivers/include/drm/i915_pciids.h
208,40 → 208,41
#define INTEL_VLV_D_IDS(info) \
INTEL_VGA_DEVICE(0x0155, info)
 
#define _INTEL_BDW_M(gt, id, info) \
INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
#define _INTEL_BDW_D(gt, id, info) \
INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)
 
#define _INTEL_BDW_M_IDS(gt, info) \
_INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \
_INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
_INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \
_INTEL_BDW_M(gt, 0x160E, info) /* ULX */
 
#define _INTEL_BDW_D_IDS(gt, info) \
_INTEL_BDW_D(gt, 0x160A, info), /* Server */ \
_INTEL_BDW_D(gt, 0x160D, info) /* Workstation */
 
#define INTEL_BDW_GT12M_IDS(info) \
_INTEL_BDW_M_IDS(1, info), \
_INTEL_BDW_M_IDS(2, info)
INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \
INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */
 
#define INTEL_BDW_GT12D_IDS(info) \
_INTEL_BDW_D_IDS(1, info), \
_INTEL_BDW_D_IDS(2, info)
INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
 
#define INTEL_BDW_GT3M_IDS(info) \
_INTEL_BDW_M_IDS(3, info)
INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \
INTEL_VGA_DEVICE(0x162E, info) /* ULX */
 
#define INTEL_BDW_GT3D_IDS(info) \
_INTEL_BDW_D_IDS(3, info)
INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
 
#define INTEL_BDW_RSVDM_IDS(info) \
_INTEL_BDW_M_IDS(4, info)
INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \
INTEL_VGA_DEVICE(0x163E, info) /* ULX */
 
#define INTEL_BDW_RSVDD_IDS(info) \
_INTEL_BDW_D_IDS(4, info)
INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
 
#define INTEL_BDW_M_IDS(info) \
INTEL_BDW_GT12M_IDS(info), \
259,21 → 260,35
INTEL_VGA_DEVICE(0x22b2, info), \
INTEL_VGA_DEVICE(0x22b3, info)
 
#define INTEL_SKL_IDS(info) \
#define INTEL_SKL_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */
 
#define INTEL_SKL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
 
#define INTEL_SKL_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */
 
#define INTEL_SKL_IDS(info) \
INTEL_SKL_GT1_IDS(info), \
INTEL_SKL_GT2_IDS(info), \
INTEL_SKL_GT3_IDS(info)
 
#define INTEL_BXT_IDS(info) \
INTEL_VGA_DEVICE(0x0A84, info), \
INTEL_VGA_DEVICE(0x1A84, info), \
INTEL_VGA_DEVICE(0x5A84, info)
 
#endif /* _I915_PCIIDS_H */
/drivers/include/drm/intel-gtt.h
3,8 → 3,8
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
 
void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
phys_addr_t *mappable_base, unsigned long *mappable_end);
void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
phys_addr_t *mappable_base, u64 *mappable_end);
 
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
/drivers/include/drm/ttm/ttm_bo_api.h
249,7 → 249,7
* either of these locks held.
*/
 
unsigned long offset;
uint64_t offset; /* GPU address space is independent of CPU word size */
uint32_t cur_placement;
 
struct sg_table *sg;
/drivers/include/drm/ttm/ttm_bo_driver.h
277,7 → 277,7
bool has_type;
bool use_type;
uint32_t flags;
unsigned long gpu_offset;
uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
uint64_t size;
uint32_t available_caching;
uint32_t default_caching;
/drivers/include/drm/ttm/ttm_lock.h
51,7 → 51,7
 
#include <ttm/ttm_object.h>
#include <linux/wait.h>
//#include <linux/atomic.h>
#include <linux/atomic.h>
 
/**
* struct ttm_lock
/drivers/include/linux/atomic.h
2,7 → 2,427
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>
 
/*
* Relaxed variants of xchg, cmpxchg and some atomic operations.
*
* We support four variants:
*
* - Fully ordered: The default implementation, no suffix required.
* - Acquire: Provides ACQUIRE semantics, _acquire suffix.
* - Release: Provides RELEASE semantics, _release suffix.
* - Relaxed: No ordering guarantees, _relaxed suffix.
*
* For compound atomics performing both a load and a store, ACQUIRE
* semantics apply only to the load and RELEASE semantics only to the
* store portion of the operation. Note that a failed cmpxchg_acquire
* does -not- imply any memory ordering constraints.
*
* See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
*/
 
#ifndef atomic_read_acquire
#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
#endif
 
#ifndef atomic_set_release
#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
#endif
 
/*
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*/
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
smp_mb__after_atomic(); \
__ret; \
})
 
#define __atomic_op_release(op, args...) \
({ \
smp_mb__before_atomic(); \
op##_relaxed(args); \
})
 
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
smp_mb__before_atomic(); \
__ret = op##_relaxed(args); \
smp_mb__after_atomic(); \
__ret; \
})
 
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed atomic_add_return
#define atomic_add_return_acquire atomic_add_return
#define atomic_add_return_release atomic_add_return
 
#else /* atomic_add_return_relaxed */
 
#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...) \
__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif
 
#ifndef atomic_add_return_release
#define atomic_add_return_release(...) \
__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif
 
#ifndef atomic_add_return
#define atomic_add_return(...) \
__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */
 
/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
#define atomic_inc_return_relaxed atomic_inc_return
#define atomic_inc_return_acquire atomic_inc_return
#define atomic_inc_return_release atomic_inc_return
 
#else /* atomic_inc_return_relaxed */
 
#ifndef atomic_inc_return_acquire
#define atomic_inc_return_acquire(...) \
__atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
#endif
 
#ifndef atomic_inc_return_release
#define atomic_inc_return_release(...) \
__atomic_op_release(atomic_inc_return, __VA_ARGS__)
#endif
 
#ifndef atomic_inc_return
#define atomic_inc_return(...) \
__atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif
#endif /* atomic_inc_return_relaxed */
 
/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return
#define atomic_sub_return_acquire atomic_sub_return
#define atomic_sub_return_release atomic_sub_return
 
#else /* atomic_sub_return_relaxed */
 
#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...) \
__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif
 
#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...) \
__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif
 
#ifndef atomic_sub_return
#define atomic_sub_return(...) \
__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */
 
/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return
#define atomic_dec_return_acquire atomic_dec_return
#define atomic_dec_return_release atomic_dec_return
 
#else /* atomic_dec_return_relaxed */
 
#ifndef atomic_dec_return_acquire
#define atomic_dec_return_acquire(...) \
__atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
#endif
 
#ifndef atomic_dec_return_release
#define atomic_dec_return_release(...) \
__atomic_op_release(atomic_dec_return, __VA_ARGS__)
#endif
 
#ifndef atomic_dec_return
#define atomic_dec_return(...) \
__atomic_op_fence(atomic_dec_return, __VA_ARGS__)
#endif
#endif /* atomic_dec_return_relaxed */
 
/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed atomic_xchg
#define atomic_xchg_acquire atomic_xchg
#define atomic_xchg_release atomic_xchg
 
#else /* atomic_xchg_relaxed */
 
#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...) \
__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif
 
#ifndef atomic_xchg_release
#define atomic_xchg_release(...) \
__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif
 
#ifndef atomic_xchg
#define atomic_xchg(...) \
__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */
 
/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed atomic_cmpxchg
#define atomic_cmpxchg_acquire atomic_cmpxchg
#define atomic_cmpxchg_release atomic_cmpxchg
 
#else /* atomic_cmpxchg_relaxed */
 
#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...) \
__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif
 
#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...) \
__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif
 
#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...) \
__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */
 
#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
#endif
 
#ifndef atomic64_set_release
#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
#endif
 
/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed atomic64_add_return
#define atomic64_add_return_acquire atomic64_add_return
#define atomic64_add_return_release atomic64_add_return
 
#else /* atomic64_add_return_relaxed */
 
#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...) \
__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...) \
__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_add_return
#define atomic64_add_return(...) \
__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */
 
/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
#define atomic64_inc_return_relaxed atomic64_inc_return
#define atomic64_inc_return_acquire atomic64_inc_return
#define atomic64_inc_return_release atomic64_inc_return
 
#else /* atomic64_inc_return_relaxed */
 
#ifndef atomic64_inc_return_acquire
#define atomic64_inc_return_acquire(...) \
__atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_inc_return_release
#define atomic64_inc_return_release(...) \
__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_inc_return
#define atomic64_inc_return(...) \
__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
#endif /* atomic64_inc_return_relaxed */
 
 
/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return
#define atomic64_sub_return_acquire atomic64_sub_return
#define atomic64_sub_return_release atomic64_sub_return
 
#else /* atomic64_sub_return_relaxed */
 
#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...) \
__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...) \
__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_sub_return
#define atomic64_sub_return(...) \
__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */
 
/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return
#define atomic64_dec_return_acquire atomic64_dec_return
#define atomic64_dec_return_release atomic64_dec_return
 
#else /* atomic64_dec_return_relaxed */
 
#ifndef atomic64_dec_return_acquire
#define atomic64_dec_return_acquire(...) \
__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_dec_return_release
#define atomic64_dec_return_release(...) \
__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_dec_return
#define atomic64_dec_return(...) \
__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
#endif
#endif /* atomic64_dec_return_relaxed */
 
/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed atomic64_xchg
#define atomic64_xchg_acquire atomic64_xchg
#define atomic64_xchg_release atomic64_xchg
 
#else /* atomic64_xchg_relaxed */
 
#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...) \
__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif
 
#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...) \
__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif
 
#ifndef atomic64_xchg
#define atomic64_xchg(...) \
__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */
 
/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
#define atomic64_cmpxchg_acquire atomic64_cmpxchg
#define atomic64_cmpxchg_release atomic64_cmpxchg
 
#else /* atomic64_cmpxchg_relaxed */
 
#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...) \
__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif
 
#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...) \
__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif
 
#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...) \
__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */
 
/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed cmpxchg
#define cmpxchg_acquire cmpxchg
#define cmpxchg_release cmpxchg
 
#else /* cmpxchg_relaxed */
 
#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...) \
__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif
 
#ifndef cmpxchg_release
#define cmpxchg_release(...) \
__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif
 
#ifndef cmpxchg
#define cmpxchg(...) \
__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */
 
/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed cmpxchg64
#define cmpxchg64_acquire cmpxchg64
#define cmpxchg64_release cmpxchg64
 
#else /* cmpxchg64_relaxed */
 
#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...) \
__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif
 
#ifndef cmpxchg64_release
#define cmpxchg64_release(...) \
__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif
 
#ifndef cmpxchg64
#define cmpxchg64(...) \
__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */
 
/* xchg_relaxed */
#ifndef xchg_relaxed
#define xchg_relaxed xchg
#define xchg_acquire xchg
#define xchg_release xchg
 
#else /* xchg_relaxed */
 
#ifndef xchg_acquire
#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__)
#endif
 
#ifndef xchg_release
#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__)
#endif
 
#ifndef xchg
#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */
 
/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
28,6 → 448,23
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
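/*
 * Usage sketch (illustrative; struct example_obj is hypothetical):
 * take a reference only while the object is still live, a common
 * pattern when looking an object up in a lock-free table.
 */
#if 0
struct example_obj {
	atomic_t refcount;
};

static inline bool example_obj_tryget(struct example_obj *obj)
{
	/* increments refcount unless it has already dropped to zero */
	return atomic_inc_not_zero(&obj->refcount);
}
#endif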
 
#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
atomic_and(~i, v);
}
#endif
 
static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
atomic_andnot(mask, v);
}
 
static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
atomic_or(mask, v);
}
 
/**
* atomic_inc_not_zero_hint - increment if not null
* @v: pointer of type atomic_t
111,21 → 548,17
}
#endif
 
#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
static inline void atomic_or(int i, atomic_t *v)
{
	int old;
	int new;

	do {
		old = atomic_read(v);
		new = old | i;
	} while (atomic_cmpxchg(v, old, new) != old);
}
#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
	atomic64_and(~i, v);
}
#endif
 
#include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
 
#endif /* _LINUX_ATOMIC_H */
/drivers/include/linux/bitmap.h
52,16 → 52,13
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
* bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
* bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
* bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf
* bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
* bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf
* bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
* bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex
*/
 
/*
96,10 → 93,10
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
unsigned int nbits);
extern void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
147,59 → 144,55
align_mask, 0);
}
 
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
extern int bitmap_scnlistprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int bitmap_parselist(const char *buf, unsigned long *maskp,
int nmaskbits);
extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, int bits);
const unsigned long *old, const unsigned long *new, unsigned int nbits);
extern int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, int bits);
const unsigned long *relmap, unsigned int bits);
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
int sz, int bits);
unsigned int sz, unsigned int nbits);
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
#ifdef __BIG_ENDIAN
extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
#else
#define bitmap_copy_le bitmap_copy
#endif
extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
extern int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
 
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
#define BITMAP_LAST_WORD_MASK(nbits) \
( \
((nbits) % BITS_PER_LONG) ? \
(1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
)
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
 
#define small_const_nbits(nbits) \
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
 
static inline void bitmap_zero(unsigned long *dst, int nbits)
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = 0UL;
else {
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0, len);
}
}
 
static inline void bitmap_fill(unsigned long *dst, int nbits)
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
size_t nlongs = BITS_TO_LONGS(nbits);
unsigned int nlongs = BITS_TO_LONGS(nbits);
if (!small_const_nbits(nbits)) {
int len = (nlongs - 1) * sizeof(unsigned long);
unsigned int len = (nlongs - 1) * sizeof(unsigned long);
memset(dst, 0xff, len);
}
dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
206,12 → 199,12
}
 
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
int nbits)
unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src;
else {
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memcpy(dst, src, len);
}
}
290,8 → 283,8
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_empty(src, nbits);
 
return find_first_bit(src, nbits) == nbits;
}
 
static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
298,11 → 291,11
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_full(src, nbits);
 
return find_first_zero_bit(src, nbits) == nbits;
}
 
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
309,22 → 302,22
return __bitmap_weight(src, nbits);
}
 
static inline void bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int n, int nbits)
static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n;
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
else
__bitmap_shift_right(dst, src, n, nbits);
__bitmap_shift_right(dst, src, shift, nbits);
}
 
static inline void bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int n, int nbits)
static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
*dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
else
__bitmap_shift_left(dst, src, n, nbits);
__bitmap_shift_left(dst, src, shift, nbits);
}
 
static inline int bitmap_parse(const char *buf, unsigned int buflen,
/drivers/include/linux/bitops.h
57,7 → 57,7
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
 
static __inline__ int get_bitmask_order(unsigned int count)
static inline int get_bitmask_order(unsigned int count)
{
int order;
 
65,7 → 65,7
return order; /* We could be slightly more clever with -1 here... */
}
 
static __inline__ int get_count_order(unsigned int count)
static inline int get_count_order(unsigned int count)
{
int order;
 
75,7 → 75,7
return order;
}
 
static inline unsigned long hweight_long(unsigned long w)
static __always_inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
164,6 → 164,8
* sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
* @value: value to sign extend
* @index: 0 based bit index (0<=index<32) to sign bit
*
* This is safe to use for 16- and 8-bit types as well.
*/
static inline __s32 sign_extend32(__u32 value, int index)
{
171,6 → 173,17
return (__s32)(value << shift) >> shift;
}
 
/**
* sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
* @value: value to sign extend
* @index: 0 based bit index (0<=index<64) to sign bit
*/
static inline __s64 sign_extend64(__u64 value, int index)
{
__u8 shift = 63 - index;
return (__s64)(value << shift) >> shift;
}
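/*
 * Worked example (illustrative; the example_* name is hypothetical):
 * decoding a signed 12-bit hardware field. For raw = 0x800 (only bit
 * 11 set), shift = 31 - 11 = 20, so (__s32)(0x800 << 20) >> 20
 * arithmetic-shifts the sign bit back down and yields -2048.
 */
#if 0
static inline __s32 example_decode_s12(__u32 raw)
{
	return sign_extend32(raw & 0xfff, 11);	/* bit 11 is the sign bit */
}
#endif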
 
static inline unsigned fls_long(unsigned long l)
{
if (sizeof(l) == 4)
218,9 → 231,9
/**
* find_last_bit - find the last set bit in a memory region
* @addr: The address to start the search at
* @size: The maximum size to search
* @size: The number of bits to search
*
* Returns the bit number of the first set bit, or size.
* Returns the bit number of the last set bit, or size.
*/
extern unsigned long find_last_bit(const unsigned long *addr,
unsigned long size);
/drivers/include/linux/bottom_half.h
2,7 → 2,6
#define _LINUX_BH_H
 
#include <linux/preempt.h>
#include <linux/preempt_mask.h>
 
#ifdef CONFIG_TRACE_IRQFLAGS
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
/drivers/include/linux/circ_buf.h
0,0 → 1,36
/*
* See Documentation/circular-buffers.txt for more information.
*/
 
#ifndef _LINUX_CIRC_BUF_H
#define _LINUX_CIRC_BUF_H 1
 
struct circ_buf {
char *buf;
int head;
int tail;
};
 
/* Return count in buffer. */
#define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1))
 
/* Return space available, 0..size-1. We always leave one free char
as a completely full buffer has head == tail, which is the same as
empty. */
#define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size))
 
/* Return count up to the end of the buffer. Carefully avoid
accessing head and tail more than once, so they can change
underneath us without returning inconsistent results. */
#define CIRC_CNT_TO_END(head,tail,size) \
({int end = (size) - (tail); \
int n = ((head) + end) & ((size)-1); \
n < end ? n : end;})
 
/* Return space available up to the end of the buffer. */
#define CIRC_SPACE_TO_END(head,tail,size) \
({int end = (size) - 1 - (head); \
int n = (end + (tail)) & ((size)-1); \
n <= end ? n : end+1;})
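/*
 * Usage sketch (illustrative; the example_* name is hypothetical): a
 * single-producer push assuming a power-of-2 size. The memory barriers
 * needed between the data store and the head update are omitted for
 * brevity; see Documentation/circular-buffers.txt.
 */
#if 0
static inline int example_circ_push(struct circ_buf *c, int size, char ch)
{
	if (CIRC_SPACE(c->head, c->tail, size) < 1)
		return -1;			/* buffer full */
	c->buf[c->head] = ch;
	c->head = (c->head + 1) & (size - 1);	/* size must be 2^n */
	return 0;
}
#endif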
 
#endif /* _LINUX_CIRC_BUF_H */
/drivers/include/linux/compiler-gcc.h
9,10 → 9,24
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
 
/* Optimization barrier */
 
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
/*
 * This version exists to prevent dead store elimination on @ptr,
 * where gcc and llvm may behave differently when otherwise using
 * normal barrier(): while gcc behavior gets along with a normal
 * barrier(), llvm needs an explicit input variable to be assumed
 * clobbered. The issue is as follows: while the inline asm might
 * access any memory it wants, the compiler could have fit all of
 * @ptr into registers instead, and since @ptr never escaped
 * from that, it proved that the inline asm wasn't touching any of
 * it. This version works well with both compilers, i.e. we're telling
 * the compiler that the inline asm absolutely may see the contents
 * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
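/*
 * Usage sketch (illustrative; the example_* name is hypothetical):
 * keeping a scrubbing store alive in the style of memzero_explicit(),
 * so the compiler cannot prove the zeroing of a dying buffer is a
 * dead store and drop it.
 */
#if 0
static inline void example_scrub(void *s, size_t count)
{
	memset(s, 0, count);
	barrier_data(s);	/* asm may read *s, so the memset must stay */
}
#endif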
 
/*
* This macro obfuscates arithmetic on a variable address so that gcc
33,15 → 47,18
* case either is valid.
*/
#define RELOC_HIDE(ptr, off) \
({ unsigned long __ptr; \
({ \
unsigned long __ptr; \
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
(typeof(ptr)) (__ptr + (off)); })
(typeof(ptr)) (__ptr + (off)); \
})
 
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
#define OPTIMIZER_HIDE_VAR(var) \
__asm__ ("" : "=r" (var) : "0" (var))
 
#ifdef __CHECKER__
#define __must_be_array(arr) 0
#define __must_be_array(a) 0
#else
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
63,19 → 80,25
# define __inline __inline notrace
#endif
 
#define __always_inline inline __attribute__((always_inline))
#define noinline __attribute__((noinline))
 
#define __deprecated __attribute__((deprecated))
#define __packed __attribute__((packed))
#define __weak __attribute__((weak))
#define __alias(symbol) __attribute__((alias(#symbol)))
 
/*
* it doesn't make sense on ARM (currently the only user of __naked) to trace
* naked functions because then mcount is called without stack and frame pointer
* being set up and there is no chance to restore the lr register to the value
* before mcount was called.
* it doesn't make sense on ARM (currently the only user of __naked)
* to trace naked functions because then mcount is called without
* stack and frame pointer being set up and there is no chance to
* restore the lr register to the value before mcount was called.
*
* The asm() bodies of naked functions often depend on standard calling conventions,
* therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce
* this, so we must do so ourselves. See GCC PR44290.
* The asm() bodies of naked functions often depend on standard calling
* conventions, therefore they must be noinline and noclone.
*
* GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
* See GCC PR44290.
*/
#define __naked __attribute__((naked)) noinline __noclone notrace
 
95,24 → 118,166
#define __aligned(x) __attribute__((aligned(x)))
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __scanf(a, b) __attribute__((format(scanf, a, b)))
#define noinline __attribute__((noinline))
#define __attribute_const__ __attribute__((__const__))
#define __maybe_unused __attribute__((unused))
#define __always_unused __attribute__((unused))
 
#define __gcc_header(x) #x
#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
#define gcc_header(x) _gcc_header(x)
#include gcc_header(__GNUC__)
/* gcc version specific checks */
 
#if GCC_VERSION < 30200
# error Sorry, your compiler is too old - please upgrade it.
#endif
 
#if GCC_VERSION < 30300
# define __used __attribute__((__unused__))
#else
# define __used __attribute__((__used__))
#endif
 
#ifdef CONFIG_GCOV_KERNEL
# if GCC_VERSION < 30400
# error "GCOV profiling support for gcc versions below 3.4 not included"
# endif /* __GNUC_MINOR__ */
#endif /* CONFIG_GCOV_KERNEL */
 
#if GCC_VERSION >= 30400
#define __must_check __attribute__((warn_unused_result))
#endif
 
#if GCC_VERSION >= 40000
 
/* GCC 4.1.[01] miscompiles __weak */
#ifdef __KERNEL__
# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
# error Your version of gcc miscompiles the __weak directive
# endif
#endif
 
#define __used __attribute__((__used__))
#define __compiler_offsetof(a, b) \
__builtin_offsetof(a, b)
 
#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif
 
#if GCC_VERSION >= 40300
/* Mark functions as cold. gcc will assume any path leading to a call
* to them will be unlikely. This means a lot of manual unlikely()s
* are unnecessary now for any paths leading to the usual suspects
* like BUG(), printk(), panic() etc. [but let's keep them for now for
* older compilers]
*
* Early snapshots of gcc 4.3 don't support this and we can't detect this
* in the preprocessor, but we can live with this because they're unreleased.
* Maketime probing would be overkill here.
*
* gcc also has a __attribute__((__hot__)) to move hot functions into
* a special section, but I don't see any sense in this right now in
* the kernel context
*/
#define __cold __attribute__((__cold__))
 
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
 
#ifndef __CHECKER__
# define __compiletime_warning(message) __attribute__((warning(message)))
# define __compiletime_error(message) __attribute__((error(message)))
#endif /* __CHECKER__ */
#endif /* GCC_VERSION >= 40300 */
 
#if GCC_VERSION >= 40500
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
* control elsewhere.
*
* Early snapshots of gcc 4.5 don't support this and we can't detect
* this in the preprocessor, but we can live with this because they're
* unreleased. Really, we need to have autoconf for the kernel.
*/
#define unreachable() __builtin_unreachable()
 
/* Mark a function definition as prohibited from being cloned. */
#define __noclone __attribute__((__noclone__))
 
#endif /* GCC_VERSION >= 40500 */
 
#if GCC_VERSION >= 40600
/*
* When used with Link Time Optimization, gcc can optimize away C functions or
* variables which are referenced only from assembly code. __visible tells the
* optimizer that something else uses this function or variable, thus preventing
* this.
*/
#define __visible __attribute__((externally_visible))
#endif
 
 
#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
/*
* __assume_aligned(n, k): Tell the optimizer that the returned
* pointer can be assumed to be k modulo n. The second argument is
* optional (default 0), so we use a variadic macro to make the
* shorthand.
*
* Beware: Do not apply this to functions which may return
* ERR_PTRs. Also, it is probably unwise to apply it to functions
* returning extra information in the low bits (but in that case the
* compiler should see some alignment anyway, when the return value is
* massaged by 'flags = ptr & 3; ptr &= ~3;').
*/
#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
#endif
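/*
 * Usage sketch (illustrative; the function name is hypothetical):
 * promising the optimizer that a returned pointer is 64-byte aligned,
 * e.g. for callers that vectorize over cache-line-aligned buffers.
 */
#if 0
__assume_aligned(64) void *example_alloc_cacheline(size_t size);
#endif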
 
/*
* GCC 'asm goto' miscompiles certain code sequences:
*
* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
*
* Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
*
* (asm goto is automatically volatile - the naming reflects this.)
*/
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
 
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if GCC_VERSION >= 40400
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#endif
#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
 
#if GCC_VERSION >= 50000
#define KASAN_ABI_VERSION 4
#elif GCC_VERSION >= 40902
#define KASAN_ABI_VERSION 3
#endif
 
#if GCC_VERSION >= 40902
/*
* Tell the compiler that address safety instrumentation (KASAN)
* should not be applied to that function.
* Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
*/
#define __no_sanitize_address __attribute__((no_sanitize_address))
#endif
 
#endif /* gcc version >= 40000 specific checks */
 
#if !defined(__noclone)
#define __noclone /* not needed */
#endif
 
#if !defined(__no_sanitize_address)
#define __no_sanitize_address
#endif
 
/*
* A trick to suppress uninitialized variable warning without generating any
* code
*/
#define uninitialized_var(x) x = x
 
#define __always_inline inline __attribute__((always_inline))
/drivers/include/linux/compiler.h
17,6 → 17,7
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
# define __pmem __attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
#else
42,6 → 43,7
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __pmem
#endif
 
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
54,7 → 56,11
#include <linux/compiler-gcc.h>
#endif
 
#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif
 
/* Intel compiler defines __GNUC__. So we will overwrite implementations
* coming from above header files here
165,6 → 171,10
# define barrier() __memory_barrier()
#endif
 
#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
 
/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
188,46 → 198,56
 
#include <uapi/linux/types.h>
 
static __always_inline void data_access_exceeds_word_size(void)
#ifdef __compiletime_warning
__compiletime_warning("data access exceeds word size and won't be atomic")
#endif
;
#define __READ_ONCE_SIZE \
({ \
switch (size) { \
case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
default: \
barrier(); \
__builtin_memcpy((void *)res, (const void *)p, size); \
barrier(); \
} \
})
 
static __always_inline void data_access_exceeds_word_size(void)
static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
__READ_ONCE_SIZE;
}
 
static __always_inline void __read_once_size(volatile void *p, void *res, int size)
#ifdef CONFIG_KASAN
/*
* This function is not 'inline' because __no_sanitize_address conflicts
* with inlining. Attempting to inline it may cause a build failure.
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
switch (size) {
case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
#ifdef CONFIG_64BIT
case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
#endif
default:
barrier();
__builtin_memcpy((void *)res, (const void *)p, size);
data_access_exceeds_word_size();
barrier();
__READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
__READ_ONCE_SIZE;
}
#endif
 
static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
switch (size) {
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
#ifdef CONFIG_64BIT
case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
#endif
default:
barrier();
__builtin_memcpy((void *)p, (const void *)res, size);
data_access_exceeds_word_size();
barrier();
}
}
235,15 → 255,15
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
* READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
* READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
* compiler is aware of some particular ordering. One way to make the
* compiler aware of ordering is to put the two invocations of READ_ONCE,
* ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
* WRITE_ONCE or ACCESS_ONCE() in different C statements.
*
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
* READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
* READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
* compile-time warning.
*
* Their two major use cases are: (1) Mediating communication between
254,12 → 274,31
* required ordering.
*/
 
#define READ_ONCE(x) \
({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
#define __READ_ONCE(x, check) \
({ \
union { typeof(x) __val; char __c[1]; } __u; \
if (check) \
__read_once_size(&(x), __u.__c, sizeof(x)); \
else \
__read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
__u.__val; \
})
#define READ_ONCE(x) __READ_ONCE(x, 1)
 
#define ASSIGN_ONCE(val, x) \
({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
/*
* Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
* to hide memory access from KASAN.
*/
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
 
#define WRITE_ONCE(x, val) \
({ \
union { typeof(x) __val; char __c[1]; } __u = \
{ .__val = (__force typeof(x)) (val) }; \
__write_once_size(&(x), __u.__c, sizeof(x)); \
__u.__val; \
})
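/*
 * Usage sketch (illustrative; the example_* names are hypothetical):
 * a lockless ready flag. The *_ONCE() accessors stop the compiler
 * from caching, tearing or refetching the accesses; CPU-level
 * ordering against any payload still needs explicit barriers.
 */
#if 0
static int example_ready;

static inline void example_publish(void)
{
	WRITE_ONCE(example_ready, 1);		/* single, untorn store */
}

static inline int example_poll(void)
{
	return READ_ONCE(example_ready);	/* re-read on every call */
}
#endif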
 
#endif /* __KERNEL__ */
 
#endif /* __ASSEMBLY__ */
378,6 → 417,14
#define __visible
#endif
 
/*
* Assume alignment of return value.
*/
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif
 
 
/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
385,7 → 432,7
 
/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
 
/* Compile time object size, -1 for unknown */
447,13 → 494,39
* to make the compiler aware of ordering is to put the two invocations of
* ACCESS_ONCE() in different C statements.
*
* This macro does absolutely -nothing- to prevent the CPU from reordering,
* merging, or refetching absolutely anything at any time. Its main intended
* use is to mediate communication between process-level code and irq/NMI
* handlers, all running on the same CPU.
* ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
* on a union member will work as long as the size of the member matches the
* size of the union and the size is smaller than word size.
*
* The major use cases of ACCESS_ONCE used to be (1) Mediating communication
* between process-level code and irq/NMI handlers, all running on the same CPU,
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* mutilate accesses that either do not require ordering or that interact
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*
* If possible use READ_ONCE()/WRITE_ONCE() instead.
*/
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
#define __ACCESS_ONCE(x) ({ \
__maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
 
/**
* lockless_dereference() - safely load a pointer for later dereference
* @p: The pointer to load
*
* Similar to rcu_dereference(), but for situations where the pointed-to
* object's lifetime is managed by something other than RCU. That
* "something other" might be reference counting or simple immortality.
*/
#define lockless_dereference(p) \
({ \
typeof(p) _________p1 = READ_ONCE(p); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
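/*
 * Usage sketch (illustrative; struct example_cfg is hypothetical): a
 * pointer that is published once at init time and then immortal, so
 * its lifetime needs neither RCU nor reference counting.
 */
#if 0
struct example_cfg {
	int value;
};
struct example_cfg *example_cfg_ptr;	/* written once during init */

static inline int example_cfg_value(void)
{
	struct example_cfg *c = lockless_dereference(example_cfg_ptr);

	return c ? c->value : 0;
}
#endif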
 
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes __attribute__((__section__(".kprobes.text")))
/drivers/include/linux/component.h
0,0 → 1,39
#ifndef COMPONENT_H
#define COMPONENT_H
 
struct device;
 
struct component_ops {
int (*bind)(struct device *, struct device *, void *);
void (*unbind)(struct device *, struct device *, void *);
};
 
int component_add(struct device *, const struct component_ops *);
void component_del(struct device *, const struct component_ops *);
 
int component_bind_all(struct device *, void *);
void component_unbind_all(struct device *, void *);
 
struct master;
 
struct component_master_ops {
int (*add_components)(struct device *, struct master *);
int (*bind)(struct device *);
void (*unbind)(struct device *);
};
 
int component_master_add(struct device *, const struct component_master_ops *);
void component_master_del(struct device *,
const struct component_master_ops *);
 
int component_master_add_child(struct master *master,
int (*compare)(struct device *, void *), void *compare_data);
 
struct component_match;
 
int component_master_add_with_match(struct device *,
const struct component_master_ops *, struct component_match *);
void component_match_add(struct device *, struct component_match **,
int (*compare)(struct device *, void *), void *compare_data);
 
#endif
/drivers/include/linux/cpumask.h
11,6 → 11,7
#include <linux/bitmap.h>
#include <linux/bug.h>
 
/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
 
/**
22,6 → 23,14
*/
#define cpumask_bits(maskp) ((maskp)->bits)
 
/**
* cpumask_pr_args - printf args to output a cpumask
* @maskp: cpumask to be printed
*
* Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
*/
#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
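/*
 * Usage sketch (illustrative; the example_* name is hypothetical):
 * the '%*pb' and '%*pbl' printk format extensions consume exactly the
 * (field width, bitmap pointer) pair that cpumask_pr_args() expands to.
 */
#if 0
static inline void example_print_mask(const struct cpumask *mask)
{
	pr_info("cpus as list: %*pbl, as hex: %*pb\n",
		cpumask_pr_args(mask), cpumask_pr_args(mask));
}
#endif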
 
#if NR_CPUS == 1
#define nr_cpu_ids 1
#else
142,10 → 151,8
return 1;
}
 
static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
{
	set_bit(0, cpumask_bits(dstp));

	return 0;
}
static inline unsigned int cpumask_local_spread(unsigned int i, int node)
{
	return 0;
}
 
199,7 → 206,7
 
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
unsigned int cpumask_local_spread(unsigned int i, int node);
 
/**
* for_each_cpu - iterate over every cpu in a mask
281,11 → 288,11
* @cpumask: the cpumask pointer
*
* Returns 1 if @cpu is set in @cpumask, else returns 0
*
* No static inline type checking - see Subtlety (1) above.
*/
#define cpumask_test_cpu(cpu, cpumask) \
test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))
static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
 
/**
* cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
539,21 → 546,6
#define cpumask_of(cpu) (get_cpu_mask(cpu))
 
/**
* cpumask_scnprintf - print a cpumask into a string as comma-separated hex
* @buf: the buffer to sprintf into
* @len: the length of the buffer
* @srcp: the cpumask to print
*
* If len is zero, returns zero. Otherwise returns the length of the
* (nul-terminated) @buf string.
*/
static inline int cpumask_scnprintf(char *buf, int len,
const struct cpumask *srcp)
{
return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits);
}
 
/**
* cpumask_parse_user - extract a cpumask from a user string
* @buf: the buffer to extract from
* @len: the length of the buffer
564,7 → 556,7
static inline int cpumask_parse_user(const char __user *buf, int len,
struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
}
 
/**
579,26 → 571,10
struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
nr_cpumask_bits);
nr_cpu_ids);
}
 
/**
* cpulist_scnprintf - print a cpumask into a string as comma-separated list
* @buf: the buffer to sprintf into
* @len: the length of the buffer
* @srcp: the cpumask to print
*
* If len is zero, returns zero. Otherwise returns the length of the
* (nul-terminated) @buf string.
*/
static inline int cpulist_scnprintf(char *buf, int len,
const struct cpumask *srcp)
{
return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp),
nr_cpumask_bits);
}
 
/**
* cpumask_parse - extract a cpumask from a string
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
610,7 → 586,7
char *nl = strchr(buf, '\n');
unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
 
return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
}
 
/**
622,7 → 598,7
*/
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
}
 
/**
632,9 → 608,7
*/
static inline size_t cpumask_size(void)
{
/* FIXME: Once all cpumask assignments are eliminated, this
* can be nr_cpumask_bits */
return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
}
 
/*
791,7 → 765,7
#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL \
{ \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
}
 
#else /* NR_CPUS > BITS_PER_LONG */
799,7 → 773,7
#define CPU_BITS_ALL \
{ \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
}
#endif /* NR_CPUS > BITS_PER_LONG */
 
817,36 → 791,22
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
nr_cpumask_bits);
nr_cpu_ids);
}
 
/*
*
* From here down, all obsolete. Use cpumask_ variants!
*
*/
#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
 
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
 
#if NR_CPUS <= BITS_PER_LONG
 
#define CPU_MASK_ALL \
(cpumask_t) { { \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
} }
 
#else
 
#define CPU_MASK_ALL \
(cpumask_t) { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
} }
#endif /* NR_CPUS > BITS_PER_LONG */
 
#endif
 
#define CPU_MASK_NONE \
(cpumask_t) { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
857,143 → 817,4
[0] = 1UL \
} }
 
#if NR_CPUS == 1
#define first_cpu(src) ({ (void)(src); 0; })
#define next_cpu(n, src) ({ (void)(src); 1; })
#define any_online_cpu(mask) 0
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#else /* NR_CPUS > 1 */
int __first_cpu(const cpumask_t *srcp);
int __next_cpu(int n, const cpumask_t *srcp);
 
#define first_cpu(src) __first_cpu(&(src))
#define next_cpu(n, src) __next_cpu((n), &(src))
#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask)
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = -1; \
(cpu) = next_cpu((cpu), (mask)), \
(cpu) < NR_CPUS; )
#endif /* SMP */
 
#if NR_CPUS <= 64
 
#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
 
#else /* NR_CPUS > 64 */
 
int __next_cpu_nr(int n, const cpumask_t *srcp);
#define for_each_cpu_mask_nr(cpu, mask) \
for ((cpu) = -1; \
(cpu) = __next_cpu_nr((cpu), &(mask)), \
(cpu) < nr_cpu_ids; )
 
#endif /* NR_CPUS > 64 */
 
#define cpus_addr(src) ((src).bits)
 
#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
set_bit(cpu, dstp->bits);
}
 
#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
clear_bit(cpu, dstp->bits);
}
 
#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
static inline void __cpus_setall(cpumask_t *dstp, int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
 
#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
static inline void __cpus_clear(cpumask_t *dstp, int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
 
/* No static inline type checking - see Subtlety (1) above. */
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
 
#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
return test_and_set_bit(cpu, addr->bits);
}
 
#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
 
#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
 
#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
 
#define cpus_andnot(dst, src1, src2) \
__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
 
#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
static inline int __cpus_equal(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
 
#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
static inline int __cpus_intersects(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
 
#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
static inline int __cpus_subset(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
 
#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
 
#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
 
#define cpus_shift_left(dst, src, n) \
__cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_left(cpumask_t *dstp,
const cpumask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
 
#endif /* __LINUX_CPUMASK_H */
/drivers/include/linux/device.h
0,0 → 1,99
/*
* device.h - generic, centralized driver model
*
* Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
* Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2008-2009 Novell Inc.
*
* This file is released under the GPLv2
*
* See Documentation/driver-model/ for more information.
*/
 
#ifndef _DEVICE_H_
#define _DEVICE_H_
 
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/mutex.h>

struct device;

enum probe_type {
PROBE_DEFAULT_STRATEGY,
PROBE_PREFER_ASYNCHRONOUS,
PROBE_FORCE_SYNCHRONOUS,
};
 
struct device_driver {
const char *name;
const char *mod_name; /* used for built-in modules */
 
bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
enum probe_type probe_type;
};
 
struct device {
struct device *parent;
 
const char *init_name; /* initial name of the device */
struct device_driver *driver; /* which driver has allocated this
device */
void *platform_data; /* Platform specific data, device
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set/get_drvdata */
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
struct irq_domain *msi_domain;
#endif
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
#ifdef CONFIG_GENERIC_MSI_IRQ
struct list_head msi_list;
#endif
 
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
#endif
#ifdef CONFIG_DMA_CMA
struct cma *cma_area; /* contiguous memory area for dma
allocations */
#endif
};
 
extern __printf(2, 3)
int dev_set_name(struct device *dev, const char *name, ...);
 
#ifdef CONFIG_NUMA
static inline int dev_to_node(struct device *dev)
{
return dev->numa_node;
}
static inline void set_dev_node(struct device *dev, int node)
{
dev->numa_node = node;
}
#else
static inline int dev_to_node(struct device *dev)
{
return -1;
}
static inline void set_dev_node(struct device *dev, int node)
{
}
#endif
 
 
static inline void *dev_get_drvdata(const struct device *dev)
{
return dev->driver_data;
}
 
static inline void dev_set_drvdata(struct device *dev, void *data)
{
dev->driver_data = data;
}
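/*
 * Usage sketch (illustrative; struct example_ctx is hypothetical):
 * stash a driver context on the device at probe time and fetch it
 * back from any callback that only receives the struct device.
 */
#if 0
struct example_ctx {
	int id;
};

static inline void example_attach(struct device *dev, struct example_ctx *ctx)
{
	dev_set_drvdata(dev, ctx);
}

static inline struct example_ctx *example_ctx_of(struct device *dev)
{
	return dev_get_drvdata(dev);
}
#endif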
 
 
 
#endif /* _DEVICE_H_ */
/drivers/include/linux/dma-buf.h
115,6 → 115,8
* @attachments: list of dma_buf_attachment that denotes all devices attached.
* @ops: dma_buf_ops associated with this buffer object.
* @exp_name: name of the exporter; useful for debugging.
* @owner: pointer to exporter module; used for refcounting when exporter is a
* kernel module.
* @list_node: node for dma_buf accounting and debugging.
* @priv: exporter specific private data for this buffer object.
* @resv: reservation object linked to this dma-buf
170,13 → 172,8
void dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *dmabuf_attach);
 
struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
size_t size, int flags, const char *,
struct reservation_object *);
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
 
#define dma_buf_export(priv, ops, size, flags, resv) \
dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv)
 
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);
/drivers/include/linux/dma_remapping.h
20,7 → 20,15
#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
#define CONTEXT_TT_PASS_THROUGH 2
/* Extended context entry types */
#define CONTEXT_TT_PT_PASID 4
#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5
#define CONTEXT_TT_MASK (7ULL << 2)
 
#define CONTEXT_DINVE (1ULL << 8)
#define CONTEXT_PRS (1ULL << 9)
#define CONTEXT_PASIDE (1ULL << 11)
 
struct intel_iommu;
struct dmar_domain;
struct root_entry;
/drivers/include/linux/dmapool.h
19,6 → 19,12
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle);
 
static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle)
{
return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
}
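/*
 * Usage sketch (illustrative; the example_* name is hypothetical, and
 * the pool is assumed to have been created elsewhere with
 * dma_pool_create()): allocate a pre-zeroed block and its bus address
 * in one call instead of dma_pool_alloc() + memset().
 */
#if 0
static inline void *example_alloc_desc(struct dma_pool *pool, dma_addr_t *dma)
{
	return dma_pool_zalloc(pool, GFP_KERNEL, dma);
}
#endif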
 
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
 
/*
/drivers/include/linux/dmi.h
74,7 → 74,7
u8 type;
u8 length;
u16 handle;
};
} __packed;
 
struct dmi_device {
struct list_head list;
/drivers/include/linux/fb.h
1111,7 → 1111,9
struct fb_videomode *fbmode);
 
/* drivers/video/modedb.c */
#define VESA_MODEDB_SIZE 34
#define VESA_MODEDB_SIZE 43
#define DMT_SIZE 0x50
 
extern void fb_var_to_videomode(struct fb_videomode *mode,
const struct fb_var_screeninfo *var);
extern void fb_videomode_to_var(struct fb_var_screeninfo *var,
1162,9 → 1164,17
u32 flag;
};
 
struct dmt_videomode {
u32 dmt_id;
u32 std_2byte_code;
u32 cvt_3byte_code;
const struct fb_videomode *mode;
};
 
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
extern const struct fb_videomode cea_modes[64];
extern const struct dmt_videomode dmt_modes[];
 
struct fb_modelist {
struct list_head list;
1178,4 → 1188,16
const struct fb_videomode *default_mode,
unsigned int default_bpp);
 
/* Convenience logging macros */
#define fb_err(fb_info, fmt, ...) \
pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_notice(fb_info, fmt, ...) \
pr_notice("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_warn(fb_info, fmt, ...) \
pr_warn("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_info(fb_info, fmt, ...) \
pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_dbg(fb_info, fmt, ...) \
pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
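/*
 * Usage sketch (illustrative; the example_* name is hypothetical):
 * each macro prefixes the message with the framebuffer node, e.g.
 * "fb0: switching to 1024x768".
 */
#if 0
static inline void example_report_mode(struct fb_info *info, int xres, int yres)
{
	fb_info(info, "switching to %dx%d\n", xres, yres);
}
#endif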
 
#endif /* _LINUX_FB_H */
/drivers/include/linux/fence.h
77,7 → 77,7
spinlock_t *lock;
unsigned context, seqno;
unsigned long flags;
// ktime_t timestamp;
ktime_t timestamp;
int status;
};
 
280,6 → 280,22
}
 
/**
* fence_is_later - return if f1 is chronologically later than f2
* @f1: [in] the first fence from the same context
* @f2: [in] the second fence from the same context
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not re-used across contexts.
*/
static inline bool fence_is_later(struct fence *f1, struct fence *f2)
{
if (WARN_ON(f1->context != f2->context))
return false;
 
return f1->seqno - f2->seqno < INT_MAX;
}
 
/**
* fence_later - return the chronologically later fence
* @f1: [in] the first fence from the same context
* @f2: [in] the second fence from the same context
298,15 → 314,16
* set if enable_signaling wasn't called, and enabling that here is
* overkill.
*/
if (f2->seqno - f1->seqno <= INT_MAX)
if (fence_is_later(f1, f2))
return fence_is_signaled(f1) ? NULL : f1;
else
return fence_is_signaled(f2) ? NULL : f2;
else
return fence_is_signaled(f1) ? NULL : f1;
}
 
signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
bool intr, signed long timeout);
 
 
/**
* fence_wait - sleep until the fence gets signaled
* @fence: [in] the fence to wait on
/drivers/include/linux/firmware.h
1,10 → 1,11
#ifndef _LINUX_FIRMWARE_H
#define _LINUX_FIRMWARE_H
 
#include <linux/module.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/module.h>
 
#define FW_ACTION_NOHOTPLUG 0
#define FW_ACTION_HOTPLUG 1
/drivers/include/linux/gfp.h
13,7 → 13,7
#define ___GFP_HIGHMEM 0x02u
#define ___GFP_DMA32 0x04u
#define ___GFP_MOVABLE 0x08u
#define ___GFP_WAIT 0x10u
#define ___GFP_RECLAIMABLE 0x10u
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
28,18 → 28,18
#define ___GFP_NOMEMALLOC 0x10000u
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_RECLAIMABLE 0x80000u
#define ___GFP_ATOMIC 0x80000u
#define ___GFP_NOACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_NO_KSWAPD 0x400000u
#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
#define ___GFP_KSWAPD_RECLAIM 0x2000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
/*
* GFP bitmasks..
* Physical address zone modifiers (see linux/mmzone.h - low four bits)
*
* Zone modifiers (see linux/mmzone.h - low three bits)
*
* Do not put any conditional on these. If necessary modify the definitions
* without the underscores and use them consistently. The definitions here may
* be used in bit comparisons.
48,110 → 48,219
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 
/*
* Action modifiers - doesn't change the zoning
* Page mobility and placement hints
*
* These flags provide hints about how mobile the page is. Pages with similar
* mobility are placed within the same pageblocks to minimise problems due
* to external fragmentation.
*
* __GFP_MOVABLE (also a zone modifier) indicates that the page can be
* moved by page migration during memory compaction or can be reclaimed.
*
* __GFP_RECLAIMABLE is used for slab allocations that specify
* SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
*
* __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
* these pages will be spread between local zones to avoid all the dirty
* pages being in one zone (fair zone allocation policy).
*
* __GFP_HARDWALL enforces the cpuset memory allocation policy.
*
* __GFP_THISNODE forces the allocation to be satisfied from the requested
* node with no fallbacks or placement policy enforcements.
*/
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
 
/*
* Watermark modifiers -- controls access to emergency reserves
*
* __GFP_HIGH indicates that the caller is high-priority and that granting
* the request is necessary before the system can make forward progress.
* For example, creating an IO context to clean pages.
*
* __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
* high priority. Users are typically interrupt handlers. This may be
* used in conjunction with __GFP_HIGH.
*
* __GFP_MEMALLOC allows access to all memory. This should only be used when
* the caller guarantees the allocation will allow more memory to be freed
* very shortly e.g. process exiting or swapping. Users either should
* be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
*
* __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
* This takes precedence over the __GFP_MEMALLOC flag if both are set.
*
* __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement.
*/
#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT)
 
/*
* Reclaim modifiers
*
* __GFP_IO can start physical IO.
*
* __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
* allocator recursing into the filesystem which might already be holding
* locks.
*
* __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
* This flag can be cleared to avoid unnecessary delays when a fallback
* option is available.
*
* __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
* the low watermark is reached and have it reclaim pages until the high
* watermark is reached. A caller may wish to clear this flag when fallback
* options are available and the reclaim is likely to disrupt the system. The
* canonical example is THP allocation where a fallback is cheap but
* reclaim/compaction may cause indirect stalls.
*
* __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
*
* __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
* _might_ fail. This depends upon the particular VM implementation.
*
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
* cannot handle allocation failures. This modifier is deprecated and no new
* users should be added.
* cannot handle allocation failures. New users should be evaluated carefully
* (and the flag should be used only when there is no reasonable failure
* policy) but it is definitely preferable to use the flag rather than to
* open-code an endless loop around the allocator.
*
* __GFP_NORETRY: The VM implementation must not retry indefinitely.
*
* __GFP_MOVABLE: Flag that this page will be movable by the page migration
* mechanism or reclaimed
* __GFP_NORETRY: The VM implementation must not retry indefinitely and will
* return NULL when direct reclaim and memory compaction have failed to allow
* the allocation to succeed. The OOM killer is not called with the current
* implementation.
*/
#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */
#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */
#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */
#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
* This takes precedence over the
* __GFP_MEMALLOC flag if both are
* set
*/
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
#define __GFP_IO ((__force gfp_t)___GFP_IO)
#define __GFP_FS ((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT)
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
 
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
 
/*
* This may seem redundant, but it's a way of annotating false positives vs.
* allocations that simply cannot be supported (e.g. page tables).
* Action modifiers
*
* __GFP_COLD indicates that the caller does not expect the page to be used
* in the near future. Where possible, a cache-cold page will be returned.
*
* __GFP_NOWARN suppresses allocation failure reports.
*
* __GFP_COMP adds compound page metadata.
*
* __GFP_ZERO returns a zeroed page on success.
*
* __GFP_NOTRACK avoids tracking with kmemcheck.
*
* __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
* distinguishing in the source between false positives and allocations that
* cannot be supported (e.g. page tables).
*
* __GFP_OTHER_NODE is for allocations that are on a remote node but that
* should not be accounted for as a remote allocation in vmstat. A
* typical user would be khugepaged collapsing a huge page on a remote
* node.
*/
#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
 
#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_NOIO (__GFP_WAIT)
#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
/*
* Useful GFP flag combinations that are commonly used. It is recommended
* that subsystems start with one of these combinations and then set/clear
* __GFP_FOO flags as necessary.
*
* GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
* watermark is applied to allow access to "atomic reserves".
*
* GFP_KERNEL is typical for kernel-internal allocations. The caller requires
* ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
*
* GFP_NOWAIT is for kernel allocations that should not stall for direct
* reclaim, start physical IO or use any filesystem callback.
*
* GFP_NOIO will use direct reclaim to discard clean pages or slab pages
* that do not require the starting of any physical IO.
*
* GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
*
* GFP_USER is for userspace allocations that also need to be directly
* accessible by the kernel or hardware. It is typically used by hardware
* for buffers that are mapped to userspace (e.g. graphics) that hardware
* still must DMA to. cpuset limits are enforced for these allocations.
*
* GFP_DMA exists for historical reasons and should be avoided where possible.
* The flag indicates that the caller requires that the lowest zone be
* used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
* it would require careful auditing as some users really require it and
* others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
* lowest zone as a type of emergency reserve.
*
* GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
* address.
*
* GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
* do not need to be directly accessible by the kernel but that cannot
* move once in use. An example may be a hardware allocation that maps
* data directly into userspace but has no addressing limitations.
*
* GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
* need direct access to but can use kmap() when access is required. They
* are expected to be movable via page reclaim or page migration. Typically,
* pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
*
* GFP_TRANSHUGE is used for THP allocations. They are compound allocations
* that will fail quickly if memory is not available and will not wake
* kswapd on failure.
*/
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO (__GFP_RECLAIM)
#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
#define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
__GFP_RECLAIMABLE)
#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA __GFP_DMA
#define GFP_DMA32 __GFP_DMA32
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_IOFS (__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
__GFP_NO_KSWAPD)
#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
~__GFP_KSWAPD_RECLAIM)
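A hedged illustration of picking between these combinations by calling context (kmalloc() is the standard kernel allocator; the surrounding driver logic is made up):

/* Editor's sketch: choose the base combination by whether we may sleep. */
static void *example_alloc(size_t len, bool atomic_context)
{
	if (atomic_context)
		return kmalloc(len, GFP_ATOMIC);	/* no sleeping, may tap reserves */
	return kmalloc(len, GFP_KERNEL);	/* may direct-reclaim and start IO */
}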
 
/*
* GFP_THISNODE does not perform any reclaim, you most likely want to
* use __GFP_THISNODE to allocate from a given node without fallback!
*/
#ifdef CONFIG_NUMA
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE ((__force gfp_t)0)
#endif
 
/* This mask makes up all the page movable related flags */
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3
 
/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
 
/* Control slab gfp mask during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
return gfp_flags & __GFP_DIRECT_RECLAIM;
}
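Callers handed a gfp_t can use this helper to pick between a sleeping and a non-sleeping path; a sketch under assumed names (the mutex and the refill step are hypothetical):

static int example_get_buffer(gfp_t gfp, struct mutex *lock)
{
	if (gfpflags_allow_blocking(gfp))
		mutex_lock(lock);	/* __GFP_DIRECT_RECLAIM set: may sleep */
	else if (!mutex_trylock(lock))
		return -EAGAIN;		/* atomic caller: must not block */
	/* ... refill or allocate here ... */
	mutex_unlock(lock);
	return 0;
}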
 
/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
 
/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
 
/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
platforms, used as appropriate on others */
 
#define GFP_DMA __GFP_DMA
 
/* 4GB DMA on some platforms */
#define GFP_DMA32 __GFP_DMA32
 
 
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
/drivers/include/linux/gpio/consumer.h
0,0 → 1,455
#ifndef __LINUX_GPIO_CONSUMER_H
#define __LINUX_GPIO_CONSUMER_H
 
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
 
struct device;
 
/**
* Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are
* preferable to the old integer-based handles.
*
* Unlike an integer handle, a pointer to a gpio_desc is guaranteed to be valid
* until the GPIO is released.
*/
struct gpio_desc;
 
/**
* Struct containing an array of descriptors that can be obtained using
* gpiod_get_array().
*/
struct gpio_descs {
unsigned int ndescs;
struct gpio_desc *desc[];
};
 
#define GPIOD_FLAGS_BIT_DIR_SET BIT(0)
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
 
/**
* Optional flags that can be passed to one of gpiod_* to configure direction
* and output value. These values cannot be OR'd.
*/
enum gpiod_flags {
GPIOD_ASIS = 0,
GPIOD_IN = GPIOD_FLAGS_BIT_DIR_SET,
GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT,
GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
GPIOD_FLAGS_BIT_DIR_VAL,
};
 
#ifdef CONFIG_GPIOLIB
 
/* Return the number of GPIOs associated with a device / function */
int gpiod_count(struct device *dev, const char *con_id);
 
/* Acquire and dispose GPIOs */
struct gpio_desc *__must_check gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags);
struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
void gpiod_put(struct gpio_desc *desc);
void gpiod_put_array(struct gpio_descs *descs);
 
struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check
devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags);
struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags);
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs);
 
int gpiod_get_direction(struct gpio_desc *desc);
int gpiod_direction_input(struct gpio_desc *desc);
int gpiod_direction_output(struct gpio_desc *desc, int value);
int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
 
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
void gpiod_set_value(struct gpio_desc *desc, int value);
void gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array, int *value_array);
int gpiod_get_raw_value(const struct gpio_desc *desc);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
void gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
 
/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
void gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
 
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
 
int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
 
int gpiod_to_irq(const struct gpio_desc *desc);
 
/* Convert between the old gpio_ and new gpiod_ interfaces */
struct gpio_desc *gpio_to_desc(unsigned gpio);
int desc_to_gpio(const struct gpio_desc *desc);
 
/* Child properties interface */
struct fwnode_handle;
 
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname);
struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
const char *con_id,
struct fwnode_handle *child);
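A condensed usage sketch of the descriptor API declared above ("reset" is a hypothetical connection ID; IS_ERR()/PTR_ERR() come from the included linux/err.h):

static int example_reset_device(struct device *dev)
{
	struct gpio_desc *reset;

	reset = gpiod_get(dev, "reset", GPIOD_OUT_LOW);	/* claimed, driven low */
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value(reset, 1);	/* assert reset (logical value) */
	gpiod_set_value(reset, 0);	/* deassert */
	gpiod_put(reset);
	return 0;
}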
#else /* CONFIG_GPIOLIB */
 
static inline int gpiod_count(struct device *dev, const char *con_id)
{
return 0;
}
 
static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_desc *__must_check
gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_desc *__must_check
gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_descs *__must_check
gpiod_get_array(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_descs *__must_check
gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline void gpiod_put(struct gpio_desc *desc)
{
might_sleep();
 
/* GPIO can never have been requested */
WARN_ON(1);
}
 
static inline void gpiod_put_array(struct gpio_descs *descs)
{
might_sleep();
 
/* GPIO can never have been requested */
WARN_ON(1);
}
 
static inline struct gpio_desc *__must_check
devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline
struct gpio_desc *__must_check
devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_desc *__must_check
devm_gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_desc *__must_check
devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_descs *__must_check
devm_gpiod_get_array(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
{
might_sleep();
 
/* GPIO can never have been requested */
WARN_ON(1);
}
 
static inline void devm_gpiod_put_array(struct device *dev,
struct gpio_descs *descs)
{
might_sleep();
 
/* GPIO can never have been requested */
WARN_ON(1);
}
 
 
static inline int gpiod_get_direction(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return -ENOSYS;
}
static inline int gpiod_direction_input(struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return -ENOSYS;
}
static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
return -ENOSYS;
}
static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
return -ENOSYS;
}
 
 
static inline int gpiod_get_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}
static inline void gpiod_set_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline void gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}
static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline void gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
 
static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}
static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}
static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
 
static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
/* GPIO can never have been requested */
WARN_ON(1);
return -ENOSYS;
}
 
static inline int gpiod_is_active_low(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}
static inline int gpiod_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}
 
static inline int gpiod_to_irq(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return -EINVAL;
}
 
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
{
return ERR_PTR(-EINVAL);
}
 
static inline int desc_to_gpio(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return -EINVAL;
}
 
/* Child properties interface */
struct fwnode_handle;
 
static inline struct gpio_desc *fwnode_get_named_gpiod(
struct fwnode_handle *fwnode, const char *propname)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_desc *devm_get_gpiod_from_child(
struct device *dev, const char *con_id, struct fwnode_handle *child)
{
return ERR_PTR(-ENOSYS);
}
 
#endif /* CONFIG_GPIOLIB */
 
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
 
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc);
void gpiod_unexport(struct gpio_desc *desc);
 
#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
 
static inline int gpiod_export(struct gpio_desc *desc,
bool direction_may_change)
{
return -ENOSYS;
}
 
static inline int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc)
{
return -ENOSYS;
}
 
static inline void gpiod_unexport(struct gpio_desc *desc)
{
}
 
#endif /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
 
#endif
/drivers/include/linux/hdmi.h
25,6 → 25,7
#define __LINUX_HDMI_H_
 
#include <linux/types.h>
#include <linux/device.h>
 
enum hdmi_infoframe_type {
HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
52,6 → 53,11
HDMI_COLORSPACE_RGB,
HDMI_COLORSPACE_YUV422,
HDMI_COLORSPACE_YUV444,
HDMI_COLORSPACE_YUV420,
HDMI_COLORSPACE_RESERVED4,
HDMI_COLORSPACE_RESERVED5,
HDMI_COLORSPACE_RESERVED6,
HDMI_COLORSPACE_IDO_DEFINED,
};
 
enum hdmi_scan_mode {
58,6 → 64,7
HDMI_SCAN_MODE_NONE,
HDMI_SCAN_MODE_OVERSCAN,
HDMI_SCAN_MODE_UNDERSCAN,
HDMI_SCAN_MODE_RESERVED,
};
 
enum hdmi_colorimetry {
71,6 → 78,7
HDMI_PICTURE_ASPECT_NONE,
HDMI_PICTURE_ASPECT_4_3,
HDMI_PICTURE_ASPECT_16_9,
HDMI_PICTURE_ASPECT_RESERVED,
};
 
enum hdmi_active_aspect {
92,6 → 100,11
HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
 
/* The following EC values are only defined in CEA-861-F. */
HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,
HDMI_EXTENDED_COLORIMETRY_BT2020,
HDMI_EXTENDED_COLORIMETRY_RESERVED,
};
 
enum hdmi_quantization_range {
98,6 → 111,7
HDMI_QUANTIZATION_RANGE_DEFAULT,
HDMI_QUANTIZATION_RANGE_LIMITED,
HDMI_QUANTIZATION_RANGE_FULL,
HDMI_QUANTIZATION_RANGE_RESERVED,
};
 
/* non-uniform picture scaling */
114,7 → 128,7
};
 
enum hdmi_content_type {
HDMI_CONTENT_TYPE_NONE,
HDMI_CONTENT_TYPE_GRAPHICS,
HDMI_CONTENT_TYPE_PHOTO,
HDMI_CONTENT_TYPE_CINEMA,
HDMI_CONTENT_TYPE_GAME,
194,6 → 208,7
HDMI_AUDIO_CODING_TYPE_MLP,
HDMI_AUDIO_CODING_TYPE_DST,
HDMI_AUDIO_CODING_TYPE_WMA_PRO,
HDMI_AUDIO_CODING_TYPE_CXT,
};
 
enum hdmi_audio_sample_size {
215,10 → 230,25
};
 
enum hdmi_audio_coding_type_ext {
HDMI_AUDIO_CODING_TYPE_EXT_STREAM,
/* Refer to Audio Coding Type (CT) field in Data Byte 1 */
HDMI_AUDIO_CODING_TYPE_EXT_CT,
 
/*
* The next three CXT values are defined in CEA-861-E only.
* They do not exist in older versions, and in CEA-861-F they are
* defined as 'Not in use'.
*/
HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC,
HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND,
 
/* The following CXT values are only defined in CEA-861-F. */
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC,
HDMI_AUDIO_CODING_TYPE_EXT_DRA,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10,
};
 
struct hdmi_audio_infoframe {
299,5 → 329,8
 
ssize_t
hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
void hdmi_infoframe_log(const char *level, struct device *dev,
union hdmi_infoframe *frame);
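A hedged sketch of the unpack/log helpers declared above (the raw buffer is assumed to hold a packed infoframe; KERN_DEBUG is the usual log-level macro):

static void example_dump_infoframe(struct device *dev, void *buf)
{
	union hdmi_infoframe frame;

	if (hdmi_infoframe_unpack(&frame, buf) == 0)
		hdmi_infoframe_log(KERN_DEBUG, dev, &frame);
}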
 
#endif /* _DRM_HDMI_H */
/drivers/include/linux/i2c.h
26,11 → 26,8
#ifndef _LINUX_I2C_H
#define _LINUX_I2C_H
 
#include <linux/types.h>
#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/i2c-id.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
#include <linux/mutex.h>
#include <linux/jiffies.h>
61,8 → 58,6
* @probe: Callback for device binding
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
* @suspend: Callback for device suspend
* @resume: Callback for device resume
* @alert: Alert callback, for example for the SMBus alert protocol
* @command: Callback for bus-wide signaling (optional)
* @driver: Device driver model driver
105,8 → 100,6
 
/* driver model interfaces that don't relate to enumeration */
void (*shutdown)(struct i2c_client *);
// int (*suspend)(struct i2c_client *, pm_message_t mesg);
int (*resume)(struct i2c_client *);
 
/* Alert callback, for example for the SMBus alert protocol.
* The format and meaning of the data value depend on the protocol.
166,10 → 159,10
extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
 
enum i2c_slave_event {
I2C_SLAVE_REQ_READ_START,
I2C_SLAVE_REQ_READ_END,
I2C_SLAVE_REQ_WRITE_START,
I2C_SLAVE_REQ_WRITE_END,
I2C_SLAVE_READ_REQUESTED,
I2C_SLAVE_WRITE_REQUESTED,
I2C_SLAVE_READ_PROCESSED,
I2C_SLAVE_WRITE_RECEIVED,
I2C_SLAVE_STOP,
};
/**
180,7 → 173,7
* @platform_data: stored in i2c_client.dev.platform_data
* @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
* @acpi_node: ACPI device node
* @fwnode: device node supplied by the platform firmware
* @irq: stored in i2c_client.irq
*
* I2C doesn't actually support hardware probing, although controllers and
306,8 → 299,8
((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END })
 
 
#endif /* __KERNEL__ */
 
 
/**
* struct i2c_msg - an I2C transaction segment beginning with START
* @addr: Slave address, either seven or ten bits. When this is a ten
/drivers/include/linux/ioport.h
100,6 → 100,7
/* PnP I/O specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IO_16BIT_ADDR (1<<0)
#define IORESOURCE_IO_FIXED (1<<1)
#define IORESOURCE_IO_SPARSE (1<<2)
 
/* PCI ROM control bits (IORESOURCE_BITS) */
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
110,10 → 111,63
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
 
 
/* helpers to define resources */
#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
{ \
.start = (_start), \
.end = (_start) + (_size) - 1, \
.name = (_name), \
.flags = (_flags), \
}
 
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)
#define DEFINE_RES_IO(_start, _size) \
DEFINE_RES_IO_NAMED((_start), (_size), NULL)
 
#define DEFINE_RES_MEM_NAMED(_start, _size, _name) \
DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_MEM)
#define DEFINE_RES_MEM(_start, _size) \
DEFINE_RES_MEM_NAMED((_start), (_size), NULL)
 
#define DEFINE_RES_IRQ_NAMED(_irq, _name) \
DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)
#define DEFINE_RES_IRQ(_irq) \
DEFINE_RES_IRQ_NAMED((_irq), NULL)
 
#define DEFINE_RES_DMA_NAMED(_dma, _name) \
DEFINE_RES_NAMED((_dma), 1, (_name), IORESOURCE_DMA)
#define DEFINE_RES_DMA(_dma) \
DEFINE_RES_DMA_NAMED((_dma), NULL)
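These helpers are typically consumed by static resource tables; a sketch with made-up addresses and IRQ number:

static struct resource example_resources[] = {
	DEFINE_RES_MEM(0xfed00000, 0x1000),	/* one 4 KiB MMIO window */
	DEFINE_RES_IRQ_NAMED(42, "example-irq"),
};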
 
/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
extern struct resource ioport_resource;
extern struct resource iomem_resource;
 
extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
extern int request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
void release_child_resources(struct resource *new);
extern void reserve_region_with_split(struct resource *root,
resource_size_t start, resource_size_t end,
const char *name);
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern void arch_remove_reservations(struct resource *avail);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
resource_size_t max, resource_size_t align,
resource_size_t (*alignf)(void *,
const struct resource *,
resource_size_t,
resource_size_t),
void *alignf_data);
struct resource *lookup_resource(struct resource *root, resource_size_t start);
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
static inline resource_size_t resource_size(const struct resource *res)
{
return res->end - res->start + 1;
/drivers/include/linux/irqflags.h
85,7 → 85,7
* The local_irq_*() APIs are equal to the raw_local_irq*()
* if !TRACE_IRQFLAGS.
*/
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
#ifdef CONFIG_TRACE_IRQFLAGS
#define local_irq_enable() \
do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
#define local_irq_disable() \
107,23 → 107,7
raw_local_irq_restore(flags); \
} \
} while (0)
#define local_save_flags(flags) \
do { \
raw_local_save_flags(flags); \
} while (0)
 
#define irqs_disabled_flags(flags) \
({ \
raw_irqs_disabled_flags(flags); \
})
 
#define irqs_disabled() \
({ \
unsigned long _flags; \
raw_local_save_flags(_flags); \
raw_irqs_disabled_flags(_flags); \
})
 
#define safe_halt() \
do { \
trace_hardirqs_on(); \
131,7 → 115,7
} while (0)
 
 
#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
#else /* !CONFIG_TRACE_IRQFLAGS */
 
#define local_irq_enable() do { raw_local_irq_enable(); } while (0)
#define local_irq_disable() do { raw_local_irq_disable(); } while (0)
140,11 → 124,28
raw_local_irq_save(flags); \
} while (0)
#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
#define local_save_flags(flags) do { raw_local_save_flags(flags); } while (0)
#define irqs_disabled() (raw_irqs_disabled())
#define irqs_disabled_flags(flags) (raw_irqs_disabled_flags(flags))
#define safe_halt() do { raw_safe_halt(); } while (0)
 
#endif /* CONFIG_TRACE_IRQFLAGS */
 
#define local_save_flags(flags) raw_local_save_flags(flags)
 
/*
* Some architectures don't define arch_irqs_disabled(), so even if either
* definition would be fine we need to use different ones for the time being
* to avoid build issues.
*/
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
#define irqs_disabled() \
({ \
unsigned long _flags; \
raw_local_save_flags(_flags); \
raw_irqs_disabled_flags(_flags); \
})
#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
#define irqs_disabled() raw_irqs_disabled()
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
 
#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
 
#endif
/drivers/include/linux/irqreturn.h
3,7 → 3,7
 
/**
* enum irqreturn
* @IRQ_NONE interrupt was not from this device
* @IRQ_NONE interrupt was not from this device or was not handled
* @IRQ_HANDLED interrupt was handled by this device
* @IRQ_WAKE_THREAD handler requests to wake the handler thread
*/
/drivers/include/linux/jiffies.h
292,11 → 292,145
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
}
 
extern unsigned long msecs_to_jiffies(const unsigned int m);
extern unsigned long usecs_to_jiffies(const unsigned int u);
extern unsigned long timespec_to_jiffies(const struct timespec *value);
extern void jiffies_to_timespec(const unsigned long jiffies,
struct timespec *value);
extern unsigned long __msecs_to_jiffies(const unsigned int m);
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
/*
* HZ is equal to or smaller than 1000, and 1000 is a nice round
* multiple of HZ, divide with the factor between them, but round
* upwards:
*/
static inline unsigned long _msecs_to_jiffies(const unsigned int m)
{
return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
}
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
/*
* HZ is larger than 1000, and HZ is a nice round multiple of 1000 -
* simply multiply with the factor between them.
*
* But first make sure the multiplication result cannot overflow:
*/
static inline unsigned long _msecs_to_jiffies(const unsigned int m)
{
if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
return m * (HZ / MSEC_PER_SEC);
}
#else
/*
* Generic case - multiply, round and divide. But first check that if
* we are doing a net multiplication, that we wouldn't overflow:
*/
static inline unsigned long _msecs_to_jiffies(const unsigned int m)
{
if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
 
return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32;
}
#endif
/**
* msecs_to_jiffies: - convert milliseconds to jiffies
* @m: time in milliseconds
*
* conversion is done as follows:
*
* - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
*
* - 'too large' values [that would result in larger than
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
*
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor and
* handling any 32-bit overflows.
* for the details see __msecs_to_jiffies()
*
* msecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
* code, __msecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
* the HZ range specific helpers _msecs_to_jiffies() are called both
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
*/
static inline unsigned long msecs_to_jiffies(const unsigned int m)
{
if (__builtin_constant_p(m)) {
if ((int)m < 0)
return MAX_JIFFY_OFFSET;
return _msecs_to_jiffies(m);
} else {
return __msecs_to_jiffies(m);
}
}
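A short sketch of the two paths described above (names hypothetical): a constant argument folds at compile time, while a runtime value goes through __msecs_to_jiffies().

static unsigned long example_timeouts(unsigned int runtime_ms)
{
	unsigned long fixed = msecs_to_jiffies(100);	/* constant-folded */

	return fixed + msecs_to_jiffies(runtime_ms);	/* runtime conversion */
}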
 
extern unsigned long __usecs_to_jiffies(const unsigned int u);
#if !(USEC_PER_SEC % HZ)
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
}
#else
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
>> USEC_TO_HZ_SHR32;
}
#endif
 
/**
* usecs_to_jiffies: - convert microseconds to jiffies
* @u: time in microseconds
*
* conversion is done as follows:
*
* - 'too large' values [that would result in larger than
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
*
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor and
* handling any 32-bit overflows as for msecs_to_jiffies.
*
* usecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
* code, __usecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
* the HZ range specific helpers _usecs_to_jiffies() are called both
* directly here and from __usecs_to_jiffies() in the case where
* constant folding is not possible.
*/
static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
{
if (__builtin_constant_p(u)) {
if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
return _usecs_to_jiffies(u);
} else {
return __usecs_to_jiffies(u);
}
}
 
extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
extern void jiffies_to_timespec64(const unsigned long jiffies,
struct timespec64 *value);
static inline unsigned long timespec_to_jiffies(const struct timespec *value)
{
struct timespec64 ts = timespec_to_timespec64(*value);
 
return timespec64_to_jiffies(&ts);
}
 
static inline void jiffies_to_timespec(const unsigned long jiffies,
struct timespec *value)
{
struct timespec64 ts;
 
jiffies_to_timespec64(jiffies, &ts);
*value = timespec64_to_timespec(ts);
}
 
extern unsigned long timeval_to_jiffies(const struct timeval *value);
extern void jiffies_to_timeval(const unsigned long jiffies,
struct timeval *value);
/drivers/include/linux/kernel.h
102,6 → 102,18
(((__x) - ((__d) / 2)) / (__d)); \
} \
)
/*
* Same as above but for u64 dividends. The divisor must be a 32-bit
* number.
*/
#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
{ \
typeof(divisor) __d = divisor; \
unsigned long long _tmp = (x) + (__d) / 2; \
do_div(_tmp, __d); \
_tmp; \
} \
)
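A hedged example of the 64-bit rounding helper (the picosecond conversion is illustrative):

static inline u32 example_ps_to_ns_rounded(u64 period_ps)
{
	/* 1500 ps -> 2 ns, 1499 ps -> 1 ns: rounds to nearest */
	return DIV_ROUND_CLOSEST_ULL(period_ps, 1000);
}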
 
/*
* Multiplies an integer by a fraction, while avoiding unnecessary
149,14 → 161,52
*/
#define lower_32_bits(n) ((u32)(n))
 
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif
 
/*
* abs() handles unsigned and signed longs, ints, shorts and chars. For all
* input types abs() returns a signed long.
* abs() should not be used for 64-bit types (s64, u64, long long) - use abs64()
* for those.
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
void ___might_sleep(const char *file, int line, int preempt_offset);
void __might_sleep(const char *file, int line, int preempt_offset);
/**
* might_sleep - annotation for functions that can sleep
*
* this macro will print a stack trace if it is executed in an atomic
* context (spinlock, irq-handler, ...).
*
* This is a useful debugging help to be able to catch problems early and not
* be bitten later when the calling function happens to sleep when it is not
* supposed to.
*/
#define abs(x) ({ \
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
static inline void ___might_sleep(const char *file, int line,
int preempt_offset) { }
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif
 
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
 
/**
* abs - return absolute value of an argument
* @x: the value. If it is unsigned type, it is converted to signed type first
* (s64, long or int depending on its size).
*
* Return: an absolute value of x. If x is 64-bit, macro's return type is s64,
* otherwise it is signed long.
*/
#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \
s64 __x = (x); \
(__x < 0) ? -__x : __x; \
}), ({ \
long ret; \
if (sizeof(x) == sizeof(long)) { \
long __x = (x); \
166,13 → 216,35
ret = (__x < 0) ? -__x : __x; \
} \
ret; \
})
}))
 
#define abs64(x) ({ \
s64 __x = (x); \
(__x < 0) ? -__x : __x; \
})
/**
* reciprocal_scale - "scale" a value into range [0, ep_ro)
* @val: value
* @ep_ro: right open interval endpoint
*
* Perform a "reciprocal multiplication" in order to "scale" a value into
* range [0, ep_ro), where the upper interval endpoint is right-open.
* This is useful, e.g., for accessing an index of an array containing
* ep_ro elements. Think of it as a sort of modulus, only that
* the result isn't that of modulo. ;) Note that if initial input is a
* small value, then result will return 0.
*
* Return: a result based on val in interval [0, ep_ro).
*/
static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
return (u32)(((u64) val * ep_ro) >> 32);
}
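A sketch of the scaling helper applied to hash-table bucketing (names hypothetical):

static inline u32 example_bucket_for(u32 hash, u32 buckets)
{
	return reciprocal_scale(hash, buckets);	/* result in [0, buckets) */
}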
 
#if defined(CONFIG_MMU) && \
(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() __might_fault(__FILE__, __LINE__)
void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif
 
#define KERN_EMERG "<0>" /* system is unusable */
#define KERN_ALERT "<1>" /* action must be taken immediately */
#define KERN_CRIT "<2>" /* critical conditions */
181,6 → 253,15
#define KERN_NOTICE "<5>" /* normal but significant condition */
#define KERN_INFO "<6>" /* informational */
#define KERN_DEBUG "<7>" /* debug-level messages */
extern unsigned long simple_strtoul(const char *,char **,unsigned int);
extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);
 
extern int num_to_str(char *buf, int size, unsigned long long num);
 
/* lib/printf utilities */
 
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
extern __printf(3, 4)
193,7 → 274,16
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __printf(2, 3)
char *kasprintf(gfp_t gfp, const char *fmt, ...);
extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
extern __printf(2, 0)
char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
extern __printf(2, 0)
const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
 
extern __scanf(2, 3)
int sscanf(const char *, const char *, ...);
extern __scanf(2, 0)
int vsscanf(const char *, const char *, va_list);
extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
enum lockdep_ok {
LOCKDEP_STILL_OK,
LOCKDEP_NOW_UNRELIABLE
229,6 → 319,7
#define TAINT_OOT_MODULE 12
#define TAINT_UNSIGNED_MODULE 13
#define TAINT_SOFTLOCKUP 14
#define TAINT_LIVEPATCH 15
 
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
277,12 → 368,6
*
* Most likely, you want to use tracing_on/tracing_off.
*/
#ifdef CONFIG_RING_BUFFER
/* trace_off_permanent stops recording with no way to bring it back */
void tracing_off_permanent(void);
#else
static inline void tracing_off_permanent(void) { }
#endif
 
enum ftrace_dump_mode {
DUMP_NONE,
426,10 → 511,10
__ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)
 
extern int
extern __printf(2, 0) int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
 
extern int
extern __printf(2, 0) int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
449,7 → 534,7
{
return 0;
}
static inline int
static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
return 0;
566,10 → 651,12
#define VERIFY_OCTAL_PERMISSIONS(perms) \
(BUILD_BUG_ON_ZERO((perms) < 0) + \
BUILD_BUG_ON_ZERO((perms) > 0777) + \
/* User perms >= group perms >= other perms */ \
BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \
BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \
/* Other writable? Generally considered a bad idea. */ \
/* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
/* USER_WRITABLE >= GROUP_WRITABLE */ \
BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
/* OTHER_WRITABLE? Generally considered a bad idea. */ \
BUILD_BUG_ON_ZERO((perms) & 2) + \
(perms))
 
591,23 → 678,7
struct vm_area_struct {};
struct address_space {};
 
struct device
{
struct device *parent;
void *driver_data;
};
 
static inline void dev_set_drvdata(struct device *dev, void *data)
{
dev->driver_data = data;
}
 
static inline void *dev_get_drvdata(struct device *dev)
{
return dev->driver_data;
}
 
 
#define in_dbg_master() (0)
 
#define HZ 100
728,10 → 799,7
#define IS_ENABLED(a) 0
 
 
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
 
 
#define cpufreq_quick_get_max(x) GetCpuFreq()
 
extern unsigned int tsc_khz;
771,8 → 839,6
return 0;
}
 
struct seq_file;
 
void *kmap(struct page *page);
void *kmap_atomic(struct page *page);
void kunmap(struct page *page);
787,5 → 853,4
 
#define CONFIG_PAGE_OFFSET 0
 
 
#endif
/drivers/include/linux/kobject.h
80,7 → 80,8
 
extern __printf(2, 3)
int kobject_set_name(struct kobject *kobj, const char *name, ...);
extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
extern __printf(2, 0)
int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
va_list vargs);
 
static inline const char *kobject_name(const struct kobject *kobj)
121,6 → 122,7
};
 
struct kobj_uevent_env {
char *argv[3];
char *envp[UEVENT_NUM_ENVP];
int envp_idx;
char buf[UEVENT_BUFFER_SIZE];
/drivers/include/linux/kref.h
25,10 → 25,147
atomic_t refcount;
};
 
void kref_init(struct kref *kref);
void kref_get(struct kref *kref);
int kref_put(struct kref *kref, void (*release) (struct kref *kref));
int kref_sub(struct kref *kref, unsigned int count,
void (*release) (struct kref *kref));
/**
* kref_init - initialize object.
* @kref: object in question.
*/
static inline void kref_init(struct kref *kref)
{
atomic_set(&kref->refcount, 1);
}
 
/**
* kref_get - increment refcount for object.
* @kref: object.
*/
static inline void kref_get(struct kref *kref)
{
/* If refcount was 0 before incrementing then we have a race
* condition: this kref is being freed by some other thread right now.
* In this case one should use kref_get_unless_zero()
*/
WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
}
 
/**
* kref_sub - subtract a number of refcounts for object.
* @kref: object.
* @count: Number of refcounts to subtract.
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
* in as this function. If the caller does pass kfree to this
* function, you will be publicly mocked mercilessly by the kref
* maintainer, and anyone else who happens to notice it. You have
* been warned.
*
* Subtract @count from the refcount, and if 0, call release().
* Return 1 if the object was removed, otherwise return 0. Beware, if this
* function returns 0, you still cannot count on the kref remaining in
* memory. Only use the return value if you want to see if the kref is now
* gone, not present.
*/
static inline int kref_sub(struct kref *kref, unsigned int count,
void (*release)(struct kref *kref))
{
WARN_ON(release == NULL);
 
if (atomic_sub_and_test((int) count, &kref->refcount)) {
release(kref);
return 1;
}
return 0;
}
 
/**
* kref_put - decrement refcount for object.
* @kref: object.
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
* in as this function. If the caller does pass kfree to this
* function, you will be publicly mocked mercilessly by the kref
* maintainer, and anyone else who happens to notice it. You have
* been warned.
*
* Decrement the refcount, and if 0, call release().
* Return 1 if the object was removed, otherwise return 0. Beware, if this
* function returns 0, you still cannot count on the kref remaining in
* memory. Only use the return value if you want to see if the kref is now
* gone, not present.
*/
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
return kref_sub(kref, 1, release);
}
 
/**
* kref_put_spinlock_irqsave - decrement refcount for object.
* @kref: object.
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
* in as this function.
* @lock: lock to take in release case
*
* Behaves identically to kref_put() with one exception: if the reference count
* drops to zero, the lock will be taken atomically wrt dropping the reference
* count. The release function has to call spin_unlock() without _irqrestore.
*/
static inline int kref_put_spinlock_irqsave(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
{
unsigned long flags;
 
WARN_ON(release == NULL);
if (atomic_add_unless(&kref->refcount, -1, 1))
return 0;
spin_lock_irqsave(lock, flags);
if (atomic_dec_and_test(&kref->refcount)) {
release(kref);
local_irq_restore(flags);
return 1;
}
spin_unlock_irqrestore(lock, flags);
return 0;
}
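A matching release function has to drop the lock with a plain spin_unlock(), as the comment above requires; kref_put_spinlock_irqsave() then restores interrupts itself. The object layout below is hypothetical:

struct example_obj {
	struct kref kref;
	spinlock_t *list_lock;
	struct list_head node;
};

static void example_obj_release(struct kref *kref)
{
	struct example_obj *obj = container_of(kref, struct example_obj, kref);

	list_del(&obj->node);		/* still under the caller's lock */
	spin_unlock(obj->list_lock);	/* plain unlock, no _irqrestore */
	kfree(obj);
}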
 
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
{
WARN_ON(release == NULL);
if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
mutex_lock(lock);
if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
mutex_unlock(lock);
return 0;
}
release(kref);
return 1;
}
return 0;
}
 
/**
* kref_get_unless_zero - Increment refcount for object unless it is zero.
* @kref: object.
*
* Return non-zero if the increment succeeded. Otherwise return 0.
*
* This function is intended to simplify locking around refcounting for
* objects that can be looked up from a lookup structure, and which are
* removed from that lookup structure in the object destructor.
* Operations on such objects require at least a read lock around
* lookup + kref_get, and a write lock around kref_put + remove from lookup
* structure. Furthermore, RCU implementations become extremely tricky.
* With a lookup followed by a kref_get_unless_zero *with return value check*
* locking in the kref_put path can be deferred to the actual removal from
* the lookup structure and RCU lookups become trivial.
*/
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
return atomic_add_unless(&kref->refcount, 1, 0);
}
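A sketch of the lookup pattern described above, under assumed names: the lookup lock is held only long enough to attempt the refcount bump.

struct example_node {
	struct kref kref;
	int id;
	struct list_head link;
};

static struct example_node *example_lookup(struct list_head *head,
					   spinlock_t *lock, int id)
{
	struct example_node *n, *found = NULL;

	spin_lock(lock);
	list_for_each_entry(n, head, link) {
		if (n->id == id && kref_get_unless_zero(&n->kref)) {
			found = n;	/* reference taken, safe after unlock */
			break;
		}
	}
	spin_unlock(lock);
	return found;
}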
#endif /* _KREF_H_ */
/drivers/include/linux/ktime.h
0,0 → 1,304
/*
* include/linux/ktime.h
*
* ktime_t - nanosecond-resolution time format.
*
* Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
*
* data type definitions, declarations, prototypes and macros.
*
* Started by: Thomas Gleixner and Ingo Molnar
*
* Credits:
*
* Roman Zippel provided the ideas and primary code snippets of
* the ktime_t union and further simplifications of the original
* code.
*
* For licencing details see kernel-base/COPYING
*/
#ifndef _LINUX_KTIME_H
#define _LINUX_KTIME_H
 
#include <linux/time.h>
#include <linux/jiffies.h>
 
/*
* ktime_t:
*
* A single 64-bit variable is used to store the hrtimers
* internal representation of time values in scalar nanoseconds. The
* design plays out best on 64-bit CPUs, where most conversions are
* NOPs and most arithmetic ktime_t operations are plain arithmetic
* operations.
*
*/
union ktime {
s64 tv64;
};
 
typedef union ktime ktime_t; /* Kill this */
 
/**
* ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
* @secs: seconds to set
* @nsecs: nanoseconds to set
*
* Return: The ktime_t representation of the value.
*/
static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
{
if (unlikely(secs >= KTIME_SEC_MAX))
return (ktime_t){ .tv64 = KTIME_MAX };
 
return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs };
}
 
/* Subtract two ktime_t variables. rem = lhs - rhs: */
#define ktime_sub(lhs, rhs) \
({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; })
 
/* Add two ktime_t variables. res = lhs + rhs: */
#define ktime_add(lhs, rhs) \
({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
 
/*
* Add a ktime_t variable and a scalar nanosecond value.
* res = kt + nsval:
*/
#define ktime_add_ns(kt, nsval) \
({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; })
 
/*
* Subtract a scalar nanosecond from a ktime_t variable
* res = kt - nsval:
*/
#define ktime_sub_ns(kt, nsval) \
({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; })
 
/* convert a timespec to ktime_t format: */
static inline ktime_t timespec_to_ktime(struct timespec ts)
{
return ktime_set(ts.tv_sec, ts.tv_nsec);
}
 
/* convert a timespec64 to ktime_t format: */
static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
{
return ktime_set(ts.tv_sec, ts.tv_nsec);
}
 
/* convert a timeval to ktime_t format: */
static inline ktime_t timeval_to_ktime(struct timeval tv)
{
return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}
 
/* Map the ktime_t to timespec conversion to ns_to_timespec function */
#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64)
 
/* Map the ktime_t to timespec64 conversion to ns_to_timespec64 function */
#define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64)
 
/* Map the ktime_t to timeval conversion to ns_to_timeval function */
#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64)
 
/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
#define ktime_to_ns(kt) ((kt).tv64)
 
 
/**
* ktime_equal - Compares two ktime_t variables to see if they are equal
* @cmp1: comparable1
* @cmp2: comparable2
*
* Compare two ktime_t variables.
*
* Return: 1 if equal.
*/
static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
{
return cmp1.tv64 == cmp2.tv64;
}
 
/**
* ktime_compare - Compares two ktime_t variables for less, greater or equal
* @cmp1: comparable1
* @cmp2: comparable2
*
* Return: ...
* cmp1 < cmp2: return <0
* cmp1 == cmp2: return 0
* cmp1 > cmp2: return >0
*/
static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
{
if (cmp1.tv64 < cmp2.tv64)
return -1;
if (cmp1.tv64 > cmp2.tv64)
return 1;
return 0;
}
 
/**
* ktime_after - Compare if a ktime_t value is bigger than another one.
* @cmp1: comparable1
* @cmp2: comparable2
*
* Return: true if cmp1 happened after cmp2.
*/
static inline bool ktime_after(const ktime_t cmp1, const ktime_t cmp2)
{
return ktime_compare(cmp1, cmp2) > 0;
}
 
/**
* ktime_before - Compare if a ktime_t value is smaller than another one.
* @cmp1: comparable1
* @cmp2: comparable2
*
* Return: true if cmp1 happened before cmp2.
*/
static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
{
return ktime_compare(cmp1, cmp2) < 0;
}
 
#if BITS_PER_LONG < 64
extern s64 __ktime_divns(const ktime_t kt, s64 div);
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
/*
* Negative divisors could cause an infinite loop,
* so bug out here.
*/
BUG_ON(div < 0);
if (__builtin_constant_p(div) && !(div >> 32)) {
s64 ns = kt.tv64;
u64 tmp = ns < 0 ? -ns : ns;
 
do_div(tmp, div);
return ns < 0 ? -tmp : tmp;
} else {
return __ktime_divns(kt, div);
}
}
#else /* BITS_PER_LONG < 64 */
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
/*
* The 32-bit implementation cannot handle negative divisors,
* so catch them on 64-bit as well.
*/
WARN_ON(div < 0);
return kt.tv64 / div;
}
#endif
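The asymmetry between the two branches exists because 32-bit targets lack a native 64-by-64 divide: do_div() (from asm/div64.h) performs a 64-by-32 division in place and returns the remainder. A small sketch of its calling convention:

u64 ns = 1000000123ULL;
u32 rem = do_div(ns, NSEC_PER_SEC);	/* afterwards ns == 1, rem == 123 */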
 
static inline s64 ktime_to_us(const ktime_t kt)
{
return ktime_divns(kt, NSEC_PER_USEC);
}
 
static inline s64 ktime_to_ms(const ktime_t kt)
{
return ktime_divns(kt, NSEC_PER_MSEC);
}
 
static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
{
return ktime_to_us(ktime_sub(later, earlier));
}
 
static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
{
return ktime_to_ms(ktime_sub(later, earlier));
}
 
static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
{
return ktime_add_ns(kt, usec * NSEC_PER_USEC);
}
 
static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec)
{
return ktime_add_ns(kt, msec * NSEC_PER_MSEC);
}
 
static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
{
return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
}
 
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
 
/**
* ktime_to_timespec_cond - convert a ktime_t variable to timespec
* format only if the variable contains data
* @kt: the ktime_t variable to convert
* @ts: the timespec variable to store the result in
*
* Return: %true if there was a successful conversion, %false if kt was 0.
*/
static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
struct timespec *ts)
{
if (kt.tv64) {
*ts = ktime_to_timespec(kt);
return true;
} else {
return false;
}
}
 
/**
* ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
* format only if the variable contains data
* @kt: the ktime_t variable to convert
* @ts: the timespec variable to store the result in
*
* Return: %true if there was a successful conversion, %false if kt was 0.
*/
static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
struct timespec64 *ts)
{
if (kt.tv64) {
*ts = ktime_to_timespec64(kt);
return true;
} else {
return false;
}
}
 
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
* idea of the (in)accuracy of timers. Timer values are rounded up to
this resolution.
*/
#define LOW_RES_NSEC TICK_NSEC
#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC }
 
ktime_t ktime_get(void);
 
static inline ktime_t ns_to_ktime(u64 ns)
{
static const ktime_t ktime_zero = { .tv64 = 0 };
 
return ktime_add_ns(ktime_zero, ns);
}
 
static inline ktime_t ms_to_ktime(u64 ms)
{
static const ktime_t ktime_zero = { .tv64 = 0 };
 
return ktime_add_ms(ktime_zero, ms);
}
 
static inline u64 ktime_get_raw_ns(void)
{
return ktime_get().tv64;
}
 
#endif
/drivers/include/linux/list.h
672,6 → 672,11
n->pprev = &n->next;
}
 
static inline bool hlist_fake(struct hlist_node *h)
{
return h->pprev == &h->next;
}
 
/*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
/drivers/include/linux/lockdep.h
130,8 → 130,8
};
 
struct lock_class_stats {
unsigned long contention_point[4];
unsigned long contending_point[4];
unsigned long contention_point[LOCKSTAT_POINTS];
unsigned long contending_point[LOCKSTAT_POINTS];
struct lock_time read_waittime;
struct lock_time write_waittime;
struct lock_time read_holdtime;
255,6 → 255,7
unsigned int check:1; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
unsigned int references:12; /* 32 bits */
unsigned int pin_count;
};
 
/*
354,6 → 355,9
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
 
extern void lock_pin_lock(struct lockdep_map *lock);
extern void lock_unpin_lock(struct lockdep_map *lock);
 
# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
368,6 → 372,9
 
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
 
#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
#define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map)
 
#else /* !CONFIG_LOCKDEP */
 
static inline void lockdep_off(void)
420,6 → 427,9
 
#define lockdep_recursing(tsk) (0)
 
#define lockdep_pin_lock(l) do { (void)(l); } while (0)
#define lockdep_unpin_lock(l) do { (void)(l); } while (0)
 
#endif /* !LOCKDEP */
 
#ifdef CONFIG_LOCK_STAT
531,8 → 541,13
# define might_lock_read(lock) do { } while (0)
#endif
 
#ifdef CONFIG_PROVE_RCU
#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif
 
#endif /* __LINUX_LOCKDEP_H */
/drivers/include/linux/math64.h
142,6 → 142,13
}
#endif /* mul_u64_u32_shr */
 
#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */
 
#else
 
#ifndef mul_u64_u32_shr
161,6 → 168,79
}
#endif /* mul_u64_u32_shr */
 
#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
union {
u64 ll;
struct {
#ifdef __BIG_ENDIAN
u32 high, low;
#else
u32 low, high;
#endif
} l;
} rl, rm, rn, rh, a0, b0;
u64 c;
 
a0.ll = a;
b0.ll = b;
 
rl.ll = (u64)a0.l.low * b0.l.low;
rm.ll = (u64)a0.l.low * b0.l.high;
rn.ll = (u64)a0.l.high * b0.l.low;
rh.ll = (u64)a0.l.high * b0.l.high;
 
/*
* Each of these lines computes a 64-bit intermediate result into "c",
* starting at bits 32-95. The low 32-bits go into the result of the
* multiplication, the high 32-bits are carried into the next step.
*/
rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
rh.l.high = (c >> 32) + rh.l.high;
 
/*
* The 128-bit result of the multiplication is in rl.ll and rh.ll,
* shift it right and throw away the high part of the result.
*/
if (shift == 0)
return rl.ll;
if (shift < 64)
return (rl.ll >> shift) | (rh.ll << (64 - shift));
return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */
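Both variants compute the same truncated product. Where the compiler provides unsigned __int128, the generic version can be sanity-checked against it directly; a sketch (the operands are arbitrary):

u64 a = 0x123456789abcdef0ULL, b = 0xfedcba9876543210ULL;
u64 hi = mul_u64_u64_shr(a, b, 64);	/* high 64 bits of the 128-bit product */
/* equals (u64)(((unsigned __int128)a * b) >> 64) where __int128 exists */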
 
#endif
 
#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
union {
u64 ll;
struct {
#ifdef __BIG_ENDIAN
u32 high, low;
#else
u32 low, high;
#endif
} l;
} u, rl, rh;
 
u.ll = a;
rl.ll = (u64)u.l.low * mul;
rh.ll = (u64)u.l.high * mul + rl.l.high;
 
/* Bits 32-63 of the result will be in rh.l.low. */
rl.l.high = do_div(rh.ll, divisor);
 
/* Bits 0-31 of the result will be in rl.l.low. */
do_div(rl.ll, divisor);
 
rl.l.high = rh.l.low;
return rl.ll;
}
#endif /* mul_u64_u32_div */
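mul_u64_u32_div() computes a * mul / divisor exactly even when the intermediate product would overflow a u64, by splitting the dividend into 32-bit halves. This is the usual way to rescale a 64-bit counter between two clock rates; an illustrative sketch (the 19.2 MHz rate is made up):

u64 cycles = 123456789ULL;
/* convert a 19.2 MHz counter value to nanoseconds: cycles * 1e9 / 19.2e6 */
u64 ns = mul_u64_u32_div(cycles, 1000000000U, 19200000U);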
 
#endif /* _LINUX_MATH64_H */
/drivers/include/linux/mod_devicetable.h
53,9 → 53,9
 
/**
* struct usb_device_id - identifies USB devices for probing and hotplugging
* @match_flags: Bit mask controlling of the other fields are used to match
* against new devices. Any field except for driver_info may be used,
* although some only make sense in conjunction with other fields.
* @match_flags: Bit mask controlling which of the other fields are used to
* match against new devices. Any field except for driver_info may be
* used, although some only make sense in conjunction with other fields.
* This is usually set by a USB_DEVICE_*() macro, which sets all
* other fields in this structure except for driver_info.
* @idVendor: USB vendor ID for a device; numbers are assigned
189,6 → 189,8
struct acpi_device_id {
__u8 id[ACPI_ID_LEN];
kernel_ulong_t driver_data;
__u32 cls;
__u32 cls_msk;
};
 
#define PNP_ID_LEN 8
217,11 → 219,18
__u8 proto;
};
 
struct hda_device_id {
__u32 vendor_id;
__u32 rev_id;
__u8 api_version;
const char *name;
unsigned long driver_data;
};
 
/*
* Struct used for matching a device
*/
struct of_device_id
{
struct of_device_id {
char name[32];
char type[32];
char compatible[128];
365,8 → 374,6
} __attribute__((packed, aligned(2)));
#define SSB_DEVICE(_vendor, _coreid, _revision) \
{ .vendor = _vendor, .coreid = _coreid, .revision = _revision, }
#define SSB_DEVTABLE_END \
{ 0, },
 
#define SSB_ANY_VENDOR 0xFFFF
#define SSB_ANY_ID 0xFFFF
381,8 → 388,6
} __attribute__((packed,aligned(2)));
#define BCMA_CORE(_manuf, _id, _rev, _class) \
{ .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, }
#define BCMA_CORETABLE_END \
{ 0, },
 
#define BCMA_ANY_MANUF 0xFFFF
#define BCMA_ANY_ID 0xFFFF
551,6 → 556,14
void *data;
};
 
/**
* struct mips_cdmm_device_id - identifies devices in MIPS CDMM bus
* @type: Device type identifier.
*/
struct mips_cdmm_device_id {
__u8 type;
};
 
/*
* Match x86 CPUs for CPU specific drivers.
* See documentation of "x86_match_cpu" for details.
596,9 → 609,21
 
#define MEI_CL_MODULE_PREFIX "mei:"
#define MEI_CL_NAME_SIZE 32
#define MEI_CL_VERSION_ANY 0xff
 
/**
* struct mei_cl_device_id - MEI client device identifier
* @name: helper name
* @uuid: client uuid
* @version: client protocol version
* @driver_info: information used by the driver.
*
* identifies a MEI client device by UUID and name
*/
struct mei_cl_device_id {
char name[MEI_CL_NAME_SIZE];
uuid_le uuid;
__u8 version;
kernel_ulong_t driver_info;
};
 
626,4 → 651,10
kernel_ulong_t driver_data;
};
 
struct ulpi_device_id {
__u16 vendor;
__u16 product;
kernel_ulong_t driver_data;
};
 
#endif /* LINUX_MOD_DEVICETABLE_H */
/drivers/include/linux/module.h
9,9 → 9,8
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/cache.h>
#include <linux/compiler.h>
 
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/printk.h>
/drivers/include/linux/moduleparam.h
2,9 → 2,69
#define _LINUX_MODULE_PARAMS_H
/* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */
#include <linux/kernel.h>
/**
* module_param - typesafe helper for a module/cmdline parameter
* @value: the variable to alter, and exposed parameter name.
* @type: the type of the parameter
* @perm: visibility in sysfs.
*
* @value becomes the module parameter, or (prefixed by KBUILD_MODNAME and a
* ".") the kernel commandline parameter. Note that - is changed to _, so
* the user can use "foo-bar=1" even for variable "foo_bar".
*
* @perm is 0 if the variable is not to appear in sysfs, or 0444
* for world-readable, 0644 for root-writable, etc. Note that if it
* is writable, you may need to use kernel_param_lock() around
* accesses (esp. charp, which can be kfreed when it changes).
*
* The @type is simply pasted to refer to a param_ops_##type and a
* param_check_##type: for convenience many standard types are provided but
* you can create your own by defining those variables.
*
* Standard types are:
* byte, short, ushort, int, uint, long, ulong
* charp: a character pointer
* bool: a bool, values 0/1, y/n, Y/N.
* invbool: the above, only sense-reversed (N = true).
*/
#define module_param(name, type, perm) \
module_param_named(name, name, type, perm)
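A short usage sketch of the interface documented above (foo_debug is a hypothetical parameter; in this port the macros expand to nothing, but the source form matches the Linux original):

static int foo_debug;
module_param(foo_debug, int, 0644);	/* root-writable in sysfs */
MODULE_PARM_DESC(foo_debug, "Enable verbose debugging (0 = off)");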
 
#define MODULE_PARM_DESC(_parm, desc)
/**
* module_param_unsafe - same as module_param but taints kernel
*/
#define module_param_unsafe(name, type, perm) \
module_param_named_unsafe(name, name, type, perm)
 
/**
* module_param_named - typesafe helper for a renamed module/cmdline parameter
* @name: a valid C identifier which is the parameter name.
* @value: the actual lvalue to alter.
* @type: the type of the parameter
* @perm: visibility in sysfs.
*
* Usually it's a good idea to have variable names and user-exposed names the
* same, but that's harder if the variable must be non-static or is inside a
* structure. This allows exposure under a different name.
*/
#define module_param_named(name, value, type, perm)
/**
* module_param_named_unsafe - same as module_param_named but taints kernel
*/
 
#define module_param_named_unsafe(name, value, type, perm)
 
#define MODULE_PARM_DESC(_parm, desc)
 
#ifdef CONFIG_SYSFS
extern void kernel_param_lock(struct module *mod);
extern void kernel_param_unlock(struct module *mod);
#else
static inline void kernel_param_lock(struct module *mod)
{
}
static inline void kernel_param_unlock(struct module *mod)
{
}
#endif
#endif
/drivers/include/linux/pci.h
25,7 → 25,17
#include <linux/pci_regs.h> /* The pci register defines */
#include <linux/ioport.h>
 
#include <linux/device.h>
 
#if 0
struct device
{
struct device *parent;
void *driver_data;
};
 
#endif
 
#define PCI_CFG_SPACE_SIZE 256
#define PCI_CFG_SPACE_EXP_SIZE 4096
 
204,6 → 214,10
*
* 7:3 = slot
* 2:0 = function
*
* PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
* In the interest of not exposing interfaces to user-space unnecessarily,
* the following kernel-only defines are being added here.
*/
#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
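The devfn byte therefore round-trips through these macros. For example, slot 0x1f, function 3:

unsigned int devfn = PCI_DEVFN(0x1f, 3);	/* (0x1f << 3) | 3 == 0xfb */
/* PCI_SLOT(devfn) == 0x1f, PCI_FUNC(devfn) == 3 */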
427,11 → 441,12
D3cold, not set for devices
powered on/off by the
corresponding bridge */
unsigned int ignore_hotplug:1; /* Ignore hotplug events */
unsigned int d3_delay; /* D3->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
 
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state. */
struct pcie_link_state *link_state; /* ASPM link state */
#endif
 
pci_channel_state_t error_state; /* current connectivity state */
446,13 → 461,15
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
 
bool match_driver; /* Skip attaching driver */
/* These fields are used by common fixups */
unsigned int transparent:1; /* Transparent PCI bridge */
unsigned int transparent:1; /* Subtractive decode PCI bridge */
unsigned int multifunction:1;/* Part of multi-function device */
/* keep track of device state */
unsigned int is_added:1;
unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */
unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
unsigned int block_cfg_access:1; /* config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
460,8 → 477,6
unsigned int msix_enabled:1;
unsigned int ari_enabled:1; /* ARI forwarding */
unsigned int is_managed:1;
unsigned int is_pcie:1; /* Obsolete. Will be removed.
Use pci_is_pcie() instead */
unsigned int needs_freset:1; /* Dev requires fundamental reset */
unsigned int state_saved:1;
unsigned int is_physfn:1;
/drivers/include/linux/percpu-defs.h
488,10 → 488,8
#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
 
/*
* Operations with implied preemption protection. These operations can be
* used without worrying about preemption. Note that interrupts may still
* occur while an operation is in progress and if the interrupt modifies
* the variable too then RMW actions may not be reliable.
* Operations with implied preemption/interrupt protection. These
* operations can be used without worrying about preemption or interrupts.
*/
#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp)
#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val)
/drivers/include/linux/personality.h
44,11 → 44,9
*/
#define personality(pers) (pers & PER_MASK)
 
 
/*
* Change personality of the currently running process.
*/
#define set_personality(pers) \
((current->personality == (pers)) ? 0 : __set_personality(pers))
#define set_personality(pers) (current->personality = (pers))
 
#endif /* _LINUX_PERSONALITY_H */
/drivers/include/linux/pm.h
0,0 → 1,788
/*
* pm.h - Power management interface
*
* Copyright (C) 2000 Andrew Henroid
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
 
#ifndef _LINUX_PM_H
#define _LINUX_PM_H
 
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
//#include <linux/timer.h>
#include <linux/completion.h>
 
/*
* Callbacks for platform drivers to implement.
*/
extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void);
 
struct device; /* we have a circular dep with device.h */
#ifdef CONFIG_VT_CONSOLE_SLEEP
extern void pm_vt_switch_required(struct device *dev, bool required);
extern void pm_vt_switch_unregister(struct device *dev);
#else
static inline void pm_vt_switch_required(struct device *dev, bool required)
{
}
static inline void pm_vt_switch_unregister(struct device *dev)
{
}
#endif /* CONFIG_VT_CONSOLE_SLEEP */
 
/*
* Device power management
*/
 
struct device;
 
#ifdef CONFIG_PM
extern const char power_group_name[]; /* = "power" */
#else
#define power_group_name NULL
#endif
 
typedef struct pm_message {
int event;
} pm_message_t;
 
/**
* struct dev_pm_ops - device PM callbacks
*
* Several device power state transitions are externally visible, affecting
* the state of pending I/O queues and (for drivers that touch hardware)
* interrupts, wakeups, DMA, and other hardware state. There may also be
* internal transitions to various low-power modes which are transparent
* to the rest of the driver stack (such as a driver that's ON gating off
* clocks which are not in active use).
*
* The externally visible transitions are handled with the help of callbacks
* included in this structure in such a way that two levels of callbacks are
* involved. First, the PM core executes callbacks provided by PM domains,
* device types, classes and bus types. They are the subsystem-level callbacks
* supposed to execute callbacks provided by device drivers, although they may
* choose not to do that. If the driver callbacks are executed, they have to
* collaborate with the subsystem-level callbacks to achieve the goals
* appropriate for the given system transition, given transition phase and the
* subsystem the device belongs to.
*
* @prepare: The principal role of this callback is to prevent new children of
* the device from being registered after it has returned (the driver's
* subsystem and generally the rest of the kernel is supposed to prevent
* new calls to the probe method from being made too once @prepare() has
* succeeded). If @prepare() detects a situation it cannot handle (e.g.
* registration of a child already in progress), it may return -EAGAIN, so
* that the PM core can execute it once again (e.g. after a new child has
* been registered) to recover from the race condition.
* This method is executed for all kinds of suspend transitions and is
* followed by one of the suspend callbacks: @suspend(), @freeze(), or
* @poweroff(). If the transition is a suspend to memory or standby (that
* is, not related to hibernation), the return value of @prepare() may be
* used to indicate to the PM core to leave the device in runtime suspend
* if applicable. Namely, if @prepare() returns a positive number, the PM
* core will understand that as a declaration that the device appears to be
* runtime-suspended and it may be left in that state during the entire
* transition and during the subsequent resume if all of its descendants
* are left in runtime suspend too. If that happens, @complete() will be
* executed directly after @prepare() and it must ensure the proper
* functioning of the device after the system resume.
* The PM core executes subsystem-level @prepare() for all devices before
* starting to invoke suspend callbacks for any of them, so generally
* devices may be assumed to be functional or to respond to runtime resume
* requests while @prepare() is being executed. However, device drivers
* may NOT assume anything about the availability of user space at that
* time and it is NOT valid to request firmware from within @prepare()
* (it's too late to do that). It also is NOT valid to allocate
* substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
* [To work around these limitations, drivers may register suspend and
* hibernation notifiers to be executed before the freezing of tasks.]
*
* @complete: Undo the changes made by @prepare(). This method is executed for
* all kinds of resume transitions, following one of the resume callbacks:
* @resume(), @thaw(), @restore(). Also called if the state transition
* fails before the driver's suspend callback: @suspend(), @freeze() or
* @poweroff(), can be executed (e.g. if the suspend callback fails for one
* of the other devices that the PM core has unsuccessfully attempted to
* suspend earlier).
* The PM core executes subsystem-level @complete() after it has executed
* the appropriate resume callbacks for all devices. If the corresponding
* @prepare() at the beginning of the suspend transition returned a
* positive number and the device was left in runtime suspend (without
* executing any suspend and resume callbacks for it), @complete() will be
* the only callback executed for the device during resume. In that case,
* @complete() must be prepared to do whatever is necessary to ensure the
* proper functioning of the device after the system resume. To this end,
* @complete() can check the power.direct_complete flag of the device to
* learn whether (unset) or not (set) the previous suspend and resume
* callbacks have been executed for it.
*
* @suspend: Executed before putting the system into a sleep state in which the
* contents of main memory are preserved. The exact action to perform
* depends on the device's subsystem (PM domain, device type, class or bus
* type), but generally the device must be quiescent after subsystem-level
* @suspend() has returned, so that it doesn't do any I/O or DMA.
* Subsystem-level @suspend() is executed for all devices after invoking
* subsystem-level @prepare() for all of them.
*
* @suspend_late: Continue operations started by @suspend(). For a number of
* devices @suspend_late() may point to the same callback routine as the
* runtime suspend callback.
*
* @resume: Executed after waking the system up from a sleep state in which the
* contents of main memory were preserved. The exact action to perform
* depends on the device's subsystem, but generally the driver is expected
* to start working again, responding to hardware events and software
* requests (the device itself may be left in a low-power state, waiting
* for a runtime resume to occur). The state of the device at the time its
* driver's @resume() callback is run depends on the platform and subsystem
* the device belongs to. On most platforms, there are no restrictions on
* availability of resources like clocks during @resume().
* Subsystem-level @resume() is executed for all devices after invoking
* subsystem-level @resume_noirq() for all of them.
*
* @resume_early: Prepare to execute @resume(). For a number of devices
* @resume_early() may point to the same callback routine as the runtime
* resume callback.
*
* @freeze: Hibernation-specific, executed before creating a hibernation image.
* Analogous to @suspend(), but it should not enable the device to signal
* wakeup events or change its power state. The majority of subsystems
* (with the notable exception of the PCI bus type) expect the driver-level
* @freeze() to save the device settings in memory to be used by @restore()
* during the subsequent resume from hibernation.
* Subsystem-level @freeze() is executed for all devices after invoking
* subsystem-level @prepare() for all of them.
*
* @freeze_late: Continue operations started by @freeze(). Analogous to
* @suspend_late(), but it should not enable the device to signal wakeup
* events or change its power state.
*
* @thaw: Hibernation-specific, executed after creating a hibernation image OR
* if the creation of an image has failed. Also executed after a failing
* attempt to restore the contents of main memory from such an image.
* Undo the changes made by the preceding @freeze(), so the device can be
* operated in the same way as immediately before the call to @freeze().
* Subsystem-level @thaw() is executed for all devices after invoking
* subsystem-level @thaw_noirq() for all of them. It also may be executed
* directly after @freeze() in case of a transition error.
*
* @thaw_early: Prepare to execute @thaw(). Undo the changes made by the
* preceding @freeze_late().
*
* @poweroff: Hibernation-specific, executed after saving a hibernation image.
* Analogous to @suspend(), but it need not save the device's settings in
* memory.
* Subsystem-level @poweroff() is executed for all devices after invoking
* subsystem-level @prepare() for all of them.
*
* @poweroff_late: Continue operations started by @poweroff(). Analogous to
* @suspend_late(), but it need not save the device's settings in memory.
*
* @restore: Hibernation-specific, executed after restoring the contents of main
* memory from a hibernation image, analogous to @resume().
*
* @restore_early: Prepare to execute @restore(), analogous to @resume_early().
*
* @suspend_noirq: Complete the actions started by @suspend(). Carry out any
* additional operations required for suspending the device that might be
* racing with its driver's interrupt handler, which is guaranteed not to
* run while @suspend_noirq() is being executed.
* It generally is expected that the device will be in a low-power state
* (appropriate for the target system sleep state) after subsystem-level
* @suspend_noirq() has returned successfully. If the device can generate
* system wakeup signals and is enabled to wake up the system, it should be
* configured to do so at that time. However, depending on the platform
* and device's subsystem, @suspend() or @suspend_late() may be allowed to
* put the device into the low-power state and configure it to generate
* wakeup signals, in which case it generally is not necessary to define
* @suspend_noirq().
*
* @resume_noirq: Prepare for the execution of @resume() by carrying out any
* operations required for resuming the device that might be racing with
* its driver's interrupt handler, which is guaranteed not to run while
* @resume_noirq() is being executed.
*
* @freeze_noirq: Complete the actions started by @freeze(). Carry out any
* additional operations required for freezing the device that might be
* racing with its driver's interrupt handler, which is guaranteed not to
* run while @freeze_noirq() is being executed.
* The power state of the device should not be changed by either @freeze(),
* or @freeze_late(), or @freeze_noirq() and it should not be configured to
* signal system wakeup by any of these callbacks.
*
* @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
* operations required for thawing the device that might be racing with its
* driver's interrupt handler, which is guaranteed not to run while
* @thaw_noirq() is being executed.
*
* @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to
* @suspend_noirq(), but it need not save the device's settings in memory.
*
* @restore_noirq: Prepare for the execution of @restore() by carrying out any
* operations required for thawing the device that might be racing with its
* driver's interrupt handler, which is guaranteed not to run while
* @restore_noirq() is being executed. Analogous to @resume_noirq().
*
* All of the above callbacks, except for @complete(), return error codes.
* However, the error codes returned by the resume operations, @resume(),
* @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
* not cause the PM core to abort the resume transition during which they are
* returned. The error codes returned in those cases are only printed by the PM
* core to the system logs for debugging purposes. Still, it is recommended
* that drivers only return error codes from their resume methods in case of an
* unrecoverable failure (i.e. when the device being handled refuses to resume
* and becomes unusable) to allow us to modify the PM core in the future, so
* that it can avoid attempting to handle devices that failed to resume and
* their children.
*
* It is allowed to unregister devices while the above callbacks are being
* executed. However, a callback routine must NOT try to unregister the device
* it was called for, although it may unregister children of that device (for
* example, if it detects that a child was unplugged while the system was
* asleep).
*
* Refer to Documentation/power/devices.txt for more information about the role
* of the above callbacks in the system suspend process.
*
* There also are callbacks related to runtime power management of devices.
* Again, these callbacks are executed by the PM core only for subsystems
* (PM domains, device types, classes and bus types) and the subsystem-level
* callbacks are supposed to invoke the driver callbacks. Moreover, the exact
* actions to be performed by a device driver's callbacks generally depend on
* the platform and subsystem the device belongs to.
*
* @runtime_suspend: Prepare the device for a condition in which it won't be
* able to communicate with the CPU(s) and RAM due to power management.
* This need not mean that the device should be put into a low-power state.
* For example, if the device is behind a link which is about to be turned
* off, the device may remain at full power. If the device does go to low
* power and is capable of generating runtime wakeup events, remote wakeup
* (i.e., a hardware mechanism allowing the device to request a change of
* its power state via an interrupt) should be enabled for it.
*
* @runtime_resume: Put the device into the fully active state in response to a
* wakeup event generated by hardware or at the request of software. If
* necessary, put the device into the full-power state and restore its
* registers, so that it is fully operational.
*
* @runtime_idle: Device appears to be inactive and it might be put into a
* low-power state if all of the necessary conditions are satisfied.
* Check these conditions, and return 0 if it's appropriate to let the PM
* core queue a suspend request for the device.
*
* Refer to Documentation/power/runtime_pm.txt for more information about the
* role of the above callbacks in device runtime power management.
*
*/
 
struct dev_pm_ops {
int (*prepare)(struct device *dev);
void (*complete)(struct device *dev);
int (*suspend)(struct device *dev);
int (*resume)(struct device *dev);
int (*freeze)(struct device *dev);
int (*thaw)(struct device *dev);
int (*poweroff)(struct device *dev);
int (*restore)(struct device *dev);
int (*suspend_late)(struct device *dev);
int (*resume_early)(struct device *dev);
int (*freeze_late)(struct device *dev);
int (*thaw_early)(struct device *dev);
int (*poweroff_late)(struct device *dev);
int (*restore_early)(struct device *dev);
int (*suspend_noirq)(struct device *dev);
int (*resume_noirq)(struct device *dev);
int (*freeze_noirq)(struct device *dev);
int (*thaw_noirq)(struct device *dev);
int (*poweroff_noirq)(struct device *dev);
int (*restore_noirq)(struct device *dev);
int (*runtime_suspend)(struct device *dev);
int (*runtime_resume)(struct device *dev);
int (*runtime_idle)(struct device *dev);
};
 
#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
.suspend = suspend_fn, \
.resume = resume_fn, \
.freeze = suspend_fn, \
.thaw = resume_fn, \
.poweroff = suspend_fn, \
.restore = resume_fn,
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
 
#ifdef CONFIG_PM_SLEEP
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
.suspend_late = suspend_fn, \
.resume_early = resume_fn, \
.freeze_late = suspend_fn, \
.thaw_early = resume_fn, \
.poweroff_late = suspend_fn, \
.restore_early = resume_fn,
#else
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
 
#ifdef CONFIG_PM_SLEEP
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
.suspend_noirq = suspend_fn, \
.resume_noirq = resume_fn, \
.freeze_noirq = suspend_fn, \
.thaw_noirq = resume_fn, \
.poweroff_noirq = suspend_fn, \
.restore_noirq = resume_fn,
#else
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
 
#ifdef CONFIG_PM
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
.runtime_suspend = suspend_fn, \
.runtime_resume = resume_fn, \
.runtime_idle = idle_fn,
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif
 
/*
* Use this if you want to use the same suspend and resume callbacks for suspend
* to RAM and hibernation.
*/
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
const struct dev_pm_ops name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
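A sketch of typical use (mydrv_suspend(), mydrv_resume() and the pm_ops name are hypothetical):

static int mydrv_suspend(struct device *dev) { /* quiesce the hardware */ return 0; }
static int mydrv_resume(struct device *dev) { /* re-init the hardware */ return 0; }

static SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);
/* hook it up with .driver.pm = &mydrv_pm_ops in the driver definition */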
 
/*
* Use this for defining a set of PM operations to be used in all situations
* (system suspend, hibernation or runtime PM).
* NOTE: In general, system suspend callbacks, .suspend() and .resume(), should
* be different from the corresponding runtime PM callbacks, .runtime_suspend(),
* and .runtime_resume(), because .runtime_suspend() always works on an already
* quiescent device, while .suspend() should assume that the device may be doing
* something when it is called (it should ensure that the device will be
* quiescent after it has returned). Therefore it's better to point the "late"
* suspend and "early" resume callback pointers, .suspend_late() and
* .resume_early(), to the same routines as .runtime_suspend() and
* .runtime_resume(), respectively (and analogously for hibernation).
*/
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
const struct dev_pm_ops name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
 
/**
* PM_EVENT_ messages
*
* The following PM_EVENT_ messages are defined for the internal use of the PM
* core, in order to provide a mechanism allowing the high level suspend and
* hibernation code to convey the necessary information to the device PM core
* code:
*
* ON No transition.
*
* FREEZE System is going to hibernate, call ->prepare() and ->freeze()
* for all devices.
*
* SUSPEND System is going to suspend, call ->prepare() and ->suspend()
* for all devices.
*
* HIBERNATE Hibernation image has been saved, call ->prepare() and
* ->poweroff() for all devices.
*
* QUIESCE Contents of main memory are going to be restored from a (loaded)
* hibernation image, call ->prepare() and ->freeze() for all
* devices.
*
* RESUME System is resuming, call ->resume() and ->complete() for all
* devices.
*
* THAW Hibernation image has been created, call ->thaw() and
* ->complete() for all devices.
*
* RESTORE Contents of main memory have been restored from a hibernation
* image, call ->restore() and ->complete() for all devices.
*
* RECOVER Creation of a hibernation image or restoration of the main
* memory contents from a hibernation image has failed, call
* ->thaw() and ->complete() for all devices.
*
* The following PM_EVENT_ messages are defined for internal use by
* kernel subsystems. They are never issued by the PM core.
*
* USER_SUSPEND Manual selective suspend was issued by userspace.
*
* USER_RESUME Manual selective resume was issued by userspace.
*
* REMOTE_WAKEUP Remote-wakeup request was received from the device.
*
* AUTO_SUSPEND Automatic (device idle) runtime suspend was
* initiated by the subsystem.
*
* AUTO_RESUME Automatic (device needed) runtime resume was
* requested by a driver.
*/
 
#define PM_EVENT_INVALID (-1)
#define PM_EVENT_ON 0x0000
#define PM_EVENT_FREEZE 0x0001
#define PM_EVENT_SUSPEND 0x0002
#define PM_EVENT_HIBERNATE 0x0004
#define PM_EVENT_QUIESCE 0x0008
#define PM_EVENT_RESUME 0x0010
#define PM_EVENT_THAW 0x0020
#define PM_EVENT_RESTORE 0x0040
#define PM_EVENT_RECOVER 0x0080
#define PM_EVENT_USER 0x0100
#define PM_EVENT_REMOTE 0x0200
#define PM_EVENT_AUTO 0x0400
 
#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
#define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND)
#define PM_EVENT_USER_RESUME (PM_EVENT_USER | PM_EVENT_RESUME)
#define PM_EVENT_REMOTE_RESUME (PM_EVENT_REMOTE | PM_EVENT_RESUME)
#define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND)
#define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME)
 
#define PMSG_INVALID ((struct pm_message){ .event = PM_EVENT_INVALID, })
#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, })
#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, })
#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, })
#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, })
#define PMSG_USER_SUSPEND ((struct pm_message) \
{ .event = PM_EVENT_USER_SUSPEND, })
#define PMSG_USER_RESUME ((struct pm_message) \
{ .event = PM_EVENT_USER_RESUME, })
#define PMSG_REMOTE_RESUME ((struct pm_message) \
{ .event = PM_EVENT_REMOTE_RESUME, })
#define PMSG_AUTO_SUSPEND ((struct pm_message) \
{ .event = PM_EVENT_AUTO_SUSPEND, })
#define PMSG_AUTO_RESUME ((struct pm_message) \
{ .event = PM_EVENT_AUTO_RESUME, })
 
#define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0)
 
/**
* Device run-time power management status.
*
* These status labels are used internally by the PM core to indicate the
* current status of a device with respect to the PM core operations. They do
* not reflect the actual power state of the device or its status as seen by the
* driver.
*
* RPM_ACTIVE Device is fully operational. Indicates that the device
* bus type's ->runtime_resume() callback has completed
* successfully.
*
* RPM_SUSPENDED Device bus type's ->runtime_suspend() callback has
* completed successfully. The device is regarded as
* suspended.
*
* RPM_RESUMING Device bus type's ->runtime_resume() callback is being
* executed.
*
* RPM_SUSPENDING Device bus type's ->runtime_suspend() callback is being
* executed.
*/
 
enum rpm_status {
RPM_ACTIVE = 0,
RPM_RESUMING,
RPM_SUSPENDED,
RPM_SUSPENDING,
};
 
/**
* Device run-time power management request types.
*
* RPM_REQ_NONE Do nothing.
*
* RPM_REQ_IDLE Run the device bus type's ->runtime_idle() callback
*
* RPM_REQ_SUSPEND Run the device bus type's ->runtime_suspend() callback
*
* RPM_REQ_AUTOSUSPEND Same as RPM_REQ_SUSPEND, but not until the device has
* been inactive for as long as power.autosuspend_delay
*
* RPM_REQ_RESUME Run the device bus type's ->runtime_resume() callback
*/
 
enum rpm_request {
RPM_REQ_NONE = 0,
RPM_REQ_IDLE,
RPM_REQ_SUSPEND,
RPM_REQ_AUTOSUSPEND,
RPM_REQ_RESUME,
};
 
struct wakeup_source;
struct wake_irq;
struct pm_domain_data;
 
struct pm_subsys_data {
spinlock_t lock;
unsigned int refcount;
#ifdef CONFIG_PM_CLK
struct list_head clock_list;
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS
struct pm_domain_data *domain_data;
#endif
};
 
struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
unsigned int async_suspend:1;
bool is_prepared:1; /* Owned by the PM core */
bool is_suspended:1; /* Ditto */
bool is_noirq_suspended:1;
bool is_late_suspended:1;
bool ignore_children:1;
bool early_init:1; /* Owned by the PM core */
bool direct_complete:1; /* Owned by the PM core */
spinlock_t lock;
#ifdef CONFIG_PM_SLEEP
struct list_head entry;
struct completion completion;
struct wakeup_source *wakeup;
bool wakeup_path:1;
bool syscore:1;
#else
unsigned int should_wakeup:1;
#endif
#ifdef CONFIG_PM
struct timer_list suspend_timer;
unsigned long timer_expires;
struct work_struct work;
wait_queue_head_t wait_queue;
struct wake_irq *wakeirq;
atomic_t usage_count;
atomic_t child_count;
unsigned int disable_depth:3;
unsigned int idle_notification:1;
unsigned int request_pending:1;
unsigned int deferred_resume:1;
unsigned int run_wake:1;
unsigned int runtime_auto:1;
unsigned int no_callbacks:1;
unsigned int irq_safe:1;
unsigned int use_autosuspend:1;
unsigned int timer_autosuspends:1;
unsigned int memalloc_noio:1;
enum rpm_request request;
enum rpm_status runtime_status;
int runtime_error;
int autosuspend_delay;
unsigned long last_busy;
unsigned long active_jiffies;
unsigned long suspended_jiffies;
unsigned long accounting_timestamp;
#endif
struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
void (*set_latency_tolerance)(struct device *, s32);
struct dev_pm_qos *qos;
};
 
extern void update_pm_runtime_accounting(struct device *dev);
extern int dev_pm_get_subsys_data(struct device *dev);
extern void dev_pm_put_subsys_data(struct device *dev);
 
/*
* Power domains provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions along with
* subsystem-level and driver-level callbacks.
*
* @detach: Called when removing a device from the domain.
* @activate: Called before executing probe routines for bus types and drivers.
* @sync: Called after successful driver probe.
* @dismiss: Called after unsuccessful driver probe and after driver removal.
*/
struct dev_pm_domain {
struct dev_pm_ops ops;
void (*detach)(struct device *dev, bool power_off);
int (*activate)(struct device *dev);
void (*sync)(struct device *dev);
void (*dismiss)(struct device *dev);
};
 
/*
* The PM_EVENT_ messages are also used by drivers implementing the legacy
* suspend framework, based on the ->suspend() and ->resume() callbacks common
* for suspend and hibernation transitions, according to the rules below.
*/
 
/* Necessary, because several drivers use PM_EVENT_PRETHAW */
#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE
 
/*
* One transition is triggered by resume(), after a suspend() call; the
* message is implicit:
*
* ON Driver starts working again, responding to hardware events
* and software requests. The hardware may have gone through
* a power-off reset, or it may have maintained state from the
* previous suspend() which the driver will rely on while
* resuming. On most platforms, there are no restrictions on
* availability of resources like clocks during resume().
*
* Other transitions are triggered by messages sent using suspend(). All
* these transitions quiesce the driver, so that I/O queues are inactive.
* That commonly entails turning off IRQs and DMA; there may be rules
* about how to quiesce that are specific to the bus or the device's type.
* (For example, network drivers mark the link state.) Other details may
* differ according to the message:
*
* SUSPEND Quiesce, enter a low power device state appropriate for
* the upcoming system state (such as PCI_D3hot), and enable
* wakeup events as appropriate.
*
* HIBERNATE Enter a low power device state appropriate for the hibernation
* state (eg. ACPI S4) and enable wakeup events as appropriate.
*
* FREEZE Quiesce operations so that a consistent image can be saved;
* but do NOT otherwise enter a low power device state, and do
* NOT emit system wakeup events.
*
* PRETHAW Quiesce as if for FREEZE; additionally, prepare for restoring
* the system from a snapshot taken after an earlier FREEZE.
* Some drivers will need to reset their hardware state instead
* of preserving it, to ensure that it's never mistaken for the
* state which that earlier snapshot had set up.
*
* A minimally power-aware driver treats all messages as SUSPEND, fully
* reinitializes its device during resume() -- whether or not it was reset
* during the suspend/resume cycle -- and can't issue wakeup events.
*
* More power-aware drivers may also use low power states at runtime as
* well as during system sleep states like PM_SUSPEND_STANDBY. They may
* be able to use wakeup events to exit from runtime low-power states,
* or from system low-power states such as standby or suspend-to-RAM.
*/
 
#ifdef CONFIG_PM_SLEEP
extern void device_pm_lock(void);
extern void dpm_resume_start(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
extern void dpm_resume_noirq(pm_message_t state);
extern void dpm_resume_early(pm_message_t state);
extern void dpm_resume(pm_message_t state);
extern void dpm_complete(pm_message_t state);
 
extern void device_pm_unlock(void);
extern int dpm_suspend_end(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
extern int dpm_suspend_noirq(pm_message_t state);
extern int dpm_suspend_late(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);
 
extern void __suspend_report_result(const char *function, void *fn, int ret);
 
#define suspend_report_result(fn, ret) \
do { \
__suspend_report_result(__func__, fn, ret); \
} while (0)
 
extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
extern void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *));
 
extern int pm_generic_prepare(struct device *dev);
extern int pm_generic_suspend_late(struct device *dev);
extern int pm_generic_suspend_noirq(struct device *dev);
extern int pm_generic_suspend(struct device *dev);
extern int pm_generic_resume_early(struct device *dev);
extern int pm_generic_resume_noirq(struct device *dev);
extern int pm_generic_resume(struct device *dev);
extern int pm_generic_freeze_noirq(struct device *dev);
extern int pm_generic_freeze_late(struct device *dev);
extern int pm_generic_freeze(struct device *dev);
extern int pm_generic_thaw_noirq(struct device *dev);
extern int pm_generic_thaw_early(struct device *dev);
extern int pm_generic_thaw(struct device *dev);
extern int pm_generic_restore_noirq(struct device *dev);
extern int pm_generic_restore_early(struct device *dev);
extern int pm_generic_restore(struct device *dev);
extern int pm_generic_poweroff_noirq(struct device *dev);
extern int pm_generic_poweroff_late(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);
extern void pm_complete_with_resume_check(struct device *dev);
 
#else /* !CONFIG_PM_SLEEP */
 
#define device_pm_lock() do {} while (0)
#define device_pm_unlock() do {} while (0)
 
static inline int dpm_suspend_start(pm_message_t state)
{
return 0;
}
 
#define suspend_report_result(fn, ret) do {} while (0)
 
static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
{
return 0;
}
 
static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
}
 
#define pm_generic_prepare NULL
#define pm_generic_suspend_late NULL
#define pm_generic_suspend_noirq NULL
#define pm_generic_suspend NULL
#define pm_generic_resume_early NULL
#define pm_generic_resume_noirq NULL
#define pm_generic_resume NULL
#define pm_generic_freeze_noirq NULL
#define pm_generic_freeze_late NULL
#define pm_generic_freeze NULL
#define pm_generic_thaw_noirq NULL
#define pm_generic_thaw_early NULL
#define pm_generic_thaw NULL
#define pm_generic_restore_noirq NULL
#define pm_generic_restore_early NULL
#define pm_generic_restore NULL
#define pm_generic_poweroff_noirq NULL
#define pm_generic_poweroff_late NULL
#define pm_generic_poweroff NULL
#define pm_generic_complete NULL
#endif /* !CONFIG_PM_SLEEP */
 
/* How to reorder dpm_list after device_move() */
enum dpm_order {
DPM_ORDER_NONE,
DPM_ORDER_DEV_AFTER_PARENT,
DPM_ORDER_PARENT_BEFORE_DEV,
DPM_ORDER_DEV_LAST,
};
 
#endif /* _LINUX_PM_H */
/drivers/include/linux/pm_runtime.h
0,0 → 1,276
/*
* pm_runtime.h - Device run-time power management helper functions.
*
* Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>
*
* This file is released under the GPLv2.
*/
 
#ifndef _LINUX_PM_RUNTIME_H
#define _LINUX_PM_RUNTIME_H
 
#include <linux/device.h>
#include <linux/pm.h>
 
#include <linux/jiffies.h>
 
/* Runtime PM flag argument bits */
#define RPM_ASYNC 0x01 /* Request is asynchronous */
#define RPM_NOWAIT 0x02 /* Don't wait for concurrent
state change */
#define RPM_GET_PUT 0x04 /* Increment/decrement the
usage_count */
#define RPM_AUTO 0x08 /* Use autosuspend_delay */
 
#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;
 
static inline bool queue_pm_work(struct work_struct *work)
{
return queue_work(pm_wq, work);
}
 
extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
extern int pm_runtime_force_resume(struct device *dev);
 
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
extern void pm_runtime_forbid(struct device *dev);
extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
 
static inline bool pm_children_suspended(struct device *dev)
{
return dev->power.ignore_children
|| !atomic_read(&dev->power.child_count);
}
 
static inline void pm_runtime_get_noresume(struct device *dev)
{
atomic_inc(&dev->power.usage_count);
}
 
static inline void pm_runtime_put_noidle(struct device *dev)
{
atomic_add_unless(&dev->power.usage_count, -1, 0);
}
 
static inline bool device_run_wake(struct device *dev)
{
return dev->power.run_wake;
}
 
static inline void device_set_run_wake(struct device *dev, bool enable)
{
dev->power.run_wake = enable;
}
 
static inline bool pm_runtime_suspended(struct device *dev)
{
return dev->power.runtime_status == RPM_SUSPENDED
&& !dev->power.disable_depth;
}
 
static inline bool pm_runtime_active(struct device *dev)
{
return dev->power.runtime_status == RPM_ACTIVE
|| dev->power.disable_depth;
}
 
static inline bool pm_runtime_status_suspended(struct device *dev)
{
return dev->power.runtime_status == RPM_SUSPENDED;
}
 
static inline bool pm_runtime_enabled(struct device *dev)
{
return !dev->power.disable_depth;
}
 
static inline bool pm_runtime_callbacks_present(struct device *dev)
{
return !dev->power.no_callbacks;
}
 
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
ACCESS_ONCE(dev->power.last_busy) = jiffies;
}
 
static inline bool pm_runtime_is_irq_safe(struct device *dev)
{
return dev->power.irq_safe;
}
 
#else /* !CONFIG_PM */
 
static inline bool queue_pm_work(struct work_struct *work) { return false; }
 
static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
 
static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
{
return -ENOSYS;
}
static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
return -ENOSYS;
}
static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
{
return 1;
}
static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
return -ENOSYS;
}
static inline int __pm_runtime_set_status(struct device *dev,
unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
static inline void pm_runtime_enable(struct device *dev) {}
static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
 
static inline bool pm_children_suspended(struct device *dev) { return false; }
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool device_run_wake(struct device *dev) { return false; }
static inline void device_set_run_wake(struct device *dev, bool enable) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }
 
static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
 
static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
int delay) {}
static inline unsigned long pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
 
#endif /* !CONFIG_PM */
 
static inline int pm_runtime_idle(struct device *dev)
{
return __pm_runtime_idle(dev, 0);
}
 
static inline int pm_runtime_suspend(struct device *dev)
{
return __pm_runtime_suspend(dev, 0);
}
 
static inline int pm_runtime_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_AUTO);
}
 
static inline int pm_runtime_resume(struct device *dev)
{
return __pm_runtime_resume(dev, 0);
}
 
static inline int pm_request_idle(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_ASYNC);
}
 
static inline int pm_request_resume(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_ASYNC);
}
 
static inline int pm_request_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}
 
static inline int pm_runtime_get(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}
 
static inline int pm_runtime_get_sync(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_GET_PUT);
}
 
static inline int pm_runtime_put(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}
 
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev,
RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}
 
static inline int pm_runtime_put_sync(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_GET_PUT);
}
 
static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_GET_PUT);
}
 
static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}
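Taken together, these helpers form the standard driver idiom: resume and take a usage reference before touching the hardware, then record activity and drop the reference with autosuspend on the way out. A hedged sketch (my_dev_io() is illustrative):

static int my_dev_io(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* resume device if suspended */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the usage count */
		return ret;
	}
	/* ... perform I/O against the now-active device ... */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* suspend after autosuspend_delay */
	return 0;
}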
 
static inline int pm_runtime_set_active(struct device *dev)
{
return __pm_runtime_set_status(dev, RPM_ACTIVE);
}
 
static inline void pm_runtime_set_suspended(struct device *dev)
{
__pm_runtime_set_status(dev, RPM_SUSPENDED);
}
 
static inline void pm_runtime_disable(struct device *dev)
{
__pm_runtime_disable(dev, true);
}
 
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
__pm_runtime_use_autosuspend(dev, true);
}
 
static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
{
__pm_runtime_use_autosuspend(dev, false);
}
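
/*
 * Editor's sketch (not from the original header): the usual
 * reference-counted pattern built from the helpers above. The device
 * pointer and the I/O step are hypothetical.
 */
#if 0 /* usage sketch, compile-disabled */
static int example_do_io(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* resume + take usage ref */

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* drop the ref from get_sync */
		return ret;
	}
	/* ... touch the hardware while it is guaranteed active ... */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* drop ref, suspend after delay */
	return 0;
}
#endif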
 
#endif
/drivers/include/linux/poison.h
19,8 → 19,8
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
 
/********** include/linux/timer.h **********/
/*
69,10 → 69,6
#define ATM_POISON_FREE 0x12
#define ATM_POISON 0xdeadbeef
 
/********** net/ **********/
#define NEIGHBOR_DEAD 0xdeadbeef
#define NETFILTER_LINK_POISON 0xdead57ac
 
/********** kernel/mutexes **********/
#define MUTEX_DEBUG_INIT 0x11
#define MUTEX_DEBUG_FREE 0x22
83,7 → 79,4
/********** security/ **********/
#define KEY_DESTROY 0xbd
 
/********** sound/oss/ **********/
#define OSS_POISON_FREE 0xAB
 
#endif
/drivers/include/linux/preempt.h
10,17 → 10,124
#include <linux/list.h>
 
/*
* We use the MSB mostly because its available; see <linux/preempt_mask.h> for
* the other bits -- can't include that header due to inclusion hell.
* We put the hardirq and softirq counter into the preemption
* counter. The bitmask has the following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
*
* The hardirq count could in theory be the same as the number of
* interrupts in the system, but we run all interrupt handlers with
* interrupts disabled, so we cannot have nesting interrupts. Though
* there are a few palaeontologic drivers which reenable interrupts in
* the handler, so we need more than one bit here.
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0x000f0000
* NMI_MASK: 0x00100000
* PREEMPT_NEED_RESCHED: 0x80000000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 4
#define NMI_BITS 1
 
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
 
#define __IRQ_MASK(x) ((1UL << (x))-1)
 
#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
 
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
 
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
 
/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED 0x80000000
 
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
 
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
| NMI_MASK))
 
/*
* Are we doing bottom half or hardware interrupt processing?
*
* in_irq()             - We're in hard IRQ context
* in_softirq()         - We're processing a softirq or have BH disabled
* in_interrupt()       - We're in hard IRQ, softirq or NMI context
* in_serving_softirq() - We're currently processing a softirq
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
 
/*
* Are we in NMI context?
*/
#define in_nmi() (preempt_count() & NMI_MASK)
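
/*
 * Editor's sketch: with the layout above, one hardirq nested over a
 * softirq yields preempt_count() == HARDIRQ_OFFSET + SOFTIRQ_OFFSET,
 * so in_irq(), in_softirq() and in_interrupt() are all non-zero while
 * the low byte still holds the preempt_disable() nesting depth.
 */
#if 0 /* illustration only */
static inline unsigned int example_preempt_depth(void)
{
	return preempt_count() & PREEMPT_MASK;	/* pure nesting depth */
}
#endif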
 
/*
* The preempt_count offset after preempt_disable();
*/
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET 0
#endif
 
/*
* The preempt_count offset after spin_lock()
*/
#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
 
/*
* The preempt_count offset needed for things like:
*
* spin_lock_bh()
*
* Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
* softirqs, such that unlock sequences of:
*
* spin_unlock();
* local_bh_enable();
*
* Work as expected.
*/
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
 
/*
* Are we running in atomic context? WARNING: this macro cannot
* always detect atomic context; in particular, it cannot know about
* held spinlocks in non-preemptible kernels. Thus it should not be
* used in the general case to determine whether sleeping is possible.
* Do not use in_atomic() in driver code.
*/
#define in_atomic() (preempt_count() != 0)
 
/*
* Check whether we were atomic before we did preempt_disable():
* (used by the scheduler)
*/
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
 
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#define preempt_count_dec_and_test() \
({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val) __preempt_count_add(val)
#define preempt_count_sub(val) __preempt_count_sub(val)
49,6 → 156,8
 
#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
 
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
57,52 → 166,46
__preempt_schedule(); \
} while (0)
 
#define preempt_enable_notrace() \
do { \
barrier(); \
if (unlikely(__preempt_count_dec_and_test())) \
__preempt_schedule_notrace(); \
} while (0)
 
#define preempt_check_resched() \
do { \
if (should_resched()) \
if (should_resched(0)) \
__preempt_schedule(); \
} while (0)
 
#else
#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
barrier(); \
preempt_count_dec(); \
} while (0)
#define preempt_check_resched() do { } while (0)
#endif
 
#define preempt_disable_notrace() \
#define preempt_enable_notrace() \
do { \
__preempt_count_inc(); \
barrier(); \
} while (0)
 
#define preempt_enable_no_resched_notrace() \
do { \
barrier(); \
__preempt_count_dec(); \
} while (0)
 
#ifdef CONFIG_PREEMPT
#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPT */
 
#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif
 
#define preempt_enable_notrace() \
#define preempt_disable_notrace() \
do { \
__preempt_count_inc(); \
barrier(); \
if (unlikely(__preempt_count_dec_and_test())) \
__preempt_schedule_context(); \
} while (0)
#else
#define preempt_enable_notrace() \
 
#define preempt_enable_no_resched_notrace() \
do { \
barrier(); \
__preempt_count_dec(); \
} while (0)
#endif
 
#else /* !CONFIG_PREEMPT_COUNT */
 
121,6 → 224,7
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
#define preemptible() 0
 
#endif /* CONFIG_PREEMPT_COUNT */
 
180,6 → 284,8
struct preempt_ops *ops;
};
 
void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);
 
/drivers/include/linux/printk.h
65,6 → 65,10
*/
#define DEPRECATED "[Deprecated]: "
 
/*
* Dummy printk for disabled debugging statements to use whilst maintaining
* gcc's format and side-effect checking.
*/
static inline __printf(1, 2)
int no_printk(const char *fmt, ...)
{
103,6 → 107,11
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) \
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
/*
* Like KERN_CONT, pr_cont() should only be used to continue a log
* line whose earlier fragment did not end in a newline ('\n').
* Otherwise the output falls back to the KERN_DEFAULT log level.
*/
#define pr_cont(fmt, ...) \
printk(KERN_CONT fmt, ##__VA_ARGS__)
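
/*
 * Editor's sketch: a driver-local debug macro that compiles away via
 * no_printk(), and a pr_cont() line continuation. MY_DEBUG and the
 * messages are hypothetical.
 */
#if 0
#ifdef MY_DEBUG
#define my_dbg(fmt, ...)	pr_info(fmt, ##__VA_ARGS__)
#else
#define my_dbg(fmt, ...)	no_printk(fmt, ##__VA_ARGS__)	/* still type-checked */
#endif

static inline void example_dump(int a, int b)
{
	pr_info("values:");		/* deliberately no '\n' yet */
	pr_cont(" %d %d\n", a, b);	/* continues the same line */
}
#endif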
 
250,9 → 259,9
DUMP_PREFIX_ADDRESS,
DUMP_PREFIX_OFFSET
};
extern void hex_dump_to_buffer(const void *buf, size_t len,
int rowsize, int groupsize,
char *linebuf, size_t linebuflen, bool ascii);
extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
int groupsize, char *linebuf, size_t linebuflen,
bool ascii);
 
extern void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
/drivers/include/linux/pwm.h
0,0 → 1,219
#ifndef __LINUX_PWM_H
#define __LINUX_PWM_H
 
#include <linux/err.h>
#include <linux/mutex.h>
//#include <linux/of.h>
 
struct device;
struct pwm_device;
struct seq_file;
 
#if IS_ENABLED(CONFIG_PWM)
/*
* pwm_request - request a PWM device
*/
struct pwm_device *pwm_request(int pwm_id, const char *label);
 
/*
* pwm_free - free a PWM device
*/
void pwm_free(struct pwm_device *pwm);
 
/*
* pwm_config - change a PWM device configuration
*/
int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns);
 
/*
* pwm_enable - start a PWM output toggling
*/
int pwm_enable(struct pwm_device *pwm);
 
/*
* pwm_disable - stop a PWM output toggling
*/
void pwm_disable(struct pwm_device *pwm);
#else
static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
{
return ERR_PTR(-ENODEV);
}
 
static inline void pwm_free(struct pwm_device *pwm)
{
}
 
static inline int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
{
return -EINVAL;
}
 
static inline int pwm_enable(struct pwm_device *pwm)
{
return -EINVAL;
}
 
static inline void pwm_disable(struct pwm_device *pwm)
{
}
#endif
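
/*
 * Editor's sketch: minimal use of the legacy API declared above. The
 * pwm_id, label and timing values are hypothetical.
 */
#if 0
static int example_backlight_on(void)
{
	struct pwm_device *pwm = pwm_request(0, "backlight");

	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/* 1 ms period, 50% duty cycle (both times in nanoseconds) */
	pwm_config(pwm, 500000, 1000000);
	return pwm_enable(pwm);
	/* later: pwm_disable(pwm); pwm_free(pwm); */
}
#endif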
 
struct pwm_chip;
 
/**
* enum pwm_polarity - polarity of a PWM signal
* @PWM_POLARITY_NORMAL: a high signal for the duration of the duty-
* cycle, followed by a low signal for the remainder of the pulse
* period
* @PWM_POLARITY_INVERSED: a low signal for the duration of the duty-
* cycle, followed by a high signal for the remainder of the pulse
* period
*/
enum pwm_polarity {
PWM_POLARITY_NORMAL,
PWM_POLARITY_INVERSED,
};
 
enum {
PWMF_REQUESTED = 1 << 0,
PWMF_ENABLED = 1 << 1,
PWMF_EXPORTED = 1 << 2,
};
 
/**
* struct pwm_device - PWM channel object
* @label: name of the PWM device
* @flags: flags associated with the PWM device
* @hwpwm: per-chip relative index of the PWM device
* @pwm: global index of the PWM device
* @chip: PWM chip providing this PWM device
* @chip_data: chip-private data associated with the PWM device
* @lock: used to serialize accesses to the PWM device where necessary
* @period: period of the PWM signal (in nanoseconds)
* @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
* @polarity: polarity of the PWM signal
*/
struct pwm_device {
const char *label;
unsigned long flags;
unsigned int hwpwm;
unsigned int pwm;
struct pwm_chip *chip;
void *chip_data;
struct mutex lock;
 
unsigned int period;
unsigned int duty_cycle;
enum pwm_polarity polarity;
};
 
static inline bool pwm_is_enabled(const struct pwm_device *pwm)
{
return test_bit(PWMF_ENABLED, &pwm->flags);
}
 
static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
{
if (pwm)
pwm->period = period;
}
 
static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
{
return pwm ? pwm->period : 0;
}
 
static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
{
if (pwm)
pwm->duty_cycle = duty;
}
 
static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
{
return pwm ? pwm->duty_cycle : 0;
}
 
/*
* pwm_set_polarity - configure the polarity of a PWM signal
*/
int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
 
static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm)
{
return pwm ? pwm->polarity : PWM_POLARITY_NORMAL;
}
 
/**
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
* @free: optional hook for freeing a PWM
* @config: configure duty cycles and period length for this PWM
* @set_polarity: configure the polarity of this PWM
* @enable: enable PWM output toggling
* @disable: disable PWM output toggling
* @dbg_show: optional routine to show contents in debugfs
* @owner: helps prevent removal of modules exporting active PWMs
*/
struct pwm_ops {
int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns);
int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
enum pwm_polarity polarity);
int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
#ifdef CONFIG_DEBUG_FS
void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s);
#endif
struct module *owner;
};
 
#if IS_ENABLED(CONFIG_PWM)
int pwm_set_chip_data(struct pwm_device *pwm, void *data);
void *pwm_get_chip_data(struct pwm_device *pwm);
struct pwm_device *pwm_get(struct device *dev, const char *con_id);
struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id);
void pwm_put(struct pwm_device *pwm);
 
struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id);
void devm_pwm_put(struct device *dev, struct pwm_device *pwm);
 
bool pwm_can_sleep(struct pwm_device *pwm);
#else
static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
{
return -EINVAL;
}
 
static inline void *pwm_get_chip_data(struct pwm_device *pwm)
{
return NULL;
}
static inline struct pwm_device *pwm_get(struct device *dev,
const char *consumer)
{
return ERR_PTR(-ENODEV);
}
static inline void pwm_put(struct pwm_device *pwm)
{
}
 
static inline struct pwm_device *devm_pwm_get(struct device *dev,
const char *consumer)
{
return ERR_PTR(-ENODEV);
}
static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
{
}
 
static inline bool pwm_can_sleep(struct pwm_device *pwm)
{
return false;
}
#endif
 
#endif /* __LINUX_PWM_H */
/drivers/include/linux/rbtree.h
51,7 → 51,7
 
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
 
/* 'empty' nodes are nodes that are known not to be inserted in an rbree */
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) \
((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node) \
85,6 → 85,15
*rb_link = node;
}
 
static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
{
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = NULL;
 
rcu_assign_pointer(*rb_link, node);
}
 
#define rb_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
____ptr ? rb_entry(____ptr, type, member) : NULL; \
91,13 → 100,21
})
 
/**
* rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
* given type safe against removal of rb_node entry
* rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
* given type allowing the backing memory of @pos to be invalidated
*
* @pos: the 'type *' to use as a loop cursor.
* @n: another 'type *' to use as temporary storage
* @root: 'rb_root *' of the rbtree.
* @field: the name of the rb_node field within 'type'.
*
* rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
* list_for_each_entry_safe() and allows the iteration to continue independent
* of changes to @pos by the body of the loop.
*
* Note, however, that it cannot handle other modifications that re-order the
* rbtree it is iterating over. This includes calling rb_erase() on @pos, as
* rb_erase() may rebalance the tree, causing us to miss some nodes.
*/
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
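
/*
 * Editor's sketch: the canonical use of the iterator above is tearing
 * down an entire tree without per-node rebalancing. The entry type and
 * the kfree() call are hypothetical here.
 */
#if 0
struct example_entry {
	struct rb_node rb;
};

static void example_destroy_tree(struct rb_root *root)
{
	struct example_entry *pos, *n;

	rbtree_postorder_for_each_entry_safe(pos, n, root, rb)
		kfree(pos);	/* children were already visited */
	*root = RB_ROOT;
}
#endif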
/drivers/include/linux/rbtree_augmented.h
123,11 → 123,11
{
if (parent) {
if (parent->rb_left == old)
parent->rb_left = new;
WRITE_ONCE(parent->rb_left, new);
else
parent->rb_right = new;
WRITE_ONCE(parent->rb_right, new);
} else
root->rb_node = new;
WRITE_ONCE(root->rb_node, new);
}
 
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
137,7 → 137,8
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
struct rb_node *child = node->rb_right, *tmp = node->rb_left;
struct rb_node *child = node->rb_right;
struct rb_node *tmp = node->rb_left;
struct rb_node *parent, *rebalance;
unsigned long pc;
 
167,6 → 168,7
tmp = parent;
} else {
struct rb_node *successor = child, *child2;
 
tmp = child->rb_left;
if (!tmp) {
/*
180,6 → 182,7
*/
parent = successor;
child2 = successor->rb_right;
 
augment->copy(node, successor);
} else {
/*
201,19 → 204,23
successor = tmp;
tmp = tmp->rb_left;
} while (tmp);
parent->rb_left = child2 = successor->rb_right;
successor->rb_right = child;
child2 = successor->rb_right;
WRITE_ONCE(parent->rb_left, child2);
WRITE_ONCE(successor->rb_right, child);
rb_set_parent(child, successor);
 
augment->copy(node, successor);
augment->propagate(parent, successor);
}
 
successor->rb_left = tmp = node->rb_left;
tmp = node->rb_left;
WRITE_ONCE(successor->rb_left, tmp);
rb_set_parent(tmp, successor);
 
pc = node->__rb_parent_color;
tmp = __rb_parent(pc);
__rb_change_child(node, successor, tmp, root);
 
if (child2) {
successor->__rb_parent_color = pc;
rb_set_parent_color(child2, parent, RB_BLACK);
/drivers/include/linux/rculist.h
29,8 → 29,8
*/
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
ACCESS_ONCE(list->next) = list;
ACCESS_ONCE(list->prev) = list;
WRITE_ONCE(list->next, list);
WRITE_ONCE(list->prev, list);
}
 
/*
247,10 → 247,7
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_entry_rcu(ptr, type, member) \
({ \
typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
})
container_of(lockless_dereference(ptr), type, member)
 
/**
* Where are list_empty_rcu() and list_first_entry_rcu()?
288,7 → 285,7
#define list_first_or_null_rcu(ptr, type, member) \
({ \
struct list_head *__ptr = (ptr); \
struct list_head *__next = ACCESS_ONCE(__ptr->next); \
struct list_head *__next = READ_ONCE(__ptr->next); \
likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
 
524,11 → 521,11
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue_rcu(pos, member) \
for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
typeof(*(pos)), member); \
for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
typeof(*(pos)), member))
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member))
 
/**
* hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
536,11 → 533,11
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
typeof(*(pos)), member); \
for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
typeof(*(pos)), member))
pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member))
 
/**
* hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
549,8 → 546,8
*/
#define hlist_for_each_entry_from_rcu(pos, member) \
for (; pos; \
pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
typeof(*(pos)), member))
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member))
 
#endif /* __KERNEL__ */
#endif
/drivers/include/linux/rcupdate.h
44,10 → 44,32
//#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
 
#include <asm/barrier.h>
 
extern int rcu_expedited; /* for sysctl */
 
#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_expedited(void) /* Internal RCU use. */
{
return false;
}
 
static inline void rcu_expedite_gp(void)
{
}
 
static inline void rcu_unexpedite_gp(void)
{
}
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_expedited(void); /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */
 
enum rcutorture_type {
RCU_FLAVOR,
RCU_BH_FLAVOR,
138,7 → 160,7
* more than one CPU).
*/
void call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *head));
rcu_callback_t func);
 
#else /* #ifdef CONFIG_PREEMPT_RCU */
 
169,7 → 191,7
* memory ordering guarantees.
*/
void call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head));
rcu_callback_t func);
 
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
191,7 → 213,7
* memory ordering guarantees.
*/
void call_rcu_sched(struct rcu_head *head,
void (*func)(struct rcu_head *rcu));
rcu_callback_t func);
 
void synchronize_sched(void);
 
213,7 → 235,7
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
*/
void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);
 
236,11 → 258,13
 
static inline void __rcu_read_lock(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
preempt_disable();
}
 
static inline void __rcu_read_unlock(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
preempt_enable();
}
 
258,14 → 282,13
 
/* Internal to kernel */
void rcu_init(void);
void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
struct notifier_block;
void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);
int rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
 
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
279,7 → 302,7
}
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
 
#ifdef CONFIG_RCU_USER_QS
#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
287,7 → 310,7
static inline void rcu_user_exit(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
#endif /* CONFIG_RCU_USER_QS */
#endif /* CONFIG_NO_HZ_FULL */
 
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
331,12 → 354,13
extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
do { \
if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
rcu_all_qs(); \
if (READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
#define rcu_note_voluntary_context_switch(t) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#endif /* #else #ifdef CONFIG_TASKS_RCU */
 
/**
361,10 → 385,6
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
 
typedef void call_rcu_func_t(struct rcu_head *head,
void (*func)(struct rcu_head *head));
void wait_rcu_gp(call_rcu_func_t crf);
 
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
576,17 → 596,17
 
#define __rcu_access_pointer(p, space) \
({ \
typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_check(p, c, space) \
({ \
typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
/* Dependency order vs. p above. */ \
typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
rcu_dereference_sparse(p, space); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
((typeof(*p) __force __kernel *)(_________p1)); \
((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
595,21 → 615,6
((typeof(*p) __force __kernel *)(p)); \
})
 
#define __rcu_access_index(p, space) \
({ \
typeof(p) _________p1 = ACCESS_ONCE(p); \
rcu_dereference_sparse(p, space); \
(_________p1); \
})
#define __rcu_dereference_index_check(p, c) \
({ \
typeof(p) _________p1 = ACCESS_ONCE(p); \
rcu_lockdep_assert(c, \
"suspicious rcu_dereference_index_check() usage"); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
 
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
* @v: The value to statically initialize with.
617,21 → 622,6
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
 
/**
* lockless_dereference() - safely load a pointer for later dereference
* @p: The pointer to load
*
* Similar to rcu_dereference(), but for situations where the pointed-to
* object's lifetime is managed by something other than RCU. That
* "something other" might be reference counting or simple immortality.
*/
#define lockless_dereference(p) \
({ \
typeof(p) _________p1 = ACCESS_ONCE(p); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
 
/**
* rcu_assign_pointer() - assign to RCU-protected pointer
* @p: pointer to assign to
* @v: value to assign (publish)
669,7 → 659,7
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
* smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
* smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
* when the value of this pointer is accessed, but the pointer is not
* dereferenced, for example, when testing an RCU-protected pointer against
* NULL. Although rcu_access_pointer() may also be used in cases where
719,7 → 709,7
* annotated as __rcu.
*/
#define rcu_dereference_check(p, c) \
__rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
__rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
 
/**
* rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
729,7 → 719,7
* This is the RCU-bh counterpart to rcu_dereference_check().
*/
#define rcu_dereference_bh_check(p, c) \
__rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
 
/**
* rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
739,7 → 729,7
* This is the RCU-sched counterpart to rcu_dereference_check().
*/
#define rcu_dereference_sched_check(p, c) \
__rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
__rcu)
 
#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
754,47 → 744,12
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
 
/**
* rcu_access_index() - fetch RCU index with no dereferencing
* @p: The index to read
*
* Return the value of the specified RCU-protected index, but omit the
* smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
* when the value of this index is accessed, but the index is not
* dereferenced, for example, when testing an RCU-protected index against
* -1. Although rcu_access_index() may also be used in cases where
* update-side locks prevent the value of the index from changing, you
* should instead use rcu_dereference_index_protected() for this use case.
*/
#define rcu_access_index(p) __rcu_access_index((p), __rcu)
 
/**
* rcu_dereference_index_check() - rcu_dereference for indices with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Similar to rcu_dereference_check(), but omits the sparse checking.
* This allows rcu_dereference_index_check() to be used on integers,
* which can then be used as array indices. Attempting to use
* rcu_dereference_check() on an integer will give compiler warnings
* because the sparse address-space mechanism relies on dereferencing
* the RCU-protected pointer. Dereferencing integers is not something
* that even gcc will put up with.
*
* Note that this function does not implicitly check for RCU read-side
* critical sections. If this function gains lots of uses, it might
* make sense to provide versions for each flavor of RCU, but it does
* not make sense as of early 2010.
*/
#define rcu_dereference_index_check(p, c) \
__rcu_dereference_index_check((p), (c))
 
/**
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
* both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
* both the smp_read_barrier_depends() and the READ_ONCE(). This
* is useful in cases where update-side locks prevent the value of the
* pointer from changing. Please note that this primitive does -not-
* prevent the compiler from repeating this reference or combining it
932,9 → 887,9
{
rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
rcu_lock_release(&rcu_lock_map);
__release(RCU);
__rcu_read_unlock();
rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}
 
/**
1120,13 → 1075,13
#define kfree_rcu(ptr, rcu_head) \
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
 
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline int rcu_needs_cpu(unsigned long *delta_jiffies)
#ifdef CONFIG_TINY_RCU
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
*delta_jiffies = ULONG_MAX;
*nextevt = KTIME_MAX;
return 0;
}
#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
#endif /* #ifdef CONFIG_TINY_RCU */
 
#if defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
/drivers/include/linux/rcutiny.h
92,21 → 92,53
}
 
/*
* Return the number of grace periods.
* Return the number of grace periods started.
*/
static inline long rcu_batches_completed(void)
static inline unsigned long rcu_batches_started(void)
{
return 0;
}
 
/*
* Return the number of bottom-half grace periods.
* Return the number of bottom-half grace periods started.
*/
static inline long rcu_batches_completed_bh(void)
static inline unsigned long rcu_batches_started_bh(void)
{
return 0;
}
 
/*
* Return the number of sched grace periods started.
*/
static inline unsigned long rcu_batches_started_sched(void)
{
return 0;
}
 
/*
* Return the number of grace periods completed.
*/
static inline unsigned long rcu_batches_completed(void)
{
return 0;
}
 
/*
* Return the number of bottom-half grace periods completed.
*/
static inline unsigned long rcu_batches_completed_bh(void)
{
return 0;
}
 
/*
* Return the number of sched grace periods completed.
*/
static inline unsigned long rcu_batches_completed_sched(void)
{
return 0;
}
 
static inline void rcu_force_quiescent_state(void)
{
}
127,6 → 159,22
{
}
 
static inline void rcu_idle_enter(void)
{
}
 
static inline void rcu_idle_exit(void)
{
}
 
static inline void rcu_irq_enter(void)
{
}
 
static inline void rcu_irq_exit(void)
{
}
 
static inline void exit_rcu(void)
{
}
154,7 → 202,10
return true;
}
 
 
#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
static inline void rcu_all_qs(void)
{
}
 
#endif /* __LINUX_RCUTINY_H */
/drivers/include/linux/scatterlist.h
2,13 → 2,38
#define _LINUX_SCATTERLIST_H
 
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mm.h>
 
#include <asm/types.h>
#include <asm/scatterlist.h>
//#include <asm/io.h>
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
unsigned long sg_magic;
#endif
unsigned long page_link;
unsigned int offset;
unsigned int length;
dma_addr_t dma_address;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
unsigned int dma_length;
#endif
};
 
/*
* These macros should be used after a dma_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries dma_map_sg
* returns, or alternatively stop on the first sg_dma_len(sg) which
* is 0.
*/
#define sg_dma_address(sg) ((sg)->dma_address)
 
#ifdef CONFIG_NEED_SG_DMA_LENGTH
#define sg_dma_len(sg) ((sg)->dma_length)
#else
#define sg_dma_len(sg) ((sg)->length)
#endif
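
/*
 * Editor's sketch: consuming a mapped list with the accessors above.
 * The device-programming step is hypothetical.
 */
#if 0
static void example_program_dma(struct scatterlist *sgl, int mapped_nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, mapped_nents, i) {
		if (!sg_dma_len(sg))	/* alternative stop condition */
			break;
		/* feed sg_dma_address(sg) / sg_dma_len(sg) to the device */
	}
}
#endif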
 
struct sg_table {
struct scatterlist *sgl; /* the list */
unsigned int nents; /* number of mapped entries */
18,10 → 43,9
/*
* Notes on SG table design.
*
* Architectures must provide an unsigned long page_link field in the
* scatterlist struct. We use that to place the page pointer AND encode
* information about the sg table as well. The two lower bits are reserved
* for this information.
* We use the unsigned long page_link field in the scatterlist struct to place
* the page pointer AND encode information about the sg table as well. The two
* lower bits are reserved for this information.
*
* If bit 0 is set, then the page_link contains a pointer to the next sg
* table list. Otherwise the next entry is at sg + 1.
108,14 → 132,14
* @buflen: Data length
*
**/
//static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
// unsigned int buflen)
//{
//#ifdef CONFIG_DEBUG_SG
// BUG_ON(!virt_addr_valid(buf));
//#endif
// sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
//}
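/*
 * Port note: the (now enabled) helper below does not go through
 * virt_to_page(); it reinterprets the page-aligned virtual address
 * itself as the struct page pointer (the 0xFFFFF000 mask).
 */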
static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
unsigned int buflen)
{
#ifdef CONFIG_DEBUG_SG
BUG_ON(!virt_addr_valid(buf));
#endif
	sg_set_page(sg, (struct page *)((unsigned)buf & 0xFFFFF000),
		    buflen, offset_in_page(buf));
}
 
/*
* Loop over each sg element, following the pointer to a new list if necessary
136,10 → 160,6
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
struct scatterlist *sgl)
{
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
BUG();
#endif
 
/*
* offset and length are unused for chain entry. Clear them.
*/
221,10 → 241,16
//}
 
int sg_nents(struct scatterlist *sg);
int sg_nents_for_len(struct scatterlist *sg, u64 len);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
int sg_split(struct scatterlist *in, const int in_mapped_nents,
const off_t skip, const int nb_splits,
const size_t *split_sizes,
struct scatterlist **out, int *out_mapped_nents,
gfp_t gfp_mask);
 
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
239,13 → 265,16
unsigned long offset, unsigned long size,
gfp_t gfp_mask);
 
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
size_t buflen, off_t skip, bool to_buffer);
 
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
const void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
 
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
const void *buf, size_t buflen, off_t skip);
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
 
349,4 → 378,6
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
 
#define dma_unmap_sg(d, s, n, r)
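/* Port stub: the macro above expands to nothing in this port. */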
 
#endif /* _LINUX_SCATTERLIST_H */
/drivers/include/linux/sched.h
2,7 → 2,47
#define _LINUX_SCHED_H
 
 
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
*
* We have two separate sets of flags: task->state
* is about runnability, while task->exit_state are
* about the task exiting. Confusing, but this way
* modifying one set can't modify the other one by
* mistake.
*/
#define TASK_RUNNING 0
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
#define __TASK_STOPPED 4
#define __TASK_TRACED 8
/* in tsk->exit_state */
#define EXIT_DEAD 16
#define EXIT_ZOMBIE 32
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_PARKED 512
#define TASK_NOLOAD 1024
#define TASK_STATE_MAX 2048
/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
 
#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
 
/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
 
/* get_task_state() */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
/* Task command name length */
#define TASK_COMM_LEN 16
 
/drivers/include/linux/seq_file.h
1,9 → 1,26
/* stub */
#ifndef _LINUX_SEQ_FILE_H
#define _LINUX_SEQ_FILE_H
 
#include <linux/types.h>
#include <linux/string.h>
#include <linux/bug.h>
#include <linux/mutex.h>
 
struct seq_file {
char *buf;
size_t size;
size_t from;
size_t count;
size_t pad_until;
loff_t index;
loff_t read_pos;
u64 version;
void *private;
};
 
int seq_puts(struct seq_file *m, const char *s);
 
__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
 
 
#endif
/drivers/include/linux/seqlock.h
35,6 → 35,7
#include <linux/spinlock.h>
//#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <asm/processor.h>
 
/*
108,7 → 109,7
unsigned ret;
 
repeat:
ret = ACCESS_ONCE(s->sequence);
ret = READ_ONCE(s->sequence);
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
127,7 → 128,7
*/
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
unsigned ret = ACCESS_ONCE(s->sequence);
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
return ret;
}
179,7 → 180,7
*/
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = ACCESS_ONCE(s->sequence);
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
return ret & ~1;
}
236,6 → 237,79
/*
* raw_write_seqcount_latch - redirect readers to even/odd copy
* @s: pointer to seqcount_t
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
* interrupt the modification -- e.g. the concurrency is strictly between CPUs
* -- you most likely do not need this.
*
* Where the traditional RCU/lockless data structures rely on atomic
* modifications to ensure queries observe either the old or the new state the
* latch allows the same for non-atomic updates. The trade-off is doubling the
* cost of storage; we have to maintain two copies of the entire data
* structure.
*
* Very simply put: we first modify one copy and then the other. This ensures
* there is always one copy in a stable state, ready to give us an answer.
*
* The basic form is a data structure like:
*
* struct latch_struct {
* seqcount_t seq;
* struct data_struct data[2];
* };
*
* Where a modification, which is assumed to be externally serialized, does the
* following:
*
* void latch_modify(struct latch_struct *latch, ...)
* {
* smp_wmb(); <- Ensure that the last data[1] update is visible
* latch->seq++;
* smp_wmb(); <- Ensure that the seqcount update is visible
*
* modify(latch->data[0], ...);
*
* smp_wmb(); <- Ensure that the data[0] update is visible
* latch->seq++;
* smp_wmb(); <- Ensure that the seqcount update is visible
*
* modify(latch->data[1], ...);
* }
*
* The query will have a form like:
*
* struct entry *latch_query(struct latch_struct *latch, ...)
* {
* struct entry *entry;
* unsigned seq, idx;
*
* do {
* seq = lockless_dereference(latch->seq);
*
* idx = seq & 0x01;
* entry = data_query(latch->data[idx], ...);
*
* smp_rmb();
* } while (seq != latch->seq);
*
* return entry;
* }
*
* So during the modification, queries are first redirected to data[1]. Then we
* modify data[0]. When that is complete, we redirect queries back to data[0]
* and we can modify data[1].
*
* NOTE: The non-requirement for atomic modifications does _NOT_ include
* the publishing of new entries in the case where data is a dynamic
* data structure.
*
* An iteration might start in data[0] and get suspended long enough
* to miss an entire modification sequence, once it resumes it might
* observe the new entry.
*
* NOTE: When data is a dynamic data structure; one should use regular RCU
* patterns to manage the lifetimes of the objects within.
*/
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
266,13 → 340,13
}
 
/**
* write_seqcount_barrier - invalidate in-progress read-side seq operations
* write_seqcount_invalidate - invalidate in-progress read-side seq operations
* @s: pointer to seqcount_t
*
* After write_seqcount_barrier, no read-side seq operations will complete
* After write_seqcount_invalidate, no read-side seq operations will complete
* successfully and see data older than this.
*/
static inline void write_seqcount_barrier(seqcount_t *s)
static inline void write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
s->sequence+=2;
/drivers/include/linux/sfi.h
0,0 → 1,209
/* sfi.h Simple Firmware Interface */
 
/*
 
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
 
GPL LICENSE SUMMARY
 
Copyright(c) 2009 Intel Corporation. All rights reserved.
 
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
 
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution
in the file called LICENSE.GPL.
 
BSD LICENSE
 
Copyright(c) 2009 Intel Corporation. All rights reserved.
 
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
 
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
*/
 
#ifndef _LINUX_SFI_H
#define _LINUX_SFI_H
 
//#include <linux/init.h>
#include <linux/types.h>
 
/* Table signatures reserved by the SFI specification */
#define SFI_SIG_SYST "SYST"
#define SFI_SIG_FREQ "FREQ"
#define SFI_SIG_IDLE "IDLE"
#define SFI_SIG_CPUS "CPUS"
#define SFI_SIG_MTMR "MTMR"
#define SFI_SIG_MRTC "MRTC"
#define SFI_SIG_MMAP "MMAP"
#define SFI_SIG_APIC "APIC"
#define SFI_SIG_XSDT "XSDT"
#define SFI_SIG_WAKE "WAKE"
#define SFI_SIG_DEVS "DEVS"
#define SFI_SIG_GPIO "GPIO"
 
#define SFI_SIGNATURE_SIZE 4
#define SFI_OEM_ID_SIZE 6
#define SFI_OEM_TABLE_ID_SIZE 8
 
#define SFI_NAME_LEN 16
 
#define SFI_SYST_SEARCH_BEGIN 0x000E0000
#define SFI_SYST_SEARCH_END 0x000FFFFF
 
#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \
((ptable->header.len - sizeof(struct sfi_table_header)) / \
(sizeof(entry_type)))
/*
* Table structures must be byte-packed to match the SFI specification,
* as they are provided by the BIOS.
*/
struct sfi_table_header {
char sig[SFI_SIGNATURE_SIZE];
u32 len;
u8 rev;
u8 csum;
char oem_id[SFI_OEM_ID_SIZE];
char oem_table_id[SFI_OEM_TABLE_ID_SIZE];
} __packed;
 
struct sfi_table_simple {
struct sfi_table_header header;
u64 pentry[1];
} __packed;
 
/* Comply with UEFI spec 2.1 */
struct sfi_mem_entry {
u32 type;
u64 phys_start;
u64 virt_start;
u64 pages;
u64 attrib;
} __packed;
 
struct sfi_cpu_table_entry {
u32 apic_id;
} __packed;
 
struct sfi_cstate_table_entry {
u32 hint; /* MWAIT hint */
u32 latency; /* latency in ms */
} __packed;
 
struct sfi_apic_table_entry {
u64 phys_addr; /* phy base addr for APIC reg */
} __packed;
 
struct sfi_freq_table_entry {
u32 freq_mhz; /* in MHZ */
u32 latency; /* transition latency in ms */
u32 ctrl_val; /* value to write to PERF_CTL */
} __packed;
 
struct sfi_wake_table_entry {
u64 phys_addr; /* pointer to where the wake vector is located */
} __packed;
 
struct sfi_timer_table_entry {
u64 phys_addr; /* phy base addr for the timer */
u32 freq_hz; /* in HZ */
u32 irq;
} __packed;
 
struct sfi_rtc_table_entry {
u64 phys_addr; /* phy base addr for the RTC */
u32 irq;
} __packed;
 
struct sfi_device_table_entry {
u8 type; /* bus type, I2C, SPI or ...*/
#define SFI_DEV_TYPE_SPI 0
#define SFI_DEV_TYPE_I2C 1
#define SFI_DEV_TYPE_UART 2
#define SFI_DEV_TYPE_HSI 3
#define SFI_DEV_TYPE_IPC 4
 
u8 host_num; /* attached to host 0, 1...*/
u16 addr;
u8 irq;
u32 max_freq;
char name[SFI_NAME_LEN];
} __packed;
 
struct sfi_gpio_table_entry {
char controller_name[SFI_NAME_LEN];
u16 pin_no;
char pin_name[SFI_NAME_LEN];
} __packed;
 
typedef int (*sfi_table_handler) (struct sfi_table_header *table);
 
#ifdef CONFIG_SFI
extern void __init sfi_init(void);
extern int __init sfi_platform_init(void);
extern void __init sfi_init_late(void);
extern int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id,
sfi_table_handler handler);
 
extern int sfi_disabled;
static inline void disable_sfi(void)
{
sfi_disabled = 1;
}
 
#else /* !CONFIG_SFI */
 
static inline void sfi_init(void)
{
}
 
static inline void sfi_init_late(void)
{
}
 
#define sfi_disabled 0
 
static inline int sfi_table_parse(char *signature, char *oem_id,
char *oem_table_id,
sfi_table_handler handler)
{
return -1;
}
 
#endif /* !CONFIG_SFI */
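
/*
 * Editor's sketch: parsing one SFI table through the interface above.
 * The handler and the CPUS table choice are illustrative only.
 */
#if 0
static int example_parse_cpus(struct sfi_table_header *table)
{
	struct sfi_table_simple *sb = (struct sfi_table_simple *)table;
	int num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry);

	/* sb->pentry holds 'num' byte-packed sfi_cpu_table_entry records */
	return num ? 0 : -1;
}

/* sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, example_parse_cpus); */
#endif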
 
#endif /*_LINUX_SFI_H*/
/drivers/include/linux/slab.h
18,7 → 18,7
 
/*
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
* The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
104,7 → 104,11
(unsigned long)ZERO_SIZE_PTR)
 
void __init kmem_cache_init(void);
int slab_is_available(void);
bool slab_is_available(void);
 
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
120,7 → 124,9
}
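
/*
 * Port note: the updated kmalloc() body below zeroes every allocation,
 * i.e. it behaves like kzalloc() rather than Linux's kmalloc().
 */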
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
return __builtin_malloc(size);
void *ret = __builtin_malloc(size);
memset(ret, 0, size);
return ret;
}
 
/**
/drivers/include/linux/spinlock.h
119,7 → 119,7
/*
* Despite its name it doesn't necessarily have to be a full barrier.
* It should only guarantee that a STORE before the critical section
* can not be reordered with a LOAD inside this section.
* can not be reordered with LOADs and STOREs inside this section.
* spin_lock() is the one-way barrier, this LOAD can not escape out
* of the region. So the default implementation simply ensures that
* a STORE can not move into the critical section, smp_wmb() should
129,16 → 129,6
#define smp_mb__before_spinlock() smp_wmb()
#endif
 
/*
* Place this after a lock-acquisition primitive to guarantee that
* an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
* if the UNLOCK and LOCK are executed by the same CPU or if the
* UNLOCK and LOCK operate on the same lock variable.
*/
#ifndef smp_mb__after_unlock_lock
#define smp_mb__after_unlock_lock() do { } while (0)
#endif
 
/**
* raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
189,6 → 179,8
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock_nested(lock, subclass)
# define raw_spin_lock_bh_nested(lock, subclass) \
_raw_spin_lock_bh_nested(lock, subclass)
 
# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
204,6 → 196,7
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
#endif
 
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
292,7 → 285,7
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
 
static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
return &lock->rlock;
}
303,17 → 296,17
raw_spin_lock_init(&(_lock)->rlock); \
} while (0)
 
static inline void spin_lock(spinlock_t *lock)
static __always_inline void spin_lock(spinlock_t *lock)
{
raw_spin_lock(&lock->rlock);
}
 
static inline void spin_lock_bh(spinlock_t *lock)
static __always_inline void spin_lock_bh(spinlock_t *lock)
{
raw_spin_lock_bh(&lock->rlock);
}
 
static inline int spin_trylock(spinlock_t *lock)
static __always_inline int spin_trylock(spinlock_t *lock)
{
return raw_spin_trylock(&lock->rlock);
}
323,12 → 316,17
raw_spin_lock_nested(spinlock_check(lock), subclass); \
} while (0)
 
#define spin_lock_bh_nested(lock, subclass) \
do { \
raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
} while (0)
 
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
} while (0)
 
static inline void spin_lock_irq(spinlock_t *lock)
static __always_inline void spin_lock_irq(spinlock_t *lock)
{
raw_spin_lock_irq(&lock->rlock);
}
343,32 → 341,32
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
 
static inline void spin_unlock(spinlock_t *lock)
static __always_inline void spin_unlock(spinlock_t *lock)
{
raw_spin_unlock(&lock->rlock);
}
 
static inline void spin_unlock_bh(spinlock_t *lock)
static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
raw_spin_unlock_bh(&lock->rlock);
}
 
static inline void spin_unlock_irq(spinlock_t *lock)
static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
raw_spin_unlock_irq(&lock->rlock);
}
 
static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
 
static inline int spin_trylock_bh(spinlock_t *lock)
static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
return raw_spin_trylock_bh(&lock->rlock);
}
 
static inline int spin_trylock_irq(spinlock_t *lock)
static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
return raw_spin_trylock_irq(&lock->rlock);
}
378,22 → 376,22
raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
 
static inline void spin_unlock_wait(spinlock_t *lock)
static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
raw_spin_unlock_wait(&lock->rlock);
}
 
static inline int spin_is_locked(spinlock_t *lock)
static __always_inline int spin_is_locked(spinlock_t *lock)
{
return raw_spin_is_locked(&lock->rlock);
}
 
static inline int spin_is_contended(spinlock_t *lock)
static __always_inline int spin_is_contended(spinlock_t *lock)
{
return raw_spin_is_contended(&lock->rlock);
}
 
static inline int spin_can_lock(spinlock_t *lock)
static __always_inline int spin_can_lock(spinlock_t *lock)
{
return raw_spin_can_lock(&lock->rlock);
}
/drivers/include/linux/spinlock_api_up.h
57,6 → 57,7
 
#define _raw_spin_lock(lock) __LOCK(lock)
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
#define _raw_read_lock(lock) __LOCK(lock)
#define _raw_write_lock(lock) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
/drivers/include/linux/stddef.h
3,7 → 3,6
 
#include <uapi/linux/stddef.h>
 
 
#undef NULL
#define NULL ((void *)0)
 
18,4 → 17,14
#else
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
 
/**
* offsetofend(TYPE, MEMBER)
*
* @TYPE: The type of the structure
* @MEMBER: The member within the structure to get the end offset of
*/
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
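
/*
 * Editor's sketch: offsetofend() names the first byte past a member,
 * e.g. for a hypothetical struct example_hdr { u32 magic; u16 version; }:
 *
 *	offsetofend(struct example_hdr, magic)   == 4
 *	offsetofend(struct example_hdr, version) == 6
 */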
 
#endif
/drivers/include/linux/string.h
25,6 → 25,9
#ifndef __HAVE_ARCH_STRLCPY
size_t strlcpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRSCPY
ssize_t __must_check strscpy(char *, const char *, size_t);
#endif
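
/*
 * Editor's sketch: unlike strlcpy(), strscpy() reports truncation as
 * -E2BIG instead of returning the untruncated source length, so the
 * result can be checked directly. The buffer size is hypothetical.
 */
#if 0
static inline int example_copy_name(char *dst, const char *src)
{
	ssize_t len = strscpy(dst, src, 16);

	return (len < 0) ? (int)len : 0;	/* -E2BIG when truncated */
}
#endif
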
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
#endif
40,9 → 43,6
#ifndef __HAVE_ARCH_STRNCMP
extern int strncmp(const char *,const char *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRNICMP
#define strnicmp strncasecmp
#endif
#ifndef __HAVE_ARCH_STRCASECMP
extern int strcasecmp(const char *s1, const char *s2);
#endif
114,8 → 114,12
extern void * memchr(const void *,int,__kernel_size_t);
#endif
void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *s, char old, char new);
 
extern void kfree_const(const void *x);
 
extern char *kstrdup(const char *s, gfp_t gfp);
extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
 
/drivers/include/linux/swab.h
1,284 → 1,8
#ifndef _LINUX_SWAB_H
#define _LINUX_SWAB_H
 
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/swab.h>
#include <uapi/linux/swab.h>
 
/*
* Casts are necessary for constants, because we never know for sure
* how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
*/
#define ___constant_swab16(x) ((__u16)( \
(((__u16)(x) & (__u16)0x00ffU) << 8) | \
(((__u16)(x) & (__u16)0xff00U) >> 8)))
 
#define ___constant_swab32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
(((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
(((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
(((__u32)(x) & (__u32)0xff000000UL) >> 24)))
 
#define ___constant_swab64(x) ((__u64)( \
(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56)))
 
#define ___constant_swahw32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \
(((__u32)(x) & (__u32)0xffff0000UL) >> 16)))
 
#define ___constant_swahb32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \
(((__u32)(x) & (__u32)0xff00ff00UL) >> 8)))
 
/*
* Implement the following as inlines, but define the interface using
* macros to allow constant folding when possible:
* ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
*/
 
static inline __attribute_const__ __u16 __fswab16(__u16 val)
{
#ifdef __arch_swab16
return __arch_swab16(val);
#else
return ___constant_swab16(val);
#endif
}
 
static inline __attribute_const__ __u32 __fswab32(__u32 val)
{
#ifdef __arch_swab32
return __arch_swab32(val);
#else
return ___constant_swab32(val);
#endif
}
 
static inline __attribute_const__ __u64 __fswab64(__u64 val)
{
#ifdef __arch_swab64
return __arch_swab64(val);
#elif defined(__SWAB_64_THRU_32__)
__u32 h = val >> 32;
__u32 l = val & ((1ULL << 32) - 1);
return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h)));
#else
return ___constant_swab64(val);
#endif
}
 
static inline __attribute_const__ __u32 __fswahw32(__u32 val)
{
#ifdef __arch_swahw32
return __arch_swahw32(val);
#else
return ___constant_swahw32(val);
#endif
}
 
static inline __attribute_const__ __u32 __fswahb32(__u32 val)
{
#ifdef __arch_swahb32
return __arch_swahb32(val);
#else
return ___constant_swahb32(val);
#endif
}
 
/**
* __swab16 - return a byteswapped 16-bit value
* @x: value to byteswap
*/
#define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
___constant_swab16(x) : \
__fswab16(x))
 
/**
* __swab32 - return a byteswapped 32-bit value
* @x: value to byteswap
*/
#define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swab32(x) : \
__fswab32(x))
 
/**
* __swab64 - return a byteswapped 64-bit value
* @x: value to byteswap
*/
#define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
___constant_swab64(x) : \
__fswab64(x))
 
/**
* __swahw32 - return a word-swapped 32-bit value
* @x: value to wordswap
*
* __swahw32(0x12340000) is 0x00001234
*/
#define __swahw32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swahw32(x) : \
__fswahw32(x))
 
/**
* __swahb32 - return a high and low byte-swapped 32-bit value
* @x: value to byteswap
*
* __swahb32(0x12345678) is 0x34127856
*/
#define __swahb32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swahb32(x) : \
__fswahb32(x))
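
/*
* The __builtin_constant_p() selection above lets constant arguments fold at
* compile time while runtime values go through the (possibly arch-optimized)
* __fswabXX() helpers. Illustration (both names are hypothetical):
*/
#define MAGIC_BE16	__swab16(0x1234)	/* folds to 0x3412, no call emitted */

static inline __u32 load_swapped32(const __u32 *p)
{
	return __swab32(*p);	/* runtime path: __fswab32() */
}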
 
/**
* __swab16p - return a byteswapped 16-bit value from a pointer
* @p: pointer to a naturally-aligned 16-bit value
*/
static inline __u16 __swab16p(const __u16 *p)
{
#ifdef __arch_swab16p
return __arch_swab16p(p);
#else
return __swab16(*p);
#endif
}
 
/**
* __swab32p - return a byteswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*/
static inline __u32 __swab32p(const __u32 *p)
{
#ifdef __arch_swab32p
return __arch_swab32p(p);
#else
return __swab32(*p);
#endif
}
 
/**
* __swab64p - return a byteswapped 64-bit value from a pointer
* @p: pointer to a naturally-aligned 64-bit value
*/
static inline __u64 __swab64p(const __u64 *p)
{
#ifdef __arch_swab64p
return __arch_swab64p(p);
#else
return __swab64(*p);
#endif
}
 
/**
* __swahw32p - return a wordswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahw32() for details of wordswapping.
*/
static inline __u32 __swahw32p(const __u32 *p)
{
#ifdef __arch_swahw32p
return __arch_swahw32p(p);
#else
return __swahw32(*p);
#endif
}
 
/**
* __swahb32p - return a high and low byteswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahb32() for details of high/low byteswapping.
*/
static inline __u32 __swahb32p(const __u32 *p)
{
#ifdef __arch_swahb32p
return __arch_swahb32p(p);
#else
return __swahb32(*p);
#endif
}
 
/**
* __swab16s - byteswap a 16-bit value in-place
* @p: pointer to a naturally-aligned 16-bit value
*/
static inline void __swab16s(__u16 *p)
{
#ifdef __arch_swab16s
__arch_swab16s(p);
#else
*p = __swab16p(p);
#endif
}
/**
* __swab32s - byteswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*/
static inline void __swab32s(__u32 *p)
{
#ifdef __arch_swab32s
__arch_swab32s(p);
#else
*p = __swab32p(p);
#endif
}
 
/**
* __swab64s - byteswap a 64-bit value in-place
* @p: pointer to a naturally-aligned 64-bit value
*/
static inline void __swab64s(__u64 *p)
{
#ifdef __arch_swab64s
__arch_swab64s(p);
#else
*p = __swab64p(p);
#endif
}
 
/**
* __swahw32s - wordswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahw32() for details of wordswapping
*/
static inline void __swahw32s(__u32 *p)
{
#ifdef __arch_swahw32s
__arch_swahw32s(p);
#else
*p = __swahw32p(p);
#endif
}
 
/**
* __swahb32s - high and low byteswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahb32() for details of high and low byte swapping
*/
static inline void __swahb32s(__u32 *p)
{
#ifdef __arch_swahb32s
__arch_swahb32s(p);
#else
*p = __swahb32p(p);
#endif
}
 
#ifdef __KERNEL__
# define swab16 __swab16
# define swab32 __swab32
# define swab64 __swab64
294,6 → 18,4
# define swab64s __swab64s
# define swahw32s __swahw32s
# define swahb32s __swahb32s
#endif /* __KERNEL__ */
 
#endif /* _LINUX_SWAB_H */
/drivers/include/linux/time.h
110,6 → 110,19
return true;
}
 
static inline bool timeval_valid(const struct timeval *tv)
{
/* Dates before 1970 are bogus */
if (tv->tv_sec < 0)
return false;
 
/* Can't have more microseconds than a second */
if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
return false;
 
return true;
}
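
/*
* Typical use is validating a user-supplied interval before arming a timer;
* a minimal sketch (check_user_timeout() is hypothetical, and -EINVAL assumes
* linux/errno.h is available):
*/
static inline int check_user_timeout(const struct timeval *tv)
{
	if (!timeval_valid(tv))	/* negative fields or usec >= USEC_PER_SEC */
		return -EINVAL;
	return 0;
}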
 
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
 
#define CURRENT_TIME (current_kernel_time())
/drivers/include/linux/time64.h
2,6 → 2,7
#define _LINUX_TIME64_H
 
#include <uapi/linux/time.h>
#include <linux/math64.h>
 
typedef __s64 time64_t;
 
11,11 → 12,18
*/
#if __BITS_PER_LONG == 64
# define timespec64 timespec
# define itimerspec64 itimerspec
#else
struct timespec64 {
time64_t tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
};
 
struct itimerspec64 {
struct timespec64 it_interval;
struct timespec64 it_value;
};
 
#endif
 
/* Parameters used to convert the timespec values: */
28,6 → 36,7
#define FSEC_PER_SEC 1000000000000000LL
 
/* Located here for timespec[64]_valid_strict */
#define TIME64_MAX ((s64)~((u64)1 << 63))
#define KTIME_MAX ((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
 
43,6 → 52,16
return ts;
}
 
static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
{
return *its64;
}
 
static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
{
return *its;
}
 
# define timespec64_equal timespec_equal
# define timespec64_compare timespec_compare
# define set_normalized_timespec64 set_normalized_timespec
75,6 → 94,24
return ret;
}
 
static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
{
struct itimerspec ret;
 
ret.it_interval = timespec64_to_timespec(its64->it_interval);
ret.it_value = timespec64_to_timespec(its64->it_value);
return ret;
}
 
static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
{
struct itimerspec64 ret;
 
ret.it_interval = timespec_to_timespec64(its->it_interval);
ret.it_value = timespec_to_timespec64(its->it_value);
return ret;
}
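
/*
* Round-trip sketch (round_trip_its() is hypothetical): widen at the ABI
* boundary for internal 64-bit arithmetic, then narrow again on the way out.
* On 32-bit kernels the narrowing direction truncates tv_sec, so post-2038
* values wrap silently; callers should range-check first.
*/
static inline struct itimerspec round_trip_its(struct itimerspec *its)
{
	struct itimerspec64 its64 = itimerspec_to_itimerspec64(its);

	return itimerspec64_to_itimerspec(&its64);
}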
 
static inline int timespec64_equal(const struct timespec64 *a,
const struct timespec64 *b)
{
/drivers/include/linux/types.h
135,26 → 135,25
#endif
 
/*
* The type of an index into the pagecache. Use a #define so asm/types.h
* can override it.
* The type of an index into the pagecache.
*/
#ifndef pgoff_t
#define pgoff_t unsigned long
#endif
 
/* A dma_addr_t can hold any valid DMA or bus address for the platform */
/*
* A dma_addr_t can hold any valid DMA address, i.e., any address returned
* by the DMA API.
*
* If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32
* bits wide. Bus addresses, e.g., PCI BARs, may be wider than 32 bits,
* but drivers do memory-mapped I/O to ioremapped kernel virtual addresses,
* so they don't care about the size of the actual bus addresses.
*/
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;
#endif /* dma_addr_t */
#endif
 
#ifdef __CHECKER__
#else
#endif
#ifdef __CHECK_ENDIAN__
#else
#endif
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
typedef unsigned __bitwise__ oom_flags_t;
206,12 → 205,32
* struct callback_head - callback structure for use with RCU and task_work
* @next: next update requests in a list
* @func: actual update function to call after the grace period.
*
* The struct is aligned to the size of a pointer. On most architectures this
* happens naturally due to ABI requirements, but some architectures (like
* CRIS) have a weird ABI and we need to ask for it explicitly.
*
* The alignment is required to guarantee that bits 0 and 1 of @next will be
* clear under normal conditions -- as long as we use call_rcu(),
* call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback.
*
* This guarantee is important for a few reasons:
* - a future call_rcu_lazy() will make use of lower bits in the pointer;
* - the structure shares storage space in struct page with @compound_head,
* which encodes PageTail() in bit 0. The guarantee is needed to avoid
* false-positive PageTail().
*/
struct callback_head {
struct callback_head *next;
void (*func)(struct callback_head *head);
};
} __attribute__((aligned(sizeof(void *))));
#define rcu_head callback_head
 
typedef void (*rcu_callback_t)(struct rcu_head *head);
typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
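
/*
* Usual embedding pattern (struct foo and foo_free_rcu() are hypothetical,
* and this assumes call_rcu()/container_of()/kfree() are available in this
* port): the object carries its own callback_head and is freed only after a
* grace period has elapsed.
*/
struct foo {
	int payload;
	struct rcu_head rcu;	/* pointer-aligned, per the comment above */
};

static inline void foo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* later: call_rcu(&f->rcu, foo_free_rcu); */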
 
/* clocksource cycle base type */
typedef u64 cycle_t;
 
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
/drivers/include/linux/vgaarb.h
65,8 → 65,13
* out of the arbitration process (and can safely take
* interrupts at any time).
*/
#if defined(CONFIG_VGA_ARB)
extern void vga_set_legacy_decoding(struct pci_dev *pdev,
unsigned int decodes);
#else
static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
unsigned int decodes) { }
#endif
 
/**
* vga_get - acquire & locks VGA resources
/drivers/include/syscall.h
6,6 → 6,14
typedef u32 addr_t;
typedef u32 count_t;
 
typedef struct
{
int width;
int height;
int bpp;
int freq;
} videomode_t;
 
///////////////////////////////////////////////////////////////////////////////
 
#define STDCALL __attribute__ ((stdcall)) __attribute__ ((dllimport))
36,6 → 44,7
void* STDCALL GetDisplay(void)__asm__("GetDisplay");
 
u32 IMPORT GetTimerTicks(void)__asm__("GetTimerTicks");
u64 IMPORT GetClockNs(void)__asm__("GetClockNs");
 
addr_t STDCALL AllocPage(void)__asm__("AllocPage");
addr_t STDCALL AllocPages(count_t count)__asm__("AllocPages");
512,9 → 521,14
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100);
}
 
static inline void __iomem *ioremap_nocache(u32 offset, size_t size)
{
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100);
}
 
static inline void __iomem *ioremap_wc(u32 offset, size_t size)
{
return (void __iomem*) MapIoMem(offset, size, PG_SW|0x100);
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_WRITEC|0x100);
}
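
/*
* Sketch of the mapping helpers above (read_chip_rev() and the register
* offset are hypothetical; readl() and a FreeKernelSpace()-style unmap are
* assumed to exist in this port):
*/
static inline u32 read_chip_rev(u32 mmio_phys)
{
	void __iomem *regs = ioremap_nocache(mmio_phys, 4096);
	u32 rev = 0;

	if (regs) {
		rev = readl(regs + 0x04);	/* hypothetical revision reg */
		FreeKernelSpace((void *)regs);	/* assumed unmap counterpart */
	}
	return rev;
}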
 
 
546,7 → 560,7
KernelFree(addr);
}
 
static inline int power_supply_is_system_supplied(void) { return -1; }
static inline int power_supply_is_system_supplied(void) { return -1; };
 
 
#endif
/drivers/include/uapi/asm/e820.h
32,7 → 32,18
#define E820_ACPI 3
#define E820_NVS 4
#define E820_UNUSABLE 5
#define E820_PMEM 7
 
/*
* This is a non-standardized way to represent ADR or NVDIMM regions that
* persist over a reboot. The kernel will ignore their special capabilities
* unless the CONFIG_X86_PMEM_LEGACY option is set.
*
* ( Note that older platforms also used 6 for the same type of memory,
* but newer versions switched to 12 as 6 was assigned differently. Some
* time they will learn... )
*/
#define E820_PRAM 12
 
/*
* reserved RAM used by kernel itself
/drivers/include/uapi/asm/msr.h
1,8 → 1,6
#ifndef _UAPI_ASM_X86_MSR_H
#define _UAPI_ASM_X86_MSR_H
 
#include <asm/msr-index.h>
 
#ifndef __ASSEMBLY__
 
#include <linux/types.h>
/drivers/include/uapi/asm/posix_types.h
1,5 → 1,9
# ifdef CONFIG_X86_32
#ifndef __KERNEL__
# ifdef __i386__
# include <asm/posix_types_32.h>
# elif defined(__ILP32__)
# include <asm/posix_types_x32.h>
# else
# include <asm/posix_types_64.h>
# endif
#endif
/drivers/include/uapi/asm/processor-flags.h
37,8 → 37,6
#define X86_EFLAGS_VM _BITUL(X86_EFLAGS_VM_BIT)
#define X86_EFLAGS_AC_BIT 18 /* Alignment Check/Access Control */
#define X86_EFLAGS_AC _BITUL(X86_EFLAGS_AC_BIT)
#define X86_EFLAGS_AC_BIT 18 /* Alignment Check/Access Control */
#define X86_EFLAGS_AC _BITUL(X86_EFLAGS_AC_BIT)
#define X86_EFLAGS_VIF_BIT 19 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIF _BITUL(X86_EFLAGS_VIF_BIT)
#define X86_EFLAGS_VIP_BIT 20 /* Virtual Interrupt Pending */
/drivers/include/uapi/asm/ptrace-abi.h
0,0 → 1,93
#ifndef _ASM_X86_PTRACE_ABI_H
#define _ASM_X86_PTRACE_ABI_H
 
#ifdef __i386__
 
#define EBX 0
#define ECX 1
#define EDX 2
#define ESI 3
#define EDI 4
#define EBP 5
#define EAX 6
#define DS 7
#define ES 8
#define FS 9
#define GS 10
#define ORIG_EAX 11
#define EIP 12
#define CS 13
#define EFL 14
#define UESP 15
#define SS 16
#define FRAME_SIZE 17
 
#else /* __i386__ */
 
#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
/*
* C ABI says these regs are callee-preserved. They aren't saved on kernel entry
* unless syscall needs a complete, fully filled "struct pt_regs".
*/
#define R15 0
#define R14 8
#define R13 16
#define R12 24
#define RBP 32
#define RBX 40
/* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11 48
#define R10 56
#define R9 64
#define R8 72
#define RAX 80
#define RCX 88
#define RDX 96
#define RSI 104
#define RDI 112
/*
* On syscall entry, this is syscall#. On CPU exception, this is error code.
* On hw interrupt, it's IRQ number:
*/
#define ORIG_RAX 120
/* Return frame for iretq */
#define RIP 128
#define CS 136
#define EFLAGS 144
#define RSP 152
#define SS 160
#endif /* __ASSEMBLY__ */
 
/* top of stack page */
#define FRAME_SIZE 168
 
#endif /* !__i386__ */
 
/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define PTRACE_GETFPREGS 14
#define PTRACE_SETFPREGS 15
#define PTRACE_GETFPXREGS 18
#define PTRACE_SETFPXREGS 19
 
#define PTRACE_OLDSETOPTIONS 21
 
/* only useful for access 32bit programs / kernels */
#define PTRACE_GET_THREAD_AREA 25
#define PTRACE_SET_THREAD_AREA 26
 
#ifdef __x86_64__
# define PTRACE_ARCH_PRCTL 30
#endif
 
#define PTRACE_SYSEMU 31
#define PTRACE_SYSEMU_SINGLESTEP 32
 
#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
 
#ifndef __ASSEMBLY__
#include <linux/types.h>
#endif
 
#endif /* _ASM_X86_PTRACE_ABI_H */
/drivers/include/uapi/asm/ptrace.h
1,262 → 1,85
#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H
#ifndef _UAPI_ASM_X86_PTRACE_H
#define _UAPI_ASM_X86_PTRACE_H
 
#include <asm/segment.h>
#include <asm/page_types.h>
#include <uapi/asm/ptrace.h>
#include <linux/compiler.h> /* For __user */
#include <asm/ptrace-abi.h>
#include <asm/processor-flags.h>
 
 
#ifndef __ASSEMBLY__
 
#ifdef __i386__
/* this struct defines the way the registers are stored on the
stack during a system call. */
 
#ifndef __KERNEL__
 
struct pt_regs {
unsigned long bx;
unsigned long cx;
unsigned long dx;
unsigned long si;
unsigned long di;
unsigned long bp;
unsigned long ax;
unsigned long ds;
unsigned long es;
unsigned long fs;
unsigned long gs;
unsigned long orig_ax;
unsigned long ip;
unsigned long cs;
unsigned long flags;
unsigned long sp;
unsigned long ss;
long ebx;
long ecx;
long edx;
long esi;
long edi;
long ebp;
long eax;
int xds;
int xes;
int xfs;
int xgs;
long orig_eax;
long eip;
int xcs;
long eflags;
long esp;
int xss;
};
 
#endif /* __KERNEL__ */
 
#else /* __i386__ */
 
#ifndef __KERNEL__
 
struct pt_regs {
/*
* C ABI says these regs are callee-preserved. They aren't saved on kernel entry
* unless syscall needs a complete, fully filled "struct pt_regs".
*/
unsigned long r15;
unsigned long r14;
unsigned long r13;
unsigned long r12;
unsigned long bp;
unsigned long bx;
/* arguments: non interrupts/non tracing syscalls only save up to here*/
unsigned long rbp;
unsigned long rbx;
/* These regs are callee-clobbered. Always saved on kernel entry. */
unsigned long r11;
unsigned long r10;
unsigned long r9;
unsigned long r8;
unsigned long ax;
unsigned long cx;
unsigned long dx;
unsigned long si;
unsigned long di;
unsigned long orig_ax;
/* end of arguments */
/* cpu exception frame or undefined */
unsigned long ip;
unsigned long rax;
unsigned long rcx;
unsigned long rdx;
unsigned long rsi;
unsigned long rdi;
/*
* On syscall entry, this is syscall#. On CPU exception, this is error code.
* On hw interrupt, it's IRQ number:
*/
unsigned long orig_rax;
/* Return frame for iretq */
unsigned long rip;
unsigned long cs;
unsigned long flags;
unsigned long sp;
unsigned long eflags;
unsigned long rsp;
unsigned long ss;
/* top of stack page */
};
 
#endif /* __KERNEL__ */
#endif /* !__i386__ */
 
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt_types.h>
#endif
 
struct cpuinfo_x86;
struct task_struct;
 
extern unsigned long profile_pc(struct pt_regs *regs);
#define profile_pc profile_pc
#endif /* !__ASSEMBLY__ */
 
extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
int error_code, int si_code);
 
 
extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch);
extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch,
unsigned long phase1_result);
 
extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);
 
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->ax;
}
 
/*
* user_mode_vm(regs) determines whether a register set came from user mode.
* This is true if V8086 mode was enabled OR if the register set was from
* protected mode with RPL-3 CS value. This tricky test checks that with
* one comparison. Many places in the kernel can bypass this full check
* if they have already ruled out V8086 mode, so user_mode(regs) can be used.
*/
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
#else
return !!(regs->cs & 3);
#endif
}
 
static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
USER_RPL;
#else
return user_mode(regs);
#endif
}
 
static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return (regs->flags & X86_VM_MASK);
#else
return 0; /* No V86 mode support in long mode */
#endif
}
 
#ifdef CONFIG_X86_64
static inline bool user_64bit_mode(struct pt_regs *regs)
{
#ifndef CONFIG_PARAVIRT
/*
* On non-paravirt systems, this is the only long mode CPL 3
* selector. We do not allow long mode selectors in the LDT.
*/
return regs->cs == __USER_CS;
#else
/* Headers are too twisted for this to go in paravirt.h. */
return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
}
 
#define current_user_stack_pointer() this_cpu_read(old_rsp)
/* ia32 vs. x32 difference */
#define compat_user_stack_pointer() \
(test_thread_flag(TIF_IA32) \
? current_pt_regs()->sp \
: this_cpu_read(old_rsp))
#endif
 
#ifdef CONFIG_X86_32
extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
#else
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
return regs->sp;
}
#endif
 
#define GET_IP(regs) ((regs)->ip)
#define GET_FP(regs) ((regs)->bp)
#define GET_USP(regs) ((regs)->sp)
 
#include <asm-generic/ptrace.h>
 
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss))
 
/**
* regs_get_register() - get register value from its offset
* @regs: pt_regs from which register value is gotten.
* @offset: offset number of the register.
*
* regs_get_register() returns the value of a register. @offset is the
* offset of the register within the struct pt_regs pointed to by @regs.
* If @offset is bigger than MAX_REG_OFFSET, this returns 0.
*/
static inline unsigned long regs_get_register(struct pt_regs *regs,
unsigned int offset)
{
if (unlikely(offset > MAX_REG_OFFSET))
return 0;
#ifdef CONFIG_X86_32
/*
* Traps from the kernel do not save sp and ss.
* Use the helper function to retrieve sp.
*/
if (offset == offsetof(struct pt_regs, sp) &&
regs->cs == __KERNEL_CS)
return kernel_stack_pointer(regs);
#endif
return *(unsigned long *)((unsigned long)regs + offset);
}
 
/**
* regs_within_kernel_stack() - check the address in the stack
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
* regs_within_kernel_stack() checks whether @addr is within the kernel stack
* page(s): it returns true if so, false otherwise.
*/
static inline int regs_within_kernel_stack(struct pt_regs *regs,
unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
 
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
* specified by @regs. If the @n-th entry is NOT in the kernel stack,
* this returns 0.
*/
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return *addr;
else
return 0;
}
 
#define arch_has_single_step() (1)
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step() (1)
#else
#define arch_has_block_step() (boot_cpu_data.x86 >= 6)
#endif
 
#define ARCH_HAS_USER_SINGLE_STEP_INFO
 
/*
* When hitting ptrace_stop(), we cannot return using SYSRET because
* that does not restore the full CPU state, only a minimal set. The
* ptracer can change arbitrary register values, which is usually okay
* because the usual ptrace stops run off the signal delivery path which
* forces IRET; however, ptrace_event() stops happen in arbitrary places
* in the kernel and don't force IRET path.
*
* So force IRET path after a ptrace stop.
*/
#define arch_ptrace_stop_needed(code, info) \
({ \
set_thread_flag(TIF_NOTIFY_RESUME); \
false; \
})
 
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info, int can_allocate);
 
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PTRACE_H */
#endif /* _UAPI_ASM_X86_PTRACE_H */
/drivers/include/uapi/asm/sigcontext.h
1,6 → 1,19
#ifndef _UAPI_ASM_X86_SIGCONTEXT_H
#define _UAPI_ASM_X86_SIGCONTEXT_H
 
/*
* Linux signal context definitions. The sigcontext includes a complex
* hierarchy of CPU and FPU state, available to user-space (on the stack) when
* a signal handler is executed.
*
* Over the years this ABI grew organically from very simple roots towards
* supporting more and more CPU state, and some of the details (which were
* rather clever hacks back in the day) have become a bit quirky.
*
* The current ABI includes flexible provisions for future extensions, so we
* won't have to grow new quirks for quite some time. Promise!
*/
 
#include <linux/compiler.h>
#include <linux/types.h>
 
9,155 → 22,306
#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2)
 
/*
* bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame
* are reserved for SW usage. On cpu's supporting xsave/xrstor, these bytes
* are used to extended the fpstate pointer in the sigcontext, which now
* includes the extended state information along with fpstate information.
* Bytes 464..511 in the current 512-byte layout of the FXSAVE/FXRSTOR frame
* are reserved for SW usage. On CPUs supporting XSAVE/XRSTOR, these bytes are
* used to extend the fpstate pointer in the sigcontext, which now includes the
* extended state information along with fpstate information.
*
* Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved
* area and FP_XSTATE_MAGIC2 at the end of memory layout
* (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the
* extended state information in the memory layout pointed by the fpstate
* pointer in sigcontext.
* If sw_reserved.magic1 == FP_XSTATE_MAGIC1 then an extended context area of
* sw_reserved.extended_size bytes is present. (The last 32-bit word of this
* extended area, at address fpstate+extended_size-FP_XSTATE_MAGIC2_SIZE, is
* set to FP_XSTATE_MAGIC2 so that you can sanity-check your size
* calculations.)
*
* This extended area typically grows with newer CPUs that have larger and
* larger XSAVE areas.
*/
struct _fpx_sw_bytes {
__u32 magic1; /* FP_XSTATE_MAGIC1 */
__u32 extended_size; /* total size of the layout referred by
* fpstate pointer in the sigcontext.
/*
* If set to FP_XSTATE_MAGIC1 then this is an xstate context.
* 0 if a legacy frame.
*/
__u64 xstate_bv;
/* feature bit mask (including fp/sse/extended
* state) that is present in the memory
* layout.
__u32 magic1;
 
/*
* Total size of the fpstate area:
*
* - if magic1 == 0 then it's sizeof(struct _fpstate)
* - if magic1 == FP_XSTATE_MAGIC1 then it's sizeof(struct _xstate)
* plus extensions (if any)
*/
__u32 xstate_size; /* actual xsave state size, based on the
* features saved in the layout.
* 'extended_size' will be greater than
* 'xstate_size'.
__u32 extended_size;
 
/*
* Feature bit mask (including FP/SSE/extended state) that is present
* in the memory layout:
*/
__u32 padding[7]; /* for future use. */
__u64 xfeatures;
 
/*
* Actual XSAVE state size, based on the xfeatures saved in the layout.
* 'extended_size' is greater than 'xstate_size':
*/
__u32 xstate_size;
 
/* For future use: */
__u32 padding[7];
};
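
/*
* Sketch of the two-magic validation described above (xstate_frame_valid()
* is hypothetical; real kernel code performs these reads through careful
* user-copy validation rather than raw dereferences):
*/
static inline int xstate_frame_valid(const void *fpstate,
				     const struct _fpx_sw_bytes *sw)
{
	const __u32 *magic2;

	if (sw->magic1 != FP_XSTATE_MAGIC1)
		return 0;	/* legacy frame, no extended area */

	magic2 = (const __u32 *)((const char *)fpstate +
				 sw->extended_size - FP_XSTATE_MAGIC2_SIZE);
	return *magic2 == FP_XSTATE_MAGIC2;
}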
 
#ifdef __i386__
/*
* As documented in the iBCS2 standard..
* As documented in the iBCS2 standard:
*
* The first part of "struct _fpstate" is just the normal i387
* hardware setup, the extra "status" word is used to save the
* coprocessor status word before entering the handler.
* The first part of "struct _fpstate" is just the normal i387 hardware setup,
* the extra "status" word is used to save the coprocessor status word before
* entering the handler.
*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
*
* The FPU state data structure has had to grow to accommodate the
* extended FPU state required by the Streaming SIMD Extensions.
* There is no documented standard to accomplish this at the moment.
* The FPU state data structure has had to grow to accommodate the extended FPU
* state required by the Streaming SIMD Extensions. There is no documented
* standard to accomplish this at the moment.
*/
 
/* 10-byte legacy floating point register: */
struct _fpreg {
unsigned short significand[4];
unsigned short exponent;
__u16 significand[4];
__u16 exponent;
};
 
/* 16-byte floating point register: */
struct _fpxreg {
unsigned short significand[4];
unsigned short exponent;
unsigned short padding[3];
__u16 significand[4];
__u16 exponent;
__u16 padding[3];
};
 
/* 16-byte XMM register: */
struct _xmmreg {
unsigned long element[4];
__u32 element[4];
};
 
struct _fpstate {
/* Regular FPU environment */
unsigned long cw;
unsigned long sw;
unsigned long tag;
unsigned long ipoff;
unsigned long cssel;
unsigned long dataoff;
unsigned long datasel;
#define X86_FXSR_MAGIC 0x0000
 
/*
* The 32-bit FPU frame:
*/
struct _fpstate_32 {
/* Legacy FPU environment: */
__u32 cw;
__u32 sw;
__u32 tag;
__u32 ipoff;
__u32 cssel;
__u32 dataoff;
__u32 datasel;
struct _fpreg _st[8];
unsigned short status;
unsigned short magic; /* 0xffff = regular FPU data only */
__u16 status;
__u16 magic; /* 0xffff: regular FPU data only */
/* 0x0000: FXSR FPU data */
 
/* FXSR FPU environment */
unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
unsigned long mxcsr;
unsigned long reserved;
__u32 _fxsr_env[6]; /* FXSR FPU env is ignored */
__u32 mxcsr;
__u32 reserved;
struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
struct _xmmreg _xmm[8];
unsigned long padding1[44];
struct _xmmreg _xmm[8]; /* First 8 XMM registers */
union {
__u32 padding1[44]; /* Second 8 XMM registers plus padding */
__u32 padding[44]; /* Alias name for old user-space */
};
 
union {
unsigned long padding2[12];
struct _fpx_sw_bytes sw_reserved; /* represents the extended
* state info */
__u32 padding2[12];
struct _fpx_sw_bytes sw_reserved; /* Potential extended state is encoded here */
};
};
 
#define X86_FXSR_MAGIC 0x0000
 
#ifndef __KERNEL__
/*
* User-space might still rely on the old definition:
* The 64-bit FPU frame. (FXSAVE format and later)
*
* Note1: If sw_reserved.magic1 == FP_XSTATE_MAGIC1 then the structure is
* larger: 'struct _xstate'. Note that 'struct _xstate' embeds
* 'struct _fpstate', so you can always assume the _fpstate portion
* exists and use it to check the magic value.
*
* Note2: Reserved fields may someday contain valuable data. Always
* save/restore them when you change signal frames.
*/
struct sigcontext {
unsigned short gs, __gsh;
unsigned short fs, __fsh;
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned long edi;
unsigned long esi;
unsigned long ebp;
unsigned long esp;
unsigned long ebx;
unsigned long edx;
unsigned long ecx;
unsigned long eax;
unsigned long trapno;
unsigned long err;
unsigned long eip;
unsigned short cs, __csh;
unsigned long eflags;
unsigned long esp_at_signal;
unsigned short ss, __ssh;
struct _fpstate __user *fpstate;
unsigned long oldmask;
unsigned long cr2;
};
#endif /* !__KERNEL__ */
 
#else /* __i386__ */
 
/* FXSAVE frame */
/* Note: reserved1/2 may someday contain valuable data. Always save/restore
them when you change signal frames. */
struct _fpstate {
struct _fpstate_64 {
__u16 cwd;
__u16 swd;
__u16 twd; /* Note this is not the same as the
32bit/x87/FSAVE twd */
/* Note this is not the same as the 32-bit/x87/FSAVE twd: */
__u16 twd;
__u16 fop;
__u64 rip;
__u64 rdp;
__u32 mxcsr;
__u32 mxcsr_mask;
__u32 st_space[32]; /* 8*16 bytes for each FP-reg */
__u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
__u32 st_space[32]; /* 8x FP registers, 16 bytes each */
__u32 xmm_space[64]; /* 16x XMM registers, 16 bytes each */
__u32 reserved2[12];
union {
__u32 reserved3[12];
struct _fpx_sw_bytes sw_reserved; /* represents the extended
* state information */
struct _fpx_sw_bytes sw_reserved; /* Potential extended state is encoded here */
};
};
 
#ifndef __KERNEL__
#ifdef __i386__
# define _fpstate _fpstate_32
#else
# define _fpstate _fpstate_64
#endif
 
struct _header {
__u64 xfeatures;
__u64 reserved1[2];
__u64 reserved2[5];
};
 
struct _ymmh_state {
/* 16x YMM registers, 16 bytes each: */
__u32 ymmh_space[64];
};
 
/*
* User-space might still rely on the old definition:
* Extended state pointed to by sigcontext::fpstate.
*
* In addition to the fpstate, information encoded in _xstate::xstate_hdr
* indicates the presence of other extended state information supported
* by the CPU and kernel:
*/
struct _xstate {
struct _fpstate fpstate;
struct _header xstate_hdr;
struct _ymmh_state ymmh;
/* New processor state extensions go here: */
};
 
/*
* The 32-bit signal frame:
*/
struct sigcontext_32 {
__u16 gs, __gsh;
__u16 fs, __fsh;
__u16 es, __esh;
__u16 ds, __dsh;
__u32 di;
__u32 si;
__u32 bp;
__u32 sp;
__u32 bx;
__u32 dx;
__u32 cx;
__u32 ax;
__u32 trapno;
__u32 err;
__u32 ip;
__u16 cs, __csh;
__u32 flags;
__u32 sp_at_signal;
__u16 ss, __ssh;
 
/*
* fpstate is really (struct _fpstate *) or (struct _xstate *)
* depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
* bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
* of extended memory layout. See comments at the definition of
* (struct _fpx_sw_bytes)
*/
__u32 fpstate; /* Zero when no FPU/extended context */
__u32 oldmask;
__u32 cr2;
};
 
/*
* The 64-bit signal frame:
*/
struct sigcontext_64 {
__u64 r8;
__u64 r9;
__u64 r10;
__u64 r11;
__u64 r12;
__u64 r13;
__u64 r14;
__u64 r15;
__u64 di;
__u64 si;
__u64 bp;
__u64 bx;
__u64 dx;
__u64 ax;
__u64 cx;
__u64 sp;
__u64 ip;
__u64 flags;
__u16 cs;
__u16 gs;
__u16 fs;
__u16 __pad0;
__u64 err;
__u64 trapno;
__u64 oldmask;
__u64 cr2;
 
/*
* fpstate is really (struct _fpstate *) or (struct _xstate *)
* depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
* bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
* of extended memory layout. See comments at the definition of
* (struct _fpx_sw_bytes)
*/
__u64 fpstate; /* Zero when no FPU/extended context */
__u64 reserved1[8];
};
 
/*
* Create the real 'struct sigcontext' type:
*/
#ifdef __KERNEL__
# ifdef __i386__
# define sigcontext sigcontext_32
# else
# define sigcontext sigcontext_64
# endif
#endif
 
/*
* The old user-space sigcontext definition, just in case user-space still
* relies on it. The kernel definition (in asm/sigcontext.h) has unified
* field names but otherwise the same layout.
*/
#ifndef __KERNEL__
 
#define _fpstate_ia32 _fpstate_32
#define sigcontext_ia32 sigcontext_32
 
 
# ifdef __i386__
struct sigcontext {
__u16 gs, __gsh;
__u16 fs, __fsh;
__u16 es, __esh;
__u16 ds, __dsh;
__u32 edi;
__u32 esi;
__u32 ebp;
__u32 esp;
__u32 ebx;
__u32 edx;
__u32 ecx;
__u32 eax;
__u32 trapno;
__u32 err;
__u32 eip;
__u16 cs, __csh;
__u32 eflags;
__u32 esp_at_signal;
__u16 ss, __ssh;
struct _fpstate __user *fpstate;
__u32 oldmask;
__u32 cr2;
};
# else /* __x86_64__: */
struct sigcontext {
__u64 r8;
__u64 r9;
__u64 r10;
184,38 → 348,13
__u64 trapno;
__u64 oldmask;
__u64 cr2;
struct _fpstate __user *fpstate; /* zero when no FPU context */
struct _fpstate __user *fpstate; /* Zero when no FPU context */
#ifdef __ILP32__
__u32 __fpstate_pad;
#endif
__u64 reserved1[8];
};
# endif /* __x86_64__ */
#endif /* !__KERNEL__ */
 
#endif /* !__i386__ */
 
struct _xsave_hdr {
__u64 xstate_bv;
__u64 reserved1[2];
__u64 reserved2[5];
};
 
struct _ymmh_state {
/* 16 * 16 bytes for each YMMH-reg */
__u32 ymmh_space[64];
};
 
/*
* Extended state pointed by the fpstate pointer in the sigcontext.
* In addition to the fpstate, information encoded in the xstate_hdr
* indicates the presence of other extended state information
* supported by the processor and OS.
*/
struct _xstate {
struct _fpstate fpstate;
struct _xsave_hdr xstate_hdr;
struct _ymmh_state ymmh;
/* new processor state extensions go here */
};
 
#endif /* _UAPI_ASM_X86_SIGCONTEXT_H */
/drivers/include/uapi/asm-generic/errno.h
6,7 → 6,16
#define EDEADLK 35 /* Resource deadlock would occur */
#define ENAMETOOLONG 36 /* File name too long */
#define ENOLCK 37 /* No record locks available */
#define ENOSYS 38 /* Function not implemented */
 
/*
* This error code is special: arch syscall entry code will return
* -ENOSYS if users try to call a syscall that doesn't exist. To keep
* failures of syscalls that really do exist distinguishable from
* failures due to attempts to use a nonexistent syscall, syscall
* implementations should refrain from returning -ENOSYS.
*/
#define ENOSYS 38 /* Invalid system call number */
 
#define ENOTEMPTY 39 /* Directory not empty */
#define ELOOP 40 /* Too many symbolic links encountered */
#define EWOULDBLOCK EAGAIN /* Operation would block */
/drivers/include/uapi/drm/drm.h
630,6 → 630,7
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
#define DRM_CAP_CURSOR_HEIGHT 0x9
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
 
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
654,6 → 655,13
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
 
/**
* DRM_CLIENT_CAP_ATOMIC
*
* If set to 1, the DRM core will expose atomic properties to userspace
*/
#define DRM_CLIENT_CAP_ATOMIC 3
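
/*
* Userspace opts in via DRM_IOCTL_SET_CLIENT_CAP (libdrm wraps this as
* drmSetClientCap()); a minimal sketch, assuming a userspace build with
* <sys/ioctl.h> and the struct drm_set_client_cap declared just below:
*/
static inline int enable_atomic(int fd)
{
	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_ATOMIC,
		.value = 1,
	};

	return ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);	/* 0 on success */
}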
 
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
777,6 → 785,9
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2)
#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic)
#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob)
#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
 
/**
* Device specific ioctls should only be in their respective headers
/drivers/include/uapi/drm/drm_fourcc.h
34,6 → 34,13
/* color index */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
 
/* 8 bpp Red */
#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
 
/* 16 bpp RG */
#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
 
/* 8 bpp RGB */
#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
109,9 → 116,6
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
 
/* special NV12 tiled format */
#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
 
/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
132,4 → 136,97
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
 
 
/*
* Format Modifiers:
*
* Format modifiers describe, typically, a re-ordering or modification
* of the data in a plane of an FB. This can be used to express tiled/
* swizzled formats, or compression, or a combination of the two.
*
* The upper 8 bits of the format modifier are a vendor-id as assigned
* below. The lower 56 bits are assigned as vendor sees fit.
*/
 
/* Vendor Ids: */
#define DRM_FORMAT_MOD_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
#define DRM_FORMAT_MOD_VENDOR_NV 0x03
#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
/* add more to the end as needed */
 
#define fourcc_mod_code(vendor, val) \
((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
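
/*
* Worked example: the Intel X-tiling token defined below expands as
*
*	I915_FORMAT_MOD_X_TILED == fourcc_mod_code(INTEL, 1)
*	                        == ((__u64)0x01 << 56) | 1
*	                        == 0x0100000000000001
*/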
 
/*
* Format Modifier tokens:
*
* When adding a new token please document the layout with a code comment,
* similar to the fourcc codes above. drm_fourcc.h is considered the
* authoritative source for all of these.
*/
 
/* Intel framebuffer modifiers */
 
/*
* Intel X-tiling layout
*
* This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are
* 2Kb) in row-major layout. Within the tile, bytes are laid out row-major,
* with a platform-dependent stride. On top of that the memory can apply
* platform-dependent swizzling of some higher address bits into bit6.
*
* This format is highly platform specific and not useful for cross-driver
* sharing. It exists since on a given platform it does uniquely identify the
* layout in a simple way for i915-specific userspace.
*/
#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
 
/*
* Intel Y-tiling layout
*
* This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are
* 2Kb) in row-major layout. Within the tile, bytes are laid out in OWORD
* (16 byte) chunks column-major, with a platform-dependent height. On top of
* that the memory can apply platform-dependent swizzling of some higher
* address bits into bit6.
*
* This format is highly platform specific and not useful for cross-driver
* sharing. It exists since on a given platform it does uniquely identify the
* layout in a simple way for i915-specific userspace.
*/
#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
 
/*
* Intel Yf-tiling layout
*
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
* Each group therefore consists of four 256 byte units, which are also laid
* out as 2x2 column-major.
* 256 byte units are made out of four 64 byte blocks of pixels, producing
* either a square block or a 2:1 unit.
* 64 byte blocks of pixels contain four pixel rows of 16 bytes, where the
* width in pixels depends on the pixel depth.
*/
#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
 
/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid out in a Z-shape, and the pixel data follows the
* standard NV12 style.
* As for NV12, an image is the result of two frame buffers: one for Y,
* one for the interleaved Cb/Cr components (1/2 the height of the Y buffer).
* Alignment requirements are (for each buffer):
* - multiple of 128 pixels for the width
* - multiple of 32 pixels for the height
*
* For more information: see http://linuxtv.org/downloads/v4l-dvb-apis/re32.html
*/
#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
 
#endif /* DRM_FOURCC_H */
/drivers/include/uapi/drm/drm_mode.h
105,8 → 105,16
 
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
__u16 hdisplay;
__u16 hsync_start;
__u16 hsync_end;
__u16 htotal;
__u16 hskew;
__u16 vdisplay;
__u16 vsync_start;
__u16 vsync_end;
__u16 vtotal;
__u16 vscan;
 
__u32 vrefresh;
 
124,8 → 132,10
__u32 count_crtcs;
__u32 count_connectors;
__u32 count_encoders;
__u32 min_width, max_width;
__u32 min_height, max_height;
__u32 min_width;
__u32 max_width;
__u32 min_height;
__u32 max_height;
};
 
struct drm_mode_crtc {
135,7 → 145,8
__u32 crtc_id; /**< Id */
__u32 fb_id; /**< Id of framebuffer */
 
__u32 x, y; /**< Position on the frameuffer */
__u32 x; /**< x Position on the framebuffer */
__u32 y; /**< y Position on the framebuffer */
 
__u32 gamma_size;
__u32 mode_valid;
153,12 → 164,16
__u32 flags; /* see above flags */
 
/* Signed dest location allows it to be partially off screen */
__s32 crtc_x, crtc_y;
__u32 crtc_w, crtc_h;
__s32 crtc_x;
__s32 crtc_y;
__u32 crtc_w;
__u32 crtc_h;
 
/* Source values are 16.16 fixed point */
__u32 src_x, src_y;
__u32 src_h, src_w;
__u32 src_x;
__u32 src_y;
__u32 src_h;
__u32 src_w;
};
 
struct drm_mode_get_plane {
244,7 → 259,8
__u32 connector_type_id;
 
__u32 connection;
__u32 mm_width, mm_height; /**< HxW in millimeters */
__u32 mm_width; /**< width in millimeters */
__u32 mm_height; /**< height in millimeters */
__u32 subpixel;
 
__u32 pad;
272,6 → 288,13
#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
 
/* The PROP_ATOMIC flag is used to hide properties from userspace that
* is not aware of atomic properties. This is mostly to work around
* older userspace (DDX drivers) that read/write each prop they find,
* without being aware that this could trigger a lengthy modeset.
*/
#define DRM_MODE_PROP_ATOMIC 0x80000000
 
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
320,7 → 343,8
 
struct drm_mode_fb_cmd {
__u32 fb_id;
__u32 width, height;
__u32 width;
__u32 height;
__u32 pitch;
__u32 bpp;
__u32 depth;
329,16 → 353,18
};
 
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */
 
struct drm_mode_fb_cmd2 {
__u32 fb_id;
__u32 width, height;
__u32 width;
__u32 height;
__u32 pixel_format; /* fourcc code from drm_fourcc.h */
__u32 flags; /* see above flags */
 
/*
* In case of planar formats, this ioctl allows up to 4
* buffer objects with offets and pitches per plane.
* buffer objects with offsets and pitches per plane.
* The pitch and offset order is dictated by the fourcc,
* e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
*
346,13 → 372,21
* followed by an interleaved U/V plane containing
* 8 bit 2x2 subsampled colour difference samples.
*
* So it would consist of Y as offset[0] and UV as
* offeset[1]. Note that offset[0] will generally
* be 0.
* So it would consist of Y as offsets[0] and UV as
* offsets[1]. Note that offsets[0] will generally
* be 0 (but this is not required).
*
* To accommodate tiled, compressed, etc. formats, a per-plane
* vendor-specific modifier token can be specified. The default value of
* zero indicates the "native" format as specified by the fourcc. This
* allows, for example, different tiling/swizzling patterns on different
* planes. See the discussion of DRM_FORMAT_MOD_xxx above.
*/
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
__u64 modifier[4]; /* ie, tiling, compressed (per plane) */
};
 
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
519,4 → 553,47
uint32_t handle;
};
 
/* page-flip flags are valid, plus: */
#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100
#define DRM_MODE_ATOMIC_NONBLOCK 0x0200
#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
 
#define DRM_MODE_ATOMIC_FLAGS (\
DRM_MODE_PAGE_FLIP_EVENT |\
DRM_MODE_PAGE_FLIP_ASYNC |\
DRM_MODE_ATOMIC_TEST_ONLY |\
DRM_MODE_ATOMIC_NONBLOCK |\
DRM_MODE_ATOMIC_ALLOW_MODESET)
 
struct drm_mode_atomic {
__u32 flags;
__u32 count_objs;
__u64 objs_ptr;
__u64 count_props_ptr;
__u64 props_ptr;
__u64 prop_values_ptr;
__u64 reserved;
__u64 user_data;
};
 
/**
* Create a new 'blob' data property, copying length bytes from data pointer,
* and returning new blob ID.
*/
struct drm_mode_create_blob {
/** Pointer to data to copy. */
__u64 data;
/** Length of data to copy. */
__u32 length;
/** Return: new property ID. */
__u32 blob_id;
};
 
/**
* Destroy a user-created blob property.
*/
struct drm_mode_destroy_blob {
__u32 blob_id;
};
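
/*
* Userspace sketch of the blob lifecycle (create_mode_blob() is
* hypothetical): pair every successful create with
* DRM_IOCTL_MODE_DESTROYPROPBLOB once the blob ID is no longer referenced.
*/
static inline __u32 create_mode_blob(int fd,
				     const struct drm_mode_modeinfo *mode)
{
	struct drm_mode_create_blob blob = {
		.data = (__u64)(unsigned long)mode,
		.length = sizeof(*mode),
	};

	if (ioctl(fd, DRM_IOCTL_MODE_CREATEPROPBLOB, &blob))
		return 0;	/* 0 assumed never to be a valid blob ID */
	return blob.blob_id;
}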
 
#endif
/drivers/include/uapi/drm/i915_drm.h
171,8 → 171,12
#define I915_BOX_TEXTURE_LOAD 0x8
#define I915_BOX_LOST_CONTEXT 0x10
 
/* I915 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
/*
* i915 specific ioctls.
*
* The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
* [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
* against DRM_COMMAND_BASE and should be between [0x0, 0x60).
*/
#define DRM_I915_INIT 0x00
#define DRM_I915_FLUSH 0x01
224,6 → 228,8
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32
#define DRM_I915_GEM_USERPTR 0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
 
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
268,7 → 274,7
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
275,6 → 281,8
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
 
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
341,9 → 349,20
#define I915_PARAM_HAS_WT 27
#define I915_PARAM_CMD_PARSER_VERSION 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION 30
#define I915_PARAM_HAS_BSD2 31
#define I915_PARAM_REVISION 32
#define I915_PARAM_SUBSLICE_TOTAL 33
#define I915_PARAM_EU_TOTAL 34
#define I915_PARAM_HAS_GPU_RESET 35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
 
typedef struct drm_i915_getparam {
int param;
__s32 param;
/*
* WARNING: Using pointers instead of fixed-size u64 means we need to write
* compat32 code. Don't repeat this mistake.
*/
int __user *value;
} drm_i915_getparam_t;
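
/*
* Minimal userspace sketch of the GETPARAM convention (i915_getparam() is
* hypothetical), e.g.:
*
*	int eu_total;
*	if (!i915_getparam(fd, I915_PARAM_EU_TOTAL, &eu_total))
*		printf("%d EUs\n", eu_total);
*/
static inline int i915_getparam(int fd, __s32 param, int *value)
{
	drm_i915_getparam_t gp = {
		.param = param,
		.value = value,
	};

	return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);	/* 0 on success */
}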
 
488,6 → 507,14
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 addr_ptr;
 
/**
* Flags for extended behaviour.
*
* Added in version 2.
*/
__u64 flags;
#define I915_MMAP_WC 0x1
};
 
struct drm_i915_gem_mmap_gtt {
663,7 → 690,8
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
#define EXEC_OBJECT_NEEDS_GTT (1<<1)
#define EXEC_OBJECT_WRITE (1<<2)
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_SUPPORTS_48B_ADDRESS<<1)
__u64 flags;
 
__u64 rsvd1;
737,8 → 765,19
*/
#define I915_EXEC_HANDLE_LUT (1<<12)
 
#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1)
/** Used for switching BSD rings on the platforms with two BSD rings */
#define I915_EXEC_BSD_MASK (3<<13)
#define I915_EXEC_BSD_DEFAULT (0<<13) /* default ping-pong mode */
#define I915_EXEC_BSD_RING1 (1<<13)
#define I915_EXEC_BSD_RING2 (2<<13)
 
/** Tell the kernel that the batchbuffer is processed by
* the resource streamer.
*/
#define I915_EXEC_RESOURCE_STREAMER (1<<15)
 
#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)
 
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
973,6 → 1012,7
/* flags */
#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
struct drm_intel_overlay_attrs {
__u32 flags;
__u32 color_key;
1042,6 → 1082,14
__u64 offset;
__u64 val; /* Return value */
};
/* Known registers:
*
* Render engine timestamp - 0x2358 + 64bit - gen7+
* - Note this register returns an invalid value if read with the default
* single-instruction 8-byte read; to work around that, use
* offset (0x2538 | 1) instead.
*
*/
 
struct drm_i915_reset_stats {
__u32 ctx_id;
1073,6 → 1121,16
__u32 handle;
};
 
struct drm_i915_gem_context_param {
__u32 ctx_id;
__u32 size;
__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
__u64 value;
};
 
 
struct drm_i915_mask {
__u32 handle;
__u32 width;
1100,6 → 1158,7
__u32 height;
__u32 bo_pitch;
__u32 bo_map;
__u32 forced;
};
 
 
/drivers/include/uapi/drm/radeon_drm.h
33,7 → 33,7
#ifndef __RADEON_DRM_H__
#define __RADEON_DRM_H__
 
#include <drm/drm.h>
#include "drm.h"
 
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (radeon_sarea.h)
1034,6 → 1034,12
#define RADEON_INFO_VRAM_USAGE 0x1e
#define RADEON_INFO_GTT_USAGE 0x1f
#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
#define RADEON_INFO_CURRENT_GPU_TEMP 0x21
#define RADEON_INFO_CURRENT_GPU_SCLK 0x22
#define RADEON_INFO_CURRENT_GPU_MCLK 0x23
#define RADEON_INFO_READ_REG 0x24
#define RADEON_INFO_VA_UNMAP_WORKING 0x25
#define RADEON_INFO_GPU_RESET_COUNTER 0x26
 
struct drm_radeon_info {
uint32_t request;
/drivers/include/uapi/drm/vmwgfx_drm.h
1,6 → 1,6
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
64,6 → 64,7
#define DRM_VMW_GB_SURFACE_CREATE 23
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
 
/*************************************************************************/
/**
88,6 → 89,8
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
#define DRM_VMW_PARAM_SCREEN_TARGET 11
#define DRM_VMW_PARAM_DX 12
 
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
296,7 → 299,7
* Argument to the DRM_VMW_EXECBUF Ioctl.
*/
 
#define DRM_VMW_EXECBUF_VERSION 1
#define DRM_VMW_EXECBUF_VERSION 2
 
struct drm_vmw_execbuf_arg {
uint64_t commands;
305,6 → 308,8
uint64_t fence_rep;
uint32_t version;
uint32_t flags;
uint32_t context_handle;
uint32_t pad64;
};
 
/**
825,7 → 830,6
enum drm_vmw_shader_type {
drm_vmw_shader_type_vs = 0,
drm_vmw_shader_type_ps,
drm_vmw_shader_type_gs
};
 
 
907,6 → 911,8
* @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID
* if none.
* @base_size Size of the base mip level for all faces.
* @array_size Must be zero for non-DX hardware, and if non-zero
* svga3d_flags must have proper bind flags setup.
*
* Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
* Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
919,7 → 925,7
uint32_t multisample_count;
uint32_t autogen_filter;
uint32_t buffer_handle;
uint32_t pad64;
uint32_t array_size;
struct drm_vmw_size base_size;
};
 
1059,4 → 1065,28
uint32_t pad64;
};
 
/*************************************************************************/
/**
* DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
*
* Allocates a device unique context id, and queues a create context command
* for the host. Does not wait for host completion.
*/
enum drm_vmw_extended_context {
drm_vmw_context_legacy,
drm_vmw_context_dx
};
 
/**
* union drm_vmw_extended_context_arg
*
* @req: Context type.
* @rep: Context identifier.
*
* Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
*/
union drm_vmw_extended_context_arg {
enum drm_vmw_extended_context req;
struct drm_vmw_context_arg rep;
};
#endif
/drivers/include/uapi/linux/media-bus-format.h
0,0 → 1,137
/*
* Media Bus API header
*
* Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
 
#ifndef __LINUX_MEDIA_BUS_FORMAT_H
#define __LINUX_MEDIA_BUS_FORMAT_H
 
/*
* These bus formats uniquely identify data formats on the data bus. Format 0
* is reserved; MEDIA_BUS_FMT_FIXED shall be used by host-client pairs where
* the data format is fixed. Additionally, "2X8" means that one pixel is
* transferred in two 8-bit samples, "BE" or "LE" specify in which order those
* samples are transferred over the bus: "LE" means that the least significant
* bits are transferred first, "BE" means that the most significant bits are
* transferred first, and "PADHI" and "PADLO" define which bits - low or high,
* in the incomplete high byte, are filled with padding bits.
*
* The bus formats are grouped by type, bus_width, bits per component, samples
* per pixel and order of subsamples. Numerical values are sorted using generic
* numerical sort order (8 thus comes before 10).
*
* As their values can't change when a new bus format is inserted into the
* enumeration, the bus formats are explicitly given numerical values. The next
* free values for each category are listed below; update them when inserting
* new pixel codes.
*/
 
#define MEDIA_BUS_FMT_FIXED 0x0001
 
/* RGB - next is 0x1018 */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE 0x1003
#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE 0x1004
#define MEDIA_BUS_FMT_RGB565_1X16 0x1017
#define MEDIA_BUS_FMT_BGR565_2X8_BE 0x1005
#define MEDIA_BUS_FMT_BGR565_2X8_LE 0x1006
#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007
#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008
#define MEDIA_BUS_FMT_RGB666_1X18 0x1009
#define MEDIA_BUS_FMT_RBG888_1X24 0x100e
#define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015
#define MEDIA_BUS_FMT_RGB666_1X7X3_SPWG 0x1010
#define MEDIA_BUS_FMT_BGR888_1X24 0x1013
#define MEDIA_BUS_FMT_GBR888_1X24 0x1014
#define MEDIA_BUS_FMT_RGB888_1X24 0x100a
#define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b
#define MEDIA_BUS_FMT_RGB888_2X12_LE 0x100c
#define MEDIA_BUS_FMT_RGB888_1X7X4_SPWG 0x1011
#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012
#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d
#define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f
 
/* YUV (including grey) - next is 0x2026 */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
#define MEDIA_BUS_FMT_VYUY8_1_5X8 0x2003
#define MEDIA_BUS_FMT_YUYV8_1_5X8 0x2004
#define MEDIA_BUS_FMT_YVYU8_1_5X8 0x2005
#define MEDIA_BUS_FMT_UYVY8_2X8 0x2006
#define MEDIA_BUS_FMT_VYUY8_2X8 0x2007
#define MEDIA_BUS_FMT_YUYV8_2X8 0x2008
#define MEDIA_BUS_FMT_YVYU8_2X8 0x2009
#define MEDIA_BUS_FMT_Y10_1X10 0x200a
#define MEDIA_BUS_FMT_UYVY10_2X10 0x2018
#define MEDIA_BUS_FMT_VYUY10_2X10 0x2019
#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b
#define MEDIA_BUS_FMT_YVYU10_2X10 0x200c
#define MEDIA_BUS_FMT_Y12_1X12 0x2013
#define MEDIA_BUS_FMT_UYVY12_2X12 0x201c
#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d
#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f
#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010
#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011
#define MEDIA_BUS_FMT_YVYU8_1X16 0x2012
#define MEDIA_BUS_FMT_YDYUYDYV8_1X16 0x2014
#define MEDIA_BUS_FMT_UYVY10_1X20 0x201a
#define MEDIA_BUS_FMT_VYUY10_1X20 0x201b
#define MEDIA_BUS_FMT_YUYV10_1X20 0x200d
#define MEDIA_BUS_FMT_YVYU10_1X20 0x200e
#define MEDIA_BUS_FMT_VUY8_1X24 0x2024
#define MEDIA_BUS_FMT_YUV8_1X24 0x2025
#define MEDIA_BUS_FMT_UYVY12_1X24 0x2020
#define MEDIA_BUS_FMT_VYUY12_1X24 0x2021
#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022
#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023
#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
 
/* Bayer - next is 0x3019 */
#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001
#define MEDIA_BUS_FMT_SGBRG8_1X8 0x3013
#define MEDIA_BUS_FMT_SGRBG8_1X8 0x3002
#define MEDIA_BUS_FMT_SRGGB8_1X8 0x3014
#define MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8 0x3015
#define MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8 0x3016
#define MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8 0x3017
#define MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8 0x3018
#define MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 0x300b
#define MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8 0x300c
#define MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8 0x3009
#define MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8 0x300d
#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE 0x3003
#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE 0x3004
#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE 0x3005
#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE 0x3006
#define MEDIA_BUS_FMT_SBGGR10_1X10 0x3007
#define MEDIA_BUS_FMT_SGBRG10_1X10 0x300e
#define MEDIA_BUS_FMT_SGRBG10_1X10 0x300a
#define MEDIA_BUS_FMT_SRGGB10_1X10 0x300f
#define MEDIA_BUS_FMT_SBGGR12_1X12 0x3008
#define MEDIA_BUS_FMT_SGBRG12_1X12 0x3010
#define MEDIA_BUS_FMT_SGRBG12_1X12 0x3011
#define MEDIA_BUS_FMT_SRGGB12_1X12 0x3012
 
/* JPEG compressed formats - next is 0x4002 */
#define MEDIA_BUS_FMT_JPEG_1X8 0x4001
 
/* Vendor specific formats - next is 0x5002 */
 
/* S5C73M3 sensor specific interleaved UYVY and JPEG */
#define MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8 0x5001
 
/* HSV - next is 0x6002 */
#define MEDIA_BUS_FMT_AHSV8888_1X32 0x6001
 
#endif /* __LINUX_MEDIA_BUS_FORMAT_H */
/drivers/include/uapi/linux/swab.h
0,0 → 1,288
#ifndef _UAPI_LINUX_SWAB_H
#define _UAPI_LINUX_SWAB_H
 
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/swab.h>
 
/*
 * Casts are necessary for constants, because we never know for sure how
 * U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
 */
#define ___constant_swab16(x) ((__u16)( \
(((__u16)(x) & (__u16)0x00ffU) << 8) | \
(((__u16)(x) & (__u16)0xff00U) >> 8)))
 
#define ___constant_swab32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
(((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
(((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
(((__u32)(x) & (__u32)0xff000000UL) >> 24)))
 
#define ___constant_swab64(x) ((__u64)( \
(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56)))
 
#define ___constant_swahw32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \
(((__u32)(x) & (__u32)0xffff0000UL) >> 16)))
 
#define ___constant_swahb32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \
(((__u32)(x) & (__u32)0xff00ff00UL) >> 8)))
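Because these ___constant_* forms expand to nothing but casts, masks and shifts, they are valid in constant expressions. A minimal sketch, with the results computed directly from the definitions above:

/* Both initializers fold at compile time. */
static const __u32 swapped_value = ___constant_swab32(0x12345678);  /* 0x78563412 */
static const __u32 word_swapped  = ___constant_swahw32(0x12345678); /* 0x56781234 */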
 
/*
* Implement the following as inlines, but define the interface using
* macros to allow constant folding when possible:
* ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
*/
 
static inline __attribute_const__ __u16 __fswab16(__u16 val)
{
#ifdef __HAVE_BUILTIN_BSWAP16__
return __builtin_bswap16(val);
#elif defined(__arch_swab16)
return __arch_swab16(val);
#else
return ___constant_swab16(val);
#endif
}
 
static inline __attribute_const__ __u32 __fswab32(__u32 val)
{
#ifdef __HAVE_BUILTIN_BSWAP32__
return __builtin_bswap32(val);
#elif defined(__arch_swab32)
return __arch_swab32(val);
#else
return ___constant_swab32(val);
#endif
}
 
static inline __attribute_const__ __u64 __fswab64(__u64 val)
{
#ifdef __HAVE_BUILTIN_BSWAP64__
return __builtin_bswap64(val);
#elif defined(__arch_swab64)
return __arch_swab64(val);
#elif defined(__SWAB_64_THRU_32__)
__u32 h = val >> 32;
__u32 l = val & ((1ULL << 32) - 1);
return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h)));
#else
return ___constant_swab64(val);
#endif
}
 
static inline __attribute_const__ __u32 __fswahw32(__u32 val)
{
#ifdef __arch_swahw32
return __arch_swahw32(val);
#else
return ___constant_swahw32(val);
#endif
}
 
static inline __attribute_const__ __u32 __fswahb32(__u32 val)
{
#ifdef __arch_swahb32
return __arch_swahb32(val);
#else
return ___constant_swahb32(val);
#endif
}
 
/**
* __swab16 - return a byteswapped 16-bit value
* @x: value to byteswap
*/
#define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
___constant_swab16(x) : \
__fswab16(x))
 
/**
* __swab32 - return a byteswapped 32-bit value
* @x: value to byteswap
*/
#define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swab32(x) : \
__fswab32(x))
 
/**
* __swab64 - return a byteswapped 64-bit value
* @x: value to byteswap
*/
#define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
___constant_swab64(x) : \
__fswab64(x))
 
/**
* __swahw32 - return a word-swapped 32-bit value
* @x: value to wordswap
*
* __swahw32(0x12340000) is 0x00001234
*/
#define __swahw32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swahw32(x) : \
__fswahw32(x))
 
/**
* __swahb32 - return a high and low byte-swapped 32-bit value
* @x: value to byteswap
*
* __swahb32(0x12345678) is 0x34127856
*/
#define __swahb32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swahb32(x) : \
__fswahb32(x))
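Putting the dispatch together: with a compile-time constant the ___constant_* branch folds away entirely, while a runtime value goes through the __fswab* inline (and thus any builtin or architecture variant). A minimal sketch of both paths:

#define SWAPPED_MAGIC	__swab32(0x12345678)	/* constant: folds to 0x78563412 */

static inline __u16 swap_port(__u16 port)
{
	return __swab16(port);			/* runtime: calls __fswab16() */
}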
 
/**
* __swab16p - return a byteswapped 16-bit value from a pointer
* @p: pointer to a naturally-aligned 16-bit value
*/
static inline __u16 __swab16p(const __u16 *p)
{
#ifdef __arch_swab16p
return __arch_swab16p(p);
#else
return __swab16(*p);
#endif
}
 
/**
* __swab32p - return a byteswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*/
static inline __u32 __swab32p(const __u32 *p)
{
#ifdef __arch_swab32p
return __arch_swab32p(p);
#else
return __swab32(*p);
#endif
}
 
/**
* __swab64p - return a byteswapped 64-bit value from a pointer
* @p: pointer to a naturally-aligned 64-bit value
*/
static inline __u64 __swab64p(const __u64 *p)
{
#ifdef __arch_swab64p
return __arch_swab64p(p);
#else
return __swab64(*p);
#endif
}
 
/**
* __swahw32p - return a wordswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahw32() for details of wordswapping.
*/
static inline __u32 __swahw32p(const __u32 *p)
{
#ifdef __arch_swahw32p
return __arch_swahw32p(p);
#else
return __swahw32(*p);
#endif
}
 
/**
* __swahb32p - return a high and low byteswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahb32() for details of high/low byteswapping.
*/
static inline __u32 __swahb32p(const __u32 *p)
{
#ifdef __arch_swahb32p
return __arch_swahb32p(p);
#else
return __swahb32(*p);
#endif
}
 
/**
* __swab16s - byteswap a 16-bit value in-place
* @p: pointer to a naturally-aligned 16-bit value
*/
static inline void __swab16s(__u16 *p)
{
#ifdef __arch_swab16s
__arch_swab16s(p);
#else
*p = __swab16p(p);
#endif
}

/**
* __swab32s - byteswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*/
static inline void __swab32s(__u32 *p)
{
#ifdef __arch_swab32s
__arch_swab32s(p);
#else
*p = __swab32p(p);
#endif
}
 
/**
* __swab64s - byteswap a 64-bit value in-place
* @p: pointer to a naturally-aligned 64-bit value
*/
static inline void __swab64s(__u64 *p)
{
#ifdef __arch_swab64s
__arch_swab64s(p);
#else
*p = __swab64p(p);
#endif
}
 
/**
* __swahw32s - wordswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahw32() for details of wordswapping
*/
static inline void __swahw32s(__u32 *p)
{
#ifdef __arch_swahw32s
__arch_swahw32s(p);
#else
*p = __swahw32p(p);
#endif
}
 
/**
* __swahb32s - high and low byteswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahb32() for details of high and low byte swapping
*/
static inline void __swahb32s(__u32 *p)
{
#ifdef __arch_swahb32s
__arch_swahb32s(p);
#else
*p = __swahb32p(p);
#endif
}
 
 
#endif /* _UAPI_LINUX_SWAB_H */
/drivers/include/video/display_timing.h
0,0 → 1,102
/*
* Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>
*
* description of display timings
*
* This file is released under the GPLv2
*/
 
#ifndef __LINUX_DISPLAY_TIMING_H
#define __LINUX_DISPLAY_TIMING_H
 
#include <linux/bitops.h>
#include <linux/types.h>
 
enum display_flags {
DISPLAY_FLAGS_HSYNC_LOW = BIT(0),
DISPLAY_FLAGS_HSYNC_HIGH = BIT(1),
DISPLAY_FLAGS_VSYNC_LOW = BIT(2),
DISPLAY_FLAGS_VSYNC_HIGH = BIT(3),
 
/* data enable flag */
DISPLAY_FLAGS_DE_LOW = BIT(4),
DISPLAY_FLAGS_DE_HIGH = BIT(5),
/* drive data on pos. edge */
DISPLAY_FLAGS_PIXDATA_POSEDGE = BIT(6),
/* drive data on neg. edge */
DISPLAY_FLAGS_PIXDATA_NEGEDGE = BIT(7),
DISPLAY_FLAGS_INTERLACED = BIT(8),
DISPLAY_FLAGS_DOUBLESCAN = BIT(9),
DISPLAY_FLAGS_DOUBLECLK = BIT(10),
};
 
/*
 * A single signal can be specified via a range of minimal and maximal values
 * with a typical value that lies somewhere in between.
 */
struct timing_entry {
u32 min;
u32 typ;
u32 max;
};
 
/*
* Single "mode" entry. This describes one set of signal timings a display can
* have in one setting. This struct can later be converted to struct videomode
* (see include/video/videomode.h). As each timing_entry can be defined as a
* range, one struct display_timing may become multiple struct videomodes.
*
* Example: hsync active high, vsync active low
*
* Active Video
* Video ______________________XXXXXXXXXXXXXXXXXXXXXX_____________________
* |<- sync ->|<- back ->|<----- active ----->|<- front ->|<- sync..
* | | porch | | porch |
*
* HSync _|¯¯¯¯¯¯¯¯¯¯|___________________________________________|¯¯¯¯¯¯¯¯¯
*
* VSync ¯|__________|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|_________
*/
struct display_timing {
struct timing_entry pixelclock;
 
struct timing_entry hactive; /* hor. active video */
struct timing_entry hfront_porch; /* hor. front porch */
struct timing_entry hback_porch; /* hor. back porch */
struct timing_entry hsync_len; /* hor. sync len */
 
struct timing_entry vactive; /* ver. active video */
struct timing_entry vfront_porch; /* ver. front porch */
struct timing_entry vback_porch; /* ver. back porch */
struct timing_entry vsync_len; /* ver. sync len */
 
enum display_flags flags; /* display flags */
};
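As a sketch of how a panel description might use this structure, a hypothetical fixed-mode 640x480 panel follows. All numbers are illustrative; each triple is a { min, typ, max } timing_entry as defined above.

/* Hypothetical panel; every value here is for illustration only. */
static const struct display_timing hyp_panel_timing = {
	.pixelclock	= { 23000000, 25175000, 27000000 },	/* Hz */
	.hactive	= { 640, 640, 640 },
	.hfront_porch	= { 8, 16, 32 },
	.hback_porch	= { 32, 48, 64 },
	.hsync_len	= { 64, 96, 128 },
	.vactive	= { 480, 480, 480 },
	.vfront_porch	= { 4, 10, 16 },
	.vback_porch	= { 16, 33, 48 },
	.vsync_len	= { 1, 2, 4 },
	.flags		= DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};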
 
/*
* This describes all timing settings a display provides.
* The native_mode is the default setting for this display.
* Drivers that can handle multiple videomodes should work with this struct and
* convert each entry to the desired end result.
*/
struct display_timings {
unsigned int num_timings;
unsigned int native_mode;
 
struct display_timing **timings;
};
 
/* get one entry from struct display_timings */
static inline struct display_timing *
display_timings_get(const struct display_timings *disp, unsigned int index)
{
if (disp->num_timings > index)
return disp->timings[index];
else
return NULL;
}
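For instance, fetching the display's default entry is just an indexed lookup with the native_mode field (a sketch; the disp pointer would come from whatever helper built the struct display_timings):

static const struct display_timing *
get_native_timing(const struct display_timings *disp)
{
	return display_timings_get(disp, disp->native_mode);
}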
 
void display_timings_release(struct display_timings *disp);
 
#endif
/drivers/include/video/of_videomode.h
0,0 → 1,18
/*
* Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>
*
* videomode of-helpers
*
* This file is released under the GPLv2
*/
 
#ifndef __LINUX_OF_VIDEOMODE_H
#define __LINUX_OF_VIDEOMODE_H
 
struct device_node;
struct videomode;
 
int of_get_videomode(struct device_node *np, struct videomode *vm,
int index);
 
#endif /* __LINUX_OF_VIDEOMODE_H */
/drivers/include/video/videomode.h
0,0 → 1,58
/*
* Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>
*
* generic videomode description
*
* This file is released under the GPLv2
*/
 
#ifndef __LINUX_VIDEOMODE_H
#define __LINUX_VIDEOMODE_H
 
#include <linux/types.h>
#include <video/display_timing.h>
 
/*
* Subsystem independent description of a videomode.
* Can be generated from struct display_timing.
*/
struct videomode {
unsigned long pixelclock; /* pixelclock in Hz */
 
u32 hactive;
u32 hfront_porch;
u32 hback_porch;
u32 hsync_len;
 
u32 vactive;
u32 vfront_porch;
u32 vback_porch;
u32 vsync_len;
 
enum display_flags flags; /* display flags */
};
 
/**
* videomode_from_timing - convert display timing to videomode
* @dt: display_timing structure
* @vm: return value
*
* DESCRIPTION:
* This function converts a struct display_timing to a struct videomode.
*/
void videomode_from_timing(const struct display_timing *dt,
struct videomode *vm);
 
/**
* videomode_from_timings - convert one display timings entry to videomode
* @disp: structure with all possible timing entries
* @vm: return value
* @index: index into the list of display timings in devicetree
*
* DESCRIPTION:
* This function converts one struct display_timing entry to a struct videomode.
*/
int videomode_from_timings(const struct display_timings *disp,
struct videomode *vm, unsigned int index);
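A sketch tying the two headers together: convert the native display_timing entry to a videomode, then derive the refresh rate from its fields. The 0-on-success return convention and the refresh arithmetic are assumptions here, not part of the declared interface.

/* Sketch: convert the native timing entry and estimate its refresh rate. */
static unsigned int native_refresh_hz(const struct display_timings *disp)
{
	struct videomode vm;
	unsigned long htotal, vtotal;

	if (videomode_from_timings(disp, &vm, disp->native_mode))
		return 0;	/* conversion failed */

	htotal = vm.hactive + vm.hfront_porch + vm.hback_porch + vm.hsync_len;
	vtotal = vm.vactive + vm.vfront_porch + vm.vback_porch + vm.vsync_len;

	return vm.pixelclock / (htotal * vtotal);
}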
 
#endif