Subversion Repositories Kolibri OS

Compare Revisions

Rev 6936 → Rev 6934

/drivers/include/asm/bug.h
File deleted
/drivers/include/asm/fixmap.h
19,6 → 19,7
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <asm/pvclock.h>
#ifdef CONFIG_X86_32
#include <linux/threads.h>
#include <asm/kmap_types.h>
71,7 → 72,11
#ifdef CONFIG_X86_VSYSCALL_EMULATION
VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT,
#endif
#ifdef CONFIG_PARAVIRT_CLOCK
PVCLOCK_FIXMAP_BEGIN,
PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
#endif
#endif
FIX_DBGP_BASE,
FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
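For context, the fixmap indices in the enum above are allocated downward from FIXADDR_TOP; a minimal sketch of the standard index-to-address mapping, assuming the FIXADDR_TOP and PAGE_SHIFT definitions from these headers:

/* Sketch only: how a fixed_addresses index becomes a virtual address. */
#define example_fix_to_virt(idx) (FIXADDR_TOP - ((idx) << PAGE_SHIFT))

This is why PVCLOCK_FIXMAP_END is computed as PVCLOCK_FIXMAP_BEGIN + PVCLOCK_VSYSCALL_NR_PAGES - 1: consecutive enum slots map to consecutive pages.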
/drivers/include/asm/pgtable_types.h
22,10 → 22,9
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1
#define _PAGE_BIT_SPLITTING _PAGE_BIT_SOFTW2 /* only valid on a PSE pmd */
#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_SOFTW4 58 /* available for programmer */
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
 
/* If _PAGE_BIT_PRESENT is clear, we use these: */
47,6 → 46,7
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define _PAGE_SPLITTING (_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING)
#define __HAVE_ARCH_PTE_SPECIAL
 
#ifdef CONFIG_KMEMCHECK
85,11 → 85,8
 
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP)
#define __HAVE_ARCH_PTE_DEVMAP
#else
#define _PAGE_NX (_AT(pteval_t, 0))
#define _PAGE_DEVMAP (_AT(pteval_t, 0))
#endif
 
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
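Each _PAGE_BIT_* number above turns into a single-bit mask through the _AT(pteval_t, 1) << bit pattern; a hypothetical illustration (the EXAMPLE names are not real kernel bits):

/* Hypothetical: the bit-number-to-mask pattern used throughout this header. */
#define _PAGE_BIT_EXAMPLE 9 /* one of the software-available bit positions */
#define _PAGE_EXAMPLE (_AT(pteval_t, 1) << _PAGE_BIT_EXAMPLE) /* mask 0x200 */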
/drivers/include/asm/io.h
File deleted
/drivers/include/asm/atomic.h
3,6 → 3,7
 
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
/drivers/include/asm/barrier.h
53,24 → 53,24
* model and we should fall back to full barriers.
*/
 
#define __smp_store_release(p, v) \
#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
__smp_mb(); \
smp_mb(); \
WRITE_ONCE(*p, v); \
} while (0)
 
#define __smp_load_acquire(p) \
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
__smp_mb(); \
smp_mb(); \
___p1; \
})
 
#else /* regular x86 TSO memory ordering */
 
#define __smp_store_release(p, v) \
#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
77,7 → 77,7
WRITE_ONCE(*p, v); \
} while (0)
 
#define __smp_load_acquire(p) \
#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
88,9 → 88,7
#endif
 
/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic() barrier()
#define __smp_mb__after_atomic() barrier()
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()
 
#include <asm-generic/barrier.h>
 
#endif /* _ASM_X86_BARRIER_H */
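The smp_store_release()/smp_load_acquire() pair defined above implements one-way ordering for publish/consume handoffs; a minimal usage sketch, where data, ready, compute() and consume() are illustrative names, not from this diff:

/* Producer: the release store orders the data write before the flag. */
data = compute();
smp_store_release(&ready, 1);

/* Consumer: the acquire load orders the data read after the flag. */
if (smp_load_acquire(&ready))
        consume(data);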
/drivers/include/asm/cpufeature.h
12,7 → 12,7
#include <asm/disabled-features.h>
#endif
 
#define NCAPINTS 16 /* N 32-bit words worth of info */
#define NCAPINTS 14 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
 
/*
181,17 → 181,22
 
/*
* Auxiliary flags: Linux defined - For features scattered in various
* CPUID levels like 0x6, 0xA etc, word 7.
*
* Reuse free bits when adding new feature flags!
* CPUID levels like 0x6, 0xA etc, word 7
*/
 
#define X86_FEATURE_IDA ( 7*32+ 0) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */
#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
 
#define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 
#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */
#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
 
/* Virtualization flags: Linux defined, word 8 */
200,7 → 205,16
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
 
#define X86_FEATURE_NPT ( 8*32+ 5) /* AMD Nested Page Table support */
#define X86_FEATURE_LBRV ( 8*32+ 6) /* AMD LBR Virtualization support */
#define X86_FEATURE_SVML ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
#define X86_FEATURE_NRIPS ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
#define X86_FEATURE_VMCBCLEAN ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
 
245,30 → 259,6
/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
 
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
 
/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
 
/*
* BUG word(s)
*/
289,26 → 279,6
#include <asm/asm.h>
#include <linux/bitops.h>
 
enum cpuid_leafs
{
CPUID_1_EDX = 0,
CPUID_8000_0001_EDX,
CPUID_8086_0001_EDX,
CPUID_LNX_1,
CPUID_1_ECX,
CPUID_C000_0001_EDX,
CPUID_8000_0001_ECX,
CPUID_LNX_2,
CPUID_LNX_3,
CPUID_7_0_EBX,
CPUID_D_1_EAX,
CPUID_F_0_EDX,
CPUID_F_1_EDX,
CPUID_8000_0008_EBX,
CPUID_6_EAX,
CPUID_8000_000A_EDX,
};
 
#ifdef CONFIG_X86_FEATURE_NAMES
extern const char * const x86_cap_flags[NCAPINTS*32];
extern const char * const x86_power_flags[32];
386,31 → 356,60
} while (0)
 
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
#define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE)
#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC)
#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE)
#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP)
#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR)
#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX)
#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR)
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2)
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE)
#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2)
#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN)
#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE)
#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH)
#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1)
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT)
#define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES)
#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
/*
* Do not add any more of those clumsy macros - use static_cpu_has_safe() for
* fast paths and boot_cpu_has() otherwise!
*/
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB)
#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2)
#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
 
#if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS)
#if __GNUC__ >= 4
extern void warn_pre_alternatives(void);
extern bool __static_cpu_has_safe(u16 bit);
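Every X86_FEATURE_* constant above encodes word * 32 + bit into an array of NCAPINTS 32-bit capability words; a hedged sketch of the lookup this encoding implies (example helper only, not the kernel's boot_cpu_has() implementation):

/* Illustrative: decode a feature number into (word, bit) and test it. */
static inline int example_cpu_has(const u32 caps[/* NCAPINTS */], int feature)
{
        return (caps[feature >> 5] >> (feature & 31)) & 1;
}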
 
/drivers/include/asm/msr-index.h
321,7 → 321,6
#define MSR_F15H_PERF_CTR 0xc0010201
#define MSR_F15H_NB_PERF_CTL 0xc0010240
#define MSR_F15H_NB_PERF_CTR 0xc0010241
#define MSR_F15H_IC_CFG 0xc0011021
 
/* Fam 10h MSRs */
#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
/drivers/include/asm/msr.h
32,16 → 32,6
int err;
};
 
struct saved_msr {
bool valid;
struct msr_info info;
};
 
struct saved_msrs {
unsigned int num;
struct saved_msr *array;
};
 
static inline unsigned long long native_read_tscp(unsigned int *aux)
{
unsigned long low, high;
171,7 → 161,7
 
static inline void wrmsrl(unsigned msr, u64 val)
{
native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
native_write_msr(msr, (u32)val, (u32)(val >> 32));
}
 
/* wrmsr with exception handling */
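wrmsrl() above splits the 64-bit value into the EDX:EAX halves that the WRMSR instruction expects; the two forms shown in this hunk are equivalent, since the cast to u32 already truncates and the explicit 0xffffffffULL mask is redundant:

/* Sketch of the split performed by either form of wrmsrl(): */
u32 lo = (u32)val;         /* low 32 bits -> EAX (mask would be redundant) */
u32 hi = (u32)(val >> 32); /* high 32 bits -> EDX */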
/drivers/include/asm/pgtable.h
69,6 → 69,9
#define pmd_clear(pmd) native_pmd_clear(pmd)
 
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
#define pmd_update(mm, addr, ptep) do { } while (0)
#define pmd_update_defer(mm, addr, ptep) do { } while (0)
 
#define pgd_val(x) native_pgd_val(x)
#define __pgd(x) native_make_pgd(x)
162,9 → 165,14
}
 
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
return pmd_val(pmd) & _PAGE_SPLITTING;
}
 
static inline int pmd_trans_huge(pmd_t pmd)
{
return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
return pmd_val(pmd) & _PAGE_PSE;
}
 
static inline int has_transparent_hugepage(void)
171,13 → 179,6
{
return cpu_has_pse;
}
 
#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
254,11 → 255,6
return pte_set_flags(pte, _PAGE_SPECIAL);
}
 
static inline pte_t pte_mkdevmap(pte_t pte)
{
return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
 
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
pmdval_t v = native_pmd_val(pmd);
278,11 → 274,6
return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}
 
static inline pmd_t pmd_mkclean(pmd_t pmd)
{
return pmd_clear_flags(pmd, _PAGE_DIRTY);
}
 
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
return pmd_clear_flags(pmd, _PAGE_RW);
293,11 → 284,6
return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}
 
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
return pmd_set_flags(pmd, _PAGE_DEVMAP);
}
 
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
return pmd_set_flags(pmd, _PAGE_PSE);
479,13 → 465,6
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
 
#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif
 
#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
752,9 → 731,14
* updates should either be sets, clears, or set_pte_atomic for P->P
* transitions, which means this hook should only be called for user PTEs.
* This hook implies a P->P protection or access change has taken place, which
* requires a subsequent TLB flush.
* requires a subsequent TLB flush. The notification can optionally be delayed
* until the TLB flush event by using the pte_update_defer form of the
* interface, but care must be taken to assure that the flush happens while
* still holding the same page table lock so that the shadow and primary pages
* do not become out of sync on SMP.
*/
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
#endif
 
/*
832,6 → 816,10
unsigned long address, pmd_t *pmdp);
 
 
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp);
 
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
842,7 → 830,9
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
return native_pmdp_get_and_clear(pmdp);
pmd_t pmd = native_pmdp_get_and_clear(pmdp);
pmd_update(mm, addr, pmdp);
return pmd;
}
 
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
850,6 → 840,7
unsigned long addr, pmd_t *pmdp)
{
clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
pmd_update(mm, addr, pmdp);
}
 
/*
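The pte_update_defer() contract documented above requires the deferred notification and the TLB flush to occur under the same page-table lock so shadow and primary page tables stay in sync; a hedged sketch of the calling pattern (illustrative sequence, not kernel code):

/* Illustrative calling pattern for the deferred-update contract: */
spin_lock(ptl);                   /* page table lock                      */
set_pte(ptep, new_pte);           /* P->P protection/access change        */
pte_update_defer(mm, addr, ptep); /* defer the shadow-PT notification...  */
flush_tlb_page(vma, addr);        /* ...to the flush, under the same lock */
spin_unlock(ptl);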
/drivers/include/asm/x86_init.h
82,11 → 82,13
* struct x86_init_timers - platform specific timer setup
* @setup_percpu_clockev: set up the per cpu clock event device for the
* boot cpu
* @tsc_pre_init: platform function called before TSC init
* @timer_init: initialize the platform timer (default PIT/HPET)
* @wallclock_init: init the wallclock device
*/
struct x86_init_timers {
void (*setup_percpu_clockev)(void);
void (*tsc_pre_init)(void);
void (*timer_init)(void);
void (*wallclock_init)(void);
};
/drivers/include/asm-generic/iomap.h
File deleted
/drivers/include/asm-generic/barrier.h
File deleted
/drivers/include/asm-generic/pci_iomap.h
File deleted
/drivers/include/asm-generic/bug.h
File deleted
/drivers/include/drm/drm_modeset_helper_vtables.h
File deleted
/drivers/include/drm/drmP.h
50,7 → 50,6
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
 
#include <linux/firmware.h>
#include <linux/err.h>
920,7 → 919,8
#endif
 
extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags);
struct drm_gem_object *obj,
int flags);
extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
950,7 → 950,7
void drm_dev_unref(struct drm_device *dev);
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);
int drm_dev_set_unique(struct drm_device *dev, const char *name);
int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...);
 
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
void drm_minor_release(struct drm_minor *minor);
971,11 → 971,6
extern int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver);
static inline int drm_pci_set_busid(struct drm_device *dev,
struct drm_master *master)
{
return -ENOSYS;
}
#endif
 
#define DRM_PCIE_SPEED_25 1
983,12 → 978,7
#define DRM_PCIE_SPEED_80 4
 
extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
extern int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw);
 
/* platform section */
extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
extern int drm_platform_set_busid(struct drm_device *d, struct drm_master *m);
 
/* returns true if currently okay to sleep */
static __inline__ bool drm_can_sleep(void)
{
995,9 → 985,6
return true;
}
 
/* helper for handling conditionals in various for_each macros */
#define for_each_if(condition) if (!(condition)) {} else
 
static __inline__ int drm_device_is_pcie(struct drm_device *dev)
{
return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
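The for_each_if() helper added above expands to `if (!(condition)) {} else`, which lets the for_each_*_in_state macros filter entries without creating a dangling-else hazard for the caller; a small illustration with made-up names:

/* Illustrative: the loop body runs only when the condition holds, and an
 * `else` the caller writes after the body cannot bind to the hidden if. */
for (i = 0; i < num_planes; i++)
        for_each_if (planes[i].enabled)
                handle_plane(&planes[i]);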
/drivers/include/drm/i915_pciids.h
277,61 → 277,22
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
 
#define INTEL_SKL_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */
INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \
 
#define INTEL_SKL_GT4_IDS(info) \
INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \
INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \
INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4 */
 
#define INTEL_SKL_IDS(info) \
INTEL_SKL_GT1_IDS(info), \
INTEL_SKL_GT2_IDS(info), \
INTEL_SKL_GT3_IDS(info), \
INTEL_SKL_GT4_IDS(info)
INTEL_SKL_GT3_IDS(info)
 
#define INTEL_BXT_IDS(info) \
INTEL_VGA_DEVICE(0x0A84, info), \
INTEL_VGA_DEVICE(0x1A84, info), \
INTEL_VGA_DEVICE(0x5A84, info)
INTEL_VGA_DEVICE(0x1A85, info), \
INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */
 
#define INTEL_KBL_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \
INTEL_VGA_DEVICE(0x5917, info), /* DT GT1.5 */ \
INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
 
#define INTEL_KBL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
 
#define INTEL_KBL_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
 
#define INTEL_KBL_GT4_IDS(info) \
INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \
INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */
 
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
INTEL_KBL_GT2_IDS(info), \
INTEL_KBL_GT3_IDS(info), \
INTEL_KBL_GT4_IDS(info)
 
#endif /* _I915_PCIIDS_H */
/drivers/include/drm/ttm/ttm_bo_api.h
316,7 → 316,21
*/
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
bool interruptible, bool no_wait);
 
/**
* ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
*
* @placement: The proposed placement to check
* @mem: The struct ttm_mem_reg indicating the region where the bo resides
* @new_flags: Describes compatible placement found
*
* Returns true if the placement is compatible
*/
extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem,
uint32_t *new_flags);
 
/**
* ttm_bo_validate
*
* @bo: The buffer object.
383,16 → 397,6
*/
extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
 
/**
* ttm_bo_move_to_lru_tail
*
* @bo: The buffer object.
*
* Move this BO to the tail of all lru lists used to look up and reserve an
* object. This function must be called with struct ttm_bo_global::lru_lock
* held, and is used to make a BO less likely to be considered for eviction.
*/
extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
 
/**
* ttm_bo_lock_delayed_workqueue
/drivers/include/drm/ttm/ttm_bo_driver.h
826,10 → 826,10
* reserved, the validation sequence is checked against the validation
* sequence of the process currently reserving the buffer,
* and if the current validation sequence is greater than that of the process
* holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
* holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
* waiting for the buffer to become unreserved, after which it retries
* reserving.
* The caller should, when receiving an -EDEADLK error
* The caller should, when receiving an -EAGAIN error
* release all its buffer reservations, wait for @bo to become unreserved, and
* then rerun the validation with the same validation sequence. This procedure
* will always guarantee that the process with the lowest validation sequence
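The scheme described above is a classic backoff protocol; a hedged sketch of the caller-side loop it prescribes (illustrative names, and note this hunk changes the documented error code between -EDEADLK and -EAGAIN):

/* Illustrative backoff loop, not kernel code: */
retry:
        list_for_each_entry(entry, &list, head) {
                ret = reserve_buffer(entry->bo, val_seq);
                if (ret == -EDEADLK) {
                        release_all_reservations(&list);
                        wait_for_unreserved(entry->bo);
                        goto retry; /* rerun with the same validation sequence */
                }
        }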
/drivers/include/drm/drm_dp_mst_helper.h
88,7 → 88,6
struct drm_dp_mst_topology_mgr *mgr;
 
struct edid *cached_edid; /* for DP logical ports - make tiling work */
bool has_audio;
};
 
/**
215,13 → 214,13
struct drm_dp_sideband_msg_hdr initial_hdr;
};
 
#define DRM_DP_MAX_SDP_STREAMS 16
 
struct drm_dp_allocate_payload {
u8 port_number;
u8 number_sdp_streams;
u8 vcpi;
u16 pbn;
u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
u8 sdp_stream_sink[8];
};
 
struct drm_dp_allocate_payload_ack_reply {
418,7 → 417,7
struct drm_dp_mst_topology_mgr {
 
struct device *dev;
const struct drm_dp_mst_topology_cbs *cbs;
struct drm_dp_mst_topology_cbs *cbs;
int max_dpcd_transaction_bytes;
struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
int max_payloads;
478,8 → 477,6
 
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
 
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
 
 
/drivers/include/drm/drm_gem.h
35,68 → 35,34
*/
 
/**
* struct drm_gem_object - GEM buffer object
*
* This structure defines the generic parts for GEM buffer objects, which are
* mostly around handling mmap and userspace handles.
*
* Buffer objects are often abbreviated to BO.
* This structure defines the drm_mm memory object, which will be used by the
* DRM for its buffer objects.
*/
struct drm_gem_object {
/**
* @refcount:
*
* Reference count of this object
*
* Please use drm_gem_object_reference() to acquire and
* drm_gem_object_unreference() or drm_gem_object_unreference_unlocked()
* to release a reference to a GEM buffer object.
*/
/** Reference count of this object */
struct kref refcount;
 
/**
* @handle_count:
* handle_count - gem file_priv handle count of this object
*
* This is the GEM file_priv handle count of this object.
*
* Each handle also holds a reference. Note that when the handle_count
* drops to 0 any global names (e.g. the id in the flink namespace) will
* be cleared.
*
* Protected by dev->object_name_lock.
*/
* */
unsigned handle_count;
 
/**
* @dev: DRM dev this object belongs to.
*/
/** Related drm device */
struct drm_device *dev;
 
/**
* @filp:
*
* SHMEM file node used as backing storage for swappable buffer objects.
* GEM also supports driver private objects with driver-specific backing
* storage (contiguous CMA memory, special reserved blocks). In this
* case @filp is NULL.
*/
/** File representing the shmem storage */
struct file *filp;
 
/**
* @vma_node:
*
* Mapping info for this object to support mmap. Drivers are supposed to
* allocate the mmap offset using drm_gem_create_mmap_offset(). The
* offset itself can be retrieved using drm_vma_node_offset_addr().
*
* Memory mapping itself is handled by drm_gem_mmap(), which also checks
* that userspace is allowed to access the object.
*/
/* Mapping info for this object */
struct drm_vma_offset_node vma_node;
 
/**
* @size:
*
* Size of the object, in bytes. Immutable over the object's
* lifetime.
*/
103,32 → 69,21
size_t size;
 
/**
* @name:
*
* Global name for this object, starts at 1. 0 means unnamed.
* Access is covered by dev->object_name_lock. This is used by the GEM_FLINK
* and GEM_OPEN ioctls.
* Access is covered by the object_name_lock in the related drm_device
*/
int name;
 
/**
* @read_domains:
*
* Read memory domains. These monitor which caches contain read/write data
* Memory domains. These monitor which caches contain read/write data
* related to the object. When transitioning from one set of domains
* to another, the driver is called to ensure that caches are suitably
* flushed and invalidated.
* flushed and invalidated
*/
uint32_t read_domains;
 
/**
* @write_domain: Corresponding unique write memory domain.
*/
uint32_t write_domain;
 
/**
* @pending_read_domains:
*
* While validating an exec operation, the
* new read/write domain values are computed here.
* They will be transferred to the above values
135,30 → 90,22
* at the point that any cache flushing occurs
*/
uint32_t pending_read_domains;
 
/**
* @pending_write_domain: Write domain similar to @pending_read_domains.
*/
uint32_t pending_write_domain;
 
/**
* @dma_buf:
* dma_buf - dma buf associated with this GEM object
*
* dma-buf associated with this GEM object.
*
* Pointer to the dma-buf associated with this gem object (either
* through importing or exporting). We break the resulting reference
* loop when the last gem handle for this object is released.
*
* Protected by obj->object_name_lock.
* Protected by obj->object_name_lock
*/
struct dma_buf *dma_buf;
 
/**
* @import_attach:
* import_attach - dma buf attachment backing this object
*
* dma-buf attachment backing this object.
*
* Any foreign dma_buf imported as a gem object has this set to the
* attachment point for the device. This is invariant over the lifetime
* of a gem object.
186,13 → 133,6
struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
/**
* drm_gem_object_reference - acquire a GEM BO reference
* @obj: GEM buffer object
*
* This acquires additional reference to @obj. It is illegal to call this
* without already holding a reference. No locks required.
*/
static inline void
drm_gem_object_reference(struct drm_gem_object *obj)
{
199,17 → 139,6
kref_get(&obj->refcount);
}
 
/**
* drm_gem_object_unreference - release a GEM BO reference
* @obj: GEM buffer object
*
* This releases a reference to @obj. Callers must hold the dev->struct_mutex
* lock when calling this function, even when the driver doesn't use
* dev->struct_mutex for anything.
*
* For drivers not encumbered with legacy locking use
* drm_gem_object_unreference_unlocked() instead.
*/
static inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
220,13 → 149,6
}
}
 
/**
* drm_gem_object_unreference_unlocked - release a GEM BO reference
* @obj: GEM buffer object
*
* This releases a reference to @obj. Callers must not hold the
* dev->struct_mutex lock when calling this function.
*/
static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
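The reference helpers above are thin wrappers over the kref pattern; a minimal sketch, with drm_gem_object_free used illustratively as the release callback:

/* Sketch of the kref usage these helpers wrap: */
kref_get(&obj->refcount);                      /* acquire a reference    */
kref_put(&obj->refcount, drm_gem_object_free); /* release; frees at zero */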
/drivers/include/drm/drm_atomic.h
130,6 → 130,10
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc);
 
int
drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc);
 
void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
 
void
145,7 → 149,7
((connector) = (state)->connectors[__i], \
(connector_state) = (state)->connector_states[__i], 1); \
(__i)++) \
for_each_if (connector)
if (connector)
 
#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \
for ((__i) = 0; \
153,7 → 157,7
((crtc) = (state)->crtcs[__i], \
(crtc_state) = (state)->crtc_states[__i], 1); \
(__i)++) \
for_each_if (crtc_state)
if (crtc_state)
 
#define for_each_plane_in_state(state, plane, plane_state, __i) \
for ((__i) = 0; \
161,7 → 165,7
((plane) = (state)->planes[__i], \
(plane_state) = (state)->plane_states[__i], 1); \
(__i)++) \
for_each_if (plane_state)
if (plane_state)
static inline bool
drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
{
/drivers/include/drm/drm_atomic_helper.h
42,10 → 42,6
struct drm_atomic_state *state,
bool async);
 
bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
struct drm_atomic_state *old_state,
struct drm_crtc *crtc);
 
void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
struct drm_atomic_state *old_state);
 
66,8 → 62,6
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
struct drm_atomic_state *old_state);
void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state);
void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
bool atomic);
 
void drm_atomic_helper_swap_state(struct drm_device *dev,
struct drm_atomic_state *state);
87,12 → 81,6
int __drm_atomic_helper_set_config(struct drm_mode_set *set,
struct drm_atomic_state *state);
 
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev);
int drm_atomic_helper_resume(struct drm_device *dev,
struct drm_atomic_state *state);
 
int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t val);
130,8 → 118,6
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
 
void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
struct drm_connector_state *conn_state);
void drm_atomic_helper_connector_reset(struct drm_connector *connector);
void
__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
/drivers/include/drm/drm_crtc.h
85,11 → 85,7
return (uint64_t)*((uint64_t *)&val);
}
 
/*
* Rotation property bits. DRM_ROTATE_<degrees> rotates the image by the
* specified amount in degrees in counter clockwise direction. DRM_REFLECT_X and
* DRM_REFLECT_Y reflects the image along the specified axis prior to rotation
*/
/* rotation property bits */
#define DRM_ROTATE_MASK 0x0f
#define DRM_ROTATE_0 0
#define DRM_ROTATE_90 1
162,60 → 158,23
u8 group_data[8];
};
 
/**
* struct drm_framebuffer_funcs - framebuffer hooks
*/
struct drm_framebuffer_funcs {
/**
* @destroy:
*
* Clean up framebuffer resources, specifically also unreference the
* backing storage. The core guarantees to call this function for every
* framebuffer successfully created by ->fb_create() in
* &drm_mode_config_funcs. Drivers must also call
* drm_framebuffer_cleanup() to release DRM core resources for this
* framebuffer.
*/
/* note: use drm_framebuffer_remove() */
void (*destroy)(struct drm_framebuffer *framebuffer);
 
/**
* @create_handle:
*
* Create a buffer handle in the driver-specific buffer manager (either
* GEM or TTM) valid for the passed-in struct &drm_file. This is used by
* the core to implement the GETFB IOCTL, which returns (for
* sufficiently privileged user) also a native buffer handle. This can
* be used for seamless transitions between modesetting clients by
* copying the current screen contents to a private buffer and blending
* between that and the new contents.
*
* GEM based drivers should call drm_gem_handle_create() to create the
* handle.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
*/
int (*create_handle)(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle);
/**
* @dirty:
/*
* Optional callback for the dirty fb ioctl.
*
* Optional callback for the dirty fb IOCTL.
* Userspace can notify the driver via this callback
* that an area of the framebuffer has changed and should
* be flushed to the display hardware.
*
* Userspace can notify the driver via this callback that an area of the
* framebuffer has changed and should be flushed to the display
* hardware. This can also be used internally, e.g. by the fbdev
* emulation, though that's not the case currently.
*
* See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd
* for more information as all the semantics and arguments have a one to
* one mapping on this function.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
* See documentation in drm_mode.h for the struct
* drm_mode_fb_dirty_cmd for more information as all
* the semantics and arguments have a one to one mapping
* on this function.
*/
int (*dirty)(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv, unsigned flags,
291,11 → 250,6
struct drm_bridge;
struct drm_atomic_state;
 
struct drm_crtc_helper_funcs;
struct drm_encoder_helper_funcs;
struct drm_connector_helper_funcs;
struct drm_plane_helper_funcs;
 
/**
* struct drm_crtc_state - mutable CRTC state
* @crtc: backpointer to the CRTC
306,7 → 260,6
* @active_changed: crtc_state->active has been toggled.
* @connectors_changed: connectors to this crtc have been updated
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
* @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
* @last_vblank_count: for helpers and drivers to capture the vblank of the
* update to ensure framebuffer cleanup isn't done too early
* @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
340,8 → 293,6
*/
u32 plane_mask;
 
u32 connector_mask;
 
/* last_vblank_count: for vblank waits before cleanup */
u32 last_vblank_count;
 
360,6 → 311,23
 
/**
* struct drm_crtc_funcs - control CRTCs for a given device
* @save: save CRTC state
* @restore: restore CRTC state
* @reset: reset CRTC after state has been invalidated (e.g. resume)
* @cursor_set: setup the cursor
* @cursor_set2: setup the cursor with hotspot, supersedes @cursor_set if set
* @cursor_move: move the cursor
* @gamma_set: specify color ramp for CRTC
* @destroy: deinit and free object
* @set_property: called when a property is changed
* @set_config: apply a new CRTC configuration
* @page_flip: initiate a page flip
* @atomic_duplicate_state: duplicate the atomic state for this CRTC
* @atomic_destroy_state: destroy an atomic state for this CRTC
* @atomic_set_property: set a property on an atomic state for this CRTC
* (do not call directly, use drm_atomic_crtc_set_property())
* @atomic_get_property: get a property on an atomic state for this CRTC
* (do not call directly, use drm_atomic_crtc_get_property())
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
371,188 → 339,37
* bus accessors.
*/
struct drm_crtc_funcs {
/**
* @reset:
*
* Reset CRTC hardware and software state to off. This function isn't
* called by the core directly, only through drm_mode_config_reset().
* It's not a helper hook only for historical reasons.
*
* Atomic drivers can use drm_atomic_helper_crtc_reset() to reset
* atomic state using this hook.
*/
/* Save CRTC state */
void (*save)(struct drm_crtc *crtc); /* suspend? */
/* Restore CRTC state */
void (*restore)(struct drm_crtc *crtc); /* resume? */
/* Reset CRTC state */
void (*reset)(struct drm_crtc *crtc);
 
/**
* @cursor_set:
*
* Update the cursor image. The cursor position is relative to the CRTC
* and can be partially or fully outside of the visible area.
*
* Note that contrary to all other KMS functions the legacy cursor entry
* points don't take a framebuffer object, but instead take directly a
* raw buffer object id from the driver's buffer manager (which is
* either GEM or TTM for current drivers).
*
* This entry point is deprecated, drivers should instead implement
* universal plane support and register a proper cursor plane using
* drm_crtc_init_with_planes().
*
* This callback is optional
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
*/
/* cursor controls */
int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
 
/**
* @cursor_set2:
*
* Update the cursor image, including hotspot information. The hotspot
* must not affect the cursor position in CRTC coordinates, but is only
* meant as a hint for virtualized display hardware to coordinate the
* guest's and host's cursor position. The cursor hotspot is relative to
* the cursor image. Otherwise this works exactly like @cursor_set.
*
* This entry point is deprecated, drivers should instead implement
* universal plane support and register a proper cursor plane using
* drm_crtc_init_with_planes().
*
* This callback is optional.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
*/
int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height,
int32_t hot_x, int32_t hot_y);
 
/**
* @cursor_move:
*
* Update the cursor position. The cursor does not need to be visible
* when this hook is called.
*
* This entry point is deprecated, drivers should instead implement
* universal plane support and register a proper cursor plane using
* drm_crtc_init_with_planes().
*
* This callback is optional.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
*/
int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
 
/**
* @gamma_set:
*
* Set gamma on the CRTC.
*
* This callback is optional.
*
* NOTE:
*
* Drivers that support gamma tables and also fbdev emulation through
* the provided helper library need to take care to fill out the gamma
* hooks for both. Currently there's a bit of an unfortunate duplication
* going on, which should eventually be unified to just one set of
* hooks.
*/
/* Set gamma on the CRTC */
void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t start, uint32_t size);
 
/**
* @destroy:
*
* Clean up plane resources. This is only called at driver unload time
* through drm_mode_config_cleanup() since a CRTC cannot be hotplugged
* in DRM.
*/
/* Object destroy routine */
void (*destroy)(struct drm_crtc *crtc);
 
/**
* @set_config:
*
* This is the main legacy entry point to change the modeset state on a
* CRTC. All the details of the desired configuration are passed in a
* struct &drm_mode_set - see there for details.
*
* Drivers implementing atomic modeset should use
* drm_atomic_helper_set_config() to implement this hook.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
*/
int (*set_config)(struct drm_mode_set *set);
 
/**
* @page_flip:
*
* Legacy entry point to schedule a flip to the given framebuffer.
*
* Page flipping is a synchronization mechanism that replaces the frame
* buffer being scanned out by the CRTC with a new frame buffer during
* vertical blanking, avoiding tearing (except when requested otherwise
* through the DRM_MODE_PAGE_FLIP_ASYNC flag). When an application
* requests a page flip the DRM core verifies that the new frame buffer
* is large enough to be scanned out by the CRTC in the currently
* configured mode and then calls the CRTC ->page_flip() operation with a
* pointer to the new frame buffer.
*
* The driver must wait for any pending rendering to the new framebuffer
* to complete before executing the flip. It should also wait for any
* pending rendering from other drivers if the underlying buffer is a
* shared dma-buf.
*
* An application can request to be notified when the page flip has
* completed. The drm core will supply a struct &drm_event in the event
* parameter in this case. This can be handled by the
* drm_crtc_send_vblank_event() function, which the driver should call on
* the provided event upon completion of the flip. Note that if
* the driver supports vblank signalling and timestamping the vblank
* counters and timestamps must agree with the ones returned from page
* flip events. With the current vblank helper infrastructure this can
* be achieved by holding a vblank reference while the page flip is
* pending, acquired through drm_crtc_vblank_get() and released with
* drm_crtc_vblank_put(). Drivers are free to implement their own vblank
* counter and timestamp tracking though, e.g. if they have accurate
* timestamp registers in hardware.
*
* FIXME:
*
* Up to that point drivers need to manage events themselves and can use
* event->base.list freely for that. Specifically they need to ensure
* that they don't send out page flip (or vblank) events for which the
* corresponding drm file has been closed already. The drm core
* unfortunately does not (yet) take care of that. Therefore drivers
* currently must clean up and release pending events in their
* ->preclose driver function.
*
* This callback is optional.
*
* NOTE:
*
* Very early versions of the KMS ABI mandated that the driver must
* block (but not reject) any rendering to the old framebuffer until the
* flip operation has completed and the old framebuffer is no longer
* visible. This requirement has been lifted, and userspace is instead
* expected to request delivery of an event and wait with recycling old
* buffers until such has been received.
*
* RETURNS:
*
* 0 on success or a negative error code on failure. Note that if a
* ->page_flip() operation is already pending the callback should return
* -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode
* or just runtime disabled through DPMS respectively the new atomic
* "ACTIVE" state) should result in an -EINVAL error code. Note that
* drm_atomic_helper_page_flip() checks this already for atomic drivers.
/*
* Flip to the given framebuffer. This implements the page
* flip ioctl described in drm_mode.h, specifically, the
* implementation must return immediately and block all
* rendering to the current fb until the flip has completed.
* If userspace set the event flag in the ioctl, the event
* argument will point to an event to send back when the flip
* completes, otherwise it will be NULL.
*/
int (*page_flip)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
559,129 → 376,17
struct drm_pending_vblank_event *event,
uint32_t flags);
 
/**
* @set_property:
*
* This is the legacy entry point to update a property attached to the
* CRTC.
*
* Drivers implementing atomic modeset should use
* drm_atomic_helper_crtc_set_property() to implement this hook.
*
* This callback is optional if the driver does not support any legacy
* driver-private properties.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
*/
int (*set_property)(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val);
 
/**
* @atomic_duplicate_state:
*
* Duplicate the current atomic state for this CRTC and return it.
* The core and helpers guarantee that any atomic state duplicated with
* this hook and still owned by the caller (i.e. not transferred to the
* driver by calling ->atomic_commit() from struct
* &drm_mode_config_funcs) will be cleaned up by calling the
* @atomic_destroy_state hook in this structure.
*
* Atomic drivers which don't subclass struct &drm_crtc should use
* drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
* __drm_atomic_helper_crtc_duplicate_state() to make sure shared state is
* duplicated in a consistent fashion across drivers.
*
* It is an error to call this hook before crtc->state has been
* initialized correctly.
*
* NOTE:
*
* If the duplicate state references refcounted resources this hook must
* acquire a reference for each of them. The driver must release these
* references again in @atomic_destroy_state.
*
* RETURNS:
*
* Duplicated atomic state or NULL when the allocation failed.
*/
/* atomic update handling */
struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc);
 
/**
* @atomic_destroy_state:
*
* Destroy a state duplicated with @atomic_duplicate_state and release
* or unreference all resources it references
*/
void (*atomic_destroy_state)(struct drm_crtc *crtc,
struct drm_crtc_state *state);
 
/**
* @atomic_set_property:
*
* Decode a driver-private property value and store the decoded value
* into the passed-in state structure. Since the atomic core decodes all
* standardized properties (even for extensions beyond the core set of
* properties which might not be implemented by all drivers) this
* requires drivers to subclass the state structure.
*
* Such driver-private properties should really only be implemented for
* truly hardware/vendor specific state. Instead it is preferred to
* standardize atomic extension and decode the properties used to expose
* such an extension in the core.
*
* Do not call this function directly, use
* drm_atomic_crtc_set_property() instead.
*
* This callback is optional if the driver does not support any
* driver-private atomic properties.
*
* NOTE:
*
* This function is called in the state assembly phase of atomic
* modesets, which can be aborted for any reason (including on
* userspace's request to just check whether a configuration would be
* possible). Drivers MUST NOT touch any persistent state (hardware or
* software) or data structures except the passed in @state parameter.
*
* Also since userspace controls in which order properties are set this
* function must not do any input validation (since the state update is
* incomplete and hence likely inconsistent). Instead any such input
* validation must be done in the various atomic_check callbacks.
*
* RETURNS:
*
* 0 if the property has been found, -EINVAL if the property isn't
* implemented by the driver (which should never happen, the core only
* asks for properties attached to this CRTC). No other validation is
* allowed by the driver. The core already checks that the property
* value is within the range (integer, valid enum value, ...) the driver
* set when registering the property.
*/
int (*atomic_set_property)(struct drm_crtc *crtc,
struct drm_crtc_state *state,
struct drm_property *property,
uint64_t val);
/**
* @atomic_get_property:
*
* Reads out the decoded driver-private property. This is used to
* implement the GETCRTC IOCTL.
*
* Do not call this function directly, use
* drm_atomic_crtc_get_property() instead.
*
* This callback is optional if the driver does not support any
* driver-private atomic properties.
*
* RETURNS:
*
* 0 on success, -EINVAL if the property isn't implemented by the
* driver (which should never happen, the core only asks for
* properties attached to this CRTC).
*/
int (*atomic_get_property)(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property,
711,7 → 416,7
* @properties: property tracking for this CRTC
* @state: current atomic state for this CRTC
* @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
* legacy IOCTLs
* legacy ioctls
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
721,8 → 426,6
struct device_node *port;
struct list_head head;
 
char *name;
 
/*
* crtc mutex
*
760,7 → 463,7
uint16_t *gamma_store;
 
/* if you are using the helper */
const struct drm_crtc_helper_funcs *helper_private;
const void *helper_private;
 
struct drm_object_properties properties;
 
767,7 → 470,7
struct drm_crtc_state *state;
 
/*
* For legacy crtc IOCTLs so that atomic drivers can get at the locking
* For legacy crtc ioctls so that atomic drivers can get at the locking
* acquire context.
*/
struct drm_modeset_acquire_ctx *acquire_ctx;
792,6 → 495,21
 
/**
* struct drm_connector_funcs - control connectors on a given device
* @dpms: set power state
* @save: save connector state
* @restore: restore connector state
* @reset: reset connector after state has been invalidated (e.g. resume)
* @detect: is this connector active?
* @fill_modes: fill mode list for this connector
* @set_property: property for this connector may need an update
* @destroy: make object go away
* @force: notify the driver that the connector is forced on
* @atomic_duplicate_state: duplicate the atomic state for this connector
* @atomic_destroy_state: destroy an atomic state for this connector
* @atomic_set_property: set a property on an atomic state for this connector
* (do not call directly, use drm_atomic_connector_set_property())
* @atomic_get_property: get a property on an atomic state for this connector
* (do not call directly, use drm_atomic_connector_get_property())
*
* Each CRTC may have one or more connectors attached to it. The functions
* below allow the core DRM code to control connectors, enumerate available modes,
798,233 → 516,33
* etc.
*/
struct drm_connector_funcs {
/**
* @dpms:
*
* Legacy entry point to set the per-connector DPMS state. Legacy DPMS
* is exposed as a standard property on the connector, but diverted to
* this callback in the drm core. Note that atomic drivers don't
* implement the 4 level DPMS support on the connector any more, but
* instead only have an on/off "ACTIVE" property on the CRTC object.
*
* Drivers implementing atomic modeset should use
* drm_atomic_helper_connector_dpms() to implement this hook.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
*/
int (*dpms)(struct drm_connector *connector, int mode);
 
/**
* @reset:
*
* Reset connector hardware and software state to off. This function isn't
* called by the core directly, only through drm_mode_config_reset().
* It's not a helper hook only for historical reasons.
*
* Atomic drivers can use drm_atomic_helper_connector_reset() to reset
* atomic state using this hook.
*/
void (*save)(struct drm_connector *connector);
void (*restore)(struct drm_connector *connector);
void (*reset)(struct drm_connector *connector);
 
/**
* @detect:
*
* Check to see if anything is attached to the connector. The parameter
* force is set to false whilst polling, true when checking the
* connector due to a user request. force can be used by the driver to
* avoid expensive, destructive operations during automated probing.
*
* FIXME:
*
* Note that this hook is only called by the probe helper. It's not in
* the helper library vtable purely for historical reasons. The only DRM
* core entry point to probe connector state is @fill_modes.
*
* RETURNS:
*
* drm_connector_status indicating the connector's status.
/* Check to see if anything is attached to the connector.
* @force is set to false whilst polling, true when checking the
* connector due to user request. @force can be used by the driver
* to avoid expensive, destructive operations during automated
* probing.
*/
enum drm_connector_status (*detect)(struct drm_connector *connector,
bool force);
 
/**
* @force:
*
* This function is called to update internal encoder state when the
* connector is forced to a certain state by userspace, either through
* the sysfs interfaces or on the kernel cmdline. In that case the
* @detect callback isn't called.
*
* FIXME:
*
* Note that this hook is only called by the probe helper. It's not in
* the helper library vtable purely for historical reasons. The only DRM
* core entry point to probe connector state is @fill_modes.
*/
void (*force)(struct drm_connector *connector);
 
/**
* @fill_modes:
*
* Entry point for output detection and basic mode validation. The
* driver should reprobe the output if needed (e.g. when hotplug
* handling is unreliable), add all detected modes to connector->modes
* and filter out any the device can't support in any configuration. It
* also needs to filter out any modes wider or higher than the
* parameters max_width and max_height indicate.
*
* The drivers must also prune any modes no longer valid from
* connector->modes. Furthermore it must update connector->status and
* connector->edid. If no EDID has been received for this output
* connector->edid must be NULL.
*
* Drivers using the probe helpers should use
* drm_helper_probe_single_connector_modes() or
* drm_helper_probe_single_connector_modes_nomerge() to implement this
* function.
*
* RETURNS:
*
* The number of modes detected and filled into connector->modes.
*/
int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
 
/**
* @set_property:
*
* This is the legacy entry point to update a property attached to the
* connector.
*
* Drivers implementing atomic modeset should use
* drm_atomic_helper_connector_set_property() to implement this hook.
*
* This callback is optional if the driver does not support any legacy
* driver-private properties.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
*/
int (*set_property)(struct drm_connector *connector, struct drm_property *property,
uint64_t val);
 
/**
* @destroy:
*
* Clean up connector resources. This is called at driver unload time
* through drm_mode_config_cleanup(). It can also be called at runtime
* when a connector is being hot-unplugged for drivers that support
* connector hotplugging (e.g. DisplayPort MST).
*/
void (*destroy)(struct drm_connector *connector);
void (*force)(struct drm_connector *connector);
 
/**
* @atomic_duplicate_state:
*
* Duplicate the current atomic state for this connector and return it.
* The core and helpers guarantee that any atomic state duplicated with
* this hook and still owned by the caller (i.e. not transferred to the
* driver by calling ->atomic_commit() from struct
* &drm_mode_config_funcs) will be cleaned up by calling the
* @atomic_destroy_state hook in this structure.
*
* Atomic drivers which don't subclass struct &drm_connector_state should use
* drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
* __drm_atomic_helper_connector_duplicate_state() to make sure shared state is
* duplicated in a consistent fashion across drivers.
*
* It is an error to call this hook before connector->state has been
* initialized correctly.
*
* NOTE:
*
* If the duplicate state references refcounted resources this hook must
* acquire a reference for each of them. The driver must release these
* references again in @atomic_destroy_state.
*
* RETURNS:
*
* Duplicated atomic state or NULL when the allocation failed.
*/
/* atomic update handling */
struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
 
/**
* @atomic_destroy_state:
*
* Destroy a state duplicated with @atomic_duplicate_state and release
* or unreference all resources it references
*/
void (*atomic_destroy_state)(struct drm_connector *connector,
struct drm_connector_state *state);
 
/**
* @atomic_set_property:
*
* Decode a driver-private property value and store the decoded value
* into the passed-in state structure. Since the atomic core decodes all
* standardized properties (even for extensions beyond the core set of
* properties which might not be implemented by all drivers) this
* requires drivers to subclass the state structure.
*
* Such driver-private properties should really only be implemented for
* truly hardware/vendor specific state. Instead it is preferred to
* standardize atomic extension and decode the properties used to expose
* such an extension in the core.
*
* Do not call this function directly, use
* drm_atomic_connector_set_property() instead.
*
* This callback is optional if the driver does not support any
* driver-private atomic properties.
*
* NOTE:
*
* This function is called in the state assembly phase of atomic
* modesets, which can be aborted for any reason (including on
* userspace's request to just check whether a configuration would be
* possible). Drivers MUST NOT touch any persistent state (hardware or
* software) or data structures except the passed in @state parameter.
*
	 * Also, since userspace controls the order in which properties are set,
	 * this function must not do any input validation (since the state update
	 * is incomplete and hence likely inconsistent). Instead any such input
* validation must be done in the various atomic_check callbacks.
*
* RETURNS:
*
* 0 if the property has been found, -EINVAL if the property isn't
* implemented by the driver (which shouldn't ever happen, the core only
* asks for properties attached to this connector). No other validation
* is allowed by the driver. The core already checks that the property
* value is within the range (integer, valid enum value, ...) the driver
* set when registering the property.
*/
int (*atomic_set_property)(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val);
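	/*
	 * A minimal sketch (hypothetical driver "foo", not part of this
	 * header) of decoding one driver-private property into a subclassed
	 * connector state; foo_connector_state, foo_private and
	 * scaling_mode_property are illustrative assumptions:
	 *
	 *	struct foo_connector_state {
	 *		struct drm_connector_state base;
	 *		unsigned int scaling_mode;
	 *	};
	 *
	 *	static int foo_atomic_set_property(struct drm_connector *connector,
	 *					   struct drm_connector_state *state,
	 *					   struct drm_property *property,
	 *					   uint64_t val)
	 *	{
	 *		struct foo_connector_state *foo_state =
	 *			container_of(state, struct foo_connector_state, base);
	 *		struct foo_private *priv = connector->dev->dev_private;
	 *
	 *		if (property == priv->scaling_mode_property) {
	 *			foo_state->scaling_mode = val;
	 *			return 0;
	 *		}
	 *
	 *		return -EINVAL;
	 *	}
	 */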
 
/**
* @atomic_get_property:
*
* Reads out the decoded driver-private property. This is used to
* implement the GETCONNECTOR IOCTL.
*
* Do not call this function directly, use
* drm_atomic_connector_get_property() instead.
*
* This callback is optional if the driver does not support any
* driver-private atomic properties.
*
* RETURNS:
*
* 0 on success, -EINVAL if the property isn't implemented by the
* driver (which shouldn't ever happen, the core only asks for
* properties attached to this connector).
*/
int (*atomic_get_property)(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
1033,26 → 551,13
 
/**
* struct drm_encoder_funcs - encoder controls
* @reset: reset state (e.g. at init or resume time)
* @destroy: cleanup and free associated data
*
* Encoders sit between CRTCs and connectors.
*/
struct drm_encoder_funcs {
/**
* @reset:
*
* Reset encoder hardware and software state to off. This function isn't
* called by the core directly, only through drm_mode_config_reset().
	 * For purely historical reasons it is not a helper hook.
*/
void (*reset)(struct drm_encoder *encoder);
 
/**
* @destroy:
*
* Clean up encoder resources. This is only called at driver unload time
* through drm_mode_config_cleanup() since an encoder cannot be
* hotplugged in DRM.
*/
void (*destroy)(struct drm_encoder *encoder);
};
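/*
 * A minimal sketch (hypothetical driver "foo") of an encoder funcs table:
 * most drivers only need @destroy, often a thin wrapper around
 * drm_encoder_cleanup() that also frees the embedding structure:
 *
 *	static void foo_encoder_destroy(struct drm_encoder *encoder)
 *	{
 *		drm_encoder_cleanup(encoder);
 *		kfree(encoder);
 *	}
 *
 *	static const struct drm_encoder_funcs foo_encoder_funcs = {
 *		.destroy = foo_encoder_destroy,
 *	};
 */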
 
1088,7 → 593,7
struct drm_crtc *crtc;
struct drm_bridge *bridge;
const struct drm_encoder_funcs *funcs;
const struct drm_encoder_helper_funcs *helper_private;
const void *helper_private;
};
 
/* should we poll this connector for connects and disconnects */
1166,7 → 671,6
struct drm_mode_object base;
 
char *name;
int connector_id;
int connector_type;
int connector_type_id;
bool interlace_allowed;
1194,7 → 698,7
/* requested DPMS state */
int dpms;
 
const struct drm_connector_helper_funcs *helper_private;
const void *helper_private;
 
/* forced on connector */
struct drm_cmdline_mode cmdline_mode;
1274,34 → 778,19
 
/**
* struct drm_plane_funcs - driver plane control functions
* @update_plane: update the plane configuration
* @disable_plane: shut down the plane
* @destroy: clean up plane resources
* @reset: reset plane after state has been invalidated (e.g. resume)
* @set_property: called when a property is changed
* @atomic_duplicate_state: duplicate the atomic state for this plane
* @atomic_destroy_state: destroy an atomic state for this plane
* @atomic_set_property: set a property on an atomic state for this plane
* (do not call directly, use drm_atomic_plane_set_property())
* @atomic_get_property: get a property on an atomic state for this plane
* (do not call directly, use drm_atomic_plane_get_property())
*/
struct drm_plane_funcs {
/**
* @update_plane:
*
* This is the legacy entry point to enable and configure the plane for
* the given CRTC and framebuffer. It is never called to disable the
	 * plane, i.e. the passed-in crtc and fb parameters are never NULL.
*
* The source rectangle in frame buffer memory coordinates is given by
* the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point
* values). Devices that don't support subpixel plane coordinates can
* ignore the fractional part.
*
* The destination rectangle in CRTC coordinates is given by the
* crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values).
* Devices scale the source rectangle to the destination rectangle. If
* scaling is not supported, and the source rectangle size doesn't match
* the destination rectangle size, the driver must return a
	 * -EINVAL error.
*
* Drivers implementing atomic modeset should use
* drm_atomic_helper_update_plane() to implement this hook.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
*/
int (*update_plane)(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
1308,169 → 797,21
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h);
 
/**
* @disable_plane:
*
* This is the legacy entry point to disable the plane. The DRM core
* calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call
* with the frame buffer ID set to 0. Disabled planes must not be
* processed by the CRTC.
*
* Drivers implementing atomic modeset should use
* drm_atomic_helper_disable_plane() to implement this hook.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
*/
int (*disable_plane)(struct drm_plane *plane);
 
/**
* @destroy:
*
* Clean up plane resources. This is only called at driver unload time
* through drm_mode_config_cleanup() since a plane cannot be hotplugged
* in DRM.
*/
void (*destroy)(struct drm_plane *plane);
 
/**
* @reset:
*
* Reset plane hardware and software state to off. This function isn't
* called by the core directly, only through drm_mode_config_reset().
	 * For purely historical reasons it is not a helper hook.
*
* Atomic drivers can use drm_atomic_helper_plane_reset() to reset
* atomic state using this hook.
*/
void (*reset)(struct drm_plane *plane);
 
/**
* @set_property:
*
* This is the legacy entry point to update a property attached to the
* plane.
*
* Drivers implementing atomic modeset should use
* drm_atomic_helper_plane_set_property() to implement this hook.
*
* This callback is optional if the driver does not support any legacy
* driver-private properties.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
*/
int (*set_property)(struct drm_plane *plane,
struct drm_property *property, uint64_t val);
 
/**
* @atomic_duplicate_state:
*
* Duplicate the current atomic state for this plane and return it.
	 * The core and helpers guarantee that any atomic state duplicated with
* this hook and still owned by the caller (i.e. not transferred to the
* driver by calling ->atomic_commit() from struct
* &drm_mode_config_funcs) will be cleaned up by calling the
* @atomic_destroy_state hook in this structure.
*
* Atomic drivers which don't subclass struct &drm_plane_state should use
* drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
* __drm_atomic_helper_plane_duplicate_state() to make sure shared state is
* duplicated in a consistent fashion across drivers.
*
* It is an error to call this hook before plane->state has been
* initialized correctly.
*
* NOTE:
*
* If the duplicate state references refcounted resources this hook must
* acquire a reference for each of them. The driver must release these
* references again in @atomic_destroy_state.
*
* RETURNS:
*
* Duplicated atomic state or NULL when the allocation failed.
*/
/* atomic update handling */
struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
 
/**
* @atomic_destroy_state:
*
* Destroy a state duplicated with @atomic_duplicate_state and release
	 * or unreference all resources it references.
*/
void (*atomic_destroy_state)(struct drm_plane *plane,
struct drm_plane_state *state);
 
/**
* @atomic_set_property:
*
* Decode a driver-private property value and store the decoded value
* into the passed-in state structure. Since the atomic core decodes all
* standardized properties (even for extensions beyond the core set of
* properties which might not be implemented by all drivers) this
* requires drivers to subclass the state structure.
*
* Such driver-private properties should really only be implemented for
	 * truly hardware/vendor-specific state. Instead it is preferred to
	 * standardize an atomic extension and decode the properties used to
	 * expose such an extension in the core.
*
* Do not call this function directly, use
* drm_atomic_plane_set_property() instead.
*
* This callback is optional if the driver does not support any
* driver-private atomic properties.
*
* NOTE:
*
* This function is called in the state assembly phase of atomic
* modesets, which can be aborted for any reason (including on
* userspace's request to just check whether a configuration would be
* possible). Drivers MUST NOT touch any persistent state (hardware or
* software) or data structures except the passed in @state parameter.
*
	 * Also, since userspace controls the order in which properties are set,
	 * this function must not do any input validation (since the state update
	 * is incomplete and hence likely inconsistent). Instead any such input
* validation must be done in the various atomic_check callbacks.
*
* RETURNS:
*
* 0 if the property has been found, -EINVAL if the property isn't
* implemented by the driver (which shouldn't ever happen, the core only
* asks for properties attached to this plane). No other validation is
* allowed by the driver. The core already checks that the property
* value is within the range (integer, valid enum value, ...) the driver
* set when registering the property.
*/
int (*atomic_set_property)(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val);
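	/*
	 * A minimal sketch of wiring the hooks in this structure to the
	 * atomic helpers (hypothetical driver "foo"; a driver subclassing
	 * &drm_plane_state would swap in its own state hooks as described
	 * above):
	 *
	 *	static const struct drm_plane_funcs foo_plane_funcs = {
	 *		.update_plane = drm_atomic_helper_update_plane,
	 *		.disable_plane = drm_atomic_helper_disable_plane,
	 *		.destroy = drm_plane_cleanup,
	 *		.reset = drm_atomic_helper_plane_reset,
	 *		.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	 *		.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
	 *	};
	 */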
 
/**
* @atomic_get_property:
*
* Reads out the decoded driver-private property. This is used to
* implement the GETPLANE IOCTL.
*
* Do not call this function directly, use
* drm_atomic_plane_get_property() instead.
*
* This callback is optional if the driver does not support any
* driver-private atomic properties.
*
* RETURNS:
*
* 0 on success, -EINVAL if the property isn't implemented by the
* driver (which should never happen, the core only asks for
* properties attached to this plane).
*/
int (*atomic_get_property)(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
1483,7 → 824,6
DRM_PLANE_TYPE_CURSOR,
};
 
 
/**
* struct drm_plane - central DRM plane control structure
* @dev: DRM device this plane belongs to
1506,8 → 846,6
struct drm_device *dev;
struct list_head head;
 
char *name;
 
struct drm_modeset_lock mutex;
 
struct drm_mode_object base;
1528,7 → 866,7
 
enum drm_plane_type type;
 
const struct drm_plane_helper_funcs *helper_private;
const void *helper_private;
 
struct drm_plane_state *state;
};
1536,114 → 874,24
/**
* struct drm_bridge_funcs - drm_bridge control functions
* @attach: Called during drm_bridge_attach
* @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
* @disable: Called right before encoder prepare, disables the bridge
* @post_disable: Called right after encoder prepare, for lockstepped disable
* @mode_set: Set this mode to the bridge
* @pre_enable: Called right before encoder commit, for lockstepped commit
* @enable: Called right after encoder commit, enables the bridge
*/
struct drm_bridge_funcs {
int (*attach)(struct drm_bridge *bridge);
 
/**
* @mode_fixup:
*
	 * This callback is used to validate and adjust a mode. The parameter
* mode is the display mode that should be fed to the next element in
* the display chain, either the final &drm_connector or the next
* &drm_bridge. The parameter adjusted_mode is the input mode the bridge
* requires. It can be modified by this callback and does not need to
* match mode.
*
* This is the only hook that allows a bridge to reject a modeset. If
	 * this function passes, all other callbacks must succeed for this
* configuration.
*
* NOTE:
*
* This function is called in the check phase of atomic modesets, which
* can be aborted for any reason (including on userspace's request to
* just check whether a configuration would be possible). Drivers MUST
* NOT touch any persistent state (hardware or software) or data
	 * structures except the passed-in mode and adjusted_mode parameters.
*
* RETURNS:
*
* True if an acceptable configuration is possible, false if the modeset
* operation should be rejected.
*/
bool (*mode_fixup)(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/**
* @disable:
*
* This callback should disable the bridge. It is called right before
* the preceding element in the display pipe is disabled. If the
* preceding element is a bridge this means it's called before that
* bridge's ->disable() function. If the preceding element is a
* &drm_encoder it's called right before the encoder's ->disable(),
* ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*/
void (*disable)(struct drm_bridge *bridge);
 
/**
* @post_disable:
*
* This callback should disable the bridge. It is called right after
* the preceding element in the display pipe is disabled. If the
* preceding element is a bridge this means it's called after that
* bridge's ->post_disable() function. If the preceding element is a
* &drm_encoder it's called right after the encoder's ->disable(),
* ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs.
*
* The bridge must assume that the display pipe (i.e. clocks and timing
	 * signals) feeding it is no longer running when this callback is
* called.
*/
void (*post_disable)(struct drm_bridge *bridge);
 
/**
* @mode_set:
*
* This callback should set the given mode on the bridge. It is called
* after the ->mode_set() callback for the preceding element in the
* display pipeline has been called already. The display pipe (i.e.
* clocks and timing signals) is off when this function is called.
*/
void (*mode_set)(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/**
* @pre_enable:
*
* This callback should enable the bridge. It is called right before
* the preceding element in the display pipe is enabled. If the
* preceding element is a bridge this means it's called before that
* bridge's ->pre_enable() function. If the preceding element is a
* &drm_encoder it's called right before the encoder's ->enable(),
* ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
*
* The display pipe (i.e. clocks and timing signals) feeding this bridge
* will not yet be running when this callback is called. The bridge must
* not enable the display link feeding the next bridge in the chain (if
* there is one) when this callback is called.
*/
void (*pre_enable)(struct drm_bridge *bridge);
 
/**
* @enable:
*
* This callback should enable the bridge. It is called right after
* the preceding element in the display pipe is enabled. If the
* preceding element is a bridge this means it's called after that
* bridge's ->enable() function. If the preceding element is a
* &drm_encoder it's called right after the encoder's ->enable(),
* ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*/
void (*enable)(struct drm_bridge *bridge);
};
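/*
 * Putting the hooks above together, a rough sketch of the resulting call
 * order for a single bridge attached to an encoder (derived from the
 * per-hook notes; enabling mirrors disabling in reverse):
 *
 *	bridge ->disable()
 *	encoder disable (->disable() / ->prepare() / ->dpms())
 *	bridge ->post_disable()
 *	encoder and bridge ->mode_set()
 *	bridge ->pre_enable()
 *	encoder enable (->enable() / ->commit() / ->dpms())
 *	bridge ->enable()
 */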
 
1674,7 → 922,7
* struct drm_atomic_state - the global state object for atomic updates
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
* @legacy_cursor_update: hint to enforce legacy cursor ioctl semantics
* @planes: pointer to array of plane pointers
* @plane_states: pointer to array of plane states pointers
* @crtcs: pointer to array of CRTC pointers
1729,265 → 977,31
 
/**
* struct drm_mode_config_funcs - basic driver provided mode setting functions
* @fb_create: create a new framebuffer object
* @output_poll_changed: function to handle output configuration changes
* @atomic_check: check whether a given atomic state update is possible
* @atomic_commit: commit an atomic state update previously verified with
* atomic_check()
* @atomic_state_alloc: allocate a new atomic state
* @atomic_state_clear: clear the atomic state
* @atomic_state_free: free the atomic state
*
* Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
* involve drivers.
*/
struct drm_mode_config_funcs {
/**
* @fb_create:
*
* Create a new framebuffer object. The core does basic checks on the
* requested metadata, but most of that is left to the driver. See
* struct &drm_mode_fb_cmd2 for details.
*
* If the parameters are deemed valid and the backing storage objects in
* the underlying memory manager all exist, then the driver allocates
* a new &drm_framebuffer structure, subclassed to contain
* driver-specific information (like the internal native buffer object
* references). It also needs to fill out all relevant metadata, which
* should be done by calling drm_helper_mode_fill_fb_struct().
*
* The initialization is finalized by calling drm_framebuffer_init(),
* which registers the framebuffer and makes it accessible to other
* threads.
*
* RETURNS:
*
* A new framebuffer with an initial reference count of 1 or a negative
* error code encoded with ERR_PTR().
*/
struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd);
 
/**
* @output_poll_changed:
*
* Callback used by helpers to inform the driver of output configuration
* changes.
*
* Drivers implementing fbdev emulation with the helpers can call
	 * drm_fb_helper_hotplug_event() from this hook to inform the fbdev
* helper of output changes.
*
* FIXME:
*
	 * Except that there's no vtable for device-level helper callbacks,
* there's no reason this is a core function.
*/
struct drm_mode_fb_cmd2 *mode_cmd);
void (*output_poll_changed)(struct drm_device *dev);
 
/**
* @atomic_check:
*
* This is the only hook to validate an atomic modeset update. This
* function must reject any modeset and state changes which the hardware
* or driver doesn't support. This includes but is of course not limited
* to:
*
* - Checking that the modes, framebuffers, scaling and placement
* requirements and so on are within the limits of the hardware.
*
* - Checking that any hidden shared resources are not oversubscribed.
* This can be shared PLLs, shared lanes, overall memory bandwidth,
* display fifo space (where shared between planes or maybe even
* CRTCs).
*
* - Checking that virtualized resources exported to userspace are not
* oversubscribed. For various reasons it can make sense to expose
	 *   more planes, crtcs or encoders than are physically there. One
	 *   example is dual-pipe operations (which generally should be hidden
	 *   from userspace when lockstepped in hardware, and exposed otherwise),
	 *   where a plane might need 1 hardware plane (if it's just on one
	 *   pipe), 2 hardware planes (when it spans both pipes) or maybe even
	 *   share a hardware plane with a 2nd plane (if there's a compatible
* plane requested on the area handled by the other pipe).
*
* - Check that any transitional state is possible and that if
* requested, the update can indeed be done in the vblank period
* without temporarily disabling some functions.
*
* - Check any other constraints the driver or hardware might have.
*
* - This callback also needs to correctly fill out the &drm_crtc_state
* in this update to make sure that drm_atomic_crtc_needs_modeset()
* reflects the nature of the possible update and returns true if and
* only if the update cannot be applied without tearing within one
* vblank on that CRTC. The core uses that information to reject
* updates which require a full modeset (i.e. blanking the screen, or
* at least pausing updates for a substantial amount of time) if
* userspace has disallowed that in its request.
*
* - The driver also does not need to repeat basic input validation
* like done for the corresponding legacy entry points. The core does
* that before calling this hook.
*
* See the documentation of @atomic_commit for an exhaustive list of
* error conditions which don't have to be checked at the
	 * ->atomic_check() stage.
*
* See the documentation for struct &drm_atomic_state for how exactly
* an atomic modeset update is described.
*
* Drivers using the atomic helpers can implement this hook using
* drm_atomic_helper_check(), or one of the exported sub-functions of
* it.
*
* RETURNS:
*
* 0 on success or one of the below negative error codes:
*
* - -EINVAL, if any of the above constraints are violated.
*
* - -EDEADLK, when returned from an attempt to acquire an additional
* &drm_modeset_lock through drm_modeset_lock().
*
* - -ENOMEM, if allocating additional state sub-structures failed due
* to lack of memory.
*
* - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
* This can either be due to a pending signal, or because the driver
* needs to completely bail out to recover from an exceptional
	 *   situation like a GPU hang. From a userspace point of view all
	 *   errors are
* treated equally.
*/
int (*atomic_check)(struct drm_device *dev,
struct drm_atomic_state *state);
 
/**
* @atomic_commit:
*
* This is the only hook to commit an atomic modeset update. The core
* guarantees that @atomic_check has been called successfully before
* calling this function, and that nothing has been changed in the
* interim.
*
* See the documentation for struct &drm_atomic_state for how exactly
* an atomic modeset update is described.
*
* Drivers using the atomic helpers can implement this hook using
* drm_atomic_helper_commit(), or one of the exported sub-functions of
* it.
*
* Asynchronous commits (as indicated with the async parameter) must
* do any preparatory work which might result in an unsuccessful commit
* in the context of this callback. The only exceptions are hardware
* errors resulting in -EIO. But even in that case the driver must
* ensure that the display pipe is at least running, to avoid
* compositors crashing when pageflips don't work. Anything else,
* specifically committing the update to the hardware, should be done
* without blocking the caller. For updates which do not require a
* modeset this must be guaranteed.
*
* The driver must wait for any pending rendering to the new
* framebuffers to complete before executing the flip. It should also
* wait for any pending rendering from other drivers if the underlying
* buffer is a shared dma-buf. Asynchronous commits must not wait for
* rendering in the context of this callback.
*
* An application can request to be notified when the atomic commit has
* completed. These events are per-CRTC and can be distinguished by the
* CRTC index supplied in &drm_event to userspace.
*
* The drm core will supply a struct &drm_event in the event
* member of each CRTC's &drm_crtc_state structure. This can be handled by the
* drm_crtc_send_vblank_event() function, which the driver should call on
* the provided event upon completion of the atomic commit. Note that if
* the driver supports vblank signalling and timestamping the vblank
* counters and timestamps must agree with the ones returned from page
* flip events. With the current vblank helper infrastructure this can
* be achieved by holding a vblank reference while the page flip is
* pending, acquired through drm_crtc_vblank_get() and released with
* drm_crtc_vblank_put(). Drivers are free to implement their own vblank
* counter and timestamp tracking though, e.g. if they have accurate
* timestamp registers in hardware.
*
* NOTE:
*
* Drivers are not allowed to shut down any display pipe successfully
* enabled through an atomic commit on their own. Doing so can result in
* compositors crashing if a page flip is suddenly rejected because the
* pipe is off.
*
* RETURNS:
*
* 0 on success or one of the below negative error codes:
*
	 * - -EBUSY, if an asynchronous update is requested and there is
	 *   an earlier update pending. Drivers are allowed to support a queue
* of outstanding updates, but currently no driver supports that.
* Note that drivers must wait for preceding updates to complete if a
* synchronous update is requested, they are not allowed to fail the
* commit in that case.
*
* - -ENOMEM, if the driver failed to allocate memory. Specifically
* this can happen when trying to pin framebuffers, which must only
* be done when committing the state.
*
* - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate
* that the driver has run out of vram, iommu space or similar GPU
	 *   address space needed for the framebuffer.
*
* - -EIO, if the hardware completely died.
*
* - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted.
* This can either be due to a pending signal, or because the driver
* needs to completely bail out to recover from an exceptional
* situation like a GPU hang. From a userspace point of view all errors are
* treated equally.
*
* This list is exhaustive. Specifically this hook is not allowed to
* return -EINVAL (any invalid requests should be caught in
* @atomic_check) or -EDEADLK (this function must not acquire
* additional modeset locks).
*/
struct drm_atomic_state *a);
int (*atomic_commit)(struct drm_device *dev,
struct drm_atomic_state *state,
struct drm_atomic_state *a,
bool async);
 
/**
* @atomic_state_alloc:
*
* This optional hook can be used by drivers that want to subclass struct
* &drm_atomic_state to be able to track their own driver-private global
* state easily. If this hook is implemented, drivers must also
* implement @atomic_state_clear and @atomic_state_free.
*
* RETURNS:
*
* A new &drm_atomic_state on success or NULL on failure.
*/
struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev);
 
/**
* @atomic_state_clear:
*
* This hook must clear any driver private state duplicated into the
* passed-in &drm_atomic_state. This hook is called when the caller
* encountered a &drm_modeset_lock deadlock and needs to drop all
* already acquired locks as part of the deadlock avoidance dance
* implemented in drm_modeset_lock_backoff().
*
* Any duplicated state must be invalidated since a concurrent atomic
* update might change it, and the drm atomic interfaces always apply
* updates as relative changes to the current state.
*
* Drivers that implement this must call drm_atomic_state_default_clear()
* to clear common state.
*/
void (*atomic_state_clear)(struct drm_atomic_state *state);
 
/**
* @atomic_state_free:
*
	 * This hook needs to free driver-private resources and the
	 * &drm_atomic_state itself. Note that the core first calls
	 * drm_atomic_state_clear() to avoid code duplication between the clear
	 * and free hooks.
*
* Drivers that implement this must call drm_atomic_state_default_free()
* to release common resources.
*/
void (*atomic_state_free)(struct drm_atomic_state *state);
};
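/*
 * A minimal sketch (hypothetical driver "foo") of an atomic driver's
 * mode_config funcs, leaning on the atomic helpers as suggested in the
 * hook documentation above; foo_fb_create is assumed to follow the
 * @fb_create rules:
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = foo_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 */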
 
1996,7 → 1010,7
* @mutex: mutex protecting KMS related lists and structures
* @connection_mutex: ww mutex protecting connector state and routing
* @acquire_ctx: global implicit acquire context used by atomic drivers for
* legacy IOCTLs
* legacy ioctls
* @idr_mutex: mutex for KMS ID allocation and management
* @crtc_idr: main KMS ID tracking object
* @fb_lock: mutex to protect fb state and lists
2048,7 → 1062,6
struct list_head fb_list;
 
int num_connector;
struct ida connector_ida;
struct list_head connector_list;
int num_encoder;
struct list_head encoder_list;
2153,7 → 1166,7
*/
#define drm_for_each_plane_mask(plane, dev, plane_mask) \
list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
if ((plane_mask) & (1 << drm_plane_index(plane)))
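/*
 * Usage sketch: walk only the planes named in a CRTC state's plane_mask,
 * e.g. to force them off (old_crtc_state is hypothetical):
 *
 *	struct drm_plane *plane;
 *
 *	drm_for_each_plane_mask(plane, dev, old_crtc_state->plane_mask)
 *		drm_plane_force_disable(plane);
 */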
 
 
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
2170,13 → 1183,11
char *name;
};
 
extern __printf(6, 7)
int drm_crtc_init_with_planes(struct drm_device *dev,
extern int drm_crtc_init_with_planes(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_plane *primary,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs,
const char *name, ...);
const struct drm_crtc_funcs *funcs);
extern void drm_crtc_cleanup(struct drm_crtc *crtc);
extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
 
2202,11 → 1213,7
void drm_connector_unregister(struct drm_connector *connector);
 
extern void drm_connector_cleanup(struct drm_connector *connector);
static inline unsigned drm_connector_index(struct drm_connector *connector)
{
return connector->connector_id;
}
 
extern unsigned int drm_connector_index(struct drm_connector *connector);
/* helper to unplug all connectors from sysfs for device */
extern void drm_connector_unplug_all(struct drm_device *dev);
 
2226,11 → 1233,10
void drm_bridge_pre_enable(struct drm_bridge *bridge);
void drm_bridge_enable(struct drm_bridge *bridge);
 
extern __printf(5, 6)
int drm_encoder_init(struct drm_device *dev,
extern int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...);
int encoder_type);
 
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
2245,15 → 1251,13
return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
}
 
extern __printf(8, 9)
int drm_universal_plane_init(struct drm_device *dev,
extern int drm_universal_plane_init(struct drm_device *dev,
struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats,
unsigned int format_count,
enum drm_plane_type type,
const char *name, ...);
enum drm_plane_type type);
extern int drm_plane_init(struct drm_device *dev,
struct drm_plane *plane,
unsigned long possible_crtcs,
2539,7 → 1543,7
/* Plane list iterator for legacy (overlay only) planes. */
#define drm_for_each_legacy_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY)
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
 
#define drm_for_each_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
/drivers/include/drm/drm_crtc_helper.h
40,8 → 40,149
#include <linux/fb.h>
 
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
 
enum mode_set_atomic {
LEAVE_ATOMIC_MODE_SET,
ENTER_ATOMIC_MODE_SET,
};
 
/**
* struct drm_crtc_helper_funcs - helper operations for CRTCs
* @dpms: set power state
* @prepare: prepare the CRTC, called before @mode_set
* @commit: commit changes to CRTC, called after @mode_set
* @mode_fixup: try to fixup proposed mode for this CRTC
* @mode_set: set this mode
* @mode_set_nofb: set mode only (no scanout buffer attached)
* @mode_set_base: update the scanout buffer
* @mode_set_base_atomic: non-blocking mode set (used for kgdb support)
* @load_lut: load color palette
* @disable: disable CRTC when no longer in use
* @enable: enable CRTC
* @atomic_check: check for validity of an atomic state
* @atomic_begin: begin atomic update
* @atomic_flush: flush atomic update
*
* The helper operations are called by the mid-layer CRTC helper.
*
* Note that with atomic helpers @dpms, @prepare and @commit hooks are
 * deprecated. Use @enable and @disable exclusively instead.
*
* With legacy crtc helpers there's a big semantic difference between @disable
* and the other hooks: @disable also needs to release any resources acquired in
* @mode_set (like shared PLLs).
*/
struct drm_crtc_helper_funcs {
/*
* Control power levels on the CRTC. If the mode passed in is
* unsupported, the provider must use the next lowest power level.
*/
void (*dpms)(struct drm_crtc *crtc, int mode);
void (*prepare)(struct drm_crtc *crtc);
void (*commit)(struct drm_crtc *crtc);
 
/* Provider can fixup or change mode timings before modeset occurs */
bool (*mode_fixup)(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/* Actually set the mode */
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb);
/* Actually set the mode for atomic helpers, optional */
void (*mode_set_nofb)(struct drm_crtc *crtc);
 
/* Move the crtc on the current fb to the given position *optional* */
int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
int (*mode_set_base_atomic)(struct drm_crtc *crtc,
struct drm_framebuffer *fb, int x, int y,
enum mode_set_atomic);
 
/* reload the current crtc LUT */
void (*load_lut)(struct drm_crtc *crtc);
 
void (*disable)(struct drm_crtc *crtc);
void (*enable)(struct drm_crtc *crtc);
 
/* atomic helpers */
int (*atomic_check)(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void (*atomic_begin)(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
void (*atomic_flush)(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
};
 
/**
* struct drm_encoder_helper_funcs - helper operations for encoders
* @dpms: set power state
* @save: save connector state
* @restore: restore connector state
* @mode_fixup: try to fixup proposed mode for this connector
* @prepare: part of the disable sequence, called before the CRTC modeset
* @commit: called after the CRTC modeset
* @mode_set: set this mode, optional for atomic helpers
* @get_crtc: return CRTC that the encoder is currently attached to
* @detect: connection status detection
* @disable: disable encoder when not in use (overrides DPMS off)
* @enable: enable encoder
* @atomic_check: check for validity of an atomic update
*
* The helper operations are called by the mid-layer CRTC helper.
*
* Note that with atomic helpers @dpms, @prepare and @commit hooks are
 * deprecated. Use @enable and @disable exclusively instead.
*
* With legacy crtc helpers there's a big semantic difference between @disable
* and the other hooks: @disable also needs to release any resources acquired in
* @mode_set (like shared PLLs).
*/
struct drm_encoder_helper_funcs {
void (*dpms)(struct drm_encoder *encoder, int mode);
void (*save)(struct drm_encoder *encoder);
void (*restore)(struct drm_encoder *encoder);
 
bool (*mode_fixup)(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*prepare)(struct drm_encoder *encoder);
void (*commit)(struct drm_encoder *encoder);
void (*mode_set)(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
/* detect for DAC style encoders */
enum drm_connector_status (*detect)(struct drm_encoder *encoder,
struct drm_connector *connector);
void (*disable)(struct drm_encoder *encoder);
 
void (*enable)(struct drm_encoder *encoder);
 
/* atomic helpers */
int (*atomic_check)(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
};
 
/**
* struct drm_connector_helper_funcs - helper operations for connectors
* @get_modes: get mode list for this connector
* @mode_valid: is this mode valid on the given connector? (optional)
* @best_encoder: return the preferred encoder for this connector
* @atomic_best_encoder: atomic version of @best_encoder
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_connector_helper_funcs {
int (*get_modes)(struct drm_connector *connector);
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
struct drm_connector_state *connector_state);
};
 
extern void drm_helper_disable_unused_functions(struct drm_device *dev);
extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
56,8 → 197,26
extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
 
extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_mode_fb_cmd2 *mode_cmd);
 
static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
const struct drm_crtc_helper_funcs *funcs)
{
crtc->helper_private = funcs;
}
 
static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
const struct drm_encoder_helper_funcs *funcs)
{
encoder->helper_private = funcs;
}
 
static inline void drm_connector_helper_add(struct drm_connector *connector,
const struct drm_connector_helper_funcs *funcs)
{
connector->helper_private = funcs;
}
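/*
 * Usage sketch: drivers attach their helper vtables right after
 * initializing each object (the foo_* tables are hypothetical):
 *
 *	drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);
 *	drm_encoder_helper_add(encoder, &foo_encoder_helper_funcs);
 *	drm_connector_helper_add(connector, &foo_connector_helper_funcs);
 */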
 
extern void drm_helper_resume_force_mode(struct drm_device *dev);
 
int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
70,6 → 229,10
extern int drm_helper_probe_single_connector_modes(struct drm_connector
*connector, uint32_t maxX,
uint32_t maxY);
extern int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector
*connector,
uint32_t maxX,
uint32_t maxY);
extern void drm_kms_helper_poll_init(struct drm_device *dev);
extern void drm_kms_helper_poll_fini(struct drm_device *dev);
extern bool drm_helper_hpd_irq_event(struct drm_device *dev);
/drivers/include/drm/drm_dp_helper.h
455,52 → 455,16
# define DP_EDP_14 0x03
 
#define DP_EDP_GENERAL_CAP_1 0x701
# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0)
# define DP_EDP_BACKLIGHT_PIN_ENABLE_CAP (1 << 1)
# define DP_EDP_BACKLIGHT_AUX_ENABLE_CAP (1 << 2)
# define DP_EDP_PANEL_SELF_TEST_PIN_ENABLE_CAP (1 << 3)
# define DP_EDP_PANEL_SELF_TEST_AUX_ENABLE_CAP (1 << 4)
# define DP_EDP_FRC_ENABLE_CAP (1 << 5)
# define DP_EDP_COLOR_ENGINE_CAP (1 << 6)
# define DP_EDP_SET_POWER_CAP (1 << 7)
 
#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702
# define DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP (1 << 0)
# define DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP (1 << 1)
# define DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT (1 << 2)
# define DP_EDP_BACKLIGHT_AUX_PWM_PRODUCT_CAP (1 << 3)
# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_CAP (1 << 4)
# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP (1 << 5)
# define DP_EDP_DYNAMIC_BACKLIGHT_CAP (1 << 6)
# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_CAP (1 << 7)
 
#define DP_EDP_GENERAL_CAP_2 0x703
# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0)
 
#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
# define DP_EDP_X_REGION_CAP_MASK (0xf << 0)
# define DP_EDP_X_REGION_CAP_SHIFT 0
# define DP_EDP_Y_REGION_CAP_MASK (0xf << 4)
# define DP_EDP_Y_REGION_CAP_SHIFT 4
 
#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720
# define DP_EDP_BACKLIGHT_ENABLE (1 << 0)
# define DP_EDP_BLACK_VIDEO_ENABLE (1 << 1)
# define DP_EDP_FRC_ENABLE (1 << 2)
# define DP_EDP_COLOR_ENGINE_ENABLE (1 << 3)
# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_ENABLE (1 << 7)
 
#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721
# define DP_EDP_BACKLIGHT_CONTROL_MODE_MASK (3 << 0)
# define DP_EDP_BACKLIGHT_CONTROL_MODE_PWM (0 << 0)
# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET (1 << 0)
# define DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD (2 << 0)
# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT (3 << 0)
# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_ENABLE (1 << 2)
# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE (1 << 3)
# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4)
# define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5)
# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */
 
#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722
#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723
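/*
 * Usage sketch: switch a panel's backlight to DPCD (AUX) brightness
 * control, assuming the TCON advertises
 * DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP (error handling elided):
 *
 *	u8 mode;
 *
 *	drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode);
 *	mode &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
 *	mode |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
 *	drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, mode);
 */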
/drivers/include/drm/drm_fb_helper.h
34,11 → 34,6
 
#include <linux/kgdb.h>
 
enum mode_set_atomic {
LEAVE_ATOMIC_MODE_SET,
ENTER_ATOMIC_MODE_SET,
};
 
struct drm_fb_offset {
int x, y;
};
79,76 → 74,25
 
/**
* struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
* @gamma_set: Set the given gamma lut register on the given crtc.
* @gamma_get: Read the given gamma lut register on the given crtc, used to
* save the current lut when force-restoring the fbdev for e.g.
 * kgdb.
* @fb_probe: Driver callback to allocate and initialize the fbdev info
* structure. Furthermore it also needs to allocate the drm
* framebuffer used to back the fbdev.
* @initial_config: Setup an initial fbdev display configuration
*
* Driver callbacks used by the fbdev emulation helper library.
*/
struct drm_fb_helper_funcs {
/**
* @gamma_set:
*
* Set the given gamma LUT register on the given CRTC.
*
* This callback is optional.
*
* FIXME:
*
* This callback is functionally redundant with the core gamma table
* support and simply exists because the fbdev hasn't yet been
* refactored to use the core gamma table interfaces.
*/
void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
/**
* @gamma_get:
*
* Read the given gamma LUT register on the given CRTC, used to save the
* current LUT when force-restoring the fbdev for e.g. kdbg.
*
* This callback is optional.
*
* FIXME:
*
* This callback is functionally redundant with the core gamma table
* support and simply exists because the fbdev hasn't yet been
* refactored to use the core gamma table interfaces.
*/
void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
 
/**
* @fb_probe:
*
* Driver callback to allocate and initialize the fbdev info structure.
* Furthermore it also needs to allocate the DRM framebuffer used to
* back the fbdev.
*
* This callback is mandatory.
*
* RETURNS:
*
* The driver should return 0 on success and a negative error code on
* failure.
*/
int (*fb_probe)(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
 
/**
* @initial_config:
*
* Driver callback to setup an initial fbdev display configuration.
* Drivers can use this callback to tell the fbdev emulation what the
* preferred initial configuration is. This is useful to implement
* smooth booting where the fbdev (and subsequently all userspace) never
* changes the mode, but always inherits the existing configuration.
*
* This callback is optional.
*
* RETURNS:
*
* The driver should return true if a suitable initial configuration has
* been filled out and false when the fbdev helper should fall back to
* the default probing logic.
*/
bool (*initial_config)(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **crtcs,
struct drm_display_mode **modes,
161,7 → 105,7
};
 
/**
* struct drm_fb_helper - main structure to emulate fbdev on top of KMS
* struct drm_fb_helper - helper to emulate fbdev on top of kms
* @fb: Scanout framebuffer object
* @dev: DRM device
* @crtc_count: number of possible CRTCs
168,15 → 112,11
* @crtc_info: per-CRTC helper state (mode, x/y offset, etc)
* @connector_count: number of connected connectors
* @connector_info_alloc_count: size of connector_info
* @connector_info: array of per-connector information
* @funcs: driver callbacks for fb helper
* @fbdev: emulated fbdev device info struct
* @pseudo_palette: fake palette of 16 colors
*
* This is the main structure used by the fbdev helpers. Drivers supporting
 * fbdev emulation should embed this into their overall driver structure.
* Drivers must also fill out a struct &drm_fb_helper_funcs with a few
* operations.
* @kernel_fb_list: list_head in kernel_fb_helper_list
* @delayed_hotplug: was there a hotplug while kms master active?
*/
struct drm_fb_helper {
struct drm_framebuffer *fb;
189,21 → 129,10
const struct drm_fb_helper_funcs *funcs;
struct fb_info *fbdev;
u32 pseudo_palette[17];
 
/**
* @kernel_fb_list:
*
* Entry on the global kernel_fb_helper_list, used for kgdb entry/exit.
*/
struct list_head kernel_fb_list;
 
/**
* @delayed_hotplug:
*
* A hotplug was received while fbdev wasn't in control of the DRM
* device, i.e. another KMS master was active. The output configuration
	 * needs to be reprobed when fbdev is in control again.
*/
	/* we got a hotplug but fbdev wasn't running the console;
	   delay until next set_par */
bool delayed_hotplug;
 
/**
/drivers/include/drm/drm_mipi_dsi.h
163,36 → 163,9
return container_of(dev, struct mipi_dsi_device, dev);
}
 
/**
* mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any
* given pixel format defined by the MIPI DSI
* specification
* @fmt: MIPI DSI pixel format
*
* Returns: The number of bits per pixel of the given pixel format.
*/
static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt)
{
switch (fmt) {
case MIPI_DSI_FMT_RGB888:
case MIPI_DSI_FMT_RGB666:
return 24;
 
case MIPI_DSI_FMT_RGB666_PACKED:
return 18;
 
case MIPI_DSI_FMT_RGB565:
return 16;
}
 
return -EINVAL;
}
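/*
 * Usage sketch: derive the per-lane byte clock from a mode's pixel clock
 * (the variables are hypothetical; error handling elided):
 *
 *	int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
 *	u64 byte_clk = (u64)mode->clock * bpp;	/* kHz * bits per pixel */
 *
 *	do_div(byte_clk, dsi->lanes * 8);	/* per lane, bits -> bytes */
 */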
 
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
u16 value);
 
/drivers/include/drm/drm_mm.h
148,7 → 148,8
 
static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
return list_next_entry(hole_node, node_list)->start;
return list_entry(hole_node->node_list.next,
struct drm_mm_node, node_list)->start;
}
 
/**
179,14 → 180,6
&(mm)->head_node.node_list, \
node_list)
 
#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
&entry->hole_stack != &(mm)->hole_stack ? \
hole_start = drm_mm_hole_node_start(entry), \
hole_end = drm_mm_hole_node_end(entry), \
1 : 0; \
entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
 
/**
* drm_mm_for_each_hole - iterator to walk over all holes
* @entry: drm_mm_node used internally to track progress
207,8 → 200,21
* going backwards.
*/
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
&entry->hole_stack != &(mm)->hole_stack ? \
hole_start = drm_mm_hole_node_start(entry), \
hole_end = drm_mm_hole_node_end(entry), \
1 : 0; \
entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
 
#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
&entry->hole_stack != &(mm)->hole_stack ? \
hole_start = drm_mm_hole_node_start(entry), \
hole_end = drm_mm_hole_node_end(entry), \
1 : 0; \
entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
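/*
 * Usage sketch: scan the hole list for the first hole large enough for a
 * node of a given size (size is hypothetical):
 *
 *	struct drm_mm_node *entry;
 *	u64 hole_start, hole_end;
 *
 *	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
 *		if (hole_end - hole_start >= size)
 *			return entry;
 *	}
 */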
 
/*
* Basic range manager support (drm_mm.c)
*/
/drivers/include/drm/drm_modes.h
35,91 → 35,46
* structures).
*/
 
/**
* enum drm_mode_status - hardware support status of a mode
* @MODE_OK: Mode OK
* @MODE_HSYNC: hsync out of range
* @MODE_VSYNC: vsync out of range
* @MODE_H_ILLEGAL: mode has illegal horizontal timings
 * @MODE_V_ILLEGAL: mode has illegal vertical timings
* @MODE_BAD_WIDTH: requires an unsupported linepitch
* @MODE_NOMODE: no mode with a matching name
* @MODE_NO_INTERLACE: interlaced mode not supported
* @MODE_NO_DBLESCAN: doublescan mode not supported
* @MODE_NO_VSCAN: multiscan mode not supported
* @MODE_MEM: insufficient video memory
* @MODE_VIRTUAL_X: mode width too large for specified virtual size
* @MODE_VIRTUAL_Y: mode height too large for specified virtual size
* @MODE_MEM_VIRT: insufficient video memory given virtual size
* @MODE_NOCLOCK: no fixed clock available
* @MODE_CLOCK_HIGH: clock required is too high
* @MODE_CLOCK_LOW: clock required is too low
* @MODE_CLOCK_RANGE: clock/mode isn't in a ClockRange
* @MODE_BAD_HVALUE: horizontal timing was out of range
* @MODE_BAD_VVALUE: vertical timing was out of range
* @MODE_BAD_VSCAN: VScan value out of range
* @MODE_HSYNC_NARROW: horizontal sync too narrow
* @MODE_HSYNC_WIDE: horizontal sync too wide
* @MODE_HBLANK_NARROW: horizontal blanking too narrow
* @MODE_HBLANK_WIDE: horizontal blanking too wide
* @MODE_VSYNC_NARROW: vertical sync too narrow
* @MODE_VSYNC_WIDE: vertical sync too wide
* @MODE_VBLANK_NARROW: vertical blanking too narrow
* @MODE_VBLANK_WIDE: vertical blanking too wide
* @MODE_PANEL: exceeds panel dimensions
* @MODE_INTERLACE_WIDTH: width too large for interlaced mode
* @MODE_ONE_WIDTH: only one width is supported
* @MODE_ONE_HEIGHT: only one height is supported
* @MODE_ONE_SIZE: only one resolution is supported
* @MODE_NO_REDUCED: monitor doesn't accept reduced blanking
* @MODE_NO_STEREO: stereo modes not supported
* @MODE_STALE: mode has become stale
* @MODE_BAD: unspecified reason
* @MODE_ERROR: error condition
*
* This enum is used to filter out modes not supported by the driver/hardware
* combination.
*/
enum drm_mode_status {
MODE_OK = 0,
MODE_HSYNC,
MODE_VSYNC,
MODE_H_ILLEGAL,
MODE_V_ILLEGAL,
MODE_BAD_WIDTH,
MODE_NOMODE,
MODE_NO_INTERLACE,
MODE_NO_DBLESCAN,
MODE_NO_VSCAN,
MODE_MEM,
MODE_VIRTUAL_X,
MODE_VIRTUAL_Y,
MODE_MEM_VIRT,
MODE_NOCLOCK,
MODE_CLOCK_HIGH,
MODE_CLOCK_LOW,
MODE_CLOCK_RANGE,
MODE_BAD_HVALUE,
MODE_BAD_VVALUE,
MODE_BAD_VSCAN,
MODE_HSYNC_NARROW,
MODE_HSYNC_WIDE,
MODE_HBLANK_NARROW,
MODE_HBLANK_WIDE,
MODE_VSYNC_NARROW,
MODE_VSYNC_WIDE,
MODE_VBLANK_NARROW,
MODE_VBLANK_WIDE,
MODE_PANEL,
MODE_INTERLACE_WIDTH,
MODE_ONE_WIDTH,
MODE_ONE_HEIGHT,
MODE_ONE_SIZE,
MODE_NO_REDUCED,
MODE_NO_STEREO,
MODE_STALE = -3,
MODE_BAD = -2,
MODE_ERROR = -1
MODE_OK = 0, /* Mode OK */
MODE_HSYNC, /* hsync out of range */
MODE_VSYNC, /* vsync out of range */
MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
	MODE_V_ILLEGAL,		/* mode has illegal vertical timings */
MODE_BAD_WIDTH, /* requires an unsupported linepitch */
MODE_NOMODE, /* no mode with a matching name */
MODE_NO_INTERLACE, /* interlaced mode not supported */
MODE_NO_DBLESCAN, /* doublescan mode not supported */
MODE_NO_VSCAN, /* multiscan mode not supported */
MODE_MEM, /* insufficient video memory */
MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
MODE_MEM_VIRT, /* insufficient video memory given virtual size */
MODE_NOCLOCK, /* no fixed clock available */
MODE_CLOCK_HIGH, /* clock required is too high */
MODE_CLOCK_LOW, /* clock required is too low */
MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
MODE_BAD_HVALUE, /* horizontal timing was out of range */
MODE_BAD_VVALUE, /* vertical timing was out of range */
MODE_BAD_VSCAN, /* VScan value out of range */
MODE_HSYNC_NARROW, /* horizontal sync too narrow */
MODE_HSYNC_WIDE, /* horizontal sync too wide */
MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
MODE_HBLANK_WIDE, /* horizontal blanking too wide */
MODE_VSYNC_NARROW, /* vertical sync too narrow */
MODE_VSYNC_WIDE, /* vertical sync too wide */
MODE_VBLANK_NARROW, /* vertical blanking too narrow */
MODE_VBLANK_WIDE, /* vertical blanking too wide */
MODE_PANEL, /* exceeds panel dimensions */
MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
MODE_ONE_WIDTH, /* only one width is supported */
MODE_ONE_HEIGHT, /* only one height is supported */
MODE_ONE_SIZE, /* only one resolution is supported */
MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
MODE_NO_STEREO, /* stereo modes not supported */
	MODE_UNVERIFIED = -3,	/* mode needs to be re-verified */
MODE_BAD = -2, /* unspecified reason */
MODE_ERROR = -1 /* error condition */
};
 
#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
141,125 → 96,17
 
#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
 
/**
* struct drm_display_mode - DRM kernel-internal display mode structure
* @hdisplay: horizontal display size
* @hsync_start: horizontal sync start
* @hsync_end: horizontal sync end
* @htotal: horizontal total size
* @hskew: horizontal skew?!
* @vdisplay: vertical display size
* @vsync_start: vertical sync start
* @vsync_end: vertical sync end
* @vtotal: vertical total size
* @vscan: vertical scan?!
* @crtc_hdisplay: hardware mode horizontal display size
* @crtc_hblank_start: hardware mode horizontal blank start
* @crtc_hblank_end: hardware mode horizontal blank end
* @crtc_hsync_start: hardware mode horizontal sync start
* @crtc_hsync_end: hardware mode horizontal sync end
* @crtc_htotal: hardware mode horizontal total size
* @crtc_hskew: hardware mode horizontal skew?!
* @crtc_vdisplay: hardware mode vertical display size
* @crtc_vblank_start: hardware mode vertical blank start
* @crtc_vblank_end: hardware mode vertical blank end
* @crtc_vsync_start: hardware mode vertical sync start
* @crtc_vsync_end: hardware mode vertical sync end
* @crtc_vtotal: hardware mode vertical total size
*
* The horizontal and vertical timings are defined per the following diagram.
*
*
 *              Active                 Front           Sync           Back
 *              Region                 Porch                          Porch
 *     <-----------------------><----------------><-------------><-------------->
 *       //////////////////////|
 *      ////////////////////// |
 *     //////////////////////  |..................               ................
 *                                                _______________
 *     <----- [hv]display ----->
 *     <------------- [hv]sync_start ------------>
 *     <--------------------- [hv]sync_end --------------------->
 *     <-------------------------------- [hv]total ----------------------------->*
*
* This structure contains two copies of timings. First are the plain timings,
* which specify the logical mode, as it would be for a progressive 1:1 scanout
* at the refresh rate userspace can observe through vblank timestamps. Then
* there's the hardware timings, which are corrected for interlacing,
* double-clocking and similar things. They are provided as a convenience, and
* can be appropriately computed using drm_mode_set_crtcinfo().
*/
struct drm_display_mode {
/**
* @head:
*
* struct list_head for mode lists.
*/
/* Header */
struct list_head head;
 
/**
* @base:
*
* A display mode is a normal modeset object, possibly including public
* userspace id.
*
* FIXME:
*
* This can probably be removed since the entire concept of userspace
* managing modes explicitly has never landed in upstream kernel mode
* setting support.
*/
struct drm_mode_object base;
 
/**
* @name:
*
* Human-readable name of the mode, filled out with drm_mode_set_name().
*/
char name[DRM_DISPLAY_MODE_LEN];
 
/**
* @status:
*
* Status of the mode, used to filter out modes not supported by the
* hardware. See enum &drm_mode_status.
*/
enum drm_mode_status status;
 
/**
* @type:
*
* A bitmask of flags, mostly about the source of a mode. Possible flags
* are:
*
* - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, effectively
* unused.
* - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native
* resolution of an LCD panel. There should only be one preferred
* mode per connector at any given time.
* - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of
* them really. Drivers must set this bit for all modes they create
* and expose to userspace.
*
* Plus a big list of flags which shouldn't be used at all, but are
* still around since these flags are also used in the userspace ABI:
*
* - DRM_MODE_TYPE_DEFAULT: Again a leftover, use
* DRM_MODE_TYPE_PREFERRED instead.
* - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers
* which are stuck around for hysterical raisins only. No one has an
* idea what they were meant for. Don't use.
* - DRM_MODE_TYPE_USERDEF: Mode defined by userspace, again a vestige
* from older kms designs where userspace had to first add a custom
* mode to the kernel's mode list before it could use it. Don't use.
*/
unsigned int type;
 
/**
* @clock:
*
* Pixel clock in kHz.
*/
/* Proposed mode values */
int clock; /* in kHz */
int hdisplay;
int hsync_start;
271,74 → 118,14
int vsync_end;
int vtotal;
int vscan;
/**
* @flags:
*
* Sync and timing flags:
*
* - DRM_MODE_FLAG_PHSYNC: horizontal sync is active high.
* - DRM_MODE_FLAG_NHSYNC: horizontal sync is active low.
* - DRM_MODE_FLAG_PVSYNC: vertical sync is active high.
* - DRM_MODE_FLAG_NVSYNC: vertical sync is active low.
* - DRM_MODE_FLAG_INTERLACE: mode is interlaced.
* - DRM_MODE_FLAG_DBLSCAN: mode uses doublescan.
* - DRM_MODE_FLAG_CSYNC: mode uses composite sync.
* - DRM_MODE_FLAG_PCSYNC: composite sync is active high.
* - DRM_MODE_FLAG_NCSYNC: composite sync is active low.
* - DRM_MODE_FLAG_HSKEW: hskew provided (not used?).
* - DRM_MODE_FLAG_BCAST: not used?
* - DRM_MODE_FLAG_PIXMUX: not used?
* - DRM_MODE_FLAG_DBLCLK: double-clocked mode.
* - DRM_MODE_FLAG_CLKDIV2: half-clocked mode.
*
* Additionally there's flags to specify how 3D modes are packed:
*
* - DRM_MODE_FLAG_3D_NONE: normal, non-3D mode.
* - DRM_MODE_FLAG_3D_FRAME_PACKING: 2 full frames for left and right.
* - DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE: interleaved like fields.
* - DRM_MODE_FLAG_3D_LINE_ALTERNATIVE: interleaved lines.
* - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL: side-by-side full frames.
* - DRM_MODE_FLAG_3D_L_DEPTH: ?
* - DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH: ?
* - DRM_MODE_FLAG_3D_TOP_AND_BOTTOM: frame split into top and bottom
* parts.
* - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and
* right parts.
*/
unsigned int flags;
 
/**
* @width_mm:
*
* Addressable size of the output in mm, projectors should set this to
* 0.
*/
/* Addressable image size (may be 0 for projectors, etc.) */
int width_mm;
 
/**
* @height_mm:
*
* Addressable size of the output in mm, projectors should set this to
* 0.
*/
int height_mm;
 
/**
* @crtc_clock:
*
* Actual pixel or dot clock in the hardware. This differs from the
* logical @clock when e.g. using interlacing, double-clocking, stereo
* modes or other fancy stuff that changes the timings and signals
* actually sent over the wire.
*
* This is again in kHz.
*
* Note that with digital outputs like HDMI or DP there's usually a
* massive confusion between the dot clock and the signal clock at the
* bit encoding level. Especially when an 8b/10b encoding is used and the
* difference is exactly a factor of 10.
*/
int crtc_clock;
/* Actual mode we give to hw */
int crtc_clock; /* in KHz */
int crtc_hdisplay;
int crtc_hblank_start;
int crtc_hblank_end;
353,48 → 140,12
int crtc_vsync_end;
int crtc_vtotal;
 
/**
* @private:
*
* Pointer for driver private data. This can only be used for mode
* objects passed to drivers in modeset operations. It shouldn't be used
* by atomic drivers since they can store any additional data by
* subclassing state structures.
*/
/* Driver private mode info */
int *private;
 
/**
* @private_flags:
*
* Similar to @private, but just an integer.
*/
int private_flags;
 
/**
* @vrefresh:
*
* Vertical refresh rate, for debug output in human readable form. Not
* used in a functional way.
*
* This value is in Hz.
*/
int vrefresh;
 
/**
* @hsync:
*
* Horizontal refresh rate, for debug output in human readable form. Not
* used in a functional way.
*
* This value is in kHz.
*/
int hsync;
 
/**
* @picture_aspect_ratio:
*
* Field for setting the HDMI picture aspect ratio of a mode.
*/
int vrefresh; /* in Hz */
int hsync; /* in kHz */
enum hdmi_picture_aspect picture_aspect_ratio;
};
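A worked example of how the plain timings relate to the refresh rate (a
sketch mirroring the upstream drm_mode_vrefresh() helper and assuming
DIV_ROUND_CLOSEST from <linux/kernel.h>; vscan and interlace corrections
are omitted for brevity):

static inline int example_mode_vrefresh(const struct drm_display_mode *mode)
{
	/* @clock is in kHz; one frame is htotal * vtotal pixels */
	if (mode->htotal == 0 || mode->vtotal == 0)
		return 0;
	return DIV_ROUND_CLOSEST(mode->clock * 1000,
				 mode->htotal * mode->vtotal);
}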
 
471,8 → 222,6
const struct drm_display_mode *mode);
bool drm_mode_equal(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2);
bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2);
bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
const struct drm_display_mode *mode2);
 
483,7 → 232,7
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
void drm_mode_sort(struct list_head *mode_list);
void drm_mode_connector_list_update(struct drm_connector *connector);
void drm_mode_connector_list_update(struct drm_connector *connector, bool merge_type_bits);
 
/* parsing cmdline modes */
bool
/drivers/include/drm/drm_modeset_lock.h
138,7 → 138,7
struct drm_modeset_acquire_ctx *
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc);
 
int drm_modeset_lock_all_ctx(struct drm_device *dev,
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
 
#endif /* DRM_MODESET_LOCK_H_ */
/drivers/include/drm/drm_plane_helper.h
26,7 → 26,6
 
#include <drm/drm_rect.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
 
/*
* Drivers that don't allow primary plane scaling may pass this macro in place
37,9 → 36,46
*/
#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
 
/**
* DOC: plane helpers
*
* Helper functions to assist with creation and handling of CRTC primary
* planes.
*/
 
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs);
 
/**
* drm_plane_helper_funcs - helper operations for planes
* @prepare_fb: prepare a framebuffer for use by the plane
* @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane
* @atomic_check: check that a given atomic state is valid and can be applied
* @atomic_update: apply an atomic state to the plane (mandatory)
* @atomic_disable: disable the plane
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_plane_helper_funcs {
int (*prepare_fb)(struct drm_plane *plane,
const struct drm_plane_state *new_state);
void (*cleanup_fb)(struct drm_plane *plane,
const struct drm_plane_state *old_state);
 
int (*atomic_check)(struct drm_plane *plane,
struct drm_plane_state *state);
void (*atomic_update)(struct drm_plane *plane,
struct drm_plane_state *old_state);
void (*atomic_disable)(struct drm_plane *plane,
struct drm_plane_state *old_state);
};
 
static inline void drm_plane_helper_add(struct drm_plane *plane,
const struct drm_plane_helper_funcs *funcs)
{
plane->helper_private = funcs;
}
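A usage sketch for drm_plane_helper_funcs and drm_plane_helper_add() above
(example_check and example_update are hypothetical driver functions):

static const struct drm_plane_helper_funcs example_helper_funcs = {
	.atomic_check = example_check,		/* hypothetical */
	.atomic_update = example_update,	/* hypothetical, mandatory op */
};

/* at init time, after the plane has been created:
 *	drm_plane_helper_add(plane, &example_helper_funcs);
 */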
 
int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
/drivers/include/drm/i915_component.h
31,94 → 31,47
#define MAX_PORTS 5
 
/**
* struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver
* struct i915_audio_component_ops - callbacks defined in gfx driver
* @owner: the module owner
* @get_power: get the POWER_DOMAIN_AUDIO power well
* @put_power: put the POWER_DOMAIN_AUDIO power well
* @codec_wake_override: Enable/Disable generating the codec wake signal
* @get_cdclk_freq: get the Core Display Clock in kHz
* @sync_audio_rate: set n/cts based on the sample rate
*/
struct i915_audio_component_ops {
/**
* @owner: i915 module
*/
struct module *owner;
/**
* @get_power: get the POWER_DOMAIN_AUDIO power well
*
* Request the power well to be turned on.
*/
void (*get_power)(struct device *);
/**
* @put_power: put the POWER_DOMAIN_AUDIO power well
*
* Allow the power well to be turned off.
*/
void (*put_power)(struct device *);
/**
* @codec_wake_override: Enable/disable codec wake signal
*/
void (*codec_wake_override)(struct device *, bool enable);
/**
* @get_cdclk_freq: Get the Core Display Clock in kHz
*/
int (*get_cdclk_freq)(struct device *);
/**
* @sync_audio_rate: set n/cts based on the sample rate
*
* Called from the audio driver. After the audio driver sets the
* sample rate, it calls this function to set n/cts accordingly.
*/
int (*sync_audio_rate)(struct device *, int port, int rate);
/**
* @get_eld: fill the audio state and ELD bytes for the given port
*
* Called from the audio driver to get the HDMI/DP audio state of the
* given digital port, and to copy the ELD bytes into the given buffer.
*
* It returns the byte size of the original ELD (not the actually
* copied size), zero for an invalid ELD, or a negative error code.
*
* Note that the returned size may exceed @max_bytes; in that case only
* part of the ELD has been copied to the buffer.
*/
int (*get_eld)(struct device *, int port, bool *enabled,
unsigned char *buf, int max_bytes);
};
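A sketch of the @get_eld contract above, seen from the audio-driver side
(example_fetch_eld and its error policy are hypothetical):

static int example_fetch_eld(struct i915_audio_component *acomp, int port)
{
	unsigned char buf[128];	/* an ELD is at most 128 bytes */
	bool enabled;
	int ret;

	ret = acomp->ops->get_eld(acomp->dev, port, &enabled,
				  buf, sizeof(buf));
	if (ret > (int)sizeof(buf))
		return -ENOSPC;	/* truncated: only sizeof(buf) bytes copied */
	return ret;	/* 0: invalid ELD, <0: error, else ELD byte size */
}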
 
/**
* struct i915_audio_component_audio_ops - Ops implemented by hda driver, called by i915 driver
*/
struct i915_audio_component_audio_ops {
/**
* @audio_ptr: Pointer to be used in call to pin_eld_notify
*/
void *audio_ptr;
/**
* @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed
*
* Called when the i915 driver has set up the audio pipeline or has just
* begun to tear it down. This allows the HDA driver to update its
* status accordingly (even when the HDA controller is in power save
* mode).
* Called from the i915 driver, notifying the HDA driver that
* pin sense and/or ELD information has changed.
* @audio_ptr: HDA driver object
* @port: Which port has changed (PORTA / PORTB / PORTC etc)
*/
void (*pin_eld_notify)(void *audio_ptr, int port);
};
 
/**
* struct i915_audio_component - Used for direct communication between i915 and hda drivers
* struct i915_audio_component - used for audio video interaction
* @dev: the device from gfx driver
* @aud_sample_rate: the array of audio sample rates per port
* @ops: callbacks for the audio driver to call
* @audio_ops: callbacks called from the i915 driver
*/
struct i915_audio_component {
/**
* @dev: i915 device, used as parameter for ops
*/
struct device *dev;
/**
* @aud_sample_rate: the array of audio sample rate per port
*/
int aud_sample_rate[MAX_PORTS];
/**
* @ops: Ops implemented by i915 driver, called by hda driver
*/
 
const struct i915_audio_component_ops *ops;
/**
* @audio_ops: Ops implemented by hda driver, called by i915 driver
*/
 
const struct i915_audio_component_audio_ops *audio_ops;
};
 
/drivers/include/drm/drm_rect.h
162,8 → 162,7
int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
struct drm_rect *dst,
int min_vscale, int max_vscale);
void drm_rect_debug_print(const char *prefix,
const struct drm_rect *r, bool fixed_point);
void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
void drm_rect_rotate(struct drm_rect *r,
int width, int height,
unsigned int rotation);
/drivers/include/linux/notifier.h
File deleted
/drivers/include/linux/mmzone.h
File deleted
/drivers/include/linux/clocksource.h
62,18 → 62,12
* @suspend: suspend function for the clocksource, if necessary
* @resume: resume function for the clocksource, if necessary
* @owner: module reference, must be set by clocksource in modules
*
* Note: This struct is not used in hot paths of the timekeeping code
* because the timekeeper caches the hot path fields in its own data
* structure, so no cache line alignment is required.
*
* The pointer to the clocksource itself is handed to the read
* callback. If you need extra information there, you can wrap struct
* clocksource in your own struct. Depending on the amount of
* information you need, you should consider cache-line-aligning that
* structure.
*/
struct clocksource {
/*
* Hotpath data, fits in a single cache line when the
* clocksource itself is cacheline aligned.
*/
cycle_t (*read)(struct clocksource *cs);
cycle_t mask;
u32 mult;
101,7 → 95,7
cycle_t wd_last;
#endif
struct module *owner;
};
} ____cacheline_aligned;
 
/*
* Clock source flags bits:
/drivers/include/linux/compiler-gcc.h
251,7 → 251,9
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
 
#if GCC_VERSION >= 50000
#if GCC_VERSION >= 70000
#define KASAN_ABI_VERSION 5
#elif GCC_VERSION >= 50000
#define KASAN_ABI_VERSION 4
#elif GCC_VERSION >= 40902
#define KASAN_ABI_VERSION 3
/drivers/include/linux/cpumask.h
85,14 → 85,10
* only one CPU.
*/
 
extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
extern const struct cpumask *const cpu_possible_mask;
extern const struct cpumask *const cpu_online_mask;
extern const struct cpumask *const cpu_present_mask;
extern const struct cpumask *const cpu_active_mask;
 
#if NR_CPUS > 1
#define num_online_cpus() cpumask_weight(cpu_online_mask)
560,7 → 556,7
static inline int cpumask_parse_user(const char __user *buf, int len,
struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
 
/**
575,7 → 571,7
struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
nr_cpu_ids);
nr_cpumask_bits);
}
 
/**
590,7 → 586,7
char *nl = strchr(buf, '\n');
unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
 
return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
 
/**
602,7 → 598,7
*/
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
 
/**
720,49 → 716,14
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
 
/* Wrappers for arch boot code to manipulate normally-constant masks */
void set_cpu_possible(unsigned int cpu, bool possible);
void set_cpu_present(unsigned int cpu, bool present);
void set_cpu_online(unsigned int cpu, bool online);
void set_cpu_active(unsigned int cpu, bool active);
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);
 
static inline void
set_cpu_possible(unsigned int cpu, bool possible)
{
if (possible)
cpumask_set_cpu(cpu, &__cpu_possible_mask);
else
cpumask_clear_cpu(cpu, &__cpu_possible_mask);
}
 
static inline void
set_cpu_present(unsigned int cpu, bool present)
{
if (present)
cpumask_set_cpu(cpu, &__cpu_present_mask);
else
cpumask_clear_cpu(cpu, &__cpu_present_mask);
}
 
static inline void
set_cpu_online(unsigned int cpu, bool online)
{
if (online) {
cpumask_set_cpu(cpu, &__cpu_online_mask);
cpumask_set_cpu(cpu, &__cpu_active_mask);
} else {
cpumask_clear_cpu(cpu, &__cpu_online_mask);
}
}
 
static inline void
set_cpu_active(unsigned int cpu, bool active)
{
if (active)
cpumask_set_cpu(cpu, &__cpu_active_mask);
else
cpumask_clear_cpu(cpu, &__cpu_active_mask);
}
 
 
/**
* to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
* @bitmap: the bitmap
/drivers/include/linux/interrupt.h
65,17 → 65,6
 
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
/*
* These values can be returned by request_any_context_irq() and
* describe the context the interrupt will be run in.
*
* IRQC_IS_HARDIRQ - interrupt runs in hardirq context
* IRQC_IS_NESTED - interrupt runs in a nested threaded context
*/
enum {
IRQC_IS_HARDIRQ = 0,
IRQC_IS_NESTED,
};
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
/drivers/include/linux/io.h
22,14 → 22,6
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <asm/io.h>
struct device;
struct resource;
 
__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
void __ioread32_copy(void *to, const void __iomem *from, size_t count);
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
void *memremap(resource_size_t offset, size_t size, unsigned long flags);
void memunmap(void *addr);
 
#endif /* _LINUX_IO_H */
/drivers/include/linux/jiffies.h
5,11 → 5,12
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/timex.h>
//#include <linux/timex.h>
//#include <asm/param.h> /* for HZ */
 
 
#define HZ 100
#define CLOCK_TICK_RATE 1193182ul
 
/*
* The following defines establish the engineering parameters of the PLL
/drivers/include/linux/kernel.h
714,6 → 714,32
# define del_timer_sync(t) del_timer(t)
 
 
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }
 
#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }
 
build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
 
build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )
 
build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")
 
build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )
 
#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
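For reference, a sketch of what one instantiation above expands to
(example_readl is an illustrative name; the macro generates the real readl):

static inline unsigned int example_readl(const volatile void __iomem *addr)
{
	unsigned int ret;
	/* one mov plus a "memory" clobber, so the compiler cannot reorder
	 * the access against other memory operations */
	asm volatile("movl %1,%0" : "=r" (ret)
		     : "m" (*(volatile unsigned int __force *)addr)
		     : "memory");
	return ret;
}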
846,14 → 872,6
return __copy_to_user(to, from, n);
}
 
#define CAP_SYS_ADMIN 21
 
static inline bool capable(int cap)
{
return true;
}
 
 
void *kmap(struct page *page);
void *kmap_atomic(struct page *page);
void kunmap(struct page *page);
861,14 → 879,10
 
typedef u64 async_cookie_t;
 
//#define iowrite32(v, addr) writel((v), (addr))
#define iowrite32(v, addr) writel((v), (addr))
 
#define __init
 
#define CONFIG_PAGE_OFFSET 0
 
typedef long long __kernel_long_t;
typedef unsigned long long __kernel_ulong_t;
#define __kernel_long_t __kernel_long_t
 
#endif
/drivers/include/linux/list.h
24,7 → 24,7
 
static inline void INIT_LIST_HEAD(struct list_head *list)
{
WRITE_ONCE(list->next, list);
list->next = list;
list->prev = list;
}
 
42,7 → 42,7
next->prev = new;
new->next = next;
new->prev = prev;
WRITE_ONCE(prev->next, new);
prev->next = new;
}
#else
extern void __list_add(struct list_head *new,
186,7 → 186,7
*/
static inline int list_empty(const struct list_head *head)
{
return READ_ONCE(head->next) == head;
return head->next == head;
}
 
/**
608,7 → 608,7
 
static inline int hlist_empty(const struct hlist_head *h)
{
return !READ_ONCE(h->first);
return !h->first;
}
 
static inline void __hlist_del(struct hlist_node *n)
642,7 → 642,7
n->next = first;
if (first)
first->pprev = &n->next;
WRITE_ONCE(h->first, n);
h->first = n;
n->pprev = &h->first;
}
 
653,7 → 653,7
n->pprev = next->pprev;
n->next = next;
next->pprev = &n->next;
WRITE_ONCE(*(n->pprev), n);
*(n->pprev) = n;
}
 
static inline void hlist_add_behind(struct hlist_node *n,
660,7 → 660,7
struct hlist_node *prev)
{
n->next = prev->next;
WRITE_ONCE(prev->next, n);
prev->next = n;
n->pprev = &prev->next;
 
if (n->next)
/drivers/include/linux/log2.h
16,12 → 16,6
#include <linux/bitops.h>
 
/*
* deal with unrepresentable constant logarithms
*/
extern __attribute__((const, noreturn))
int ____ilog2_NaN(void);
 
/*
* non-constant log of base 2 calculators
* - the arch may override these in asm/bitops.h if they can be implemented
* more efficiently than using fls() and fls64()
85,7 → 79,7
#define ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
(n) < 1 ? ____ilog2_NaN() : \
(n) < 2 ? 0 : \
(n) & (1ULL << 63) ? 63 : \
(n) & (1ULL << 62) ? 62 : \
(n) & (1ULL << 61) ? 61 : \
148,10 → 142,7
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
(n) & (1ULL << 1) ? 1 : \
(n) & (1ULL << 0) ? 0 : \
____ilog2_NaN() \
) : \
1 ) : \
(sizeof(n) <= 4) ? \
__ilog2_u32(n) : \
__ilog2_u64(n) \
203,6 → 194,17
* ... and so on.
*/
 
#define order_base_2(n) ilog2(roundup_pow_of_two(n))
static inline __attribute_const__
int __order_base_2(unsigned long n)
{
return n > 1 ? ilog2(n - 1) + 1 : 0;
}
 
#define order_base_2(n) \
( \
__builtin_constant_p(n) ? ( \
((n) == 0 || (n) == 1) ? 0 : \
ilog2((n) - 1) + 1) : \
__order_base_2(n) \
)
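Worked examples for the helpers above (a sketch; the values follow directly
from the definitions):

	ilog2(64)		-> 6	(folded at compile time)
	order_base_2(64)	-> 6	(already a power of two)
	order_base_2(65)	-> 7	(rounds up to 128)
	order_base_2(1)		-> 0	(special-cased above)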
#endif /* _LINUX_LOG2_H */
/drivers/include/linux/pci.h
990,6 → 990,23
return pdev->is_managed;
}
 
static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq)
{
pdev->irq = irq;
pdev->irq_managed = 1;
}
 
static inline void pci_reset_managed_irq(struct pci_dev *pdev)
{
pdev->irq = 0;
pdev->irq_managed = 0;
}
 
static inline bool pci_has_managed_irq(struct pci_dev *pdev)
{
return pdev->irq_managed && pdev->irq > 0;
}
 
void pci_disable_device(struct pci_dev *dev);
 
extern unsigned int pcibios_max_latency;
1250,6 → 1267,8
u16 entry; /* driver uses to specify entry, OS writes */
};
 
void pci_msi_setup_pci_dev(struct pci_dev *dev);
 
#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);
void pci_msi_shutdown(struct pci_dev *dev);
1937,16 → 1956,6
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
#endif /* CONFIG_OF */
 
#ifdef CONFIG_ACPI
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
 
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
#else
static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
#endif
 
#ifdef CONFIG_EEH
static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
{
1994,6 → 2003,4
const struct pci_device_id*
find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist);
 
struct pci_dev * _pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);
 
#endif /* LINUX_PCI_H */
/drivers/include/linux/pm.h
573,7 → 573,6
struct wakeup_source *wakeup;
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
#else
unsigned int should_wakeup:1;
#endif
/drivers/include/linux/seqlock.h
234,53 → 234,7
s->sequence++;
}
 
/**
* raw_write_seqcount_barrier - do a seq write barrier
* @s: pointer to seqcount_t
*
* This can be used to provide an ordering guarantee instead of the
* usual consistency guarantee. It is one wmb cheaper, because we can
* collapse the two back-to-back wmb()s.
*
* seqcount_t seq;
* bool X = true, Y = false;
*
* void read(void)
* {
* bool x, y;
*
* do {
* int s = read_seqcount_begin(&seq);
*
* x = X; y = Y;
*
* } while (read_seqcount_retry(&seq, s));
*
* BUG_ON(!x && !y);
* }
*
* void write(void)
* {
* Y = true;
*
* raw_write_seqcount_barrier(seq);
*
* X = false;
* }
*/
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
s->sequence++;
smp_wmb();
s->sequence++;
}
 
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
return lockless_dereference(s->sequence);
}
 
/**
/*
* raw_write_seqcount_latch - redirect readers to even/odd copy
* @s: pointer to seqcount_t
*
/drivers/include/linux/string.h
10,7 → 10,6
 
extern char *strndup_user(const char __user *, long);
extern void *memdup_user(const void __user *, size_t);
extern void *memdup_user_nul(const void __user *, size_t);
 
/*
* Include machine specific inline routines
128,7 → 127,11
extern void argv_free(char **argv);
 
extern bool sysfs_streq(const char *s1, const char *s2);
extern int strtobool(const char *s, bool *res);
extern int kstrtobool(const char *s, bool *res);
static inline int strtobool(const char *s, bool *res)
{
return kstrtobool(s, res);
}
 
#ifdef CONFIG_BINARY_PRINTF
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
/drivers/include/linux/sysfs.h
31,15 → 31,6
struct lock_class_key skey;
#endif
};
struct attribute_group {
const char *name;
umode_t (*is_visible)(struct kobject *,
struct attribute *, int);
umode_t (*is_bin_visible)(struct kobject *,
struct bin_attribute *, int);
struct attribute **attrs;
struct bin_attribute **bin_attrs;
};
#ifdef CONFIG_SYSFS
 
int __must_check sysfs_create_dir_ns(struct kobject *kobj, const void *ns);
225,34 → 216,7
{
}
 
static inline int sysfs_create_group(struct kobject *kobj,
const struct attribute_group *grp)
{
return 0;
}
 
static inline int sysfs_create_groups(struct kobject *kobj,
const struct attribute_group **groups)
{
return 0;
}
 
static inline int sysfs_update_group(struct kobject *kobj,
const struct attribute_group *grp)
{
return 0;
}
 
static inline void sysfs_remove_group(struct kobject *kobj,
const struct attribute_group *grp)
{
}
 
static inline void sysfs_remove_groups(struct kobject *kobj,
const struct attribute_group **groups)
{
}
 
static inline int sysfs_add_file_to_group(struct kobject *kobj,
const struct attribute *attr, const char *group)
{
264,17 → 228,6
{
}
 
static inline int sysfs_merge_group(struct kobject *kobj,
const struct attribute_group *grp)
{
return 0;
}
 
static inline void sysfs_unmerge_group(struct kobject *kobj,
const struct attribute_group *grp)
{
}
 
static inline int sysfs_add_link_to_group(struct kobject *kobj,
const char *group_name, struct kobject *target,
const char *link_name)
/drivers/include/linux/timer.h
2,12 → 2,7
#define _LINUX_TIMER_H
 
#include <linux/list.h>
#include <linux/ktime.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
 
struct tvec_base;
 
unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
/drivers/include/linux/vmalloc.h
13,6 → 13,7
#define VM_ALLOC 0x00000002 /* vmalloc() */
#define VM_MAP 0x00000004 /* vmap()ed pages */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
/drivers/include/linux/acpi.h
37,8 → 37,6
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/dynamic_debug.h>
#include <linux/module.h>
#include <linux/mutex.h>
 
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
320,7 → 318,6
bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
struct resource_win *win);
unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable);
unsigned int acpi_dev_get_irq_type(int triggering, int polarity);
bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
struct resource *res);
 
923,7 → 920,7
return NULL;
}
 
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
static const void * __acpi_table_##name[] \
__attribute__((unused)) \
= { (void *) table_id, \
/drivers/include/linux/bug.h
1,27 → 1,57
#ifndef _LINUX_BUG_H
#define _LINUX_BUG_H
#ifndef _ASM_GENERIC_BUG_H
#define _ASM_GENERIC_BUG_H
 
#include <asm/bug.h>
#include <linux/compiler.h>
 
enum bug_trap_type {
BUG_TRAP_TYPE_NONE = 0,
BUG_TRAP_TYPE_WARN = 1,
BUG_TRAP_TYPE_BUG = 2,
};
int printf(const char *fmt, ...);
 
struct pt_regs;
#define __WARN() printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__)
//#define __WARN_printf(arg...) printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__)
#define __WARN_printf(arg...) do { printf(arg); __WARN(); } while (0)
 
#ifdef __CHECKER__
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_ZERO(e) (0)
#define BUILD_BUG_ON_NULL(e) ((void*)0)
#define BUILD_BUG_ON_INVALID(e) (0)
#define BUILD_BUG_ON_MSG(cond, msg) (0)
#define BUILD_BUG_ON(condition) (0)
#define BUILD_BUG() (0)
#else /* __CHECKER__ */
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
__WARN_printf(format); \
unlikely(__ret_warn_on); \
})
 
 
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
__WARN(); \
unlikely(__ret_warn_on); \
})
 
 
#define WARN_ONCE(condition, format...) ({ \
static bool __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN(!__warned, format)) \
__warned = true; \
unlikely(__ret_warn_once); \
})
 
 
#define WARN_ON_ONCE(condition) ({ \
static bool __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN_ON(!__warned)) \
__warned = true; \
unlikely(__ret_warn_once); \
})
 
#define BUG() do { \
printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \
} while (0)
 
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
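Usage sketch for the fallbacks above (fb and delay are hypothetical; in this
port the warnings print via printf instead of trapping):

	if (WARN_ON(fb == NULL))	/* warns, then take the error path */
		return -EINVAL;

	WARN_ONCE(delay > 50, "flush took %d ms\n", delay);	/* fires once */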
 
/* Force a compilation error if a constant expression is not a power of 2 */
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
83,30 → 113,10
*/
#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
 
#endif /* __CHECKER__ */
 
#ifdef CONFIG_GENERIC_BUG
#include <asm-generic/bug.h>
 
static inline int is_warning_bug(const struct bug_entry *bug)
{
return bug->flags & BUGFLAG_WARNING;
}
 
const struct bug_entry *find_bug(unsigned long bugaddr);
#define pr_warn_once(fmt, ...) \
printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
 
enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
 
/* These are defined by the architecture */
int is_valid_bugaddr(unsigned long addr);
 
#else /* !CONFIG_GENERIC_BUG */
 
static inline enum bug_trap_type report_bug(unsigned long bug_addr,
struct pt_regs *regs)
{
return BUG_TRAP_TYPE_BUG;
}
 
#endif /* CONFIG_GENERIC_BUG */
#endif /* _LINUX_BUG_H */
#endif
/drivers/include/linux/dma-mapping.h
8,7 → 8,6
#include <linux/dma-attrs.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
 
extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
/drivers/include/linux/property.h
144,19 → 144,15
/**
* struct property_entry - "Built-in" device property representation.
* @name: Name of the property.
* @length: Length of data making up the value.
* @is_array: True when the property is an array.
* @is_string: True when property is a string.
* @pointer: Pointer to the property (an array of items of the given type).
* @value: Value of the property (when it is a single item of the given type).
* @type: Type of the property.
* @nval: Number of items of type @type making up the value.
* @value: Value of the property (an array of @nval items of type @type).
*/
struct property_entry {
const char *name;
size_t length;
bool is_array;
bool is_string;
enum dev_prop_type type;
size_t nval;
union {
union {
void *raw_data;
u8 *u8_data;
u16 *u16_data;
163,81 → 159,9
u32 *u32_data;
u64 *u64_data;
const char **str;
} pointer;
union {
unsigned long long raw_data;
u8 u8_data;
u16 u16_data;
u32 u32_data;
u64 u64_data;
const char *str;
} value;
};
};
 
/*
* Note: the below four initializers for the anonymous union are carefully
* crafted to avoid gcc-4.4.4's problems with initialization of anon unions
* and structs.
*/
 
#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \
{ \
.name = _name_, \
.length = ARRAY_SIZE(_val_) * sizeof(_type_), \
.is_array = true, \
.is_string = false, \
{ .pointer = { _type_##_data = _val_ } }, \
}
 
#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u8, _val_)
#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \
PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u16, _val_)
#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \
PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u32, _val_)
#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \
PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_)
 
#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
{ \
.name = _name_, \
.length = ARRAY_SIZE(_val_) * sizeof(const char *), \
.is_array = true, \
.is_string = true, \
{ .pointer = { .str = _val_ } }, \
}
 
#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \
{ \
.name = _name_, \
.length = sizeof(_type_), \
.is_string = false, \
{ .value = { ._type_##_data = _val_ } }, \
}
 
#define PROPERTY_ENTRY_U8(_name_, _val_) \
PROPERTY_ENTRY_INTEGER(_name_, u8, _val_)
#define PROPERTY_ENTRY_U16(_name_, _val_) \
PROPERTY_ENTRY_INTEGER(_name_, u16, _val_)
#define PROPERTY_ENTRY_U32(_name_, _val_) \
PROPERTY_ENTRY_INTEGER(_name_, u32, _val_)
#define PROPERTY_ENTRY_U64(_name_, _val_) \
PROPERTY_ENTRY_INTEGER(_name_, u64, _val_)
 
#define PROPERTY_ENTRY_STRING(_name_, _val_) \
{ \
.name = _name_, \
.length = sizeof(_val_), \
.is_string = true, \
{ .value = { .str = _val_ } }, \
}
 
#define PROPERTY_ENTRY_BOOL(_name_) \
{ \
.name = _name_, \
}
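A usage sketch for the initializers above (a hypothetical table, assuming
the usual empty-entry sentinel at the end):

static struct property_entry example_properties[] = {
	PROPERTY_ENTRY_U32("clock-frequency", 400000),
	PROPERTY_ENTRY_STRING("label", "example"),
	PROPERTY_ENTRY_BOOL("wakeup-source"),
	{ },	/* sentinel */
};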
 
/**
* struct property_set - Collection of "built-in" device properties.
* @fwnode: Handle to be pointed to by the fwnode field of struct device.
248,8 → 172,7
struct property_entry *properties;
};
 
int device_add_property_set(struct device *dev, const struct property_set *pset);
void device_remove_property_set(struct device *dev);
void device_add_property_set(struct device *dev, struct property_set *pset);
 
bool device_dma_supported(struct device *dev);
 
/drivers/include/linux/rcupdate.h
48,17 → 48,10
 
#include <asm/barrier.h>
 
#ifndef CONFIG_TINY_RCU
extern int rcu_expedited; /* for sysctl */
extern int rcu_normal; /* also for sysctl */
#endif /* #ifndef CONFIG_TINY_RCU */
 
#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) /* Internal RCU use. */
{
return true;
}
static inline bool rcu_gp_is_expedited(void) /* Internal RCU use. */
{
return false;
72,7 → 65,6
{
}
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void); /* Internal RCU use. */
bool rcu_gp_is_expedited(void); /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
291,6 → 283,7
 
/* Internal to kernel */
void rcu_init(void);
void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
298,12 → 291,6
int rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
 
#ifndef CONFIG_TINY_RCU
void rcu_end_inkernel_boot(void);
#else /* #ifndef CONFIG_TINY_RCU */
static inline void rcu_end_inkernel_boot(void) { }
#endif /* #ifndef CONFIG_TINY_RCU */
 
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
354,9 → 341,9
*/
#define RCU_NONIDLE(a) \
do { \
rcu_irq_enter_irqson(); \
rcu_irq_enter(); \
do { a; } while (0); \
rcu_irq_exit_irqson(); \
rcu_irq_exit(); \
} while (0)
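Usage sketch for RCU_NONIDLE() above (do_trace_event() is a hypothetical
function containing RCU readers that may run from the idle loop):

	RCU_NONIDLE(do_trace_event());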
 
/*
716,7 → 703,7
* The tracing infrastructure traces RCU (we want that), but unfortunately
* some of the RCU checks causes tracing to lock up the system.
*
* The no-tracing version of rcu_dereference_raw() must not call
* The tracing version of rcu_dereference_raw() must not call
* rcu_read_lock_held().
*/
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
767,28 → 754,6
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
 
/**
* rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
* @p: The pointer to hand off
*
* This is simply an identity function, but it documents where a pointer
* is handed off from RCU to some other synchronization mechanism, for
* example, reference counting or locking. In C11, it would map to
* kill_dependency(). It could be used as follows:
*
* rcu_read_lock();
* p = rcu_dereference(gp);
* long_lived = is_long_lived(p);
* if (long_lived) {
* if (!atomic_inc_not_zero(p->refcnt))
* long_lived = false;
* else
* p = rcu_pointer_handoff(p);
* }
* rcu_read_unlock();
*/
#define rcu_pointer_handoff(p) (p)
 
/**
* rcu_read_lock() - mark the beginning of an RCU read-side critical section
*
* When synchronize_rcu() is invoked on one CPU while other CPUs
1020,7 → 985,7
#define RCU_INIT_POINTER(p, v) \
do { \
rcu_dereference_sparse(p, __rcu); \
WRITE_ONCE(p, RCU_INITIALIZER(v)); \
p = RCU_INITIALIZER(v); \
} while (0)
 
/**
/drivers/include/linux/dma-attrs.h
41,6 → 41,7
bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS);
}
 
#ifdef CONFIG_HAVE_DMA_ATTRS
/**
* dma_set_attr - set a specific attribute
* @attr: attribute to set
66,5 → 67,14
BUG_ON(attr >= DMA_ATTR_MAX);
return test_bit(attr, attrs->flags);
}
#else /* !CONFIG_HAVE_DMA_ATTRS */
static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
{
}
 
static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
{
return 0;
}
#endif /* CONFIG_HAVE_DMA_ATTRS */
#endif /* _DMA_ATTR_H */
/drivers/include/linux/backlight.h
11,8 → 11,6
#include <linux/device.h>
#include <linux/fb.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
 
/* Notes on locking:
*
* backlight_device->ops_lock is an internal backlight lock protecting the
45,6 → 43,4
BACKLIGHT_UNREGISTERED,
};
 
struct backlight_device;
struct fb_info;
#endif
/drivers/include/linux/dmi.h
22,7 → 22,6
DMI_DEV_TYPE_IPMI = -1,
DMI_DEV_TYPE_OEM_STRING = -2,
DMI_DEV_TYPE_DEV_ONBOARD = -3,
DMI_DEV_TYPE_DEV_SLOT = -4,
};
 
enum dmi_entry_type {
/drivers/include/linux/idr.h
135,20 → 135,6
#define idr_for_each_entry(idp, entry, id) \
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
 
/**
* idr_for_each_entry - continue iteration over an idr's elements of a given type
* @idp: idr handle
* @entry: the type * to use as cursor
* @id: id entry's key
*
* Continue to iterate over list of given type, continuing after
* the current position.
*/
#define idr_for_each_entry_continue(idp, entry, id) \
for ((entry) = idr_get_next((idp), &(id)); \
entry; \
++id, (entry) = idr_get_next((idp), &(id)))
 
/*
* IDA - IDR based id allocator, use when translation from id to
* pointer isn't necessary.
/drivers/include/linux/lockdep.h
66,7 → 66,7
/*
* class-hash:
*/
struct hlist_node hash_entry;
struct list_head hash_entry;
 
/*
* global list of all lock-classes:
199,7 → 199,7
u8 irq_context;
u8 depth;
u16 base;
struct hlist_node entry;
struct list_head entry;
u64 chain_key;
};
 
/drivers/include/linux/mmdebug.h
56,10 → 56,4
#define VIRTUAL_BUG_ON(cond) do { } while (0)
#endif
 
#ifdef CONFIG_DEBUG_VM_PGFLAGS
#define VM_BUG_ON_PGFLAGS(cond, page) VM_BUG_ON_PAGE(cond, page)
#else
#define VM_BUG_ON_PGFLAGS(cond, page) BUILD_BUG_ON_INVALID(cond)
#endif
 
#endif
/drivers/include/linux/rcutiny.h
175,14 → 175,6
{
}
 
static inline void rcu_irq_exit_irqson(void)
{
}
 
static inline void rcu_irq_enter_irqson(void)
{
}
 
static inline void rcu_irq_exit(void)
{
}
/drivers/include/linux/compiler.h
299,23 → 299,6
__u.__val; \
})
 
/**
* smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
* @cond: boolean expression to wait for
*
* Equivalent to using smp_load_acquire() on the condition variable but employs
* the control dependency of the wait to reduce the barrier on many platforms.
*
* The control dependency provides a LOAD->STORE order, the additional RMB
* provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
* aka. ACQUIRE.
*/
#define smp_cond_acquire(cond) do { \
while (!(cond)) \
cpu_relax(); \
smp_rmb(); /* ctrl + rmb := acquire */ \
} while (0)
 
#endif /* __KERNEL__ */
 
#endif /* __ASSEMBLY__ */
/drivers/include/linux/fs.h
94,5 → 94,4
#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
#define FL_LAYOUT 2048 /* outstanding pNFS layout */
struct inode;
#endif /* _LINUX_FS_H */
/drivers/include/linux/fb.h
1,19 → 1,413
#ifndef _LINUX_FB_H
#define _LINUX_FB_H
 
#include <linux/kgdb.h>
#include <uapi/linux/fb.h>
#include <linux/types.h>
#include <linux/i2c.h>
 
struct dentry;
 
/* Definitions of frame buffers */
 
#define FB_MAX 32 /* sufficient for now */
 
/* ioctls
0x46 is 'F' */
#define FBIOGET_VSCREENINFO 0x4600
#define FBIOPUT_VSCREENINFO 0x4601
#define FBIOGET_FSCREENINFO 0x4602
#define FBIOGETCMAP 0x4604
#define FBIOPUTCMAP 0x4605
#define FBIOPAN_DISPLAY 0x4606
#ifdef __KERNEL__
#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor_user)
#else
#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor)
#endif
/* 0x4607-0x460B are defined below */
/* #define FBIOGET_MONITORSPEC 0x460C */
/* #define FBIOPUT_MONITORSPEC 0x460D */
/* #define FBIOSWITCH_MONIBIT 0x460E */
#define FBIOGET_CON2FBMAP 0x460F
#define FBIOPUT_CON2FBMAP 0x4610
#define FBIOBLANK 0x4611 /* arg: 0 or vesa level + 1 */
#define FBIOGET_VBLANK _IOR('F', 0x12, struct fb_vblank)
#define FBIO_ALLOC 0x4613
#define FBIO_FREE 0x4614
#define FBIOGET_GLYPH 0x4615
#define FBIOGET_HWCINFO 0x4616
#define FBIOPUT_MODEINFO 0x4617
#define FBIOGET_DISPINFO 0x4618
#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
 
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */
#define FB_TYPE_PLANES 1 /* Non interleaved planes */
#define FB_TYPE_INTERLEAVED_PLANES 2 /* Interleaved planes */
#define FB_TYPE_TEXT 3 /* Text/attributes */
#define FB_TYPE_VGA_PLANES 4 /* EGA/VGA planes */
 
#define FB_AUX_TEXT_MDA 0 /* Monochrome text */
#define FB_AUX_TEXT_CGA 1 /* CGA/EGA/VGA Color text */
#define FB_AUX_TEXT_S3_MMIO 2 /* S3 MMIO fasttext */
#define FB_AUX_TEXT_MGA_STEP16 3 /* MGA Millenium I: text, attr, 14 reserved bytes */
#define FB_AUX_TEXT_MGA_STEP8 4 /* other MGAs: text, attr, 6 reserved bytes */
#define FB_AUX_TEXT_SVGA_GROUP 8 /* 8-15: SVGA tileblit compatible modes */
#define FB_AUX_TEXT_SVGA_MASK 7 /* lower three bits give the step */
#define FB_AUX_TEXT_SVGA_STEP2 8 /* SVGA text mode: text, attr */
#define FB_AUX_TEXT_SVGA_STEP4 9 /* SVGA text mode: text, attr, 2 reserved bytes */
#define FB_AUX_TEXT_SVGA_STEP8 10 /* SVGA text mode: text, attr, 6 reserved bytes */
#define FB_AUX_TEXT_SVGA_STEP16 11 /* SVGA text mode: text, attr, 14 reserved bytes */
#define FB_AUX_TEXT_SVGA_LAST 15 /* reserved up to 15 */
 
#define FB_AUX_VGA_PLANES_VGA4 0 /* 16 color planes (EGA/VGA) */
#define FB_AUX_VGA_PLANES_CFB4 1 /* CFB4 in planes (VGA) */
#define FB_AUX_VGA_PLANES_CFB8 2 /* CFB8 in planes (VGA) */
 
#define FB_VISUAL_MONO01 0 /* Monochr. 1=Black 0=White */
#define FB_VISUAL_MONO10 1 /* Monochr. 1=White 0=Black */
#define FB_VISUAL_TRUECOLOR 2 /* True color */
#define FB_VISUAL_PSEUDOCOLOR 3 /* Pseudo color (like atari) */
#define FB_VISUAL_DIRECTCOLOR 4 /* Direct color */
#define FB_VISUAL_STATIC_PSEUDOCOLOR 5 /* Pseudo color readonly */
 
#define FB_ACCEL_NONE 0 /* no hardware accelerator */
#define FB_ACCEL_ATARIBLITT 1 /* Atari Blitter */
#define FB_ACCEL_AMIGABLITT 2 /* Amiga Blitter */
#define FB_ACCEL_S3_TRIO64 3 /* Cybervision64 (S3 Trio64) */
#define FB_ACCEL_NCR_77C32BLT 4 /* RetinaZ3 (NCR 77C32BLT) */
#define FB_ACCEL_S3_VIRGE 5 /* Cybervision64/3D (S3 ViRGE) */
#define FB_ACCEL_ATI_MACH64GX 6 /* ATI Mach 64GX family */
#define FB_ACCEL_DEC_TGA 7 /* DEC 21030 TGA */
#define FB_ACCEL_ATI_MACH64CT 8 /* ATI Mach 64CT family */
#define FB_ACCEL_ATI_MACH64VT 9 /* ATI Mach 64CT family VT class */
#define FB_ACCEL_ATI_MACH64GT 10 /* ATI Mach 64CT family GT class */
#define FB_ACCEL_SUN_CREATOR 11 /* Sun Creator/Creator3D */
#define FB_ACCEL_SUN_CGSIX 12 /* Sun cg6 */
#define FB_ACCEL_SUN_LEO 13 /* Sun leo/zx */
#define FB_ACCEL_IMS_TWINTURBO 14 /* IMS Twin Turbo */
#define FB_ACCEL_3DLABS_PERMEDIA2 15 /* 3Dlabs Permedia 2 */
#define FB_ACCEL_MATROX_MGA2064W 16 /* Matrox MGA2064W (Millenium) */
#define FB_ACCEL_MATROX_MGA1064SG 17 /* Matrox MGA1064SG (Mystique) */
#define FB_ACCEL_MATROX_MGA2164W 18 /* Matrox MGA2164W (Millenium II) */
#define FB_ACCEL_MATROX_MGA2164W_AGP 19 /* Matrox MGA2164W (Millenium II) */
#define FB_ACCEL_MATROX_MGAG100 20 /* Matrox G100 (Productiva G100) */
#define FB_ACCEL_MATROX_MGAG200 21 /* Matrox G200 (Myst, Mill, ...) */
#define FB_ACCEL_SUN_CG14 22 /* Sun cgfourteen */
#define FB_ACCEL_SUN_BWTWO 23 /* Sun bwtwo */
#define FB_ACCEL_SUN_CGTHREE 24 /* Sun cgthree */
#define FB_ACCEL_SUN_TCX 25 /* Sun tcx */
#define FB_ACCEL_MATROX_MGAG400 26 /* Matrox G400 */
#define FB_ACCEL_NV3 27 /* nVidia RIVA 128 */
#define FB_ACCEL_NV4 28 /* nVidia RIVA TNT */
#define FB_ACCEL_NV5 29 /* nVidia RIVA TNT2 */
#define FB_ACCEL_CT_6555x 30 /* C&T 6555x */
#define FB_ACCEL_3DFX_BANSHEE 31 /* 3Dfx Banshee */
#define FB_ACCEL_ATI_RAGE128 32 /* ATI Rage128 family */
#define FB_ACCEL_IGS_CYBER2000 33 /* CyberPro 2000 */
#define FB_ACCEL_IGS_CYBER2010 34 /* CyberPro 2010 */
#define FB_ACCEL_IGS_CYBER5000 35 /* CyberPro 5000 */
#define FB_ACCEL_SIS_GLAMOUR 36 /* SiS 300/630/540 */
#define FB_ACCEL_3DLABS_PERMEDIA3 37 /* 3Dlabs Permedia 3 */
#define FB_ACCEL_ATI_RADEON 38 /* ATI Radeon family */
#define FB_ACCEL_I810 39 /* Intel 810/815 */
#define FB_ACCEL_SIS_GLAMOUR_2 40 /* SiS 315, 650, 740 */
#define FB_ACCEL_SIS_XABRE 41 /* SiS 330 ("Xabre") */
#define FB_ACCEL_I830 42 /* Intel 830M/845G/85x/865G */
#define FB_ACCEL_NV_10 43 /* nVidia Arch 10 */
#define FB_ACCEL_NV_20 44 /* nVidia Arch 20 */
#define FB_ACCEL_NV_30 45 /* nVidia Arch 30 */
#define FB_ACCEL_NV_40 46 /* nVidia Arch 40 */
#define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */
#define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */
#define FB_ACCEL_OMAP1610 49 /* TI OMAP16xx */
#define FB_ACCEL_TRIDENT_TGUI 50 /* Trident TGUI */
#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */
#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */
#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */
#define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */
#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */
#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */
#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */
#define FB_ACCEL_NEOMAGIC_NM2097 93 /* NeoMagic NM2097 */
#define FB_ACCEL_NEOMAGIC_NM2160 94 /* NeoMagic NM2160 */
#define FB_ACCEL_NEOMAGIC_NM2200 95 /* NeoMagic NM2200 */
#define FB_ACCEL_NEOMAGIC_NM2230 96 /* NeoMagic NM2230 */
#define FB_ACCEL_NEOMAGIC_NM2360 97 /* NeoMagic NM2360 */
#define FB_ACCEL_NEOMAGIC_NM2380 98 /* NeoMagic NM2380 */
#define FB_ACCEL_PXA3XX 99 /* PXA3xx */
 
#define FB_ACCEL_SAVAGE4 0x80 /* S3 Savage4 */
#define FB_ACCEL_SAVAGE3D 0x81 /* S3 Savage3D */
#define FB_ACCEL_SAVAGE3D_MV 0x82 /* S3 Savage3D-MV */
#define FB_ACCEL_SAVAGE2000 0x83 /* S3 Savage2000 */
#define FB_ACCEL_SAVAGE_MX_MV 0x84 /* S3 Savage/MX-MV */
#define FB_ACCEL_SAVAGE_MX 0x85 /* S3 Savage/MX */
#define FB_ACCEL_SAVAGE_IX_MV 0x86 /* S3 Savage/IX-MV */
#define FB_ACCEL_SAVAGE_IX 0x87 /* S3 Savage/IX */
#define FB_ACCEL_PROSAVAGE_PM 0x88 /* S3 ProSavage PM133 */
#define FB_ACCEL_PROSAVAGE_KM 0x89 /* S3 ProSavage KM133 */
#define FB_ACCEL_S3TWISTER_P 0x8a /* S3 Twister */
#define FB_ACCEL_S3TWISTER_K 0x8b /* S3 TwisterK */
#define FB_ACCEL_SUPERSAVAGE 0x8c /* S3 Supersavage */
#define FB_ACCEL_PROSAVAGE_DDR 0x8d /* S3 ProSavage DDR */
#define FB_ACCEL_PROSAVAGE_DDRK 0x8e /* S3 ProSavage DDR-K */
 
#define FB_ACCEL_PUV3_UNIGFX 0xa0 /* PKUnity-v3 Unigfx */
 
struct fb_fix_screeninfo {
char id[16]; /* identification string eg "TT Builtin" */
unsigned long smem_start; /* Start of frame buffer mem */
/* (physical address) */
__u32 smem_len; /* Length of frame buffer mem */
__u32 type; /* see FB_TYPE_* */
__u32 type_aux; /* Interleave for interleaved Planes */
__u32 visual; /* see FB_VISUAL_* */
__u16 xpanstep; /* zero if no hardware panning */
__u16 ypanstep; /* zero if no hardware panning */
__u16 ywrapstep; /* zero if no hardware ywrap */
__u32 line_length; /* length of a line in bytes */
unsigned long mmio_start; /* Start of Memory Mapped I/O */
/* (physical address) */
__u32 mmio_len; /* Length of Memory Mapped I/O */
__u32 accel; /* Indicate to driver which */
/* specific chip/card we have */
__u16 reserved[3]; /* Reserved for future compatibility */
};
 
/* Interpretation of offset for color fields: All offsets are from the right,
* inside a "pixel" value, which is exactly 'bits_per_pixel' wide (meaning you
* can use the offset as the right-hand argument of <<). The resulting pixel
* is a bit stream and is written to video memory unmodified.
*
* For pseudocolor: offset and length should be the same for all color
* components. Offset specifies the position of the least significant bit
* of the palette index in a pixel value. Length indicates the number
* of available palette entries (i.e. # of entries = 1 << length).
*/
struct fb_bitfield {
__u32 offset; /* beginning of bitfield */
__u32 length; /* length of bitfield */
__u32 msb_right; /* != 0 : Most significant bit is */
/* right */
};
 
#define FB_NONSTD_HAM 1 /* Hold-And-Modify (HAM) */
#define FB_NONSTD_REV_PIX_IN_B 2 /* order of pixels in each byte is reversed */
 
#define FB_ACTIVATE_NOW 0 /* set values immediately (or vbl)*/
#define FB_ACTIVATE_NXTOPEN 1 /* activate on next open */
#define FB_ACTIVATE_TEST 2 /* don't set, round up impossible */
#define FB_ACTIVATE_MASK 15
/* values */
#define FB_ACTIVATE_VBL 16 /* activate values on next vbl */
#define FB_CHANGE_CMAP_VBL 32 /* change colormap on vbl */
#define FB_ACTIVATE_ALL 64 /* change all VCs on this fb */
#define FB_ACTIVATE_FORCE 128 /* force apply even when no change*/
#define FB_ACTIVATE_INV_MODE 256 /* invalidate videomode */
 
#define FB_ACCELF_TEXT 1 /* (OBSOLETE) see fb_info.flags and vc_mode */
 
#define FB_SYNC_HOR_HIGH_ACT 1 /* horizontal sync high active */
#define FB_SYNC_VERT_HIGH_ACT 2 /* vertical sync high active */
#define FB_SYNC_EXT 4 /* external sync */
#define FB_SYNC_COMP_HIGH_ACT 8 /* composite sync high active */
#define FB_SYNC_BROADCAST 16 /* broadcast video timings */
/* vtotal = 144d/288n/576i => PAL */
/* vtotal = 121d/242n/484i => NTSC */
#define FB_SYNC_ON_GREEN 32 /* sync on green */
 
#define FB_VMODE_NONINTERLACED 0 /* non interlaced */
#define FB_VMODE_INTERLACED 1 /* interlaced */
#define FB_VMODE_DOUBLE 2 /* double scan */
#define FB_VMODE_ODD_FLD_FIRST 4 /* interlaced: top line first */
#define FB_VMODE_MASK 255
 
#define FB_VMODE_YWRAP 256 /* ywrap instead of panning */
#define FB_VMODE_SMOOTH_XPAN 512 /* smooth xpan possible (internally used) */
#define FB_VMODE_CONUPDATE 512 /* don't update x/yoffset */
 
/*
* Display rotation support
*/
#define FB_ROTATE_UR 0
#define FB_ROTATE_CW 1
#define FB_ROTATE_UD 2
#define FB_ROTATE_CCW 3
 
#define PICOS2KHZ(a) (1000000000UL/(a))
#define KHZ2PICOS(a) (1000000000UL/(a))
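Worked example for the reciprocal macros above: KHZ2PICOS(25175) == 39722,
i.e. a 25175 kHz (VGA 640x480@60) dot clock has a pixel period of roughly
39722 ps; the round trip through PICOS2KHZ() can lose one unit to integer
truncation.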
 
struct fb_var_screeninfo {
__u32 xres; /* visible resolution */
__u32 yres;
__u32 xres_virtual; /* virtual resolution */
__u32 yres_virtual;
__u32 xoffset; /* offset from virtual to visible */
__u32 yoffset; /* resolution */
 
__u32 bits_per_pixel; /* guess what */
__u32 grayscale; /* != 0 Graylevels instead of colors */
 
struct fb_bitfield red; /* bitfield in fb mem if true color, */
struct fb_bitfield green; /* else only length is significant */
struct fb_bitfield blue;
struct fb_bitfield transp; /* transparency */
 
__u32 nonstd; /* != 0 Non standard pixel format */
 
__u32 activate; /* see FB_ACTIVATE_* */
 
__u32 height; /* height of picture in mm */
__u32 width; /* width of picture in mm */
 
__u32 accel_flags; /* (OBSOLETE) see fb_info.flags */
 
/* Timing: All values in pixclocks, except pixclock (of course) */
__u32 pixclock; /* pixel clock in ps (pico seconds) */
__u32 left_margin; /* time from sync to picture */
__u32 right_margin; /* time from picture to sync */
__u32 upper_margin; /* time from sync to picture */
__u32 lower_margin;
__u32 hsync_len; /* length of horizontal sync */
__u32 vsync_len; /* length of vertical sync */
__u32 sync; /* see FB_SYNC_* */
__u32 vmode; /* see FB_VMODE_* */
__u32 rotate; /* angle we rotate counter clockwise */
__u32 reserved[5]; /* Reserved for future compatibility */
};
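A sketch (hypothetical helper, not part of this header) of packing a
truecolor pixel from the bitfield descriptions above, e.g. RGB565 with
red.offset == 11, green.offset == 5 and blue.offset == 0:

static inline __u32 example_pack_pixel(const struct fb_var_screeninfo *var,
					__u32 r, __u32 g, __u32 b)
{
	/* each component is assumed already scaled to its .length bits */
	return (r << var->red.offset) |
	       (g << var->green.offset) |
	       (b << var->blue.offset);
}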
 
struct fb_cmap {
__u32 start; /* First entry */
__u32 len; /* Number of entries */
__u16 *red; /* Red values */
__u16 *green;
__u16 *blue;
__u16 *transp; /* transparency, can be NULL */
};
 
struct fb_con2fbmap {
__u32 console;
__u32 framebuffer;
};
 
/* VESA Blanking Levels */
#define VESA_NO_BLANKING 0
#define VESA_VSYNC_SUSPEND 1
#define VESA_HSYNC_SUSPEND 2
#define VESA_POWERDOWN 3
 
 
enum {
/* screen: unblanked, hsync: on, vsync: on */
FB_BLANK_UNBLANK = VESA_NO_BLANKING,
 
/* screen: blanked, hsync: on, vsync: on */
FB_BLANK_NORMAL = VESA_NO_BLANKING + 1,
 
/* screen: blanked, hsync: on, vsync: off */
FB_BLANK_VSYNC_SUSPEND = VESA_VSYNC_SUSPEND + 1,
 
/* screen: blanked, hsync: off, vsync: on */
FB_BLANK_HSYNC_SUSPEND = VESA_HSYNC_SUSPEND + 1,
 
/* screen: blanked, hsync: off, vsync: off */
FB_BLANK_POWERDOWN = VESA_POWERDOWN + 1
};
 
#define FB_VBLANK_VBLANKING 0x001 /* currently in a vertical blank */
#define FB_VBLANK_HBLANKING 0x002 /* currently in a horizontal blank */
#define FB_VBLANK_HAVE_VBLANK 0x004 /* vertical blanks can be detected */
#define FB_VBLANK_HAVE_HBLANK 0x008 /* horizontal blanks can be detected */
#define FB_VBLANK_HAVE_COUNT 0x010 /* global retrace counter is available */
#define FB_VBLANK_HAVE_VCOUNT 0x020 /* the vcount field is valid */
#define FB_VBLANK_HAVE_HCOUNT 0x040 /* the hcount field is valid */
#define FB_VBLANK_VSYNCING 0x080 /* currently in a vsync */
#define FB_VBLANK_HAVE_VSYNC 0x100 /* vertical syncs can be detected */
 
struct fb_vblank {
__u32 flags; /* FB_VBLANK flags */
__u32 count; /* counter of retraces since boot */
__u32 vcount; /* current scanline position */
__u32 hcount; /* current scandot position */
__u32 reserved[4]; /* reserved for future compatibility */
};
 
/* Internal HW accel */
#define ROP_COPY 0
#define ROP_XOR 1
 
struct fb_copyarea {
__u32 dx;
__u32 dy;
__u32 width;
__u32 height;
__u32 sx;
__u32 sy;
};
 
struct fb_fillrect {
__u32 dx; /* screen-relative */
__u32 dy;
__u32 width;
__u32 height;
__u32 color;
__u32 rop;
};
 
struct fb_image {
__u32 dx; /* Where to place image */
__u32 dy;
__u32 width; /* Size of image */
__u32 height;
__u32 fg_color; /* Only used when a mono bitmap */
__u32 bg_color;
__u8 depth; /* Depth of the image */
const char *data; /* Pointer to image data */
struct fb_cmap cmap; /* color map info */
};
 
/*
* hardware cursor control
*/
 
#define FB_CUR_SETIMAGE 0x01
#define FB_CUR_SETPOS 0x02
#define FB_CUR_SETHOT 0x04
#define FB_CUR_SETCMAP 0x08
#define FB_CUR_SETSHAPE 0x10
#define FB_CUR_SETSIZE 0x20
#define FB_CUR_SETALL 0xFF
 
struct fbcurpos {
__u16 x, y;
};
 
struct fb_cursor {
__u16 set; /* what to set */
__u16 enable; /* cursor on/off */
__u16 rop; /* bitop operation */
const char *mask; /* cursor mask bits */
struct fbcurpos hot; /* cursor hot spot */
struct fb_image image; /* Cursor image */
};
 
#ifdef CONFIG_FB_BACKLIGHT
/* Settings for the generic backlight code */
#define FB_BACKLIGHT_LEVELS 128
#define FB_BACKLIGHT_MAX 0xFF
#endif
 
//#ifdef __KERNEL__
 
//#include <linux/fs.h>
//#include <linux/init.h>
//#include <linux/device.h>
//#include <linux/workqueue.h>
//#include <linux/notifier.h>
#include <linux/list.h>
#include <linux/backlight.h>
#include <linux/mutex.h>
//#include <linux/backlight.h>
#include <linux/slab.h>
#include <asm/io.h>
//#include <asm/io.h>
 
struct vm_area_struct;
struct fb_info;
175,27 → 569,7
u32 flags;
};
 
#ifdef CONFIG_FB_NOTIFY
extern int fb_register_client(struct notifier_block *nb);
extern int fb_unregister_client(struct notifier_block *nb);
extern int fb_notifier_call_chain(unsigned long val, void *v);
#else
static inline int fb_register_client(struct notifier_block *nb)
{
return 0;
};
 
static inline int fb_unregister_client(struct notifier_block *nb)
{
return 0;
};
 
static inline int fb_notifier_call_chain(unsigned long val, void *v)
{
return 0;
};
#endif
 
/*
* Pixmap structure definition
*
676,13 → 1050,6
}
 
/* drivers/video/fb_defio.c */
extern void fb_deferred_io_init(struct fb_info *info);
extern void fb_deferred_io_open(struct fb_info *info,
struct inode *inode,
struct file *file);
extern void fb_deferred_io_cleanup(struct fb_info *info);
extern int fb_deferred_io_fsync(struct file *file, loff_t start,
loff_t end, int datasync);
 
static inline bool fb_be_math(struct fb_info *info)
{
/drivers/include/linux/gfp.h
2,7 → 2,7
#define __LINUX_GFP_H
 
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
 
29,7 → 29,7
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_ATOMIC 0x80000u
#define ___GFP_ACCOUNT 0x100000u
#define ___GFP_NOACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
72,15 → 72,11
*
* __GFP_THISNODE forces the allocation to be satisified from the requested
* node with no fallbacks or placement policy enforcements.
*
* __GFP_ACCOUNT causes the allocation to be accounted to kmemcg (only relevant
* to kmem allocations).
*/
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
 
/*
* Watermark modifiers -- controls access to emergency reserves
107,6 → 103,7
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT)
 
/*
* Reclaim modifiers
199,9 → 196,6
* GFP_KERNEL is typical for kernel-internal allocations. The caller requires
* ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
*
* GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
* accounted to kmemcg.
*
* GFP_NOWAIT is for kernel allocations that should not stall for direct
* reclaim, start physical IO or use any filesystem callback.
*
241,7 → 235,6
*/
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO (__GFP_RECLAIM)
#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
256,9 → 249,16
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
~__GFP_KSWAPD_RECLAIM)
 
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3
 
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
 
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
}
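
As a usage sketch (struct pool, pool_full() and wait_for_space() are hypothetical, not from this tree): a caller can branch on gfpflags_allow_blocking() to decide whether sleeping is permitted for the given mask.

static int wait_for_space(struct pool *p, gfp_t gfp)
{
	while (pool_full(p)) {
		if (!gfpflags_allow_blocking(gfp))
			return -EAGAIN;	/* atomic caller: must not sleep */
		msleep(1);		/* blocking caller: wait and retry */
	}
	return 0;
}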
 
#ifdef CONFIG_HIGHMEM
/drivers/include/linux/ioport.h
181,13 → 181,5
}
 
 
/* Convenience shorthand with allocation */
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0)
#define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
#define request_mem_region_exclusive(start,n,name) \
__request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE)
#define rename_region(region, newname) do { (region)->name = (newname); } while (0)
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */
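
The request_region() family shown above (still present on the 6936 side) follows the usual claim/use/release pattern. A minimal sketch with made-up addresses, assuming the matching release_mem_region() helper is available:

#define MYDEV_BASE 0xfeb00000
#define MYDEV_SIZE 0x1000

static int mydev_claim(void)
{
	if (!request_mem_region(MYDEV_BASE, MYDEV_SIZE, "mydev"))
		return -EBUSY;		/* range already owned by someone else */
	/* ... ioremap and program the device ... */
	release_mem_region(MYDEV_BASE, MYDEV_SIZE);
	return 0;
}
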
/drivers/include/linux/printk.h
68,16 → 68,14
 
/*
* Dummy printk for disabled debugging statements to use whilst maintaining
* gcc's format checking.
* gcc's format and side-effect checking.
*/
#define no_printk(fmt, ...) \
do { \
if (0) \
printk(fmt, ##__VA_ARGS__); \
} while (0)
static inline __printf(1, 2)
int no_printk(const char *fmt, ...)
{
return 0;
}
 
 
 
__printf(1, 2) int dbgprintf(const char *fmt, ...);
 
#define printk(fmt, arg...) dbgprintf(fmt , ##arg)
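
Either form of no_printk() lets a disabled debug macro keep gcc's printf-format checking. A minimal sketch (mydrv_dbg and MYDRV_DEBUG are hypothetical):

/* Conditional debug macro built on no_printk(). */
#ifdef MYDRV_DEBUG
#define mydrv_dbg(fmt, ...) printk(fmt, ##__VA_ARGS__)
#else
#define mydrv_dbg(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif

/* mydrv_dbg("count=%s\n", 42) still draws a -Wformat warning
 * even when MYDRV_DEBUG is unset. */
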
/drivers/include/linux/rbtree.h
50,7 → 50,7
#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
 
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
 
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) \
/drivers/include/linux/component.h
1,48 → 1,39
#ifndef COMPONENT_H
#define COMPONENT_H
 
#include <linux/stddef.h>
 
struct device;
 
struct component_ops {
int (*bind)(struct device *comp, struct device *master,
void *master_data);
void (*unbind)(struct device *comp, struct device *master,
void *master_data);
int (*bind)(struct device *, struct device *, void *);
void (*unbind)(struct device *, struct device *, void *);
};
 
int component_add(struct device *, const struct component_ops *);
void component_del(struct device *, const struct component_ops *);
 
int component_bind_all(struct device *master, void *master_data);
void component_unbind_all(struct device *master, void *master_data);
int component_bind_all(struct device *, void *);
void component_unbind_all(struct device *, void *);
 
struct master;
 
struct component_master_ops {
int (*bind)(struct device *master);
void (*unbind)(struct device *master);
int (*add_components)(struct device *, struct master *);
int (*bind)(struct device *);
void (*unbind)(struct device *);
};
 
int component_master_add(struct device *, const struct component_master_ops *);
void component_master_del(struct device *,
const struct component_master_ops *);
 
int component_master_add_child(struct master *master,
int (*compare)(struct device *, void *), void *compare_data);
 
struct component_match;
 
int component_master_add_with_match(struct device *,
const struct component_master_ops *, struct component_match *);
void component_match_add_release(struct device *master,
struct component_match **matchptr,
void (*release)(struct device *, void *),
void component_match_add(struct device *, struct component_match **,
int (*compare)(struct device *, void *), void *compare_data);
 
static inline void component_match_add(struct device *master,
struct component_match **matchptr,
int (*compare)(struct device *, void *), void *compare_data)
{
component_match_add_release(master, matchptr, NULL, compare,
compare_data);
}
 
#endif
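
For orientation, a master driver using the match API kept by this revision looks roughly like this (mydrm_* names are hypothetical, and mydrm_master_ops is assumed defined elsewhere):

static int mydrm_compare(struct device *dev, void *data)
{
	return dev == data;	/* match one specific child device */
}

static int mydrm_probe(struct device *dev, struct device *child)
{
	struct component_match *match = NULL;

	component_match_add(dev, &match, mydrm_compare, child);
	return component_master_add_with_match(dev, &mydrm_master_ops, match);
}
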
/drivers/include/linux/i2c.h
30,7 → 30,6
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
#include <linux/mutex.h>
#include <linux/swab.h> /* for swab16 */
#include <linux/jiffies.h>
 
extern struct bus_type i2c_bus_type;
/drivers/include/linux/mod_devicetable.h
404,7 → 404,7
* For Hyper-V devices we use the device guid as the id.
*/
struct hv_vmbus_device_id {
uuid_le guid;
__u8 guid[16];
kernel_ulong_t driver_data; /* Data private to the driver */
};
 
/drivers/include/linux/pm_runtime.h
10,7 → 10,6
#define _LINUX_PM_RUNTIME_H
 
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm.h>
 
#include <linux/jiffies.h>
39,7 → 38,6
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
extern int pm_runtime_get_if_in_use(struct device *dev);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
144,10 → 142,6
{
return -ENOSYS;
}
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
return -EINVAL;
}
static inline int __pm_runtime_set_status(struct device *dev,
unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
/drivers/include/linux/poison.h
27,15 → 27,11
* Magic number "tsta" to indicate a static timer initializer
* for the object debugging code.
*/
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
#define TIMER_ENTRY_STATIC ((void *) 0x74737461)
 
/********** mm/debug-pagealloc.c **********/
#define PAGE_POISON 0xaa
 
/********** mm/page_alloc.c ************/
 
#define TAIL_MAPPING ((void *) 0x400 + POISON_POINTER_DELTA)
 
/********** mm/slab.c **********/
/*
* Magic nums for obj red zoning.
/drivers/include/linux/pwm.h
179,8 → 179,6
void pwm_put(struct pwm_device *pwm);
 
struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id);
struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
const char *con_id);
void devm_pwm_put(struct device *dev, struct pwm_device *pwm);
 
bool pwm_can_sleep(struct pwm_device *pwm);
194,36 → 192,11
{
return NULL;
}
 
static inline int pwmchip_add(struct pwm_chip *chip)
{
return -EINVAL;
}
 
static inline int pwmchip_add_inversed(struct pwm_chip *chip)
{
return -EINVAL;
}
 
static inline int pwmchip_remove(struct pwm_chip *chip)
{
return -EINVAL;
}
 
static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label)
{
return ERR_PTR(-ENODEV);
}
 
static inline struct pwm_device *pwm_get(struct device *dev,
const char *consumer)
{
return ERR_PTR(-ENODEV);
}
 
 
static inline void pwm_put(struct pwm_device *pwm)
{
}
233,8 → 206,6
{
return ERR_PTR(-ENODEV);
}
 
 
static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
{
}
/drivers/include/linux/rculist.h
179,32 → 179,33
}
 
/**
* __list_splice_init_rcu - join an RCU-protected list into an existing list.
* list_splice_init_rcu - splice an RCU-protected list into an existing list.
* @list: the RCU-protected list to splice
* @prev: points to the last element of the existing list
* @next: points to the first element of the existing list
* @head: the place in the list to splice the first list into
* @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
*
* The list pointed to by @prev and @next can be RCU-read traversed
* concurrently with this function.
* @head can be RCU-read traversed concurrently with this function.
*
* Note that this function blocks.
*
* Important note: the caller must take whatever action is necessary to prevent
* any other updates to the existing list. In principle, it is possible to
* modify the list as soon as sync() begins execution. If this sort of thing
* becomes necessary, an alternative version based on call_rcu() could be
* created. But only if -really- needed -- there is no shortage of RCU API
* members.
* Important note: the caller must take whatever action is necessary to
* prevent any other updates to @head. In principle, it is possible
* to modify the list as soon as sync() begins execution.
* If this sort of thing becomes necessary, an alternative version
* based on call_rcu() could be created. But only if -really-
* needed -- there is no shortage of RCU API members.
*/
static inline void __list_splice_init_rcu(struct list_head *list,
struct list_head *prev,
struct list_head *next,
static inline void list_splice_init_rcu(struct list_head *list,
struct list_head *head,
void (*sync)(void))
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
struct list_head *at = head->next;
 
if (list_empty(list))
return;
 
/*
* "first" and "last" tracking list, so initialize it. RCU readers
* have access to this list, so we must use INIT_LIST_HEAD_RCU()
230,43 → 231,13
* this function.
*/
 
last->next = next;
rcu_assign_pointer(list_next_rcu(prev), first);
first->prev = prev;
next->prev = last;
last->next = at;
rcu_assign_pointer(list_next_rcu(head), first);
first->prev = head;
at->prev = last;
}
 
/**
* list_splice_init_rcu - splice an RCU-protected list into an existing list,
* designed for stacks.
* @list: the RCU-protected list to splice
* @head: the place in the existing list to splice the first list into
* @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
*/
static inline void list_splice_init_rcu(struct list_head *list,
struct list_head *head,
void (*sync)(void))
{
if (!list_empty(list))
__list_splice_init_rcu(list, head, head->next, sync);
}
 
/**
* list_splice_tail_init_rcu - splice an RCU-protected list into an existing
* list, designed for queues.
* @list: the RCU-protected list to splice
* @head: the place in the existing list to splice the first list into
* @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
*/
static inline void list_splice_tail_init_rcu(struct list_head *list,
struct list_head *head,
void (*sync)(void))
{
if (!list_empty(list))
__list_splice_init_rcu(list, head->prev, head, sync);
}
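
In both revisions the public entry point has the same shape: the caller supplies a sync callback that blocks until all readers are done with the source list. A minimal sketch (pending/active are hypothetical names; assumes synchronize_rcu() is available in this port):

static LIST_HEAD(pending);	/* updater-private staging list */
static LIST_HEAD(active);	/* traversed by readers under rcu_read_lock() */

static void publish_pending(void)
{
	/* May block inside synchronize_rcu(); never call from atomic context. */
	list_splice_init_rcu(&pending, &active, synchronize_rcu);
}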
 
/**
* list_entry_rcu - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
334,42 → 305,6
pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 
/**
* list_entry_lockless - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* This primitive may safely run concurrently with the _rcu list-mutation
* primitives such as list_add_rcu(), but requires some implicit RCU
* read-side guarding. One example is running within a special
* exception-time environment where preemption is disabled and where
* lockdep cannot be invoked (in which case updaters must use RCU-sched,
* as in synchronize_sched(), call_rcu_sched(), and friends). Another
* example is when items are added to the list, but never deleted.
*/
#define list_entry_lockless(ptr, type, member) \
container_of((typeof(ptr))lockless_dereference(ptr), type, member)
 
/**
* list_for_each_entry_lockless - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*
* This primitive may safely run concurrently with the _rcu list-mutation
* primitives such as list_add_rcu(), but requires some implicit RCU
* read-side guarding. One example is running within a special
* exception-time environment where preemption is disabled and where
* lockdep cannot be invoked (in which case updaters must use RCU-sched,
* as in synchronize_sched(), call_rcu_sched(), and friends). Another
* example is when items are added to the list, but never deleted.
*/
#define list_for_each_entry_lockless(pos, head, member) \
for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
&pos->member != (head); \
pos = list_entry_lockless(pos->member.next, typeof(*pos), member))
 
/**
* list_for_each_entry_continue_rcu - continue iteration over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
/drivers/include/linux/seq_file.h
5,10 → 5,6
#include <linux/string.h>
#include <linux/bug.h>
#include <linux/mutex.h>
struct file;
struct path;
struct inode;
struct dentry;
 
struct seq_file {
char *buf;
/drivers/include/linux/slab.h
86,11 → 86,6
#else
# define SLAB_FAILSLAB 0x00000000UL
#endif
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
# define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */
#else
# define SLAB_ACCOUNT 0x00000000UL
#endif
 
/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
118,14 → 113,14
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
 
static inline void *krealloc(const void *p, size_t new_size, gfp_t flags)
static inline void *krealloc(void *p, size_t new_size, gfp_t flags)
{
return __builtin_realloc((void*)p, new_size);
return __builtin_realloc(p, new_size);
}
 
static inline void kfree(const void *p)
static inline void kfree(void *p)
{
__builtin_free((void*)p);
__builtin_free(p);
}
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
/drivers/include/linux/err.h
37,7 → 37,7
 
static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
{
return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr);
return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}
 
/**
/drivers/include/linux/byteorder/little_endian.h
1,7 → 1,108
#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H
#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H
 
#include <uapi/linux/byteorder/little_endian.h>
#ifndef __LITTLE_ENDIAN
#define __LITTLE_ENDIAN 1234
#endif
#ifndef __LITTLE_ENDIAN_BITFIELD
#define __LITTLE_ENDIAN_BITFIELD
#endif
 
#include <linux/types.h>
#include <linux/swab.h>
 
#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
#define __constant_htons(x) ((__force __be16)___constant_swab16((x)))
#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x))
#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x))
#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x))
#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x))
#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x))
#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x))
#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x))
#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x)))
#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x))
#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x)))
#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x))
#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x)))
#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x))
#define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
#define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
#define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
#define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
#define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
#define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
#define __cpu_to_be64(x) ((__force __be64)__swab64((x)))
#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
#define __cpu_to_be32(x) ((__force __be32)__swab32((x)))
#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
#define __cpu_to_be16(x) ((__force __be16)__swab16((x)))
#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
 
static inline __le64 __cpu_to_le64p(const __u64 *p)
{
return (__force __le64)*p;
}
static inline __u64 __le64_to_cpup(const __le64 *p)
{
return (__force __u64)*p;
}
static inline __le32 __cpu_to_le32p(const __u32 *p)
{
return (__force __le32)*p;
}
static inline __u32 __le32_to_cpup(const __le32 *p)
{
return (__force __u32)*p;
}
static inline __le16 __cpu_to_le16p(const __u16 *p)
{
return (__force __le16)*p;
}
static inline __u16 __le16_to_cpup(const __le16 *p)
{
return (__force __u16)*p;
}
static inline __be64 __cpu_to_be64p(const __u64 *p)
{
return (__force __be64)__swab64p(p);
}
static inline __u64 __be64_to_cpup(const __be64 *p)
{
return __swab64p((__u64 *)p);
}
static inline __be32 __cpu_to_be32p(const __u32 *p)
{
return (__force __be32)__swab32p(p);
}
static inline __u32 __be32_to_cpup(const __be32 *p)
{
return __swab32p((__u32 *)p);
}
static inline __be16 __cpu_to_be16p(const __u16 *p)
{
return (__force __be16)__swab16p(p);
}
static inline __u16 __be16_to_cpup(const __be16 *p)
{
return __swab16p((__u16 *)p);
}
#define __cpu_to_le64s(x) do { (void)(x); } while (0)
#define __le64_to_cpus(x) do { (void)(x); } while (0)
#define __cpu_to_le32s(x) do { (void)(x); } while (0)
#define __le32_to_cpus(x) do { (void)(x); } while (0)
#define __cpu_to_le16s(x) do { (void)(x); } while (0)
#define __le16_to_cpus(x) do { (void)(x); } while (0)
#define __cpu_to_be64s(x) __swab64s((x))
#define __be64_to_cpus(x) __swab64s((x))
#define __cpu_to_be32s(x) __swab32s((x))
#define __be32_to_cpus(x) __swab32s((x))
#define __cpu_to_be16s(x) __swab16s((x))
#define __be16_to_cpus(x) __swab16s((x))
 
#ifdef __KERNEL__
#include <linux/byteorder/generic.h>
#endif
 
#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */
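
On a little-endian build such as this one, the __le* conversions above compile to the identity and only the __be* ones swap. A short sketch of decoding a fixed-endianness header (struct blob_header is hypothetical):

struct blob_header {
	__le32 magic;	/* stored little-endian on the medium */
	__be16 port;	/* stored big-endian (network order) */
};

static __u32 blob_magic(const struct blob_header *h)
{
	return __le32_to_cpu(h->magic);	/* no-op on x86 */
}

static __u16 blob_port(const struct blob_header *h)
{
	return __be16_to_cpu(h->port);	/* byte swap on x86 */
}
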
/drivers/include/linux/hashtable.h
16,10 → 16,6
struct hlist_head name[1 << (bits)] = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
 
#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)] __read_mostly = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
 
#define DECLARE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)]
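
A quick illustration of the surviving macro (struct item and the table name are hypothetical): the second argument is the log2 of the bucket count.

struct item {
	int id;
	struct hlist_node node;
};

DEFINE_HASHTABLE(items, 3);	/* 1 << 3 = 8 buckets, all HLIST_HEAD_INIT */

/* Insertion and lookup then use the usual helpers from the same header:
 *   hash_add(items, &it->node, it->id);
 *   hash_for_each_possible(items, it, node, key) { ... }
 */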
 
/drivers/include/linux/sysrq.h
1,14 → 1,2
/* -*- linux-c -*-
*
* $Id: sysrq.h,v 1.3 1997/07/17 11:54:33 mj Exp $
*
* Linux Magic System Request Key Hacks
*
* (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*
* (c) 2000 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
* overhauled to use key registration
* based upon discusions in irc://irc.openprojects.net/#kernelnewbies
*/
 
// stub
/drivers/include/uapi/linux/byteorder/little_endian.h
File deleted
/drivers/include/uapi/linux/fb.h
File deleted
/drivers/include/uapi/drm/drm.h
54,7 → 54,6
typedef uint32_t __u32;
typedef int64_t __s64;
typedef uint64_t __u64;
typedef size_t __kernel_size_t;
typedef unsigned long drm_handle_t;
 
#endif
130,11 → 129,11
int version_major; /**< Major version */
int version_minor; /**< Minor version */
int version_patchlevel; /**< Patch level */
__kernel_size_t name_len; /**< Length of name buffer */
size_t name_len; /**< Length of name buffer */
char __user *name; /**< Name of driver */
__kernel_size_t date_len; /**< Length of date buffer */
size_t date_len; /**< Length of date buffer */
char __user *date; /**< User-space buffer to hold date */
__kernel_size_t desc_len; /**< Length of desc buffer */
size_t desc_len; /**< Length of desc buffer */
char __user *desc; /**< User-space buffer to hold desc */
};
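
The length/pointer pairs imply the usual two-pass query: call DRM_IOCTL_VERSION once with zeroed lengths to learn the buffer sizes, then again with buffers attached. A hedged userspace sketch fetching only the driver name (drm_driver_name() is hypothetical):

static char *drm_driver_name(int fd)
{
	struct drm_version v = {0};

	if (ioctl(fd, DRM_IOCTL_VERSION, &v))	/* pass 1: learn lengths */
		return NULL;
	v.name = malloc(v.name_len + 1);
	v.date_len = v.desc_len = 0;		/* only fetch the name */
	if (!v.name || ioctl(fd, DRM_IOCTL_VERSION, &v)) {
		free(v.name);
		return NULL;			/* pass 2 failed */
	}
	v.name[v.name_len] = '\0';
	return v.name;
}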
 
144,7 → 143,7
* \sa drmGetBusid() and drmSetBusId().
*/
struct drm_unique {
__kernel_size_t unique_len; /**< Length of unique */
size_t unique_len; /**< Length of unique */
char __user *unique; /**< Unique name for driver instantiation */
};
 
/drivers/include/uapi/drm/drm_fourcc.h
24,7 → 24,7
#ifndef DRM_FOURCC_H
#define DRM_FOURCC_H
 
#include "drm.h"
#include <linux/types.h>
 
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
((__u32)(c) << 16) | ((__u32)(d) << 24))
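
The macro packs four ASCII characters with the first in the lowest byte; for instance:

/* DRM_FORMAT_XRGB8888 == fourcc_code('X', 'R', '2', '4') == 0x34325258 */
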
225,7 → 225,7
* - multiple of 128 pixels for the width
* - multiple of 32 pixels for the height
*
* For more information: see https://linuxtv.org/downloads/v4l-dvb-apis/re32.html
* For more information: see http://linuxtv.org/downloads/v4l-dvb-apis/re32.html
*/
#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
 
/drivers/include/uapi/drm/drm_mode.h
27,7 → 27,7
#ifndef _DRM_MODE_H
#define _DRM_MODE_H
 
#include "drm.h"
#include <linux/types.h>
 
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
526,14 → 526,14
 
/* create a dumb scanout buffer */
struct drm_mode_create_dumb {
__u32 height;
__u32 width;
__u32 bpp;
__u32 flags;
uint32_t height;
uint32_t width;
uint32_t bpp;
uint32_t flags;
/* handle, pitch, size will be returned */
__u32 handle;
__u32 pitch;
__u64 size;
uint32_t handle;
uint32_t pitch;
uint64_t size;
};
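
Only width, height and bpp are inputs; handle, pitch and size come back from the kernel. A minimal userspace sketch (fd is an open DRM node; error handling elided):

struct drm_mode_create_dumb creq = {
	.width  = 1024,
	.height = 768,
	.bpp    = 32,		/* XRGB8888 scanout */
};

if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) == 0) {
	/* creq.handle, creq.pitch and creq.size are now valid */
}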
 
/* set up for mmap of a dumb scanout buffer */
550,7 → 550,7
};
 
struct drm_mode_destroy_dumb {
__u32 handle;
uint32_t handle;
};
 
/* page-flip flags are valid, plus: */
/drivers/include/uapi/drm/i915_drm.h
27,7 → 27,7
#ifndef _UAPI_I915_DRM_H_
#define _UAPI_I915_DRM_H_
 
#include "drm.h"
#include <drm/drm.h>
 
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
356,7 → 356,6
#define I915_PARAM_EU_TOTAL 34
#define I915_PARAM_HAS_GPU_RESET 35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN 37
 
typedef struct drm_i915_getparam {
__s32 param;
683,12 → 682,8
__u64 alignment;
 
/**
* When the EXEC_OBJECT_PINNED flag is specified this is populated by
* the user with the GTT offset at which this object will be pinned.
* When the I915_EXEC_NO_RELOC flag is specified this must contain the
* presumed_offset of the object.
* During execbuffer2 the kernel populates it with the value of the
* current GTT offset of the object, for future presumed_offset writes.
* Returned value of the updated offset of the object, for future
* presumed_offset writes.
*/
__u64 offset;
 
696,8 → 691,7
#define EXEC_OBJECT_NEEDS_GTT (1<<1)
#define EXEC_OBJECT_WRITE (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define EXEC_OBJECT_PINNED (1<<4)
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1)
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_SUPPORTS_48B_ADDRESS<<1)
__u64 flags;
 
__u64 rsvd1;
1085,12 → 1079,6
};
 
struct drm_i915_reg_read {
/*
* Register offset.
* For 64bit wide registers where the upper 32bits don't immediately
* follow the lower 32bits, the offset of the lower 32bits must
* be specified
*/
__u64 offset;
__u64 val; /* Return value */
};
1139,7 → 1127,6
__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
__u64 value;
};
 
/drivers/include/uapi/drm/radeon_drm.h
793,9 → 793,9
#define RADEON_GEM_DOMAIN_VRAM 0x4
 
struct drm_radeon_gem_info {
__u64 gart_size;
__u64 vram_size;
__u64 vram_visible;
uint64_t gart_size;
uint64_t vram_size;
uint64_t vram_visible;
};
 
#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
807,11 → 807,11
#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
 
struct drm_radeon_gem_create {
__u64 size;
__u64 alignment;
__u32 handle;
__u32 initial_domain;
__u32 flags;
uint64_t size;
uint64_t alignment;
uint32_t handle;
uint32_t initial_domain;
uint32_t flags;
};
 
/*
825,10 → 825,10
#define RADEON_GEM_USERPTR_REGISTER (1 << 3)
 
struct drm_radeon_gem_userptr {
__u64 addr;
__u64 size;
__u32 flags;
__u32 handle;
uint64_t addr;
uint64_t size;
uint32_t flags;
uint32_t handle;
};
 
#define RADEON_TILING_MACRO 0x1
850,72 → 850,72
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf
 
struct drm_radeon_gem_set_tiling {
__u32 handle;
__u32 tiling_flags;
__u32 pitch;
uint32_t handle;
uint32_t tiling_flags;
uint32_t pitch;
};
 
struct drm_radeon_gem_get_tiling {
__u32 handle;
__u32 tiling_flags;
__u32 pitch;
uint32_t handle;
uint32_t tiling_flags;
uint32_t pitch;
};
 
struct drm_radeon_gem_mmap {
__u32 handle;
__u32 pad;
__u64 offset;
__u64 size;
__u64 addr_ptr;
uint32_t handle;
uint32_t pad;
uint64_t offset;
uint64_t size;
uint64_t addr_ptr;
};
 
struct drm_radeon_gem_set_domain {
__u32 handle;
__u32 read_domains;
__u32 write_domain;
uint32_t handle;
uint32_t read_domains;
uint32_t write_domain;
};
 
struct drm_radeon_gem_wait_idle {
__u32 handle;
__u32 pad;
uint32_t handle;
uint32_t pad;
};
 
struct drm_radeon_gem_busy {
__u32 handle;
__u32 domain;
uint32_t handle;
uint32_t domain;
};
 
struct drm_radeon_gem_pread {
/** Handle for the object being read. */
__u32 handle;
__u32 pad;
uint32_t handle;
uint32_t pad;
/** Offset into the object to read from */
__u64 offset;
uint64_t offset;
/** Length of data to read */
__u64 size;
uint64_t size;
/** Pointer to write the data into. */
/* void *, but pointers are not 32/64 compatible */
__u64 data_ptr;
uint64_t data_ptr;
};
 
struct drm_radeon_gem_pwrite {
/** Handle for the object being written to. */
__u32 handle;
__u32 pad;
uint32_t handle;
uint32_t pad;
/** Offset into the object to write to */
__u64 offset;
uint64_t offset;
/** Length of data to write */
__u64 size;
uint64_t size;
/** Pointer to read the data from. */
/* void *, but pointers are not 32/64 compatible */
__u64 data_ptr;
uint64_t data_ptr;
};
 
/* Sets or returns a value associated with a buffer. */
struct drm_radeon_gem_op {
__u32 handle; /* buffer */
__u32 op; /* RADEON_GEM_OP_* */
__u64 value; /* input or return value */
uint32_t handle; /* buffer */
uint32_t op; /* RADEON_GEM_OP_* */
uint64_t value; /* input or return value */
};
 
#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0
935,11 → 935,11
#define RADEON_VM_PAGE_SNOOPED (1 << 4)
 
struct drm_radeon_gem_va {
__u32 handle;
__u32 operation;
__u32 vm_id;
__u32 flags;
__u64 offset;
uint32_t handle;
uint32_t operation;
uint32_t vm_id;
uint32_t flags;
uint64_t offset;
};
 
#define RADEON_CHUNK_ID_RELOCS 0x01
961,9 → 961,9
/* 0 = normal, + = higher priority, - = lower priority */
 
struct drm_radeon_cs_chunk {
__u32 chunk_id;
__u32 length_dw;
__u64 chunk_data;
uint32_t chunk_id;
uint32_t length_dw;
uint64_t chunk_data;
};
 
/* drm_radeon_cs_reloc.flags */
970,20 → 970,20
#define RADEON_RELOC_PRIO_MASK (0xf << 0)
 
struct drm_radeon_cs_reloc {
__u32 handle;
__u32 read_domains;
__u32 write_domain;
__u32 flags;
uint32_t handle;
uint32_t read_domains;
uint32_t write_domain;
uint32_t flags;
};
 
struct drm_radeon_cs {
__u32 num_chunks;
__u32 cs_id;
/* this points to __u64 * which point to cs chunks */
__u64 chunks;
uint32_t num_chunks;
uint32_t cs_id;
/* this points to uint64_t * which point to cs chunks */
uint64_t chunks;
/* updates to the limits after this CS ioctl */
__u64 gart_limit;
__u64 vram_limit;
uint64_t gart_limit;
uint64_t vram_limit;
};
 
#define RADEON_INFO_DEVICE_ID 0x00
1042,9 → 1042,9
#define RADEON_INFO_GPU_RESET_COUNTER 0x26
 
struct drm_radeon_info {
__u32 request;
__u32 pad;
__u64 value;
uint32_t request;
uint32_t pad;
uint64_t value;
};
 
/* Those correspond to the tile index to use, this is to explicitly state
/drivers/include/uapi/drm/vmwgfx_drm.h
28,7 → 28,9
#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__
 
#include "drm.h"
#ifndef __KERNEL__
#include <drm/drm.h>
#endif
 
#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24
109,9 → 111,9
*/
 
struct drm_vmw_getparam_arg {
__u64 value;
__u32 param;
__u32 pad64;
uint64_t value;
uint32_t param;
uint32_t pad64;
};
 
/*************************************************************************/
132,8 → 134,8
*/
 
struct drm_vmw_context_arg {
__s32 cid;
__u32 pad64;
int32_t cid;
uint32_t pad64;
};
 
/*************************************************************************/
163,7 → 165,7
* @mip_levels: Number of mip levels for each face.
* An unused face should have 0 encoded.
* @size_addr: Address of a user-space array of struct drm_vmw_size
* cast to an __u64 for 32-64 bit compatibility.
* cast to an uint64_t for 32-64 bit compatibility.
* The size of the array should equal the total number of mipmap levels.
* @shareable: Boolean whether other clients (as identified by file descriptors)
* may reference this surface.
175,12 → 177,12
*/
 
struct drm_vmw_surface_create_req {
__u32 flags;
__u32 format;
__u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
__u64 size_addr;
__s32 shareable;
__s32 scanout;
uint32_t flags;
uint32_t format;
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
uint64_t size_addr;
int32_t shareable;
int32_t scanout;
};
 
/**
195,7 → 197,7
*/
 
struct drm_vmw_surface_arg {
__s32 sid;
int32_t sid;
enum drm_vmw_handle_type handle_type;
};
 
211,10 → 213,10
*/
 
struct drm_vmw_size {
__u32 width;
__u32 height;
__u32 depth;
__u32 pad64;
uint32_t width;
uint32_t height;
uint32_t depth;
uint32_t pad64;
};
 
/**
282,13 → 284,13
/**
* struct drm_vmw_execbuf_arg
*
* @commands: User-space address of a command buffer cast to an __u64.
* @commands: User-space address of a command buffer cast to an uint64_t.
* @command-size: Size in bytes of the command buffer.
* @throttle-us: Sleep until software is less than @throttle_us
* microseconds ahead of hardware. The driver may round this value
* to the nearest kernel tick.
* @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
* __u64.
* uint64_t.
* @version: Allows expanding the execbuf ioctl parameters without breaking
* backwards compatibility, since user-space will always tell the kernel
* which version it uses.
300,14 → 302,14
#define DRM_VMW_EXECBUF_VERSION 2
 
struct drm_vmw_execbuf_arg {
__u64 commands;
__u32 command_size;
__u32 throttle_us;
__u64 fence_rep;
__u32 version;
__u32 flags;
__u32 context_handle;
__u32 pad64;
uint64_t commands;
uint32_t command_size;
uint32_t throttle_us;
uint64_t fence_rep;
uint32_t version;
uint32_t flags;
uint32_t context_handle;
uint32_t pad64;
};
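
Per the comment above, both commands and fence_rep carry user pointers widened to 64 bits. A hedged sketch of filling the struct (cmd_buf and cmd_len are assumed; the ioctl request number itself is driver-defined, derived from DRM_VMW_EXECBUF):

struct drm_vmw_fence_rep rep;
struct drm_vmw_execbuf_arg arg = {
	.commands     = (uintptr_t)cmd_buf,	/* user pointer as 64-bit value */
	.command_size = cmd_len,
	.throttle_us  = 0,			/* no software throttling */
	.fence_rep    = (uintptr_t)&rep,
	.version      = DRM_VMW_EXECBUF_VERSION,
};
/* ...submitted through the driver's DRM_VMW_EXECBUF ioctl... */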
 
/**
336,12 → 338,12
*/
 
struct drm_vmw_fence_rep {
__u32 handle;
__u32 mask;
__u32 seqno;
__u32 passed_seqno;
__u32 pad64;
__s32 error;
uint32_t handle;
uint32_t mask;
uint32_t seqno;
uint32_t passed_seqno;
uint32_t pad64;
int32_t error;
};
 
/*************************************************************************/
371,8 → 373,8
*/
 
struct drm_vmw_alloc_dmabuf_req {
__u32 size;
__u32 pad64;
uint32_t size;
uint32_t pad64;
};
 
/**
389,11 → 391,11
*/
 
struct drm_vmw_dmabuf_rep {
__u64 map_handle;
__u32 handle;
__u32 cur_gmr_id;
__u32 cur_gmr_offset;
__u32 pad64;
uint64_t map_handle;
uint32_t handle;
uint32_t cur_gmr_id;
uint32_t cur_gmr_offset;
uint32_t pad64;
};
 
/**
426,8 → 428,8
*/
 
struct drm_vmw_unref_dmabuf_arg {
__u32 handle;
__u32 pad64;
uint32_t handle;
uint32_t pad64;
};
 
/*************************************************************************/
450,10 → 452,10
*/
 
struct drm_vmw_rect {
__s32 x;
__s32 y;
__u32 w;
__u32 h;
int32_t x;
int32_t y;
uint32_t w;
uint32_t h;
};
 
/**
475,21 → 477,21
*/
 
struct drm_vmw_control_stream_arg {
__u32 stream_id;
__u32 enabled;
uint32_t stream_id;
uint32_t enabled;
 
__u32 flags;
__u32 color_key;
uint32_t flags;
uint32_t color_key;
 
__u32 handle;
__u32 offset;
__s32 format;
__u32 size;
__u32 width;
__u32 height;
__u32 pitch[3];
uint32_t handle;
uint32_t offset;
int32_t format;
uint32_t size;
uint32_t width;
uint32_t height;
uint32_t pitch[3];
 
__u32 pad64;
uint32_t pad64;
struct drm_vmw_rect src;
struct drm_vmw_rect dst;
};
517,12 → 519,12
*/
 
struct drm_vmw_cursor_bypass_arg {
__u32 flags;
__u32 crtc_id;
__s32 xpos;
__s32 ypos;
__s32 xhot;
__s32 yhot;
uint32_t flags;
uint32_t crtc_id;
int32_t xpos;
int32_t ypos;
int32_t xhot;
int32_t yhot;
};
 
/*************************************************************************/
540,8 → 542,8
*/
 
struct drm_vmw_stream_arg {
__u32 stream_id;
__u32 pad64;
uint32_t stream_id;
uint32_t pad64;
};
 
/*************************************************************************/
563,7 → 565,7
/**
* struct drm_vmw_get_3d_cap_arg
*
* @buffer: Pointer to a buffer for capability data, cast to an __u64
* @buffer: Pointer to a buffer for capability data, cast to an uint64_t
* @size: Max size to copy
*
* Input argument to the DRM_VMW_GET_3D_CAP_IOCTL
571,9 → 573,9
*/
 
struct drm_vmw_get_3d_cap_arg {
__u64 buffer;
__u32 max_size;
__u32 pad64;
uint64_t buffer;
uint32_t max_size;
uint32_t pad64;
};
 
/*************************************************************************/
622,14 → 624,14
*/
 
struct drm_vmw_fence_wait_arg {
__u32 handle;
__s32 cookie_valid;
__u64 kernel_cookie;
__u64 timeout_us;
__s32 lazy;
__s32 flags;
__s32 wait_options;
__s32 pad64;
uint32_t handle;
int32_t cookie_valid;
uint64_t kernel_cookie;
uint64_t timeout_us;
int32_t lazy;
int32_t flags;
int32_t wait_options;
int32_t pad64;
};
 
/*************************************************************************/
653,12 → 655,12
*/
 
struct drm_vmw_fence_signaled_arg {
__u32 handle;
__u32 flags;
__s32 signaled;
__u32 passed_seqno;
__u32 signaled_flags;
__u32 pad64;
uint32_t handle;
uint32_t flags;
int32_t signaled;
uint32_t passed_seqno;
uint32_t signaled_flags;
uint32_t pad64;
};
 
/*************************************************************************/
679,8 → 681,8
*/
 
struct drm_vmw_fence_arg {
__u32 handle;
__u32 pad64;
uint32_t handle;
uint32_t pad64;
};
 
 
701,9 → 703,9
 
struct drm_vmw_event_fence {
struct drm_event base;
__u64 user_data;
__u32 tv_sec;
__u32 tv_usec;
uint64_t user_data;
uint32_t tv_sec;
uint32_t tv_usec;
};
 
/*
715,7 → 717,7
/**
* struct drm_vmw_fence_event_arg
*
* @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
* @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if
* the fence is not supposed to be referenced by user-space.
* @user_info: Info to be delivered with the event.
* @handle: Attach the event to this fence only.
722,10 → 724,10
* @flags: A set of flags as defined above.
*/
struct drm_vmw_fence_event_arg {
__u64 fence_rep;
__u64 user_data;
__u32 handle;
__u32 flags;
uint64_t fence_rep;
uint64_t user_data;
uint32_t handle;
uint32_t flags;
};
 
 
745,7 → 747,7
* @sid: Surface id to present from.
* @dest_x: X placement coordinate for surface.
* @dest_y: Y placement coordinate for surface.
* @clips_ptr: Pointer to an array of clip rects cast to an __u64.
* @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
* @num_clips: Number of cliprects given relative to the framebuffer origin,
* in the same coordinate space as the frame buffer.
* @pad64: Unused 64-bit padding.
754,13 → 756,13
*/
 
struct drm_vmw_present_arg {
__u32 fb_id;
__u32 sid;
__s32 dest_x;
__s32 dest_y;
__u64 clips_ptr;
__u32 num_clips;
__u32 pad64;
uint32_t fb_id;
uint32_t sid;
int32_t dest_x;
int32_t dest_y;
uint64_t clips_ptr;
uint32_t num_clips;
uint32_t pad64;
};
 
 
778,16 → 780,16
* struct drm_vmw_present_arg
* @fb_id: fb_id to present / read back from.
* @num_clips: Number of cliprects.
* @clips_ptr: Pointer to an array of clip rects cast to an __u64.
* @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
* @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
* @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t.
* If this member is NULL, then the ioctl should not return a fence.
*/
 
struct drm_vmw_present_readback_arg {
__u32 fb_id;
__u32 num_clips;
__u64 clips_ptr;
__u64 fence_rep;
uint32_t fb_id;
uint32_t num_clips;
uint64_t clips_ptr;
uint64_t fence_rep;
};
 
/*************************************************************************/
803,14 → 805,14
* struct drm_vmw_update_layout_arg
*
* @num_outputs: number of active connectors
* @rects: pointer to array of drm_vmw_rect cast to an __u64
* @rects: pointer to array of drm_vmw_rect cast to an uint64_t
*
* Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
*/
struct drm_vmw_update_layout_arg {
__u32 num_outputs;
__u32 pad64;
__u64 rects;
uint32_t num_outputs;
uint32_t pad64;
uint64_t rects;
};
 
 
847,10 → 849,10
*/
struct drm_vmw_shader_create_arg {
enum drm_vmw_shader_type shader_type;
__u32 size;
__u32 buffer_handle;
__u32 shader_handle;
__u64 offset;
uint32_t size;
uint32_t buffer_handle;
uint32_t shader_handle;
uint64_t offset;
};
 
/*************************************************************************/
869,8 → 871,8
* Input argument to the DRM_VMW_UNREF_SHADER ioctl.
*/
struct drm_vmw_shader_arg {
__u32 handle;
__u32 pad64;
uint32_t handle;
uint32_t pad64;
};
 
/*************************************************************************/
916,14 → 918,14
* Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
*/
struct drm_vmw_gb_surface_create_req {
__u32 svga3d_flags;
__u32 format;
__u32 mip_levels;
uint32_t svga3d_flags;
uint32_t format;
uint32_t mip_levels;
enum drm_vmw_surface_flags drm_surface_flags;
__u32 multisample_count;
__u32 autogen_filter;
__u32 buffer_handle;
__u32 array_size;
uint32_t multisample_count;
uint32_t autogen_filter;
uint32_t buffer_handle;
uint32_t array_size;
struct drm_vmw_size base_size;
};
 
942,11 → 944,11
* Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
*/
struct drm_vmw_gb_surface_create_rep {
__u32 handle;
__u32 backup_size;
__u32 buffer_handle;
__u32 buffer_size;
__u64 buffer_map_handle;
uint32_t handle;
uint32_t backup_size;
uint32_t buffer_handle;
uint32_t buffer_size;
uint64_t buffer_map_handle;
};
 
/**
1059,8 → 1061,8
struct drm_vmw_synccpu_arg {
enum drm_vmw_synccpu_op op;
enum drm_vmw_synccpu_flags flags;
__u32 handle;
__u32 pad64;
uint32_t handle;
uint32_t pad64;
};
 
/*************************************************************************/
/drivers/include/uapi/drm/drm_sarea.h
32,7 → 32,7
#ifndef _DRM_SAREA_H_
#define _DRM_SAREA_H_
 
#include "drm.h"
#include <drm/drm.h>
 
/* SAREA area needs to be at least a page */
#if defined(__alpha__)