/drivers/ddk/Makefile |
---|
1,5 → 1,4 |
CC = gcc |
AS = as |
6,8 → 5,14 |
DRV_TOPDIR = $(CURDIR)/.. |
DRV_INCLUDES = $(DRV_TOPDIR)/include |
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/asm |
DEFINES = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DMI |
INCLUDES = -I$(DRV_INCLUDES) \ |
-I$(DRV_INCLUDES)/asm \ |
-I$(DRV_INCLUDES)/uapi |
DEFINES = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DMI -DCONFIG_TINY_RCU |
DEFINES+= -DCONFIG_X86_L1_CACHE_SHIFT=6 -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE |
CFLAGS = -c -Os $(INCLUDES) $(DEFINES) -march=i686 -fomit-frame-pointer -fno-builtin-printf \ |
-mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 |
25,6 → 30,7 |
io/write.c \ |
linux/bitmap.c \ |
linux/dmi.c \ |
linux/find_next_bit.c \ |
linux/idr.c \ |
linux/interval_tree.c \ |
linux/firmware.c \ |
/drivers/ddk/debug/dbglog.c |
---|
1,6 → 1,6 |
#include <ddk.h> |
#include <mutex.h> |
#include <linux/mutex.h> |
#include <syscall.h> |
#pragma pack(push, 1) |
/drivers/ddk/linux/bitmap.c |
---|
132,7 → 132,9 |
lower = src[off + k]; |
if (left && off + k == lim - 1) |
lower &= mask; |
dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem; |
dst[k] = lower >> rem; |
if (rem) |
dst[k] |= upper << (BITS_PER_LONG - rem); |
if (left && k == lim - 1) |
dst[k] &= mask; |
} |
173,7 → 175,9 |
upper = src[k]; |
if (left && k == lim - 1) |
upper &= (1UL << left) - 1; |
dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem; |
dst[k + off] = upper << rem; |
if (rem) |
dst[k + off] |= lower >> (BITS_PER_LONG - rem); |
if (left && k + off == lim - 1) |
dst[k + off] &= (1UL << left) - 1; |
} |
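Both hunks above add an `if (rem)` guard before the cross-word OR. The reason: when the shift distance rem is zero, the old single-expression form shifts by BITS_PER_LONG, and shifting an unsigned long by its full width is undefined behaviour in C. A minimal standalone sketch of the guarded pattern (names are illustrative, not taken from the driver):

    #include <limits.h>

    #define BITS_PER_LONG ((int)(sizeof(unsigned long) * CHAR_BIT))

    /* Combine two adjacent source words into one destination word,
     * shifting right by 'rem' bits. Shifting by BITS_PER_LONG is
     * undefined, so the 'upper' contribution is OR-ed in only when
     * rem != 0. */
    static unsigned long shift_pair_right(unsigned long upper,
                                          unsigned long lower,
                                          unsigned int rem)
    {
        unsigned long dst = lower >> rem;
        if (rem)
            dst |= upper << (BITS_PER_LONG - rem);
        return dst;
    }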
323,23 → 327,25 |
} |
EXPORT_SYMBOL(bitmap_clear); |
/* |
* bitmap_find_next_zero_area - find a contiguous aligned zero area |
/** |
* bitmap_find_next_zero_area_off - find a contiguous aligned zero area |
* @map: The address to base the search on |
* @size: The bitmap size in bits |
* @start: The bitnumber to start searching at |
* @nr: The number of zeroed bits we're looking for |
* @align_mask: Alignment mask for zero area |
* @align_offset: Alignment offset for zero area. |
* |
* The @align_mask should be one less than a power of 2; the effect is that |
* the bit offset of all zero areas this function finds is multiples of that |
* power of 2. A @align_mask of 0 means no alignment is required. |
* the bit offset of all zero areas this function finds plus @align_offset |
is a multiple of that power of 2. |
*/ |
unsigned long bitmap_find_next_zero_area(unsigned long *map, |
unsigned long bitmap_find_next_zero_area_off(unsigned long *map, |
unsigned long size, |
unsigned long start, |
unsigned int nr, |
unsigned long align_mask) |
unsigned long align_mask, |
unsigned long align_offset) |
{ |
unsigned long index, end, i; |
again: |
346,7 → 352,7 |
index = find_next_zero_bit(map, size, start); |
/* Align allocation */ |
index = __ALIGN_MASK(index, align_mask); |
index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset; |
end = index + nr; |
if (end > size) |
358,7 → 364,7 |
} |
return index; |
} |
EXPORT_SYMBOL(bitmap_find_next_zero_area); |
EXPORT_SYMBOL(bitmap_find_next_zero_area_off); |
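With __ALIGN_MASK(x, mask) being ((x) + (mask)) & ~(mask), the new variant aligns index + align_offset rather than index itself, then subtracts the offset back out, so it is the sum that lands on the alignment boundary. A worked example (values chosen for illustration only):

    #include <assert.h>

    #define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

    int main(void)
    {
        /* align_mask = 3 requests 4-bit alignment; align_offset = 1
         * asks that (index + 1), not index itself, be aligned. */
        unsigned long index = 5, align_mask = 3, align_offset = 1;

        index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
        assert(index == 7 && (index + align_offset) % 4 == 0);
        return 0;
    }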
/* |
* Bitmap printing & parsing functions: first version by Nadia Yvette Chambers, |
599,7 → 605,7 |
* |
* Further lets say we use the following code, invoking |
* bitmap_fold() then bitmap_onto, as suggested above to |
* avoid the possitility of an empty @dst result: |
* avoid the possibility of an empty @dst result: |
* |
* unsigned long *tmp; // a temporary bitmap's bits |
* |
/drivers/ddk/linux/dmapool.c |
---|
24,9 → 24,11 |
#include <ddk.h> |
#include <linux/slab.h> |
#include <linux/errno.h> |
#include <linux/mutex.h> |
#include <pci.h> |
#include <linux/pci.h> |
#include <linux/gfp.h> |
#include <syscall.h> |
142,7 → 144,7 |
{ |
struct dma_page *page; |
page = malloc(sizeof(*page)); |
page = __builtin_malloc(sizeof(*page)); |
if (!page) |
return NULL; |
page->vaddr = (void*)KernelAlloc(pool->allocation); |
228,7 → 230,7 |
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, |
dma_addr_t *handle) |
{ |
u32_t efl; |
u32 efl; |
struct dma_page *page; |
size_t offset; |
void *retval; |
262,7 → 264,7 |
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) |
{ |
struct dma_page *page; |
u32_t efl; |
u32 efl; |
efl = safe_cli(); |
294,7 → 296,7 |
unsigned long flags; |
unsigned int offset; |
u32_t efl; |
u32 efl; |
page = pool_find_page(pool, dma); |
if (!page) { |
/drivers/ddk/linux/dmi.c |
---|
7,12 → 7,9 |
#include <linux/dmi.h> |
#include <syscall.h> |
#define pr_debug dbgprintf |
#define pr_info printf |
static void *dmi_alloc(unsigned len) |
{ |
return malloc(len); |
return __builtin_malloc(len); |
}; |
/* |
/drivers/ddk/linux/find_next_bit.c |
---|
0,0 → 1,285 |
/* find_next_bit.c: fallback find next bit implementation |
* |
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. |
* Written by David Howells (dhowells@redhat.com) |
* |
* This program is free software; you can redistribute it and/or |
* modify it under the terms of the GNU General Public License |
* as published by the Free Software Foundation; either version |
* 2 of the License, or (at your option) any later version. |
*/ |
#include <linux/bitops.h> |
#include <linux/export.h> |
#include <asm/types.h> |
#include <asm/byteorder.h> |
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) |
#ifndef find_next_bit |
/* |
* Find the next set bit in a memory region. |
*/ |
unsigned long find_next_bit(const unsigned long *addr, unsigned long size, |
unsigned long offset) |
{ |
const unsigned long *p = addr + BITOP_WORD(offset); |
unsigned long result = offset & ~(BITS_PER_LONG-1); |
unsigned long tmp; |
if (offset >= size) |
return size; |
size -= result; |
offset %= BITS_PER_LONG; |
if (offset) { |
tmp = *(p++); |
tmp &= (~0UL << offset); |
if (size < BITS_PER_LONG) |
goto found_first; |
if (tmp) |
goto found_middle; |
size -= BITS_PER_LONG; |
result += BITS_PER_LONG; |
} |
while (size & ~(BITS_PER_LONG-1)) { |
if ((tmp = *(p++))) |
goto found_middle; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = *p; |
found_first: |
tmp &= (~0UL >> (BITS_PER_LONG - size)); |
if (tmp == 0UL) /* Are any bits set? */ |
return result + size; /* Nope. */ |
found_middle: |
return result + __ffs(tmp); |
} |
EXPORT_SYMBOL(find_next_bit); |
#endif |
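A typical caller walks all set bits by restarting the search one past the previous hit, which is the pattern the kernel's for_each_set_bit() macro expands to. A sketch, assuming the declaration above:

    /* Visit every set bit in 'map', which is 'nbits' bits long.
     * find_next_bit() returns 'nbits' once no further bit is set. */
    static void visit_set_bits(const unsigned long *map, unsigned long nbits,
                               void (*visit)(unsigned long bit))
    {
        unsigned long bit;

        for (bit = find_next_bit(map, nbits, 0);
             bit < nbits;
             bit = find_next_bit(map, nbits, bit + 1))
            visit(bit);
    }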
#ifndef find_next_zero_bit |
/* |
* This implementation of find_{first,next}_zero_bit was stolen from |
* Linus' asm-alpha/bitops.h. |
*/ |
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, |
unsigned long offset) |
{ |
const unsigned long *p = addr + BITOP_WORD(offset); |
unsigned long result = offset & ~(BITS_PER_LONG-1); |
unsigned long tmp; |
if (offset >= size) |
return size; |
size -= result; |
offset %= BITS_PER_LONG; |
if (offset) { |
tmp = *(p++); |
tmp |= ~0UL >> (BITS_PER_LONG - offset); |
if (size < BITS_PER_LONG) |
goto found_first; |
if (~tmp) |
goto found_middle; |
size -= BITS_PER_LONG; |
result += BITS_PER_LONG; |
} |
while (size & ~(BITS_PER_LONG-1)) { |
if (~(tmp = *(p++))) |
goto found_middle; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = *p; |
found_first: |
tmp |= ~0UL << size; |
if (tmp == ~0UL) /* Are any bits zero? */ |
return result + size; /* Nope. */ |
found_middle: |
return result + ffz(tmp); |
} |
EXPORT_SYMBOL(find_next_zero_bit); |
#endif |
#ifndef find_first_bit |
/* |
* Find the first set bit in a memory region. |
*/ |
unsigned long find_first_bit(const unsigned long *addr, unsigned long size) |
{ |
const unsigned long *p = addr; |
unsigned long result = 0; |
unsigned long tmp; |
while (size & ~(BITS_PER_LONG-1)) { |
if ((tmp = *(p++))) |
goto found; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); |
if (tmp == 0UL) /* Are any bits set? */ |
return result + size; /* Nope. */ |
found: |
return result + __ffs(tmp); |
} |
EXPORT_SYMBOL(find_first_bit); |
#endif |
#ifndef find_first_zero_bit |
/* |
* Find the first cleared bit in a memory region. |
*/ |
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) |
{ |
const unsigned long *p = addr; |
unsigned long result = 0; |
unsigned long tmp; |
while (size & ~(BITS_PER_LONG-1)) { |
if (~(tmp = *(p++))) |
goto found; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = (*p) | (~0UL << size); |
if (tmp == ~0UL) /* Are any bits zero? */ |
return result + size; /* Nope. */ |
found: |
return result + ffz(tmp); |
} |
EXPORT_SYMBOL(find_first_zero_bit); |
#endif |
#ifdef __BIG_ENDIAN |
/* include/linux/byteorder does not support "unsigned long" type */ |
static inline unsigned long ext2_swabp(const unsigned long * x) |
{ |
#if BITS_PER_LONG == 64 |
return (unsigned long) __swab64p((u64 *) x); |
#elif BITS_PER_LONG == 32 |
return (unsigned long) __swab32p((u32 *) x); |
#else |
#error BITS_PER_LONG not defined |
#endif |
} |
/* include/linux/byteorder doesn't support "unsigned long" type */ |
static inline unsigned long ext2_swab(const unsigned long y) |
{ |
#if BITS_PER_LONG == 64 |
return (unsigned long) __swab64((u64) y); |
#elif BITS_PER_LONG == 32 |
return (unsigned long) __swab32((u32) y); |
#else |
#error BITS_PER_LONG not defined |
#endif |
} |
#ifndef find_next_zero_bit_le |
unsigned long find_next_zero_bit_le(const void *addr, unsigned |
long size, unsigned long offset) |
{ |
const unsigned long *p = addr; |
unsigned long result = offset & ~(BITS_PER_LONG - 1); |
unsigned long tmp; |
if (offset >= size) |
return size; |
p += BITOP_WORD(offset); |
size -= result; |
offset &= (BITS_PER_LONG - 1UL); |
if (offset) { |
tmp = ext2_swabp(p++); |
tmp |= (~0UL >> (BITS_PER_LONG - offset)); |
if (size < BITS_PER_LONG) |
goto found_first; |
if (~tmp) |
goto found_middle; |
size -= BITS_PER_LONG; |
result += BITS_PER_LONG; |
} |
while (size & ~(BITS_PER_LONG - 1)) { |
if (~(tmp = *(p++))) |
goto found_middle_swap; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = ext2_swabp(p); |
found_first: |
tmp |= ~0UL << size; |
if (tmp == ~0UL) /* Are any bits zero? */ |
return result + size; /* Nope. Skip ffz */ |
found_middle: |
return result + ffz(tmp); |
found_middle_swap: |
return result + ffz(ext2_swab(tmp)); |
} |
EXPORT_SYMBOL(find_next_zero_bit_le); |
#endif |
#ifndef find_next_bit_le |
unsigned long find_next_bit_le(const void *addr, unsigned |
long size, unsigned long offset) |
{ |
const unsigned long *p = addr; |
unsigned long result = offset & ~(BITS_PER_LONG - 1); |
unsigned long tmp; |
if (offset >= size) |
return size; |
p += BITOP_WORD(offset); |
size -= result; |
offset &= (BITS_PER_LONG - 1UL); |
if (offset) { |
tmp = ext2_swabp(p++); |
tmp &= (~0UL << offset); |
if (size < BITS_PER_LONG) |
goto found_first; |
if (tmp) |
goto found_middle; |
size -= BITS_PER_LONG; |
result += BITS_PER_LONG; |
} |
while (size & ~(BITS_PER_LONG - 1)) { |
tmp = *(p++); |
if (tmp) |
goto found_middle_swap; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = ext2_swabp(p); |
found_first: |
tmp &= (~0UL >> (BITS_PER_LONG - size)); |
if (tmp == 0UL) /* Are any bits set? */ |
return result + size; /* Nope. */ |
found_middle: |
return result + __ffs(tmp); |
found_middle_swap: |
return result + __ffs(ext2_swab(tmp)); |
} |
EXPORT_SYMBOL(find_next_bit_le); |
#endif |
#endif /* __BIG_ENDIAN */ |
/drivers/ddk/linux/firmware.c |
---|
1,6 → 1,8 |
#include <linux/kernel.h> |
#include <linux/slab.h> |
#include <linux/byteorder/little_endian.h> |
#include <linux/gfp.h> |
#include <linux/errno.h> |
#include <linux/firmware.h> |
/drivers/ddk/linux/idr.c |
---|
20,20 → 20,16 |
* that id to this code and it returns your pointer. |
*/ |
#include <linux/kernel.h> |
#ifndef TEST // to test in user space... |
#include <linux/slab.h> |
#include <linux/export.h> |
#endif |
#include <linux/err.h> |
#include <linux/string.h> |
#include <linux/bitops.h> |
#include <linux/idr.h> |
//#include <stdlib.h> |
#include <linux/spinlock.h> |
static inline void * __must_check ERR_PTR(long error) |
{ |
return (void *) error; |
} |
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, |
unsigned long offset); |
#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) |
132,7 → 128,7 |
{ |
if (idr->hint == p) |
RCU_INIT_POINTER(idr->hint, NULL); |
idr_layer_rcu_free(&p->rcu_head); |
call_rcu(&p->rcu_head, idr_layer_rcu_free); |
} |
/* only called when idp->lock is held */ |
500,7 → 496,7 |
n = id & IDR_MASK; |
if (likely(p != NULL && test_bit(n, p->bitmap))) { |
__clear_bit(n, p->bitmap); |
rcu_assign_pointer(p->ary[n], NULL); |
RCU_INIT_POINTER(p->ary[n], NULL); |
to_free = NULL; |
while(*paa && ! --((**paa)->count)){ |
if (to_free) |
564,7 → 560,7 |
n = idp->layers * IDR_BITS; |
*paa = idp->top; |
rcu_assign_pointer(idp->top, NULL); |
RCU_INIT_POINTER(idp->top, NULL); |
max = idr_max(idp->layers); |
id = 0; |
599,7 → 595,7 |
* idr_destroy(). |
* |
* A typical clean-up sequence for objects stored in an idr tree will use |
* idr_for_each() to free all objects, if necessay, then idr_destroy() to |
* idr_for_each() to free all objects, if necessary, then idr_destroy() to |
* free up the id mappings and cached idr_layers. |
*/ |
void idr_destroy(struct idr *idp) |
1119,129 → 1115,3 |
} |
EXPORT_SYMBOL(ida_init); |
unsigned long find_first_bit(const unsigned long *addr, unsigned long size) |
{ |
const unsigned long *p = addr; |
unsigned long result = 0; |
unsigned long tmp; |
while (size & ~(BITS_PER_LONG-1)) { |
if ((tmp = *(p++))) |
goto found; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); |
if (tmp == 0UL) /* Are any bits set? */ |
return result + size; /* Nope. */ |
found: |
return result + __ffs(tmp); |
} |
unsigned long find_next_bit(const unsigned long *addr, unsigned long size, |
unsigned long offset) |
{ |
const unsigned long *p = addr + BITOP_WORD(offset); |
unsigned long result = offset & ~(BITS_PER_LONG-1); |
unsigned long tmp; |
if (offset >= size) |
return size; |
size -= result; |
offset %= BITS_PER_LONG; |
if (offset) { |
tmp = *(p++); |
tmp &= (~0UL << offset); |
if (size < BITS_PER_LONG) |
goto found_first; |
if (tmp) |
goto found_middle; |
size -= BITS_PER_LONG; |
result += BITS_PER_LONG; |
} |
while (size & ~(BITS_PER_LONG-1)) { |
if ((tmp = *(p++))) |
goto found_middle; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = *p; |
found_first: |
tmp &= (~0UL >> (BITS_PER_LONG - size)); |
if (tmp == 0UL) /* Are any bits set? */ |
return result + size; /* Nope. */ |
found_middle: |
return result + __ffs(tmp); |
} |
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, |
unsigned long offset) |
{ |
const unsigned long *p = addr + BITOP_WORD(offset); |
unsigned long result = offset & ~(BITS_PER_LONG-1); |
unsigned long tmp; |
if (offset >= size) |
return size; |
size -= result; |
offset %= BITS_PER_LONG; |
if (offset) { |
tmp = *(p++); |
tmp |= ~0UL >> (BITS_PER_LONG - offset); |
if (size < BITS_PER_LONG) |
goto found_first; |
if (~tmp) |
goto found_middle; |
size -= BITS_PER_LONG; |
result += BITS_PER_LONG; |
} |
while (size & ~(BITS_PER_LONG-1)) { |
if (~(tmp = *(p++))) |
goto found_middle; |
result += BITS_PER_LONG; |
size -= BITS_PER_LONG; |
} |
if (!size) |
return result; |
tmp = *p; |
found_first: |
tmp |= ~0UL << size; |
if (tmp == ~0UL) /* Are any bits zero? */ |
return result + size; /* Nope. */ |
found_middle: |
return result + ffz(tmp); |
} |
unsigned int hweight32(unsigned int w) |
{ |
unsigned int res = w - ((w >> 1) & 0x55555555); |
res = (res & 0x33333333) + ((res >> 2) & 0x33333333); |
res = (res + (res >> 4)) & 0x0F0F0F0F; |
res = res + (res >> 8); |
return (res + (res >> 16)) & 0x000000FF; |
} |
unsigned long hweight64(__u64 w) |
{ |
#if BITS_PER_LONG == 32 |
return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); |
#elif BITS_PER_LONG == 64 |
__u64 res = w - ((w >> 1) & 0x5555555555555555ul); |
res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); |
res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; |
res = res + (res >> 8); |
res = res + (res >> 16); |
return (res + (res >> 32)) & 0x00000000000000FFul; |
#endif |
} |
/drivers/ddk/linux/list_sort.c |
---|
1,101 → 1,145 |
#define pr_fmt(fmt) "list_sort_test: " fmt |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <linux/list_sort.h> |
#include <linux/slab.h> |
#include <linux/list.h> |
#define MAX_LIST_LENGTH_BITS 20 |
/* |
* Returns a list organized in an intermediate format suited |
* to chaining of merge() calls: null-terminated, no reserved or |
* sentinel head node, "prev" links not maintained. |
*/ |
static struct list_head *merge(void *priv, |
int (*cmp)(void *priv, struct list_head *a, |
struct list_head *b), |
struct list_head *a, struct list_head *b) |
{ |
struct list_head head, *tail = &head; |
while (a && b) { |
/* if equal, take 'a' -- important for sort stability */ |
if ((*cmp)(priv, a, b) <= 0) { |
tail->next = a; |
a = a->next; |
} else { |
tail->next = b; |
b = b->next; |
} |
tail = tail->next; |
} |
tail->next = a?:b; |
return head.next; |
} |
/* |
* Combine final list merge with restoration of standard doubly-linked |
* list structure. This approach duplicates code from merge(), but |
* runs faster than the tidier alternatives of either a separate final |
* prev-link restoration pass, or maintaining the prev links |
* throughout. |
*/ |
static void merge_and_restore_back_links(void *priv, |
int (*cmp)(void *priv, struct list_head *a, |
struct list_head *b), |
struct list_head *head, |
struct list_head *a, struct list_head *b) |
{ |
struct list_head *tail = head; |
u8 count = 0; |
while (a && b) { |
/* if equal, take 'a' -- important for sort stability */ |
if ((*cmp)(priv, a, b) <= 0) { |
tail->next = a; |
a->prev = tail; |
a = a->next; |
} else { |
tail->next = b; |
b->prev = tail; |
b = b->next; |
} |
tail = tail->next; |
} |
tail->next = a ? : b; |
do { |
/* |
* In worst cases this loop may run many iterations. |
* Continue callbacks to the client even though no |
* element comparison is needed, so the client's cmp() |
* routine can invoke cond_resched() periodically. |
*/ |
if (unlikely(!(++count))) |
(*cmp)(priv, tail->next, tail->next); |
tail->next->prev = tail; |
tail = tail->next; |
} while (tail->next); |
tail->next = head; |
head->prev = tail; |
} |
/** |
* list_sort - sort a list. |
* @priv: private data, passed to @cmp |
* list_sort - sort a list |
* @priv: private data, opaque to list_sort(), passed to @cmp |
* @head: the list to sort |
* @cmp: the elements comparison function |
* |
* This function has been implemented by Mark J Roberts <mjr@znex.org>. It |
* implements "merge sort" which has O(nlog(n)) complexity. The list is sorted |
* in ascending order. |
* This function implements "merge sort", which has O(nlog(n)) |
* complexity. |
* |
* The comparison function @cmp is supposed to return a negative value if @a is |
* less than @b, and a positive value if @a is greater than @b. If @a and @b |
* are equivalent, then it does not matter what this function returns. |
* The comparison function @cmp must return a negative value if @a |
* should sort before @b, and a positive value if @a should sort after |
* @b. If @a and @b are equivalent, and their original relative |
* ordering is to be preserved, @cmp must return 0. |
*/ |
void list_sort(void *priv, struct list_head *head, |
int (*cmp)(void *priv, struct list_head *a, |
struct list_head *b)) |
{ |
struct list_head *p, *q, *e, *list, *tail, *oldhead; |
int insize, nmerges, psize, qsize, i; |
struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists |
-- last slot is a sentinel */ |
int lev; /* index into part[] */ |
int max_lev = 0; |
struct list_head *list; |
if (list_empty(head)) |
return; |
memset(part, 0, sizeof(part)); |
head->prev->next = NULL; |
list = head->next; |
list_del(head); |
insize = 1; |
for (;;) { |
p = oldhead = list; |
list = tail = NULL; |
nmerges = 0; |
while (p) { |
nmerges++; |
q = p; |
psize = 0; |
for (i = 0; i < insize; i++) { |
psize++; |
q = q->next == oldhead ? NULL : q->next; |
if (!q) |
break; |
} |
while (list) { |
struct list_head *cur = list; |
list = list->next; |
cur->next = NULL; |
qsize = insize; |
while (psize > 0 || (qsize > 0 && q)) { |
if (!psize) { |
e = q; |
q = q->next; |
qsize--; |
if (q == oldhead) |
q = NULL; |
} else if (!qsize || !q) { |
e = p; |
p = p->next; |
psize--; |
if (p == oldhead) |
p = NULL; |
} else if (cmp(priv, p, q) <= 0) { |
e = p; |
p = p->next; |
psize--; |
if (p == oldhead) |
p = NULL; |
} else { |
e = q; |
q = q->next; |
qsize--; |
if (q == oldhead) |
q = NULL; |
for (lev = 0; part[lev]; lev++) { |
cur = merge(priv, cmp, part[lev], cur); |
part[lev] = NULL; |
} |
if (tail) |
tail->next = e; |
else |
list = e; |
e->prev = tail; |
tail = e; |
if (lev > max_lev) { |
if (unlikely(lev >= ARRAY_SIZE(part)-1)) { |
printk_once(KERN_DEBUG "list too long for efficiency\n"); |
lev--; |
} |
p = q; |
max_lev = lev; |
} |
part[lev] = cur; |
} |
tail->next = list; |
list->prev = tail; |
for (lev = 0; lev < max_lev; lev++) |
if (part[lev]) |
list = merge(priv, cmp, part[lev], list); |
if (nmerges <= 1) |
break; |
insize *= 2; |
merge_and_restore_back_links(priv, cmp, head, part[max_lev], list); |
} |
head->next = list; |
head->prev = list->prev; |
list->prev->next = head; |
list->prev = head; |
} |
EXPORT_SYMBOL(list_sort); |
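For reference, a comparison callback matching the contract above could look like the following; struct item and its embedded node are illustrative, not part of the DDK:

    struct item {
        int key;
        struct list_head node;
    };

    /* Ascending sort by key: return <0, 0, >0 as 'a' sorts before,
     * equal to, or after 'b'. Returning 0 for equal keys lets
     * list_sort() keep their original relative order (stability). */
    static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
    {
        struct item *ia = container_of(a, struct item, node);
        struct item *ib = container_of(b, struct item, node);

        return (ia->key > ib->key) - (ia->key < ib->key);
    }

    /* usage: list_sort(NULL, &my_list, item_cmp); */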
/drivers/ddk/linux/rbtree.c |
---|
101,7 → 101,7 |
* / \ / \ |
* p u --> P U |
* / / |
* n N |
* n n |
* |
* However, since g's parent might be red, and |
* 4) does not allow this, we need to recurse |
/drivers/ddk/linux/scatterlist.c |
---|
7,6 → 7,7 |
* Version 2. See the file COPYING for more details. |
*/ |
#include <linux/export.h> |
#include <linux/slab.h> |
#include <linux/scatterlist.h> |
/** |
70,7 → 71,7 |
**/ |
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents) |
{ |
#ifndef ARCH_HAS_SG_CHAIN |
#ifndef CONFIG_ARCH_HAS_SG_CHAIN |
struct scatterlist *ret = &sgl[nents - 1]; |
#else |
struct scatterlist *sg, *ret = NULL; |
182,10 → 183,10 |
} |
table->orig_nents -= sg_size; |
if (!skip_first_chunk) { |
if (skip_first_chunk) |
skip_first_chunk = false; |
else |
free_fn(sgl, alloc_size); |
skip_first_chunk = false; |
} |
sgl = next; |
} |
234,7 → 235,7 |
if (nents == 0) |
return -EINVAL; |
#ifndef ARCH_HAS_SG_CHAIN |
#ifndef CONFIG_ARCH_HAS_SG_CHAIN |
if (WARN_ON_ONCE(nents > max_ents)) |
return -EINVAL; |
#endif |
/drivers/ddk/linux/string.c |
---|
27,7 → 27,7 |
#ifndef __HAVE_ARCH_STRLCPY |
/** |
* strlcpy - Copy a %NUL terminated string into a sized buffer |
* strlcpy - Copy a C-string into a sized buffer |
* @dest: Where to copy the string to |
* @src: Where to copy the string from |
* @size: size of destination buffer |
/drivers/ddk/linux/time.c |
---|
1,4 → 1,4 |
#include <jiffies.h> |
#include <linux/jiffies.h> |
131,6 → 131,7 |
>> MSEC_TO_HZ_SHR32; |
#endif |
} |
EXPORT_SYMBOL(msecs_to_jiffies); |
unsigned long usecs_to_jiffies(const unsigned int u) |
{ |
145,12 → 146,27 |
>> USEC_TO_HZ_SHR32; |
#endif |
} |
EXPORT_SYMBOL(usecs_to_jiffies); |
unsigned long |
timespec_to_jiffies(const struct timespec *value) |
/* |
* The TICK_NSEC - 1 rounds up the value to the next resolution. Note |
* that a remainder subtract here would not do the right thing as the |
* resolution values don't fall on second boundaries. I.e. the line: |
* nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding. |
* Note that due to the small error in the multiplier here, this |
* rounding is incorrect for sufficiently large values of tv_nsec, but |
* well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're |
* OK. |
* |
* Rather, we just shift the bits off the right. |
* |
* The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec |
* value to a scaled second value. |
*/ |
static unsigned long |
__timespec_to_jiffies(unsigned long sec, long nsec) |
{ |
unsigned long sec = value->tv_sec; |
long nsec = value->tv_nsec + TICK_NSEC - 1; |
nsec = nsec + TICK_NSEC - 1; |
if (sec >= MAX_SEC_IN_JIFFIES){ |
sec = MAX_SEC_IN_JIFFIES; |
162,6 → 178,28 |
} |
unsigned long |
timespec_to_jiffies(const struct timespec *value) |
{ |
return __timespec_to_jiffies(value->tv_sec, value->tv_nsec); |
} |
EXPORT_SYMBOL(timespec_to_jiffies); |
void |
jiffies_to_timespec(const unsigned long jiffies, struct timespec *value) |
{ |
/* |
* Convert jiffies to nanoseconds and separate with |
* one divide. |
*/ |
u32 rem; |
value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC, |
NSEC_PER_SEC, &rem); |
value->tv_nsec = rem; |
} |
EXPORT_SYMBOL(jiffies_to_timespec); |
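As a sanity check of the round-up behaviour described in the comment, a simplified model of the arithmetic (plain division rather than the scaled fixed-point used above; HZ value assumed for illustration):

    #define HZ_EXAMPLE        100UL                       /* assumed */
    #define TICK_NSEC_EXAMPLE (1000000000UL / HZ_EXAMPLE) /* 10 ms tick */

    static unsigned long model_timespec_to_jiffies(unsigned long sec, long nsec)
    {
        nsec += TICK_NSEC_EXAMPLE - 1;     /* round partial ticks up */
        return sec * HZ_EXAMPLE + nsec / TICK_NSEC_EXAMPLE;
    }

    /* model_timespec_to_jiffies(1, 5000000) == 101: one full second
     * (100 jiffies) plus 5 ms rounded up to a whole 10 ms tick. */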
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) |
{ |
u64 quotient; |
/drivers/ddk/linux/workqueue.c |
---|
1,5 → 1,39 |
/* |
* kernel/workqueue.c - generic async execution with shared worker pool |
* |
* Copyright (C) 2002 Ingo Molnar |
* |
* Derived from the taskqueue/keventd code by: |
* David Woodhouse <dwmw2@infradead.org> |
* Andrew Morton |
* Kai Petzke <wpp@marie.physik.tu-berlin.de> |
* Theodore Ts'o <tytso@mit.edu> |
* |
* Made to use alloc_percpu by Christoph Lameter. |
* |
* Copyright (C) 2010 SUSE Linux Products GmbH |
* Copyright (C) 2010 Tejun Heo <tj@kernel.org> |
* |
* This is the generic async execution mechanism. Work items are |
* executed in process context. The worker pool is shared and |
* automatically managed. There are two worker pools for each CPU (one for |
* normal work items and the other for high priority ones) and some extra |
* pools for workqueues which are not bound to any specific CPU - the |
* number of these backing pools is dynamic. |
* |
* Please read Documentation/workqueue.txt for details. |
*/ |
#include <linux/export.h> |
#include <linux/kernel.h> |
#include <linux/sched.h> |
#include <linux/completion.h> |
#include <linux/workqueue.h> |
#include <linux/slab.h> |
#include <linux/lockdep.h> |
#include <linux/idr.h> |
#include <ddk.h> |
extern int driver_wq_state; |
/drivers/ddk/malloc/malloc.c |
---|
522,7 → 522,7 |
*/ |
#include <ddk.h> |
#include <mutex.h> |
#include <linux/mutex.h> |
#include <syscall.h> |
/* Version identifier to allow people to support multiple versions */ |
/drivers/ddk/stdio/vsprintf.c |
---|
22,11 → 22,12 |
#include <linux/string.h> |
#include <linux/ctype.h> |
#include <linux/kernel.h> |
#include <errno-base.h> |
#include <linux/ioport.h> |
#include <linux/export.h> |
#include <asm/div64.h> |
#include <asm/page.h> /* for PAGE_SIZE */ |
static inline u64 div_u64(u64 dividend, u32 divisor) |
41,10 → 42,6 |
return div_s64_rem(dividend, divisor, &remainder); |
} |
struct va_format { |
const char *fmt; |
va_list *va; |
}; |
#define ZERO_SIZE_PTR ((void *)16) |
62,14 → 59,7 |
/* Works only for digits and letters, but small and fast */ |
#define TOLOWER(x) ((x) | 0x20) |
static inline char *hex_byte_pack(char *buf, u8 byte) |
{ |
*buf++ = hex_asc_hi(byte); |
*buf++ = hex_asc_lo(byte); |
return buf; |
} |
char *skip_spaces(const char *str) |
{ |
while (isspace(*str)) |
1297,6 → 1287,7 |
* %piS depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address |
* %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper |
* case. |
* %*pE[achnops] print an escaped buffer |
* %*ph[CDN] a variable-length hex string with a separator (supports up to 64 |
* bytes of the input) |
* %n is ignored |
/drivers/include/asm/agp.h |
---|
0,0 → 1,31 |
#ifndef _ASM_X86_AGP_H |
#define _ASM_X86_AGP_H |
#include <asm/pgtable.h> |
#include <asm/cacheflush.h> |
/* |
* Functions to keep the agpgart mappings coherent with the MMU. The |
* GART gives the CPU a physical alias of pages in memory. The alias |
* region is mapped uncacheable. Make sure there are no conflicting |
* mappings with different cacheability attributes for the same |
* page. This avoids data corruption on some CPUs. |
*/ |
#define map_page_into_agp(page) set_pages_uc(page, 1) |
#define unmap_page_from_agp(page) set_pages_wb(page, 1) |
/* |
* Could use CLFLUSH here if the cpu supports it. But then it would |
* need to be called for each cacheline of the whole page so it may |
* not be worth it. Would need a page for it. |
*/ |
#define flush_agp_cache() wbinvd() |
/* GATT allocation. Returns/accepts GATT kernel virtual address. */ |
#define alloc_gatt_pages(order) \ |
((char *)__get_free_pages(GFP_KERNEL, (order))) |
#define free_gatt_pages(table, order) \ |
free_pages((unsigned long)(table), (order)) |
#endif /* _ASM_X86_AGP_H */ |
/drivers/include/asm/alternative.h |
---|
0,0 → 1,243 |
#ifndef _ASM_X86_ALTERNATIVE_H |
#define _ASM_X86_ALTERNATIVE_H |
#include <linux/types.h> |
#include <linux/stddef.h> |
#include <linux/stringify.h> |
#include <asm/asm.h> |
/* |
* Alternative inline assembly for SMP. |
* |
* The LOCK_PREFIX macro defined here replaces the LOCK and |
* LOCK_PREFIX macros used everywhere in the source tree. |
* |
* SMP alternatives use the same data structures as the other |
* alternatives and the X86_FEATURE_UP flag to indicate the case of a |
* UP system running a SMP kernel. The existing apply_alternatives() |
* works fine for patching a SMP kernel for UP. |
* |
* The SMP alternative tables can be kept after boot and contain both |
* UP and SMP versions of the instructions to allow switching back to |
* SMP at runtime, when hotplugging in a new CPU, which is especially |
* useful in virtualized environments. |
* |
* The very common lock prefix is handled as special case in a |
* separate table which is a pure address list without replacement ptr |
* and size information. That keeps the table sizes small. |
*/ |
#ifdef CONFIG_SMP |
#define LOCK_PREFIX_HERE \ |
".pushsection .smp_locks,\"a\"\n" \ |
".balign 4\n" \ |
".long 671f - .\n" /* offset */ \ |
".popsection\n" \ |
"671:" |
#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; " |
#else /* ! CONFIG_SMP */ |
#define LOCK_PREFIX_HERE "" |
#define LOCK_PREFIX "" |
#endif |
struct alt_instr { |
s32 instr_offset; /* original instruction */ |
s32 repl_offset; /* offset to replacement instruction */ |
u16 cpuid; /* cpuid bit set for replacement */ |
u8 instrlen; /* length of original instruction */ |
u8 replacementlen; /* length of new instruction, <= instrlen */ |
}; |
extern void alternative_instructions(void); |
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); |
struct module; |
#ifdef CONFIG_SMP |
extern void alternatives_smp_module_add(struct module *mod, char *name, |
void *locks, void *locks_end, |
void *text, void *text_end); |
extern void alternatives_smp_module_del(struct module *mod); |
extern void alternatives_enable_smp(void); |
extern int alternatives_text_reserved(void *start, void *end); |
extern bool skip_smp_alternatives; |
#else |
static inline void alternatives_smp_module_add(struct module *mod, char *name, |
void *locks, void *locks_end, |
void *text, void *text_end) {} |
static inline void alternatives_smp_module_del(struct module *mod) {} |
static inline void alternatives_enable_smp(void) {} |
static inline int alternatives_text_reserved(void *start, void *end) |
{ |
return 0; |
} |
#endif /* CONFIG_SMP */ |
#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n" |
#define b_replacement(number) "663"#number |
#define e_replacement(number) "664"#number |
#define alt_slen "662b-661b" |
#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f" |
#define ALTINSTR_ENTRY(feature, number) \ |
" .long 661b - .\n" /* label */ \ |
" .long " b_replacement(number)"f - .\n" /* new instruction */ \ |
" .word " __stringify(feature) "\n" /* feature bit */ \ |
" .byte " alt_slen "\n" /* source len */ \ |
" .byte " alt_rlen(number) "\n" /* replacement len */ |
#define DISCARD_ENTRY(number) /* rlen <= slen */ \ |
" .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n" |
#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \ |
b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t" |
/* alternative assembly primitive: */ |
#define ALTERNATIVE(oldinstr, newinstr, feature) \ |
OLDINSTR(oldinstr) \ |
".pushsection .altinstructions,\"a\"\n" \ |
ALTINSTR_ENTRY(feature, 1) \ |
".popsection\n" \ |
".pushsection .discard,\"aw\",@progbits\n" \ |
DISCARD_ENTRY(1) \ |
".popsection\n" \ |
".pushsection .altinstr_replacement, \"ax\"\n" \ |
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ |
".popsection" |
#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ |
OLDINSTR(oldinstr) \ |
".pushsection .altinstructions,\"a\"\n" \ |
ALTINSTR_ENTRY(feature1, 1) \ |
ALTINSTR_ENTRY(feature2, 2) \ |
".popsection\n" \ |
".pushsection .discard,\"aw\",@progbits\n" \ |
DISCARD_ENTRY(1) \ |
DISCARD_ENTRY(2) \ |
".popsection\n" \ |
".pushsection .altinstr_replacement, \"ax\"\n" \ |
ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ |
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ |
".popsection" |
/* |
* This must be included *after* the definition of ALTERNATIVE due to |
* <asm/arch_hweight.h> |
*/ |
#include <asm/cpufeature.h> |
/* |
* Alternative instructions for different CPU types or capabilities. |
* |
* This allows the use of optimized instructions even on generic binary |
* kernels. |
* |
* The length of oldinstr must be greater than or equal to the length of |
* newinstr; it can be padded with nops as needed. |
* |
* For non-barrier-like inlines please define new variants |
* without volatile and memory clobber. |
*/ |
#define alternative(oldinstr, newinstr, feature) \ |
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory") |
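As a usage sketch, mirroring how the 32-bit kernel defines its memory barrier: on CPUs with SSE2 the locked add on the stack is patched at boot into a single mfence (feature flag from <asm/cpufeature.h>):

    #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)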
/* |
* Alternative inline assembly with input. |
* |
* Peculiarities: |
* No memory clobber here. |
* Argument numbers start with 1. |
* Best is to use constraints that are fixed size (like (%1) ... "r") |
* If you use variable sized constraints like "m" or "g" in the |
* replacement make sure to pad to the worst case length. |
* Leaving an unused argument 0 to keep API compatibility. |
*/ |
#define alternative_input(oldinstr, newinstr, feature, input...) \ |
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ |
: : "i" (0), ## input) |
/* |
* This is similar to alternative_input. But it has two features and |
* respective instructions. |
* |
* If CPU has feature2, newinstr2 is used. |
* Otherwise, if CPU has feature1, newinstr1 is used. |
* Otherwise, oldinstr is used. |
*/ |
#define alternative_input_2(oldinstr, newinstr1, feature1, newinstr2, \ |
feature2, input...) \ |
asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, \ |
newinstr2, feature2) \ |
: : "i" (0), ## input) |
/* Like alternative_input, but with a single output argument */ |
#define alternative_io(oldinstr, newinstr, feature, output, input...) \ |
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \ |
: output : "i" (0), ## input) |
/* Like alternative_io, but for replacing a direct call with another one. */ |
#define alternative_call(oldfunc, newfunc, feature, output, input...) \ |
asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \ |
: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input) |
/* |
* Like alternative_call, but there are two features and respective functions. |
* If CPU has feature2, function2 is used. |
* Otherwise, if CPU has feature1, function1 is used. |
* Otherwise, old function is used. |
*/ |
#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \ |
output, input...) \ |
asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\ |
"call %P[new2]", feature2) \ |
: output : [old] "i" (oldfunc), [new1] "i" (newfunc1), \ |
[new2] "i" (newfunc2), ## input) |
/* |
* use this macro(s) if you need more than one output parameter |
* in alternative_io |
*/ |
#define ASM_OUTPUT2(a...) a |
/* |
* use this macro if you need clobbers but no inputs in |
* alternative_{input,io,call}() |
*/ |
#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr |
struct paravirt_patch_site; |
#ifdef CONFIG_PARAVIRT |
void apply_paravirt(struct paravirt_patch_site *start, |
struct paravirt_patch_site *end); |
#else |
static inline void apply_paravirt(struct paravirt_patch_site *start, |
struct paravirt_patch_site *end) |
{} |
#define __parainstructions NULL |
#define __parainstructions_end NULL |
#endif |
extern void *text_poke_early(void *addr, const void *opcode, size_t len); |
/* |
* Clear and restore the kernel write-protection flag on the local CPU. |
* Allows the kernel to edit read-only pages. |
* Side-effect: any interrupt handler running between save and restore will have |
* the ability to write to read-only pages. |
* |
* Warning: |
* Code patching in the UP case is safe if NMIs and MCE handlers are stopped and |
* no thread can be preempted in the instructions being modified (no iret to an |
* invalid instruction possible) or if the instructions are changed from a |
* consistent state to another consistent state atomically. |
* On the local CPU you need to be protected against NMI or MCE handlers seeing an |
* inconsistent instruction while you patch. |
*/ |
extern void *text_poke(void *addr, const void *opcode, size_t len); |
extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); |
#endif /* _ASM_X86_ALTERNATIVE_H */ |
/drivers/include/asm/arch_hweight.h |
---|
0,0 → 1,61 |
#ifndef _ASM_X86_HWEIGHT_H |
#define _ASM_X86_HWEIGHT_H |
#ifdef CONFIG_64BIT |
/* popcnt %edi, %eax -- redundant REX prefix for alignment */ |
#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7" |
/* popcnt %rdi, %rax */ |
#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7" |
#define REG_IN "D" |
#define REG_OUT "a" |
#else |
/* popcnt %eax, %eax */ |
#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0" |
#define REG_IN "a" |
#define REG_OUT "a" |
#endif |
/* |
* __sw_hweightXX are called from within the alternatives below |
* and callee-clobbered registers need to be taken care of. See |
* ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective |
* compiler switches. |
*/ |
static inline unsigned int __arch_hweight32(unsigned int w) |
{ |
unsigned int res = 0; |
asm ("call __sw_hweight32" |
: "="REG_OUT (res) |
: REG_IN (w)); |
return res; |
} |
static inline unsigned int __arch_hweight16(unsigned int w) |
{ |
return __arch_hweight32(w & 0xffff); |
} |
static inline unsigned int __arch_hweight8(unsigned int w) |
{ |
return __arch_hweight32(w & 0xff); |
} |
static inline unsigned long __arch_hweight64(__u64 w) |
{ |
unsigned long res = 0; |
#ifdef CONFIG_X86_32 |
return __arch_hweight32((u32)w) + |
__arch_hweight32((u32)(w >> 32)); |
#else |
asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT) |
: "="REG_OUT (res) |
: REG_IN (w)); |
#endif /* CONFIG_X86_32 */ |
return res; |
} |
#endif |
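A quick sanity check of the popcount semantics these helpers implement (assumes the header above is included):

    #include <assert.h>

    static void hweight_sanity(void)
    {
        assert(__arch_hweight32(0xF0F0F0F0u) == 16);
        assert(__arch_hweight16(0x00FFu) == 8);
        assert(__arch_hweight8(0x81u) == 2);
    }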
/drivers/include/asm/asm.h |
---|
0,0 → 1,83 |
#ifndef _ASM_X86_ASM_H |
#define _ASM_X86_ASM_H |
#ifdef __ASSEMBLY__ |
# define __ASM_FORM(x) x |
# define __ASM_FORM_RAW(x) x |
# define __ASM_FORM_COMMA(x) x, |
#else |
# define __ASM_FORM(x) " " #x " " |
# define __ASM_FORM_RAW(x) #x |
# define __ASM_FORM_COMMA(x) " " #x "," |
#endif |
#ifdef CONFIG_X86_32 |
# define __ASM_SEL(a,b) __ASM_FORM(a) |
# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a) |
#else |
# define __ASM_SEL(a,b) __ASM_FORM(b) |
# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b) |
#endif |
#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \ |
inst##q##__VA_ARGS__) |
#define __ASM_REG(reg) __ASM_SEL_RAW(e##reg, r##reg) |
#define _ASM_PTR __ASM_SEL(.long, .quad) |
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8) |
#define _ASM_MOV __ASM_SIZE(mov) |
#define _ASM_INC __ASM_SIZE(inc) |
#define _ASM_DEC __ASM_SIZE(dec) |
#define _ASM_ADD __ASM_SIZE(add) |
#define _ASM_SUB __ASM_SIZE(sub) |
#define _ASM_XADD __ASM_SIZE(xadd) |
#define _ASM_AX __ASM_REG(ax) |
#define _ASM_BX __ASM_REG(bx) |
#define _ASM_CX __ASM_REG(cx) |
#define _ASM_DX __ASM_REG(dx) |
#define _ASM_SP __ASM_REG(sp) |
#define _ASM_BP __ASM_REG(bp) |
#define _ASM_SI __ASM_REG(si) |
#define _ASM_DI __ASM_REG(di) |
/* Exception table entry */ |
#ifdef __ASSEMBLY__ |
# define _ASM_EXTABLE(from,to) \ |
.pushsection "__ex_table","a" ; \ |
.balign 8 ; \ |
.long (from) - . ; \ |
.long (to) - . ; \ |
.popsection |
# define _ASM_EXTABLE_EX(from,to) \ |
.pushsection "__ex_table","a" ; \ |
.balign 8 ; \ |
.long (from) - . ; \ |
.long (to) - . + 0x7ffffff0 ; \ |
.popsection |
# define _ASM_NOKPROBE(entry) \ |
.pushsection "_kprobe_blacklist","aw" ; \ |
_ASM_ALIGN ; \ |
_ASM_PTR (entry); \ |
.popsection |
#else |
# define _ASM_EXTABLE(from,to) \ |
" .pushsection \"__ex_table\",\"a\"\n" \ |
" .balign 8\n" \ |
" .long (" #from ") - .\n" \ |
" .long (" #to ") - .\n" \ |
" .popsection\n" |
# define _ASM_EXTABLE_EX(from,to) \ |
" .pushsection \"__ex_table\",\"a\"\n" \ |
" .balign 8\n" \ |
" .long (" #from ") - .\n" \ |
" .long (" #to ") - . + 0x7ffffff0\n" \ |
" .popsection\n" |
/* For C file, we already have NOKPROBE_SYMBOL macro */ |
#endif |
#endif /* _ASM_X86_ASM_H */ |
/drivers/include/asm/atomic.h |
---|
0,0 → 1,238 |
#ifndef _ASM_X86_ATOMIC_H |
#define _ASM_X86_ATOMIC_H |
#include <linux/compiler.h> |
#include <linux/types.h> |
#include <asm/processor.h> |
#include <asm/alternative.h> |
#include <asm/cmpxchg.h> |
#include <asm/rmwcc.h> |
#include <asm/barrier.h> |
/* |
* Atomic operations that C can't guarantee us. Useful for |
* resource counting etc.. |
*/ |
#define ATOMIC_INIT(i) { (i) } |
/** |
* atomic_read - read atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically reads the value of @v. |
*/ |
static inline int atomic_read(const atomic_t *v) |
{ |
return ACCESS_ONCE((v)->counter); |
} |
/** |
* atomic_set - set atomic variable |
* @v: pointer of type atomic_t |
* @i: required value |
* |
* Atomically sets the value of @v to @i. |
*/ |
static inline void atomic_set(atomic_t *v, int i) |
{ |
v->counter = i; |
} |
/** |
* atomic_add - add integer to atomic variable |
* @i: integer value to add |
* @v: pointer of type atomic_t |
* |
* Atomically adds @i to @v. |
*/ |
static inline void atomic_add(int i, atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "addl %1,%0" |
: "+m" (v->counter) |
: "ir" (i)); |
} |
/** |
* atomic_sub - subtract integer from atomic variable |
* @i: integer value to subtract |
* @v: pointer of type atomic_t |
* |
* Atomically subtracts @i from @v. |
*/ |
static inline void atomic_sub(int i, atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "subl %1,%0" |
: "+m" (v->counter) |
: "ir" (i)); |
} |
/** |
* atomic_sub_and_test - subtract value from variable and test result |
* @i: integer value to subtract |
* @v: pointer of type atomic_t |
* |
* Atomically subtracts @i from @v and returns |
* true if the result is zero, or false for all |
* other cases. |
*/ |
static inline int atomic_sub_and_test(int i, atomic_t *v) |
{ |
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e"); |
} |
/** |
* atomic_inc - increment atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically increments @v by 1. |
*/ |
static inline void atomic_inc(atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "incl %0" |
: "+m" (v->counter)); |
} |
/** |
* atomic_dec - decrement atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically decrements @v by 1. |
*/ |
static inline void atomic_dec(atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "decl %0" |
: "+m" (v->counter)); |
} |
/** |
* atomic_dec_and_test - decrement and test |
* @v: pointer of type atomic_t |
* |
* Atomically decrements @v by 1 and |
* returns true if the result is 0, or false for all other |
* cases. |
*/ |
static inline int atomic_dec_and_test(atomic_t *v) |
{ |
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e"); |
} |
/** |
* atomic_inc_and_test - increment and test |
* @v: pointer of type atomic_t |
* |
* Atomically increments @v by 1 |
* and returns true if the result is zero, or false for all |
* other cases. |
*/ |
static inline int atomic_inc_and_test(atomic_t *v) |
{ |
GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e"); |
} |
/** |
* atomic_add_negative - add and test if negative |
* @i: integer value to add |
* @v: pointer of type atomic_t |
* |
* Atomically adds @i to @v and returns true |
* if the result is negative, or false when |
* result is greater than or equal to zero. |
*/ |
static inline int atomic_add_negative(int i, atomic_t *v) |
{ |
GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s"); |
} |
/** |
* atomic_add_return - add integer and return |
* @i: integer value to add |
* @v: pointer of type atomic_t |
* |
* Atomically adds @i to @v and returns @i + @v |
*/ |
static inline int atomic_add_return(int i, atomic_t *v) |
{ |
return i + xadd(&v->counter, i); |
} |
/** |
* atomic_sub_return - subtract integer and return |
* @v: pointer of type atomic_t |
* @i: integer value to subtract |
* |
* Atomically subtracts @i from @v and returns @v - @i |
*/ |
static inline int atomic_sub_return(int i, atomic_t *v) |
{ |
return atomic_add_return(-i, v); |
} |
#define atomic_inc_return(v) (atomic_add_return(1, v)) |
#define atomic_dec_return(v) (atomic_sub_return(1, v)) |
static inline int atomic_cmpxchg(atomic_t *v, int old, int new) |
{ |
return cmpxchg(&v->counter, old, new); |
} |
static inline int atomic_xchg(atomic_t *v, int new) |
{ |
return xchg(&v->counter, new); |
} |
/** |
* __atomic_add_unless - add unless the number is already a given value |
* @v: pointer of type atomic_t |
* @a: the amount to add to v... |
* @u: ...unless v is equal to u. |
* |
* Atomically adds @a to @v, so long as @v was not already @u. |
* Returns the old value of @v. |
*/ |
static inline int __atomic_add_unless(atomic_t *v, int a, int u) |
{ |
int c, old; |
c = atomic_read(v); |
for (;;) { |
if (unlikely(c == (u))) |
break; |
old = atomic_cmpxchg((v), c, c + (a)); |
if (likely(old == c)) |
break; |
c = old; |
} |
return c; |
} |
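A common use of this primitive is a "take a reference unless the object is already dead" grab, which is how atomic_inc_not_zero() is built. A hedged sketch (the wrapper name is illustrative):

    /* Take a reference only if the count is still non-zero: returns
     * non-zero on success, zero if the count had already dropped to
     * zero and the object may be on its way to being freed. */
    static inline int ref_get_unless_zero(atomic_t *refcount)
    {
        return __atomic_add_unless(refcount, 1, 0) != 0;
    }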
/** |
* atomic_inc_short - increment of a short integer |
* @v: pointer to type short int |
* |
* Atomically adds 1 to @v |
* Returns the new value of @v |
*/ |
static inline short int atomic_inc_short(short int *v) |
{ |
asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v)); |
return *v; |
} |
/* These are x86-specific, used by some header files */ |
#define atomic_clear_mask(mask, addr) \ |
asm volatile(LOCK_PREFIX "andl %0,%1" \ |
: : "r" (~(mask)), "m" (*(addr)) : "memory") |
#define atomic_set_mask(mask, addr) \ |
asm volatile(LOCK_PREFIX "orl %0,%1" \ |
: : "r" ((unsigned)(mask)), "m" (*(addr)) \ |
: "memory") |
#ifdef CONFIG_X86_32 |
# include <asm/atomic64_32.h> |
#else |
# include <asm/atomic64_64.h> |
#endif |
#endif /* _ASM_X86_ATOMIC_H */ |
/drivers/include/asm/atomic64_32.h |
---|
0,0 → 1,315 |
#ifndef _ASM_X86_ATOMIC64_32_H |
#define _ASM_X86_ATOMIC64_32_H |
#include <linux/compiler.h> |
#include <linux/types.h> |
#include <asm/processor.h> |
//#include <asm/cmpxchg.h> |
/* A 64-bit atomic type */ |
typedef struct { |
u64 __aligned(8) counter; |
} atomic64_t; |
#define ATOMIC64_INIT(val) { (val) } |
#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...) |
#ifndef ATOMIC64_EXPORT |
#define ATOMIC64_DECL_ONE __ATOMIC64_DECL |
#else |
#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \ |
ATOMIC64_EXPORT(atomic64_##sym) |
#endif |
#ifdef CONFIG_X86_CMPXCHG64 |
#define __alternative_atomic64(f, g, out, in...) \ |
asm volatile("call %P[func]" \ |
: out : [func] "i" (atomic64_##g##_cx8), ## in) |
#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8) |
#else |
#define __alternative_atomic64(f, g, out, in...) \ |
alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \ |
X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in) |
#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \ |
ATOMIC64_DECL_ONE(sym##_386) |
ATOMIC64_DECL_ONE(add_386); |
ATOMIC64_DECL_ONE(sub_386); |
ATOMIC64_DECL_ONE(inc_386); |
ATOMIC64_DECL_ONE(dec_386); |
#endif |
#define alternative_atomic64(f, out, in...) \ |
__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in) |
ATOMIC64_DECL(read); |
ATOMIC64_DECL(set); |
ATOMIC64_DECL(xchg); |
ATOMIC64_DECL(add_return); |
ATOMIC64_DECL(sub_return); |
ATOMIC64_DECL(inc_return); |
ATOMIC64_DECL(dec_return); |
ATOMIC64_DECL(dec_if_positive); |
ATOMIC64_DECL(inc_not_zero); |
ATOMIC64_DECL(add_unless); |
#undef ATOMIC64_DECL |
#undef ATOMIC64_DECL_ONE |
#undef __ATOMIC64_DECL |
#undef ATOMIC64_EXPORT |
/** |
* atomic64_cmpxchg - cmpxchg atomic64 variable |
* @v: pointer to type atomic64_t |
* @o: expected value |
* @n: new value |
* |
* Atomically sets @v to @n if it was equal to @o and returns |
* the old value. |
*/ |
static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) |
{ |
return cmpxchg64(&v->counter, o, n); |
} |
/** |
* atomic64_xchg - xchg atomic64 variable |
* @v: pointer to type atomic64_t |
* @n: value to assign |
* |
* Atomically xchgs the value of @v to @n and returns |
* the old value. |
*/ |
static inline long long atomic64_xchg(atomic64_t *v, long long n) |
{ |
long long o; |
unsigned high = (unsigned)(n >> 32); |
unsigned low = (unsigned)n; |
asm volatile( |
"1: \n\t" |
"cmpxchg8b (%%esi) \n\t" |
"jnz 1b \n\t" |
:"=&A" (o) |
:"S" (v), "b" (low), "c" (high) |
: "memory", "cc"); |
return o; |
} |
/** |
* atomic64_set - set atomic64 variable |
* @v: pointer to type atomic64_t |
* @i: value to assign |
* |
* Atomically sets the value of @v to @i. |
*/ |
static inline void atomic64_set(atomic64_t *v, long long i) |
{ |
__sync_lock_test_and_set((long long *)&v->counter, i); |
} |
/** |
* atomic64_read - read atomic64 variable |
* @v: pointer to type atomic64_t |
* |
* Atomically reads the value of @v and returns it. |
*/ |
static inline long long atomic64_read(const atomic64_t *v) |
{ |
return __sync_fetch_and_add( (long long *)&v->counter, 0); |
} |
/** |
* atomic64_add_return - add and return |
* @i: integer value to add |
* @v: pointer to type atomic64_t |
* |
* Atomically adds @i to @v and returns @i + *@v |
*/ |
static inline long long atomic64_add_return(long long i, atomic64_t *v) |
{ |
alternative_atomic64(add_return, |
ASM_OUTPUT2("+A" (i), "+c" (v)), |
ASM_NO_INPUT_CLOBBER("memory")); |
return i; |
} |
/* |
* Other variants with different arithmetic operators: |
*/ |
static inline long long atomic64_sub_return(long long i, atomic64_t *v) |
{ |
alternative_atomic64(sub_return, |
ASM_OUTPUT2("+A" (i), "+c" (v)), |
ASM_NO_INPUT_CLOBBER("memory")); |
return i; |
} |
static inline long long atomic64_inc_return(atomic64_t *v) |
{ |
long long a; |
alternative_atomic64(inc_return, "=&A" (a), |
"S" (v) : "memory", "ecx"); |
return a; |
} |
static inline long long atomic64_dec_return(atomic64_t *v) |
{ |
long long a; |
alternative_atomic64(dec_return, "=&A" (a), |
"S" (v) : "memory", "ecx"); |
return a; |
} |
/** |
* atomic64_add - add integer to atomic64 variable |
* @i: integer value to add |
* @v: pointer to type atomic64_t |
* |
* Atomically adds @i to @v. |
*/ |
static inline long long atomic64_add(long long i, atomic64_t *v) |
{ |
__alternative_atomic64(add, add_return, |
ASM_OUTPUT2("+A" (i), "+c" (v)), |
ASM_NO_INPUT_CLOBBER("memory")); |
return i; |
} |
/** |
* atomic64_sub - subtract the atomic64 variable |
* @i: integer value to subtract |
* @v: pointer to type atomic64_t |
* |
* Atomically subtracts @i from @v. |
*/ |
static inline long long atomic64_sub(long long i, atomic64_t *v) |
{ |
__alternative_atomic64(sub, sub_return, |
ASM_OUTPUT2("+A" (i), "+c" (v)), |
ASM_NO_INPUT_CLOBBER("memory")); |
return i; |
} |
/** |
* atomic64_sub_and_test - subtract value from variable and test result |
* @i: integer value to subtract |
* @v: pointer to type atomic64_t |
* |
* Atomically subtracts @i from @v and returns |
* true if the result is zero, or false for all |
* other cases. |
*/ |
static inline int atomic64_sub_and_test(long long i, atomic64_t *v) |
{ |
return atomic64_sub_return(i, v) == 0; |
} |
/** |
* atomic64_inc - increment atomic64 variable |
* @v: pointer to type atomic64_t |
* |
* Atomically increments @v by 1. |
*/ |
static inline void atomic64_inc(atomic64_t *v) |
{ |
__alternative_atomic64(inc, inc_return, /* no output */, |
"S" (v) : "memory", "eax", "ecx", "edx"); |
} |
/** |
* atomic64_dec - decrement atomic64 variable |
* @v: pointer to type atomic64_t |
* |
* Atomically decrements @v by 1. |
*/ |
static inline void atomic64_dec(atomic64_t *v) |
{ |
__alternative_atomic64(dec, dec_return, /* no output */, |
"S" (v) : "memory", "eax", "ecx", "edx"); |
} |
/** |
* atomic64_dec_and_test - decrement and test |
* @v: pointer to type atomic64_t |
* |
* Atomically decrements @v by 1 and |
* returns true if the result is 0, or false for all other |
* cases. |
*/ |
static inline int atomic64_dec_and_test(atomic64_t *v) |
{ |
return atomic64_dec_return(v) == 0; |
} |
/** |
* atomic64_inc_and_test - increment and test |
* @v: pointer to type atomic64_t |
* |
* Atomically increments @v by 1 |
* and returns true if the result is zero, or false for all |
* other cases. |
*/ |
static inline int atomic64_inc_and_test(atomic64_t *v) |
{ |
return atomic64_inc_return(v) == 0; |
} |
/** |
* atomic64_add_negative - add and test if negative |
* @i: integer value to add |
* @v: pointer to type atomic64_t |
* |
* Atomically adds @i to @v and returns true |
* if the result is negative, or false when |
* result is greater than or equal to zero. |
*/ |
static inline int atomic64_add_negative(long long i, atomic64_t *v) |
{ |
return atomic64_add_return(i, v) < 0; |
} |
/** |
* atomic64_add_unless - add unless the number is a given value |
* @v: pointer of type atomic64_t |
* @a: the amount to add to v... |
* @u: ...unless v is equal to u. |
* |
* Atomically adds @a to @v, so long as @v was not @u. |
* Returns non-zero if the add was done, zero otherwise. |
*/ |
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) |
{ |
unsigned low = (unsigned)u; |
unsigned high = (unsigned)(u >> 32); |
alternative_atomic64(add_unless, |
ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)), |
"S" (v) : "memory"); |
return (int)a; |
} |
static inline int atomic64_inc_not_zero(atomic64_t *v) |
{ |
int r; |
alternative_atomic64(inc_not_zero, "=&a" (r), |
"S" (v) : "ecx", "edx", "memory"); |
return r; |
} |
static inline long long atomic64_dec_if_positive(atomic64_t *v) |
{ |
long long r; |
alternative_atomic64(dec_if_positive, "=&A" (r), |
"S" (v) : "ecx", "memory"); |
return r; |
} |
#undef alternative_atomic64 |
#undef __alternative_atomic64 |
#endif /* _ASM_X86_ATOMIC64_32_H */ |
/drivers/include/asm/atomic_32.h |
---|
0,0 → 1,441 |
#ifndef _ASM_X86_ATOMIC_32_H |
#define _ASM_X86_ATOMIC_32_H |
#include <linux/compiler.h> |
#include <linux/types.h> |
#include <asm/processor.h> |
#include <asm/cmpxchg.h> |
/* |
* Atomic operations that C can't guarantee us. Useful for |
* resource counting etc.. |
*/ |
#define ATOMIC_INIT(i) { (i) } |
/** |
* atomic_read - read atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically reads the value of @v. |
*/ |
static inline int atomic_read(const atomic_t *v) |
{ |
return v->counter; |
} |
/** |
* atomic_set - set atomic variable |
* @v: pointer of type atomic_t |
* @i: required value |
* |
* Atomically sets the value of @v to @i. |
*/ |
static inline void atomic_set(atomic_t *v, int i) |
{ |
v->counter = i; |
} |
/** |
* atomic_add - add integer to atomic variable |
* @i: integer value to add |
* @v: pointer of type atomic_t |
* |
* Atomically adds @i to @v. |
*/ |
static inline void atomic_add(int i, atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "addl %1,%0" |
: "+m" (v->counter) |
: "ir" (i)); |
} |
/** |
* atomic_sub - subtract integer from atomic variable |
* @i: integer value to subtract |
* @v: pointer of type atomic_t |
* |
* Atomically subtracts @i from @v. |
*/ |
static inline void atomic_sub(int i, atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "subl %1,%0" |
: "+m" (v->counter) |
: "ir" (i)); |
} |
/** |
* atomic_sub_and_test - subtract value from variable and test result |
* @i: integer value to subtract |
* @v: pointer of type atomic_t |
* |
* Atomically subtracts @i from @v and returns |
* true if the result is zero, or false for all |
* other cases. |
*/ |
static inline int atomic_sub_and_test(int i, atomic_t *v) |
{ |
unsigned char c; |
asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" |
: "+m" (v->counter), "=qm" (c) |
: "ir" (i) : "memory"); |
return c; |
} |
/** |
* atomic_inc - increment atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically increments @v by 1. |
*/ |
static inline void atomic_inc(atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "incl %0" |
: "+m" (v->counter)); |
} |
/** |
* atomic_dec - decrement atomic variable |
* @v: pointer of type atomic_t |
* |
* Atomically decrements @v by 1. |
*/ |
static inline void atomic_dec(atomic_t *v) |
{ |
asm volatile(LOCK_PREFIX "decl %0" |
: "+m" (v->counter)); |
} |
/** |
* atomic_dec_and_test - decrement and test |
* @v: pointer of type atomic_t |
* |
* Atomically decrements @v by 1 and |
* returns true if the result is 0, or false for all other |
* cases. |
*/ |
static inline int atomic_dec_and_test(atomic_t *v) |
{ |
unsigned char c; |
asm volatile(LOCK_PREFIX "decl %0; sete %1" |
: "+m" (v->counter), "=qm" (c) |
: : "memory"); |
return c != 0; |
} |
/** |
* atomic_inc_and_test - increment and test |
* @v: pointer of type atomic_t |
* |
* Atomically increments @v by 1 |
* and returns true if the result is zero, or false for all |
* other cases. |
*/ |
static inline int atomic_inc_and_test(atomic_t *v) |
{ |
unsigned char c; |
asm volatile(LOCK_PREFIX "incl %0; sete %1" |
: "+m" (v->counter), "=qm" (c) |
: : "memory"); |
return c != 0; |
} |
/** |
* atomic_add_negative - add and test if negative |
* @v: pointer of type atomic_t |
* @i: integer value to add |
* |
* Atomically adds @i to @v and returns true |
* if the result is negative, or false when |
* result is greater than or equal to zero. |
*/ |
static inline int atomic_add_negative(int i, atomic_t *v) |
{ |
unsigned char c; |
asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" |
: "+m" (v->counter), "=qm" (c) |
: "ir" (i) : "memory"); |
return c; |
} |
/** |
* atomic_add_return - add integer and return |
* @v: pointer of type atomic_t |
* @i: integer value to add |
* |
* Atomically adds @i to @v and returns @i + @v |
*/ |
static inline int atomic_add_return(int i, atomic_t *v) |
{ |
int __i; |
#ifdef CONFIG_M386 |
unsigned long flags; |
if (unlikely(boot_cpu_data.x86 <= 3)) |
goto no_xadd; |
#endif |
/* Modern 486+ processor */ |
__i = i; |
asm volatile(LOCK_PREFIX "xaddl %0, %1" |
: "+r" (i), "+m" (v->counter) |
: : "memory"); |
return i + __i; |
#ifdef CONFIG_M386 |
no_xadd: /* Legacy 386 processor */ |
local_irq_save(flags); |
__i = atomic_read(v); |
atomic_set(v, i + __i); |
local_irq_restore(flags); |
return i + __i; |
#endif |
} |
/** |
* atomic_sub_return - subtract integer and return |
* @v: pointer of type atomic_t |
* @i: integer value to subtract |
* |
* Atomically subtracts @i from @v and returns @v - @i |
*/ |
static inline int atomic_sub_return(int i, atomic_t *v) |
{ |
return atomic_add_return(-i, v); |
} |
static inline int atomic_cmpxchg(atomic_t *v, int old, int new) |
{ |
return cmpxchg(&v->counter, old, new); |
} |
static inline int atomic_xchg(atomic_t *v, int new) |
{ |
return xchg(&v->counter, new); |
} |
/** |
* atomic_add_unless - add unless the number is already a given value |
* @v: pointer of type atomic_t |
* @a: the amount to add to v... |
* @u: ...unless v is equal to u. |
* |
* Atomically adds @a to @v, so long as @v was not already @u. |
* Returns non-zero if @v was not @u, and zero otherwise. |
*/ |
static inline int atomic_add_unless(atomic_t *v, int a, int u) |
{ |
int c, old; |
c = atomic_read(v); |
for (;;) { |
if (unlikely(c == (u))) |
break; |
old = atomic_cmpxchg((v), c, c + (a)); |
if (likely(old == c)) |
break; |
c = old; |
} |
return c != (u); |
} |
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
#define atomic_inc_return(v) (atomic_add_return(1, v)) |
#define atomic_dec_return(v) (atomic_sub_return(1, v)) |
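/* |
* Usage sketch (illustrative, not part of the original header): the |
* cmpxchg retry loop above is the usual shape of any conditional |
* atomic update. A typical consumer is a lookup path that must not |
* revive an object whose reference count already dropped to zero: |
*/ |
static inline int example_try_get(atomic_t *refcount) |
{ |
/* succeeds only while the count is still non-zero */ |
return atomic_inc_not_zero(refcount); |
} |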
/* These are x86-specific, used by some header files */ |
#define atomic_clear_mask(mask, addr) \ |
asm volatile(LOCK_PREFIX "andl %0,%1" \ |
: : "r" (~(mask)), "m" (*(addr)) : "memory") |
#define atomic_set_mask(mask, addr) \ |
asm volatile(LOCK_PREFIX "orl %0,%1" \ |
: : "r" (mask), "m" (*(addr)) : "memory") |
/* Atomic operations are already serializing on x86 */ |
#define smp_mb__before_atomic_dec() barrier() |
#define smp_mb__after_atomic_dec() barrier() |
#define smp_mb__before_atomic_inc() barrier() |
#define smp_mb__after_atomic_inc() barrier() |
/* A 64-bit atomic type */ |
typedef struct { |
u64 __aligned(8) counter; |
} atomic64_t; |
extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val); |
/** |
* atomic64_xchg - xchg atomic64 variable |
* @v: pointer to type atomic64_t |
* @n: value to assign |
* |
* Atomically xchgs the value of @v to @n and returns |
* the old value. |
*/ |
static inline long long atomic64_xchg(atomic64_t *v, long long n) |
{ |
long long o; |
unsigned high = (unsigned)(n >> 32); |
unsigned low = (unsigned)n; |
asm volatile( |
"1: \n\t" |
"cmpxchg8b (%%esi) \n\t" |
"jnz 1b \n\t" |
:"=&A" (o) |
:"S" (v), "b" (low), "c" (high) |
: "memory", "cc"); |
return o; |
} |
/** |
* atomic64_set - set atomic64 variable |
* @v: pointer to type atomic64_t |
* @i: value to assign |
* |
* Atomically sets the value of @v to @i. |
*/ |
static inline void atomic64_set(atomic64_t *v, long long i) |
{ |
unsigned high = (unsigned)(i >> 32); |
unsigned low = (unsigned)i; |
asm volatile ( |
"1: \n\t" |
"cmpxchg8b (%%esi) \n\t" |
"jnz 1b \n\t" |
: |
:"S" (v), "b" (low), "c" (high) |
: "eax", "edx", "memory", "cc"); |
} |
/** |
* atomic64_read - read atomic64 variable |
* @ptr: pointer to type atomic64_t |
* |
* Atomically reads the value of @ptr and returns it. |
*/ |
static inline u64 atomic64_read(atomic64_t *ptr) |
{ |
u64 res; |
/* |
* Note, we inline this atomic64_t primitive because |
* it only clobbers EAX/EDX and leaves the others |
* untouched. We also (somewhat subtly) rely on the |
* fact that cmpxchg8b returns the current 64-bit value |
* of the memory location we are touching: |
*/ |
asm volatile( |
"mov %%ebx, %%eax\n\t" |
"mov %%ecx, %%edx\n\t" |
LOCK_PREFIX "cmpxchg8b %1\n" |
: "=&A" (res) |
: "m" (*ptr) |
); |
return res; |
} |
/** |
* atomic64_add_return - add and return |
* @delta: integer value to add |
* @ptr: pointer to type atomic64_t |
* |
* Atomically adds @delta to @ptr and returns @delta + *@ptr |
*/ |
extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr); |
/* |
* Other variants with different arithmetic operators: |
*/ |
extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr); |
extern u64 atomic64_inc_return(atomic64_t *ptr); |
extern u64 atomic64_dec_return(atomic64_t *ptr); |
/** |
* atomic64_add - add integer to atomic64 variable |
* @delta: integer value to add |
* @ptr: pointer to type atomic64_t |
* |
* Atomically adds @delta to @ptr. |
*/ |
extern void atomic64_add(u64 delta, atomic64_t *ptr); |
/** |
* atomic64_sub - subtract the atomic64 variable |
* @delta: integer value to subtract |
* @ptr: pointer to type atomic64_t |
* |
* Atomically subtracts @delta from @ptr. |
*/ |
extern void atomic64_sub(u64 delta, atomic64_t *ptr); |
/** |
* atomic64_sub_and_test - subtract value from variable and test result |
* @delta: integer value to subtract |
* @ptr: pointer to type atomic64_t |
* |
* Atomically subtracts @delta from @ptr and returns |
* true if the result is zero, or false for all |
* other cases. |
*/ |
extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr); |
/** |
* atomic64_inc - increment atomic64 variable |
* @ptr: pointer to type atomic64_t |
* |
* Atomically increments @ptr by 1. |
*/ |
extern void atomic64_inc(atomic64_t *ptr); |
/** |
* atomic64_dec - decrement atomic64 variable |
* @ptr: pointer to type atomic64_t |
* |
* Atomically decrements @ptr by 1. |
*/ |
extern void atomic64_dec(atomic64_t *ptr); |
/** |
* atomic64_dec_and_test - decrement and test |
* @ptr: pointer to type atomic64_t |
* |
* Atomically decrements @ptr by 1 and |
* returns true if the result is 0, or false for all other |
* cases. |
*/ |
extern int atomic64_dec_and_test(atomic64_t *ptr); |
/** |
* atomic64_inc_and_test - increment and test |
* @ptr: pointer to type atomic64_t |
* |
* Atomically increments @ptr by 1 |
* and returns true if the result is zero, or false for all |
* other cases. |
*/ |
extern int atomic64_inc_and_test(atomic64_t *ptr); |
/** |
* atomic64_add_negative - add and test if negative |
* @delta: integer value to add |
* @ptr: pointer to type atomic64_t |
* |
* Atomically adds @delta to @ptr and returns true |
* if the result is negative, or false when |
* result is greater than or equal to zero. |
*/ |
extern int atomic64_add_negative(u64 delta, atomic64_t *ptr); |
#include <asm-generic/atomic-long.h> |
#endif /* _ASM_X86_ATOMIC_32_H */ |
/drivers/include/asm/barrier.h |
---|
0,0 → 1,107 |
#ifndef _ASM_X86_BARRIER_H |
#define _ASM_X86_BARRIER_H |
#include <asm/alternative.h> |
#include <asm/nops.h> |
/* |
* Force strict CPU ordering. |
* And yes, this is required on UP too when we're talking |
* to devices. |
*/ |
#ifdef CONFIG_X86_32 |
/* |
* Some non-Intel clones support out-of-order stores. wmb() ceases to |
* be a nop for these. |
*/ |
#define mb() asm volatile ("lock; addl $0,0(%esp)")/*, "mfence", X86_FEATURE_XMM2) */ |
#define rmb() asm volatile("lock; addl $0,0(%esp)")/*, "lfence", X86_FEATURE_XMM2) */ |
#define wmb() asm volatile("lock; addl $0,0(%esp)")/*, "sfence", X86_FEATURE_XMM) */ |
#else |
#define mb() asm volatile("mfence":::"memory") |
#define rmb() asm volatile("lfence":::"memory") |
#define wmb() asm volatile("sfence" ::: "memory") |
#endif |
#ifdef CONFIG_X86_PPRO_FENCE |
#define dma_rmb() rmb() |
#else |
#define dma_rmb() barrier() |
#endif |
#define dma_wmb() barrier() |
#ifdef CONFIG_SMP |
#define smp_mb() mb() |
#define smp_rmb() dma_rmb() |
#define smp_wmb() barrier() |
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) |
#else /* !SMP */ |
#define smp_mb() barrier() |
#define smp_rmb() barrier() |
#define smp_wmb() barrier() |
#define set_mb(var, value) do { var = value; barrier(); } while (0) |
#endif /* SMP */ |
#define read_barrier_depends() do { } while (0) |
#define smp_read_barrier_depends() do { } while (0) |
#if defined(CONFIG_X86_PPRO_FENCE) |
/* |
* For this option x86 doesn't have a strong TSO memory |
* model and we should fall back to full barriers. |
*/ |
#define smp_store_release(p, v) \ |
do { \ |
compiletime_assert_atomic_type(*p); \ |
smp_mb(); \ |
ACCESS_ONCE(*p) = (v); \ |
} while (0) |
#define smp_load_acquire(p) \ |
({ \ |
typeof(*p) ___p1 = ACCESS_ONCE(*p); \ |
compiletime_assert_atomic_type(*p); \ |
smp_mb(); \ |
___p1; \ |
}) |
#else /* regular x86 TSO memory ordering */ |
#define smp_store_release(p, v) \ |
do { \ |
compiletime_assert_atomic_type(*p); \ |
barrier(); \ |
ACCESS_ONCE(*p) = (v); \ |
} while (0) |
#define smp_load_acquire(p) \ |
({ \ |
typeof(*p) ___p1 = ACCESS_ONCE(*p); \ |
compiletime_assert_atomic_type(*p); \ |
barrier(); \ |
___p1; \ |
}) |
#endif |
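/* |
* Usage sketch (illustrative): a single-producer/single-consumer |
* handoff. On regular TSO x86 both macros compile down to a plain |
* access plus a compiler barrier, but they still document the |
* required ordering: |
*/ |
struct example_msg { int payload; int ready; }; /* hypothetical type */ |
static inline void example_publish(struct example_msg *m, int v) |
{ |
m->payload = v; /* ordinary store */ |
smp_store_release(&m->ready, 1); /* payload visible before flag */ |
} |
static inline int example_consume(struct example_msg *m, int *v) |
{ |
if (!smp_load_acquire(&m->ready)) /* pairs with the release above */ |
return 0; |
*v = m->payload; /* ordered after the acquire load */ |
return 1; |
} |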
/* Atomic operations are already serializing on x86 */ |
#define smp_mb__before_atomic() barrier() |
#define smp_mb__after_atomic() barrier() |
/* |
* Stop RDTSC speculation. This is needed when you need to use RDTSC |
* (or get_cycles or vread that possibly accesses the TSC) in a defined |
* code region. |
* |
* (Could use a three-way alternative for this if there was one.) |
*/ |
static __always_inline void rdtsc_barrier(void) |
{ |
alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); |
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); |
} |
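/* |
* Usage sketch (illustrative): fence before reading the TSC so the |
* measurement cannot start before earlier instructions have retired: |
*/ |
static __always_inline unsigned long long example_rdtsc_ordered(void) |
{ |
unsigned int lo, hi; |
rdtsc_barrier(); /* stop RDTSC speculation */ |
asm volatile("rdtsc" : "=a" (lo), "=d" (hi)); |
return ((unsigned long long)hi << 32) | lo; |
} |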
#endif /* _ASM_X86_BARRIER_H */ |
/drivers/include/asm/bitops.h |
---|
0,0 → 1,509 |
#ifndef _ASM_X86_BITOPS_H |
#define _ASM_X86_BITOPS_H |
/* |
* Copyright 1992, Linus Torvalds. |
* |
* Note: inlines with more than a single statement should be marked |
* __always_inline to avoid problems with older gcc's inlining heuristics. |
*/ |
#ifndef _LINUX_BITOPS_H |
#error only <linux/bitops.h> can be included directly |
#endif |
#include <linux/compiler.h> |
#include <asm/alternative.h> |
#include <asm/rmwcc.h> |
#include <asm/barrier.h> |
#if BITS_PER_LONG == 32 |
# define _BITOPS_LONG_SHIFT 5 |
#elif BITS_PER_LONG == 64 |
# define _BITOPS_LONG_SHIFT 6 |
#else |
# error "Unexpected BITS_PER_LONG" |
#endif |
#define BIT_64(n) (U64_C(1) << (n)) |
/* |
* These have to be done with inline assembly: that way the bit-setting |
* is guaranteed to be atomic. All bit operations return 0 if the bit |
* was cleared before the operation and != 0 if it was not. |
* |
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
*/ |
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1) |
/* Technically wrong, but this avoids compilation errors on some gcc |
versions. */ |
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x)) |
#else |
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x)) |
#endif |
#define ADDR BITOP_ADDR(addr) |
/* |
* We do the locked ops that don't return the old value as |
* a mask operation on a byte. |
*/ |
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) |
#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) |
#define CONST_MASK(nr) (1 << ((nr) & 7)) |
/** |
* set_bit - Atomically set a bit in memory |
* @nr: the bit to set |
* @addr: the address to start counting from |
* |
* This function is atomic and may not be reordered. See __set_bit() |
* if you do not require the atomic guarantees. |
* |
* Note: there are no guarantees that this function will not be reordered |
* on non x86 architectures, so if you are writing portable code, |
* make sure not to rely on its reordering guarantees. |
* |
* Note that @nr may be almost arbitrarily large; this function is not |
* restricted to acting on a single-word quantity. |
*/ |
static __always_inline void |
set_bit(long nr, volatile unsigned long *addr) |
{ |
if (IS_IMMEDIATE(nr)) { |
asm volatile(LOCK_PREFIX "orb %1,%0" |
: CONST_MASK_ADDR(nr, addr) |
: "iq" ((u8)CONST_MASK(nr)) |
: "memory"); |
} else { |
asm volatile(LOCK_PREFIX "bts %1,%0" |
: BITOP_ADDR(addr) : "Ir" (nr) : "memory"); |
} |
} |
/** |
* __set_bit - Set a bit in memory |
* @nr: the bit to set |
* @addr: the address to start counting from |
* |
* Unlike set_bit(), this function is non-atomic and may be reordered. |
* If it's called on the same region of memory simultaneously, the effect |
* may be that only one operation succeeds. |
*/ |
static inline void __set_bit(long nr, volatile unsigned long *addr) |
{ |
asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); |
} |
/** |
* clear_bit - Clears a bit in memory |
* @nr: Bit to clear |
* @addr: Address to start counting from |
* |
* clear_bit() is atomic and may not be reordered. However, it does |
* not contain a memory barrier, so if it is used for locking purposes, |
* you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() |
* in order to ensure changes are visible on other processors. |
*/ |
static __always_inline void |
clear_bit(long nr, volatile unsigned long *addr) |
{ |
if (IS_IMMEDIATE(nr)) { |
asm volatile(LOCK_PREFIX "andb %1,%0" |
: CONST_MASK_ADDR(nr, addr) |
: "iq" ((u8)~CONST_MASK(nr))); |
} else { |
asm volatile(LOCK_PREFIX "btr %1,%0" |
: BITOP_ADDR(addr) |
: "Ir" (nr)); |
} |
} |
/* |
* clear_bit_unlock - Clears a bit in memory |
* @nr: Bit to clear |
* @addr: Address to start counting from |
* |
* clear_bit() is atomic and implies release semantics before the memory |
* operation. It can be used for an unlock. |
*/ |
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) |
{ |
barrier(); |
clear_bit(nr, addr); |
} |
static inline void __clear_bit(long nr, volatile unsigned long *addr) |
{ |
asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); |
} |
/* |
* __clear_bit_unlock - Clears a bit in memory |
* @nr: Bit to clear |
* @addr: Address to start counting from |
* |
* __clear_bit() is non-atomic and implies release semantics before the memory |
* operation. It can be used for an unlock if no other CPUs can concurrently |
* modify other bits in the word. |
* |
* No memory barrier is required here, because x86 cannot reorder stores past |
* older loads. Same principle as spin_unlock. |
*/ |
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) |
{ |
barrier(); |
__clear_bit(nr, addr); |
} |
/** |
* __change_bit - Toggle a bit in memory |
* @nr: the bit to change |
* @addr: the address to start counting from |
* |
* Unlike change_bit(), this function is non-atomic and may be reordered. |
* If it's called on the same region of memory simultaneously, the effect |
* may be that only one operation succeeds. |
*/ |
static inline void __change_bit(long nr, volatile unsigned long *addr) |
{ |
asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); |
} |
/** |
* change_bit - Toggle a bit in memory |
* @nr: Bit to change |
* @addr: Address to start counting from |
* |
* change_bit() is atomic and may not be reordered. |
* Note that @nr may be almost arbitrarily large; this function is not |
* restricted to acting on a single-word quantity. |
*/ |
static inline void change_bit(long nr, volatile unsigned long *addr) |
{ |
if (IS_IMMEDIATE(nr)) { |
asm volatile(LOCK_PREFIX "xorb %1,%0" |
: CONST_MASK_ADDR(nr, addr) |
: "iq" ((u8)CONST_MASK(nr))); |
} else { |
asm volatile(LOCK_PREFIX "btc %1,%0" |
: BITOP_ADDR(addr) |
: "Ir" (nr)); |
} |
} |
/** |
* test_and_set_bit - Set a bit and return its old value |
* @nr: Bit to set |
* @addr: Address to count from |
* |
* This operation is atomic and cannot be reordered. |
* It also implies a memory barrier. |
*/ |
static inline int test_and_set_bit(long nr, volatile unsigned long *addr) |
{ |
GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c"); |
} |
/** |
* test_and_set_bit_lock - Set a bit and return its old value for lock |
* @nr: Bit to set |
* @addr: Address to count from |
* |
* This is the same as test_and_set_bit on x86. |
*/ |
static __always_inline int |
test_and_set_bit_lock(long nr, volatile unsigned long *addr) |
{ |
return test_and_set_bit(nr, addr); |
} |
/** |
* __test_and_set_bit - Set a bit and return its old value |
* @nr: Bit to set |
* @addr: Address to count from |
* |
* This operation is non-atomic and can be reordered. |
* If two examples of this operation race, one can appear to succeed |
* but actually fail. You must protect multiple accesses with a lock. |
*/ |
static inline int __test_and_set_bit(long nr, volatile unsigned long *addr) |
{ |
int oldbit; |
asm("bts %2,%1\n\t" |
"sbb %0,%0" |
: "=r" (oldbit), ADDR |
: "Ir" (nr)); |
return oldbit; |
} |
/** |
* test_and_clear_bit - Clear a bit and return its old value |
* @nr: Bit to clear |
* @addr: Address to count from |
* |
* This operation is atomic and cannot be reordered. |
* It also implies a memory barrier. |
*/ |
static inline int test_and_clear_bit(long nr, volatile unsigned long *addr) |
{ |
GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c"); |
} |
/** |
* __test_and_clear_bit - Clear a bit and return its old value |
* @nr: Bit to clear |
* @addr: Address to count from |
* |
* This operation is non-atomic and can be reordered. |
* If two examples of this operation race, one can appear to succeed |
* but actually fail. You must protect multiple accesses with a lock. |
* |
* Note: the operation is performed atomically with respect to |
* the local CPU, but not other CPUs. Portable code should not |
* rely on this behaviour. |
* KVM relies on this behaviour on x86 for modifying memory that is also |
* accessed from a hypervisor on the same CPU if running in a VM: don't change |
* this without also updating arch/x86/kernel/kvm.c |
*/ |
static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr) |
{ |
int oldbit; |
asm volatile("btr %2,%1\n\t" |
"sbb %0,%0" |
: "=r" (oldbit), ADDR |
: "Ir" (nr)); |
return oldbit; |
} |
/* WARNING: non atomic and it can be reordered! */ |
static inline int __test_and_change_bit(long nr, volatile unsigned long *addr) |
{ |
int oldbit; |
asm volatile("btc %2,%1\n\t" |
"sbb %0,%0" |
: "=r" (oldbit), ADDR |
: "Ir" (nr) : "memory"); |
return oldbit; |
} |
/** |
* test_and_change_bit - Change a bit and return its old value |
* @nr: Bit to change |
* @addr: Address to count from |
* |
* This operation is atomic and cannot be reordered. |
* It also implies a memory barrier. |
*/ |
static inline int test_and_change_bit(long nr, volatile unsigned long *addr) |
{ |
GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c"); |
} |
static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr) |
{ |
return ((1UL << (nr & (BITS_PER_LONG-1))) & |
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0; |
} |
static inline int variable_test_bit(long nr, volatile const unsigned long *addr) |
{ |
int oldbit; |
asm volatile("bt %2,%1\n\t" |
"sbb %0,%0" |
: "=r" (oldbit) |
: "m" (*(unsigned long *)addr), "Ir" (nr)); |
return oldbit; |
} |
#if 0 /* Fool kernel-doc since it doesn't do macros yet */ |
/** |
* test_bit - Determine whether a bit is set |
* @nr: bit number to test |
* @addr: Address to start counting from |
*/ |
static int test_bit(int nr, const volatile unsigned long *addr); |
#endif |
#define test_bit(nr, addr) \ |
(__builtin_constant_p((nr)) \ |
? constant_test_bit((nr), (addr)) \ |
: variable_test_bit((nr), (addr))) |
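/* |
* Usage sketch (illustrative): bit numbers may exceed BITS_PER_LONG; |
* the operations index into the word array for you: |
*/ |
static inline void example_bitmap_ops(void) |
{ |
static unsigned long map[2]; /* 64 bits on 32-bit x86 */ |
set_bit(40, map); /* atomic: word 1, bit 8 */ |
if (test_bit(40, map)) /* constant nr -> constant_test_bit() */ |
__clear_bit(40, map); /* non-atomic variant */ |
} |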
/** |
* __ffs - find first set bit in word |
* @word: The word to search |
* |
* Undefined if no bit exists, so code should check against 0 first. |
*/ |
static inline unsigned long __ffs(unsigned long word) |
{ |
asm("rep; bsf %1,%0" |
: "=r" (word) |
: "rm" (word)); |
return word; |
} |
/** |
* ffz - find first zero bit in word |
* @word: The word to search |
* |
* Undefined if no zero exists, so code should check against ~0UL first. |
*/ |
static inline unsigned long ffz(unsigned long word) |
{ |
asm("rep; bsf %1,%0" |
: "=r" (word) |
: "r" (~word)); |
return word; |
} |
/* |
* __fls: find last set bit in word |
* @word: The word to search |
* |
* Undefined if no set bit exists, so code should check against 0 first. |
*/ |
static inline unsigned long __fls(unsigned long word) |
{ |
asm("bsr %1,%0" |
: "=r" (word) |
: "rm" (word)); |
return word; |
} |
#undef ADDR |
#ifdef __KERNEL__ |
/** |
* ffs - find first set bit in word |
* @x: the word to search |
* |
* This is defined the same way as the libc and compiler builtin ffs |
* routines, therefore differs in spirit from the other bitops. |
* |
* ffs(value) returns 0 if value is 0 or the position of the first |
* set bit if value is nonzero. The first (least significant) bit |
* is at position 1. |
*/ |
static inline int ffs(int x) |
{ |
int r; |
#ifdef CONFIG_X86_64 |
/* |
* AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the |
* dest reg is undefined if x==0, but their CPU architect says its |
* value is written to set it to the same as before, except that the |
* top 32 bits will be cleared. |
* |
* We cannot do this on 32 bits because at the very least some |
* 486 CPUs did not behave this way. |
*/ |
asm("bsfl %1,%0" |
: "=r" (r) |
: "rm" (x), "0" (-1)); |
#elif defined(CONFIG_X86_CMOV) |
asm("bsfl %1,%0\n\t" |
"cmovzl %2,%0" |
: "=&r" (r) : "rm" (x), "r" (-1)); |
#else |
asm("bsfl %1,%0\n\t" |
"jnz 1f\n\t" |
"movl $-1,%0\n" |
"1:" : "=r" (r) : "rm" (x)); |
#endif |
return r + 1; |
} |
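/* |
* Worked example: ffs(0) == 0, ffs(1) == 1 and ffs(0x40) == 7, while |
* the zero-based __ffs(0x40) above is 6 (and undefined for 0). |
*/ |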
/** |
* fls - find last set bit in word |
* @x: the word to search |
* |
* This is defined in a similar way as the libc and compiler builtin |
* ffs, but returns the position of the most significant set bit. |
* |
* fls(value) returns 0 if value is 0 or the position of the last |
* set bit if value is nonzero. The last (most significant) bit is |
* at position 32. |
*/ |
static inline int fls(int x) |
{ |
int r; |
#ifdef CONFIG_X86_64 |
/* |
* AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the |
* dest reg is undefined if x==0, but their CPU architect says its |
* value is written to set it to the same as before, except that the |
* top 32 bits will be cleared. |
* |
* We cannot do this on 32 bits because at the very least some |
* 486 CPUs did not behave this way. |
*/ |
asm("bsrl %1,%0" |
: "=r" (r) |
: "rm" (x), "0" (-1)); |
#elif defined(CONFIG_X86_CMOV) |
asm("bsrl %1,%0\n\t" |
"cmovzl %2,%0" |
: "=&r" (r) : "rm" (x), "rm" (-1)); |
#else |
asm("bsrl %1,%0\n\t" |
"jnz 1f\n\t" |
"movl $-1,%0\n" |
"1:" : "=r" (r) : "rm" (x)); |
#endif |
return r + 1; |
} |
/** |
* fls64 - find last set bit in a 64-bit word |
* @x: the word to search |
* |
* This is defined in a similar way as the libc and compiler builtin |
* ffsll, but returns the position of the most significant set bit. |
* |
* fls64(value) returns 0 if value is 0 or the position of the last |
* set bit if value is nonzero. The last (most significant) bit is |
* at position 64. |
*/ |
#ifdef CONFIG_X86_64 |
static __always_inline int fls64(__u64 x) |
{ |
int bitpos = -1; |
/* |
* AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the |
* dest reg is undefined if x==0, but their CPU architect says its |
* value is written to set it to the same as before. |
*/ |
asm("bsrq %1,%q0" |
: "+r" (bitpos) |
: "rm" (x)); |
return bitpos + 1; |
} |
#else |
#include <asm-generic/bitops/fls64.h> |
#endif |
#include <asm-generic/bitops/find.h> |
#include <asm-generic/bitops/sched.h> |
#include <asm/arch_hweight.h> |
#include <asm-generic/bitops/const_hweight.h> |
#include <asm-generic/bitops/le.h> |
#include <asm-generic/bitops/ext2-atomic-setbit.h> |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_BITOPS_H */ |
/drivers/include/asm/bitsperlong.h |
---|
0,0 → 1,13 |
#ifndef __ASM_X86_BITSPERLONG_H |
#define __ASM_X86_BITSPERLONG_H |
#ifdef __x86_64__ |
# define __BITS_PER_LONG 64 |
#else |
# define __BITS_PER_LONG 32 |
#endif |
#include <asm-generic/bitsperlong.h> |
#endif /* __ASM_X86_BITSPERLONG_H */ |
/drivers/include/asm/byteorder.h |
---|
0,0 → 1,6 |
#ifndef _ASM_X86_BYTEORDER_H |
#define _ASM_X86_BYTEORDER_H |
#include <linux/byteorder/little_endian.h> |
#endif /* _ASM_X86_BYTEORDER_H */ |
/drivers/include/asm/cache.h |
---|
0,0 → 1,23 |
#ifndef _ASM_X86_CACHE_H |
#define _ASM_X86_CACHE_H |
#include <linux/linkage.h> |
/* L1 cache line size */ |
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) |
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) |
#define __read_mostly __attribute__((__section__(".data..read_mostly"))) |
#define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT |
#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) |
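/* |
* Usage sketch (illustrative): pad hot, concurrently written data out |
* to a cache line so two CPUs do not false-share it: |
*/ |
struct example_percpu_stat { |
unsigned long hits; /* written by one CPU only */ |
} __attribute__((__aligned__(L1_CACHE_BYTES))); |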
#ifdef CONFIG_X86_VSMP |
#ifdef CONFIG_SMP |
#define __cacheline_aligned_in_smp \ |
__attribute__((__aligned__(INTERNODE_CACHE_BYTES))) \ |
__page_aligned_data |
#endif |
#endif |
#endif /* _ASM_X86_CACHE_H */ |
/drivers/include/asm/cacheflush.h |
---|
0,0 → 1,131 |
#ifndef _ASM_X86_CACHEFLUSH_H |
#define _ASM_X86_CACHEFLUSH_H |
/* Caches aren't brain-dead on the intel. */ |
#include <asm-generic/cacheflush.h> |
#include <asm/special_insns.h> |
/* |
* The set_memory_* API can be used to change various attributes of a virtual |
* address range. The attributes include: |
* Cacheability : UnCached, WriteCombining, WriteBack |
* Executability : eXecutable, NoteXecutable |
* Read/Write : ReadOnly, ReadWrite |
* Presence : NotPresent |
* |
* Within a category, the attributes are mutually exclusive. |
* |
* The implementation of this API will take care of various aspects that |
* are associated with changing such attributes, such as: |
* - Flushing TLBs |
* - Flushing CPU caches |
* - Making sure aliases of the memory behind the mapping don't violate |
* coherency rules as defined by the CPU in the system. |
* |
* What this API does not do: |
* - Provide exclusion between various callers - including callers that |
* operate on other mappings of the same physical page |
* - Restore default attributes when a page is freed |
* - Guarantee that mappings other than the requested one are |
* in any state, other than that these do not violate rules for |
* the CPU you have. Do not depend on any effects on other mappings, |
* CPUs other than the one you have may have more relaxed rules. |
* The caller is required to take care of these. |
*/ |
int _set_memory_uc(unsigned long addr, int numpages); |
int _set_memory_wc(unsigned long addr, int numpages); |
int _set_memory_wb(unsigned long addr, int numpages); |
int set_memory_uc(unsigned long addr, int numpages); |
int set_memory_wc(unsigned long addr, int numpages); |
int set_memory_wb(unsigned long addr, int numpages); |
int set_memory_x(unsigned long addr, int numpages); |
int set_memory_nx(unsigned long addr, int numpages); |
int set_memory_ro(unsigned long addr, int numpages); |
int set_memory_rw(unsigned long addr, int numpages); |
int set_memory_np(unsigned long addr, int numpages); |
int set_memory_4k(unsigned long addr, int numpages); |
int set_memory_array_uc(unsigned long *addr, int addrinarray); |
int set_memory_array_wc(unsigned long *addr, int addrinarray); |
int set_memory_array_wb(unsigned long *addr, int addrinarray); |
int set_pages_array_uc(struct page **pages, int addrinarray); |
int set_pages_array_wc(struct page **pages, int addrinarray); |
int set_pages_array_wb(struct page **pages, int addrinarray); |
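/* |
* Usage sketch (illustrative, names hypothetical): a driver that maps |
* a framebuffer would mark the range write-combining and restore |
* write-back on teardown: |
*/ |
static inline int example_map_framebuffer_wc(unsigned long fb_virt, int fb_pages) |
{ |
return set_memory_wc(fb_virt, fb_pages); /* undo: set_memory_wb() */ |
} |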
/* |
* For legacy compatibility with the old APIs, a few functions |
* are provided that work on a "struct page". |
* These functions operate ONLY on the 1:1 kernel mapping of the |
* memory that the struct page represents, and internally just |
* call the set_memory_* function. See the description of the |
* set_memory_* function for more details on conventions. |
* |
* These APIs should be considered *deprecated* and are likely going to |
* be removed in the future. |
* The reason for this is the implicit operation on the 1:1 mapping only, |
* making this not a generally useful API. |
* |
* Specifically, many users of the old APIs had a virtual address, |
* called virt_to_page() or vmalloc_to_page() on that address to |
* get a struct page* that the old API required. |
* To convert these cases, use set_memory_*() on the original |
* virtual address, do not use these functions. |
*/ |
static inline int set_pages_uc(struct page *page, int numpages) |
{ |
return 0; |
} |
static inline int set_pages_wb(struct page *page, int numpages) |
{ |
return 0; |
} |
static inline int set_pages_x(struct page *page, int numpages) |
{ |
return 0; |
} |
static inline int set_pages_nx(struct page *page, int numpages) |
{ |
return 0; |
} |
static inline int set_pages_ro(struct page *page, int numpages) |
{ |
return 0; |
} |
static inline int set_pages_rw(struct page *page, int numpages) |
{ |
return 0; |
} |
void clflush_cache_range(void *addr, unsigned int size); |
#ifdef CONFIG_DEBUG_RODATA |
void mark_rodata_ro(void); |
extern const int rodata_test_data; |
extern int kernel_set_to_readonly; |
void set_kernel_text_rw(void); |
void set_kernel_text_ro(void); |
#else |
static inline void set_kernel_text_rw(void) { } |
static inline void set_kernel_text_ro(void) { } |
#endif |
#ifdef CONFIG_DEBUG_RODATA_TEST |
int rodata_test(void); |
#else |
static inline int rodata_test(void) |
{ |
return 0; |
} |
#endif |
#endif /* _ASM_X86_CACHEFLUSH_H */ |
/drivers/include/asm/cmpxchg.h |
---|
0,0 → 1,233 |
#ifndef ASM_X86_CMPXCHG_H |
#define ASM_X86_CMPXCHG_H |
#include <linux/compiler.h> |
#include <asm/alternative.h> /* Provides LOCK_PREFIX */ |
#define __HAVE_ARCH_CMPXCHG 1 |
/* |
* Non-existent functions to indicate usage errors at link time |
* (or compile-time if the compiler implements __compiletime_error()). |
*/ |
extern void __xchg_wrong_size(void) |
__compiletime_error("Bad argument size for xchg"); |
extern void __cmpxchg_wrong_size(void) |
__compiletime_error("Bad argument size for cmpxchg"); |
extern void __xadd_wrong_size(void) |
__compiletime_error("Bad argument size for xadd"); |
extern void __add_wrong_size(void) |
__compiletime_error("Bad argument size for add"); |
/* |
* Constants for operation sizes. On 32-bit, the 64-bit size is set to |
* -1 because sizeof will never return -1, thereby making those switch |
* case statements guaranteed dead code which the compiler will |
* eliminate, and allowing the "missing symbol in the default case" to |
* indicate a usage error. |
*/ |
#define __X86_CASE_B 1 |
#define __X86_CASE_W 2 |
#define __X86_CASE_L 4 |
#ifdef CONFIG_64BIT |
#define __X86_CASE_Q 8 |
#else |
#define __X86_CASE_Q -1 /* sizeof will never return -1 */ |
#endif |
/* |
* An exchange-type operation, which takes a value and a pointer, and |
* returns the old value. |
*/ |
#define __xchg_op(ptr, arg, op, lock) \ |
({ \ |
__typeof__ (*(ptr)) __ret = (arg); \ |
switch (sizeof(*(ptr))) { \ |
case __X86_CASE_B: \ |
asm volatile (lock #op "b %b0, %1\n" \ |
: "+q" (__ret), "+m" (*(ptr)) \ |
: : "memory", "cc"); \ |
break; \ |
case __X86_CASE_W: \ |
asm volatile (lock #op "w %w0, %1\n" \ |
: "+r" (__ret), "+m" (*(ptr)) \ |
: : "memory", "cc"); \ |
break; \ |
case __X86_CASE_L: \ |
asm volatile (lock #op "l %0, %1\n" \ |
: "+r" (__ret), "+m" (*(ptr)) \ |
: : "memory", "cc"); \ |
break; \ |
case __X86_CASE_Q: \ |
asm volatile (lock #op "q %q0, %1\n" \ |
: "+r" (__ret), "+m" (*(ptr)) \ |
: : "memory", "cc"); \ |
break; \ |
default: \ |
__ ## op ## _wrong_size(); \ |
} \ |
__ret; \ |
}) |
/* |
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway. |
* Since this is generally used to protect other memory information, we |
* use "asm volatile" and "memory" clobbers to prevent gcc from moving |
* information around. |
*/ |
#define xchg(ptr, v) __xchg_op((ptr), (v), xchg, "") |
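/* |
* Usage sketch (illustrative): because xchg() is implicitly locked it |
* is enough for a minimal test-and-set lock (cpu_relax() is assumed |
* to come from <asm/processor.h>): |
*/ |
static inline void example_ts_lock(unsigned int *lock) |
{ |
while (xchg(lock, 1)) /* returns the old value */ |
cpu_relax(); /* spin until it was 0 */ |
} |
static inline void example_ts_unlock(unsigned int *lock) |
{ |
xchg(lock, 0); /* implies a full barrier */ |
} |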
/* |
* Atomic compare and exchange. Compare OLD with MEM, if identical, |
* store NEW in MEM. Return the initial value in MEM. Success is |
* indicated by comparing RETURN with OLD. |
*/ |
#define __raw_cmpxchg(ptr, old, new, size, lock) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
__typeof__(*(ptr)) __old = (old); \ |
__typeof__(*(ptr)) __new = (new); \ |
switch (size) { \ |
case __X86_CASE_B: \ |
{ \ |
volatile u8 *__ptr = (volatile u8 *)(ptr); \ |
asm volatile(lock "cmpxchgb %2,%1" \ |
: "=a" (__ret), "+m" (*__ptr) \ |
: "q" (__new), "0" (__old) \ |
: "memory"); \ |
break; \ |
} \ |
case __X86_CASE_W: \ |
{ \ |
volatile u16 *__ptr = (volatile u16 *)(ptr); \ |
asm volatile(lock "cmpxchgw %2,%1" \ |
: "=a" (__ret), "+m" (*__ptr) \ |
: "r" (__new), "0" (__old) \ |
: "memory"); \ |
break; \ |
} \ |
case __X86_CASE_L: \ |
{ \ |
volatile u32 *__ptr = (volatile u32 *)(ptr); \ |
asm volatile(lock "cmpxchgl %2,%1" \ |
: "=a" (__ret), "+m" (*__ptr) \ |
: "r" (__new), "0" (__old) \ |
: "memory"); \ |
break; \ |
} \ |
case __X86_CASE_Q: \ |
{ \ |
volatile u64 *__ptr = (volatile u64 *)(ptr); \ |
asm volatile(lock "cmpxchgq %2,%1" \ |
: "=a" (__ret), "+m" (*__ptr) \ |
: "r" (__new), "0" (__old) \ |
: "memory"); \ |
break; \ |
} \ |
default: \ |
__cmpxchg_wrong_size(); \ |
} \ |
__ret; \ |
}) |
#define __cmpxchg(ptr, old, new, size) \ |
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX) |
#define __sync_cmpxchg(ptr, old, new, size) \ |
__raw_cmpxchg((ptr), (old), (new), (size), "lock; ") |
#define __cmpxchg_local(ptr, old, new, size) \ |
__raw_cmpxchg((ptr), (old), (new), (size), "") |
#ifdef CONFIG_X86_32 |
# include <asm/cmpxchg_32.h> |
#else |
# include <asm/cmpxchg_64.h> |
#endif |
#define cmpxchg(ptr, old, new) \ |
__cmpxchg(ptr, old, new, sizeof(*(ptr))) |
#define sync_cmpxchg(ptr, old, new) \ |
__sync_cmpxchg(ptr, old, new, sizeof(*(ptr))) |
#define cmpxchg_local(ptr, old, new) \ |
__cmpxchg_local(ptr, old, new, sizeof(*(ptr))) |
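/* |
* Usage sketch (illustrative): the canonical cmpxchg() update loop; |
* success is detected by comparing the returned value against the one |
* the update was computed from: |
*/ |
static inline unsigned int example_saturating_inc(unsigned int *p) |
{ |
unsigned int old, cur = *p; |
for (;;) { |
if (cur == ~0U) /* already saturated */ |
return cur; |
old = cmpxchg(p, cur, cur + 1); |
if (old == cur) /* nobody raced with us */ |
return cur + 1; |
cur = old; /* retry with fresh value */ |
} |
} |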
/* |
* xadd() adds "inc" to "*ptr" and atomically returns the previous |
* value of "*ptr". |
* |
* xadd() is locked when multiple CPUs are online |
* xadd_sync() is always locked |
* xadd_local() is never locked |
*/ |
#define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock) |
#define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX) |
#define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ") |
#define xadd_local(ptr, inc) __xadd((ptr), (inc), "") |
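/* |
* Usage sketch (illustrative): xadd() returning the pre-add value is |
* exactly what a ticket dispenser needs: |
*/ |
static inline unsigned int example_take_ticket(unsigned int *next) |
{ |
return xadd(next, 1); /* my ticket; *next already points past it */ |
} |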
#define __add(ptr, inc, lock) \ |
({ \ |
__typeof__ (*(ptr)) __ret = (inc); \ |
switch (sizeof(*(ptr))) { \ |
case __X86_CASE_B: \ |
asm volatile (lock "addb %b1, %0\n" \ |
: "+m" (*(ptr)) : "qi" (inc) \ |
: "memory", "cc"); \ |
break; \ |
case __X86_CASE_W: \ |
asm volatile (lock "addw %w1, %0\n" \ |
: "+m" (*(ptr)) : "ri" (inc) \ |
: "memory", "cc"); \ |
break; \ |
case __X86_CASE_L: \ |
asm volatile (lock "addl %1, %0\n" \ |
: "+m" (*(ptr)) : "ri" (inc) \ |
: "memory", "cc"); \ |
break; \ |
case __X86_CASE_Q: \ |
asm volatile (lock "addq %1, %0\n" \ |
: "+m" (*(ptr)) : "ri" (inc) \ |
: "memory", "cc"); \ |
break; \ |
default: \ |
__add_wrong_size(); \ |
} \ |
__ret; \ |
}) |
/* |
* add_*() adds "inc" to "*ptr" |
* |
* __add() takes a lock prefix |
* add_smp() is locked when multiple CPUs are online |
* add_sync() is always locked |
*/ |
#define add_smp(ptr, inc) __add((ptr), (inc), LOCK_PREFIX) |
#define add_sync(ptr, inc) __add((ptr), (inc), "lock; ") |
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2) \ |
({ \ |
bool __ret; \ |
__typeof__(*(p1)) __old1 = (o1), __new1 = (n1); \ |
__typeof__(*(p2)) __old2 = (o2), __new2 = (n2); \ |
BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \ |
BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \ |
VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long))); \ |
VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2)); \ |
asm volatile(pfx "cmpxchg%c4b %2; sete %0" \ |
: "=a" (__ret), "+d" (__old2), \ |
"+m" (*(p1)), "+m" (*(p2)) \ |
: "i" (2 * sizeof(long)), "a" (__old1), \ |
"b" (__new1), "c" (__new2)); \ |
__ret; \ |
}) |
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \ |
__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2) |
#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \ |
__cmpxchg_double(, p1, p2, o1, o2, n1, n2) |
#endif /* ASM_X86_CMPXCHG_H */ |
/drivers/include/asm/cmpxchg_32.h |
---|
0,0 → 1,114 |
#ifndef _ASM_X86_CMPXCHG_32_H |
#define _ASM_X86_CMPXCHG_32_H |
/* |
* Note: if you use set_64bit(), __cmpxchg64(), or their variants, |
* you need to test for the feature in boot_cpu_data. |
*/ |
/* |
* CMPXCHG8B only writes to the target if we had the previous |
* value in registers, otherwise it acts as a read and gives us the |
* "new previous" value. That is why there is a loop. Preloading |
* EDX:EAX is a performance optimization: in the common case it means |
* we need only one locked operation. |
* |
* A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very |
* least an FPU save and/or %cr0.ts manipulation. |
* |
* cmpxchg8b must be used with the lock prefix here to allow the |
* instruction to be executed atomically. We need the reader side to |
* see a coherent 64-bit value. |
*/ |
static inline void set_64bit(volatile u64 *ptr, u64 value) |
{ |
u32 low = value; |
u32 high = value >> 32; |
u64 prev = *ptr; |
asm volatile("\n1:\t" |
LOCK_PREFIX "cmpxchg8b %0\n\t" |
"jnz 1b" |
: "=m" (*ptr), "+A" (prev) |
: "b" (low), "c" (high) |
: "memory"); |
} |
#ifdef CONFIG_X86_CMPXCHG64 |
#define cmpxchg64(ptr, o, n) \ |
((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ |
(unsigned long long)(n))) |
#define cmpxchg64_local(ptr, o, n) \ |
((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ |
(unsigned long long)(n))) |
#endif |
static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new) |
{ |
u64 prev; |
asm volatile(LOCK_PREFIX "cmpxchg8b %1" |
: "=A" (prev), |
"+m" (*ptr) |
: "b" ((u32)new), |
"c" ((u32)(new >> 32)), |
"0" (old) |
: "memory"); |
return prev; |
} |
static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new) |
{ |
u64 prev; |
asm volatile("cmpxchg8b %1" |
: "=A" (prev), |
"+m" (*ptr) |
: "b" ((u32)new), |
"c" ((u32)(new >> 32)), |
"0" (old) |
: "memory"); |
return prev; |
} |
#ifndef CONFIG_X86_CMPXCHG64 |
/* |
* When building a kernel capable of running on the 80386 and 80486, |
* it may be necessary to emulate cmpxchg8b, which those CPUs lack. |
*/ |
#define cmpxchg64(ptr, o, n) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
__typeof__(*(ptr)) __old = (o); \ |
__typeof__(*(ptr)) __new = (n); \ |
alternative_io(LOCK_PREFIX_HERE \ |
"call cmpxchg8b_emu", \ |
"lock; cmpxchg8b (%%esi)" , \ |
X86_FEATURE_CX8, \ |
"=A" (__ret), \ |
"S" ((ptr)), "0" (__old), \ |
"b" ((unsigned int)__new), \ |
"c" ((unsigned int)(__new>>32)) \ |
: "memory"); \ |
__ret; }) |
#define cmpxchg64_local(ptr, o, n) \ |
({ \ |
__typeof__(*(ptr)) __ret; \ |
__typeof__(*(ptr)) __old = (o); \ |
__typeof__(*(ptr)) __new = (n); \ |
alternative_io("call cmpxchg8b_emu", \ |
"cmpxchg8b (%%esi)" , \ |
X86_FEATURE_CX8, \ |
"=A" (__ret), \ |
"S" ((ptr)), "0" (__old), \ |
"b" ((unsigned int)__new), \ |
"c" ((unsigned int)(__new>>32)) \ |
: "memory"); \ |
__ret; }) |
#endif |
#define system_has_cmpxchg_double() cpu_has_cx8 |
#endif /* _ASM_X86_CMPXCHG_32_H */ |
/drivers/include/asm/cpufeature.h |
---|
0,0 → 1,586 |
/* |
* Defines x86 CPU feature bits |
*/ |
#ifndef _ASM_X86_CPUFEATURE_H |
#define _ASM_X86_CPUFEATURE_H |
#ifndef _ASM_X86_REQUIRED_FEATURES_H |
#include <asm/required-features.h> |
#endif |
#ifndef _ASM_X86_DISABLED_FEATURES_H |
#include <asm/disabled-features.h> |
#endif |
#define NCAPINTS 11 /* N 32-bit words worth of info */ |
#define NBUGINTS 1 /* N 32-bit bug flags */ |
/* |
* Note: If the comment begins with a quoted string, that string is used |
* in /proc/cpuinfo instead of the macro name. If the string is "", |
* this feature bit is not displayed in /proc/cpuinfo at all. |
*/ |
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ |
#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ |
#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ |
#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ |
#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ |
#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ |
#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ |
#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ |
#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ |
#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ |
#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ |
#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ |
#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ |
#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ |
#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ |
#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ |
/* (plus FCMOVcc, FCOMI with FPU) */ |
#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ |
#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ |
#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ |
#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ |
#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ |
#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ |
#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ |
#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ |
#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ |
#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ |
#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ |
#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ |
#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ |
#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ |
#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ |
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ |
/* Don't duplicate feature flags which are redundant with Intel! */ |
#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ |
#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ |
#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ |
#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ |
#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ |
#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ |
#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ |
#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ |
#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ |
#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */ |
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ |
#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ |
#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ |
#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ |
/* Other features, Linux-defined mapping, word 3 */ |
/* This range is used for feature bits which conflict or are synthesized */ |
#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ |
#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ |
#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ |
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ |
/* cpu types for specific tunings: */ |
#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ |
#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ |
#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ |
#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ |
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ |
#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ |
/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */ |
#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ |
#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ |
#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ |
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ |
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ |
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ |
#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ |
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ |
/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */ |
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ |
#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ |
#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ |
#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ |
#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ |
/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */ |
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ |
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ |
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ |
#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */ |
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ |
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ |
#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ |
#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ |
#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ |
#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ |
#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ |
#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ |
#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ |
#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ |
#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ |
#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ |
#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ |
#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ |
#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ |
#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ |
#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ |
#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ |
#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ |
#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ |
#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ |
#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ |
#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ |
#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ |
#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ |
#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ |
#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ |
#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ |
#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ |
#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ |
#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ |
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ |
#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ |
#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ |
#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ |
#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ |
#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ |
#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ |
#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ |
#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ |
#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ |
#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ |
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ |
#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ |
#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ |
#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ |
#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ |
#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ |
#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ |
#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ |
#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ |
#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ |
#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ |
#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ |
#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ |
#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ |
#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ |
#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ |
#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ |
#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ |
#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ |
#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ |
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ |
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ |
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ |
#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */ |
/* |
* Auxiliary flags: Linux defined - For features scattered in various |
* CPUID levels like 0x6, 0xA etc, word 7 |
*/ |
#define X86_FEATURE_IDA ( 7*32+ 0) /* Intel Dynamic Acceleration */ |
#define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */ |
#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ |
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ |
#define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */ |
#define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */ |
#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */ |
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ |
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ |
#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ |
#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ |
#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ |
#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ |
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ |
/* Virtualization flags: Linux defined, word 8 */ |
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ |
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ |
#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ |
#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ |
#define X86_FEATURE_NPT ( 8*32+ 5) /* AMD Nested Page Table support */ |
#define X86_FEATURE_LBRV ( 8*32+ 6) /* AMD LBR Virtualization support */ |
#define X86_FEATURE_SVML ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */ |
#define X86_FEATURE_NRIPS ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */ |
#define X86_FEATURE_TSCRATEMSR ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */ |
#define X86_FEATURE_VMCBCLEAN ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */ |
#define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */ |
#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */ |
#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */ |
#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */ |
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ |
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ |
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ |
#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ |
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ |
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ |
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ |
#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ |
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ |
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ |
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ |
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ |
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ |
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ |
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ |
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ |
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ |
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ |
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ |
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ |
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ |
/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ |
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ |
#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ |
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ |
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ |
/* |
* BUG word(s) |
*/ |
#define X86_BUG(x) (NCAPINTS*32 + (x)) |
#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ |
#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ |
#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ |
#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ |
#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ |
#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ |
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ |
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ |
#if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
#include <asm/asm.h> |
#include <linux/bitops.h> |
#ifdef CONFIG_X86_FEATURE_NAMES |
extern const char * const x86_cap_flags[NCAPINTS*32]; |
extern const char * const x86_power_flags[32]; |
#define X86_CAP_FMT "%s" |
#define x86_cap_flag(flag) x86_cap_flags[flag] |
#else |
#define X86_CAP_FMT "%d:%d" |
#define x86_cap_flag(flag) ((flag) >> 5), ((flag) & 31) |
#endif |
/* |
* In order to save room, we index into this array by doing |
* X86_BUG_<name> - NCAPINTS*32. |
*/ |
extern const char * const x86_bug_flags[NBUGINTS*32]; |
#define test_cpu_cap(c, bit) \ |
test_bit(bit, (unsigned long *)((c)->x86_capability)) |
#define REQUIRED_MASK_BIT_SET(bit) \ |
( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ |
(((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \ |
(((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \ |
(((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \ |
(((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \ |
(((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \ |
(((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ |
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \ |
(((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \ |
(((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) ) |
#define DISABLED_MASK_BIT_SET(bit) \ |
( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0)) || \ |
(((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1)) || \ |
(((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2)) || \ |
(((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3)) || \ |
(((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4)) || \ |
(((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5)) || \ |
(((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6)) || \ |
(((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7)) || \ |
(((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8)) || \ |
(((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) ) |
#define cpu_has(c, bit) \ |
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ |
test_cpu_cap(c, bit)) |
#define this_cpu_has(bit) \ |
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ |
x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability)) |
/* |
* This macro is for detection of features which need kernel |
* infrastructure to be used. It may *not* directly test the CPU |
* itself. Use the cpu_has() family if you want true runtime |
* testing of CPU features, like in hypervisor code where you are |
* supporting a possible guest feature where host support for it |
* is not relevant. |
*/ |
#define cpu_feature_enabled(bit) \ |
(__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : \ |
cpu_has(&boot_cpu_data, bit)) |
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) |
#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability)) |
#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability)) |
#define setup_clear_cpu_cap(bit) do { \ |
clear_cpu_cap(&boot_cpu_data, bit); \ |
set_bit(bit, (unsigned long *)cpu_caps_cleared); \ |
} while (0) |
#define setup_force_cpu_cap(bit) do { \ |
set_cpu_cap(&boot_cpu_data, bit); \ |
set_bit(bit, (unsigned long *)cpu_caps_set); \ |
} while (0) |
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) |
#define cpu_has_de boot_cpu_has(X86_FEATURE_DE) |
#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) |
#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) |
#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) |
#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) |
#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) |
#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) |
#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) |
#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) |
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) |
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) |
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) |
#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3) |
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES) |
#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX) |
#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2) |
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) |
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) |
#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) |
#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) |
#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) |
#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) |
#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) |
#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) |
#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) |
#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) |
#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) |
#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) |
#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) |
#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) |
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH) |
#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) |
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) |
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) |
#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) |
#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1) |
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) |
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) |
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) |
#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT) |
#define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES) |
#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE) |
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) |
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) |
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) |
#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB) |
#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2) |
#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) |
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) |
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) |
#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT) |
#if __GNUC__ >= 4 |
extern void warn_pre_alternatives(void); |
extern bool __static_cpu_has_safe(u16 bit); |
/* |
* Static testing of CPU features. Used the same as boot_cpu_has(). |
* These are only valid after alternatives have run, but will statically |
* patch the target code for additional performance. |
*/ |
static __always_inline __pure bool __static_cpu_has(u16 bit) |
{ |
#ifdef CC_HAVE_ASM_GOTO |
#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS |
/* |
* Catch too early usage of this before alternatives |
* have run. |
*/ |
asm_volatile_goto("1: jmp %l[t_warn]\n" |
"2:\n" |
".section .altinstructions,\"a\"\n" |
" .long 1b - .\n" |
" .long 0\n" /* no replacement */ |
" .word %P0\n" /* 1: do replace */ |
" .byte 2b - 1b\n" /* source len */ |
" .byte 0\n" /* replacement len */ |
".previous\n" |
/* skipping size check since replacement size = 0 */ |
: : "i" (X86_FEATURE_ALWAYS) : : t_warn); |
#endif |
asm_volatile_goto("1: jmp %l[t_no]\n" |
"2:\n" |
".section .altinstructions,\"a\"\n" |
" .long 1b - .\n" |
" .long 0\n" /* no replacement */ |
" .word %P0\n" /* feature bit */ |
" .byte 2b - 1b\n" /* source len */ |
" .byte 0\n" /* replacement len */ |
".previous\n" |
/* skipping size check since replacement size = 0 */ |
: : "i" (bit) : : t_no); |
return true; |
t_no: |
return false; |
#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS |
t_warn: |
warn_pre_alternatives(); |
return false; |
#endif |
#else /* CC_HAVE_ASM_GOTO */ |
u8 flag; |
/* Open-coded due to __stringify() in ALTERNATIVE() */ |
asm volatile("1: movb $0,%0\n" |
"2:\n" |
".section .altinstructions,\"a\"\n" |
" .long 1b - .\n" |
" .long 3f - .\n" |
" .word %P1\n" /* feature bit */ |
" .byte 2b - 1b\n" /* source len */ |
" .byte 4f - 3f\n" /* replacement len */ |
".previous\n" |
".section .discard,\"aw\",@progbits\n" |
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ |
".previous\n" |
".section .altinstr_replacement,\"ax\"\n" |
"3: movb $1,%0\n" |
"4:\n" |
".previous\n" |
: "=qm" (flag) : "i" (bit)); |
return flag; |
#endif /* CC_HAVE_ASM_GOTO */ |
} |
#define static_cpu_has(bit) \ |
( \ |
__builtin_constant_p(boot_cpu_has(bit)) ? \ |
boot_cpu_has(bit) : \ |
__builtin_constant_p(bit) ? \ |
__static_cpu_has(bit) : \ |
boot_cpu_has(bit) \ |
) |
static __always_inline __pure bool _static_cpu_has_safe(u16 bit) |
{ |
#ifdef CC_HAVE_ASM_GOTO |
/* |
* We need to spell the jumps to the compiler because, depending on the offset, |
* the replacement jump can be bigger than the original jump, which we |
* cannot allow. Thus, we force the jump to the widest, 4-byte, signed |
* relative offset even though the latter would often fit in fewer bytes. |
*/ |
asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n" |
"2:\n" |
".section .altinstructions,\"a\"\n" |
" .long 1b - .\n" /* src offset */ |
" .long 3f - .\n" /* repl offset */ |
" .word %P1\n" /* always replace */ |
" .byte 2b - 1b\n" /* src len */ |
" .byte 4f - 3f\n" /* repl len */ |
".previous\n" |
".section .altinstr_replacement,\"ax\"\n" |
"3: .byte 0xe9\n .long %l[t_no] - 2b\n" |
"4:\n" |
".previous\n" |
".section .altinstructions,\"a\"\n" |
" .long 1b - .\n" /* src offset */ |
" .long 0\n" /* no replacement */ |
" .word %P0\n" /* feature bit */ |
" .byte 2b - 1b\n" /* src len */ |
" .byte 0\n" /* repl len */ |
".previous\n" |
: : "i" (bit), "i" (X86_FEATURE_ALWAYS) |
: : t_dynamic, t_no); |
return true; |
t_no: |
return false; |
t_dynamic: |
return __static_cpu_has_safe(bit); |
#else |
u8 flag; |
/* Open-coded due to __stringify() in ALTERNATIVE() */ |
asm volatile("1: movb $2,%0\n" |
"2:\n" |
".section .altinstructions,\"a\"\n" |
" .long 1b - .\n" /* src offset */ |
" .long 3f - .\n" /* repl offset */ |
" .word %P2\n" /* always replace */ |
" .byte 2b - 1b\n" /* source len */ |
" .byte 4f - 3f\n" /* replacement len */ |
".previous\n" |
".section .discard,\"aw\",@progbits\n" |
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ |
".previous\n" |
".section .altinstr_replacement,\"ax\"\n" |
"3: movb $0,%0\n" |
"4:\n" |
".previous\n" |
".section .altinstructions,\"a\"\n" |
" .long 1b - .\n" /* src offset */ |
" .long 5f - .\n" /* repl offset */ |
" .word %P1\n" /* feature bit */ |
" .byte 4b - 3b\n" /* src len */ |
" .byte 6f - 5f\n" /* repl len */ |
".previous\n" |
".section .discard,\"aw\",@progbits\n" |
" .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */ |
".previous\n" |
".section .altinstr_replacement,\"ax\"\n" |
"5: movb $1,%0\n" |
"6:\n" |
".previous\n" |
: "=qm" (flag) |
: "i" (bit), "i" (X86_FEATURE_ALWAYS)); |
return (flag == 2 ? __static_cpu_has_safe(bit) : flag); |
#endif /* CC_HAVE_ASM_GOTO */ |
} |
#define static_cpu_has_safe(bit) \ |
( \ |
__builtin_constant_p(boot_cpu_has(bit)) ? \ |
boot_cpu_has(bit) : \ |
_static_cpu_has_safe(bit) \ |
) |
#else |
/* |
* gcc 3.x is too stupid to do the static test; fall back to dynamic. |
*/ |
#define static_cpu_has(bit) boot_cpu_has(bit) |
#define static_cpu_has_safe(bit) boot_cpu_has(bit) |
#endif |
#define cpu_has_bug(c, bit) cpu_has(c, (bit)) |
#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit)) |
#define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit)) |
#define static_cpu_has_bug(bit) static_cpu_has((bit)) |
#define static_cpu_has_bug_safe(bit) static_cpu_has_safe((bit)) |
#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit)) |
#define MAX_CPU_FEATURES (NCAPINTS * 32) |
#define cpu_have_feature boot_cpu_has |
#define CPU_FEATURE_TYPEFMT "x86,ven%04Xfam%04Xmod%04X" |
#define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \ |
boot_cpu_data.x86_model |
#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ |
#endif /* _ASM_X86_CPUFEATURE_H */ |
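/* |
 * Usage sketch (illustrative, not from the original header): test a |
 * feature bit at runtime. boot_cpu_has() reads the cached capability |
 * words; static_cpu_has() additionally patches the branch in place via |
 * alternatives and is valid only after alternatives have run. |
 */ |
static inline int example_have_sse2(void) /* hypothetical helper */ |
{ |
return boot_cpu_has(X86_FEATURE_XMM2); |
} |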
/drivers/include/asm/cpumask.h |
---|
0,0 → 1,14 |
#ifndef _ASM_X86_CPUMASK_H |
#define _ASM_X86_CPUMASK_H |
#ifndef __ASSEMBLY__ |
#include <linux/cpumask.h> |
extern cpumask_var_t cpu_callin_mask; |
extern cpumask_var_t cpu_callout_mask; |
extern cpumask_var_t cpu_initialized_mask; |
extern cpumask_var_t cpu_sibling_setup_mask; |
extern void setup_cpu_local_masks(void); |
#endif /* __ASSEMBLY__ */ |
#endif /* _ASM_X86_CPUMASK_H */ |
/drivers/include/asm/current.h |
---|
0,0 → 1,21 |
#ifndef _ASM_X86_CURRENT_H |
#define _ASM_X86_CURRENT_H |
#include <linux/compiler.h> |
#include <asm/percpu.h> |
#ifndef __ASSEMBLY__ |
struct task_struct; |
DECLARE_PER_CPU(struct task_struct *, current_task); |
static __always_inline struct task_struct *get_current(void) |
{ |
return this_cpu_read_stable(current_task); |
} |
#define current (void*)GetPid() |
#endif /* __ASSEMBLY__ */ |
#endif /* _ASM_X86_CURRENT_H */ |
/drivers/include/asm/delay.h |
---|
0,0 → 1,8 |
#ifndef _ASM_X86_DELAY_H |
#define _ASM_X86_DELAY_H |
#include <asm-generic/delay.h> |
void use_tsc_delay(void); |
#endif /* _ASM_X86_DELAY_H */ |
/drivers/include/asm/desc_defs.h |
---|
0,0 → 1,101 |
/* Written 2000 by Andi Kleen */ |
#ifndef _ASM_X86_DESC_DEFS_H |
#define _ASM_X86_DESC_DEFS_H |
/* |
* Segment descriptor structure definitions, usable from both x86_64 and i386 |
* archs. |
*/ |
#ifndef __ASSEMBLY__ |
#include <linux/types.h> |
/* |
* FIXME: Accessing the desc_struct through its fields is more elegant, |
* and should be the one valid thing to do. However, a lot of open code |
* still touches the a and b accessors, and doing this allows us to do it |
* incrementally. We keep the signature as a struct, rather than a union, |
* so we can get rid of it transparently in the future -- glommer |
*/ |
/* 8 byte segment descriptor */ |
struct desc_struct { |
union { |
struct { |
unsigned int a; |
unsigned int b; |
}; |
struct { |
u16 limit0; |
u16 base0; |
unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; |
unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; |
}; |
}; |
} __attribute__((packed)); |
#define GDT_ENTRY_INIT(flags, base, limit) { { { \ |
.a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \ |
.b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \ |
((limit) & 0xf0000) | ((base) & 0xff000000), \ |
} } } |
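/* |
 * Illustrative initializer (a sketch; the 0xc09a flags are the |
 * conventional flat-model ring-0 code-segment encoding, not a value |
 * defined in this tree): base 0, 4 GiB limit, 32-bit, page-granular. |
 */ |
static struct desc_struct example_code_seg = /* hypothetical */ |
GDT_ENTRY_INIT(0xc09a, 0, 0xfffff); |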
enum { |
GATE_INTERRUPT = 0xE, |
GATE_TRAP = 0xF, |
GATE_CALL = 0xC, |
GATE_TASK = 0x5, |
}; |
/* 16byte gate */ |
struct gate_struct64 { |
u16 offset_low; |
u16 segment; |
unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1; |
u16 offset_middle; |
u32 offset_high; |
u32 zero1; |
} __attribute__((packed)); |
#define PTR_LOW(x) ((unsigned long long)(x) & 0xFFFF) |
#define PTR_MIDDLE(x) (((unsigned long long)(x) >> 16) & 0xFFFF) |
#define PTR_HIGH(x) ((unsigned long long)(x) >> 32) |
enum { |
DESC_TSS = 0x9, |
DESC_LDT = 0x2, |
DESCTYPE_S = 0x10, /* !system */ |
}; |
/* LDT or TSS descriptor in the GDT. 16 bytes. */ |
struct ldttss_desc64 { |
u16 limit0; |
u16 base0; |
unsigned base1 : 8, type : 5, dpl : 2, p : 1; |
unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8; |
u32 base3; |
u32 zero1; |
} __attribute__((packed)); |
#ifdef CONFIG_X86_64 |
typedef struct gate_struct64 gate_desc; |
typedef struct ldttss_desc64 ldt_desc; |
typedef struct ldttss_desc64 tss_desc; |
#define gate_offset(g) ((g).offset_low | ((unsigned long)(g).offset_middle << 16) | ((unsigned long)(g).offset_high << 32)) |
#define gate_segment(g) ((g).segment) |
#else |
typedef struct desc_struct gate_desc; |
typedef struct desc_struct ldt_desc; |
typedef struct desc_struct tss_desc; |
#define gate_offset(g) (((g).b & 0xffff0000) | ((g).a & 0x0000ffff)) |
#define gate_segment(g) ((g).a >> 16) |
#endif |
struct desc_ptr { |
unsigned short size; |
unsigned long address; |
} __attribute__((packed)) ; |
#endif /* !__ASSEMBLY__ */ |
#endif /* _ASM_X86_DESC_DEFS_H */ |
/drivers/include/asm/disabled-features.h |
---|
0,0 → 1,45 |
#ifndef _ASM_X86_DISABLED_FEATURES_H |
#define _ASM_X86_DISABLED_FEATURES_H |
/* These features, although they might be available in a CPU |
* will not be used because the compile options to support |
* them are not present. |
* |
* This code allows them to be checked and disabled at |
* compile time without an explicit #ifdef. Use |
* cpu_feature_enabled(). |
*/ |
#ifdef CONFIG_X86_INTEL_MPX |
# define DISABLE_MPX 0 |
#else |
# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) |
#endif |
#ifdef CONFIG_X86_64 |
# define DISABLE_VME (1<<(X86_FEATURE_VME & 31)) |
# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31)) |
# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31)) |
# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31)) |
#else |
# define DISABLE_VME 0 |
# define DISABLE_K6_MTRR 0 |
# define DISABLE_CYRIX_ARR 0 |
# define DISABLE_CENTAUR_MCR 0 |
#endif /* CONFIG_X86_64 */ |
/* |
* Make sure to add features to the correct mask |
*/ |
#define DISABLED_MASK0 (DISABLE_VME) |
#define DISABLED_MASK1 0 |
#define DISABLED_MASK2 0 |
#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR) |
#define DISABLED_MASK4 0 |
#define DISABLED_MASK5 0 |
#define DISABLED_MASK6 0 |
#define DISABLED_MASK7 0 |
#define DISABLED_MASK8 0 |
#define DISABLED_MASK9 (DISABLE_MPX) |
#endif /* _ASM_X86_DISABLED_FEATURES_H */ |
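/* |
 * Sketch of the intended use (hypothetical caller; assumes |
 * <asm/cpufeature.h> is included so cpu_feature_enabled() is visible): |
 * with CONFIG_X86_INTEL_MPX unset, DISABLE_MPX lands in DISABLED_MASK9, |
 * cpu_feature_enabled(X86_FEATURE_MPX) folds to a constant 0, and the |
 * branch below is discarded at compile time without an explicit #ifdef. |
 */ |
static inline void example_maybe_init_mpx(void) |
{ |
if (cpu_feature_enabled(X86_FEATURE_MPX)) |
; /* MPX state setup would go here */ |
} |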
/drivers/include/asm/div64.h |
---|
0,0 → 1,66 |
#ifndef _ASM_X86_DIV64_H |
#define _ASM_X86_DIV64_H |
#ifdef CONFIG_X86_32 |
#include <linux/types.h> |
#include <linux/log2.h> |
/* |
* do_div() is NOT a C function. It wants to return |
* two values (the quotient and the remainder), but |
* since that doesn't work very well in C, what it |
* does is: |
* |
* - modifies the 64-bit dividend _in_place_ |
* - returns the 32-bit remainder |
* |
* This ends up being the most efficient "calling |
* convention" on x86. |
*/ |
#define do_div(n, base) \ |
({ \ |
unsigned long __upper, __low, __high, __mod, __base; \ |
__base = (base); \ |
if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \ |
__mod = n & (__base - 1); \ |
n >>= ilog2(__base); \ |
} else { \ |
asm("" : "=a" (__low), "=d" (__high) : "A" (n));\ |
__upper = __high; \ |
if (__high) { \ |
__upper = __high % (__base); \ |
__high = __high / (__base); \ |
} \ |
asm("divl %2" : "=a" (__low), "=d" (__mod) \ |
: "rm" (__base), "0" (__low), "1" (__upper)); \ |
asm("" : "=A" (n) : "a" (__low), "d" (__high)); \ |
} \ |
__mod; \ |
}) |
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) |
{ |
union { |
u64 v64; |
u32 v32[2]; |
} d = { dividend }; |
u32 upper; |
upper = d.v32[1]; |
d.v32[1] = 0; |
if (upper >= divisor) { |
d.v32[1] = upper / divisor; |
upper %= divisor; |
} |
asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) : |
"rm" (divisor), "0" (d.v32[0]), "1" (upper)); |
return d.v64; |
} |
#define div_u64_rem div_u64_rem |
#else |
# include <asm-generic/div64.h> |
#endif /* CONFIG_X86_32 */ |
#endif /* _ASM_X86_DIV64_H */ |
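/* |
 * Usage sketch (hypothetical helper): do_div() divides the 64-bit value |
 * in place and evaluates to the 32-bit remainder, the idiomatic 64/32 |
 * division on x86_32. |
 */ |
static inline u32 example_ns_to_ms(u64 ns) |
{ |
do_div(ns, 1000000); /* ns now holds the quotient */ |
return (u32)ns; |
} |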
/drivers/include/asm/e820.h |
---|
0,0 → 1,77 |
#ifndef _ASM_X86_E820_H |
#define _ASM_X86_E820_H |
#ifdef CONFIG_EFI |
#include <linux/numa.h> |
#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES) |
#else /* ! CONFIG_EFI */ |
#define E820_X_MAX E820MAX |
#endif |
#include <uapi/asm/e820.h> |
#ifndef __ASSEMBLY__ |
/* see comment in arch/x86/kernel/e820.c */ |
extern struct e820map e820; |
extern struct e820map e820_saved; |
extern unsigned long pci_mem_start; |
extern int e820_any_mapped(u64 start, u64 end, unsigned type); |
extern int e820_all_mapped(u64 start, u64 end, unsigned type); |
extern void e820_add_region(u64 start, u64 size, int type); |
extern void e820_print_map(char *who); |
extern int |
sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map); |
extern u64 e820_update_range(u64 start, u64 size, unsigned old_type, |
unsigned new_type); |
extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type, |
int checktype); |
extern void update_e820(void); |
extern void e820_setup_gap(void); |
extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize, |
unsigned long start_addr, unsigned long long end_addr); |
struct setup_data; |
extern void parse_e820_ext(u64 phys_addr, u32 data_len); |
#if defined(CONFIG_X86_64) || \ |
(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION)) |
extern void e820_mark_nosave_regions(unsigned long limit_pfn); |
#else |
static inline void e820_mark_nosave_regions(unsigned long limit_pfn) |
{ |
} |
#endif |
#ifdef CONFIG_MEMTEST |
extern void early_memtest(unsigned long start, unsigned long end); |
#else |
static inline void early_memtest(unsigned long start, unsigned long end) |
{ |
} |
#endif |
extern unsigned long e820_end_of_ram_pfn(void); |
extern unsigned long e820_end_of_low_ram_pfn(void); |
extern u64 early_reserve_e820(u64 sizet, u64 align); |
void memblock_x86_fill(void); |
void memblock_find_dma_reserve(void); |
extern void finish_e820_parsing(void); |
extern void e820_reserve_resources(void); |
extern void e820_reserve_resources_late(void); |
extern void setup_memory_map(void); |
extern char *default_machine_specific_memory_setup(void); |
/* |
* Returns true iff the specified range [s,e) is completely contained inside |
* the ISA region. |
*/ |
static inline bool is_ISA_range(u64 s, u64 e) |
{ |
return s >= ISA_START_ADDRESS && e <= ISA_END_ADDRESS; |
} |
#endif /* __ASSEMBLY__ */ |
#include <linux/ioport.h> |
#define HIGH_MEMORY (1024*1024) |
#endif /* _ASM_X86_E820_H */ |
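/* |
 * Illustrative check (hypothetical helper): is_ISA_range() is typically |
 * used to special-case legacy ISA addresses, e.g. when deciding whether |
 * range-based caching setup can be skipped. |
 */ |
static inline bool example_is_legacy_io(u64 start, u64 size) |
{ |
return is_ISA_range(start, start + size); |
} |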
/drivers/include/asm/irqflags.h |
---|
0,0 → 1,209 |
#ifndef _X86_IRQFLAGS_H_ |
#define _X86_IRQFLAGS_H_ |
#include <asm/processor-flags.h> |
#ifndef __ASSEMBLY__ |
/* |
* Interrupt control: |
*/ |
static inline unsigned long native_save_fl(void) |
{ |
unsigned long flags; |
/* |
* "=rm" is safe here, because "pop" adjusts the stack before |
* it evaluates its effective address -- this is part of the |
* documented behavior of the "pop" instruction. |
*/ |
asm volatile("# __raw_save_flags\n\t" |
"pushf ; pop %0" |
: "=rm" (flags) |
: /* no input */ |
: "memory"); |
return flags; |
} |
static inline void native_restore_fl(unsigned long flags) |
{ |
asm volatile("push %0 ; popf" |
: /* no output */ |
:"g" (flags) |
:"memory", "cc"); |
} |
static inline void native_irq_disable(void) |
{ |
asm volatile("cli": : :"memory"); |
} |
static inline void native_irq_enable(void) |
{ |
asm volatile("sti": : :"memory"); |
} |
static inline void native_safe_halt(void) |
{ |
asm volatile("sti; hlt": : :"memory"); |
} |
static inline void native_halt(void) |
{ |
asm volatile("hlt": : :"memory"); |
} |
#endif |
#ifdef CONFIG_PARAVIRT |
#include <asm/paravirt.h> |
#else |
#ifndef __ASSEMBLY__ |
#include <linux/types.h> |
static inline notrace unsigned long arch_local_save_flags(void) |
{ |
return native_save_fl(); |
} |
static inline notrace void arch_local_irq_restore(unsigned long flags) |
{ |
native_restore_fl(flags); |
} |
static inline notrace void arch_local_irq_disable(void) |
{ |
native_irq_disable(); |
} |
static inline notrace void arch_local_irq_enable(void) |
{ |
native_irq_enable(); |
} |
/* |
* Used in the idle loop; sti takes one instruction cycle |
* to complete: |
*/ |
static inline void arch_safe_halt(void) |
{ |
native_safe_halt(); |
} |
/* |
* Used when interrupts are already enabled or to |
* shutdown the processor: |
*/ |
static inline void halt(void) |
{ |
native_halt(); |
} |
/* |
* For spinlocks, etc: |
*/ |
static inline notrace unsigned long arch_local_irq_save(void) |
{ |
unsigned long flags = arch_local_save_flags(); |
arch_local_irq_disable(); |
return flags; |
} |
#else |
#define ENABLE_INTERRUPTS(x) sti |
#define DISABLE_INTERRUPTS(x) cli |
#ifdef CONFIG_X86_64 |
#define SWAPGS swapgs |
/* |
* Currently paravirt can't handle swapgs nicely when we |
* don't have a stack we can rely on (such as a user space |
* stack). So we either find a way around these or just fault |
* and emulate if a guest tries to call swapgs directly. |
* |
* Either way, this is a good way to document that we don't |
* have a reliable stack. x86_64 only. |
*/ |
#define SWAPGS_UNSAFE_STACK swapgs |
#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */ |
#define INTERRUPT_RETURN jmp native_iret |
#define USERGS_SYSRET64 \ |
swapgs; \ |
sysretq; |
#define USERGS_SYSRET32 \ |
swapgs; \ |
sysretl |
#define ENABLE_INTERRUPTS_SYSEXIT32 \ |
swapgs; \ |
sti; \ |
sysexit |
#else |
#define INTERRUPT_RETURN iret |
#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit |
#define GET_CR0_INTO_EAX movl %cr0, %eax |
#endif |
#endif /* __ASSEMBLY__ */ |
#endif /* CONFIG_PARAVIRT */ |
#ifndef __ASSEMBLY__ |
static inline int arch_irqs_disabled_flags(unsigned long flags) |
{ |
return !(flags & X86_EFLAGS_IF); |
} |
static inline int arch_irqs_disabled(void) |
{ |
unsigned long flags = arch_local_save_flags(); |
return arch_irqs_disabled_flags(flags); |
} |
#else |
#ifdef CONFIG_X86_64 |
#define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk |
#define ARCH_LOCKDEP_SYS_EXIT_IRQ \ |
TRACE_IRQS_ON; \ |
sti; \ |
SAVE_REST; \ |
LOCKDEP_SYS_EXIT; \ |
RESTORE_REST; \ |
cli; \ |
TRACE_IRQS_OFF; |
#else |
#define ARCH_LOCKDEP_SYS_EXIT \ |
pushl %eax; \ |
pushl %ecx; \ |
pushl %edx; \ |
call lockdep_sys_exit; \ |
popl %edx; \ |
popl %ecx; \ |
popl %eax; |
#define ARCH_LOCKDEP_SYS_EXIT_IRQ |
#endif |
#ifdef CONFIG_TRACE_IRQFLAGS |
# define TRACE_IRQS_ON call trace_hardirqs_on_thunk; |
# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk; |
#else |
# define TRACE_IRQS_ON |
# define TRACE_IRQS_OFF |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# define LOCKDEP_SYS_EXIT ARCH_LOCKDEP_SYS_EXIT |
# define LOCKDEP_SYS_EXIT_IRQ ARCH_LOCKDEP_SYS_EXIT_IRQ |
# else |
# define LOCKDEP_SYS_EXIT |
# define LOCKDEP_SYS_EXIT_IRQ |
# endif |
#endif /* __ASSEMBLY__ */ |
#endif |
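/* |
 * Usage sketch (hypothetical helper): the canonical save/disable/restore |
 * pattern these primitives exist to support. |
 */ |
static inline void example_critical_section(void) |
{ |
unsigned long flags; |
flags = arch_local_irq_save(); |
/* ... touch data shared with interrupt context ... */ |
arch_local_irq_restore(flags); |
} |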
/drivers/include/asm/linkage.h |
---|
0,0 → 1,61 |
#ifndef _ASM_X86_LINKAGE_H |
#define _ASM_X86_LINKAGE_H |
#include <linux/stringify.h> |
#undef notrace |
#define notrace __attribute__((no_instrument_function)) |
#ifdef CONFIG_X86_32 |
#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) |
/* |
* Make sure the compiler doesn't do anything stupid with the |
* arguments on the stack - they are owned by the *caller*, not |
* the callee. This just fools gcc into not spilling into them, |
* and keeps it from doing tailcall recursion and/or using the |
* stack slots for temporaries, since they are live and "used" |
* all the way to the end of the function. |
* |
* NOTE! On x86-64, all the arguments are in registers, so this |
* only matters on a 32-bit kernel. |
*/ |
#define asmlinkage_protect(n, ret, args...) \ |
__asmlinkage_protect##n(ret, ##args) |
#define __asmlinkage_protect_n(ret, args...) \ |
__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args) |
#define __asmlinkage_protect0(ret) \ |
__asmlinkage_protect_n(ret) |
#define __asmlinkage_protect1(ret, arg1) \ |
__asmlinkage_protect_n(ret, "m" (arg1)) |
#define __asmlinkage_protect2(ret, arg1, arg2) \ |
__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2)) |
#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ |
__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3)) |
#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ |
__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ |
"m" (arg4)) |
#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ |
__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ |
"m" (arg4), "m" (arg5)) |
#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ |
__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ |
"m" (arg4), "m" (arg5), "m" (arg6)) |
#endif /* CONFIG_X86_32 */ |
#ifdef __ASSEMBLY__ |
#define GLOBAL(name) \ |
.globl name; \ |
name: |
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16) |
#define __ALIGN .p2align 4, 0x90 |
#define __ALIGN_STR __stringify(__ALIGN) |
#endif |
#endif /* __ASSEMBLY__ */ |
#endif /* _ASM_X86_LINKAGE_H */ |
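/* |
 * Usage sketch (hypothetical syscall-style function; assumes |
 * CPP_ASMLINKAGE from <linux/linkage.h>): asmlinkage forces stack-passed |
 * arguments on x86_32, and asmlinkage_protect() keeps gcc from reusing |
 * the caller-owned argument slots. |
 */ |
asmlinkage long example_sys_add(long a, long b) |
{ |
long ret = a + b; |
asmlinkage_protect(2, ret, a, b); |
return ret; |
} |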
/drivers/include/asm/math_emu.h |
---|
0,0 → 1,18 |
#ifndef _ASM_X86_MATH_EMU_H |
#define _ASM_X86_MATH_EMU_H |
#include <asm/ptrace.h> |
#include <asm/vm86.h> |
/* This structure matches the layout of the data saved to the stack |
following a device-not-present interrupt, part of it saved |
automatically by the 80386/80486. |
*/ |
struct math_emu_info { |
long ___orig_eip; |
union { |
struct pt_regs *regs; |
struct kernel_vm86_regs *vm86; |
}; |
}; |
#endif /* _ASM_X86_MATH_EMU_H */ |
/drivers/include/asm/msr.h |
---|
0,0 → 1,291 |
#ifndef _ASM_X86_MSR_H |
#define _ASM_X86_MSR_H |
#include <uapi/asm/msr.h> |
#ifndef __ASSEMBLY__ |
#include <asm/asm.h> |
#include <asm/errno.h> |
#include <asm/cpumask.h> |
struct msr { |
union { |
struct { |
u32 l; |
u32 h; |
}; |
u64 q; |
}; |
}; |
struct msr_info { |
u32 msr_no; |
struct msr reg; |
struct msr *msrs; |
int err; |
}; |
struct msr_regs_info { |
u32 *regs; |
int err; |
}; |
static inline unsigned long long native_read_tscp(unsigned int *aux) |
{ |
unsigned long low, high; |
asm volatile(".byte 0x0f,0x01,0xf9" |
: "=a" (low), "=d" (high), "=c" (*aux)); |
return low | ((u64)high << 32); |
} |
/* |
* both i386 and x86_64 return a 64-bit value in edx:eax, but gcc's "A" |
* constraint has different meanings. For i386, "A" means exactly |
* edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead, |
* it means rax *or* rdx. |
*/ |
#ifdef CONFIG_X86_64 |
#define DECLARE_ARGS(val, low, high) unsigned low, high |
#define EAX_EDX_VAL(val, low, high) ((low) | ((u64)(high) << 32)) |
#define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high) |
#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high) |
#else |
#define DECLARE_ARGS(val, low, high) unsigned long long val |
#define EAX_EDX_VAL(val, low, high) (val) |
#define EAX_EDX_ARGS(val, low, high) "A" (val) |
#define EAX_EDX_RET(val, low, high) "=A" (val) |
#endif |
static inline unsigned long long native_read_msr(unsigned int msr) |
{ |
DECLARE_ARGS(val, low, high); |
asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr)); |
return EAX_EDX_VAL(val, low, high); |
} |
static inline unsigned long long native_read_msr_safe(unsigned int msr, |
int *err) |
{ |
DECLARE_ARGS(val, low, high); |
asm volatile("2: rdmsr ; xor %[err],%[err]\n" |
"1:\n\t" |
".section .fixup,\"ax\"\n\t" |
"3: mov %[fault],%[err] ; jmp 1b\n\t" |
".previous\n\t" |
_ASM_EXTABLE(2b, 3b) |
: [err] "=r" (*err), EAX_EDX_RET(val, low, high) |
: "c" (msr), [fault] "i" (-EIO)); |
return EAX_EDX_VAL(val, low, high); |
} |
static inline void native_write_msr(unsigned int msr, |
unsigned low, unsigned high) |
{ |
asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory"); |
} |
/* Can be uninlined because referenced by paravirt */ |
notrace static inline int native_write_msr_safe(unsigned int msr, |
unsigned low, unsigned high) |
{ |
int err; |
asm volatile("2: wrmsr ; xor %[err],%[err]\n" |
"1:\n\t" |
".section .fixup,\"ax\"\n\t" |
"3: mov %[fault],%[err] ; jmp 1b\n\t" |
".previous\n\t" |
_ASM_EXTABLE(2b, 3b) |
: [err] "=a" (err) |
: "c" (msr), "0" (low), "d" (high), |
[fault] "i" (-EIO) |
: "memory"); |
return err; |
} |
extern unsigned long long native_read_tsc(void); |
extern int rdmsr_safe_regs(u32 regs[8]); |
extern int wrmsr_safe_regs(u32 regs[8]); |
static __always_inline unsigned long long __native_read_tsc(void) |
{ |
DECLARE_ARGS(val, low, high); |
asm volatile("rdtsc" : EAX_EDX_RET(val, low, high)); |
return EAX_EDX_VAL(val, low, high); |
} |
static inline unsigned long long native_read_pmc(int counter) |
{ |
DECLARE_ARGS(val, low, high); |
asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter)); |
return EAX_EDX_VAL(val, low, high); |
} |
#ifdef CONFIG_PARAVIRT |
#include <asm/paravirt.h> |
#else |
#include <linux/errno.h> |
/* |
* Access to machine-specific registers (available on 586 and better only) |
* Note: the rd* operations modify the parameters directly (without using |
* pointer indirection); this allows gcc to optimize better |
*/ |
#define rdmsr(msr, low, high) \ |
do { \ |
u64 __val = native_read_msr((msr)); \ |
(void)((low) = (u32)__val); \ |
(void)((high) = (u32)(__val >> 32)); \ |
} while (0) |
static inline void wrmsr(unsigned msr, unsigned low, unsigned high) |
{ |
native_write_msr(msr, low, high); |
} |
#define rdmsrl(msr, val) \ |
((val) = native_read_msr((msr))) |
#define wrmsrl(msr, val) \ |
native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32)) |
/* wrmsr with exception handling */ |
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high) |
{ |
return native_write_msr_safe(msr, low, high); |
} |
/* rdmsr with exception handling */ |
#define rdmsr_safe(msr, low, high) \ |
({ \ |
int __err; \ |
u64 __val = native_read_msr_safe((msr), &__err); \ |
(*low) = (u32)__val; \ |
(*high) = (u32)(__val >> 32); \ |
__err; \ |
}) |
static inline int rdmsrl_safe(unsigned msr, unsigned long long *p) |
{ |
int err; |
*p = native_read_msr_safe(msr, &err); |
return err; |
} |
#define rdtscl(low) \ |
((low) = (u32)__native_read_tsc()) |
#define rdtscll(val) \ |
((val) = __native_read_tsc()) |
#define rdpmc(counter, low, high) \ |
do { \ |
u64 _l = native_read_pmc((counter)); \ |
(low) = (u32)_l; \ |
(high) = (u32)(_l >> 32); \ |
} while (0) |
#define rdpmcl(counter, val) ((val) = native_read_pmc(counter)) |
#define rdtscp(low, high, aux) \ |
do { \ |
unsigned long long _val = native_read_tscp(&(aux)); \ |
(low) = (u32)_val; \ |
(high) = (u32)(_val >> 32); \ |
} while (0) |
#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux)) |
#endif /* !CONFIG_PARAVIRT */ |
#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \ |
(u32)((val) >> 32)) |
#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high)) |
#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0) |
struct msr *msrs_alloc(void); |
void msrs_free(struct msr *msrs); |
int msr_set_bit(u32 msr, u8 bit); |
int msr_clear_bit(u32 msr, u8 bit); |
#ifdef CONFIG_SMP |
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); |
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); |
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q); |
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q); |
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); |
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); |
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); |
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); |
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q); |
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q); |
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); |
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); |
#else /* CONFIG_SMP */ |
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) |
{ |
rdmsr(msr_no, *l, *h); |
return 0; |
} |
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) |
{ |
wrmsr(msr_no, l, h); |
return 0; |
} |
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) |
{ |
rdmsrl(msr_no, *q); |
return 0; |
} |
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q) |
{ |
wrmsrl(msr_no, q); |
return 0; |
} |
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no, |
struct msr *msrs) |
{ |
rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h)); |
} |
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no, |
struct msr *msrs) |
{ |
wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h); |
} |
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, |
u32 *l, u32 *h) |
{ |
return rdmsr_safe(msr_no, l, h); |
} |
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) |
{ |
return wrmsr_safe(msr_no, l, h); |
} |
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) |
{ |
return rdmsrl_safe(msr_no, q); |
} |
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q) |
{ |
return wrmsrl_safe(msr_no, q); |
} |
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) |
{ |
return rdmsr_safe_regs(regs); |
} |
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) |
{ |
return wrmsr_safe_regs(regs); |
} |
#endif /* CONFIG_SMP */ |
#endif /* __ASSEMBLY__ */ |
#endif /* _ASM_X86_MSR_H */ |
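/* |
 * Usage sketch (hypothetical helpers): the 64-bit accessors hide the |
 * edx:eax split, and the _safe variants catch the #GP raised by a |
 * non-existent MSR and report it as -EIO instead of faulting. |
 */ |
static inline u64 example_read_msr(u32 msr) |
{ |
u64 val; |
rdmsrl(msr, val); |
return val; |
} |
static inline int example_try_read_msr(u32 msr, u64 *val) |
{ |
return rdmsrl_safe(msr, val); /* 0 on success, -EIO on fault */ |
} |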
/drivers/include/asm/nops.h |
---|
0,0 → 1,146 |
#ifndef _ASM_X86_NOPS_H |
#define _ASM_X86_NOPS_H |
/* |
* Define nops for use with alternative() and for tracing. |
* |
* *_NOP5_ATOMIC must be a single instruction. |
*/ |
#define NOP_DS_PREFIX 0x3e |
/* generic versions from gas |
1: nop |
the following instructions are NOT nops in 64-bit mode, |
for 64-bit mode use K8 or P6 nops instead |
2: movl %esi,%esi |
3: leal 0x00(%esi),%esi |
4: leal 0x00(,%esi,1),%esi |
6: leal 0x00000000(%esi),%esi |
7: leal 0x00000000(,%esi,1),%esi |
*/ |
#define GENERIC_NOP1 0x90 |
#define GENERIC_NOP2 0x89,0xf6 |
#define GENERIC_NOP3 0x8d,0x76,0x00 |
#define GENERIC_NOP4 0x8d,0x74,0x26,0x00 |
#define GENERIC_NOP5 GENERIC_NOP1,GENERIC_NOP4 |
#define GENERIC_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00 |
#define GENERIC_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00 |
#define GENERIC_NOP8 GENERIC_NOP1,GENERIC_NOP7 |
#define GENERIC_NOP5_ATOMIC NOP_DS_PREFIX,GENERIC_NOP4 |
/* Opteron 64bit nops |
1: nop |
2: osp nop |
3: osp osp nop |
4: osp osp osp nop |
*/ |
#define K8_NOP1 GENERIC_NOP1 |
#define K8_NOP2 0x66,K8_NOP1 |
#define K8_NOP3 0x66,K8_NOP2 |
#define K8_NOP4 0x66,K8_NOP3 |
#define K8_NOP5 K8_NOP3,K8_NOP2 |
#define K8_NOP6 K8_NOP3,K8_NOP3 |
#define K8_NOP7 K8_NOP4,K8_NOP3 |
#define K8_NOP8 K8_NOP4,K8_NOP4 |
#define K8_NOP5_ATOMIC 0x66,K8_NOP4 |
/* K7 nops |
uses eax dependencies (arbitrary choice) |
1: nop |
2: movl %eax,%eax |
3: leal (,%eax,1),%eax |
4: leal 0x00(,%eax,1),%eax |
6: leal 0x00000000(%eax),%eax |
7: leal 0x00000000(,%eax,1),%eax |
*/ |
#define K7_NOP1 GENERIC_NOP1 |
#define K7_NOP2 0x8b,0xc0 |
#define K7_NOP3 0x8d,0x04,0x20 |
#define K7_NOP4 0x8d,0x44,0x20,0x00 |
#define K7_NOP5 K7_NOP4,K7_NOP1 |
#define K7_NOP6 0x8d,0x80,0,0,0,0 |
#define K7_NOP7 0x8D,0x04,0x05,0,0,0,0 |
#define K7_NOP8 K7_NOP7,K7_NOP1 |
#define K7_NOP5_ATOMIC NOP_DS_PREFIX,K7_NOP4 |
/* P6 nops |
uses eax dependencies (Intel-recommended choice) |
1: nop |
2: osp nop |
3: nopl (%eax) |
4: nopl 0x00(%eax) |
5: nopl 0x00(%eax,%eax,1) |
6: osp nopl 0x00(%eax,%eax,1) |
7: nopl 0x00000000(%eax) |
8: nopl 0x00000000(%eax,%eax,1) |
Note: All the above are assumed to be a single instruction. |
There is kernel code that depends on this. |
*/ |
#define P6_NOP1 GENERIC_NOP1 |
#define P6_NOP2 0x66,0x90 |
#define P6_NOP3 0x0f,0x1f,0x00 |
#define P6_NOP4 0x0f,0x1f,0x40,0 |
#define P6_NOP5 0x0f,0x1f,0x44,0x00,0 |
#define P6_NOP6 0x66,0x0f,0x1f,0x44,0x00,0 |
#define P6_NOP7 0x0f,0x1f,0x80,0,0,0,0 |
#define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0 |
#define P6_NOP5_ATOMIC P6_NOP5 |
#ifdef __ASSEMBLY__ |
#define _ASM_MK_NOP(x) .byte x |
#else |
#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n" |
#endif |
#if defined(CONFIG_MK7) |
#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1) |
#define ASM_NOP2 _ASM_MK_NOP(K7_NOP2) |
#define ASM_NOP3 _ASM_MK_NOP(K7_NOP3) |
#define ASM_NOP4 _ASM_MK_NOP(K7_NOP4) |
#define ASM_NOP5 _ASM_MK_NOP(K7_NOP5) |
#define ASM_NOP6 _ASM_MK_NOP(K7_NOP6) |
#define ASM_NOP7 _ASM_MK_NOP(K7_NOP7) |
#define ASM_NOP8 _ASM_MK_NOP(K7_NOP8) |
#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K7_NOP5_ATOMIC) |
#elif defined(CONFIG_X86_P6_NOP) |
#define ASM_NOP1 _ASM_MK_NOP(P6_NOP1) |
#define ASM_NOP2 _ASM_MK_NOP(P6_NOP2) |
#define ASM_NOP3 _ASM_MK_NOP(P6_NOP3) |
#define ASM_NOP4 _ASM_MK_NOP(P6_NOP4) |
#define ASM_NOP5 _ASM_MK_NOP(P6_NOP5) |
#define ASM_NOP6 _ASM_MK_NOP(P6_NOP6) |
#define ASM_NOP7 _ASM_MK_NOP(P6_NOP7) |
#define ASM_NOP8 _ASM_MK_NOP(P6_NOP8) |
#define ASM_NOP5_ATOMIC _ASM_MK_NOP(P6_NOP5_ATOMIC) |
#elif defined(CONFIG_X86_64) |
#define ASM_NOP1 _ASM_MK_NOP(K8_NOP1) |
#define ASM_NOP2 _ASM_MK_NOP(K8_NOP2) |
#define ASM_NOP3 _ASM_MK_NOP(K8_NOP3) |
#define ASM_NOP4 _ASM_MK_NOP(K8_NOP4) |
#define ASM_NOP5 _ASM_MK_NOP(K8_NOP5) |
#define ASM_NOP6 _ASM_MK_NOP(K8_NOP6) |
#define ASM_NOP7 _ASM_MK_NOP(K8_NOP7) |
#define ASM_NOP8 _ASM_MK_NOP(K8_NOP8) |
#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K8_NOP5_ATOMIC) |
#else |
#define ASM_NOP1 _ASM_MK_NOP(GENERIC_NOP1) |
#define ASM_NOP2 _ASM_MK_NOP(GENERIC_NOP2) |
#define ASM_NOP3 _ASM_MK_NOP(GENERIC_NOP3) |
#define ASM_NOP4 _ASM_MK_NOP(GENERIC_NOP4) |
#define ASM_NOP5 _ASM_MK_NOP(GENERIC_NOP5) |
#define ASM_NOP6 _ASM_MK_NOP(GENERIC_NOP6) |
#define ASM_NOP7 _ASM_MK_NOP(GENERIC_NOP7) |
#define ASM_NOP8 _ASM_MK_NOP(GENERIC_NOP8) |
#define ASM_NOP5_ATOMIC _ASM_MK_NOP(GENERIC_NOP5_ATOMIC) |
#endif |
#define ASM_NOP_MAX 8 |
#define NOP_ATOMIC5 (ASM_NOP_MAX+1) /* Entry for the 5-byte atomic NOP */ |
#ifndef __ASSEMBLY__ |
extern const unsigned char * const *ideal_nops; |
extern void arch_init_ideal_nops(void); |
#endif |
#endif /* _ASM_X86_NOPS_H */ |
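/* |
 * Usage sketch (hypothetical helper): each ASM_NOP* expands to a |
 * ".byte ..." string, so a fixed-size patchable site can be emitted |
 * directly from inline assembly. |
 */ |
static inline void example_nop_pad(void) |
{ |
asm volatile(ASM_NOP5); /* one 5-byte nop, e.g. a patchable call site */ |
} |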
/drivers/include/asm/page.h |
---|
0,0 → 1,76 |
#ifndef _ASM_X86_PAGE_H |
#define _ASM_X86_PAGE_H |
#include <linux/types.h> |
#ifdef __KERNEL__ |
#include <asm/page_types.h> |
#ifdef CONFIG_X86_64 |
#include <asm/page_64.h> |
#else |
#include <asm/page_32.h> |
#endif /* CONFIG_X86_64 */ |
#ifndef __ASSEMBLY__ |
struct page; |
#include <linux/range.h> |
extern struct range pfn_mapped[]; |
extern int nr_pfn_mapped; |
static inline void clear_user_page(void *page, unsigned long vaddr, |
struct page *pg) |
{ |
clear_page(page); |
} |
static inline void copy_user_page(void *to, void *from, unsigned long vaddr, |
struct page *topage) |
{ |
copy_page(to, from); |
} |
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ |
alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) |
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE |
#define __pa(x) __phys_addr((unsigned long)(x)) |
#define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x)) |
/* __pa_symbol should be used for C visible symbols. |
This seems to be the official gcc blessed way to do such arithmetic. */ |
/* |
* We need __phys_reloc_hide() here because gcc may assume that there is no |
* overflow during __pa() calculation and can optimize it unexpectedly. |
* Newer versions of gcc provide -fno-strict-overflow switch to handle this |
* case properly. Once all supported versions of gcc understand it, we can |
* remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated) |
*/ |
#define __pa_symbol(x) \ |
__phys_addr_symbol(__phys_reloc_hide((unsigned long)(x))) |
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) |
#define __boot_va(x) __va(x) |
#define __boot_pa(x) __pa(x) |
/* |
* virt_to_page(kaddr) returns a valid pointer if and only if |
* virt_addr_valid(kaddr) returns true. |
*/ |
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
extern bool __virt_addr_valid(unsigned long kaddr); |
#define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr)) |
#endif /* __ASSEMBLY__ */ |
#include <asm-generic/memory_model.h> |
#include <asm-generic/getorder.h> |
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_PAGE_H */ |
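/* |
 * Round-trip sketch (hypothetical helper): for lowmem kernel addresses, |
 * __pa() and __va() are inverse linear translations by PAGE_OFFSET. |
 */ |
static inline void *example_phys_roundtrip(void *kaddr) |
{ |
return __va(__pa(kaddr)); |
} |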
/drivers/include/asm/page_32.h |
---|
0,0 → 1,48 |
#ifndef _ASM_X86_PAGE_32_H |
#define _ASM_X86_PAGE_32_H |
#include <asm/page_32_types.h> |
#ifndef __ASSEMBLY__ |
#define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET) |
#ifdef CONFIG_DEBUG_VIRTUAL |
extern unsigned long __phys_addr(unsigned long); |
#else |
#define __phys_addr(x) __phys_addr_nodebug(x) |
#endif |
#define __phys_addr_symbol(x) __phys_addr(x) |
#define __phys_reloc_hide(x) RELOC_HIDE((x), 0) |
#ifdef CONFIG_FLATMEM |
#define pfn_valid(pfn) ((pfn) < max_mapnr) |
#endif /* CONFIG_FLATMEM */ |
#ifdef CONFIG_X86_USE_3DNOW |
#include <asm/mmx.h> |
static inline void clear_page(void *page) |
{ |
mmx_clear_page(page); |
} |
static inline void copy_page(void *to, void *from) |
{ |
mmx_copy_page(to, from); |
} |
#else /* !CONFIG_X86_USE_3DNOW */ |
#include <linux/string.h> |
static inline void clear_page(void *page) |
{ |
memset(page, 0, PAGE_SIZE); |
} |
static inline void copy_page(void *to, void *from) |
{ |
memcpy(to, from, PAGE_SIZE); |
} |
#endif /* CONFIG_X86_USE_3DNOW */ |
#endif /* !__ASSEMBLY__ */ |
#endif /* _ASM_X86_PAGE_32_H */ |
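/* |
 * Usage sketch (hypothetical helper): clear_page()/copy_page() operate |
 * on exactly PAGE_SIZE bytes and select the MMX/3DNow! versions when |
 * CONFIG_X86_USE_3DNOW is set. |
 */ |
static inline void example_duplicate_page(void *dst, void *src) |
{ |
copy_page(dst, src); |
} |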
/drivers/include/asm/percpu.h |
---|
0,0 → 1,620 |
#ifndef _ASM_X86_PERCPU_H |
#define _ASM_X86_PERCPU_H |
#ifdef CONFIG_X86_64 |
#define __percpu_seg gs |
#define __percpu_mov_op movq |
#else |
#define __percpu_seg fs |
#define __percpu_mov_op movl |
#endif |
#ifdef __ASSEMBLY__ |
/* |
* PER_CPU finds an address of a per-cpu variable. |
* |
* Args: |
* var - variable name |
* reg - 32bit register |
* |
* The resulting address is stored in the "reg" argument. |
* |
* Example: |
* PER_CPU(cpu_gdt_descr, %ebx) |
*/ |
#ifdef CONFIG_SMP |
#define PER_CPU(var, reg) \ |
__percpu_mov_op %__percpu_seg:this_cpu_off, reg; \ |
lea var(reg), reg |
#define PER_CPU_VAR(var) %__percpu_seg:var |
#else /* ! SMP */ |
#define PER_CPU(var, reg) __percpu_mov_op $var, reg |
#define PER_CPU_VAR(var) var |
#endif /* SMP */ |
#ifdef CONFIG_X86_64_SMP |
#define INIT_PER_CPU_VAR(var) init_per_cpu__##var |
#else |
#define INIT_PER_CPU_VAR(var) var |
#endif |
#else /* ...!ASSEMBLY */ |
#include <linux/kernel.h> |
#include <linux/stringify.h> |
#ifdef CONFIG_SMP |
#define __percpu_prefix "%%"__stringify(__percpu_seg)":" |
#define __my_cpu_offset this_cpu_read(this_cpu_off) |
/* |
* Compared to the generic __my_cpu_offset version, the following |
* saves one instruction and avoids clobbering a temp register. |
*/ |
#define arch_raw_cpu_ptr(ptr) \ |
({ \ |
unsigned long tcp_ptr__; \ |
asm volatile("add " __percpu_arg(1) ", %0" \ |
: "=r" (tcp_ptr__) \ |
: "m" (this_cpu_off), "0" (ptr)); \ |
(typeof(*(ptr)) __kernel __force *)tcp_ptr__; \ |
}) |
#else |
#define __percpu_prefix "" |
#endif |
#define __percpu_arg(x) __percpu_prefix "%" #x |
/* |
* Initialized pointers to per-cpu variables needed for the boot |
* processor need to use these macros to get the proper address |
* offset from __per_cpu_load on SMP. |
* |
* There also must be an entry in vmlinux_64.lds.S |
*/ |
#define DECLARE_INIT_PER_CPU(var) \ |
extern typeof(var) init_per_cpu_var(var) |
#ifdef CONFIG_X86_64_SMP |
#define init_per_cpu_var(var) init_per_cpu__##var |
#else |
#define init_per_cpu_var(var) var |
#endif |
/* For arch-specific code, we can use direct single-insn ops (they |
* don't give an lvalue though). */ |
extern void __bad_percpu_size(void); |
#define percpu_to_op(op, var, val) \ |
do { \ |
typedef typeof(var) pto_T__; \ |
if (0) { \ |
pto_T__ pto_tmp__; \ |
pto_tmp__ = (val); \ |
(void)pto_tmp__; \ |
} \ |
switch (sizeof(var)) { \ |
case 1: \ |
asm(op "b %1,"__percpu_arg(0) \ |
: "+m" (var) \ |
: "qi" ((pto_T__)(val))); \ |
break; \ |
case 2: \ |
asm(op "w %1,"__percpu_arg(0) \ |
: "+m" (var) \ |
: "ri" ((pto_T__)(val))); \ |
break; \ |
case 4: \ |
asm(op "l %1,"__percpu_arg(0) \ |
: "+m" (var) \ |
: "ri" ((pto_T__)(val))); \ |
break; \ |
case 8: \ |
asm(op "q %1,"__percpu_arg(0) \ |
: "+m" (var) \ |
: "re" ((pto_T__)(val))); \ |
break; \ |
default: __bad_percpu_size(); \ |
} \ |
} while (0) |
/* |
* Generate a percpu add-to-memory instruction, optimizing to inc/dec |
* when the constant value added or subtracted is 1 or -1. |
*/ |
#define percpu_add_op(var, val) \ |
do { \ |
typedef typeof(var) pao_T__; \ |
const int pao_ID__ = (__builtin_constant_p(val) && \ |
((val) == 1 || (val) == -1)) ? \ |
(int)(val) : 0; \ |
if (0) { \ |
pao_T__ pao_tmp__; \ |
pao_tmp__ = (val); \ |
(void)pao_tmp__; \ |
} \ |
switch (sizeof(var)) { \ |
case 1: \ |
if (pao_ID__ == 1) \ |
asm("incb "__percpu_arg(0) : "+m" (var)); \ |
else if (pao_ID__ == -1) \ |
asm("decb "__percpu_arg(0) : "+m" (var)); \ |
else \ |
asm("addb %1, "__percpu_arg(0) \ |
: "+m" (var) \ |
: "qi" ((pao_T__)(val))); \ |
break; \ |
case 2: \ |
if (pao_ID__ == 1) \ |
asm("incw "__percpu_arg(0) : "+m" (var)); \ |
else if (pao_ID__ == -1) \ |
asm("decw "__percpu_arg(0) : "+m" (var)); \ |
else \ |
asm("addw %1, "__percpu_arg(0) \ |
: "+m" (var) \ |
: "ri" ((pao_T__)(val))); \ |
break; \ |
case 4: \ |
if (pao_ID__ == 1) \ |
asm("incl "__percpu_arg(0) : "+m" (var)); \ |
else if (pao_ID__ == -1) \ |
asm("decl "__percpu_arg(0) : "+m" (var)); \ |
else \ |
asm("addl %1, "__percpu_arg(0) \ |
: "+m" (var) \ |
: "ri" ((pao_T__)(val))); \ |
break; \ |
case 8: \ |
if (pao_ID__ == 1) \ |
asm("incq "__percpu_arg(0) : "+m" (var)); \ |
else if (pao_ID__ == -1) \ |
asm("decq "__percpu_arg(0) : "+m" (var)); \ |
else \ |
asm("addq %1, "__percpu_arg(0) \ |
: "+m" (var) \ |
: "re" ((pao_T__)(val))); \ |
break; \ |
default: __bad_percpu_size(); \ |
} \ |
} while (0) |
#define percpu_from_op(op, var) \ |
({ \ |
typeof(var) pfo_ret__; \ |
switch (sizeof(var)) { \ |
case 1: \ |
asm(op "b "__percpu_arg(1)",%0" \ |
: "=q" (pfo_ret__) \ |
: "m" (var)); \ |
break; \ |
case 2: \ |
asm(op "w "__percpu_arg(1)",%0" \ |
: "=r" (pfo_ret__) \ |
: "m" (var)); \ |
break; \ |
case 4: \ |
asm(op "l "__percpu_arg(1)",%0" \ |
: "=r" (pfo_ret__) \ |
: "m" (var)); \ |
break; \ |
case 8: \ |
asm(op "q "__percpu_arg(1)",%0" \ |
: "=r" (pfo_ret__) \ |
: "m" (var)); \ |
break; \ |
default: __bad_percpu_size(); \ |
} \ |
pfo_ret__; \ |
}) |
#define percpu_stable_op(op, var) \ |
({ \ |
typeof(var) pfo_ret__; \ |
switch (sizeof(var)) { \ |
case 1: \ |
asm(op "b "__percpu_arg(P1)",%0" \ |
: "=q" (pfo_ret__) \ |
: "p" (&(var))); \ |
break; \ |
case 2: \ |
asm(op "w "__percpu_arg(P1)",%0" \ |
: "=r" (pfo_ret__) \ |
: "p" (&(var))); \ |
break; \ |
case 4: \ |
asm(op "l "__percpu_arg(P1)",%0" \ |
: "=r" (pfo_ret__) \ |
: "p" (&(var))); \ |
break; \ |
case 8: \ |
asm(op "q "__percpu_arg(P1)",%0" \ |
: "=r" (pfo_ret__) \ |
: "p" (&(var))); \ |
break; \ |
default: __bad_percpu_size(); \ |
} \ |
pfo_ret__; \ |
}) |
#define percpu_unary_op(op, var) \ |
({ \ |
switch (sizeof(var)) { \ |
case 1: \ |
asm(op "b "__percpu_arg(0) \ |
: "+m" (var)); \ |
break; \ |
case 2: \ |
asm(op "w "__percpu_arg(0) \ |
: "+m" (var)); \ |
break; \ |
case 4: \ |
asm(op "l "__percpu_arg(0) \ |
: "+m" (var)); \ |
break; \ |
case 8: \ |
asm(op "q "__percpu_arg(0) \ |
: "+m" (var)); \ |
break; \ |
default: __bad_percpu_size(); \ |
} \ |
}) |
/* |
* Add return operation |
*/ |
#define percpu_add_return_op(var, val) \ |
({ \ |
typeof(var) paro_ret__ = val; \ |
switch (sizeof(var)) { \ |
case 1: \ |
asm("xaddb %0, "__percpu_arg(1) \ |
: "+q" (paro_ret__), "+m" (var) \ |
: : "memory"); \ |
break; \ |
case 2: \ |
asm("xaddw %0, "__percpu_arg(1) \ |
: "+r" (paro_ret__), "+m" (var) \ |
: : "memory"); \ |
break; \ |
case 4: \ |
asm("xaddl %0, "__percpu_arg(1) \ |
: "+r" (paro_ret__), "+m" (var) \ |
: : "memory"); \ |
break; \ |
case 8: \ |
asm("xaddq %0, "__percpu_arg(1) \ |
: "+re" (paro_ret__), "+m" (var) \ |
: : "memory"); \ |
break; \ |
default: __bad_percpu_size(); \ |
} \ |
paro_ret__ += val; \ |
paro_ret__; \ |
}) |
/* |
* xchg is implemented here with a cmpxchg loop instead of a real xchg: |
* xchg carries an implied lock prefix, which is expensive and keeps the |
* processor from prefetching cachelines. |
*/ |
#define percpu_xchg_op(var, nval) \ |
({ \ |
typeof(var) pxo_ret__; \ |
typeof(var) pxo_new__ = (nval); \ |
switch (sizeof(var)) { \ |
case 1: \ |
asm("\n\tmov "__percpu_arg(1)",%%al" \ |
"\n1:\tcmpxchgb %2, "__percpu_arg(1) \ |
"\n\tjnz 1b" \ |
: "=&a" (pxo_ret__), "+m" (var) \ |
: "q" (pxo_new__) \ |
: "memory"); \ |
break; \ |
case 2: \ |
asm("\n\tmov "__percpu_arg(1)",%%ax" \ |
"\n1:\tcmpxchgw %2, "__percpu_arg(1) \ |
"\n\tjnz 1b" \ |
: "=&a" (pxo_ret__), "+m" (var) \ |
: "r" (pxo_new__) \ |
: "memory"); \ |
break; \ |
case 4: \ |
asm("\n\tmov "__percpu_arg(1)",%%eax" \ |
"\n1:\tcmpxchgl %2, "__percpu_arg(1) \ |
"\n\tjnz 1b" \ |
: "=&a" (pxo_ret__), "+m" (var) \ |
: "r" (pxo_new__) \ |
: "memory"); \ |
break; \ |
case 8: \ |
asm("\n\tmov "__percpu_arg(1)",%%rax" \ |
"\n1:\tcmpxchgq %2, "__percpu_arg(1) \ |
"\n\tjnz 1b" \ |
: "=&a" (pxo_ret__), "+m" (var) \ |
: "r" (pxo_new__) \ |
: "memory"); \ |
break; \ |
default: __bad_percpu_size(); \ |
} \ |
pxo_ret__; \ |
}) |
/* |
* cmpxchg has no such implied lock semantics; as a result it is much |
* more efficient for cpu-local operations. |
*/ |
#define percpu_cmpxchg_op(var, oval, nval) \ |
({ \ |
typeof(var) pco_ret__; \ |
typeof(var) pco_old__ = (oval); \ |
typeof(var) pco_new__ = (nval); \ |
switch (sizeof(var)) { \ |
case 1: \ |
asm("cmpxchgb %2, "__percpu_arg(1) \ |
: "=a" (pco_ret__), "+m" (var) \ |
: "q" (pco_new__), "0" (pco_old__) \ |
: "memory"); \ |
break; \ |
case 2: \ |
asm("cmpxchgw %2, "__percpu_arg(1) \ |
: "=a" (pco_ret__), "+m" (var) \ |
: "r" (pco_new__), "0" (pco_old__) \ |
: "memory"); \ |
break; \ |
case 4: \ |
asm("cmpxchgl %2, "__percpu_arg(1) \ |
: "=a" (pco_ret__), "+m" (var) \ |
: "r" (pco_new__), "0" (pco_old__) \ |
: "memory"); \ |
break; \ |
case 8: \ |
asm("cmpxchgq %2, "__percpu_arg(1) \ |
: "=a" (pco_ret__), "+m" (var) \ |
: "r" (pco_new__), "0" (pco_old__) \ |
: "memory"); \ |
break; \ |
default: __bad_percpu_size(); \ |
} \ |
pco_ret__; \ |
}) |
/* |
* this_cpu_read() makes gcc load the percpu variable every time it is |
* accessed, while this_cpu_read_stable() allows the value to be cached. |
* this_cpu_read_stable() is more efficient and can be used if its value |
* is guaranteed to be valid across cpus. The current users include |
* get_current() and get_thread_info(), both of which are actually |
* per-thread variables implemented as per-cpu variables and thus |
* stable for the duration of the respective task. |
*/ |
#define this_cpu_read_stable(var) percpu_stable_op("mov", var) |
#define raw_cpu_read_1(pcp) percpu_from_op("mov", pcp) |
#define raw_cpu_read_2(pcp) percpu_from_op("mov", pcp) |
#define raw_cpu_read_4(pcp) percpu_from_op("mov", pcp) |
#define raw_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val) |
#define raw_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) |
#define raw_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val) |
#define raw_cpu_add_1(pcp, val) percpu_add_op((pcp), val) |
#define raw_cpu_add_2(pcp, val) percpu_add_op((pcp), val) |
#define raw_cpu_add_4(pcp, val) percpu_add_op((pcp), val) |
#define raw_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) |
#define raw_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) |
#define raw_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) |
#define raw_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val) |
#define raw_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val) |
#define raw_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val) |
#define raw_cpu_xchg_1(pcp, val) percpu_xchg_op(pcp, val) |
#define raw_cpu_xchg_2(pcp, val) percpu_xchg_op(pcp, val) |
#define raw_cpu_xchg_4(pcp, val) percpu_xchg_op(pcp, val) |
#define this_cpu_read_1(pcp) percpu_from_op("mov", pcp) |
#define this_cpu_read_2(pcp) percpu_from_op("mov", pcp) |
#define this_cpu_read_4(pcp) percpu_from_op("mov", pcp) |
#define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val) |
#define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) |
#define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val) |
#define this_cpu_add_1(pcp, val) percpu_add_op((pcp), val) |
#define this_cpu_add_2(pcp, val) percpu_add_op((pcp), val) |
#define this_cpu_add_4(pcp, val) percpu_add_op((pcp), val) |
#define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) |
#define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) |
#define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) |
#define this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val) |
#define this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val) |
#define this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val) |
#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval) |
#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval) |
#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval) |
#define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val) |
#define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val) |
#define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val) |
#define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) |
#define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) |
#define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) |
#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val) |
#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val) |
#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val) |
#define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) |
#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) |
#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) |
#ifdef CONFIG_X86_CMPXCHG64 |
#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2) \ |
({ \ |
bool __ret; \ |
typeof(pcp1) __o1 = (o1), __n1 = (n1); \ |
typeof(pcp2) __o2 = (o2), __n2 = (n2); \ |
asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \ |
: "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \ |
: "b" (__n1), "c" (__n2), "a" (__o1)); \ |
__ret; \ |
}) |
#define raw_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double |
#define this_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double |
#endif /* CONFIG_X86_CMPXCHG64 */ |
/* |
* Per cpu atomic 64 bit operations are only available under 64 bit. |
* 32 bit must fall back to generic operations. |
*/ |
#ifdef CONFIG_X86_64 |
#define raw_cpu_read_8(pcp) percpu_from_op("mov", pcp) |
#define raw_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) |
#define raw_cpu_add_8(pcp, val) percpu_add_op((pcp), val) |
#define raw_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) |
#define raw_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) |
#define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val) |
#define raw_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) |
#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) |
#define this_cpu_read_8(pcp) percpu_from_op("mov", pcp) |
#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) |
#define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val) |
#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) |
#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) |
#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val) |
#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) |
#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) |
/* |
* Pretty complex macro to generate the cmpxchg16b instruction. The
* instruction is not supported on early AMD64 processors, so we must be
* able to emulate it in software. The address used in the cmpxchg16b
* instruction must be aligned to a 16 byte boundary.
*/ |
#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2) \ |
({ \ |
bool __ret; \ |
typeof(pcp1) __o1 = (o1), __n1 = (n1); \ |
typeof(pcp2) __o2 = (o2), __n2 = (n2); \ |
alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \ |
"cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t", \ |
X86_FEATURE_CX16, \ |
ASM_OUTPUT2("=a" (__ret), "+m" (pcp1), \ |
"+m" (pcp2), "+d" (__o2)), \ |
"b" (__n1), "c" (__n2), "a" (__o1) : "rsi"); \ |
__ret; \ |
}) |
#define raw_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double |
#define this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double |
#endif |
/* This is not atomic against other CPUs -- CPU preemption needs to be off */ |
#define x86_test_and_clear_bit_percpu(bit, var) \ |
({ \ |
int old__; \ |
asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \ |
: "=r" (old__), "+m" (var) \ |
: "dIr" (bit)); \ |
old__; \ |
}) |
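/*
 * Usage sketch (illustrative only; 'demo_pending' is hypothetical):
 * consume a pending-work flag on the local CPU. As noted above, the
 * caller must already have preemption disabled.
 */
#if 0
static DEFINE_PER_CPU(unsigned long, demo_pending);

static bool demo_take_pending(int bit)
{
    return x86_test_and_clear_bit_percpu(bit, demo_pending) != 0;
}
#endif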
static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr, |
const unsigned long __percpu *addr) |
{ |
unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG; |
#ifdef CONFIG_X86_64 |
return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0; |
#else |
return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0; |
#endif |
} |
static inline int x86_this_cpu_variable_test_bit(int nr, |
const unsigned long __percpu *addr) |
{ |
int oldbit; |
asm volatile("bt "__percpu_arg(2)",%1\n\t" |
"sbb %0,%0" |
: "=r" (oldbit) |
: "m" (*(unsigned long *)addr), "Ir" (nr)); |
return oldbit; |
} |
#define x86_this_cpu_test_bit(nr, addr) \ |
(__builtin_constant_p((nr)) \ |
? x86_this_cpu_constant_test_bit((nr), (addr)) \ |
: x86_this_cpu_variable_test_bit((nr), (addr))) |
#include <asm-generic/percpu.h> |
/* We can use this directly for local CPU (faster). */ |
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off); |
#endif /* !__ASSEMBLY__ */ |
#ifdef CONFIG_SMP |
/* |
* Define the "EARLY_PER_CPU" macros. These are used for some per_cpu |
* variables that are initialized and accessed before there are per_cpu |
* areas allocated. |
*/ |
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ |
DEFINE_PER_CPU(_type, _name) = _initvalue; \ |
__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \ |
{ [0 ... NR_CPUS-1] = _initvalue }; \ |
__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map |
#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \ |
DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue; \ |
__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \ |
{ [0 ... NR_CPUS-1] = _initvalue }; \ |
__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map |
#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ |
EXPORT_PER_CPU_SYMBOL(_name) |
#define DECLARE_EARLY_PER_CPU(_type, _name) \ |
DECLARE_PER_CPU(_type, _name); \ |
extern __typeof__(_type) *_name##_early_ptr; \ |
extern __typeof__(_type) _name##_early_map[] |
#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \ |
DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \ |
extern __typeof__(_type) *_name##_early_ptr; \ |
extern __typeof__(_type) _name##_early_map[] |
#define early_per_cpu_ptr(_name) (_name##_early_ptr) |
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx]) |
#define early_per_cpu(_name, _cpu) \ |
*(early_per_cpu_ptr(_name) ? \ |
&early_per_cpu_ptr(_name)[_cpu] : \ |
&per_cpu(_name, _cpu)) |
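/*
 * Usage sketch (illustrative only; 'demo_apicid' is hypothetical):
 * x86 uses this pattern for maps that are consulted before the per-cpu
 * areas exist; early_per_cpu() transparently falls back to the static
 * __initdata map until the _early_ptr is cleared.
 */
#if 0
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, demo_apicid, 0);

static u16 demo_cpu_apicid(int cpu)
{
    return early_per_cpu(demo_apicid, cpu);
}
#endif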
#else /* !CONFIG_SMP */ |
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ |
DEFINE_PER_CPU(_type, _name) = _initvalue |
#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \ |
DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue |
#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ |
EXPORT_PER_CPU_SYMBOL(_name) |
#define DECLARE_EARLY_PER_CPU(_type, _name) \ |
DECLARE_PER_CPU(_type, _name) |
#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \ |
DECLARE_PER_CPU_READ_MOSTLY(_type, _name) |
#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu) |
#define early_per_cpu_ptr(_name) NULL |
/* no early_per_cpu_map() */ |
#endif /* !CONFIG_SMP */ |
#endif /* _ASM_X86_PERCPU_H */ |
/drivers/include/asm/pgtable-2level.h |
---|
0,0 → 1,116 |
#ifndef _ASM_X86_PGTABLE_2LEVEL_H |
#define _ASM_X86_PGTABLE_2LEVEL_H |
#define pte_ERROR(e) \ |
pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low) |
#define pgd_ERROR(e) \ |
pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e)) |
/* |
* Certain architectures need to do special things when PTEs |
* within a page table are directly modified. Thus, the following |
* hook is made available. |
*/ |
static inline void native_set_pte(pte_t *ptep , pte_t pte) |
{ |
*ptep = pte; |
} |
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) |
{ |
*pmdp = pmd; |
} |
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) |
{ |
native_set_pte(ptep, pte); |
} |
static inline void native_pmd_clear(pmd_t *pmdp) |
{ |
native_set_pmd(pmdp, __pmd(0)); |
} |
static inline void native_pte_clear(struct mm_struct *mm, |
unsigned long addr, pte_t *xp) |
{ |
*xp = native_make_pte(0); |
} |
#ifdef CONFIG_SMP |
static inline pte_t native_ptep_get_and_clear(pte_t *xp) |
{ |
return __pte(xchg(&xp->pte_low, 0)); |
} |
#else |
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) |
#endif |
#ifdef CONFIG_SMP |
static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) |
{ |
return __pmd(xchg((pmdval_t *)xp, 0)); |
} |
#else |
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) |
#endif |
/* Bit manipulation helper on pte/pgoff entry */ |
static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift, |
unsigned long mask, unsigned int leftshift) |
{ |
return ((value >> rightshift) & mask) << leftshift; |
} |
/* |
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
* so split up the 29 bits of offset into this range.
*/ |
#define PTE_FILE_MAX_BITS 29 |
#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) |
#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1) |
#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1) |
#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) |
#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) |
#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1) |
#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1) |
#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1) |
#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2) |
static __always_inline pgoff_t pte_to_pgoff(pte_t pte) |
{ |
return (pgoff_t) |
(pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) + |
pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) + |
pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3)); |
} |
static __always_inline pte_t pgoff_to_pte(pgoff_t off) |
{ |
return (pte_t){ |
.pte_low = |
pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) + |
pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) + |
pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) + |
_PAGE_FILE, |
}; |
} |
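/*
 * Sanity sketch (illustrative only): the two helpers above are exact
 * inverses for any offset that fits in PTE_FILE_MAX_BITS, i.e. for
 * off < (1UL << 29), pte_to_pgoff(pgoff_to_pte(off)) == off.
 */
#if 0
static void demo_check_pgoff(pgoff_t off)
{
    BUG_ON(pte_to_pgoff(pgoff_to_pte(off)) != off);
}
#endif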
/* Encode and de-code a swap entry */ |
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) |
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1) |
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS) |
#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \ |
& ((1U << SWP_TYPE_BITS) - 1)) |
#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT) |
#define __swp_entry(type, offset) ((swp_entry_t) { \ |
((type) << (_PAGE_BIT_PRESENT + 1)) \ |
| ((offset) << SWP_OFFSET_SHIFT) }) |
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) |
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val }) |
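/*
 * Decode sketch (illustrative only): unpacking a non-present pte as a
 * swap entry; only meaningful when both _PAGE_PRESENT and _PAGE_FILE
 * are clear.
 */
#if 0
static void demo_show_swap(pte_t pte)
{
    swp_entry_t e = __pte_to_swp_entry(pte);

    printk("swap type %u offset %lu\n",
           (unsigned int)__swp_type(e), (unsigned long)__swp_offset(e));
}
#endif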
#endif /* _ASM_X86_PGTABLE_2LEVEL_H */ |
/drivers/include/asm/pgtable-2level_types.h |
---|
0,0 → 1,37 |
#ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H |
#define _ASM_X86_PGTABLE_2LEVEL_DEFS_H |
#ifndef __ASSEMBLY__ |
#include <linux/types.h> |
typedef unsigned long pteval_t; |
typedef unsigned long pmdval_t; |
typedef unsigned long pudval_t; |
typedef unsigned long pgdval_t; |
typedef unsigned long pgprotval_t; |
typedef union { |
pteval_t pte; |
pteval_t pte_low; |
} pte_t; |
#endif /* !__ASSEMBLY__ */ |
#define SHARED_KERNEL_PMD 0 |
#define PAGETABLE_LEVELS 2 |
/* |
* traditional i386 two-level paging structure: |
*/ |
#define PGDIR_SHIFT 22 |
#define PTRS_PER_PGD 1024 |
/* |
* the i386 is two-level, so we don't really have any |
* PMD directory physically. |
*/ |
#define PTRS_PER_PTE 1024 |
#endif /* _ASM_X86_PGTABLE_2LEVEL_DEFS_H */ |
/drivers/include/asm/pgtable.h |
---|
0,0 → 1,905 |
#ifndef _ASM_X86_PGTABLE_H |
#define _ASM_X86_PGTABLE_H |
#include <asm/page.h> |
#include <asm/e820.h> |
#include <asm/pgtable_types.h> |
/* |
* Macro to mark a page protection value as UC- |
*/ |
#define pgprot_noncached(prot) \ |
((boot_cpu_data.x86 > 3) \ |
? (__pgprot(pgprot_val(prot) | \ |
cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS))) \ |
: (prot)) |
#ifndef __ASSEMBLY__ |
#include <asm/x86_init.h> |
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd); |
/* |
* ZERO_PAGE is a global shared page that is always zero: used |
* for zero-mapped memory areas etc.
*/ |
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] |
__visible; |
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) |
extern spinlock_t pgd_lock; |
extern struct list_head pgd_list; |
extern struct mm_struct *pgd_page_get_mm(struct page *page); |
#ifdef CONFIG_PARAVIRT |
#include <asm/paravirt.h> |
#else /* !CONFIG_PARAVIRT */ |
#define set_pte(ptep, pte) native_set_pte(ptep, pte) |
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) |
#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd) |
#define set_pte_atomic(ptep, pte) \ |
native_set_pte_atomic(ptep, pte) |
#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd) |
#ifndef __PAGETABLE_PUD_FOLDED |
#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd) |
#define pgd_clear(pgd) native_pgd_clear(pgd) |
#endif |
#ifndef set_pud |
# define set_pud(pudp, pud) native_set_pud(pudp, pud) |
#endif |
#ifndef __PAGETABLE_PMD_FOLDED |
#define pud_clear(pud) native_pud_clear(pud) |
#endif |
#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep) |
#define pmd_clear(pmd) native_pmd_clear(pmd) |
#define pte_update(mm, addr, ptep) do { } while (0) |
#define pte_update_defer(mm, addr, ptep) do { } while (0) |
#define pmd_update(mm, addr, ptep) do { } while (0) |
#define pmd_update_defer(mm, addr, ptep) do { } while (0) |
#define pgd_val(x) native_pgd_val(x) |
#define __pgd(x) native_make_pgd(x) |
#ifndef __PAGETABLE_PUD_FOLDED |
#define pud_val(x) native_pud_val(x) |
#define __pud(x) native_make_pud(x) |
#endif |
#ifndef __PAGETABLE_PMD_FOLDED |
#define pmd_val(x) native_pmd_val(x) |
#define __pmd(x) native_make_pmd(x) |
#endif |
#define pte_val(x) native_pte_val(x) |
#define __pte(x) native_make_pte(x) |
#define arch_end_context_switch(prev) do {} while(0) |
#endif /* CONFIG_PARAVIRT */ |
/* |
* The following only work if pte_present() is true. |
* Undefined behaviour if not.
*/ |
static inline int pte_dirty(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_DIRTY; |
} |
static inline int pte_young(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_ACCESSED; |
} |
static inline int pmd_dirty(pmd_t pmd) |
{ |
return pmd_flags(pmd) & _PAGE_DIRTY; |
} |
static inline int pmd_young(pmd_t pmd) |
{ |
return pmd_flags(pmd) & _PAGE_ACCESSED; |
} |
static inline int pte_write(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_RW; |
} |
static inline int pte_file(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_FILE; |
} |
static inline int pte_huge(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_PSE; |
} |
static inline int pte_global(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_GLOBAL; |
} |
static inline int pte_exec(pte_t pte) |
{ |
return !(pte_flags(pte) & _PAGE_NX); |
} |
static inline int pte_special(pte_t pte) |
{ |
/* |
* See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h. |
* On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 == |
* _PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL.
*/ |
return (pte_flags(pte) & _PAGE_SPECIAL) && |
(pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE)); |
} |
static inline unsigned long pte_pfn(pte_t pte) |
{ |
return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT; |
} |
static inline unsigned long pmd_pfn(pmd_t pmd) |
{ |
return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; |
} |
static inline unsigned long pud_pfn(pud_t pud) |
{ |
return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT; |
} |
#define pte_page(pte) pfn_to_page(pte_pfn(pte)) |
static inline int pmd_large(pmd_t pte) |
{ |
return pmd_flags(pte) & _PAGE_PSE; |
} |
#ifdef CONFIG_TRANSPARENT_HUGEPAGE |
static inline int pmd_trans_splitting(pmd_t pmd) |
{ |
return pmd_val(pmd) & _PAGE_SPLITTING; |
} |
static inline int pmd_trans_huge(pmd_t pmd) |
{ |
return pmd_val(pmd) & _PAGE_PSE; |
} |
static inline int has_transparent_hugepage(void) |
{ |
return cpu_has_pse; |
} |
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
static inline pte_t pte_set_flags(pte_t pte, pteval_t set) |
{ |
pteval_t v = native_pte_val(pte); |
return native_make_pte(v | set); |
} |
static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear) |
{ |
pteval_t v = native_pte_val(pte); |
return native_make_pte(v & ~clear); |
} |
static inline pte_t pte_mkclean(pte_t pte) |
{ |
return pte_clear_flags(pte, _PAGE_DIRTY); |
} |
static inline pte_t pte_mkold(pte_t pte) |
{ |
return pte_clear_flags(pte, _PAGE_ACCESSED); |
} |
static inline pte_t pte_wrprotect(pte_t pte) |
{ |
return pte_clear_flags(pte, _PAGE_RW); |
} |
static inline pte_t pte_mkexec(pte_t pte) |
{ |
return pte_clear_flags(pte, _PAGE_NX); |
} |
static inline pte_t pte_mkdirty(pte_t pte) |
{ |
return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY); |
} |
static inline pte_t pte_mkyoung(pte_t pte) |
{ |
return pte_set_flags(pte, _PAGE_ACCESSED); |
} |
static inline pte_t pte_mkwrite(pte_t pte) |
{ |
return pte_set_flags(pte, _PAGE_RW); |
} |
static inline pte_t pte_mkhuge(pte_t pte) |
{ |
return pte_set_flags(pte, _PAGE_PSE); |
} |
static inline pte_t pte_clrhuge(pte_t pte) |
{ |
return pte_clear_flags(pte, _PAGE_PSE); |
} |
static inline pte_t pte_mkglobal(pte_t pte) |
{ |
return pte_set_flags(pte, _PAGE_GLOBAL); |
} |
static inline pte_t pte_clrglobal(pte_t pte) |
{ |
return pte_clear_flags(pte, _PAGE_GLOBAL); |
} |
static inline pte_t pte_mkspecial(pte_t pte) |
{ |
return pte_set_flags(pte, _PAGE_SPECIAL); |
} |
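/*
 * Composition sketch (illustrative only): the flag helpers above are
 * pure value transformations and chain freely, e.g. producing a clean,
 * old, read-only pte from an existing one.
 */
#if 0
static pte_t demo_downgrade(pte_t pte)
{
    return pte_wrprotect(pte_mkclean(pte_mkold(pte)));
}
#endif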
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) |
{ |
pmdval_t v = native_pmd_val(pmd); |
return __pmd(v | set); |
} |
static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) |
{ |
pmdval_t v = native_pmd_val(pmd); |
return __pmd(v & ~clear); |
} |
static inline pmd_t pmd_mkold(pmd_t pmd) |
{ |
return pmd_clear_flags(pmd, _PAGE_ACCESSED); |
} |
static inline pmd_t pmd_wrprotect(pmd_t pmd) |
{ |
return pmd_clear_flags(pmd, _PAGE_RW); |
} |
static inline pmd_t pmd_mkdirty(pmd_t pmd) |
{ |
return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY); |
} |
static inline pmd_t pmd_mkhuge(pmd_t pmd) |
{ |
return pmd_set_flags(pmd, _PAGE_PSE); |
} |
static inline pmd_t pmd_mkyoung(pmd_t pmd) |
{ |
return pmd_set_flags(pmd, _PAGE_ACCESSED); |
} |
static inline pmd_t pmd_mkwrite(pmd_t pmd) |
{ |
return pmd_set_flags(pmd, _PAGE_RW); |
} |
static inline pmd_t pmd_mknotpresent(pmd_t pmd) |
{ |
return pmd_clear_flags(pmd, _PAGE_PRESENT); |
} |
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY |
static inline int pte_soft_dirty(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_SOFT_DIRTY; |
} |
static inline int pmd_soft_dirty(pmd_t pmd) |
{ |
return pmd_flags(pmd) & _PAGE_SOFT_DIRTY; |
} |
static inline pte_t pte_mksoft_dirty(pte_t pte) |
{ |
return pte_set_flags(pte, _PAGE_SOFT_DIRTY); |
} |
static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) |
{ |
return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); |
} |
static inline pte_t pte_file_clear_soft_dirty(pte_t pte) |
{ |
return pte_clear_flags(pte, _PAGE_SOFT_DIRTY); |
} |
static inline pte_t pte_file_mksoft_dirty(pte_t pte) |
{ |
return pte_set_flags(pte, _PAGE_SOFT_DIRTY); |
} |
static inline int pte_file_soft_dirty(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_SOFT_DIRTY; |
} |
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ |
/* |
* Mask out unsupported bits in a present pgprot. Non-present pgprots |
* can use those bits for other purposes, so leave them be. |
*/ |
static inline pgprotval_t massage_pgprot(pgprot_t pgprot) |
{ |
pgprotval_t protval = pgprot_val(pgprot); |
if (protval & _PAGE_PRESENT) |
protval &= __supported_pte_mask; |
return protval; |
} |
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) |
{ |
return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) | |
massage_pgprot(pgprot)); |
} |
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) |
{ |
return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) | |
massage_pgprot(pgprot)); |
} |
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
{ |
pteval_t val = pte_val(pte); |
/* |
* Chop off the NX bit (if present), and add the NX portion of |
* the newprot (if present): |
*/ |
val &= _PAGE_CHG_MASK; |
val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK; |
return __pte(val); |
} |
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) |
{ |
pmdval_t val = pmd_val(pmd); |
val &= _HPAGE_CHG_MASK; |
val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK; |
return __pmd(val); |
} |
/* mprotect needs to preserve PAT bits when updating vm_page_prot */ |
#define pgprot_modify pgprot_modify |
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) |
{ |
pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK; |
pgprotval_t addbits = pgprot_val(newprot); |
return __pgprot(preservebits | addbits); |
} |
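/*
 * Usage sketch (illustrative only): pte_modify() is what mprotect-style
 * code uses to re-protect an existing pte; the pfn and the sticky bits
 * in _PAGE_CHG_MASK survive, only the protection bits change.
 */
#if 0
static pte_t demo_make_readonly(pte_t pte)
{
    return pte_modify(pte, PAGE_READONLY);
}
#endif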
#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK) |
#define canon_pgprot(p) __pgprot(massage_pgprot(p)) |
static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, |
enum page_cache_mode pcm, |
enum page_cache_mode new_pcm) |
{ |
/* |
* PAT type is always WB for untracked ranges, so no need to check. |
*/ |
if (x86_platform.is_untracked_pat_range(paddr, paddr + size)) |
return 1; |
/* |
* Certain new memtypes are not allowed with certain |
* requested memtype: |
* - request is uncached, return cannot be write-back |
* - request is write-combine, return cannot be write-back |
*/ |
if ((pcm == _PAGE_CACHE_MODE_UC_MINUS && |
new_pcm == _PAGE_CACHE_MODE_WB) || |
(pcm == _PAGE_CACHE_MODE_WC && |
new_pcm == _PAGE_CACHE_MODE_WB)) { |
return 0; |
} |
return 1; |
} |
pmd_t *populate_extra_pmd(unsigned long vaddr); |
pte_t *populate_extra_pte(unsigned long vaddr); |
#endif /* __ASSEMBLY__ */ |
#ifdef CONFIG_X86_32 |
# include <asm/pgtable_32.h> |
#else |
# include <asm/pgtable_64.h> |
#endif |
#ifndef __ASSEMBLY__ |
//#include <linux/mm_types.h> |
#include <linux/mmdebug.h> |
#include <linux/log2.h> |
static inline int pte_none(pte_t pte) |
{ |
return !pte.pte; |
} |
#define __HAVE_ARCH_PTE_SAME |
static inline int pte_same(pte_t a, pte_t b) |
{ |
return a.pte == b.pte; |
} |
static inline int pte_present(pte_t a) |
{ |
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE | |
_PAGE_NUMA); |
} |
#define pte_present_nonuma pte_present_nonuma |
static inline int pte_present_nonuma(pte_t a) |
{ |
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); |
} |
#define pte_accessible pte_accessible |
static inline bool pte_accessible(struct mm_struct *mm, pte_t a) |
{ |
if (pte_flags(a) & _PAGE_PRESENT) |
return true; |
if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) && |
mm_tlb_flush_pending(mm)) |
return true; |
return false; |
} |
static inline int pte_hidden(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_HIDDEN; |
} |
static inline int pmd_present(pmd_t pmd) |
{ |
/* |
* Checking for _PAGE_PSE is needed too because |
* split_huge_page will temporarily clear the present bit (but |
* the _PAGE_PSE flag will remain set at all times while the |
* _PAGE_PRESENT bit is clear). |
*/ |
return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE | |
_PAGE_NUMA); |
} |
static inline int pmd_none(pmd_t pmd) |
{ |
/* Only check low word on 32-bit platforms, since it might be |
out of sync with upper half. */ |
return (unsigned long)native_pmd_val(pmd) == 0; |
} |
static inline unsigned long pmd_page_vaddr(pmd_t pmd) |
{ |
return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK); |
} |
/* |
* Currently stuck as a macro due to indirect forward reference to |
* linux/mmzone.h's __section_mem_map_addr() definition: |
*/ |
#define pmd_page(pmd) pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT) |
/* |
* the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
*
* this function returns the index of the entry in the pmd page which would
* control the given virtual address |
*/ |
static inline unsigned long pmd_index(unsigned long address) |
{ |
return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); |
} |
/* |
* Conversion functions: convert a page and protection to a page entry, |
* and a page entry and page directory to the page they refer to. |
* |
* (Currently stuck as a macro because of indirect forward reference |
* to linux/mm.h:page_to_nid()) |
*/ |
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) |
/* |
* the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
* |
* this function returns the index of the entry in the pte page which would |
* control the given virtual address |
*/ |
static inline unsigned long pte_index(unsigned long address) |
{ |
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); |
} |
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) |
{ |
return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); |
} |
static inline int pmd_bad(pmd_t pmd) |
{ |
#ifdef CONFIG_NUMA_BALANCING |
/* pmd_numa check */ |
if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA) |
return 0; |
#endif |
return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE; |
} |
static inline unsigned long pages_to_mb(unsigned long npg) |
{ |
return npg >> (20 - PAGE_SHIFT); |
} |
#if PAGETABLE_LEVELS > 2 |
static inline int pud_none(pud_t pud) |
{ |
return native_pud_val(pud) == 0; |
} |
static inline int pud_present(pud_t pud) |
{ |
return pud_flags(pud) & _PAGE_PRESENT; |
} |
static inline unsigned long pud_page_vaddr(pud_t pud) |
{ |
return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK); |
} |
/* |
* Currently stuck as a macro due to indirect forward reference to |
* linux/mmzone.h's __section_mem_map_addr() definition: |
*/ |
#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT) |
/* Find an entry in the second-level page table.. */ |
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) |
{ |
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); |
} |
static inline int pud_large(pud_t pud) |
{ |
return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) == |
(_PAGE_PSE | _PAGE_PRESENT); |
} |
static inline int pud_bad(pud_t pud) |
{ |
return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0; |
} |
#else |
static inline int pud_large(pud_t pud) |
{ |
return 0; |
} |
#endif /* PAGETABLE_LEVELS > 2 */ |
#if PAGETABLE_LEVELS > 3 |
static inline int pgd_present(pgd_t pgd) |
{ |
return pgd_flags(pgd) & _PAGE_PRESENT; |
} |
static inline unsigned long pgd_page_vaddr(pgd_t pgd) |
{ |
return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK); |
} |
/* |
* Currently stuck as a macro due to indirect forward reference to |
* linux/mmzone.h's __section_mem_map_addr() definition: |
*/ |
#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT) |
/* to find an entry in a page-table-directory. */ |
static inline unsigned long pud_index(unsigned long address) |
{ |
return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); |
} |
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) |
{ |
return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address); |
} |
static inline int pgd_bad(pgd_t pgd) |
{ |
return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; |
} |
static inline int pgd_none(pgd_t pgd) |
{ |
return !native_pgd_val(pgd); |
} |
#endif /* PAGETABLE_LEVELS > 3 */ |
#endif /* __ASSEMBLY__ */ |
/* |
* the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
* |
* this macro returns the index of the entry in the pgd page which would |
* control the given virtual address |
*/ |
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) |
/* |
* pgd_offset() returns a (pgd_t *) |
* pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
*/ |
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) |
/* |
* a shortcut which implies the use of the kernel's pgd, instead |
* of a process's |
*/ |
#define pgd_offset_k(address) pgd_offset(&init_mm, (address)) |
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET) |
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY) |
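/*
 * Walk sketch (illustrative only): combining the offset helpers from
 * this header family to locate the pte mapping a kernel virtual
 * address; on folded configurations the pud/pmd levels collapse onto
 * the pgd entry via the asm-generic nopud/nopmd stubs.
 */
#if 0
static pte_t *demo_walk_kernel(unsigned long vaddr)
{
    pgd_t *pgd = pgd_offset_k(vaddr);
    pud_t *pud;
    pmd_t *pmd;

    if (pgd_none(*pgd) || pgd_bad(*pgd))
        return NULL;
    pud = pud_offset(pgd, vaddr);
    if (pud_none(*pud) || pud_bad(*pud))
        return NULL;
    pmd = pmd_offset(pud, vaddr);
    if (pmd_none(*pmd) || pmd_bad(*pmd))
        return NULL;
    return pte_offset_kernel(pmd, vaddr);
}
#endif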
#ifndef __ASSEMBLY__ |
extern int direct_gbpages; |
void init_mem_mapping(void); |
void early_alloc_pgt_buf(void); |
/* local pte updates need not use xchg for locking */ |
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep) |
{ |
pte_t res = *ptep; |
/* Pure native function needs no input for mm, addr */ |
native_pte_clear(NULL, 0, ptep); |
return res; |
} |
static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp) |
{ |
pmd_t res = *pmdp; |
native_pmd_clear(pmdp); |
return res; |
} |
static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, |
pte_t *ptep , pte_t pte) |
{ |
native_set_pte(ptep, pte); |
} |
static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr, |
pmd_t *pmdp , pmd_t pmd) |
{ |
native_set_pmd(pmdp, pmd); |
} |
#ifndef CONFIG_PARAVIRT |
/* |
* Rules for using pte_update - it must be called after any PTE update which |
* has not been done using the set_pte / clear_pte interfaces. It is used by |
* shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE |
* updates should either be sets, clears, or set_pte_atomic for P->P |
* transitions, which means this hook should only be called for user PTEs. |
* This hook implies a P->P protection or access change has taken place, which |
* requires a subsequent TLB flush. The notification can optionally be delayed |
* until the TLB flush event by using the pte_update_defer form of the |
* interface, but care must be taken to assure that the flush happens while |
* still holding the same page table lock so that the shadow and primary pages |
* do not become out of sync on SMP. |
*/ |
#define pte_update(mm, addr, ptep) do { } while (0) |
#define pte_update_defer(mm, addr, ptep) do { } while (0) |
#endif |
/* |
* We only update the dirty/accessed state if we set |
* the dirty bit by hand in the kernel, since the hardware |
* will do the accessed bit for us, and we don't want to |
* race with other CPUs that might be updating the dirty
* bit at the same time. |
*/ |
struct vm_area_struct; |
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS |
extern int ptep_set_access_flags(struct vm_area_struct *vma, |
unsigned long address, pte_t *ptep, |
pte_t entry, int dirty); |
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
extern int ptep_test_and_clear_young(struct vm_area_struct *vma, |
unsigned long addr, pte_t *ptep); |
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH |
extern int ptep_clear_flush_young(struct vm_area_struct *vma, |
unsigned long address, pte_t *ptep); |
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR |
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, |
pte_t *ptep) |
{ |
pte_t pte = native_ptep_get_and_clear(ptep); |
pte_update(mm, addr, ptep); |
return pte; |
} |
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL |
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, |
unsigned long addr, pte_t *ptep, |
int full) |
{ |
pte_t pte; |
if (full) { |
/* |
* Full address destruction in progress; paravirt does not |
* care about updates and native needs no locking |
*/ |
pte = native_local_ptep_get_and_clear(ptep); |
} else { |
pte = ptep_get_and_clear(mm, addr, ptep); |
} |
return pte; |
} |
#define __HAVE_ARCH_PTEP_SET_WRPROTECT |
static inline void ptep_set_wrprotect(struct mm_struct *mm, |
unsigned long addr, pte_t *ptep) |
{ |
clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte); |
pte_update(mm, addr, ptep); |
} |
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0) |
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) |
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS |
extern int pmdp_set_access_flags(struct vm_area_struct *vma, |
unsigned long address, pmd_t *pmdp, |
pmd_t entry, int dirty); |
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG |
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, |
unsigned long addr, pmd_t *pmdp); |
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH |
extern int pmdp_clear_flush_young(struct vm_area_struct *vma, |
unsigned long address, pmd_t *pmdp); |
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH |
extern void pmdp_splitting_flush(struct vm_area_struct *vma, |
unsigned long addr, pmd_t *pmdp); |
#define __HAVE_ARCH_PMD_WRITE |
static inline int pmd_write(pmd_t pmd) |
{ |
return pmd_flags(pmd) & _PAGE_RW; |
} |
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR |
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr, |
pmd_t *pmdp) |
{ |
pmd_t pmd = native_pmdp_get_and_clear(pmdp); |
pmd_update(mm, addr, pmdp); |
return pmd; |
} |
#define __HAVE_ARCH_PMDP_SET_WRPROTECT |
static inline void pmdp_set_wrprotect(struct mm_struct *mm, |
unsigned long addr, pmd_t *pmdp) |
{ |
clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp); |
pmd_update(mm, addr, pmdp); |
} |
/* |
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count); |
* |
* dst - pointer to pgd range anywhere on a pgd page
* src - "" |
* count - the number of pgds to copy. |
* |
* dst and src can be on the same page, but the range must not overlap, |
* and must not cross a page boundary. |
*/ |
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) |
{ |
memcpy(dst, src, count * sizeof(pgd_t)); |
} |
#define PTE_SHIFT ilog2(PTRS_PER_PTE) |
static inline int page_level_shift(enum pg_level level) |
{ |
return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT; |
} |
static inline unsigned long page_level_size(enum pg_level level) |
{ |
return 1UL << page_level_shift(level); |
} |
static inline unsigned long page_level_mask(enum pg_level level) |
{ |
return ~(page_level_size(level) - 1); |
} |
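/*
 * Worked example for the helpers above: page_level_shift() is
 * (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT. On non-PAE x86-32
 * (PTRS_PER_PTE == 1024, PTE_SHIFT == 10) PG_LEVEL_2M yields
 * 2 + 2 * 10 = 22, i.e. 4 MiB large pages; on PAE or x86-64
 * (PTRS_PER_PTE == 512, PTE_SHIFT == 9) it yields 3 + 2 * 9 = 21,
 * i.e. 2 MiB, matching the level's name.
 */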
/* |
* The x86 doesn't have any external MMU info: the kernel page |
* tables contain all the necessary information. |
*/ |
static inline void update_mmu_cache(struct vm_area_struct *vma, |
unsigned long addr, pte_t *ptep) |
{ |
} |
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, |
unsigned long addr, pmd_t *pmd) |
{ |
} |
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY |
static inline pte_t pte_swp_mksoft_dirty(pte_t pte) |
{ |
VM_BUG_ON(pte_present_nonuma(pte)); |
return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY); |
} |
static inline int pte_swp_soft_dirty(pte_t pte) |
{ |
VM_BUG_ON(pte_present_nonuma(pte)); |
return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY; |
} |
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) |
{ |
VM_BUG_ON(pte_present_nonuma(pte)); |
return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY); |
} |
#endif |
//#include <asm-generic/pgtable.h> |
#endif /* __ASSEMBLY__ */ |
#endif /* _ASM_X86_PGTABLE_H */ |
/drivers/include/asm/pgtable_32.h |
---|
0,0 → 1,76 |
#ifndef _ASM_X86_PGTABLE_32_H |
#define _ASM_X86_PGTABLE_32_H |
#include <asm/pgtable_32_types.h> |
/* |
* The Linux memory management assumes a three-level page table setup. On |
* the i386, we use that, but "fold" the mid level into the top-level page |
* table, so that we physically have the same two-level page table as the |
* i386 mmu expects. |
* |
* This file contains the functions and defines necessary to modify and use |
* the i386 page table tree. |
*/ |
#ifndef __ASSEMBLY__ |
#include <asm/processor.h> |
#include <linux/threads.h> |
#include <linux/bitops.h> |
#include <linux/list.h> |
#include <linux/spinlock.h> |
struct mm_struct; |
struct vm_area_struct; |
extern pgd_t swapper_pg_dir[1024]; |
extern pgd_t initial_page_table[1024]; |
static inline void pgtable_cache_init(void) { } |
static inline void check_pgt_cache(void) { } |
void paging_init(void); |
/* |
* Define this if things work differently on an i386 and an i486: |
* it will (on an i486) warn about kernel memory accesses that are |
* done without an 'access_ok(VERIFY_WRITE,..)'
*/ |
#undef TEST_ACCESS_OK |
#ifdef CONFIG_X86_PAE |
# include <asm/pgtable-3level.h> |
#else |
# include <asm/pgtable-2level.h> |
#endif |
#if defined(CONFIG_HIGHPTE) |
#define pte_offset_map(dir, address) \ |
((pte_t *)kmap_atomic(pmd_page(*(dir))) + \ |
pte_index((address))) |
#define pte_unmap(pte) kunmap_atomic((pte)) |
#else |
#define pte_offset_map(dir, address) \ |
((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address))) |
#define pte_unmap(pte) do { } while (0) |
#endif |
/* Clear a kernel PTE and flush it from the TLB */ |
#define kpte_clear_flush(ptep, vaddr) \ |
do { \ |
pte_clear(&init_mm, (vaddr), (ptep)); \ |
__flush_tlb_one((vaddr)); \ |
} while (0) |
#endif /* !__ASSEMBLY__ */ |
/* |
* kern_addr_valid() is (1) for FLATMEM and (0) for |
* SPARSEMEM and DISCONTIGMEM |
*/ |
#ifdef CONFIG_FLATMEM |
#define kern_addr_valid(addr) (1) |
#else |
#define kern_addr_valid(kaddr) (0) |
#endif |
#endif /* _ASM_X86_PGTABLE_32_H */ |
/drivers/include/asm/pgtable_32_types.h |
---|
0,0 → 1,55 |
#ifndef _ASM_X86_PGTABLE_32_DEFS_H |
#define _ASM_X86_PGTABLE_32_DEFS_H |
/* |
* The Linux x86 paging architecture is 'compile-time dual-mode'; it
* implements both the traditional 2-level x86 page tables and the |
* newer 3-level PAE-mode page tables. |
*/ |
#ifdef CONFIG_X86_PAE |
# include <asm/pgtable-3level_types.h> |
# define PMD_SIZE (1UL << PMD_SHIFT) |
# define PMD_MASK (~(PMD_SIZE - 1)) |
#else |
# include <asm/pgtable-2level_types.h> |
#endif |
#define PGDIR_SIZE (1UL << PGDIR_SHIFT) |
#define PGDIR_MASK (~(PGDIR_SIZE - 1)) |
/* Just any arbitrary offset to the start of the vmalloc VM area: the |
* current 8MB value just means that there will be an 8MB "hole" after the
* physical memory until the kernel virtual memory starts. That means that
* any out-of-bounds memory accesses will hopefully be caught.
* The vmalloc() routines leave a hole of 4kB between each vmalloced
* area for the same reason. ;) |
*/ |
#define VMALLOC_OFFSET (8 * 1024 * 1024) |
#ifndef __ASSEMBLY__ |
extern bool __vmalloc_start_set; /* set once high_memory is set */ |
#endif |
#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET) |
#ifdef CONFIG_X86_PAE |
#define LAST_PKMAP 512 |
#else |
#define LAST_PKMAP 1024 |
#endif |
#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ |
& PMD_MASK) |
#ifdef CONFIG_HIGHMEM |
# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) |
#else |
# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) |
#endif |
#define MODULES_VADDR VMALLOC_START |
#define MODULES_END VMALLOC_END |
#define MODULES_LEN (MODULES_VADDR - MODULES_END) |
#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE) |
#endif /* _ASM_X86_PGTABLE_32_DEFS_H */ |
/drivers/include/asm/pgtable_types.h |
---|
0,0 → 1,463 |
#ifndef _ASM_X86_PGTABLE_DEFS_H |
#define _ASM_X86_PGTABLE_DEFS_H |
#include <linux/const.h> |
#include <asm/page_types.h> |
#define FIRST_USER_ADDRESS 0 |
#define _PAGE_BIT_PRESENT 0 /* is present */ |
#define _PAGE_BIT_RW 1 /* writeable */ |
#define _PAGE_BIT_USER 2 /* userspace addressable */ |
#define _PAGE_BIT_PWT 3 /* page write through */ |
#define _PAGE_BIT_PCD 4 /* page cache disabled */ |
#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */ |
#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */ |
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ |
#define _PAGE_BIT_PAT 7 /* on 4KB pages */ |
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ |
#define _PAGE_BIT_SOFTW1 9 /* available for programmer */ |
#define _PAGE_BIT_SOFTW2 10 /* " */ |
#define _PAGE_BIT_SOFTW3 11 /* " */ |
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ |
#define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1 |
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1 |
#define _PAGE_BIT_SPLITTING _PAGE_BIT_SOFTW2 /* only valid on a PSE pmd */ |
#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */ |
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */ |
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ |
/* |
* Swap offsets on configurations that allow automatic NUMA balancing use the |
* bits after _PAGE_BIT_GLOBAL. To uniquely distinguish NUMA hinting PTEs from |
* swap entries, we use the first bit after _PAGE_BIT_GLOBAL and shrink the |
* maximum possible swap space from 16TB to 8TB. |
*/ |
#define _PAGE_BIT_NUMA (_PAGE_BIT_GLOBAL+1) |
/* If _PAGE_BIT_PRESENT is clear, we use these: */ |
/* - if the user mapped it with PROT_NONE; pte_present gives true */ |
#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL |
/* - set: nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY |
#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT) |
#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW) |
#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER) |
#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT) |
#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD) |
#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED) |
#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY) |
#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE) |
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) |
#define _PAGE_SOFTW1 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1) |
#define _PAGE_SOFTW2 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2) |
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) |
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) |
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL) |
#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST) |
#define _PAGE_SPLITTING (_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING) |
#define __HAVE_ARCH_PTE_SPECIAL |
#ifdef CONFIG_KMEMCHECK |
#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) |
#else |
#define _PAGE_HIDDEN (_AT(pteval_t, 0)) |
#endif |
/* |
* The same hidden bit is used by kmemcheck, but since kmemcheck |
* works on kernel pages while the soft-dirty engine works on user space,
* they do not conflict with each other. |
*/ |
#ifdef CONFIG_MEM_SOFT_DIRTY |
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY) |
#else |
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) |
#endif |
/* |
* _PAGE_NUMA distinguishes between a numa hinting minor fault and a page |
* that is not present. The hinting fault gathers numa placement statistics |
* (see pte_numa()). The bit is always zero when the PTE is not present. |
* |
* The bit picked must always be zero both when the pmd is present and
* when it is not present, so that we don't lose information when we set
* it while atomically clearing the present bit.
*/ |
#ifdef CONFIG_NUMA_BALANCING |
#define _PAGE_NUMA (_AT(pteval_t, 1) << _PAGE_BIT_NUMA) |
#else |
#define _PAGE_NUMA (_AT(pteval_t, 0)) |
#endif |
/* |
* Tracking the soft dirty bit when a page goes to swap is tricky.
* We need a bit which can be stored in the pte _and_ does not conflict
* with the swap entry format. On x86 bits 6 and 7 are *not* involved
* in swap entry computation, but bit 6 is used for nonlinear
* file mapping, so we borrow bit 7 for soft dirty tracking.
*
* Please note that this bit must be treated as a swap dirty page
* mark if and only if the PTE has the present bit clear!
*/ |
#ifdef CONFIG_MEM_SOFT_DIRTY |
#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE |
#else |
#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0)) |
#endif |
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) |
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) |
#else |
#define _PAGE_NX (_AT(pteval_t, 0)) |
#endif |
#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) |
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) |
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ |
_PAGE_ACCESSED | _PAGE_DIRTY) |
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \ |
_PAGE_DIRTY) |
/* Set of bits not changed in pte_modify */ |
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ |
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \ |
_PAGE_SOFT_DIRTY | _PAGE_NUMA) |
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_NUMA) |
/* |
* The cache modes defined here are used to translate between pure SW usage |
* and the HW defined cache mode bits and/or PAT entries. |
* |
* The resulting bits for PWT, PCD and PAT should be chosen in a way |
* to have the WB mode at index 0 (all bits clear). This is the default |
* right now and likely would break too much if changed. |
*/ |
#ifndef __ASSEMBLY__ |
enum page_cache_mode { |
_PAGE_CACHE_MODE_WB = 0, |
_PAGE_CACHE_MODE_WC = 1, |
_PAGE_CACHE_MODE_UC_MINUS = 2, |
_PAGE_CACHE_MODE_UC = 3, |
_PAGE_CACHE_MODE_WT = 4, |
_PAGE_CACHE_MODE_WP = 5, |
_PAGE_CACHE_MODE_NUM = 8 |
}; |
#endif |
#define _PAGE_CACHE_MASK (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT) |
#define _PAGE_NOCACHE (cachemode2protval(_PAGE_CACHE_MODE_UC)) |
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) |
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ |
_PAGE_ACCESSED | _PAGE_NX) |
#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \ |
_PAGE_USER | _PAGE_ACCESSED) |
#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ |
_PAGE_ACCESSED | _PAGE_NX) |
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ |
_PAGE_ACCESSED) |
#define PAGE_COPY PAGE_COPY_NOEXEC |
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \ |
_PAGE_ACCESSED | _PAGE_NX) |
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ |
_PAGE_ACCESSED) |
#define __PAGE_KERNEL_EXEC \ |
(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL) |
#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) |
#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) |
#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) |
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE) |
#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) |
#define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER) |
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) |
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) |
#define __PAGE_KERNEL_IO (__PAGE_KERNEL) |
#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE) |
#define PAGE_KERNEL __pgprot(__PAGE_KERNEL) |
#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) |
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) |
#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX) |
#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) |
#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) |
#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) |
#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL) |
#define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR) |
#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO) |
#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE) |
/* xwr */ |
#define __P000 PAGE_NONE |
#define __P001 PAGE_READONLY |
#define __P010 PAGE_COPY |
#define __P011 PAGE_COPY |
#define __P100 PAGE_READONLY_EXEC |
#define __P101 PAGE_READONLY_EXEC |
#define __P110 PAGE_COPY_EXEC |
#define __P111 PAGE_COPY_EXEC |
#define __S000 PAGE_NONE |
#define __S001 PAGE_READONLY |
#define __S010 PAGE_SHARED |
#define __S011 PAGE_SHARED |
#define __S100 PAGE_READONLY_EXEC |
#define __S101 PAGE_READONLY_EXEC |
#define __S110 PAGE_SHARED_EXEC |
#define __S111 PAGE_SHARED_EXEC |
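/*
 * Reading sketch: the __Pxxx/__Sxxx tables are indexed by the xwr bits
 * of the mmap protection. E.g. PROT_READ|PROT_WRITE with MAP_PRIVATE
 * selects __P011 == PAGE_COPY, where the write bit is withheld so the
 * first store faults and triggers copy-on-write.
 */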
/* |
* early identity mapping pte attrib macros. |
*/ |
#ifdef CONFIG_X86_64 |
#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC |
#else |
#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */ |
#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ |
#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ |
#endif |
#ifdef CONFIG_X86_32 |
# include <asm/pgtable_32_types.h> |
#else |
# include <asm/pgtable_64_types.h> |
#endif |
#ifndef __ASSEMBLY__ |
#include <linux/types.h> |
/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ |
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK) |
/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */ |
#define PTE_FLAGS_MASK (~PTE_PFN_MASK) |
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; |
typedef struct { pgdval_t pgd; } pgd_t; |
static inline pgd_t native_make_pgd(pgdval_t val) |
{ |
return (pgd_t) { val }; |
} |
static inline pgdval_t native_pgd_val(pgd_t pgd) |
{ |
return pgd.pgd; |
} |
static inline pgdval_t pgd_flags(pgd_t pgd) |
{ |
return native_pgd_val(pgd) & PTE_FLAGS_MASK; |
} |
#if PAGETABLE_LEVELS > 3 |
typedef struct { pudval_t pud; } pud_t; |
static inline pud_t native_make_pud(pmdval_t val) |
{ |
return (pud_t) { val }; |
} |
static inline pudval_t native_pud_val(pud_t pud) |
{ |
return pud.pud; |
} |
#else |
#include <asm-generic/pgtable-nopud.h> |
static inline pudval_t native_pud_val(pud_t pud) |
{ |
return native_pgd_val(pud.pgd); |
} |
#endif |
#if PAGETABLE_LEVELS > 2 |
typedef struct { pmdval_t pmd; } pmd_t; |
static inline pmd_t native_make_pmd(pmdval_t val) |
{ |
return (pmd_t) { val }; |
} |
static inline pmdval_t native_pmd_val(pmd_t pmd) |
{ |
return pmd.pmd; |
} |
#else |
#include <asm-generic/pgtable-nopmd.h> |
static inline pmdval_t native_pmd_val(pmd_t pmd) |
{ |
return native_pgd_val(pmd.pud.pgd); |
} |
#endif |
static inline pudval_t pud_flags(pud_t pud) |
{ |
return native_pud_val(pud) & PTE_FLAGS_MASK; |
} |
static inline pmdval_t pmd_flags(pmd_t pmd) |
{ |
return native_pmd_val(pmd) & PTE_FLAGS_MASK; |
} |
static inline pte_t native_make_pte(pteval_t val) |
{ |
return (pte_t) { .pte = val }; |
} |
static inline pteval_t native_pte_val(pte_t pte) |
{ |
return pte.pte; |
} |
static inline pteval_t pte_flags(pte_t pte) |
{ |
return native_pte_val(pte) & PTE_FLAGS_MASK; |
} |
#ifdef CONFIG_NUMA_BALANCING |
/* Set of bits that distinguishes present, prot_none and numa ptes */ |
#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT) |
static inline pteval_t ptenuma_flags(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_NUMA_MASK; |
} |
static inline pmdval_t pmdnuma_flags(pmd_t pmd) |
{ |
return pmd_flags(pmd) & _PAGE_NUMA_MASK; |
} |
#endif /* CONFIG_NUMA_BALANCING */ |
#define pgprot_val(x) ((x).pgprot) |
#define __pgprot(x) ((pgprot_t) { (x) } ) |
extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM]; |
extern uint8_t __pte2cachemode_tbl[8]; |
#define __pte2cm_idx(cb) \ |
((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) | \ |
(((cb) >> (_PAGE_BIT_PCD - 1)) & 2) | \ |
(((cb) >> _PAGE_BIT_PWT) & 1)) |
#define __cm_idx2pte(i) \ |
((((i) & 4) << (_PAGE_BIT_PAT - 2)) | \ |
(((i) & 2) << (_PAGE_BIT_PCD - 1)) | \ |
(((i) & 1) << _PAGE_BIT_PWT)) |
static inline unsigned long cachemode2protval(enum page_cache_mode pcm) |
{ |
if (likely(pcm == 0)) |
return 0; |
return __cachemode2pte_tbl[pcm]; |
} |
static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm) |
{ |
return __pgprot(cachemode2protval(pcm)); |
} |
static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot) |
{ |
unsigned long masked; |
masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK; |
if (likely(masked == 0)) |
return 0; |
return __pte2cachemode_tbl[__pte2cm_idx(masked)]; |
} |
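/*
 * Round-trip sketch (illustrative only) for the cache-mode translation
 * helpers above.
 */
#if 0
static void demo_cachemode(void)
{
    pgprot_t prot = cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS);

    /* Decodes back to the mode that was encoded. */
    BUG_ON(pgprot2cachemode(prot) != _PAGE_CACHE_MODE_UC_MINUS);
}
#endif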
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot) |
{ |
pgprot_t new; |
unsigned long val; |
val = pgprot_val(pgprot); |
pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) | |
((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT)); |
return new; |
} |
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot) |
{ |
pgprot_t new; |
unsigned long val; |
val = pgprot_val(pgprot); |
pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) | |
((val & _PAGE_PAT_LARGE) >> |
(_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT)); |
return new; |
} |
typedef struct page *pgtable_t; |
extern pteval_t __supported_pte_mask; |
extern void set_nx(void); |
extern int nx_enabled; |
#define pgprot_writecombine pgprot_writecombine |
extern pgprot_t pgprot_writecombine(pgprot_t prot); |
/* Indicate that x86 has its own track and untrack pfn vma functions */ |
#define __HAVE_PFNMAP_TRACKING |
#define __HAVE_PHYS_MEM_ACCESS_PROT |
struct file; |
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
unsigned long size, pgprot_t vma_prot); |
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, |
unsigned long size, pgprot_t *vma_prot); |
/* Install a pte for a particular vaddr in kernel space. */ |
void set_pte_vaddr(unsigned long vaddr, pte_t pte); |
#ifdef CONFIG_X86_32 |
extern void native_pagetable_init(void); |
#else |
#define native_pagetable_init paging_init |
#endif |
struct seq_file; |
extern void arch_report_meminfo(struct seq_file *m); |
enum pg_level { |
PG_LEVEL_NONE, |
PG_LEVEL_4K, |
PG_LEVEL_2M, |
PG_LEVEL_1G, |
PG_LEVEL_NUM |
}; |
#ifdef CONFIG_PROC_FS |
extern void update_page_count(int level, unsigned long pages); |
#else |
static inline void update_page_count(int level, unsigned long pages) { } |
#endif |
/* |
* Helper function that returns the kernel pagetable entry controlling |
* the virtual address 'address'. NULL means no pagetable entry present. |
* NOTE: the return type is pte_t but if the pmd is PSE then we return it |
* as a pte too. |
*/ |
extern pte_t *lookup_address(unsigned long address, unsigned int *level); |
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address, |
unsigned int *level); |
extern pmd_t *lookup_pmd_address(unsigned long address); |
extern phys_addr_t slow_virt_to_phys(void *__address); |
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, |
unsigned numpages, unsigned long page_flags); |
void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address, |
unsigned numpages); |
#endif /* !__ASSEMBLY__ */ |
#endif /* _ASM_X86_PGTABLE_DEFS_H */ |
/drivers/include/asm/posix_types.h |
---|
0,0 → 1,5 |
# ifdef CONFIG_X86_32 |
# include <asm/posix_types_32.h> |
# else |
# include <asm/posix_types_64.h> |
# endif |
/drivers/include/asm/posix_types_32.h |
---|
0,0 → 1,85 |
#ifndef _ASM_X86_POSIX_TYPES_32_H |
#define _ASM_X86_POSIX_TYPES_32_H |
/* |
* This file is generally used by user-level software, so you need to |
* be a little careful about namespace pollution etc. Also, we cannot |
* assume GCC is being used. |
*/ |
typedef unsigned long __kernel_ino_t; |
typedef unsigned short __kernel_mode_t; |
typedef unsigned short __kernel_nlink_t; |
typedef long __kernel_off_t; |
typedef int __kernel_pid_t; |
typedef unsigned short __kernel_ipc_pid_t; |
typedef unsigned short __kernel_uid_t; |
typedef unsigned short __kernel_gid_t; |
typedef unsigned int __kernel_size_t; |
typedef int __kernel_ssize_t; |
typedef int __kernel_ptrdiff_t; |
typedef long __kernel_time_t; |
typedef long __kernel_suseconds_t; |
typedef long __kernel_clock_t; |
typedef int __kernel_timer_t; |
typedef int __kernel_clockid_t; |
typedef int __kernel_daddr_t; |
typedef char * __kernel_caddr_t; |
typedef unsigned short __kernel_uid16_t; |
typedef unsigned short __kernel_gid16_t; |
typedef unsigned int __kernel_uid32_t; |
typedef unsigned int __kernel_gid32_t; |
typedef unsigned short __kernel_old_uid_t; |
typedef unsigned short __kernel_old_gid_t; |
typedef unsigned short __kernel_old_dev_t; |
#ifdef __GNUC__ |
typedef long long __kernel_loff_t; |
#endif |
typedef struct { |
int val[2]; |
} __kernel_fsid_t; |
#if defined(__KERNEL__) |
#undef __FD_SET |
#define __FD_SET(fd,fdsetp) \ |
asm volatile("btsl %1,%0": \ |
"+m" (*(__kernel_fd_set *)(fdsetp)) \ |
: "r" ((int)(fd))) |
#undef __FD_CLR |
#define __FD_CLR(fd,fdsetp) \ |
asm volatile("btrl %1,%0": \ |
"+m" (*(__kernel_fd_set *)(fdsetp)) \ |
: "r" ((int) (fd))) |
#undef __FD_ISSET |
#define __FD_ISSET(fd,fdsetp) \ |
(__extension__ \ |
({ \ |
unsigned char __result; \ |
asm volatile("btl %1,%2 ; setb %0" \ |
: "=q" (__result) \ |
: "r" ((int)(fd)), \ |
"m" (*(__kernel_fd_set *)(fdsetp))); \ |
__result; \ |
})) |
#undef __FD_ZERO |
#define __FD_ZERO(fdsetp) \ |
do { \ |
int __d0, __d1; \ |
asm volatile("cld ; rep ; stosl" \ |
: "=m" (*(__kernel_fd_set *)(fdsetp)), \ |
"=&c" (__d0), "=&D" (__d1) \ |
: "a" (0), "1" (__FDSET_LONGS), \ |
"2" ((__kernel_fd_set *)(fdsetp)) \ |
: "memory"); \ |
} while (0) |
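/*
 * Usage sketch (illustrative only): the asm helpers above mirror the
 * generic C fd_set operations bit for bit.
 */
#if 0
static void demo_fdset(void)
{
    __kernel_fd_set set;

    __FD_ZERO(&set);
    __FD_SET(5, &set);
    if (__FD_ISSET(5, &set))
        __FD_CLR(5, &set);
}
#endif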
#endif /* defined(__KERNEL__) */ |
#endif /* _ASM_X86_POSIX_TYPES_32_H */ |
/drivers/include/asm/processor-flags.h |
---|
0,0 → 1,11 |
#ifndef _ASM_X86_PROCESSOR_FLAGS_H |
#define _ASM_X86_PROCESSOR_FLAGS_H |
#include <uapi/asm/processor-flags.h> |
#ifdef CONFIG_VM86 |
#define X86_VM_MASK X86_EFLAGS_VM |
#else |
#define X86_VM_MASK 0 /* No VM86 support */ |
#endif |
#endif /* _ASM_X86_PROCESSOR_FLAGS_H */ |
/drivers/include/asm/processor.h |
---|
0,0 → 1,1010 |
#ifndef _ASM_X86_PROCESSOR_H |
#define _ASM_X86_PROCESSOR_H |
#include <asm/processor-flags.h> |
/* Forward declaration, a strange C thing */ |
struct task_struct; |
struct mm_struct; |
#include <asm/vm86.h> |
#include <asm/math_emu.h> |
#include <asm/segment.h> |
#include <asm/types.h> |
#include <asm/sigcontext.h> |
#include <asm/current.h> |
#include <asm/cpufeature.h> |
#include <asm/page.h> |
#include <asm/pgtable_types.h> |
#include <asm/percpu.h> |
#include <asm/msr.h> |
#include <asm/desc_defs.h> |
#include <asm/nops.h> |
#include <asm/special_insns.h> |
#include <linux/personality.h> |
#include <linux/cpumask.h> |
#include <linux/cache.h> |
#include <linux/threads.h> |
#include <linux/math64.h> |
#include <linux/err.h> |
#include <linux/irqflags.h> |
/* |
* We handle most unaligned accesses in hardware. On the other hand |
* unaligned DMA can be quite expensive on some Nehalem processors. |
* |
* Based on this we disable the IP header alignment in network drivers. |
*/ |
#define NET_IP_ALIGN 0 |
#define HBP_NUM 4 |
/* |
* Default implementation of macro that returns current |
* instruction pointer ("program counter"). |
*/ |
static inline void *current_text_addr(void) |
{ |
void *pc; |
asm volatile("mov $1f, %0; 1:":"=r" (pc)); |
return pc; |
} |
#ifdef CONFIG_X86_VSMP |
# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) |
# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) |
#else |
# define ARCH_MIN_TASKALIGN 16 |
# define ARCH_MIN_MMSTRUCT_ALIGN 0 |
#endif |
enum tlb_infos { |
ENTRIES, |
NR_INFO |
}; |
extern u16 __read_mostly tlb_lli_4k[NR_INFO]; |
extern u16 __read_mostly tlb_lli_2m[NR_INFO]; |
extern u16 __read_mostly tlb_lli_4m[NR_INFO]; |
extern u16 __read_mostly tlb_lld_4k[NR_INFO]; |
extern u16 __read_mostly tlb_lld_2m[NR_INFO]; |
extern u16 __read_mostly tlb_lld_4m[NR_INFO]; |
extern u16 __read_mostly tlb_lld_1g[NR_INFO]; |
/* |
* CPU type and hardware bug flags. Kept separately for each CPU. |
* Members of this structure are referenced in head.S, so think twice |
* before touching them. [mj] |
*/ |
struct cpuinfo_x86 { |
__u8 x86; /* CPU family */ |
__u8 x86_vendor; /* CPU vendor */ |
__u8 x86_model; |
__u8 x86_mask; |
#ifdef CONFIG_X86_32 |
char wp_works_ok; /* It doesn't on 386's */ |
/* Problems on some 486Dx4's and old 386's: */ |
char rfu; |
char pad0; |
char pad1; |
#else |
/* Number of 4K pages in DTLB/ITLB combined (in pages): */
int x86_tlbsize; |
#endif |
__u8 x86_virt_bits; |
__u8 x86_phys_bits; |
/* CPUID returned core id bits: */ |
__u8 x86_coreid_bits; |
/* Max extended CPUID function supported: */ |
__u32 extended_cpuid_level; |
/* Maximum supported CPUID level, -1=no CPUID: */ |
int cpuid_level; |
__u32 x86_capability[NCAPINTS + NBUGINTS]; |
char x86_vendor_id[16]; |
char x86_model_id[64]; |
/* in KB - valid for CPUs which support this call: */
int x86_cache_size; |
int x86_cache_alignment; /* In bytes */ |
int x86_power; |
unsigned long loops_per_jiffy; |
/* cpuid returned max cores value: */ |
u16 x86_max_cores; |
u16 apicid; |
u16 initial_apicid; |
u16 x86_clflush_size; |
/* number of cores as seen by the OS: */ |
u16 booted_cores; |
/* Physical processor id: */ |
u16 phys_proc_id; |
/* Core id: */ |
u16 cpu_core_id; |
/* Compute unit id */ |
u8 compute_unit_id; |
/* Index into per_cpu list: */ |
u16 cpu_index; |
u32 microcode; |
}; |
#define X86_VENDOR_INTEL 0 |
#define X86_VENDOR_CYRIX 1 |
#define X86_VENDOR_AMD 2 |
#define X86_VENDOR_UMC 3 |
#define X86_VENDOR_CENTAUR 5 |
#define X86_VENDOR_TRANSMETA 7 |
#define X86_VENDOR_NSC 8 |
#define X86_VENDOR_NUM 9 |
#define X86_VENDOR_UNKNOWN 0xff |
/* |
* capabilities of CPUs |
*/ |
extern struct cpuinfo_x86 boot_cpu_data; |
extern struct cpuinfo_x86 new_cpu_data; |
extern struct tss_struct doublefault_tss; |
extern __u32 cpu_caps_cleared[NCAPINTS]; |
extern __u32 cpu_caps_set[NCAPINTS]; |
#ifdef CONFIG_SMP |
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); |
#define cpu_data(cpu) per_cpu(cpu_info, cpu) |
#else |
#define cpu_info boot_cpu_data |
#define cpu_data(cpu) boot_cpu_data |
#endif |
extern const struct seq_operations cpuinfo_op; |
#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
extern void cpu_detect(struct cpuinfo_x86 *c); |
extern void fpu_detect(struct cpuinfo_x86 *c); |
extern void early_cpu_init(void); |
extern void identify_boot_cpu(void); |
extern void identify_secondary_cpu(struct cpuinfo_x86 *); |
extern void print_cpu_info(struct cpuinfo_x86 *); |
void print_cpu_msr(struct cpuinfo_x86 *); |
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); |
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); |
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c); |
extern void detect_extended_topology(struct cpuinfo_x86 *c); |
extern void detect_ht(struct cpuinfo_x86 *c); |
#ifdef CONFIG_X86_32 |
extern int have_cpuid_p(void); |
#else |
static inline int have_cpuid_p(void) |
{ |
return 1; |
} |
#endif |
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, |
unsigned int *ecx, unsigned int *edx) |
{ |
/* ecx is often an input as well as an output. */ |
asm volatile("cpuid" |
: "=a" (*eax), |
"=b" (*ebx), |
"=c" (*ecx), |
"=d" (*edx) |
: "0" (*eax), "2" (*ecx) |
: "memory"); |
} |
static inline void load_cr3(pgd_t *pgdir) |
{ |
write_cr3(__pa(pgdir)); |
} |
#ifdef CONFIG_X86_32 |
/* This is the TSS defined by the hardware. */ |
struct x86_hw_tss { |
unsigned short back_link, __blh; |
unsigned long sp0; |
unsigned short ss0, __ss0h; |
unsigned long sp1; |
/* ss1 caches MSR_IA32_SYSENTER_CS: */ |
unsigned short ss1, __ss1h; |
unsigned long sp2; |
unsigned short ss2, __ss2h; |
unsigned long __cr3; |
unsigned long ip; |
unsigned long flags; |
unsigned long ax; |
unsigned long cx; |
unsigned long dx; |
unsigned long bx; |
unsigned long sp; |
unsigned long bp; |
unsigned long si; |
unsigned long di; |
unsigned short es, __esh; |
unsigned short cs, __csh; |
unsigned short ss, __ssh; |
unsigned short ds, __dsh; |
unsigned short fs, __fsh; |
unsigned short gs, __gsh; |
unsigned short ldt, __ldth; |
unsigned short trace; |
unsigned short io_bitmap_base; |
} __attribute__((packed)); |
#else |
struct x86_hw_tss { |
u32 reserved1; |
u64 sp0; |
u64 sp1; |
u64 sp2; |
u64 reserved2; |
u64 ist[7]; |
u32 reserved3; |
u32 reserved4; |
u16 reserved5; |
u16 io_bitmap_base; |
} __attribute__((packed)) ____cacheline_aligned; |
#endif |
/* |
* IO-bitmap sizes: |
*/ |
#define IO_BITMAP_BITS 65536 |
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) |
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) |
#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) |
#define INVALID_IO_BITMAP_OFFSET 0x8000 |
struct tss_struct { |
/* |
* The hardware state: |
*/ |
struct x86_hw_tss x86_tss; |
/* |
* The extra 1 is there because the CPU will access an |
* additional byte beyond the end of the IO permission |
* bitmap. The extra byte must be all 1 bits, and must |
* be within the limit. |
*/ |
unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; |
/* |
* .. and then another 0x100 bytes for the emergency kernel stack: |
*/ |
unsigned long stack[64]; |
} ____cacheline_aligned; |
DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss); |
/* |
* Save the original ist values for checking stack pointers during debugging |
*/ |
struct orig_ist { |
unsigned long ist[7]; |
}; |
#define MXCSR_DEFAULT 0x1f80 |
struct i387_fsave_struct { |
u32 cwd; /* FPU Control Word */ |
u32 swd; /* FPU Status Word */ |
u32 twd; /* FPU Tag Word */ |
u32 fip; /* FPU IP Offset */ |
u32 fcs; /* FPU IP Selector */ |
u32 foo; /* FPU Operand Pointer Offset */ |
u32 fos; /* FPU Operand Pointer Selector */ |
/* 8*10 bytes for each FP-reg = 80 bytes: */ |
u32 st_space[20]; |
/* Software status information [not touched by FSAVE ]: */ |
u32 status; |
}; |
struct i387_fxsave_struct { |
u16 cwd; /* Control Word */ |
u16 swd; /* Status Word */ |
u16 twd; /* Tag Word */ |
u16 fop; /* Last Instruction Opcode */ |
union { |
struct { |
u64 rip; /* Instruction Pointer */ |
u64 rdp; /* Data Pointer */ |
}; |
struct { |
u32 fip; /* FPU IP Offset */ |
u32 fcs; /* FPU IP Selector */ |
u32 foo; /* FPU Operand Offset */ |
u32 fos; /* FPU Operand Selector */ |
}; |
}; |
u32 mxcsr; /* MXCSR Register State */ |
u32 mxcsr_mask; /* MXCSR Mask */ |
/* 8*16 bytes for each FP-reg = 128 bytes: */ |
u32 st_space[32]; |
/* 16*16 bytes for each XMM-reg = 256 bytes: */ |
u32 xmm_space[64]; |
u32 padding[12]; |
union { |
u32 padding1[12]; |
u32 sw_reserved[12]; |
}; |
} __attribute__((aligned(16))); |
struct i387_soft_struct { |
u32 cwd; |
u32 swd; |
u32 twd; |
u32 fip; |
u32 fcs; |
u32 foo; |
u32 fos; |
/* 8*10 bytes for each FP-reg = 80 bytes: */ |
u32 st_space[20]; |
u8 ftop; |
u8 changed; |
u8 lookahead; |
u8 no_update; |
u8 rm; |
u8 alimit; |
struct math_emu_info *info; |
u32 entry_eip; |
}; |
struct ymmh_struct { |
/* 16 * 16 bytes for each YMMH-reg = 256 bytes */ |
u32 ymmh_space[64]; |
}; |
/* We don't support LWP yet: */ |
struct lwp_struct { |
u8 reserved[128]; |
}; |
struct bndreg { |
u64 lower_bound; |
u64 upper_bound; |
} __packed; |
struct bndcsr { |
u64 bndcfgu; |
u64 bndstatus; |
} __packed; |
struct xsave_hdr_struct { |
u64 xstate_bv; |
u64 xcomp_bv; |
u64 reserved[6]; |
} __attribute__((packed)); |
struct xsave_struct { |
struct i387_fxsave_struct i387; |
struct xsave_hdr_struct xsave_hdr; |
struct ymmh_struct ymmh; |
struct lwp_struct lwp; |
struct bndreg bndreg[4]; |
struct bndcsr bndcsr; |
/* new processor state extensions will go here */ |
} __attribute__ ((packed, aligned (64))); |
union thread_xstate { |
struct i387_fsave_struct fsave; |
struct i387_fxsave_struct fxsave; |
struct i387_soft_struct soft; |
struct xsave_struct xsave; |
}; |
struct fpu { |
unsigned int last_cpu; |
unsigned int has_fpu; |
union thread_xstate *state; |
}; |
#ifdef CONFIG_X86_64 |
DECLARE_PER_CPU(struct orig_ist, orig_ist); |
union irq_stack_union { |
char irq_stack[IRQ_STACK_SIZE]; |
/* |
* GCC hardcodes the stack canary as %gs:40. Since the |
* irq_stack is the object at %gs:0, we reserve the bottom |
* 48 bytes of the irq stack for the canary. |
*/ |
struct { |
char gs_base[40]; |
unsigned long stack_canary; |
}; |
}; |
DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible; |
DECLARE_INIT_PER_CPU(irq_stack_union); |
DECLARE_PER_CPU(char *, irq_stack_ptr); |
DECLARE_PER_CPU(unsigned int, irq_count); |
extern asmlinkage void ignore_sysret(void); |
#else /* X86_64 */ |
#ifdef CONFIG_CC_STACKPROTECTOR |
/* |
* Make sure stack canary segment base is cached-aligned: |
* "For Intel Atom processors, avoid non zero segment base address |
* that is not aligned to cache line boundary at all cost." |
* (Optim Ref Manual Assembly/Compiler Coding Rule 15.) |
*/ |
struct stack_canary { |
char __pad[20]; /* canary at %gs:20 */ |
unsigned long canary; |
}; |
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); |
#endif |
/* |
* per-CPU IRQ handling stacks |
*/ |
struct irq_stack { |
u32 stack[THREAD_SIZE/sizeof(u32)]; |
} __aligned(THREAD_SIZE); |
DECLARE_PER_CPU(struct irq_stack *, hardirq_stack); |
DECLARE_PER_CPU(struct irq_stack *, softirq_stack); |
#endif /* X86_64 */ |
extern unsigned int xstate_size; |
extern void free_thread_xstate(struct task_struct *); |
extern struct kmem_cache *task_xstate_cachep; |
struct perf_event; |
struct thread_struct { |
/* Cached TLS descriptors: */ |
struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; |
unsigned long sp0; |
unsigned long sp; |
#ifdef CONFIG_X86_32 |
unsigned long sysenter_cs; |
#else |
unsigned long usersp; /* Copy from PDA */ |
unsigned short es; |
unsigned short ds; |
unsigned short fsindex; |
unsigned short gsindex; |
#endif |
#ifdef CONFIG_X86_32 |
unsigned long ip; |
#endif |
#ifdef CONFIG_X86_64 |
unsigned long fs; |
#endif |
unsigned long gs; |
/* Save middle states of ptrace breakpoints */ |
struct perf_event *ptrace_bps[HBP_NUM]; |
/* Debug status used for traps, single steps, etc... */ |
unsigned long debugreg6; |
/* Keep track of the exact dr7 value set by the user */ |
unsigned long ptrace_dr7; |
/* Fault info: */ |
unsigned long cr2; |
unsigned long trap_nr; |
unsigned long error_code; |
/* floating point and extended processor state */ |
struct fpu fpu; |
#ifdef CONFIG_X86_32 |
/* Virtual 86 mode info */ |
struct vm86_struct __user *vm86_info; |
unsigned long screen_bitmap; |
unsigned long v86flags; |
unsigned long v86mask; |
unsigned long saved_sp0; |
unsigned int saved_fs; |
unsigned int saved_gs; |
#endif |
/* IO permissions: */ |
unsigned long *io_bitmap_ptr; |
unsigned long iopl; |
/* Max allowed port in the bitmap, in bytes: */ |
unsigned io_bitmap_max; |
/* |
* fpu_counter contains the number of consecutive context switches
* during which the FPU is used. If this is over a threshold, the lazy
* fpu saving becomes unlazy to save the trap. This is an unsigned char
* so that after 256 times the counter wraps and the behavior turns
* lazy again; this is to deal with bursty apps that only use the FPU
* for a short time.
unsigned char fpu_counter; |
}; |
/* |
* Set IOPL bits in EFLAGS from given mask |
*/ |
static inline void native_set_iopl_mask(unsigned mask) |
{ |
#ifdef CONFIG_X86_32 |
unsigned int reg; |
asm volatile ("pushfl;" |
"popl %0;" |
"andl %1, %0;" |
"orl %2, %0;" |
"pushl %0;" |
"popfl" |
: "=&r" (reg) |
: "i" (~X86_EFLAGS_IOPL), "r" (mask)); |
#endif |
} |
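/*
 * Usage sketch (illustrative): X86_EFLAGS_IOPL covers EFLAGS bits 12-13,
 * so raising the I/O privilege level to 3 for the current context is:
 *
 *	set_iopl_mask(X86_EFLAGS_IOPL);		// 3 << 12
 */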
static inline void |
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread) |
{ |
tss->x86_tss.sp0 = thread->sp0; |
#ifdef CONFIG_X86_32 |
/* Only happens when SEP is enabled, no need to test "SEP"arately: */ |
if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { |
tss->x86_tss.ss1 = thread->sysenter_cs; |
wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); |
} |
#endif |
} |
static inline void native_swapgs(void) |
{ |
#ifdef CONFIG_X86_64 |
asm volatile("swapgs" ::: "memory"); |
#endif |
} |
#ifdef CONFIG_PARAVIRT |
#include <asm/paravirt.h> |
#else |
#define __cpuid native_cpuid |
#define paravirt_enabled() 0 |
static inline void load_sp0(struct tss_struct *tss, |
struct thread_struct *thread) |
{ |
native_load_sp0(tss, thread); |
} |
#define set_iopl_mask native_set_iopl_mask |
#endif /* CONFIG_PARAVIRT */ |
/* |
* Save the cr4 feature set we're using (ie |
* Pentium 4MB enable and PPro Global page |
* enable), so that any CPU's that boot up |
* after us can get the correct flags. |
*/ |
extern unsigned long mmu_cr4_features; |
extern u32 *trampoline_cr4_features; |
static inline void set_in_cr4(unsigned long mask) |
{ |
unsigned long cr4; |
mmu_cr4_features |= mask; |
if (trampoline_cr4_features) |
*trampoline_cr4_features = mmu_cr4_features; |
cr4 = read_cr4(); |
cr4 |= mask; |
write_cr4(cr4); |
} |
static inline void clear_in_cr4(unsigned long mask) |
{ |
unsigned long cr4; |
mmu_cr4_features &= ~mask; |
if (trampoline_cr4_features) |
*trampoline_cr4_features = mmu_cr4_features; |
cr4 = read_cr4(); |
cr4 &= ~mask; |
write_cr4(cr4); |
} |
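/*
 * Usage sketch (illustrative): enable global pages on the boot CPU and
 * record the bit in mmu_cr4_features so CPUs booting later inherit it
 * (cpu_has_pge is assumed from <asm/cpufeature.h>):
 *
 *	if (cpu_has_pge)
 *		set_in_cr4(X86_CR4_PGE);
 */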
typedef struct { |
unsigned long seg; |
} mm_segment_t; |
/* Free all resources held by a thread. */ |
extern void release_thread(struct task_struct *); |
unsigned long get_wchan(struct task_struct *p); |
/* |
* Generic CPUID function |
* clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx |
* resulting in stale register contents being returned. |
*/ |
static inline void cpuid(unsigned int op, |
unsigned int *eax, unsigned int *ebx, |
unsigned int *ecx, unsigned int *edx) |
{ |
*eax = op; |
*ecx = 0; |
__cpuid(eax, ebx, ecx, edx); |
} |
/* Some CPUID calls want 'count' to be placed in ecx */ |
static inline void cpuid_count(unsigned int op, int count, |
unsigned int *eax, unsigned int *ebx, |
unsigned int *ecx, unsigned int *edx) |
{ |
*eax = op; |
*ecx = count; |
__cpuid(eax, ebx, ecx, edx); |
} |
/* |
* CPUID functions returning a single datum |
*/ |
static inline unsigned int cpuid_eax(unsigned int op) |
{ |
unsigned int eax, ebx, ecx, edx; |
cpuid(op, &eax, &ebx, &ecx, &edx); |
return eax; |
} |
static inline unsigned int cpuid_ebx(unsigned int op) |
{ |
unsigned int eax, ebx, ecx, edx; |
cpuid(op, &eax, &ebx, &ecx, &edx); |
return ebx; |
} |
static inline unsigned int cpuid_ecx(unsigned int op) |
{ |
unsigned int eax, ebx, ecx, edx; |
cpuid(op, &eax, &ebx, &ecx, &edx); |
return ecx; |
} |
static inline unsigned int cpuid_edx(unsigned int op) |
{ |
unsigned int eax, ebx, ecx, edx; |
cpuid(op, &eax, &ebx, &ecx, &edx); |
return edx; |
} |
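/*
 * Usage sketch (illustrative): CPUID leaf 0 returns the vendor string in
 * ebx, edx, ecx (in that order), so reading it with the wrapper above is:
 *
 *	unsigned int max_leaf, vendor[3];
 *	cpuid(0, &max_leaf, &vendor[0], &vendor[2], &vendor[1]);
 *	// vendor now spells e.g. "GenuineIntel"
 *
 * Sub-leaf enumeration (e.g. cache topology in leaf 4) goes through
 * cpuid_count() instead, with the sub-leaf index as the second argument.
 */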
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ |
static inline void rep_nop(void) |
{ |
asm volatile("rep; nop" ::: "memory"); |
} |
static inline void cpu_relax(void) |
{ |
rep_nop(); |
} |
#define cpu_relax_lowlatency() cpu_relax() |
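/*
 * Usage sketch (illustrative): a polite busy-wait on a flag set by another
 * CPU, assuming ACCESS_ONCE() from <linux/compiler.h>:
 *
 *	while (!ACCESS_ONCE(flag))
 *		cpu_relax();
 */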
/* Stop speculative execution and prefetching of modified code. */ |
static inline void sync_core(void) |
{ |
int tmp; |
#ifdef CONFIG_M486 |
/* |
* Do a CPUID if available, otherwise do a jump. The jump |
* can conveniently enough be the jump around CPUID. |
*/ |
asm volatile("cmpl %2,%1\n\t" |
"jl 1f\n\t" |
"cpuid\n" |
"1:" |
: "=a" (tmp) |
: "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1) |
: "ebx", "ecx", "edx", "memory"); |
#else |
/* |
* CPUID is a barrier to speculative execution. |
* Prefetched instructions are automatically |
* invalidated when modified. |
*/ |
asm volatile("cpuid" |
: "=a" (tmp) |
: "0" (1) |
: "ebx", "ecx", "edx", "memory"); |
#endif |
} |
extern void select_idle_routine(const struct cpuinfo_x86 *c); |
extern void init_amd_e400_c1e_mask(void); |
extern unsigned long boot_option_idle_override; |
extern bool amd_e400_c1e_detected; |
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, |
IDLE_POLL}; |
extern void enable_sep_cpu(void); |
extern int sysenter_setup(void); |
extern void early_trap_init(void); |
void early_trap_pf_init(void); |
/* Defined in head.S */ |
extern struct desc_ptr early_gdt_descr; |
extern void cpu_set_gdt(int); |
extern void switch_to_new_gdt(int); |
extern void load_percpu_segment(int); |
extern void cpu_init(void); |
static inline unsigned long get_debugctlmsr(void) |
{ |
unsigned long debugctlmsr = 0; |
#ifndef CONFIG_X86_DEBUGCTLMSR |
if (boot_cpu_data.x86 < 6) |
return 0; |
#endif |
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); |
return debugctlmsr; |
} |
static inline void update_debugctlmsr(unsigned long debugctlmsr) |
{ |
#ifndef CONFIG_X86_DEBUGCTLMSR |
if (boot_cpu_data.x86 < 6) |
return; |
#endif |
wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); |
} |
extern void set_task_blockstep(struct task_struct *task, bool on); |
/* |
* from system description table in BIOS. Mostly for MCA use, but |
* others may find it useful: |
*/ |
extern unsigned int machine_id; |
extern unsigned int machine_submodel_id; |
extern unsigned int BIOS_revision; |
/* Boot loader type from the setup header: */ |
extern int bootloader_type; |
extern int bootloader_version; |
extern char ignore_fpu_irq; |
#define HAVE_ARCH_PICK_MMAP_LAYOUT 1 |
#define ARCH_HAS_PREFETCHW |
#define ARCH_HAS_SPINLOCK_PREFETCH |
#ifdef CONFIG_X86_32 |
# define BASE_PREFETCH ASM_NOP4 |
# define ARCH_HAS_PREFETCH |
#else |
# define BASE_PREFETCH "prefetcht0 (%1)" |
#endif |
/* |
* Prefetch instructions for Pentium III (+) and AMD Athlon (+) |
* |
* It's not worth caring about 3DNow! prefetches for the K6
* because they are microcoded there and very slow. |
*/ |
static inline void prefetch(const void *x) |
{ |
alternative_input(BASE_PREFETCH, |
"prefetchnta (%1)", |
X86_FEATURE_XMM, |
"r" (x)); |
} |
/* |
* 3dnow prefetch to get an exclusive cache line. |
* Useful for spinlocks to avoid one state transition in the |
* cache coherency protocol: |
*/ |
static inline void prefetchw(const void *x) |
{ |
alternative_input(BASE_PREFETCH, |
"prefetchw (%1)", |
X86_FEATURE_3DNOW, |
"r" (x)); |
} |
static inline void spin_lock_prefetch(const void *x) |
{ |
prefetchw(x); |
} |
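/*
 * Usage sketch (illustrative): hide memory latency in a list walk by
 * prefetching the next node while the current one is processed
 * ('process' is a placeholder):
 *
 *	for (p = head; p; p = p->next) {
 *		prefetch(p->next);
 *		process(p);
 *	}
 */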
#ifdef CONFIG_X86_32 |
/* |
* User space process size: 3GB (default). |
*/ |
#define TASK_SIZE PAGE_OFFSET |
#define TASK_SIZE_MAX TASK_SIZE |
#define STACK_TOP TASK_SIZE |
#define STACK_TOP_MAX STACK_TOP |
#define INIT_THREAD { \ |
.sp0 = sizeof(init_stack) + (long)&init_stack, \ |
.vm86_info = NULL, \ |
.sysenter_cs = __KERNEL_CS, \ |
.io_bitmap_ptr = NULL, \ |
} |
/* |
* Note that the .io_bitmap member must be extra-big. This is because |
* the CPU will access an additional byte beyond the end of the IO |
* permission bitmap. The extra byte must be all 1 bits, and must |
* be within the limit. |
*/ |
#define INIT_TSS { \ |
.x86_tss = { \ |
.sp0 = sizeof(init_stack) + (long)&init_stack, \ |
.ss0 = __KERNEL_DS, \ |
.ss1 = __KERNEL_CS, \ |
.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ |
}, \ |
.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ |
} |
extern unsigned long thread_saved_pc(struct task_struct *tsk); |
#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) |
#define KSTK_TOP(info) \ |
({ \ |
unsigned long *__ptr = (unsigned long *)(info); \ |
(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ |
}) |
/* |
* The below -8 is to reserve 8 bytes on top of the ring0 stack. |
* This is necessary to guarantee that the entire "struct pt_regs" |
* is accessible even if the CPU hasn't stored the SS/ESP registers
* on the stack (interrupt gate does not save these registers |
* when switching to the same priv ring). |
* Therefore beware: accessing the ss/esp fields of the |
* "struct pt_regs" is possible, but they may contain the |
* completely wrong values. |
*/ |
#define task_pt_regs(task) \ |
({ \ |
struct pt_regs *__regs__; \ |
__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ |
__regs__ - 1; \ |
}) |
#define KSTK_ESP(task) (task_pt_regs(task)->sp) |
#else |
/* |
* User space process size. 47bits minus one guard page. The guard |
* page is necessary on Intel CPUs: if a SYSCALL instruction is at |
* the highest possible canonical userspace address, then that |
* syscall will enter the kernel with a non-canonical return |
* address, and SYSRET will explode dangerously. We avoid this |
* particular problem by preventing anything from being mapped |
* at the maximum canonical address. |
*/ |
#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE) |
/* This decides where the kernel will search for a free chunk of vm |
* space during mmap's. |
*/ |
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ |
0xc0000000 : 0xFFFFe000) |
#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \ |
IA32_PAGE_OFFSET : TASK_SIZE_MAX) |
#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \ |
IA32_PAGE_OFFSET : TASK_SIZE_MAX) |
#define STACK_TOP TASK_SIZE |
#define STACK_TOP_MAX TASK_SIZE_MAX |
#define INIT_THREAD { \ |
.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ |
} |
#define INIT_TSS { \ |
.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ |
} |
/* |
* Return saved PC of a blocked thread. |
* What is this good for? It will always be the scheduler or ret_from_fork.
*/ |
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) |
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) |
extern unsigned long KSTK_ESP(struct task_struct *task); |
/* |
* User space RSP while inside the SYSCALL fast path |
*/ |
DECLARE_PER_CPU(unsigned long, old_rsp); |
#endif /* CONFIG_X86_64 */ |
extern void start_thread(struct pt_regs *regs, unsigned long new_ip, |
unsigned long new_sp); |
/* |
* This decides where the kernel will search for a free chunk of vm |
* space during mmap's. |
*/ |
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) |
#define KSTK_EIP(task) (task_pt_regs(task)->ip) |
/* Get/set a process' ability to use the timestamp counter instruction */ |
#define GET_TSC_CTL(adr) get_tsc_mode((adr)) |
#define SET_TSC_CTL(val) set_tsc_mode((val)) |
extern int get_tsc_mode(unsigned long adr); |
extern int set_tsc_mode(unsigned int val); |
/* Register/unregister a process' MPX related resource */ |
#define MPX_ENABLE_MANAGEMENT(tsk) mpx_enable_management((tsk)) |
#define MPX_DISABLE_MANAGEMENT(tsk) mpx_disable_management((tsk)) |
#ifdef CONFIG_X86_INTEL_MPX |
extern int mpx_enable_management(struct task_struct *tsk); |
extern int mpx_disable_management(struct task_struct *tsk); |
#else |
static inline int mpx_enable_management(struct task_struct *tsk) |
{ |
return -EINVAL; |
} |
static inline int mpx_disable_management(struct task_struct *tsk) |
{ |
return -EINVAL; |
} |
#endif /* CONFIG_X86_INTEL_MPX */ |
extern u16 amd_get_nb_id(int cpu); |
static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) |
{ |
uint32_t base, eax, signature[3]; |
for (base = 0x40000000; base < 0x40010000; base += 0x100) { |
cpuid(base, &eax, &signature[0], &signature[1], &signature[2]); |
if (!memcmp(sig, signature, 12) && |
(leaves == 0 || ((eax - base) >= leaves))) |
return base; |
} |
return 0; |
} |
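/*
 * Usage sketch (illustrative): KVM advertises the signature
 * "KVMKVMKVM\0\0\0" in this range, so guest detection reduces to:
 *
 *	if (hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0))
 *		// running as a KVM guest
 */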
extern unsigned long arch_align_stack(unsigned long sp); |
extern void free_init_pages(char *what, unsigned long begin, unsigned long end); |
void default_idle(void); |
#ifdef CONFIG_XEN |
bool xen_set_default_idle(void); |
#else |
#define xen_set_default_idle 0 |
#endif |
void stop_this_cpu(void *dummy); |
void df_debug(struct pt_regs *regs, long error_code); |
#endif /* _ASM_X86_PROCESSOR_H */ |
/drivers/include/asm/required-features.h |
---|
0,0 → 1,96 |
#ifndef _ASM_X86_REQUIRED_FEATURES_H |
#define _ASM_X86_REQUIRED_FEATURES_H |
/* Define the minimum CPUID feature set for the kernel. These bits are
checked really early to actually display a visible error message before
the kernel dies. Make sure to assign features to the proper mask!
Some requirements that are not in CPUID yet are also in the
CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.
The real information is in arch/x86/Kconfig.cpu; this just converts
the CONFIGs into a bitmask. */
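/*
 * Worked example (illustrative): X86_FEATURE_PAE is defined as (0*32+ 6),
 * i.e. word 0, bit 6, so NEED_PAE contributes bit 6 to REQUIRED_MASK0 and
 * the early boot test is conceptually:
 *
 *	(cpuid_edx(1) & REQUIRED_MASK0) == REQUIRED_MASK0
 */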
#ifndef CONFIG_MATH_EMULATION |
# define NEED_FPU (1<<(X86_FEATURE_FPU & 31)) |
#else |
# define NEED_FPU 0 |
#endif |
#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) |
# define NEED_PAE (1<<(X86_FEATURE_PAE & 31)) |
#else |
# define NEED_PAE 0 |
#endif |
#ifdef CONFIG_X86_CMPXCHG64 |
# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31)) |
#else |
# define NEED_CX8 0 |
#endif |
#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64) |
# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31)) |
#else |
# define NEED_CMOV 0 |
#endif |
#ifdef CONFIG_X86_USE_3DNOW |
# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31)) |
#else |
# define NEED_3DNOW 0 |
#endif |
#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64) |
# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31)) |
#else |
# define NEED_NOPL 0 |
#endif |
#ifdef CONFIG_MATOM |
# define NEED_MOVBE (1<<(X86_FEATURE_MOVBE & 31)) |
#else |
# define NEED_MOVBE 0 |
#endif |
#ifdef CONFIG_X86_64 |
#ifdef CONFIG_PARAVIRT |
/* Paravirtualized systems may not have PSE or PGE available */ |
#define NEED_PSE 0 |
#define NEED_PGE 0 |
#else |
#define NEED_PSE (1<<(X86_FEATURE_PSE & 31))
#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
#endif |
#define NEED_MSR (1<<(X86_FEATURE_MSR & 31)) |
#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31)) |
#define NEED_XMM (1<<(X86_FEATURE_XMM & 31)) |
#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31)) |
#define NEED_LM (1<<(X86_FEATURE_LM & 31)) |
#else |
#define NEED_PSE 0 |
#define NEED_MSR 0 |
#define NEED_PGE 0 |
#define NEED_FXSR 0 |
#define NEED_XMM 0 |
#define NEED_XMM2 0 |
#define NEED_LM 0 |
#endif |
#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\ |
NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\ |
NEED_XMM|NEED_XMM2) |
#define SSE_MASK (NEED_XMM|NEED_XMM2) |
#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) |
#define REQUIRED_MASK2 0 |
#define REQUIRED_MASK3 (NEED_NOPL) |
#define REQUIRED_MASK4 (NEED_MOVBE) |
#define REQUIRED_MASK5 0 |
#define REQUIRED_MASK6 0 |
#define REQUIRED_MASK7 0 |
#define REQUIRED_MASK8 0 |
#define REQUIRED_MASK9 0 |
#endif /* _ASM_X86_REQUIRED_FEATURES_H */ |
/drivers/include/asm/rmwcc.h |
---|
0,0 → 1,41 |
#ifndef _ASM_X86_RMWcc |
#define _ASM_X86_RMWcc |
#ifdef CC_HAVE_ASM_GOTO |
#define __GEN_RMWcc(fullop, var, cc, ...) \ |
do { \ |
asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \ |
: : "m" (var), ## __VA_ARGS__ \ |
: "memory" : cc_label); \ |
return 0; \ |
cc_label: \ |
return 1; \ |
} while (0) |
#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ |
__GEN_RMWcc(op " " arg0, var, cc) |
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ |
__GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val)) |
#else /* !CC_HAVE_ASM_GOTO */ |
#define __GEN_RMWcc(fullop, var, cc, ...) \ |
do { \ |
char c; \ |
asm volatile (fullop "; set" cc " %1" \ |
: "+m" (var), "=qm" (c) \ |
: __VA_ARGS__ : "memory"); \ |
return c != 0; \ |
} while (0) |
#define GEN_UNARY_RMWcc(op, var, arg0, cc) \ |
__GEN_RMWcc(op " " arg0, var, cc) |
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ |
__GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val)) |
#endif /* CC_HAVE_ASM_GOTO */ |
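/*
 * Usage sketch (illustrative; mirrors how the x86 atomics use these
 * macros, assuming LOCK_PREFIX and atomic_t from the surrounding headers):
 *
 *	static inline int atomic_dec_and_test(atomic_t *v)
 *	{
 *		GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
 *	}
 *
 * With asm goto the branch folds into the jcc itself; without it, a setcc
 * into a byte register is used instead.
 */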
#endif /* _ASM_X86_RMWcc */ |
/drivers/include/asm/scatterlist.h |
---|
0,0 → 1,41 |
#ifndef __ASM_GENERIC_SCATTERLIST_H |
#define __ASM_GENERIC_SCATTERLIST_H |
#include <linux/types.h> |
struct scatterlist { |
#ifdef CONFIG_DEBUG_SG |
unsigned long sg_magic; |
#endif |
unsigned long page_link; |
unsigned int offset; |
unsigned int length; |
dma_addr_t dma_address; |
#ifdef CONFIG_NEED_SG_DMA_LENGTH |
unsigned int dma_length; |
#endif |
}; |
/* |
* These macros should be used after a dma_map_sg call has been done |
* to get bus addresses of each of the SG entries and their lengths. |
* You should only work with the number of sg entries pci_map_sg |
* returns, or alternatively stop on the first sg_dma_len(sg) which |
* is 0. |
*/ |
#define sg_dma_address(sg) ((sg)->dma_address) |
#ifdef CONFIG_NEED_SG_DMA_LENGTH |
#define sg_dma_len(sg) ((sg)->dma_length) |
#else |
#define sg_dma_len(sg) ((sg)->length) |
#endif |
#define ARCH_HAS_SG_CHAIN |
int dma_map_sg(struct device *dev, struct scatterlist *sglist, |
int nelems, int dir); |
#define dma_unmap_sg(d, s, n, r) |
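/*
 * Usage sketch (illustrative): walk the mapped entries exactly as the
 * comment above prescribes ('program_hw' is a placeholder):
 *
 *	int i, n = dma_map_sg(dev, sg, nents, dir);
 *	for (i = 0; i < n && sg_dma_len(&sg[i]); i++)
 *		program_hw(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
 */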
#endif /* __ASM_GENERIC_SCATTERLIST_H */ |
/drivers/include/asm/sigcontext.h |
---|
0,0 → 1,79 |
#ifndef _ASM_X86_SIGCONTEXT_H |
#define _ASM_X86_SIGCONTEXT_H |
#include <uapi/asm/sigcontext.h> |
#ifdef __i386__ |
struct sigcontext { |
unsigned short gs, __gsh; |
unsigned short fs, __fsh; |
unsigned short es, __esh; |
unsigned short ds, __dsh; |
unsigned long di; |
unsigned long si; |
unsigned long bp; |
unsigned long sp; |
unsigned long bx; |
unsigned long dx; |
unsigned long cx; |
unsigned long ax; |
unsigned long trapno; |
unsigned long err; |
unsigned long ip; |
unsigned short cs, __csh; |
unsigned long flags; |
unsigned long sp_at_signal; |
unsigned short ss, __ssh; |
/* |
* fpstate is really (struct _fpstate *) or (struct _xstate *) |
* depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved |
* bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end |
* of extended memory layout. See comments at the definition of |
* (struct _fpx_sw_bytes) |
*/ |
void __user *fpstate; /* zero when no FPU/extended context */ |
unsigned long oldmask; |
unsigned long cr2; |
}; |
#else /* __i386__ */ |
struct sigcontext { |
unsigned long r8; |
unsigned long r9; |
unsigned long r10; |
unsigned long r11; |
unsigned long r12; |
unsigned long r13; |
unsigned long r14; |
unsigned long r15; |
unsigned long di; |
unsigned long si; |
unsigned long bp; |
unsigned long bx; |
unsigned long dx; |
unsigned long ax; |
unsigned long cx; |
unsigned long sp; |
unsigned long ip; |
unsigned long flags; |
unsigned short cs; |
unsigned short gs; |
unsigned short fs; |
unsigned short __pad0; |
unsigned long err; |
unsigned long trapno; |
unsigned long oldmask; |
unsigned long cr2; |
/* |
* fpstate is really (struct _fpstate *) or (struct _xstate *) |
* depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved |
* bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end |
* of extended memory layout. See comments at the definition of |
* (struct _fpx_sw_bytes) |
*/ |
void __user *fpstate; /* zero when no FPU/extended context */ |
unsigned long reserved1[8]; |
}; |
#endif /* !__i386__ */ |
#endif /* _ASM_X86_SIGCONTEXT_H */ |
/drivers/include/asm/special_insns.h |
---|
0,0 → 1,207 |
#ifndef _ASM_X86_SPECIAL_INSNS_H |
#define _ASM_X86_SPECIAL_INSNS_H |
#ifdef __KERNEL__ |
static inline void native_clts(void) |
{ |
asm volatile("clts"); |
} |
/* |
* Volatile isn't enough to prevent the compiler from reordering the |
* read/write functions for the control registers and messing everything up. |
* A memory clobber would solve the problem, but would prevent reordering of
* all loads/stores around it, which can hurt performance. The solution is to
* use a variable and mimic reads and writes to it to enforce serialization.
*/ |
extern unsigned long __force_order; |
static inline unsigned long native_read_cr0(void) |
{ |
unsigned long val; |
asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order)); |
return val; |
} |
static inline void native_write_cr0(unsigned long val) |
{ |
asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order)); |
} |
static inline unsigned long native_read_cr2(void) |
{ |
unsigned long val; |
asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order)); |
return val; |
} |
static inline void native_write_cr2(unsigned long val) |
{ |
asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order)); |
} |
static inline unsigned long native_read_cr3(void) |
{ |
unsigned long val; |
asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order)); |
return val; |
} |
static inline void native_write_cr3(unsigned long val) |
{ |
asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order)); |
} |
static inline unsigned long native_read_cr4(void) |
{ |
unsigned long val; |
asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order)); |
return val; |
} |
static inline unsigned long native_read_cr4_safe(void) |
{ |
unsigned long val; |
/* This could fault if %cr4 does not exist. On x86_64, CR4 always
* exists, so it will never fault. */
#ifdef CONFIG_X86_32 |
asm volatile("1: mov %%cr4, %0\n" |
"2:\n" |
_ASM_EXTABLE(1b, 2b) |
: "=r" (val), "=m" (__force_order) : "0" (0)); |
#else |
val = native_read_cr4(); |
#endif |
return val; |
} |
static inline void native_write_cr4(unsigned long val) |
{ |
asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order)); |
} |
#ifdef CONFIG_X86_64 |
static inline unsigned long native_read_cr8(void) |
{ |
unsigned long cr8; |
asm volatile("movq %%cr8,%0" : "=r" (cr8)); |
return cr8; |
} |
static inline void native_write_cr8(unsigned long val) |
{ |
asm volatile("movq %0,%%cr8" :: "r" (val) : "memory"); |
} |
#endif |
static inline void native_wbinvd(void) |
{ |
asm volatile("wbinvd": : :"memory"); |
} |
extern asmlinkage void native_load_gs_index(unsigned); |
#ifdef CONFIG_PARAVIRT |
#include <asm/paravirt.h> |
#else |
static inline unsigned long read_cr0(void) |
{ |
return native_read_cr0(); |
} |
static inline void write_cr0(unsigned long x) |
{ |
native_write_cr0(x); |
} |
static inline unsigned long read_cr2(void) |
{ |
return native_read_cr2(); |
} |
static inline void write_cr2(unsigned long x) |
{ |
native_write_cr2(x); |
} |
static inline unsigned long read_cr3(void) |
{ |
return native_read_cr3(); |
} |
static inline void write_cr3(unsigned long x) |
{ |
native_write_cr3(x); |
} |
static inline unsigned long read_cr4(void) |
{ |
return native_read_cr4(); |
} |
static inline unsigned long read_cr4_safe(void) |
{ |
return native_read_cr4_safe(); |
} |
static inline void write_cr4(unsigned long x) |
{ |
native_write_cr4(x); |
} |
static inline void wbinvd(void) |
{ |
native_wbinvd(); |
} |
#ifdef CONFIG_X86_64 |
static inline unsigned long read_cr8(void) |
{ |
return native_read_cr8(); |
} |
static inline void write_cr8(unsigned long x) |
{ |
native_write_cr8(x); |
} |
static inline void load_gs_index(unsigned selector) |
{ |
native_load_gs_index(selector); |
} |
#endif |
/* Clear the 'TS' bit */ |
static inline void clts(void) |
{ |
native_clts(); |
} |
#endif/* CONFIG_PARAVIRT */ |
#define stts() write_cr0(read_cr0() | X86_CR0_TS) |
static inline void clflush(volatile void *__p) |
{ |
asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p)); |
} |
static inline void clflushopt(volatile void *__p) |
{ |
alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0", |
".byte 0x66; clflush %P0", |
X86_FEATURE_CLFLUSHOPT, |
"+m" (*(volatile char __force *)__p)); |
} |
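/*
 * Usage sketch (illustrative): flush a buffer line by line, assuming the
 * stride in boot_cpu_data.x86_clflush_size; a fence (e.g. mb()) is still
 * required before relying on the flushes being globally visible:
 *
 *	char *p;
 *	for (p = buf; p < buf + len; p += boot_cpu_data.x86_clflush_size)
 *		clflush(p);
 */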
#define nop() asm volatile ("nop") |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_SPECIAL_INSNS_H */ |
/drivers/include/asm/spinlock_types.h |
---|
0,0 → 1,20 |
#ifndef _ASM_X86_SPINLOCK_TYPES_H |
#define _ASM_X86_SPINLOCK_TYPES_H |
#ifndef __LINUX_SPINLOCK_TYPES_H |
# error "please don't include this file directly" |
#endif |
typedef struct raw_spinlock { |
unsigned int slock; |
} raw_spinlock_t; |
#define __RAW_SPIN_LOCK_UNLOCKED { 0 } |
typedef struct { |
unsigned int lock; |
} raw_rwlock_t; |
#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } |
#endif /* _ASM_X86_SPINLOCK_TYPES_H */ |
/drivers/include/asm/string.h |
---|
0,0 → 1,5 |
#ifdef CONFIG_X86_32 |
# include <asm/string_32.h> |
#else |
# include <asm/string_64.h> |
#endif |
/drivers/include/asm/string_32.h |
---|
0,0 → 1,342 |
#ifndef _ASM_X86_STRING_32_H |
#define _ASM_X86_STRING_32_H |
#ifdef __KERNEL__ |
/* Let gcc decide whether to inline or use the out of line functions */ |
#define __HAVE_ARCH_STRCPY |
extern char *strcpy(char *dest, const char *src); |
#define __HAVE_ARCH_STRNCPY |
extern char *strncpy(char *dest, const char *src, size_t count); |
#define __HAVE_ARCH_STRCAT |
extern char *strcat(char *dest, const char *src); |
#define __HAVE_ARCH_STRNCAT |
extern char *strncat(char *dest, const char *src, size_t count); |
#define __HAVE_ARCH_STRCMP |
extern int strcmp(const char *cs, const char *ct); |
#define __HAVE_ARCH_STRNCMP |
extern int strncmp(const char *cs, const char *ct, size_t count); |
#define __HAVE_ARCH_STRCHR |
extern char *strchr(const char *s, int c); |
#define __HAVE_ARCH_STRLEN |
extern size_t strlen(const char *s); |
static __always_inline void *__memcpy(void *to, const void *from, size_t n) |
{ |
int d0, d1, d2; |
asm volatile("rep ; movsl\n\t" |
"movl %4,%%ecx\n\t" |
"andl $3,%%ecx\n\t" |
"jz 1f\n\t" |
"rep ; movsb\n\t" |
"1:" |
: "=&c" (d0), "=&D" (d1), "=&S" (d2) |
: "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from) |
: "memory"); |
return to; |
} |
/* |
* This looks ugly, but the compiler can optimize it totally, |
* as the count is constant. |
*/ |
static __always_inline void *__constant_memcpy(void *to, const void *from, |
size_t n) |
{ |
long esi, edi; |
if (!n) |
return to; |
switch (n) { |
case 1: |
*(char *)to = *(char *)from; |
return to; |
case 2: |
*(short *)to = *(short *)from; |
return to; |
case 4: |
*(int *)to = *(int *)from; |
return to; |
case 3: |
*(short *)to = *(short *)from; |
*((char *)to + 2) = *((char *)from + 2); |
return to; |
case 5: |
*(int *)to = *(int *)from; |
*((char *)to + 4) = *((char *)from + 4); |
return to; |
case 6: |
*(int *)to = *(int *)from; |
*((short *)to + 2) = *((short *)from + 2); |
return to; |
case 8: |
*(int *)to = *(int *)from; |
*((int *)to + 1) = *((int *)from + 1); |
return to; |
} |
esi = (long)from; |
edi = (long)to; |
if (n >= 5 * 4) { |
/* large block: use rep prefix */ |
int ecx; |
asm volatile("rep ; movsl" |
: "=&c" (ecx), "=&D" (edi), "=&S" (esi) |
: "0" (n / 4), "1" (edi), "2" (esi) |
: "memory" |
); |
} else { |
/* small block: don't clobber ecx + smaller code */ |
if (n >= 4 * 4) |
asm volatile("movsl" |
: "=&D"(edi), "=&S"(esi) |
: "0"(edi), "1"(esi) |
: "memory"); |
if (n >= 3 * 4) |
asm volatile("movsl" |
: "=&D"(edi), "=&S"(esi) |
: "0"(edi), "1"(esi) |
: "memory"); |
if (n >= 2 * 4) |
asm volatile("movsl" |
: "=&D"(edi), "=&S"(esi) |
: "0"(edi), "1"(esi) |
: "memory"); |
if (n >= 1 * 4) |
asm volatile("movsl" |
: "=&D"(edi), "=&S"(esi) |
: "0"(edi), "1"(esi) |
: "memory"); |
} |
switch (n % 4) { |
/* tail */ |
case 0: |
return to; |
case 1: |
asm volatile("movsb" |
: "=&D"(edi), "=&S"(esi) |
: "0"(edi), "1"(esi) |
: "memory"); |
return to; |
case 2: |
asm volatile("movsw" |
: "=&D"(edi), "=&S"(esi) |
: "0"(edi), "1"(esi) |
: "memory"); |
return to; |
default: |
asm volatile("movsw\n\tmovsb" |
: "=&D"(edi), "=&S"(esi) |
: "0"(edi), "1"(esi) |
: "memory"); |
return to; |
} |
} |
#define __HAVE_ARCH_MEMCPY |
#ifdef CONFIG_X86_USE_3DNOW |
#include <asm/mmx.h> |
/* |
* This CPU favours 3DNow! strongly (e.g. AMD Athlon)
*/ |
static inline void *__constant_memcpy3d(void *to, const void *from, size_t len) |
{ |
if (len < 512) |
return __constant_memcpy(to, from, len); |
return _mmx_memcpy(to, from, len); |
} |
static inline void *__memcpy3d(void *to, const void *from, size_t len) |
{ |
if (len < 512) |
return __memcpy(to, from, len); |
return _mmx_memcpy(to, from, len); |
} |
#define memcpy(t, f, n) \ |
(__builtin_constant_p((n)) \ |
? __constant_memcpy3d((t), (f), (n)) \ |
: __memcpy3d((t), (f), (n))) |
#else |
/* |
* No 3D Now! |
*/ |
#ifndef CONFIG_KMEMCHECK |
#if (__GNUC__ >= 4) |
#define memcpy(t, f, n) __builtin_memcpy(t, f, n) |
#else |
#define memcpy(t, f, n) \ |
(__builtin_constant_p((n)) \ |
? __constant_memcpy((t), (f), (n)) \ |
: __memcpy((t), (f), (n))) |
#endif |
#else |
/* |
* kmemcheck becomes very happy if we use the REP instructions unconditionally, |
* because it means that we know both memory operands in advance. |
*/ |
#define memcpy(t, f, n) __memcpy((t), (f), (n)) |
#endif |
#endif |
#define __HAVE_ARCH_MEMMOVE |
void *memmove(void *dest, const void *src, size_t n); |
#define memcmp __builtin_memcmp |
#define __HAVE_ARCH_MEMCHR |
extern void *memchr(const void *cs, int c, size_t count); |
static inline void *__memset_generic(void *s, char c, size_t count) |
{ |
int d0, d1; |
asm volatile("rep\n\t" |
"stosb" |
: "=&c" (d0), "=&D" (d1) |
: "a" (c), "1" (s), "0" (count) |
: "memory"); |
return s; |
} |
/* we might want to write optimized versions of these later */ |
#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count)) |
/* |
* memset(x, 0, y) is a reasonably common thing to do, so we want to fill |
* things 32 bits at a time even when we don't know the size of the |
* area at compile-time.
*/ |
static __always_inline |
void *__constant_c_memset(void *s, unsigned long c, size_t count) |
{ |
int d0, d1; |
asm volatile("rep ; stosl\n\t" |
"testb $2,%b3\n\t" |
"je 1f\n\t" |
"stosw\n" |
"1:\ttestb $1,%b3\n\t" |
"je 2f\n\t" |
"stosb\n" |
"2:" |
: "=&c" (d0), "=&D" (d1) |
: "a" (c), "q" (count), "0" (count/4), "1" ((long)s) |
: "memory"); |
return s; |
} |
/* Added by Gertjan van Wingerde to make minix and sysv module work */ |
#define __HAVE_ARCH_STRNLEN |
extern size_t strnlen(const char *s, size_t count); |
/* end of additional stuff */ |
#define __HAVE_ARCH_STRSTR |
extern char *strstr(const char *cs, const char *ct); |
/* |
* This looks horribly ugly, but the compiler can optimize it totally, |
* as by now we know that both pattern and count are constant.
*/ |
static __always_inline |
void *__constant_c_and_count_memset(void *s, unsigned long pattern, |
size_t count) |
{ |
switch (count) { |
case 0: |
return s; |
case 1: |
*(unsigned char *)s = pattern & 0xff; |
return s; |
case 2: |
*(unsigned short *)s = pattern & 0xffff; |
return s; |
case 3: |
*(unsigned short *)s = pattern & 0xffff; |
*((unsigned char *)s + 2) = pattern & 0xff; |
return s; |
case 4: |
*(unsigned long *)s = pattern; |
return s; |
} |
#define COMMON(x) \ |
asm volatile("rep ; stosl" \ |
x \ |
: "=&c" (d0), "=&D" (d1) \ |
: "a" (eax), "0" (count/4), "1" ((long)s) \ |
: "memory") |
{ |
int d0, d1; |
#if __GNUC__ == 4 && __GNUC_MINOR__ == 0 |
/* Workaround for broken gcc 4.0 */ |
register unsigned long eax asm("%eax") = pattern; |
#else |
unsigned long eax = pattern; |
#endif |
switch (count % 4) { |
case 0: |
COMMON(""); |
return s; |
case 1: |
COMMON("\n\tstosb"); |
return s; |
case 2: |
COMMON("\n\tstosw"); |
return s; |
default: |
COMMON("\n\tstosw\n\tstosb"); |
return s; |
} |
} |
#undef COMMON |
} |
#define __constant_c_x_memset(s, c, count) \ |
(__builtin_constant_p(count) \ |
? __constant_c_and_count_memset((s), (c), (count)) \ |
: __constant_c_memset((s), (c), (count))) |
#define __memset(s, c, count) \ |
(__builtin_constant_p(count) \ |
? __constant_count_memset((s), (c), (count)) \ |
: __memset_generic((s), (c), (count))) |
#define __HAVE_ARCH_MEMSET |
#if (__GNUC__ >= 4) |
#define memset(s, c, count) __builtin_memset(s, c, count) |
#else |
#define memset(s, c, count) \ |
(__builtin_constant_p(c) \ |
? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \ |
(count)) \ |
: __memset((s), (c), (count))) |
#endif |
/* |
* find the first occurrence of byte 'c', or 1 past the area if none |
*/ |
#define __HAVE_ARCH_MEMSCAN |
extern void *memscan(void *addr, int c, size_t size); |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_STRING_32_H */ |
/drivers/include/asm/swab.h |
---|
0,0 → 1,61 |
#ifndef _ASM_X86_SWAB_H |
#define _ASM_X86_SWAB_H |
#include <linux/types.h> |
#include <linux/compiler.h> |
static inline __attribute_const__ __u32 __arch_swab32(__u32 val) |
{ |
#ifdef __i386__ |
# ifdef CONFIG_X86_BSWAP |
asm("bswap %0" : "=r" (val) : "0" (val)); |
# else |
asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ |
"rorl $16,%0\n\t" /* swap words */ |
"xchgb %b0,%h0" /* swap higher bytes */ |
: "=q" (val) |
: "0" (val)); |
# endif |
#else /* __i386__ */ |
asm("bswapl %0" |
: "=r" (val) |
: "0" (val)); |
#endif |
return val; |
} |
#define __arch_swab32 __arch_swab32 |
static inline __attribute_const__ __u64 __arch_swab64(__u64 val) |
{ |
#ifdef __i386__ |
union { |
struct { |
__u32 a; |
__u32 b; |
} s; |
__u64 u; |
} v; |
v.u = val; |
# ifdef CONFIG_X86_BSWAP |
asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" |
: "=r" (v.s.a), "=r" (v.s.b) |
: "0" (v.s.a), "1" (v.s.b)); |
# else |
v.s.a = __arch_swab32(v.s.a); |
v.s.b = __arch_swab32(v.s.b); |
asm("xchgl %0,%1" |
: "=r" (v.s.a), "=r" (v.s.b) |
: "0" (v.s.a), "1" (v.s.b)); |
# endif |
return v.u; |
#else /* __i386__ */ |
asm("bswapq %0" |
: "=r" (val) |
: "0" (val)); |
return val; |
#endif |
} |
#define __arch_swab64 __arch_swab64 |
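/*
 * Worked example (illustrative):
 *	__arch_swab32(0x12345678)            == 0x78563412
 *	__arch_swab64(0x1122334455667788ULL) == 0x8877665544332211ULL
 * e.g. for converting big-endian on-wire values on this little-endian CPU.
 */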
#endif /* _ASM_X86_SWAB_H */ |
/drivers/include/asm/types.h |
---|
0,0 → 1,16 |
#ifndef _ASM_X86_TYPES_H |
#define _ASM_X86_TYPES_H |
#define dma_addr_t dma_addr_t |
#include <asm-generic/types.h> |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
typedef u64 dma64_addr_t; |
#endif /* __ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_TYPES_H */ |
/drivers/include/asm/unaligned.h |
---|
0,0 → 1,14 |
#ifndef _ASM_X86_UNALIGNED_H |
#define _ASM_X86_UNALIGNED_H |
/* |
* The x86 can do unaligned accesses itself. |
*/ |
#include <linux/unaligned/access_ok.h> |
#include <linux/unaligned/generic.h> |
#define get_unaligned __get_unaligned_le |
#define put_unaligned __put_unaligned_le |
#endif /* _ASM_X86_UNALIGNED_H */ |
/drivers/include/asm/x86_init.h |
---|
0,0 → 1,216 |
#ifndef _ASM_X86_PLATFORM_H |
#define _ASM_X86_PLATFORM_H |
#include <asm/pgtable_types.h> |
//#include <asm/bootparam.h> |
struct mpc_bus; |
struct mpc_cpu; |
struct mpc_table; |
struct cpuinfo_x86; |
/** |
* struct x86_init_mpparse - platform specific mpparse ops |
* @mpc_record: platform specific mpc record accounting |
* @setup_ioapic_ids: platform specific ioapic id override |
* @mpc_apic_id: platform specific mpc apic id assignment |
* @smp_read_mpc_oem: platform specific oem mpc table setup |
* @mpc_oem_pci_bus: platform specific pci bus setup (default NULL) |
* @mpc_oem_bus_info: platform specific mpc bus info |
* @find_smp_config: find the smp configuration |
* @get_smp_config: get the smp configuration |
*/ |
struct x86_init_mpparse { |
void (*mpc_record)(unsigned int mode); |
void (*setup_ioapic_ids)(void); |
int (*mpc_apic_id)(struct mpc_cpu *m); |
void (*smp_read_mpc_oem)(struct mpc_table *mpc); |
void (*mpc_oem_pci_bus)(struct mpc_bus *m); |
void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name); |
void (*find_smp_config)(void); |
void (*get_smp_config)(unsigned int early); |
}; |
/** |
* struct x86_init_resources - platform specific resource related ops |
* @probe_roms: probe BIOS roms |
* @reserve_resources: reserve the standard resources for the |
* platform |
* @memory_setup: platform specific memory setup |
* |
*/ |
struct x86_init_resources { |
void (*probe_roms)(void); |
void (*reserve_resources)(void); |
char *(*memory_setup)(void); |
}; |
/** |
* struct x86_init_irqs - platform specific interrupt setup |
* @pre_vector_init: init code to run before interrupt vectors |
* are set up. |
* @intr_init: interrupt init code |
* @trap_init: platform specific trap setup |
*/ |
struct x86_init_irqs { |
void (*pre_vector_init)(void); |
void (*intr_init)(void); |
void (*trap_init)(void); |
}; |
/** |
* struct x86_init_oem - oem platform specific customizing functions |
* @arch_setup: platform specific architecture setup
* @banner: print a platform specific banner |
*/ |
struct x86_init_oem { |
void (*arch_setup)(void); |
void (*banner)(void); |
}; |
/** |
* struct x86_init_paging - platform specific paging functions |
* @pagetable_init: platform specific paging initialization call to setup |
* the kernel pagetables and prepare accessor functions.
* Callback must call paging_init(). Called once after the |
* direct mapping for phys memory is available. |
*/ |
struct x86_init_paging { |
void (*pagetable_init)(void); |
}; |
/** |
* struct x86_init_timers - platform specific timer setup |
* @setup_percpu_clockev: set up the per cpu clock event device for the
* boot cpu |
* @tsc_pre_init: platform function called before TSC init |
* @timer_init: initialize the platform timer (default PIT/HPET) |
* @wallclock_init: init the wallclock device |
*/ |
struct x86_init_timers { |
void (*setup_percpu_clockev)(void); |
void (*tsc_pre_init)(void); |
void (*timer_init)(void); |
void (*wallclock_init)(void); |
}; |
/** |
* struct x86_init_iommu - platform specific iommu setup |
* @iommu_init: platform specific iommu setup |
*/ |
struct x86_init_iommu { |
int (*iommu_init)(void); |
}; |
/** |
* struct x86_init_pci - platform specific pci init functions |
* @arch_init: platform specific pci arch init call |
* @init: platform specific pci subsystem init |
* @init_irq: platform specific pci irq init |
* @fixup_irqs: platform specific pci irq fixup |
*/ |
struct x86_init_pci { |
int (*arch_init)(void); |
int (*init)(void); |
void (*init_irq)(void); |
void (*fixup_irqs)(void); |
}; |
/** |
* struct x86_init_ops - functions for platform specific setup |
* |
*/ |
struct x86_init_ops { |
struct x86_init_resources resources; |
struct x86_init_mpparse mpparse; |
struct x86_init_irqs irqs; |
struct x86_init_oem oem; |
struct x86_init_paging paging; |
struct x86_init_timers timers; |
struct x86_init_iommu iommu; |
struct x86_init_pci pci; |
}; |
/** |
* struct x86_cpuinit_ops - platform specific cpu hotplug setups |
* @setup_percpu_clockev: set up the per cpu clock event device |
* @early_percpu_clock_init: early init of the per cpu clock event device |
*/ |
struct x86_cpuinit_ops { |
void (*setup_percpu_clockev)(void); |
void (*early_percpu_clock_init)(void); |
void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node); |
}; |
struct timespec; |
/** |
* struct x86_platform_ops - platform specific runtime functions |
* @calibrate_tsc: calibrate TSC |
* @get_wallclock: get time from HW clock like RTC etc. |
* @set_wallclock: set time back to HW clock |
* @is_untracked_pat_range: exclude from PAT logic
* @nmi_init: enable NMI on cpus
* @i8042_detect: pre-detect if i8042 controller exists
* @save_sched_clock_state: save state for sched_clock() on suspend
* @restore_sched_clock_state: restore state for sched_clock() on resume
* @apic_post_init: adjust apic if needed
*/ |
struct x86_platform_ops { |
unsigned long (*calibrate_tsc)(void); |
void (*get_wallclock)(struct timespec *ts); |
int (*set_wallclock)(const struct timespec *ts); |
void (*iommu_shutdown)(void); |
bool (*is_untracked_pat_range)(u64 start, u64 end); |
void (*nmi_init)(void); |
unsigned char (*get_nmi_reason)(void); |
int (*i8042_detect)(void); |
void (*save_sched_clock_state)(void); |
void (*restore_sched_clock_state)(void); |
void (*apic_post_init)(void); |
}; |
struct pci_dev; |
struct msi_msg; |
struct x86_msi_ops { |
int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); |
void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq, |
unsigned int dest, struct msi_msg *msg, |
u8 hpet_id); |
void (*teardown_msi_irq)(unsigned int irq); |
void (*teardown_msi_irqs)(struct pci_dev *dev); |
void (*restore_msi_irqs)(struct pci_dev *dev); |
int (*setup_hpet_msi)(unsigned int irq, unsigned int id); |
}; |
struct IO_APIC_route_entry; |
struct io_apic_irq_attr; |
struct irq_data; |
struct cpumask; |
struct x86_io_apic_ops { |
void (*init) (void); |
unsigned int (*read) (unsigned int apic, unsigned int reg); |
void (*write) (unsigned int apic, unsigned int reg, unsigned int value); |
void (*modify) (unsigned int apic, unsigned int reg, unsigned int value); |
void (*disable)(void); |
void (*print_entries)(unsigned int apic, unsigned int nr_entries); |
int (*set_affinity)(struct irq_data *data, |
const struct cpumask *mask, |
bool force); |
int (*setup_entry)(int irq, struct IO_APIC_route_entry *entry, |
unsigned int destination, int vector, |
struct io_apic_irq_attr *attr); |
void (*eoi_ioapic_pin)(int apic, int pin, int vector); |
}; |
extern struct x86_init_ops x86_init; |
extern struct x86_cpuinit_ops x86_cpuinit; |
extern struct x86_platform_ops x86_platform; |
extern struct x86_msi_ops x86_msi; |
extern struct x86_io_apic_ops x86_io_apic_ops; |
extern void x86_init_noop(void); |
extern void x86_init_uint_noop(unsigned int unused); |
#endif |
/drivers/include/asm-generic/atomic-long.h |
---|
0,0 → 1,258 |
#ifndef _ASM_GENERIC_ATOMIC_LONG_H |
#define _ASM_GENERIC_ATOMIC_LONG_H |
/* |
* Copyright (C) 2005 Silicon Graphics, Inc. |
* Christoph Lameter |
* |
* Allows providing arch-independent atomic definitions without the need to
* edit all arch-specific atomic.h files.
*/ |
#include <asm/types.h> |
/* |
* Support for atomic_long_t
* |
* Casts for parameters are avoided for existing atomic functions in order to |
* avoid issues with cast-as-lval under gcc 4.x and other limitations that the |
* macros of a platform may have. |
*/ |
#if BITS_PER_LONG == 64 |
typedef atomic64_t atomic_long_t; |
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) |
static inline long atomic_long_read(atomic_long_t *l) |
{ |
atomic64_t *v = (atomic64_t *)l; |
return (long)atomic64_read(v); |
} |
static inline void atomic_long_set(atomic_long_t *l, long i) |
{ |
atomic64_t *v = (atomic64_t *)l; |
atomic64_set(v, i); |
} |
static inline void atomic_long_inc(atomic_long_t *l) |
{ |
atomic64_t *v = (atomic64_t *)l; |
atomic64_inc(v); |
} |
static inline void atomic_long_dec(atomic_long_t *l) |
{ |
atomic64_t *v = (atomic64_t *)l; |
atomic64_dec(v); |
} |
static inline void atomic_long_add(long i, atomic_long_t *l) |
{ |
atomic64_t *v = (atomic64_t *)l; |
atomic64_add(i, v); |
} |
static inline void atomic_long_sub(long i, atomic_long_t *l) |
{ |
atomic64_t *v = (atomic64_t *)l; |
atomic64_sub(i, v); |
} |
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) |
{ |
atomic64_t *v = (atomic64_t *)l; |
return atomic64_sub_and_test(i, v); |
} |
static inline int atomic_long_dec_and_test(atomic_long_t *l) |
{ |
atomic64_t *v = (atomic64_t *)l; |
return atomic64_dec_and_test(v); |
} |
static inline int atomic_long_inc_and_test(atomic_long_t *l) |
{ |
atomic64_t *v = (atomic64_t *)l; |
return atomic64_inc_and_test(v); |
} |
static inline int atomic_long_add_negative(long i, atomic_long_t *l) |
{ |
atomic64_t *v = (atomic64_t *)l; |
return atomic64_add_negative(i, v); |
} |
static inline long atomic_long_add_return(long i, atomic_long_t *l) |
{ |
atomic64_t *v = (atomic64_t *)l; |
return (long)atomic64_add_return(i, v); |
} |
static inline long atomic_long_sub_return(long i, atomic_long_t *l) |
{ |
atomic64_t *v = (atomic64_t *)l; |
return (long)atomic64_sub_return(i, v); |
} |
static inline long atomic_long_inc_return(atomic_long_t *l) |
{ |
atomic64_t *v = (atomic64_t *)l; |
return (long)atomic64_inc_return(v); |
} |
static inline long atomic_long_dec_return(atomic_long_t *l) |
{ |
atomic64_t *v = (atomic64_t *)l; |
return (long)atomic64_dec_return(v); |
} |
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) |
{ |
atomic64_t *v = (atomic64_t *)l; |
return (long)atomic64_add_unless(v, a, u); |
} |
#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l)) |
#define atomic_long_cmpxchg(l, old, new) \ |
(atomic64_cmpxchg((atomic64_t *)(l), (old), (new))) |
#define atomic_long_xchg(v, new) \ |
(atomic64_xchg((atomic64_t *)(v), (new))) |
#else /* BITS_PER_LONG == 64 */ |
typedef atomic_t atomic_long_t; |
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) |
static inline long atomic_long_read(atomic_long_t *l) |
{ |
atomic_t *v = (atomic_t *)l; |
return (long)atomic_read(v); |
} |
static inline void atomic_long_set(atomic_long_t *l, long i) |
{ |
atomic_t *v = (atomic_t *)l; |
atomic_set(v, i); |
} |
static inline void atomic_long_inc(atomic_long_t *l) |
{ |
atomic_t *v = (atomic_t *)l; |
atomic_inc(v); |
} |
static inline void atomic_long_dec(atomic_long_t *l) |
{ |
atomic_t *v = (atomic_t *)l; |
atomic_dec(v); |
} |
static inline void atomic_long_add(long i, atomic_long_t *l) |
{ |
atomic_t *v = (atomic_t *)l; |
atomic_add(i, v); |
} |
static inline void atomic_long_sub(long i, atomic_long_t *l) |
{ |
atomic_t *v = (atomic_t *)l; |
atomic_sub(i, v); |
} |
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) |
{ |
atomic_t *v = (atomic_t *)l; |
return atomic_sub_and_test(i, v); |
} |
static inline int atomic_long_dec_and_test(atomic_long_t *l) |
{ |
atomic_t *v = (atomic_t *)l; |
return atomic_dec_and_test(v); |
} |
static inline int atomic_long_inc_and_test(atomic_long_t *l) |
{ |
atomic_t *v = (atomic_t *)l; |
return atomic_inc_and_test(v); |
} |
static inline int atomic_long_add_negative(long i, atomic_long_t *l) |
{ |
atomic_t *v = (atomic_t *)l; |
return atomic_add_negative(i, v); |
} |
static inline long atomic_long_add_return(long i, atomic_long_t *l) |
{ |
atomic_t *v = (atomic_t *)l; |
return (long)atomic_add_return(i, v); |
} |
static inline long atomic_long_sub_return(long i, atomic_long_t *l) |
{ |
atomic_t *v = (atomic_t *)l; |
return (long)atomic_sub_return(i, v); |
} |
static inline long atomic_long_inc_return(atomic_long_t *l) |
{ |
atomic_t *v = (atomic_t *)l; |
return (long)atomic_inc_return(v); |
} |
static inline long atomic_long_dec_return(atomic_long_t *l) |
{ |
atomic_t *v = (atomic_t *)l; |
return (long)atomic_dec_return(v); |
} |
static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) |
{ |
atomic_t *v = (atomic_t *)l; |
return (long)atomic_add_unless(v, a, u); |
} |
#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) |
#define atomic_long_cmpxchg(l, old, new) \ |
(atomic_cmpxchg((atomic_t *)(l), (old), (new))) |
#define atomic_long_xchg(v, new) \ |
(atomic_xchg((atomic_t *)(v), (new))) |
#endif /* BITS_PER_LONG == 64 */ |
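/*
 * Usage sketch (illustrative): atomic_long_t keeps a long-sized counter
 * portable across 32- and 64-bit builds ('limit' and 'throttle' are
 * placeholders):
 *
 *	static atomic_long_t nr_requests = ATOMIC_LONG_INIT(0);
 *
 *	atomic_long_inc(&nr_requests);
 *	if (atomic_long_read(&nr_requests) > limit)
 *		throttle();
 */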
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ |
/drivers/include/asm-generic/bitops/const_hweight.h |
---|
0,0 → 1,43 |
#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ |
#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ |
/* |
* Compile time versions of __arch_hweightN() |
*/ |
#define __const_hweight8(w) \ |
((unsigned int) \ |
((!!((w) & (1ULL << 0))) + \ |
(!!((w) & (1ULL << 1))) + \ |
(!!((w) & (1ULL << 2))) + \ |
(!!((w) & (1ULL << 3))) + \ |
(!!((w) & (1ULL << 4))) + \ |
(!!((w) & (1ULL << 5))) + \ |
(!!((w) & (1ULL << 6))) + \ |
(!!((w) & (1ULL << 7))))) |
#define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8 )) |
#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16)) |
#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32)) |
/* |
* Generic interface. |
*/ |
#define hweight8(w) (__builtin_constant_p(w) ? __const_hweight8(w) : __arch_hweight8(w)) |
#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w)) |
#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w)) |
#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w)) |
/* |
* Interface for known constant arguments |
*/ |
#define HWEIGHT8(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w)) |
#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w)) |
#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w)) |
#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w)) |
/* |
* Type invariant interface to the compile time constant hweight functions. |
*/ |
#define HWEIGHT(w) HWEIGHT64((u64)w) |
#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */ |
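To illustrate the dispatch: a constant argument folds entirely at compile time, while a runtime value falls through to the architecture helper. A sketch (names are illustrative): |
/* 0xF0 has four set bits; HWEIGHT8 rejects non-constants at build time */ |
static const unsigned int nibble_bits = HWEIGHT8(0xF0); /* == 4 */ |
static unsigned int count_bits(unsigned int w) |
{ |
	return hweight32(w); /* non-constant: calls __arch_hweight32(w) */ |
} |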
/drivers/include/asm-generic/bitops/ext2-atomic-setbit.h |
---|
0,0 → 1,11 |
#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ |
#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ |
/* |
* ext2 atomic bitops implemented in terms of the generic atomic bitops |
*/ |
#define ext2_set_bit_atomic(l, nr, addr) test_and_set_bit_le(nr, addr) |
#define ext2_clear_bit_atomic(l, nr, addr) test_and_clear_bit_le(nr, addr) |
#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ */ |
/drivers/include/asm-generic/bitops/ext2-non-atomic.h |
---|
0,0 → 1,20 |
#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ |
#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ |
#include <asm-generic/bitops/le.h> |
#define ext2_set_bit(nr,addr) \ |
generic___test_and_set_le_bit((nr),(unsigned long *)(addr)) |
#define ext2_clear_bit(nr,addr) \ |
generic___test_and_clear_le_bit((nr),(unsigned long *)(addr)) |
#define ext2_test_bit(nr,addr) \ |
generic_test_le_bit((nr),(unsigned long *)(addr)) |
#define ext2_find_first_zero_bit(addr, size) \ |
generic_find_first_zero_le_bit((unsigned long *)(addr), (size)) |
#define ext2_find_next_zero_bit(addr, size, off) \ |
generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) |
#define ext2_find_next_bit(addr, size, off) \ |
generic_find_next_le_bit((unsigned long *)(addr), (size), (off)) |
#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */ |
/drivers/include/asm-generic/bitops/find.h |
---|
0,0 → 1,62 |
#ifndef _ASM_GENERIC_BITOPS_FIND_H_ |
#define _ASM_GENERIC_BITOPS_FIND_H_ |
#ifndef find_next_bit |
/** |
* find_next_bit - find the next set bit in a memory region |
* @addr: The address to base the search on |
* @offset: The bitnumber to start searching at |
* @size: The bitmap size in bits |
* |
* Returns the bit number for the next set bit. |
* If no bits are set, returns @size. |
*/ |
extern unsigned long find_next_bit(const unsigned long *addr, unsigned long |
size, unsigned long offset); |
#endif |
#ifndef find_next_zero_bit |
/** |
* find_next_zero_bit - find the next cleared bit in a memory region |
* @addr: The address to base the search on |
* @offset: The bitnumber to start searching at |
* @size: The bitmap size in bits |
* |
* Returns the bit number of the next zero bit. |
* If no bits are zero, returns @size. |
*/ |
extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned |
long size, unsigned long offset); |
#endif |
#ifdef CONFIG_GENERIC_FIND_FIRST_BIT |
/** |
* find_first_bit - find the first set bit in a memory region |
* @addr: The address to start the search at |
* @size: The maximum number of bits to search |
* |
* Returns the bit number of the first set bit. |
* If no bits are set, returns @size. |
*/ |
extern unsigned long find_first_bit(const unsigned long *addr, |
unsigned long size); |
/** |
* find_first_zero_bit - find the first cleared bit in a memory region |
* @addr: The address to start the search at |
* @size: The maximum number of bits to search |
* |
* Returns the bit number of the first cleared bit. |
* If no bits are zero, returns @size. |
*/ |
extern unsigned long find_first_zero_bit(const unsigned long *addr, |
unsigned long size); |
#else /* CONFIG_GENERIC_FIND_FIRST_BIT */ |
#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) |
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) |
#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ |
#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ |
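Together these give the usual idiom for walking every set bit of a bitmap. A sketch using the DDK's dbgprintf() (the helper itself is hypothetical): |
static void dump_set_bits(const unsigned long *map, unsigned long nbits) |
{ |
	unsigned long bit; |
	for (bit = find_first_bit(map, nbits); bit < nbits; |
	     bit = find_next_bit(map, nbits, bit + 1)) |
		dbgprintf("bit %lu set\n", bit); |
} |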
/drivers/include/asm-generic/bitops/fls64.h |
---|
0,0 → 1,36 |
#ifndef _ASM_GENERIC_BITOPS_FLS64_H_ |
#define _ASM_GENERIC_BITOPS_FLS64_H_ |
#include <asm/types.h> |
/** |
* fls64 - find last set bit in a 64-bit word |
* @x: the word to search |
* |
* This is defined in a similar way to the libc and compiler builtin |
* ffsll, but returns the position of the most significant set bit. |
* |
* fls64(value) returns 0 if value is 0 or the position of the last |
* set bit if value is nonzero. The last (most significant) bit is |
* at position 64. |
*/ |
#if BITS_PER_LONG == 32 |
static __always_inline int fls64(__u64 x) |
{ |
__u32 h = x >> 32; |
if (h) |
return fls(h) + 32; |
return fls(x); |
} |
#elif BITS_PER_LONG == 64 |
static __always_inline int fls64(__u64 x) |
{ |
if (x == 0) |
return 0; |
return __fls(x) + 1; |
} |
#else |
#error BITS_PER_LONG not 32 or 64 |
#endif |
#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ |
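A few worked values, as a sanity check of the 1-based convention described above (demo function is illustrative): |
static void fls64_examples(void) |
{ |
	int a = fls64(0);                     /* 0: no bit set           */ |
	int b = fls64(1);                     /* 1: bit 0 is the MSB set */ |
	int c = fls64(0x8000000000000000ULL); /* 64: top bit set         */ |
	(void)a; (void)b; (void)c; |
} |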
/drivers/include/asm-generic/bitops/hweight.h |
---|
0,0 → 1,7 |
#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ |
#define _ASM_GENERIC_BITOPS_HWEIGHT_H_ |
#include <asm-generic/bitops/arch_hweight.h> |
#include <asm-generic/bitops/const_hweight.h> |
#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ |
/drivers/include/asm-generic/bitops/le.h |
---|
0,0 → 1,97 |
#ifndef _ASM_GENERIC_BITOPS_LE_H_ |
#define _ASM_GENERIC_BITOPS_LE_H_ |
#include <asm/types.h> |
#include <asm/byteorder.h> |
#if defined(__LITTLE_ENDIAN) |
#define BITOP_LE_SWIZZLE 0 |
static inline unsigned long find_next_zero_bit_le(const void *addr, |
unsigned long size, unsigned long offset) |
{ |
return find_next_zero_bit(addr, size, offset); |
} |
static inline unsigned long find_next_bit_le(const void *addr, |
unsigned long size, unsigned long offset) |
{ |
return find_next_bit(addr, size, offset); |
} |
static inline unsigned long find_first_zero_bit_le(const void *addr, |
unsigned long size) |
{ |
return find_first_zero_bit(addr, size); |
} |
#elif defined(__BIG_ENDIAN) |
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) |
#ifndef find_next_zero_bit_le |
extern unsigned long find_next_zero_bit_le(const void *addr, |
unsigned long size, unsigned long offset); |
#endif |
#ifndef find_next_bit_le |
extern unsigned long find_next_bit_le(const void *addr, |
unsigned long size, unsigned long offset); |
#endif |
#ifndef find_first_zero_bit_le |
#define find_first_zero_bit_le(addr, size) \ |
find_next_zero_bit_le((addr), (size), 0) |
#endif |
#else |
#error "Please fix <asm/byteorder.h>" |
#endif |
static inline int test_bit_le(int nr, const void *addr) |
{ |
return test_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
} |
static inline void set_bit_le(int nr, void *addr) |
{ |
set_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
} |
static inline void clear_bit_le(int nr, void *addr) |
{ |
clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
} |
static inline void __set_bit_le(int nr, void *addr) |
{ |
__set_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
} |
static inline void __clear_bit_le(int nr, void *addr) |
{ |
__clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
} |
static inline int test_and_set_bit_le(int nr, void *addr) |
{ |
return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
} |
static inline int test_and_clear_bit_le(int nr, void *addr) |
{ |
return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
} |
static inline int __test_and_set_bit_le(int nr, void *addr) |
{ |
return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
} |
static inline int __test_and_clear_bit_le(int nr, void *addr) |
{ |
return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
} |
#endif /* _ASM_GENERIC_BITOPS_LE_H_ */ |
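On little-endian targets (the only case this CONFIG_X86_32 port builds) the swizzle is zero and the *_le helpers are pass-throughs. For reference, on a 32-bit big-endian machine BITOP_LE_SWIZZLE would be 24, as the comments in this sketch trace out: |
static void le_swizzle_demo(unsigned long *addr) |
{ |
	/* big-endian, 32-bit: XOR with 24 re-addresses the byte within |
	 * the word while keeping the bit position inside the byte */ |
	set_bit_le(0, addr); /* == set_bit(0 ^ 24, addr) == set_bit(24, addr) */ |
	set_bit_le(9, addr); /* == set_bit(9 ^ 24, addr) == set_bit(17, addr) */ |
} |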
/drivers/include/asm-generic/bitops/minix.h |
---|
0,0 → 1,15 |
#ifndef _ASM_GENERIC_BITOPS_MINIX_H_ |
#define _ASM_GENERIC_BITOPS_MINIX_H_ |
#define minix_test_and_set_bit(nr,addr) \ |
__test_and_set_bit((nr),(unsigned long *)(addr)) |
#define minix_set_bit(nr,addr) \ |
__set_bit((nr),(unsigned long *)(addr)) |
#define minix_test_and_clear_bit(nr,addr) \ |
__test_and_clear_bit((nr),(unsigned long *)(addr)) |
#define minix_test_bit(nr,addr) \ |
test_bit((nr),(unsigned long *)(addr)) |
#define minix_find_first_zero_bit(addr,size) \ |
find_first_zero_bit((unsigned long *)(addr),(size)) |
#endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */ |
/drivers/include/asm-generic/bitops/sched.h |
---|
0,0 → 1,31 |
#ifndef _ASM_GENERIC_BITOPS_SCHED_H_ |
#define _ASM_GENERIC_BITOPS_SCHED_H_ |
#include <linux/compiler.h> /* unlikely() */ |
#include <asm/types.h> |
/* |
* Every architecture must define this function. It's the fastest |
* way of searching a 100-bit bitmap. It's guaranteed that at least |
* one of the 100 bits is set, so the final __ffs() is well-defined. |
*/ |
static inline int sched_find_first_bit(const unsigned long *b) |
{ |
#if BITS_PER_LONG == 64 |
if (b[0]) |
return __ffs(b[0]); |
return __ffs(b[1]) + 64; |
#elif BITS_PER_LONG == 32 |
if (b[0]) |
return __ffs(b[0]); |
if (b[1]) |
return __ffs(b[1]) + 32; |
if (b[2]) |
return __ffs(b[2]) + 64; |
return __ffs(b[3]) + 96; |
#else |
#error BITS_PER_LONG not defined |
#endif |
} |
#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */ |
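A worked example on this 32-bit target, where the 100-bit map spans four words (the demo function is illustrative): |
static int demo_highest_prio(void) |
{ |
	unsigned long prio[4] = { 0, 0, 0x10, 0 }; /* only bit 68 is set */ |
	return sched_find_first_bit(prio);         /* __ffs(0x10) + 64 == 68 */ |
} |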
/drivers/include/asm-generic/bitsperlong.h |
---|
0,0 → 1,25 |
#ifndef __ASM_GENERIC_BITS_PER_LONG |
#define __ASM_GENERIC_BITS_PER_LONG |
#include <uapi/asm-generic/bitsperlong.h> |
#ifdef CONFIG_64BIT |
#define BITS_PER_LONG 64 |
#else |
#define BITS_PER_LONG 32 |
#endif /* CONFIG_64BIT */ |
/* |
* FIXME: The check currently breaks x86-64 build, so it's |
* temporarily disabled. Please fix x86-64 and reenable |
*/ |
#if 0 && BITS_PER_LONG != __BITS_PER_LONG |
#error Inconsistent word size. Check asm/bitsperlong.h |
#endif |
#ifndef BITS_PER_LONG_LONG |
#define BITS_PER_LONG_LONG 64 |
#endif |
#endif /* __ASM_GENERIC_BITS_PER_LONG */ |
/drivers/include/asm-generic/cacheflush.h |
---|
0,0 → 1,34 |
#ifndef __ASM_CACHEFLUSH_H |
#define __ASM_CACHEFLUSH_H |
/* Keep includes the same across arches. */ |
#include <linux/mm.h> |
/* |
* The cache doesn't need to be flushed when TLB entries change, |
* because the cache is mapped to physical memory, not virtual memory. |
*/ |
#define flush_cache_all() do { } while (0) |
#define flush_cache_mm(mm) do { } while (0) |
#define flush_cache_dup_mm(mm) do { } while (0) |
#define flush_cache_range(vma, start, end) do { } while (0) |
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 |
#define flush_dcache_page(page) do { } while (0) |
#define flush_dcache_mmap_lock(mapping) do { } while (0) |
#define flush_dcache_mmap_unlock(mapping) do { } while (0) |
#define flush_icache_range(start, end) do { } while (0) |
#define flush_icache_page(vma,pg) do { } while (0) |
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0) |
#define flush_cache_vmap(start, end) do { } while (0) |
#define flush_cache_vunmap(start, end) do { } while (0) |
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ |
do { \ |
memcpy(dst, src, len); \ |
flush_icache_user_range(vma, page, vaddr, len); \ |
} while (0) |
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ |
memcpy(dst, src, len) |
#endif /* __ASM_CACHEFLUSH_H */ |
/drivers/include/asm-generic/delay.h |
---|
0,0 → 1,44 |
#ifndef __ASM_GENERIC_DELAY_H |
#define __ASM_GENERIC_DELAY_H |
/* Undefined functions to get compile-time errors */ |
extern void __bad_udelay(void); |
extern void __bad_ndelay(void); |
extern void __udelay(unsigned long usecs); |
extern void __ndelay(unsigned long nsecs); |
extern void __const_udelay(unsigned long xloops); |
extern void __delay(unsigned long loops); |
/* |
* The weird n/20000 thing suppresses a "comparison is always false due to |
* limited range of data type" warning with non-const 8-bit arguments. |
*/ |
/* 0x10c7 is 2**32 / 1000000 (rounded up) */ |
#define udelay(n) \ |
({ \ |
if (__builtin_constant_p(n)) { \ |
if ((n) / 20000 >= 1) \ |
__bad_udelay(); \ |
else \ |
__const_udelay((n) * 0x10c7ul); \ |
} else { \ |
__udelay(n); \ |
} \ |
}) |
/* 0x5 is 2**32 / 1000000000 (rounded up) */ |
#define ndelay(n) \ |
({ \ |
if (__builtin_constant_p(n)) { \ |
if ((n) / 20000 >= 1) \ |
__bad_ndelay(); \ |
else \ |
__const_udelay((n) * 5ul); \ |
} else { \ |
__ndelay(n); \ |
} \ |
}) |
#endif /* __ASM_GENERIC_DELAY_H */ |
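Usage sketch: a literal below the 20 ms cap compiles to the scaled __const_udelay() path, a variable goes through __udelay(), and a constant of 20000 or more draws the deliberate __bad_udelay() link error (the function below is illustrative): |
static void settle_hw(unsigned long n) |
{ |
	udelay(10); /* constant: __const_udelay(10 * 0x10c7ul)      */ |
	udelay(n);  /* non-constant: __udelay(n) at run time        */ |
	/* udelay(25000) would fail to link via __bad_udelay()      */ |
} |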
/drivers/include/asm-generic/getorder.h |
---|
0,0 → 1,61 |
#ifndef __ASM_GENERIC_GETORDER_H |
#define __ASM_GENERIC_GETORDER_H |
#ifndef __ASSEMBLY__ |
#include <linux/compiler.h> |
#include <linux/log2.h> |
/* |
* Runtime evaluation of get_order() |
*/ |
static inline __attribute_const__ |
int __get_order(unsigned long size) |
{ |
int order; |
size--; |
size >>= PAGE_SHIFT; |
#if BITS_PER_LONG == 32 |
order = fls(size); |
#else |
order = fls64(size); |
#endif |
return order; |
} |
/** |
* get_order - Determine the allocation order of a memory size |
* @size: The size for which to get the order |
* |
* Determine the allocation order of a particular sized block of memory. This |
* is on a logarithmic scale, where: |
* |
* 0 -> 2^0 * PAGE_SIZE and below |
* 1 -> 2^1 * PAGE_SIZE to 2^0 * PAGE_SIZE + 1 |
* 2 -> 2^2 * PAGE_SIZE to 2^1 * PAGE_SIZE + 1 |
* 3 -> 2^3 * PAGE_SIZE to 2^2 * PAGE_SIZE + 1 |
* 4 -> 2^4 * PAGE_SIZE to 2^3 * PAGE_SIZE + 1 |
* ... |
* |
* The order returned is used to find the smallest allocation granule required |
* to hold an object of the specified size. |
* |
* The result is undefined if the size is 0. |
* |
* This function may be used to initialise variables with compile time |
* evaluations of constants. |
*/ |
#define get_order(n) \ |
( \ |
__builtin_constant_p(n) ? ( \ |
((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \ |
(((n) < (1UL << PAGE_SHIFT)) ? 0 : \ |
ilog2((n) - 1) - PAGE_SHIFT + 1) \ |
) : \ |
__get_order(n) \ |
) |
#endif /* __ASSEMBLY__ */ |
#endif /* __ASM_GENERIC_GETORDER_H */ |
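Worked values, assuming the usual 4 KiB pages (PAGE_SHIFT == 12; the function is illustrative): |
static void get_order_examples(void) |
{ |
	int o0 = get_order(1);             /* 0: fits one page        */ |
	int o1 = get_order(PAGE_SIZE);     /* 0: exactly one page     */ |
	int o2 = get_order(PAGE_SIZE + 1); /* 1: needs two pages      */ |
	int o3 = get_order(5 * PAGE_SIZE); /* 3: rounds up to 8 pages */ |
	(void)o0; (void)o1; (void)o2; (void)o3; |
} |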
/drivers/include/asm-generic/int-ll64.h |
---|
0,0 → 1,49 |
/* |
* asm-generic/int-ll64.h |
* |
* Integer declarations for architectures which use "long long" |
* for 64-bit types. |
*/ |
#ifndef _ASM_GENERIC_INT_LL64_H |
#define _ASM_GENERIC_INT_LL64_H |
#include <uapi/asm-generic/int-ll64.h> |
#ifndef __ASSEMBLY__ |
typedef signed char s8; |
typedef unsigned char u8; |
typedef signed short s16; |
typedef unsigned short u16; |
typedef signed int s32; |
typedef unsigned int u32; |
typedef signed long long s64; |
typedef unsigned long long u64; |
#define S8_C(x) x |
#define U8_C(x) x ## U |
#define S16_C(x) x |
#define U16_C(x) x ## U |
#define S32_C(x) x |
#define U32_C(x) x ## U |
#define S64_C(x) x ## LL |
#define U64_C(x) x ## ULL |
#else /* __ASSEMBLY__ */ |
#define S8_C(x) x |
#define U8_C(x) x |
#define S16_C(x) x |
#define U16_C(x) x |
#define S32_C(x) x |
#define U32_C(x) x |
#define S64_C(x) x |
#define U64_C(x) x |
#endif /* __ASSEMBLY__ */ |
#endif /* _ASM_GENERIC_INT_LL64_H */ |
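The guarantee these typedefs make is purely about width. A classic negative-array-size check (a sketch, not part of the header) would catch any drift: |
static char u32_is_4_bytes[(sizeof(u32) == 4) ? 1 : -1]; |
static char u64_is_8_bytes[(sizeof(u64) == 8) ? 1 : -1]; |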
/drivers/include/asm-generic/memory_model.h |
---|
0,0 → 1,77 |
#ifndef __ASM_MEMORY_MODEL_H |
#define __ASM_MEMORY_MODEL_H |
#ifndef __ASSEMBLY__ |
#if defined(CONFIG_FLATMEM) |
#ifndef ARCH_PFN_OFFSET |
#define ARCH_PFN_OFFSET (0UL) |
#endif |
#elif defined(CONFIG_DISCONTIGMEM) |
#ifndef arch_pfn_to_nid |
#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn) |
#endif |
#ifndef arch_local_page_offset |
#define arch_local_page_offset(pfn, nid) \ |
((pfn) - NODE_DATA(nid)->node_start_pfn) |
#endif |
#endif /* CONFIG_DISCONTIGMEM */ |
/* |
* supports 3 memory models. |
*/ |
#if defined(CONFIG_FLATMEM) |
#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET)) |
#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \ |
ARCH_PFN_OFFSET) |
#elif defined(CONFIG_DISCONTIGMEM) |
#define __pfn_to_page(pfn) \ |
({ unsigned long __pfn = (pfn); \ |
unsigned long __nid = arch_pfn_to_nid(__pfn); \ |
NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\ |
}) |
#define __page_to_pfn(pg) \ |
({ const struct page *__pg = (pg); \ |
struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ |
(unsigned long)(__pg - __pgdat->node_mem_map) + \ |
__pgdat->node_start_pfn; \ |
}) |
#elif defined(CONFIG_SPARSEMEM_VMEMMAP) |
/* memmap is virtually contiguous. */ |
#define __pfn_to_page(pfn) (vmemmap + (pfn)) |
#define __page_to_pfn(page) (unsigned long)((page) - vmemmap) |
#elif defined(CONFIG_SPARSEMEM) |
/* |
* Note: section's mem_map is encoded to reflect its start_pfn. |
* section[i].section_mem_map == mem_map's address - start_pfn; |
*/ |
#define __page_to_pfn(pg) \ |
({ const struct page *__pg = (pg); \ |
int __sec = page_to_section(__pg); \ |
(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \ |
}) |
#define __pfn_to_page(pfn) \ |
({ unsigned long __pfn = (pfn); \ |
struct mem_section *__sec = __pfn_to_section(__pfn); \ |
__section_mem_map_addr(__sec) + __pfn; \ |
}) |
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ |
#define page_to_pfn __page_to_pfn |
#define pfn_to_page __pfn_to_page |
#endif /* __ASSEMBLY__ */ |
#endif |
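Under CONFIG_FLATMEM the two macros are exact inverses: pfn_to_page(p) is &mem_map[p - ARCH_PFN_OFFSET] and page_to_pfn() undoes it, so the round trip is the identity for any valid pfn. A sketch: |
static int pfn_round_trip_ok(unsigned long pfn) |
{ |
	return page_to_pfn(pfn_to_page(pfn)) == pfn; /* 1 for valid pfns */ |
} |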
/drivers/include/asm-generic/percpu.h |
---|
0,0 → 1,420 |
#ifndef _ASM_GENERIC_PERCPU_H_ |
#define _ASM_GENERIC_PERCPU_H_ |
#include <linux/compiler.h> |
#include <linux/threads.h> |
#include <linux/percpu-defs.h> |
#ifdef CONFIG_SMP |
/* |
* per_cpu_offset() is the offset that has to be added to a |
* percpu variable to get to the instance for a certain processor. |
* |
* Most arches use the __per_cpu_offset array for those offsets but |
* some arches have their own ways of determining the offset (x86_64, s390). |
*/ |
#ifndef __per_cpu_offset |
extern unsigned long __per_cpu_offset[NR_CPUS]; |
#define per_cpu_offset(x) (__per_cpu_offset[x]) |
#endif |
/* |
* Determine the offset for the currently active processor. |
* An arch may define __my_cpu_offset to provide a more effective |
* means of obtaining the offset to the per cpu variables of the |
* current processor. |
*/ |
#ifndef __my_cpu_offset |
#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id()) |
#endif |
#ifdef CONFIG_DEBUG_PREEMPT |
#define my_cpu_offset per_cpu_offset(smp_processor_id()) |
#else |
#define my_cpu_offset __my_cpu_offset |
#endif |
/* |
* Arch may define arch_raw_cpu_ptr() to provide more efficient address |
* translations for raw_cpu_ptr(). |
*/ |
#ifndef arch_raw_cpu_ptr |
#define arch_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset) |
#endif |
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA |
extern void setup_per_cpu_areas(void); |
#endif |
#endif /* SMP */ |
#ifndef PER_CPU_BASE_SECTION |
#ifdef CONFIG_SMP |
#define PER_CPU_BASE_SECTION ".data..percpu" |
#else |
#define PER_CPU_BASE_SECTION ".data" |
#endif |
#endif |
#ifndef PER_CPU_ATTRIBUTES |
#define PER_CPU_ATTRIBUTES |
#endif |
#ifndef PER_CPU_DEF_ATTRIBUTES |
#define PER_CPU_DEF_ATTRIBUTES |
#endif |
#define raw_cpu_generic_to_op(pcp, val, op) \ |
do { \ |
*raw_cpu_ptr(&(pcp)) op val; \ |
} while (0) |
#define raw_cpu_generic_add_return(pcp, val) \ |
({ \ |
raw_cpu_add(pcp, val); \ |
raw_cpu_read(pcp); \ |
}) |
#define raw_cpu_generic_xchg(pcp, nval) \ |
({ \ |
typeof(pcp) __ret; \ |
__ret = raw_cpu_read(pcp); \ |
raw_cpu_write(pcp, nval); \ |
__ret; \ |
}) |
#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ |
({ \ |
typeof(pcp) __ret; \ |
__ret = raw_cpu_read(pcp); \ |
if (__ret == (oval)) \ |
raw_cpu_write(pcp, nval); \ |
__ret; \ |
}) |
#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
({ \ |
int __ret = 0; \ |
if (raw_cpu_read(pcp1) == (oval1) && \ |
raw_cpu_read(pcp2) == (oval2)) { \ |
raw_cpu_write(pcp1, nval1); \ |
raw_cpu_write(pcp2, nval2); \ |
__ret = 1; \ |
} \ |
(__ret); \ |
}) |
#define this_cpu_generic_read(pcp) \ |
({ \ |
typeof(pcp) __ret; \ |
preempt_disable(); \ |
__ret = *this_cpu_ptr(&(pcp)); \ |
preempt_enable(); \ |
__ret; \ |
}) |
#define this_cpu_generic_to_op(pcp, val, op) \ |
do { \ |
unsigned long __flags; \ |
raw_local_irq_save(__flags); \ |
*raw_cpu_ptr(&(pcp)) op val; \ |
raw_local_irq_restore(__flags); \ |
} while (0) |
#define this_cpu_generic_add_return(pcp, val) \ |
({ \ |
typeof(pcp) __ret; \ |
unsigned long __flags; \ |
raw_local_irq_save(__flags); \ |
raw_cpu_add(pcp, val); \ |
__ret = raw_cpu_read(pcp); \ |
raw_local_irq_restore(__flags); \ |
__ret; \ |
}) |
#define this_cpu_generic_xchg(pcp, nval) \ |
({ \ |
typeof(pcp) __ret; \ |
unsigned long __flags; \ |
raw_local_irq_save(__flags); \ |
__ret = raw_cpu_read(pcp); \ |
raw_cpu_write(pcp, nval); \ |
raw_local_irq_restore(__flags); \ |
__ret; \ |
}) |
#define this_cpu_generic_cmpxchg(pcp, oval, nval) \ |
({ \ |
typeof(pcp) __ret; \ |
unsigned long __flags; \ |
raw_local_irq_save(__flags); \ |
__ret = raw_cpu_read(pcp); \ |
if (__ret == (oval)) \ |
raw_cpu_write(pcp, nval); \ |
raw_local_irq_restore(__flags); \ |
__ret; \ |
}) |
#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
({ \ |
int __ret; \ |
unsigned long __flags; \ |
raw_local_irq_save(__flags); \ |
__ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \ |
oval1, oval2, nval1, nval2); \ |
raw_local_irq_restore(__flags); \ |
__ret; \ |
}) |
#ifndef raw_cpu_read_1 |
#define raw_cpu_read_1(pcp) (*raw_cpu_ptr(&(pcp))) |
#endif |
#ifndef raw_cpu_read_2 |
#define raw_cpu_read_2(pcp) (*raw_cpu_ptr(&(pcp))) |
#endif |
#ifndef raw_cpu_read_4 |
#define raw_cpu_read_4(pcp) (*raw_cpu_ptr(&(pcp))) |
#endif |
#ifndef raw_cpu_read_8 |
#define raw_cpu_read_8(pcp) (*raw_cpu_ptr(&(pcp))) |
#endif |
#ifndef raw_cpu_write_1 |
#define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op(pcp, val, =) |
#endif |
#ifndef raw_cpu_write_2 |
#define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op(pcp, val, =) |
#endif |
#ifndef raw_cpu_write_4 |
#define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op(pcp, val, =) |
#endif |
#ifndef raw_cpu_write_8 |
#define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op(pcp, val, =) |
#endif |
#ifndef raw_cpu_add_1 |
#define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) |
#endif |
#ifndef raw_cpu_add_2 |
#define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) |
#endif |
#ifndef raw_cpu_add_4 |
#define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) |
#endif |
#ifndef raw_cpu_add_8 |
#define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op(pcp, val, +=) |
#endif |
#ifndef raw_cpu_and_1 |
#define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) |
#endif |
#ifndef raw_cpu_and_2 |
#define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) |
#endif |
#ifndef raw_cpu_and_4 |
#define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) |
#endif |
#ifndef raw_cpu_and_8 |
#define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op(pcp, val, &=) |
#endif |
#ifndef raw_cpu_or_1 |
#define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) |
#endif |
#ifndef raw_cpu_or_2 |
#define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) |
#endif |
#ifndef raw_cpu_or_4 |
#define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) |
#endif |
#ifndef raw_cpu_or_8 |
#define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op(pcp, val, |=) |
#endif |
#ifndef raw_cpu_add_return_1 |
#define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val) |
#endif |
#ifndef raw_cpu_add_return_2 |
#define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val) |
#endif |
#ifndef raw_cpu_add_return_4 |
#define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val) |
#endif |
#ifndef raw_cpu_add_return_8 |
#define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val) |
#endif |
#ifndef raw_cpu_xchg_1 |
#define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval) |
#endif |
#ifndef raw_cpu_xchg_2 |
#define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval) |
#endif |
#ifndef raw_cpu_xchg_4 |
#define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval) |
#endif |
#ifndef raw_cpu_xchg_8 |
#define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval) |
#endif |
#ifndef raw_cpu_cmpxchg_1 |
#define raw_cpu_cmpxchg_1(pcp, oval, nval) \ |
raw_cpu_generic_cmpxchg(pcp, oval, nval) |
#endif |
#ifndef raw_cpu_cmpxchg_2 |
#define raw_cpu_cmpxchg_2(pcp, oval, nval) \ |
raw_cpu_generic_cmpxchg(pcp, oval, nval) |
#endif |
#ifndef raw_cpu_cmpxchg_4 |
#define raw_cpu_cmpxchg_4(pcp, oval, nval) \ |
raw_cpu_generic_cmpxchg(pcp, oval, nval) |
#endif |
#ifndef raw_cpu_cmpxchg_8 |
#define raw_cpu_cmpxchg_8(pcp, oval, nval) \ |
raw_cpu_generic_cmpxchg(pcp, oval, nval) |
#endif |
#ifndef raw_cpu_cmpxchg_double_1 |
#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) |
#endif |
#ifndef raw_cpu_cmpxchg_double_2 |
#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) |
#endif |
#ifndef raw_cpu_cmpxchg_double_4 |
#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) |
#endif |
#ifndef raw_cpu_cmpxchg_double_8 |
#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) |
#endif |
#ifndef this_cpu_read_1 |
#define this_cpu_read_1(pcp) this_cpu_generic_read(pcp) |
#endif |
#ifndef this_cpu_read_2 |
#define this_cpu_read_2(pcp) this_cpu_generic_read(pcp) |
#endif |
#ifndef this_cpu_read_4 |
#define this_cpu_read_4(pcp) this_cpu_generic_read(pcp) |
#endif |
#ifndef this_cpu_read_8 |
#define this_cpu_read_8(pcp) this_cpu_generic_read(pcp) |
#endif |
#ifndef this_cpu_write_1 |
#define this_cpu_write_1(pcp, val) this_cpu_generic_to_op(pcp, val, =) |
#endif |
#ifndef this_cpu_write_2 |
#define this_cpu_write_2(pcp, val) this_cpu_generic_to_op(pcp, val, =) |
#endif |
#ifndef this_cpu_write_4 |
#define this_cpu_write_4(pcp, val) this_cpu_generic_to_op(pcp, val, =) |
#endif |
#ifndef this_cpu_write_8 |
#define this_cpu_write_8(pcp, val) this_cpu_generic_to_op(pcp, val, =) |
#endif |
#ifndef this_cpu_add_1 |
#define this_cpu_add_1(pcp, val) this_cpu_generic_to_op(pcp, val, +=) |
#endif |
#ifndef this_cpu_add_2 |
#define this_cpu_add_2(pcp, val) this_cpu_generic_to_op(pcp, val, +=) |
#endif |
#ifndef this_cpu_add_4 |
#define this_cpu_add_4(pcp, val) this_cpu_generic_to_op(pcp, val, +=) |
#endif |
#ifndef this_cpu_add_8 |
#define this_cpu_add_8(pcp, val) this_cpu_generic_to_op(pcp, val, +=) |
#endif |
#ifndef this_cpu_and_1 |
#define this_cpu_and_1(pcp, val) this_cpu_generic_to_op(pcp, val, &=) |
#endif |
#ifndef this_cpu_and_2 |
#define this_cpu_and_2(pcp, val) this_cpu_generic_to_op(pcp, val, &=) |
#endif |
#ifndef this_cpu_and_4 |
#define this_cpu_and_4(pcp, val) this_cpu_generic_to_op(pcp, val, &=) |
#endif |
#ifndef this_cpu_and_8 |
#define this_cpu_and_8(pcp, val) this_cpu_generic_to_op(pcp, val, &=) |
#endif |
#ifndef this_cpu_or_1 |
#define this_cpu_or_1(pcp, val) this_cpu_generic_to_op(pcp, val, |=) |
#endif |
#ifndef this_cpu_or_2 |
#define this_cpu_or_2(pcp, val) this_cpu_generic_to_op(pcp, val, |=) |
#endif |
#ifndef this_cpu_or_4 |
#define this_cpu_or_4(pcp, val) this_cpu_generic_to_op(pcp, val, |=) |
#endif |
#ifndef this_cpu_or_8 |
#define this_cpu_or_8(pcp, val) this_cpu_generic_to_op(pcp, val, |=) |
#endif |
#ifndef this_cpu_add_return_1 |
#define this_cpu_add_return_1(pcp, val) this_cpu_generic_add_return(pcp, val) |
#endif |
#ifndef this_cpu_add_return_2 |
#define this_cpu_add_return_2(pcp, val) this_cpu_generic_add_return(pcp, val) |
#endif |
#ifndef this_cpu_add_return_4 |
#define this_cpu_add_return_4(pcp, val) this_cpu_generic_add_return(pcp, val) |
#endif |
#ifndef this_cpu_add_return_8 |
#define this_cpu_add_return_8(pcp, val) this_cpu_generic_add_return(pcp, val) |
#endif |
#ifndef this_cpu_xchg_1 |
#define this_cpu_xchg_1(pcp, nval) this_cpu_generic_xchg(pcp, nval) |
#endif |
#ifndef this_cpu_xchg_2 |
#define this_cpu_xchg_2(pcp, nval) this_cpu_generic_xchg(pcp, nval) |
#endif |
#ifndef this_cpu_xchg_4 |
#define this_cpu_xchg_4(pcp, nval) this_cpu_generic_xchg(pcp, nval) |
#endif |
#ifndef this_cpu_xchg_8 |
#define this_cpu_xchg_8(pcp, nval) this_cpu_generic_xchg(pcp, nval) |
#endif |
#ifndef this_cpu_cmpxchg_1 |
#define this_cpu_cmpxchg_1(pcp, oval, nval) \ |
this_cpu_generic_cmpxchg(pcp, oval, nval) |
#endif |
#ifndef this_cpu_cmpxchg_2 |
#define this_cpu_cmpxchg_2(pcp, oval, nval) \ |
this_cpu_generic_cmpxchg(pcp, oval, nval) |
#endif |
#ifndef this_cpu_cmpxchg_4 |
#define this_cpu_cmpxchg_4(pcp, oval, nval) \ |
this_cpu_generic_cmpxchg(pcp, oval, nval) |
#endif |
#ifndef this_cpu_cmpxchg_8 |
#define this_cpu_cmpxchg_8(pcp, oval, nval) \ |
this_cpu_generic_cmpxchg(pcp, oval, nval) |
#endif |
#ifndef this_cpu_cmpxchg_double_1 |
#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) |
#endif |
#ifndef this_cpu_cmpxchg_double_2 |
#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) |
#endif |
#ifndef this_cpu_cmpxchg_double_4 |
#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) |
#endif |
#ifndef this_cpu_cmpxchg_double_8 |
#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) |
#endif |
#endif /* _ASM_GENERIC_PERCPU_H_ */ |
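The difference between the raw_* and this_cpu_* families above is purely interrupt safety: this_cpu_generic_to_op() brackets the read-modify-write with raw_local_irq_save/restore. A hypothetical per-cpu counter shows the intended use, assuming the DEFINE_PER_CPU and this_cpu_add/raw_cpu_add wrappers from linux/percpu-defs.h (included above): |
static DEFINE_PER_CPU(unsigned long, demo_events); |
static void record_event(void) |
{ |
	this_cpu_add(demo_events, 1); /* irq-safe += on this CPU's copy */ |
} |
static void record_event_irqs_off(void) |
{ |
	raw_cpu_add(demo_events, 1);  /* caller already excluded irqs   */ |
} |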
/drivers/include/asm-generic/pgtable-nopmd.h |
---|
0,0 → 1,69 |
#ifndef _PGTABLE_NOPMD_H |
#define _PGTABLE_NOPMD_H |
#ifndef __ASSEMBLY__ |
#include <asm-generic/pgtable-nopud.h> |
struct mm_struct; |
#define __PAGETABLE_PMD_FOLDED |
/* |
* Having the pmd type consist of a pud gets the size right, and allows |
* us to conceptually access the pud entry that this pmd is folded into |
* without casting. |
*/ |
typedef struct { pud_t pud; } pmd_t; |
#define PMD_SHIFT PUD_SHIFT |
#define PTRS_PER_PMD 1 |
#define PMD_SIZE (1UL << PMD_SHIFT) |
#define PMD_MASK (~(PMD_SIZE-1)) |
/* |
* The "pud_xxx()" functions here are trivial for a folded two-level |
* setup: the pmd is never bad, and a pmd always exists (as it's folded |
* into the pud entry) |
*/ |
static inline int pud_none(pud_t pud) { return 0; } |
static inline int pud_bad(pud_t pud) { return 0; } |
static inline int pud_present(pud_t pud) { return 1; } |
static inline void pud_clear(pud_t *pud) { } |
#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud)) |
#define pud_populate(mm, pmd, pte) do { } while (0) |
/* |
* (pmds are folded into puds so this doesn't get actually called, |
* but the define is needed for a generic inline function.) |
*/ |
#define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval }) |
static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) |
{ |
return (pmd_t *)pud; |
} |
#define pmd_val(x) (pud_val((x).pud)) |
#define __pmd(x) ((pmd_t) { __pud(x) } ) |
#define pud_page(pud) (pmd_page((pmd_t){ pud })) |
#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud })) |
/* |
* allocating and freeing a pmd is trivial: the 1-entry pmd is |
* inside the pud, so has no extra memory associated with it. |
*/ |
#define pmd_alloc_one(mm, address) NULL |
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
{ |
} |
#define __pmd_free_tlb(tlb, x, a) do { } while (0) |
#undef pmd_addr_end |
#define pmd_addr_end(addr, end) (end) |
#endif /* __ASSEMBLY__ */ |
#endif /* _PGTABLE_NOPMD_H */ |
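The payoff is that generic page-table walkers keep their three-level shape: with the pmd folded, pmd_offset() simply reinterprets the pud and costs nothing. A sketch assuming the pgd/pud helpers from the companion headers: |
static pmd_t *walk_to_pmd(pgd_t *pgd, unsigned long addr) |
{ |
	pud_t *pud = pud_offset(pgd, addr); /* folded: casts the pgd */ |
	return pmd_offset(pud, addr);       /* folded: casts the pud */ |
} |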
/drivers/include/asm-generic/pgtable-nopud.h |
---|
0,0 → 1,61 |
#ifndef _PGTABLE_NOPUD_H |
#define _PGTABLE_NOPUD_H |
#ifndef __ASSEMBLY__ |
#define __PAGETABLE_PUD_FOLDED |
/* |
* Having the pud type consist of a pgd gets the size right, and allows |
* us to conceptually access the pgd entry that this pud is folded into |
* without casting. |
*/ |
typedef struct { pgd_t pgd; } pud_t; |
#define PUD_SHIFT PGDIR_SHIFT |
#define PTRS_PER_PUD 1 |
#define PUD_SIZE (1UL << PUD_SHIFT) |
#define PUD_MASK (~(PUD_SIZE-1)) |
/* |
* The "pgd_xxx()" functions here are trivial for a folded two-level |
* setup: the pud is never bad, and a pud always exists (as it's folded |
* into the pgd entry) |
*/ |
static inline int pgd_none(pgd_t pgd) { return 0; } |
static inline int pgd_bad(pgd_t pgd) { return 0; } |
static inline int pgd_present(pgd_t pgd) { return 1; } |
static inline void pgd_clear(pgd_t *pgd) { } |
#define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) |
#define pgd_populate(mm, pgd, pud) do { } while (0) |
/* |
* (puds are folded into pgds so this doesn't get actually called, |
* but the define is needed for a generic inline function.) |
*/ |
#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) |
static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address) |
{ |
return (pud_t *)pgd; |
} |
#define pud_val(x) (pgd_val((x).pgd)) |
#define __pud(x) ((pud_t) { __pgd(x) } ) |
#define pgd_page(pgd) (pud_page((pud_t){ pgd })) |
#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) |
/* |
* allocating and freeing a pud is trivial: the 1-entry pud is |
* inside the pgd, so has no extra memory associated with it. |
*/ |
#define pud_alloc_one(mm, address) NULL |
#define pud_free(mm, x) do { } while (0) |
#define __pud_free_tlb(tlb, x, a) do { } while (0) |
#undef pud_addr_end |
#define pud_addr_end(addr, end) (end) |
#endif /* __ASSEMBLY__ */ |
#endif /* _PGTABLE_NOPUD_H */ |
/drivers/include/asm-generic/ptrace.h |
---|
0,0 → 1,74 |
/* |
* Common low level (register) ptrace helpers |
* |
* Copyright 2004-2011 Analog Devices Inc. |
* |
* Licensed under the GPL-2 or later. |
*/ |
#ifndef __ASM_GENERIC_PTRACE_H__ |
#define __ASM_GENERIC_PTRACE_H__ |
#ifndef __ASSEMBLY__ |
/* Helpers for working with the instruction pointer */ |
#ifndef GET_IP |
#define GET_IP(regs) ((regs)->pc) |
#endif |
#ifndef SET_IP |
#define SET_IP(regs, val) (GET_IP(regs) = (val)) |
#endif |
static inline unsigned long instruction_pointer(struct pt_regs *regs) |
{ |
return GET_IP(regs); |
} |
static inline void instruction_pointer_set(struct pt_regs *regs, |
unsigned long val) |
{ |
SET_IP(regs, val); |
} |
#ifndef profile_pc |
#define profile_pc(regs) instruction_pointer(regs) |
#endif |
/* Helpers for working with the user stack pointer */ |
#ifndef GET_USP |
#define GET_USP(regs) ((regs)->usp) |
#endif |
#ifndef SET_USP |
#define SET_USP(regs, val) (GET_USP(regs) = (val)) |
#endif |
static inline unsigned long user_stack_pointer(struct pt_regs *regs) |
{ |
return GET_USP(regs); |
} |
static inline void user_stack_pointer_set(struct pt_regs *regs, |
unsigned long val) |
{ |
SET_USP(regs, val); |
} |
/* Helpers for working with the frame pointer */ |
#ifndef GET_FP |
#define GET_FP(regs) ((regs)->fp) |
#endif |
#ifndef SET_FP |
#define SET_FP(regs, val) (GET_FP(regs) = (val)) |
#endif |
static inline unsigned long frame_pointer(struct pt_regs *regs) |
{ |
return GET_FP(regs); |
} |
static inline void frame_pointer_set(struct pt_regs *regs, |
unsigned long val) |
{ |
SET_FP(regs, val); |
} |
#endif /* __ASSEMBLY__ */ |
#endif |
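A typical consumer is a profiler or fault handler that only needs the interrupted PC; the accessors hide which register field each architecture uses. A sketch (regs would come from a trap frame): |
static unsigned long sample_pc(struct pt_regs *regs) |
{ |
	return profile_pc(regs); /* defaults to instruction_pointer(regs) */ |
} |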
/drivers/include/asm-generic/types.h |
---|
0,0 → 1,42 |
#ifndef _ASM_GENERIC_TYPES_H |
#define _ASM_GENERIC_TYPES_H |
/* |
* int-ll64 is used practically everywhere now, |
* so use it as a reasonable default. |
*/ |
#include <asm-generic/int-ll64.h> |
#ifndef __ASSEMBLY__ |
typedef unsigned short umode_t; |
#endif /* __ASSEMBLY__ */ |
/* |
* These aren't exported outside the kernel to avoid name space clashes |
*/ |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
/* |
* DMA addresses may be very different from physical addresses |
* and pointers. i386 and powerpc may have 64 bit DMA on 32 bit |
* systems, while sparc64 uses 32 bit DMA addresses for 64 bit |
* physical addresses. |
* This default defines dma_addr_t to have the same size as |
* phys_addr_t, which is the most common way. |
* Do not define the dma64_addr_t type, which never really |
* worked. |
*/ |
#ifndef dma_addr_t |
#ifdef CONFIG_PHYS_ADDR_T_64BIT |
typedef u64 dma_addr_t; |
#else |
typedef u32 dma_addr_t; |
#endif /* CONFIG_PHYS_ADDR_T_64BIT */ |
#endif /* dma_addr_t */ |
#endif /* __ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_GENERIC_TYPES_H */ |
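On this CONFIG_X86_32 build without CONFIG_PHYS_ADDR_T_64BIT, dma_addr_t comes out as u32; a compile-time check in the spirit of the ones above would pin that down (illustrative only): |
static char dma_addr_is_u32[(sizeof(dma_addr_t) == sizeof(u32)) ? 1 : -1]; |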
/drivers/include/ddk.h |
---|
3,10 → 3,10 |
#ifndef __DDK_H__ |
#define __DDK_H__ |
#include <kernel.h> |
#include <linux/kernel.h> |
#include <linux/errno.h> |
#include <linux/spinlock.h> |
#include <mutex.h> |
#include <linux/mutex.h> |
#include <linux/pci.h> |
17,12 → 17,6 |
#define PG_NOCACHE 0x018 |
#define PG_SHARED 0x200 |
#define _PAGE_PRESENT (1<<0) |
#define _PAGE_RW (1<<1) |
#define _PAGE_PWT (1<<3) |
#define _PAGE_PCD (1<<4) |
#define _PAGE_PAT (1<<7) |
#define MANUAL_DESTROY 0x80000000 |
#define ENTER() dbgprintf("enter %s\n",__FUNCTION__) |
31,8 → 25,8 |
typedef struct |
{ |
u32_t code; |
u32_t data[5]; |
u32 code; |
u32 data[5]; |
}kevent_t; |
typedef union |
39,16 → 33,16 |
{ |
struct |
{ |
u32_t handle; |
u32_t euid; |
u32 handle; |
u32 euid; |
}; |
u64_t raw; |
u64 raw; |
}evhandle_t; |
typedef struct |
{ |
u32_t handle; |
u32_t io_code; |
u32 handle; |
u32 io_code; |
void *input; |
int inp_size; |
void *output; |
65,16 → 59,10 |
int ddk_init(struct ddk_params *params); |
u32_t drvEntry(int, char *)__asm__("_drvEntry"); |
u32 drvEntry(int, char *)__asm__("_drvEntry"); |
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) |
{ |
	if (size != 0 && n > (size_t)-1 / size) /* overflow guard, as upstream; (size_t)-1 == SIZE_MAX */ |
		return NULL; |
return kmalloc(n * size, flags); |
} |
#endif /* DDK_H */ |
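With the overflow guard restored in kmalloc_array() above, oversized element counts fail cleanly instead of wrapping and under-allocating. A hypothetical caller (flag value 0 per this DDK's kmalloc; stock Linux would pass GFP_KERNEL): |
static void *alloc_handle_table(size_t nents) |
{ |
	/* NULL, not a short buffer, when nents * sizeof(u32) would overflow */ |
	return kmalloc_array(nents, sizeof(u32), 0); |
} |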
/drivers/include/drm/drm.h |
---|
File deleted |
/drivers/include/drm/drm_memory.h |
---|
File deleted |
/drivers/include/drm/drmP.h |
---|
1,17 → 1,14 |
/** |
* \file drmP.h |
* Private header for Direct Rendering Manager |
/* |
* Internal Header for the Direct Rendering Manager |
* |
* \author Rickard E. (Rik) Faith <faith@valinux.com> |
* \author Gareth Hughes <gareth@valinux.com> |
*/ |
/* |
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
* Copyright (c) 2009-2010, Code Aurora Forum. |
* All rights reserved. |
* |
* Author: Rickard E. (Rik) Faith <faith@valinux.com> |
* Author: Gareth Hughes <gareth@valinux.com> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
35,91 → 32,69 |
#ifndef _DRM_P_H_ |
#define _DRM_P_H_ |
#define iowrite32(v, addr) writel((v), (addr)) |
#ifdef __KERNEL__ |
#ifdef __alpha__ |
/* add include of current.h so that "current" is defined |
* before static inline funcs in wait.h. Doing this so we |
* can build the DRM (part of PI DRI). 4/21/2000 S + B */ |
#include <asm/current.h> |
#endif /* __alpha__ */ |
#include <syscall.h> |
#include <linux/agp_backend.h> |
#include <linux/dma-mapping.h> |
#include <linux/file.h> |
#include <linux/fs.h> |
#include <linux/idr.h> |
#include <linux/jiffies.h> |
#include <linux/kernel.h> |
#include <linux/export.h> |
#include <linux/errno.h> |
#include <linux/kref.h> |
#include <linux/mm.h> |
#include <linux/spinlock.h> |
#include <linux/wait.h> |
#include <linux/bug.h> |
#include <linux/mutex.h> |
#include <linux/pci.h> |
#include <linux/sched.h> |
#include <linux/firmware.h> |
#include <linux/err.h> |
#include <linux/fs.h> |
//#include <linux/init.h> |
#include <linux/file.h> |
#include <linux/pci.h> |
#include <linux/jiffies.h> |
#include <linux/dma-mapping.h> |
#include <linux/irqreturn.h> |
#include <linux/mutex.h> |
//#include <asm/io.h> |
#include <linux/slab.h> |
//#include <asm/uaccess.h> |
//#include <linux/workqueue.h> |
//#include <linux/poll.h> |
//#include <asm/pgalloc.h> |
#include <linux/types.h> |
#include <linux/vmalloc.h> |
#include <linux/workqueue.h> |
#include <uapi/drm/drm.h> |
#include <uapi/drm/drm_mode.h> |
#include <drm/drm.h> |
#include <drm/drm_agpsupport.h> |
#include <drm/drm_crtc.h> |
#include <drm/drm_global.h> |
#include <drm/drm_hashtab.h> |
#include <drm/drm_mem_util.h> |
#include <drm/drm_mm.h> |
#include <drm/drm_os_linux.h> |
#include <drm/drm_sarea.h> |
#include <drm/drm_vma_manager.h> |
#include <linux/idr.h> |
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) |
struct module; |
struct drm_file; |
struct drm_device; |
struct drm_agp_head; |
struct drm_local_map; |
struct drm_device_dma; |
struct drm_dma_handle; |
struct drm_gem_object; |
struct device_node; |
struct videomode; |
struct reservation_object; |
struct dma_buf_attachment; |
struct inode; |
struct poll_table_struct; |
struct drm_lock_data; |
struct sg_table; |
struct dma_buf; |
//#include <drm/drm_os_linux.h> |
#include <drm/drm_hashtab.h> |
#include <drm/drm_mm.h> |
#define KHZ2PICOS(a) (1000000000UL/(a)) |
/* Flags and return codes for get_vblank_timestamp() driver function. */ |
#define DRM_CALLED_FROM_VBLIRQ 1 |
#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) |
#define DRM_VBLANKTIME_INVBL (1 << 1) |
/* get_scanout_position() return flags */ |
#define DRM_SCANOUTPOS_VALID (1 << 0) |
#define DRM_SCANOUTPOS_INVBL (1 << 1) |
#define DRM_SCANOUTPOS_ACCURATE (1 << 2) |
/* |
* 4 debug categories are defined: |
* |
156,8 → 131,8 |
extern __printf(2, 3) |
void drm_ut_debug_printk(const char *function_name, |
const char *format, ...); |
extern __printf(2, 3) |
int drm_err(const char *func, const char *format, ...); |
extern __printf(1, 2) |
void drm_err(const char *format, ...); |
/***********************************************************************/ |
/** \name DRM template customization defaults */ |
175,25 → 150,7 |
#define DRIVER_PRIME 0x4000 |
#define DRIVER_RENDER 0x8000 |
#define DRIVER_BUS_PCI 0x1 |
#define DRIVER_BUS_PLATFORM 0x2 |
#define DRIVER_BUS_USB 0x3 |
#define DRIVER_BUS_HOST1X 0x4 |
/***********************************************************************/ |
/** \name Begin the DRM... */ |
/*@{*/ |
#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then |
also include looping detection. */ |
#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ |
#define DRM_MAP_HASH_OFFSET 0x10000000 |
/*@}*/ |
/***********************************************************************/ |
/** \name Macros to make printk easier */ |
/*@{*/ |
204,7 → 161,7 |
* \param arg arguments |
*/ |
#define DRM_ERROR(fmt, ...) \ |
drm_err(__func__, fmt, ##__VA_ARGS__) |
drm_err(fmt, ##__VA_ARGS__) |
/** |
* Rate limited error output. Like DRM_ERROR() but won't flood the log. |
219,7 → 176,7 |
DEFAULT_RATELIMIT_BURST); \ |
\ |
if (__ratelimit(&_rs)) \ |
drm_err(__func__, fmt, ##__VA_ARGS__); \ |
drm_err(fmt, ##__VA_ARGS__); \ |
}) |
#define DRM_INFO(fmt, ...) \ |
265,28 → 222,9 |
/** \name Internal types and structures */ |
/*@{*/ |
#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x) |
#define DRM_IF_VERSION(maj, min) (maj << 16 | min) |
/** |
* Test that the hardware lock is held by the caller, returning otherwise. |
* |
* \param dev DRM device. |
* \param filp file pointer of the caller. |
*/ |
#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ |
do { \ |
if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ |
_file_priv->master->lock.file_priv != _file_priv) { \ |
DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ |
__func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ |
_file_priv->master->lock.file_priv, _file_priv); \ |
return -EINVAL; \ |
} \ |
} while (0) |
/** |
* Ioctl function type. |
* |
* \param inode device inode. |
326,83 → 264,6 |
#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ |
[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl} |
#if 0 |
struct drm_magic_entry { |
struct list_head head; |
struct drm_hash_item hash_item; |
struct drm_file *priv; |
}; |
struct drm_vma_entry { |
struct list_head head; |
struct vm_area_struct *vma; |
pid_t pid; |
}; |
/** |
* DMA buffer. |
*/ |
struct drm_buf { |
int idx; /**< Index into master buflist */ |
int total; /**< Buffer size */ |
int order; /**< log-base-2(total) */ |
int used; /**< Amount of buffer in use (for DMA) */ |
unsigned long offset; /**< Byte offset (used internally) */ |
void *address; /**< Address of buffer */ |
unsigned long bus_address; /**< Bus address of buffer */ |
struct drm_buf *next; /**< Kernel-only: used for free list */ |
__volatile__ int waiting; /**< On kernel DMA queue */ |
__volatile__ int pending; /**< On hardware DMA queue */ |
struct drm_file *file_priv; /**< Private of holding file descr */ |
int context; /**< Kernel queue for this buffer */ |
int while_locked; /**< Dispatch this buffer while locked */ |
enum { |
DRM_LIST_NONE = 0, |
DRM_LIST_FREE = 1, |
DRM_LIST_WAIT = 2, |
DRM_LIST_PEND = 3, |
DRM_LIST_PRIO = 4, |
DRM_LIST_RECLAIM = 5 |
} list; /**< Which list we're on */ |
int dev_priv_size; /**< Size of buffer private storage */ |
void *dev_private; /**< Per-buffer private storage */ |
}; |
/** bufs is one longer than it has to be */ |
struct drm_waitlist { |
int count; /**< Number of possible buffers */ |
struct drm_buf **bufs; /**< List of pointers to buffers */ |
struct drm_buf **rp; /**< Read pointer */ |
struct drm_buf **wp; /**< Write pointer */ |
struct drm_buf **end; /**< End pointer */ |
spinlock_t read_lock; |
spinlock_t write_lock; |
}; |
#endif |
typedef struct drm_dma_handle { |
dma_addr_t busaddr; |
void *vaddr; |
size_t size; |
} drm_dma_handle_t; |
/** |
* Buffer entry. There is one of this for each buffer size order. |
*/ |
struct drm_buf_entry { |
int buf_size; /**< size */ |
int buf_count; /**< number of buffers */ |
struct drm_buf *buflist; /**< buffer list */ |
int seg_count; |
int page_order; |
struct drm_dma_handle **seglist; |
int low_mark; /**< Low water mark */ |
int high_mark; /**< High water mark */ |
}; |
/* Event queued up for userspace to read */ |
struct drm_pending_event { |
struct drm_event *event; |
457,7 → 318,6 |
int event_space; |
}; |
#if 0 |
/** |
* Lock data. |
*/ |
474,192 → 334,6 |
}; |
/** |
* DMA data. |
*/ |
struct drm_device_dma { |
struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ |
int buf_count; /**< total number of buffers */ |
struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ |
int seg_count; |
int page_count; /**< number of pages */ |
unsigned long *pagelist; /**< page list */ |
unsigned long byte_count; |
enum { |
_DRM_DMA_USE_AGP = 0x01, |
_DRM_DMA_USE_SG = 0x02, |
_DRM_DMA_USE_FB = 0x04, |
_DRM_DMA_USE_PCI_RO = 0x08 |
} flags; |
}; |
/** |
* AGP memory entry. Stored as a doubly linked list. |
*/ |
struct drm_agp_mem { |
unsigned long handle; /**< handle */ |
struct agp_memory *memory; |
unsigned long bound; /**< address */ |
int pages; |
struct list_head head; |
}; |
/** |
* AGP data. |
* |
* \sa drm_agp_init() and drm_device::agp. |
*/ |
struct drm_agp_head { |
struct agp_kern_info agp_info; /**< AGP device information */ |
struct list_head memory; |
unsigned long mode; /**< AGP mode */ |
struct agp_bridge_data *bridge; |
int enabled; /**< whether the AGP bus as been enabled */ |
int acquired; /**< whether the AGP device has been acquired */ |
unsigned long base; |
int agp_mtrr; |
int cant_use_aperture; |
unsigned long page_mask; |
}; |
/** |
* Scatter-gather memory. |
*/ |
struct drm_sg_mem { |
unsigned long handle; |
void *virtual; |
int pages; |
struct page **pagelist; |
dma_addr_t *busaddr; |
}; |
struct drm_sigdata { |
int context; |
struct drm_hw_lock *lock; |
}; |
#endif |
/** |
* Kernel side of a mapping |
*/ |
struct drm_local_map { |
resource_size_t offset; /**< Requested physical address (0 for SAREA)*/ |
unsigned long size; /**< Requested physical size (bytes) */ |
enum drm_map_type type; /**< Type of memory to map */ |
enum drm_map_flags flags; /**< Flags */ |
void *handle; /**< User-space: "Handle" to pass to mmap() */ |
/**< Kernel-space: kernel-virtual address */ |
int mtrr; /**< MTRR slot used */ |
}; |
typedef struct drm_local_map drm_local_map_t; |
/** |
* Mappings list |
*/ |
struct drm_map_list { |
struct list_head head; /**< list head */ |
struct drm_hash_item hash; |
struct drm_local_map *map; /**< mapping */ |
uint64_t user_token; |
struct drm_master *master; |
}; |
/* location of GART table */ |
#define DRM_ATI_GART_MAIN 1 |
#define DRM_ATI_GART_FB 2 |
#define DRM_ATI_GART_PCI 1 |
#define DRM_ATI_GART_PCIE 2 |
#define DRM_ATI_GART_IGP 3 |
struct drm_ati_pcigart_info { |
int gart_table_location; |
int gart_reg_if; |
void *addr; |
dma_addr_t bus_addr; |
dma_addr_t table_mask; |
struct drm_dma_handle *table_handle; |
struct drm_local_map mapping; |
int table_size; |
}; |
/** |
* This structure defines the drm_mm memory object, which will be used by the |
* DRM for its buffer objects. |
*/ |
struct drm_gem_object { |
/** Reference count of this object */ |
struct kref refcount; |
/** |
* handle_count - gem file_priv handle count of this object |
* |
* Each handle also holds a reference. Note that when the handle_count |
* drops to 0 any global names (e.g. the id in the flink namespace) will |
* be cleared. |
* |
* Protected by dev->object_name_lock. |
* */ |
unsigned handle_count; |
/** Related drm device */ |
struct drm_device *dev; |
/** File representing the shmem storage */ |
struct file *filp; |
/* Mapping info for this object */ |
struct drm_vma_offset_node vma_node; |
/** |
* Size of the object, in bytes. Immutable over the object's |
* lifetime. |
*/ |
size_t size; |
/** |
* Global name for this object, starts at 1. 0 means unnamed. |
* Access is covered by the object_name_lock in the related drm_device |
*/ |
int name; |
/** |
* Memory domains. These monitor which caches contain read/write data |
* related to the object. When transitioning from one set of domains |
* to another, the driver is called to ensure that caches are suitably |
* flushed and invalidated |
*/ |
uint32_t read_domains; |
uint32_t write_domain; |
/** |
* While validating an exec operation, the |
* new read/write domain values are computed here. |
* They will be transferred to the above values |
* at the point that any cache flushing occurs |
*/ |
uint32_t pending_read_domains; |
uint32_t pending_write_domain; |
/** |
* dma_buf - dma buf associated with this GEM object |
* |
* Pointer to the dma-buf associated with this gem object (either |
* through importing or exporting). We break the resulting reference |
* loop when the last gem handle for this object is released. |
* |
* Protected by obj->object_name_lock |
*/ |
struct dma_buf *dma_buf; |
}; |
#include <drm/drm_crtc.h> |
/** |
* struct drm_master - drm master structure |
* |
* @refcount: Refcount for this master object. |
666,7 → 340,6 |
* @minor: Link back to minor char device we are master for. Immutable. |
* @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex. |
* @unique_len: Length of unique field. Protected by drm_global_mutex. |
* @unique_size: Amount allocated. Protected by drm_global_mutex. |
* @magiclist: Hash of used authentication tokens. Protected by struct_mutex. |
* @magicfree: List of used authentication tokens. Protected by struct_mutex. |
* @lock: DRI lock information. |
677,10 → 350,9 |
struct drm_minor *minor; |
char *unique; |
int unique_len; |
int unique_size; |
// struct drm_open_hash magiclist; |
// struct list_head magicfree; |
// struct drm_lock_data lock; |
struct drm_open_hash magiclist; |
struct list_head magicfree; |
struct drm_lock_data lock; |
void *driver_priv; |
}; |
692,17 → 364,13 |
/* Flags and return codes for get_vblank_timestamp() driver function. */ |
#define DRM_CALLED_FROM_VBLIRQ 1 |
#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) |
#define DRM_VBLANKTIME_INVBL (1 << 1) |
#define DRM_VBLANKTIME_IN_VBLANK (1 << 1) |
/* get_scanout_position() return flags */ |
#define DRM_SCANOUTPOS_VALID (1 << 0) |
#define DRM_SCANOUTPOS_INVBL (1 << 1) |
#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1) |
#define DRM_SCANOUTPOS_ACCURATE (1 << 2) |
struct drm_bus { |
int (*set_busid)(struct drm_device *dev, struct drm_master *master); |
}; |
/** |
* DRM driver structure. This structure represents the common code for |
* a family of cards. There will be one drm_device for each card present |
894,7 → 562,28 |
}; |
struct drm_pending_vblank_event { |
struct drm_pending_event base; |
int pipe; |
struct drm_event_vblank event; |
}; |
struct drm_vblank_crtc { |
struct drm_device *dev; /* pointer to the drm_device */ |
wait_queue_head_t queue; /**< VBLANK wait queue */ |
struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */ |
struct timer_list disable_timer; /* delayed disable timer */ |
atomic_t count; /**< number of VBLANK interrupts */ |
atomic_t refcount; /* number of users of vblank interrupts per crtc */ |
u32 last; /* protected by dev->vbl_lock, used */ |
/* for wraparound handling */ |
u32 last_wait; /* Last vblank seqno waited per CRTC */ |
unsigned int inmodeset; /* Display driver is setting mode */ |
int crtc; /* crtc index */ |
bool enabled; /* so we don't call enable more than |
once per disable */ |
}; |
/** |
* DRM device structure. This structure represents a complete card that |
* may contain multiple heads. |
903,6 → 592,9 |
struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */ |
int if_version; /**< Highest interface version set */ |
/** \name Lifetime Management */ |
/*@{ */ |
struct kref ref; /**< Object ref-count */ |
struct device *dev; /**< Device structure of bus-device */ |
struct drm_driver *driver; /**< DRM driver managing the device */ |
void *dev_private; /**< DRM driver private data */ |
964,6 → 656,16 |
*/ |
bool vblank_disable_allowed; |
/* |
* If true, vblank interrupt will be disabled immediately when the |
* refcount drops to zero, as opposed to via the vblank disable |
* timer. |
* This can be set to true if the hardware has a working vblank |
* counter and the driver uses drm_vblank_on() and drm_vblank_off() |
* appropriately. |
*/ |
bool vblank_disable_immediate; |
/* array of size num_crtcs */ |
struct drm_vblank_crtc *vblank; |
986,6 → 688,10 |
unsigned int num_crtcs; /**< Number of CRTCs on this device */ |
struct { |
int context; |
struct drm_hw_lock *lock; |
} sigdata; |
struct drm_mode_config mode_config; /**< Current mode config */ |
1032,11 → 738,9 |
unsigned int cmd, unsigned long arg); |
extern long drm_compat_ioctl(struct file *filp, |
unsigned int cmd, unsigned long arg); |
extern int drm_lastclose(struct drm_device *dev); |
extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags); |
/* Device support (drm_fops.h) */ |
extern struct mutex drm_global_mutex; |
extern int drm_open(struct inode *inode, struct file *filp); |
extern ssize_t drm_read(struct file *filp, char __user *buffer, |
size_t count, loff_t *offset); |
1043,101 → 747,23 |
extern int drm_release(struct inode *inode, struct file *filp); |
/* Mapping support (drm_vm.h) */ |
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); |
extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); |
extern void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma); |
extern void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma); |
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); |
/* Memory management support (drm_memory.h) */ |
#include <drm/drm_memory.h> |
/* Misc. IOCTL support (drm_ioctl.h) */ |
extern int drm_irq_by_busid(struct drm_device *dev, void *data, |
/* Misc. IOCTL support (drm_ioctl.c) */ |
int drm_noop(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getunique(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_setunique(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getmap(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getclient(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getstats(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getcap(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_setclientcap(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_setversion(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_noop(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* Authentication IOCTL support (drm_auth.h) */ |
extern int drm_getmagic(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_authmagic(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic); |
/* Cache management (drm_cache.c) */ |
void drm_clflush_pages(struct page *pages[], unsigned long num_pages); |
void drm_clflush_sg(struct sg_table *st); |
void drm_clflush_virt_range(void *addr, unsigned long length); |
/* Locking IOCTL support (drm_lock.h) */ |
extern int drm_lock(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_unlock(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); |
extern void drm_idlelock_take(struct drm_lock_data *lock_data); |
extern void drm_idlelock_release(struct drm_lock_data *lock_data); |
/* |
* These are exported to drivers so that they can implement fencing using |
* DMA quiescent + idle. DMA quiescent usually requires the hardware lock. |
*/ |
extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv); |
/* Buffer management support (drm_bufs.h) */ |
extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); |
extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request); |
extern int drm_addmap(struct drm_device *dev, resource_size_t offset, |
unsigned int size, enum drm_map_type type, |
enum drm_map_flags flags, struct drm_local_map **map_ptr); |
extern int drm_addmap_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map); |
extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map); |
extern int drm_rmmap_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_addbufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_infobufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_markbufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_freebufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_mapbufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_dma_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* DMA support (drm_dma.h) */ |
extern int drm_legacy_dma_setup(struct drm_device *dev); |
extern void drm_legacy_dma_takedown(struct drm_device *dev); |
extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); |
extern void drm_core_reclaim_buffers(struct drm_device *dev, |
struct drm_file *filp); |
/* IRQ support (drm_irq.h) */ |
extern int drm_control(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_irq_install(struct drm_device *dev, int irq); |
extern int drm_irq_uninstall(struct drm_device *dev); |
1154,6 → 780,8 |
extern void drm_vblank_put(struct drm_device *dev, int crtc); |
extern int drm_crtc_vblank_get(struct drm_crtc *crtc); |
extern void drm_crtc_vblank_put(struct drm_crtc *crtc); |
extern void drm_wait_one_vblank(struct drm_device *dev, int crtc); |
extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); |
extern void drm_vblank_off(struct drm_device *dev, int crtc); |
extern void drm_vblank_on(struct drm_device *dev, int crtc); |
extern void drm_crtc_vblank_off(struct drm_crtc *crtc); |
1160,8 → 788,6 |
extern void drm_crtc_vblank_on(struct drm_crtc *crtc); |
extern void drm_vblank_cleanup(struct drm_device *dev); |
extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, |
struct timeval *tvblank, unsigned flags); |
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, |
int crtc, int *max_error, |
struct timeval *vblank_time, |
1171,21 → 797,23 |
extern void drm_calc_timestamping_constants(struct drm_crtc *crtc, |
const struct drm_display_mode *mode); |
/** |
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC |
* @crtc: which CRTC's vblank waitqueue to retrieve |
* |
* This function returns a pointer to the vblank waitqueue for the CRTC. |
* Drivers can use this to implement vblank waits using wait_event() & co. |
*/ |
static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc) |
{ |
return &crtc->dev->vblank[drm_crtc_index(crtc)].queue; |
} |
/* Modesetting support */ |
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); |
extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); |
extern int drm_modeset_ctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* AGP/GART support (drm_agpsupport.h) */ |
/* Stub support (drm_stub.h) */ |
extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
struct drm_master *drm_master_create(struct drm_minor *minor); |
extern struct drm_master *drm_master_get(struct drm_master *master); |
extern void drm_master_put(struct drm_master **master); |
1193,34 → 821,14 |
extern void drm_unplug_dev(struct drm_device *dev); |
extern unsigned int drm_debug; |
#if 0 |
extern unsigned int drm_vblank_offdelay; |
extern unsigned int drm_timestamp_precision; |
extern unsigned int drm_timestamp_monotonic; |
extern struct class *drm_class; |
extern struct drm_local_map *drm_getsarea(struct drm_device *dev); |
#endif |
/* Debugfs support */ |
#if defined(CONFIG_DEBUG_FS) |
extern int drm_debugfs_init(struct drm_minor *minor, int minor_id, |
struct dentry *root); |
extern int drm_debugfs_create_files(const struct drm_info_list *files, |
int count, struct dentry *root, |
struct drm_minor *minor); |
extern int drm_debugfs_remove_files(const struct drm_info_list *files, |
int count, struct drm_minor *minor); |
extern int drm_debugfs_cleanup(struct drm_minor *minor); |
extern int drm_debugfs_connector_add(struct drm_connector *connector); |
extern void drm_debugfs_connector_remove(struct drm_connector *connector); |
#else |
static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id, |
struct dentry *root) |
{ |
return 0; |
} |
static inline int drm_debugfs_create_files(const struct drm_info_list *files, |
int count, struct dentry *root, |
struct drm_minor *minor) |
1233,164 → 841,44 |
{ |
return 0; |
} |
static inline int drm_debugfs_cleanup(struct drm_minor *minor) |
{ |
return 0; |
} |
static inline int drm_debugfs_connector_add(struct drm_connector *connector) |
{ |
return 0; |
} |
static inline void drm_debugfs_connector_remove(struct drm_connector *connector) |
{ |
} |
#endif |
/* Info file support */ |
extern int drm_name_info(struct seq_file *m, void *data); |
extern int drm_vm_info(struct seq_file *m, void *data); |
extern int drm_bufs_info(struct seq_file *m, void *data); |
extern int drm_vblank_info(struct seq_file *m, void *data); |
extern int drm_clients_info(struct seq_file *m, void* data); |
extern int drm_gem_name_info(struct seq_file *m, void *data); |
extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev, |
struct drm_gem_object *obj, int flags); |
extern int drm_gem_prime_handle_to_fd(struct drm_device *dev, |
struct drm_file *file_priv, uint32_t handle, uint32_t flags, |
int *prime_fd); |
extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, |
struct dma_buf *dma_buf); |
extern int drm_gem_prime_fd_to_handle(struct drm_device *dev, |
struct drm_file *file_priv, int prime_fd, uint32_t *handle); |
extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf); |
#if DRM_DEBUG_CODE |
extern int drm_vma_info(struct seq_file *m, void *data); |
#endif |
extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, |
dma_addr_t *addrs, int max_pages); |
extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages); |
extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); |
/* Scatter Gather Support (drm_scatter.h) */ |
extern void drm_legacy_sg_cleanup(struct drm_device *dev); |
extern int drm_sg_alloc(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_sg_free(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* ATI PCIGART support (ati_pcigart.h) */ |
extern int drm_ati_pcigart_init(struct drm_device *dev, |
struct drm_ati_pcigart_info * gart_info); |
extern int drm_ati_pcigart_cleanup(struct drm_device *dev, |
struct drm_ati_pcigart_info * gart_info); |
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, |
extern struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, |
size_t align); |
extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); |
extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); |
extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah); |
#if 0 |
/* sysfs support (drm_sysfs.c) */ |
struct drm_sysfs_class; |
extern struct class *drm_sysfs_create(struct module *owner, char *name); |
extern void drm_sysfs_destroy(void); |
extern struct device *drm_sysfs_minor_alloc(struct drm_minor *minor); |
extern void drm_sysfs_hotplug_event(struct drm_device *dev); |
extern int drm_sysfs_connector_add(struct drm_connector *connector); |
extern void drm_sysfs_connector_remove(struct drm_connector *connector); |
#endif |
/* Graphics Execution Manager library functions (drm_gem.c) */ |
int drm_gem_init(struct drm_device *dev); |
void drm_gem_destroy(struct drm_device *dev); |
void drm_gem_object_release(struct drm_gem_object *obj); |
void drm_gem_object_free(struct kref *kref); |
int drm_gem_object_init(struct drm_device *dev, |
struct drm_gem_object *obj, size_t size); |
void drm_gem_private_object_init(struct drm_device *dev, |
struct drm_gem_object *obj, size_t size); |
void drm_gem_vm_open(struct vm_area_struct *vma); |
void drm_gem_vm_close(struct vm_area_struct *vma); |
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, |
struct vm_area_struct *vma); |
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
#include <drm/drm_global.h> |
static inline void |
drm_gem_object_reference(struct drm_gem_object *obj) |
{ |
kref_get(&obj->refcount); |
} |
static inline void |
drm_gem_object_unreference(struct drm_gem_object *obj) |
{ |
if (obj != NULL) |
kref_put(&obj->refcount, drm_gem_object_free); |
} |
static inline void |
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) |
{ |
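/* Fast path: atomic_add_unless() drops the reference without struct_mutex |
* unless this would be the final unref; only then take the lock and free. */ |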
if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) { |
struct drm_device *dev = obj->dev; |
mutex_lock(&dev->struct_mutex); |
if (likely(atomic_dec_and_test(&obj->refcount.refcount))) |
drm_gem_object_free(&obj->refcount); |
mutex_unlock(&dev->struct_mutex); |
} |
} |
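A minimal usage sketch (illustrative, not part of this change): look an object up by handle, use it, then drop the reference with the unlocked variant so struct_mutex is only taken if the object actually dies: |
static int example_use_handle(struct drm_device *dev, |
struct drm_file *file_priv, u32 handle) |
{ |
struct drm_gem_object *obj; |
obj = drm_gem_object_lookup(dev, file_priv, handle); |
if (obj == NULL) |
return -ENOENT; |
/* ... operate on the object ... */ |
drm_gem_object_unreference_unlocked(obj); |
return 0; |
} |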
int drm_gem_handle_create_tail(struct drm_file *file_priv, |
struct drm_gem_object *obj, |
u32 *handlep); |
int drm_gem_handle_create(struct drm_file *file_priv, |
struct drm_gem_object *obj, |
u32 *handlep); |
int drm_gem_handle_delete(struct drm_file *filp, u32 handle); |
void drm_gem_free_mmap_offset(struct drm_gem_object *obj); |
int drm_gem_create_mmap_offset(struct drm_gem_object *obj); |
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); |
struct page **drm_gem_get_pages(struct drm_gem_object *obj); |
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, |
bool dirty, bool accessed); |
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, |
struct drm_file *filp, |
u32 handle); |
int drm_gem_close_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_gem_flink_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_gem_open_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); |
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); |
extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev); |
extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); |
extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev); |
static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev, |
unsigned int token) |
{ |
struct drm_map_list *_entry; |
list_for_each_entry(_entry, &dev->maplist, head) |
if (_entry->user_token == token) |
return _entry->map; |
return NULL; |
} |
static __inline__ void drm_core_dropmap(struct drm_local_map *map) |
{ |
} |
#include <drm/drm_mem_util.h> |
struct drm_device *drm_dev_alloc(struct drm_driver *driver, |
struct device *parent); |
void drm_dev_ref(struct drm_device *dev); |
void drm_dev_unref(struct drm_device *dev); |
int drm_dev_register(struct drm_device *dev, unsigned long flags); |
void drm_dev_unregister(struct drm_device *dev); |
int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...); |
extern int drm_fill_in_dev(struct drm_device *dev, |
const struct pci_device_id *ent, |
struct drm_driver *driver); |
int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type); |
struct drm_minor *drm_minor_acquire(unsigned int minor_id); |
void drm_minor_release(struct drm_minor *minor); |
/*@}*/ |
/* PCI section */ |
1420,11 → 908,7 |
{ |
return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP); |
} |
#endif /* __KERNEL__ */ |
#define drm_sysfs_connector_add(connector) |
#define drm_sysfs_connector_remove(connector) |
#define LFB_SIZE 0x1000000 |
extern struct drm_device *main_device; |
extern struct drm_file *drm_file_handlers[256]; |
/drivers/include/drm/drm_agpsupport.h |
---|
0,0 → 1,199 |
#ifndef _DRM_AGPSUPPORT_H_ |
#define _DRM_AGPSUPPORT_H_ |
#include <linux/agp_backend.h> |
#include <linux/kernel.h> |
#include <linux/list.h> |
#include <linux/mm.h> |
#include <linux/mutex.h> |
#include <linux/types.h> |
#include <uapi/drm/drm.h> |
struct drm_device; |
struct drm_file; |
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && \ |
defined(MODULE))) |
struct drm_agp_head { |
struct agp_kern_info agp_info; |
struct list_head memory; |
unsigned long mode; |
struct agp_bridge_data *bridge; |
int enabled; |
int acquired; |
unsigned long base; |
int agp_mtrr; |
int cant_use_aperture; |
unsigned long page_mask; |
}; |
#if __OS_HAS_AGP |
void drm_free_agp(struct agp_memory * handle, int pages); |
int drm_bind_agp(struct agp_memory * handle, unsigned int start); |
int drm_unbind_agp(struct agp_memory * handle); |
struct agp_memory *drm_agp_bind_pages(struct drm_device *dev, |
struct page **pages, |
unsigned long num_pages, |
uint32_t gtt_offset, |
uint32_t type); |
struct drm_agp_head *drm_agp_init(struct drm_device *dev); |
void drm_agp_clear(struct drm_device *dev); |
int drm_agp_acquire(struct drm_device *dev); |
int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_agp_release(struct drm_device *dev); |
int drm_agp_release_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); |
int drm_agp_enable_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); |
int drm_agp_info_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); |
int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); |
int drm_agp_free_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); |
int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); |
int drm_agp_bind_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
#else /* __OS_HAS_AGP */ |
static inline void drm_free_agp(struct agp_memory * handle, int pages) |
{ |
} |
static inline int drm_bind_agp(struct agp_memory * handle, unsigned int start) |
{ |
return -ENODEV; |
} |
static inline int drm_unbind_agp(struct agp_memory * handle) |
{ |
return -ENODEV; |
} |
static inline struct agp_memory *drm_agp_bind_pages(struct drm_device *dev, |
struct page **pages, |
unsigned long num_pages, |
uint32_t gtt_offset, |
uint32_t type) |
{ |
return NULL; |
} |
static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev) |
{ |
return NULL; |
} |
static inline void drm_agp_clear(struct drm_device *dev) |
{ |
} |
static inline int drm_agp_acquire(struct drm_device *dev) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_release(struct drm_device *dev) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_release_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_enable(struct drm_device *dev, |
struct drm_agp_mode mode) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_enable_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_info(struct drm_device *dev, |
struct drm_agp_info *info) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_info_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_alloc(struct drm_device *dev, |
struct drm_agp_buffer *request) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_free(struct drm_device *dev, |
struct drm_agp_buffer *request) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_free_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_unbind(struct drm_device *dev, |
struct drm_agp_binding *request) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_bind(struct drm_device *dev, |
struct drm_agp_binding *request) |
{ |
return -ENODEV; |
} |
static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
return -ENODEV; |
} |
#endif /* __OS_HAS_AGP */ |
#endif /* _DRM_AGPSUPPORT_H_ */ |
/drivers/include/drm/drm_atomic.h |
---|
0,0 → 1,69 |
/* |
* Copyright (C) 2014 Red Hat |
* Copyright (C) 2014 Intel Corp. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
* |
* Authors: |
* Rob Clark <robdclark@gmail.com> |
* Daniel Vetter <daniel.vetter@ffwll.ch> |
*/ |
#ifndef DRM_ATOMIC_H_ |
#define DRM_ATOMIC_H_ |
#include <drm/drm_crtc.h> |
struct drm_atomic_state * __must_check |
drm_atomic_state_alloc(struct drm_device *dev); |
void drm_atomic_state_clear(struct drm_atomic_state *state); |
void drm_atomic_state_free(struct drm_atomic_state *state); |
struct drm_crtc_state * __must_check |
drm_atomic_get_crtc_state(struct drm_atomic_state *state, |
struct drm_crtc *crtc); |
struct drm_plane_state * __must_check |
drm_atomic_get_plane_state(struct drm_atomic_state *state, |
struct drm_plane *plane); |
struct drm_connector_state * __must_check |
drm_atomic_get_connector_state(struct drm_atomic_state *state, |
struct drm_connector *connector); |
int __must_check |
drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state, |
struct drm_plane *plane, struct drm_crtc *crtc); |
void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, |
struct drm_framebuffer *fb); |
int __must_check |
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, |
struct drm_crtc *crtc); |
int __must_check |
drm_atomic_add_affected_connectors(struct drm_atomic_state *state, |
struct drm_crtc *crtc); |
int |
drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, |
struct drm_crtc *crtc); |
void drm_atomic_legacy_backoff(struct drm_atomic_state *state); |
int __must_check drm_atomic_check_only(struct drm_atomic_state *state); |
int __must_check drm_atomic_commit(struct drm_atomic_state *state); |
int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); |
#endif /* DRM_ATOMIC_H_ */ |
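A hedged usage sketch of the API above (driver-side; the example_ name and ctx handling are assumptions, and EDEADLK backoff is elided). It shows the intended alloc / duplicate-into-state / check-and-commit flow, here used to disable a plane: |
static int example_atomic_disable_plane(struct drm_plane *plane, |
struct drm_modeset_acquire_ctx *ctx) |
{ |
struct drm_atomic_state *state; |
struct drm_plane_state *plane_state; |
int ret; |
state = drm_atomic_state_alloc(plane->dev); |
if (!state) |
return -ENOMEM; |
state->acquire_ctx = ctx; |
plane_state = drm_atomic_get_plane_state(state, plane); |
if (IS_ERR(plane_state)) { |
ret = PTR_ERR(plane_state); |
goto out; |
} |
ret = drm_atomic_set_crtc_for_plane(state, plane, NULL); |
if (ret != 0) |
goto out; |
drm_atomic_set_fb_for_plane(plane_state, NULL); |
ret = drm_atomic_commit(state); /* on success the state is taken over */ |
if (ret == 0) |
return 0; |
out: |
drm_atomic_state_free(state); |
return ret; |
} |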
/drivers/include/drm/drm_atomic_helper.h |
---|
0,0 → 1,126 |
/* |
* Copyright (C) 2014 Red Hat |
* Copyright (C) 2014 Intel Corp. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
* |
* Authors: |
* Rob Clark <robdclark@gmail.com> |
* Daniel Vetter <daniel.vetter@ffwll.ch> |
*/ |
#ifndef DRM_ATOMIC_HELPER_H_ |
#define DRM_ATOMIC_HELPER_H_ |
#include <drm/drm_crtc.h> |
int drm_atomic_helper_check(struct drm_device *dev, |
struct drm_atomic_state *state); |
int drm_atomic_helper_commit(struct drm_device *dev, |
struct drm_atomic_state *state, |
bool async); |
void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, |
struct drm_atomic_state *old_state); |
void drm_atomic_helper_commit_pre_planes(struct drm_device *dev, |
struct drm_atomic_state *state); |
void drm_atomic_helper_commit_post_planes(struct drm_device *dev, |
struct drm_atomic_state *old_state); |
int drm_atomic_helper_prepare_planes(struct drm_device *dev, |
struct drm_atomic_state *state); |
void drm_atomic_helper_commit_planes(struct drm_device *dev, |
struct drm_atomic_state *state); |
void drm_atomic_helper_cleanup_planes(struct drm_device *dev, |
struct drm_atomic_state *old_state); |
void drm_atomic_helper_swap_state(struct drm_device *dev, |
struct drm_atomic_state *state); |
/* implementations for legacy interfaces */ |
int drm_atomic_helper_update_plane(struct drm_plane *plane, |
struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
int crtc_x, int crtc_y, |
unsigned int crtc_w, unsigned int crtc_h, |
uint32_t src_x, uint32_t src_y, |
uint32_t src_w, uint32_t src_h); |
int drm_atomic_helper_disable_plane(struct drm_plane *plane); |
int drm_atomic_helper_set_config(struct drm_mode_set *set); |
int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc, |
struct drm_property *property, |
uint64_t val); |
int drm_atomic_helper_plane_set_property(struct drm_plane *plane, |
struct drm_property *property, |
uint64_t val); |
int drm_atomic_helper_connector_set_property(struct drm_connector *connector, |
struct drm_property *property, |
uint64_t val); |
int drm_atomic_helper_page_flip(struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
struct drm_pending_vblank_event *event, |
uint32_t flags); |
/* default implementations for state handling */ |
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc); |
struct drm_crtc_state * |
drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc); |
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, |
struct drm_crtc_state *state); |
void drm_atomic_helper_plane_reset(struct drm_plane *plane); |
struct drm_plane_state * |
drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane); |
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, |
struct drm_plane_state *state); |
void drm_atomic_helper_connector_reset(struct drm_connector *connector); |
struct drm_connector_state * |
drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector); |
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, |
struct drm_connector_state *state); |
/** |
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC |
* @plane: the loop cursor |
* @crtc: the crtc whose planes are iterated |
* |
* This iterates over the current state, useful (for example) when applying |
* atomic state after it has been checked and swapped. To iterate over the |
* planes which *will* be attached (for ->atomic_check()) see |
* drm_crtc_for_each_pending_plane() |
*/ |
#define drm_atomic_crtc_for_each_plane(plane, crtc) \ |
drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask) |
/** |
* drm_crtc_atomic_state_for_each_plane - iterate over attached planes in new state |
* @plane: the loop cursor |
* @crtc_state: the incoming crtc-state |
* |
* Similar to drm_crtc_for_each_plane(), but iterates the planes that will be |
* attached if the specified state is applied. Useful during (for example) |
* ->atomic_check() operations, to validate the incoming state |
*/ |
#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \ |
drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) |
#endif /* DRM_ATOMIC_HELPER_H_ */ |
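As a usage sketch (assumed driver code, not part of this change), a converted driver typically points its legacy function tables straight at these helpers and default state handlers: |
static const struct drm_plane_funcs example_plane_funcs = { |
.update_plane = drm_atomic_helper_update_plane, |
.disable_plane = drm_atomic_helper_disable_plane, |
.destroy = drm_plane_cleanup, |
.reset = drm_atomic_helper_plane_reset, |
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, |
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state, |
}; |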
/drivers/include/drm/drm_cache.h |
---|
0,0 → 1,38 |
/************************************************************************** |
* |
* Copyright 2009 Red Hat Inc. |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
* |
**************************************************************************/ |
/* |
* Authors: |
* Dave Airlie <airlied@redhat.com> |
*/ |
#ifndef _DRM_CACHE_H_ |
#define _DRM_CACHE_H_ |
void drm_clflush_pages(struct page *pages[], unsigned long num_pages); |
#endif |
/drivers/include/drm/drm_crtc.h |
---|
31,8 → 31,8 |
#include <linux/idr.h> |
#include <linux/fb.h> |
#include <linux/hdmi.h> |
#include <drm/drm_mode.h> |
#include <drm/drm_fourcc.h> |
#include <uapi/drm/drm_mode.h> |
#include <uapi/drm/drm_fourcc.h> |
#include <drm/drm_modeset_lock.h> |
struct drm_device; |
42,6 → 42,7 |
struct drm_file; |
struct drm_clip_rect; |
struct device_node; |
struct fence; |
#define DRM_MODE_OBJECT_CRTC 0xcccccccc |
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 |
136,6 → 137,14 |
u8 cea_rev; |
}; |
/* data corresponds to displayid vend/prod/serial */ |
struct drm_tile_group { |
struct kref refcount; |
struct drm_device *dev; |
int id; |
u8 group_data[8]; |
}; |
struct drm_framebuffer_funcs { |
/* note: use drm_framebuffer_remove() */ |
void (*destroy)(struct drm_framebuffer *framebuffer); |
142,8 → 151,8 |
int (*create_handle)(struct drm_framebuffer *fb, |
struct drm_file *file_priv, |
unsigned int *handle); |
/** |
* Optinal callback for the dirty fb ioctl. |
/* |
* Optional callback for the dirty fb ioctl. |
* |
* Userspace can notify the driver via this callback |
* that an area of the framebuffer has changed and should |
196,7 → 205,7 |
struct drm_property_blob { |
struct drm_mode_object base; |
struct list_head head; |
unsigned int length; |
size_t length; |
unsigned char data[]; |
}; |
215,13 → 224,9 |
uint64_t *values; |
struct drm_device *dev; |
struct list_head enum_blob_list; |
struct list_head enum_list; |
}; |
void drm_modeset_lock_all(struct drm_device *dev); |
void drm_modeset_unlock_all(struct drm_device *dev); |
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); |
struct drm_crtc; |
struct drm_connector; |
struct drm_encoder; |
228,13 → 233,56 |
struct drm_pending_vblank_event; |
struct drm_plane; |
struct drm_bridge; |
struct drm_atomic_state; |
/** |
* drm_crtc_funcs - control CRTCs for a given device |
* struct drm_crtc_state - mutable CRTC state |
* @enable: whether the CRTC should be enabled, gates all other state |
* @mode_changed: for use by helpers and drivers when computing state updates |
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes |
* @last_vblank_count: for helpers and drivers to capture the vblank of the |
* update to ensure framebuffer cleanup isn't done too early |
* @planes_changed: for use by helpers and drivers when computing state updates |
* @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings |
* @mode: current mode timings |
* @event: optional pointer to a DRM event to signal upon completion of the |
* state update |
* @state: backpointer to global drm_atomic_state |
*/ |
struct drm_crtc_state { |
bool enable; |
/* computed state bits used by helpers and drivers */ |
bool planes_changed : 1; |
bool mode_changed : 1; |
/* attached planes bitmask: |
* WARNING: transitional helpers do not maintain plane_mask so |
* drivers not converted over to atomic helpers should not rely |
* on plane_mask being accurate! |
*/ |
u32 plane_mask; |
/* last_vblank_count: for vblank waits before cleanup */ |
u32 last_vblank_count; |
/* adjusted_mode: for use by helpers and drivers */ |
struct drm_display_mode adjusted_mode; |
struct drm_display_mode mode; |
struct drm_pending_vblank_event *event; |
struct drm_atomic_state *state; |
}; |
/** |
* struct drm_crtc_funcs - control CRTCs for a given device |
* @save: save CRTC state |
* @restore: restore CRTC state |
* @reset: reset CRTC after state has been invalidated (e.g. resume) |
* @cursor_set: setup the cursor |
* @cursor_set2: setup the cursor with hotspot, supersedes @cursor_set if set |
* @cursor_move: move the cursor |
* @gamma_set: specify color ramp for CRTC |
* @destroy: deinit and free object |
241,6 → 289,9 |
* @set_property: called when a property is changed |
* @set_config: apply a new CRTC configuration |
* @page_flip: initiate a page flip |
* @atomic_duplicate_state: duplicate the atomic state for this CRTC |
* @atomic_destroy_state: destroy an atomic state for this CRTC |
* @atomic_set_property: set a property on an atomic state for this CRTC |
* |
* The drm_crtc_funcs structure is the central CRTC management structure |
* in the DRM. Each CRTC controls one or more connectors (note that the name |
291,16 → 342,28 |
int (*set_property)(struct drm_crtc *crtc, |
struct drm_property *property, uint64_t val); |
/* atomic update handling */ |
struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc); |
void (*atomic_destroy_state)(struct drm_crtc *crtc, |
struct drm_crtc_state *state); |
int (*atomic_set_property)(struct drm_crtc *crtc, |
struct drm_crtc_state *state, |
struct drm_property *property, |
uint64_t val); |
}; |
/** |
* drm_crtc - central CRTC control structure |
* struct drm_crtc - central CRTC control structure |
* @dev: parent DRM device |
* @port: OF node used by drm_of_find_possible_crtcs() |
* @head: list management |
* @mutex: per-CRTC locking |
* @base: base KMS object for ID tracking etc. |
* @primary: primary plane for this CRTC |
* @cursor: cursor plane for this CRTC |
* @cursor_x: current x position of the cursor, used for universal cursor planes |
* @cursor_y: current y position of the cursor, used for universal cursor planes |
* @enabled: is this CRTC enabled? |
* @mode: current mode timings |
* @hwmode: mode timings as programmed to hw regs |
313,10 → 376,13 |
* @gamma_size: size of gamma ramp |
* @gamma_store: gamma ramp values |
* @framedur_ns: precise frame timing |
* @framedur_ns: precise line timing |
* @linedur_ns: precise line timing |
* @pixeldur_ns: precise pixel timing |
* @helper_private: mid-layer private data |
* @properties: property tracking for this CRTC |
* @state: current atomic state for this CRTC |
* @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for |
* legacy ioctls |
* |
* Each CRTC may have one or more connectors associated with it. This structure |
* allows the CRTC to be controlled. |
326,7 → 392,7 |
struct device_node *port; |
struct list_head head; |
/** |
/* |
* crtc mutex |
* |
* This provides a read lock for the overall crtc state (mode, dpms |
345,10 → 411,6 |
int cursor_x; |
int cursor_y; |
/* Temporary tracking of the old fb while a modeset is ongoing. Used |
* by drm_mode_set_config_internal to implement correct refcounting. */ |
struct drm_framebuffer *old_fb; |
bool enabled; |
/* Requested mode from modesetting. */ |
375,11 → 437,32 |
void *helper_private; |
struct drm_object_properties properties; |
struct drm_crtc_state *state; |
/* |
* For legacy crtc ioctls so that atomic drivers can get at the locking |
* acquire context. |
*/ |
struct drm_modeset_acquire_ctx *acquire_ctx; |
}; |
/** |
* struct drm_connector_state - mutable connector state |
* @crtc: CRTC to connect connector to, NULL if disabled |
* @best_encoder: can be used by helpers and drivers to select the encoder |
* @state: backpointer to global drm_atomic_state |
*/ |
struct drm_connector_state { |
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */ |
struct drm_encoder *best_encoder; |
struct drm_atomic_state *state; |
}; |
/** |
* drm_connector_funcs - control connectors on a given device |
* struct drm_connector_funcs - control connectors on a given device |
* @dpms: set power state (see drm_crtc_funcs above) |
* @save: save connector state |
* @restore: restore connector state |
389,6 → 472,9 |
* @set_property: property for this connector may need an update |
* @destroy: make object go away |
* @force: notify the driver that the connector is forced on |
* @atomic_duplicate_state: duplicate the atomic state for this connector |
* @atomic_destroy_state: destroy an atomic state for this connector |
* @atomic_set_property: set a property on an atomic state for this connector |
* |
* Each CRTC may have one or more connectors attached to it. The functions |
* below allow the core DRM code to control connectors, enumerate available modes, |
413,10 → 499,19 |
uint64_t val); |
void (*destroy)(struct drm_connector *connector); |
void (*force)(struct drm_connector *connector); |
/* atomic update handling */ |
struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector); |
void (*atomic_destroy_state)(struct drm_connector *connector, |
struct drm_connector_state *state); |
int (*atomic_set_property)(struct drm_connector *connector, |
struct drm_connector_state *state, |
struct drm_property *property, |
uint64_t val); |
}; |
/** |
* drm_encoder_funcs - encoder controls |
* struct drm_encoder_funcs - encoder controls |
* @reset: reset state (e.g. at init or resume time) |
* @destroy: cleanup and free associated data |
* |
430,7 → 525,7 |
#define DRM_CONNECTOR_MAX_ENCODER 3 |
/** |
* drm_encoder - central DRM encoder structure |
* struct drm_encoder - central DRM encoder structure |
* @dev: parent DRM device |
* @head: list management |
* @base: base KMS object |
474,7 → 569,7 |
#define MAX_ELD_BYTES 128 |
/** |
* drm_connector - central DRM connector control structure |
* struct drm_connector - central DRM connector control structure |
* @dev: parent DRM device |
* @kdev: kernel device for sysfs attributes |
* @attr: sysfs attributes |
485,6 → 580,7 |
* @connector_type_id: index into connector type enum |
* @interlace_allowed: can this connector handle interlaced modes? |
* @doublescan_allowed: can this connector handle doublescan? |
* @stereo_allowed: can this connector handle stereo modes? |
* @modes: modes available on this connector (from fill_modes() + user) |
* @status: one of the drm_connector_status enums (connected, not, or unknown) |
* @probed_modes: list of modes derived directly from the display |
492,10 → 588,13 |
* @funcs: connector control functions |
* @edid_blob_ptr: DRM property containing EDID if present |
* @properties: property tracking for this connector |
* @path_blob_ptr: DRM blob property data for the DP MST path property |
* @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling |
* @dpms: current dpms state |
* @helper_private: mid-layer private data |
* @cmdline_mode: mode line parsed from the kernel cmdline for this connector |
* @force: a %DRM_FORCE_<foo> state for forced mode sets |
* @override_edid: has the EDID been overwritten through debugfs for testing? |
* @encoder_ids: valid encoders for this connector |
* @encoder: encoder driving this connector, if any |
* @eld: EDID-like data, if present |
505,6 → 604,18 |
* @video_latency: video latency info from ELD, if found |
* @audio_latency: audio latency info from ELD, if found |
* @null_edid_counter: track sinks that give us all zeros for the EDID |
* @bad_edid_counter: track sinks that give us an EDID with invalid checksum |
* @debugfs_entry: debugfs directory for this connector |
* @state: current atomic state for this connector |
* @has_tile: is this connector connected to a tiled monitor |
* @tile_group: tile group for the connected monitor |
* @tile_is_single_monitor: whether the tile is one monitor housing |
* @num_h_tile: number of horizontal tiles in the tile group |
* @num_v_tile: number of vertical tiles in the tile group |
* @tile_h_loc: horizontal location of this tile |
* @tile_v_loc: vertical location of this tile |
* @tile_h_size: horizontal size of this tile. |
* @tile_v_size: vertical size of this tile. |
* |
* Each connector may be connected to one or more CRTCs, or may be clonable by |
* another connector if they can share a CRTC. Each connector also has a specific |
540,6 → 651,8 |
struct drm_property_blob *path_blob_ptr; |
struct drm_property_blob *tile_blob_ptr; |
uint8_t polled; /* DRM_CONNECTOR_POLL_* */ |
/* requested DPMS state */ |
548,6 → 661,7 |
void *helper_private; |
/* forced on connector */ |
struct drm_cmdline_mode cmdline_mode; |
enum drm_connector_force force; |
bool override_edid; |
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; |
564,14 → 678,63 |
unsigned bad_edid_counter; |
struct dentry *debugfs_entry; |
struct drm_connector_state *state; |
/* DisplayID bits */ |
bool has_tile; |
struct drm_tile_group *tile_group; |
bool tile_is_single_monitor; |
uint8_t num_h_tile, num_v_tile; |
uint8_t tile_h_loc, tile_v_loc; |
uint16_t tile_h_size, tile_v_size; |
}; |
/** |
* drm_plane_funcs - driver plane control functions |
* struct drm_plane_state - mutable plane state |
* @crtc: currently bound CRTC, NULL if disabled |
* @fb: currently bound framebuffer |
* @fence: optional fence to wait for before scanning out @fb |
* @crtc_x: left position of visible portion of plane on crtc |
* @crtc_y: upper position of visible portion of plane on crtc |
* @crtc_w: width of visible portion of plane on crtc |
* @crtc_h: height of visible portion of plane on crtc |
* @src_x: left position of visible portion of plane within |
* plane (in 16.16) |
* @src_y: upper position of visible portion of plane within |
* plane (in 16.16) |
* @src_w: width of visible portion of plane (in 16.16) |
* @src_h: height of visible portion of plane (in 16.16) |
* @state: backpointer to global drm_atomic_state |
*/ |
struct drm_plane_state { |
struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */ |
struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */ |
struct fence *fence; |
/* Signed dest location allows it to be partially off screen */ |
int32_t crtc_x, crtc_y; |
uint32_t crtc_w, crtc_h; |
/* Source values are 16.16 fixed point */ |
uint32_t src_x, src_y; |
uint32_t src_h, src_w; |
struct drm_atomic_state *state; |
}; |
/** |
* struct drm_plane_funcs - driver plane control functions |
* @update_plane: update the plane configuration |
* @disable_plane: shut down the plane |
* @destroy: clean up plane resources |
* @reset: reset plane after state has been invalidated (e.g. resume) |
* @set_property: called when a property is changed |
* @atomic_duplicate_state: duplicate the atomic state for this plane |
* @atomic_destroy_state: destroy an atomic state for this plane |
* @atomic_set_property: set a property on an atomic state for this plane |
*/ |
struct drm_plane_funcs { |
int (*update_plane)(struct drm_plane *plane, |
582,9 → 745,19 |
uint32_t src_w, uint32_t src_h); |
int (*disable_plane)(struct drm_plane *plane); |
void (*destroy)(struct drm_plane *plane); |
void (*reset)(struct drm_plane *plane); |
int (*set_property)(struct drm_plane *plane, |
struct drm_property *property, uint64_t val); |
/* atomic update handling */ |
struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane); |
void (*atomic_destroy_state)(struct drm_plane *plane, |
struct drm_plane_state *state); |
int (*atomic_set_property)(struct drm_plane *plane, |
struct drm_plane_state *state, |
struct drm_property *property, |
uint64_t val); |
}; |
enum drm_plane_type { |
594,7 → 767,7 |
}; |
/** |
* drm_plane - central DRM plane control structure |
* struct drm_plane - central DRM plane control structure |
* @dev: DRM device this plane belongs to |
* @head: for list management |
* @base: base mode object |
603,14 → 776,19 |
* @format_count: number of formats supported |
* @crtc: currently bound CRTC |
* @fb: currently bound fb |
* @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by |
* drm_mode_set_config_internal() to implement correct refcounting. |
* @funcs: helper functions |
* @properties: property tracking for this plane |
* @type: type of plane (overlay, primary, cursor) |
* @state: current atomic state for this plane |
*/ |
struct drm_plane { |
struct drm_device *dev; |
struct list_head head; |
struct drm_modeset_lock mutex; |
struct drm_mode_object base; |
uint32_t possible_crtcs; |
620,15 → 798,21 |
struct drm_crtc *crtc; |
struct drm_framebuffer *fb; |
struct drm_framebuffer *old_fb; |
const struct drm_plane_funcs *funcs; |
struct drm_object_properties properties; |
enum drm_plane_type type; |
void *helper_private; |
struct drm_plane_state *state; |
}; |
/** |
* drm_bridge_funcs - drm_bridge control functions |
* struct drm_bridge_funcs - drm_bridge control functions |
* @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge |
* @disable: Called right before encoder prepare, disables the bridge |
* @post_disable: Called right after encoder prepare, for lockstepped disable |
652,7 → 836,7 |
}; |
/** |
* drm_bridge - central DRM bridge control structure |
* struct drm_bridge - central DRM bridge control structure |
* @dev: DRM device this bridge belongs to |
* @head: list management |
* @base: base mode object |
670,8 → 854,35 |
}; |
/** |
* drm_mode_set - new values for a CRTC config change |
* @head: list management |
* struct drm_atomic_state - the global state object for atomic updates |
* @dev: parent DRM device |
* @flags: state flags like async update |
* @planes: pointer to array of plane pointers |
* @plane_states: pointer to array of plane states pointers |
* @crtcs: pointer to array of CRTC pointers |
* @crtc_states: pointer to array of CRTC states pointers |
* @num_connector: size of the @connectors and @connector_states arrays |
* @connectors: pointer to array of connector pointers |
* @connector_states: pointer to array of connector states pointers |
* @acquire_ctx: acquire context for this atomic modeset state update |
*/ |
struct drm_atomic_state { |
struct drm_device *dev; |
uint32_t flags; |
struct drm_plane **planes; |
struct drm_plane_state **plane_states; |
struct drm_crtc **crtcs; |
struct drm_crtc_state **crtc_states; |
int num_connector; |
struct drm_connector **connectors; |
struct drm_connector_state **connector_states; |
struct drm_modeset_acquire_ctx *acquire_ctx; |
}; |
/** |
* struct drm_mode_set - new values for a CRTC config change |
* @fb: framebuffer to use for new config |
* @crtc: CRTC whose configuration we're about to change |
* @mode: mode timings to use |
701,6 → 912,9 |
* struct drm_mode_config_funcs - basic driver provided mode setting functions |
* @fb_create: create a new framebuffer object |
* @output_poll_changed: function to handle output configuration changes |
* @atomic_check: check whether a given atomic state update is possible |
* @atomic_commit: commit an atomic state update previously verified with |
* atomic_check() |
* |
* Some global (i.e. not per-CRTC, connector, etc) mode setting functions that |
* involve drivers. |
710,13 → 924,20 |
struct drm_file *file_priv, |
struct drm_mode_fb_cmd2 *mode_cmd); |
void (*output_poll_changed)(struct drm_device *dev); |
int (*atomic_check)(struct drm_device *dev, |
struct drm_atomic_state *a); |
int (*atomic_commit)(struct drm_device *dev, |
struct drm_atomic_state *a, |
bool async); |
}; |
/** |
* drm_mode_group - group of mode setting resources for potential sub-grouping |
* struct drm_mode_group - group of mode setting resources for potential sub-grouping |
* @num_crtcs: CRTC count |
* @num_encoders: encoder count |
* @num_connectors: connector count |
* @num_bridges: bridge count |
* @id_list: list of KMS object IDs in this group |
* |
* Currently this simply tracks the global mode setting state. But in the |
736,10 → 957,14 |
}; |
/** |
* drm_mode_config - Mode configuration control structure |
* struct drm_mode_config - Mode configuration control structure |
* @mutex: mutex protecting KMS related lists and structures |
* @connection_mutex: ww mutex protecting connector state and routing |
* @acquire_ctx: global implicit acquire context used by atomic drivers for |
* legacy ioctls |
* @idr_mutex: mutex for KMS ID allocation and management |
* @crtc_idr: main KMS ID tracking object |
* @fb_lock: mutex to protect fb state and lists |
* @num_fb: number of fbs available |
* @fb_list: list of framebuffers available |
* @num_connector: number of connectors on this device |
748,8 → 973,12 |
* @bridge_list: list of bridge objects |
* @num_encoder: number of encoders on this device |
* @encoder_list: list of encoder objects |
* @num_overlay_plane: number of overlay planes on this device |
* @num_total_plane: number of universal (i.e. with primary/cursor) planes on this device |
* @plane_list: list of plane objects |
* @num_crtc: number of CRTCs on this device |
* @crtc_list: list of CRTC objects |
* @property_list: list of property objects |
* @min_width: minimum pixel width on this device |
* @min_height: minimum pixel height on this device |
* @max_width: maximum pixel width on this device |
756,9 → 985,16 |
* @max_height: maximum pixel height on this device |
* @funcs: core driver provided mode setting functions |
* @fb_base: base address of the framebuffer |
* @poll_enabled: track polling status for this device |
* @poll_enabled: track polling support for this device |
* @poll_running: track polling status for this device |
* @output_poll_work: delayed work for polling in process context |
* @property_blob_list: list of all the blob property objects |
* @*_property: core property tracking |
* @preferred_depth: preferred RGB pixel depth, used by fb helpers |
* @prefer_shadow: hint to userspace to prefer shadow-fb rendering |
* @async_page_flip: does this device support async flips on the primary plane? |
* @cursor_width: hint to userspace for max cursor width |
* @cursor_height: hint to userspace for max cursor height |
* |
* Core mode resource tracking structure. All CRTC, encoders, and connectors |
* enumerated by the driver are added here, as are global properties. Some |
770,16 → 1006,10 |
struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */ |
struct mutex idr_mutex; /* for IDR management */ |
struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ |
struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ |
/* this is limited to one for now */ |
/** |
* fb_lock - mutex to protect fb state |
* |
* Besides the global fb list his also protects the fbs list in the |
* file_priv |
*/ |
struct mutex fb_lock; |
struct mutex fb_lock; /* protects global and per-file fb lists */ |
int num_fb; |
struct list_head fb_list; |
820,7 → 1050,9 |
struct drm_property *edid_property; |
struct drm_property *dpms_property; |
struct drm_property *path_property; |
struct drm_property *tile_property; |
struct drm_property *plane_type_property; |
struct drm_property *rotation_property; |
/* DVI-I properties */ |
struct drm_property *dvi_i_subconnector_property; |
846,6 → 1078,10 |
struct drm_property *aspect_ratio_property; |
struct drm_property *dirty_info_property; |
/* properties for virtual machine layout */ |
struct drm_property *suggested_x_property; |
struct drm_property *suggested_y_property; |
/* dumb ioctl parameters */ |
uint32_t preferred_depth, prefer_shadow; |
856,6 → 1092,19 |
uint32_t cursor_width, cursor_height; |
}; |
/** |
* drm_for_each_plane_mask - iterate over planes specified by bitmask |
* @plane: the loop cursor |
* @dev: the DRM device |
* @plane_mask: bitmask of plane indices |
* |
* Iterate over all planes specified by bitmask. |
*/ |
#define drm_for_each_plane_mask(plane, dev, plane_mask) \ |
list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ |
if ((plane_mask) & (1 << drm_plane_index(plane))) |
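For illustration (assumed code, not part of this change), counting the planes selected by a mask: |
static int example_count_planes(struct drm_device *dev, u32 plane_mask) |
{ |
struct drm_plane *plane; |
int n = 0; |
drm_for_each_plane_mask(plane, dev, plane_mask) |
n++; |
return n; |
} |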
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) |
#define obj_to_connector(x) container_of(x, struct drm_connector, base) |
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base) |
875,9 → 1124,6 |
struct drm_plane *primary, |
struct drm_plane *cursor, |
const struct drm_crtc_funcs *funcs); |
extern int drm_crtc_init(struct drm_device *dev, |
struct drm_crtc *crtc, |
const struct drm_crtc_funcs *funcs); |
extern void drm_crtc_cleanup(struct drm_crtc *crtc); |
extern unsigned int drm_crtc_index(struct drm_crtc *crtc); |
903,6 → 1149,7 |
void drm_connector_unregister(struct drm_connector *connector); |
extern void drm_connector_cleanup(struct drm_connector *connector); |
extern unsigned int drm_connector_index(struct drm_connector *connector); |
/* helper to unplug all connectors from sysfs for device */ |
extern void drm_connector_unplug_all(struct drm_device *dev); |
942,6 → 1189,7 |
const uint32_t *formats, uint32_t format_count, |
bool is_primary); |
extern void drm_plane_cleanup(struct drm_plane *plane); |
extern unsigned int drm_plane_index(struct drm_plane *plane); |
extern void drm_plane_force_disable(struct drm_plane *plane); |
extern int drm_crtc_check_viewport(const struct drm_crtc *crtc, |
int x, int y, |
971,9 → 1219,10 |
extern void drm_mode_config_cleanup(struct drm_device *dev); |
extern int drm_mode_connector_set_path_property(struct drm_connector *connector, |
char *path); |
const char *path); |
int drm_mode_connector_set_tile_property(struct drm_connector *connector); |
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, |
struct edid *edid); |
const struct edid *edid); |
static inline bool drm_property_type_is(struct drm_property *property, |
uint32_t type) |
1034,11 → 1283,13 |
extern int drm_property_add_enum(struct drm_property *property, int index, |
uint64_t value, const char *name); |
extern int drm_mode_create_dvi_i_properties(struct drm_device *dev); |
extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, |
char *formats[]); |
extern int drm_mode_create_tv_properties(struct drm_device *dev, |
unsigned int num_modes, |
char *modes[]); |
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); |
extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev); |
extern int drm_mode_create_dirty_info_property(struct drm_device *dev); |
extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev); |
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, |
struct drm_encoder *encoder); |
1106,6 → 1357,13 |
extern int drm_edid_header_is_valid(const u8 *raw_edid); |
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid); |
extern bool drm_edid_is_valid(struct edid *edid); |
extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, |
char topology[8]); |
extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev, |
char topology[8]); |
extern void drm_mode_put_tile_group(struct drm_device *dev, |
struct drm_tile_group *tg); |
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, |
int hsize, int vsize, int fresh, |
bool rb); |
1120,6 → 1378,9 |
struct drm_file *file_priv); |
extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane, |
struct drm_property *property, |
uint64_t value); |
extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, |
int *bpp); |
/drivers/include/drm/drm_crtc_helper.h |
---|
68,6 → 68,7 |
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode, int x, int y, |
struct drm_framebuffer *old_fb); |
void (*mode_set_nofb)(struct drm_crtc *crtc); |
/* Move the crtc on the current fb to the given position *optional* */ |
int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, |
81,6 → 82,12 |
/* disable crtc when not in use - more explicit than dpms off */ |
void (*disable)(struct drm_crtc *crtc); |
/* atomic helpers */ |
int (*atomic_check)(struct drm_crtc *crtc, |
struct drm_crtc_state *state); |
void (*atomic_begin)(struct drm_crtc *crtc); |
void (*atomic_flush)(struct drm_crtc *crtc); |
}; |
/** |
161,6 → 168,12 |
extern void drm_helper_resume_force_mode(struct drm_device *dev); |
int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode, int x, int y, |
struct drm_framebuffer *old_fb); |
int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, |
struct drm_framebuffer *old_fb); |
/* drm_probe_helper.c */ |
extern int drm_helper_probe_single_connector_modes(struct drm_connector |
*connector, uint32_t maxX, |
/drivers/include/drm/drm_displayid.h |
---|
0,0 → 1,76 |
/* |
* Copyright © 2014 Red Hat Inc. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef DRM_DISPLAYID_H |
#define DRM_DISPLAYID_H |
#define DATA_BLOCK_PRODUCT_ID 0x00 |
#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01 |
#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02 |
#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03 |
#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04 |
#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05 |
#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06 |
#define DATA_BLOCK_VESA_TIMING 0x07 |
#define DATA_BLOCK_CEA_TIMING 0x08 |
#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09 |
#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a |
#define DATA_BLOCK_GP_ASCII_STRING 0x0b |
#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c |
#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d |
#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e |
#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f |
#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10 |
#define DATA_BLOCK_TILED_DISPLAY 0x12 |
#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f |
#define PRODUCT_TYPE_EXTENSION 0 |
#define PRODUCT_TYPE_TEST 1 |
#define PRODUCT_TYPE_PANEL 2 |
#define PRODUCT_TYPE_MONITOR 3 |
#define PRODUCT_TYPE_TV 4 |
#define PRODUCT_TYPE_REPEATER 5 |
#define PRODUCT_TYPE_DIRECT_DRIVE 6 |
struct displayid_hdr { |
u8 rev; |
u8 bytes; |
u8 prod_id; |
u8 ext_count; |
} __packed; |
struct displayid_block { |
u8 tag; |
u8 rev; |
u8 num_bytes; |
} __packed; |
struct displayid_tiled_block { |
struct displayid_block base; |
u8 tile_cap; |
u8 topo[3]; |
u8 tile_size[4]; |
u8 tile_pixel_bezel[5]; |
u8 topology_id[8]; |
} __packed; |
#endif |
/drivers/include/drm/drm_dp_helper.h |
---|
190,16 → 190,16 |
# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 |
# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 |
# define DP_TRAIN_MAX_SWING_REACHED (1 << 2) |
# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_LEVEL_0 (0 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_LEVEL_1 (1 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_LEVEL_2 (2 << 0) |
# define DP_TRAIN_VOLTAGE_SWING_LEVEL_3 (3 << 0) |
# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3) |
# define DP_TRAIN_PRE_EMPH_LEVEL_0 (0 << 3) |
# define DP_TRAIN_PRE_EMPH_LEVEL_1 (1 << 3) |
# define DP_TRAIN_PRE_EMPH_LEVEL_2 (2 << 3) |
# define DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3) |
# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 |
# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) |
304,6 → 304,7 |
#define DP_TEST_SINK_MISC 0x246 |
#define DP_TEST_CRC_SUPPORTED (1 << 5) |
# define DP_TEST_COUNT_MASK 0x7 |
#define DP_TEST_RESPONSE 0x260 |
# define DP_TEST_ACK (1 << 0) |
404,26 → 405,6 |
#define MODE_I2C_READ 4 |
#define MODE_I2C_STOP 8 |
/** |
* struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp |
* aux algorithm |
* @running: set by the algo indicating whether an i2c is ongoing or whether |
* the i2c bus is quiescent |
* @address: i2c target address for the currently ongoing transfer |
* @aux_ch: driver callback to transfer a single byte of the i2c payload |
*/ |
struct i2c_algo_dp_aux_data { |
bool running; |
u16 address; |
int (*aux_ch) (struct i2c_adapter *adapter, |
int mode, uint8_t write_byte, |
uint8_t *read_byte); |
}; |
int |
i2c_dp_aux_add_bus(struct i2c_adapter *adapter); |
#define DP_LINK_STATUS_SIZE 6 |
bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], |
int lane_count); |
550,6 → 531,7 |
struct mutex hw_mutex; |
ssize_t (*transfer)(struct drm_dp_aux *aux, |
struct drm_dp_aux_msg *msg); |
unsigned i2c_nack_count, i2c_defer_count; |
}; |
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, |
/drivers/include/drm/drm_dp_mst_helper.h |
---|
28,7 → 28,7 |
struct drm_dp_mst_branch; |
/** |
* struct drm_dp_vcpi - Virtual Channel Payload Identifer |
* struct drm_dp_vcpi - Virtual Channel Payload Identifier |
* @vcpi: Virtual channel ID. |
* @pbn: Payload Bandwidth Number for this channel |
* @aligned_pbn: PBN aligned with slot size |
92,6 → 92,8 |
struct drm_dp_vcpi vcpi; |
struct drm_connector *connector; |
struct drm_dp_mst_topology_mgr *mgr; |
struct edid *cached_edid; /* for DP logical ports - make tiling work */ |
}; |
/** |
371,7 → 373,7 |
struct drm_dp_mst_topology_mgr; |
struct drm_dp_mst_topology_cbs { |
/* create a connector for a port */ |
struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *path); |
struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); |
void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, |
struct drm_connector *connector); |
void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); |
388,6 → 390,7 |
int payload_state; |
int start_slot; |
int num_slots; |
int vcpi; |
}; |
/** |
454,6 → 457,7 |
struct drm_dp_vcpi **proposed_vcpis; |
struct drm_dp_payload *payloads; |
unsigned long payload_mask; |
unsigned long vcpi_mask; |
wait_queue_head_t tx_waitq; |
struct work_struct work; |
472,7 → 476,7 |
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); |
enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); |
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); |
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); |
/drivers/include/drm/drm_edid.h |
---|
27,6 → 27,7 |
#define EDID_LENGTH 128 |
#define DDC_ADDR 0x50 |
#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */ |
#define CEA_EXT 0x02 |
#define VTB_EXT 0x10 |
33,6 → 34,7 |
#define DI_EXT 0x40 |
#define LS_EXT 0x50 |
#define MI_EXT 0x60 |
#define DISPLAYID_EXT 0x70 |
struct est_timings { |
u8 t1; |
207,6 → 209,61 |
#define DRM_EDID_HDMI_DC_30 (1 << 4) |
#define DRM_EDID_HDMI_DC_Y444 (1 << 3) |
/* ELD Header Block */ |
#define DRM_ELD_HEADER_BLOCK_SIZE 4 |
#define DRM_ELD_VER 0 |
# define DRM_ELD_VER_SHIFT 3 |
# define DRM_ELD_VER_MASK (0x1f << 3) |
#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */ |
/* ELD Baseline Block for ELD_Ver == 2 */ |
#define DRM_ELD_CEA_EDID_VER_MNL 4 |
# define DRM_ELD_CEA_EDID_VER_SHIFT 5 |
# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5) |
# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5) |
# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5) |
# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5) |
# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5) |
# define DRM_ELD_MNL_SHIFT 0 |
# define DRM_ELD_MNL_MASK (0x1f << 0) |
#define DRM_ELD_SAD_COUNT_CONN_TYPE 5 |
# define DRM_ELD_SAD_COUNT_SHIFT 4 |
# define DRM_ELD_SAD_COUNT_MASK (0xf << 4) |
# define DRM_ELD_CONN_TYPE_SHIFT 2 |
# define DRM_ELD_CONN_TYPE_MASK (3 << 2) |
# define DRM_ELD_CONN_TYPE_HDMI (0 << 2) |
# define DRM_ELD_CONN_TYPE_DP (1 << 2) |
# define DRM_ELD_SUPPORTS_AI (1 << 1) |
# define DRM_ELD_SUPPORTS_HDCP (1 << 0) |
#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */ |
# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ |
#define DRM_ELD_SPEAKER 7 |
# define DRM_ELD_SPEAKER_RLRC (1 << 6) |
# define DRM_ELD_SPEAKER_FLRC (1 << 5) |
# define DRM_ELD_SPEAKER_RC (1 << 4) |
# define DRM_ELD_SPEAKER_RLR (1 << 3) |
# define DRM_ELD_SPEAKER_FC (1 << 2) |
# define DRM_ELD_SPEAKER_LFE (1 << 1) |
# define DRM_ELD_SPEAKER_FLR (1 << 0) |
#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */ |
# define DRM_ELD_PORT_ID_LEN 8 |
#define DRM_ELD_MANUFACTURER_NAME0 16 |
#define DRM_ELD_MANUFACTURER_NAME1 17 |
#define DRM_ELD_PRODUCT_CODE0 18 |
#define DRM_ELD_PRODUCT_CODE1 19 |
#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */ |
#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) |
struct edid { |
u8 header[8]; |
/* Vendor & product info */ |
279,4 → 336,56 |
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, |
const struct drm_display_mode *mode); |
/** |
* drm_eld_mnl - Get ELD monitor name length in bytes. |
* @eld: pointer to an eld memory structure with mnl set |
*/ |
static inline int drm_eld_mnl(const uint8_t *eld) |
{ |
return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; |
} |
/** |
* drm_eld_sad_count - Get ELD SAD count. |
* @eld: pointer to an eld memory structure with sad_count set |
*/ |
static inline int drm_eld_sad_count(const uint8_t *eld) |
{ |
return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> |
DRM_ELD_SAD_COUNT_SHIFT; |
} |
/** |
* drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes |
* @eld: pointer to an eld memory structure with mnl and sad_count set |
* |
* This is a helper for determining the payload size of the baseline block, in |
* bytes, e.g. for setting the Baseline_ELD_Len field in the ELD header block. |
*/ |
static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld) |
{ |
return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + |
drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; |
} |
/** |
* drm_eld_size - Get ELD size in bytes |
* @eld: pointer to a complete eld memory structure |
* |
* The returned value does not include the vendor block. That block is vendor |
* specific, and comprises the remaining bytes in the ELD memory buffer after |
* drm_eld_size() bytes of header and baseline block. |
* |
* The returned value is guaranteed to be a multiple of 4. |
*/ |
static inline int drm_eld_size(const uint8_t *eld) |
{ |
return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; |
} |
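Taken together, these helpers let audio code walk a raw ELD buffer without hard-coding offsets. A minimal consumer sketch; the buffer source and program_audio_format() are hypothetical:

	/* Hypothetical consumer: program each Short Audio Descriptor (SAD). */
	static void consume_eld(const uint8_t *eld)
	{
		int mnl = drm_eld_mnl(eld);        /* monitor name length, bytes */
		int count = drm_eld_sad_count(eld);
		int i;

		for (i = 0; i < count; i++) {
			/* each SAD is 3 bytes, located after the monitor name */
			const uint8_t *sad = eld + DRM_ELD_CEA_SAD(mnl, i);

			program_audio_format(sad[0], sad[1], sad[2]); /* hypothetical */
		}
	}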
struct edid *drm_do_get_edid(struct drm_connector *connector, |
int (*get_edid_block)(void *data, u8 *buf, unsigned int block, |
size_t len), |
void *data); |
#endif /* __DRM_EDID_H__ */ |
/drivers/include/drm/drm_fb_helper.h |
---|
34,9 → 34,14 |
#include <linux/kgdb.h> |
struct drm_fb_offset { |
int x, y; |
}; |
struct drm_fb_helper_crtc { |
struct drm_mode_set mode_set; |
struct drm_display_mode *desired_mode; |
int x, y; |
}; |
struct drm_fb_helper_surface_size { |
72,12 → 77,12 |
bool (*initial_config)(struct drm_fb_helper *fb_helper, |
struct drm_fb_helper_crtc **crtcs, |
struct drm_display_mode **modes, |
struct drm_fb_offset *offsets, |
bool *enabled, int width, int height); |
}; |
struct drm_fb_helper_connector { |
struct drm_connector *connector; |
struct drm_cmdline_mode cmdline_mode; |
}; |
struct drm_fb_helper { |
/drivers/include/drm/drm_gem.h |
---|
0,0 → 1,190 |
#ifndef __DRM_GEM_H__ |
#define __DRM_GEM_H__ |
/* |
* GEM Graphics Execution Manager Driver Interfaces |
* |
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
* Copyright (c) 2009-2010, Code Aurora Forum. |
* All rights reserved. |
* Copyright © 2014 Intel Corporation |
* Daniel Vetter <daniel.vetter@ffwll.ch> |
* |
* Author: Rickard E. (Rik) Faith <faith@valinux.com> |
* Author: Gareth Hughes <gareth@valinux.com> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
/** |
* This structure defines the GEM buffer object, which will be used by the |
* DRM for its memory-managed buffer objects. |
*/ |
struct drm_gem_object { |
/** Reference count of this object */ |
struct kref refcount; |
/** |
* handle_count - gem file_priv handle count of this object |
* |
* Each handle also holds a reference. Note that when the handle_count |
* drops to 0 any global names (e.g. the id in the flink namespace) will |
* be cleared. |
* |
* Protected by dev->object_name_lock. |
*/ |
unsigned handle_count; |
/** Related drm device */ |
struct drm_device *dev; |
/** File representing the shmem storage */ |
struct file *filp; |
/* Mapping info for this object */ |
struct drm_vma_offset_node vma_node; |
/** |
* Size of the object, in bytes. Immutable over the object's |
* lifetime. |
*/ |
size_t size; |
/** |
* Global name for this object, starts at 1. 0 means unnamed. |
* Access is covered by the object_name_lock in the related drm_device |
*/ |
int name; |
/** |
* Memory domains. These monitor which caches contain read/write data |
* related to the object. When transitioning from one set of domains |
* to another, the driver is called to ensure that caches are suitably |
* flushed and invalidated |
*/ |
uint32_t read_domains; |
uint32_t write_domain; |
/** |
* While validating an exec operation, the |
* new read/write domain values are computed here. |
* They will be transferred to the above values |
* at the point that any cache flushing occurs |
*/ |
uint32_t pending_read_domains; |
uint32_t pending_write_domain; |
/** |
* dma_buf - dma buf associated with this GEM object |
* |
* Pointer to the dma-buf associated with this gem object (either |
* through importing or exporting). We break the resulting reference |
* loop when the last gem handle for this object is released. |
* |
* Protected by obj->object_name_lock |
*/ |
struct dma_buf *dma_buf; |
/** |
* import_attach - dma buf attachment backing this object |
* |
* Any foreign dma_buf imported as a gem object has this set to the |
* attachment point for the device. This is invariant over the lifetime |
* of a gem object. |
* |
* The driver's ->gem_free_object callback is responsible for cleaning |
* up the dma_buf attachment and references acquired at import time. |
* |
* Note that the drm gem/prime core does not depend upon drivers setting |
* this field any more. So drivers where this doesn't make sense |
* (e.g. virtual devices or a DisplayLink device behind a USB bus) can |
* simply leave it as NULL. |
*/ |
struct dma_buf_attachment *import_attach; |
/** |
* dumb - created as dumb buffer |
* Whether the gem object was created using the dumb buffer interface; |
* as such, it may not be used for GPU rendering. |
*/ |
bool dumb; |
}; |
void drm_gem_object_release(struct drm_gem_object *obj); |
void drm_gem_object_free(struct kref *kref); |
int drm_gem_object_init(struct drm_device *dev, |
struct drm_gem_object *obj, size_t size); |
void drm_gem_private_object_init(struct drm_device *dev, |
struct drm_gem_object *obj, size_t size); |
void drm_gem_vm_open(struct vm_area_struct *vma); |
void drm_gem_vm_close(struct vm_area_struct *vma); |
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, |
struct vm_area_struct *vma); |
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
static inline void |
drm_gem_object_reference(struct drm_gem_object *obj) |
{ |
kref_get(&obj->refcount); |
} |
static inline void |
drm_gem_object_unreference(struct drm_gem_object *obj) |
{ |
if (obj != NULL) |
kref_put(&obj->refcount, drm_gem_object_free); |
} |
static inline void |
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) |
{ |
if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) { |
struct drm_device *dev = obj->dev; |
mutex_lock(&dev->struct_mutex); |
if (likely(atomic_dec_and_test(&obj->refcount.refcount))) |
drm_gem_object_free(&obj->refcount); |
mutex_unlock(&dev->struct_mutex); |
} |
} |
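The unlocked variant deserves a note: atomic_add_unless(..., -1, 1) succeeds only while the count stays above one, so the reference is usually dropped without touching struct_mutex; the mutex is taken solely for the final put that frees the object. Callers therefore need no locking of their own:

	/* safe to call without holding dev->struct_mutex */
	drm_gem_object_unreference_unlocked(obj);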
int drm_gem_handle_create(struct drm_file *file_priv, |
struct drm_gem_object *obj, |
u32 *handlep); |
int drm_gem_handle_delete(struct drm_file *filp, u32 handle); |
void drm_gem_free_mmap_offset(struct drm_gem_object *obj); |
int drm_gem_create_mmap_offset(struct drm_gem_object *obj); |
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); |
struct page **drm_gem_get_pages(struct drm_gem_object *obj); |
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, |
bool dirty, bool accessed); |
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, |
struct drm_file *filp, |
u32 handle); |
int drm_gem_dumb_destroy(struct drm_file *file, |
struct drm_device *dev, |
uint32_t handle); |
#endif /* __DRM_GEM_H__ */ |
/drivers/include/drm/drm_legacy.h |
---|
0,0 → 1,203 |
#ifndef __DRM_DRM_LEGACY_H__ |
#define __DRM_DRM_LEGACY_H__ |
/* |
* Legacy driver interfaces for the Direct Rendering Manager |
* |
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
* Copyright (c) 2009-2010, Code Aurora Forum. |
* All rights reserved. |
* Copyright © 2014 Intel Corporation |
* Daniel Vetter <daniel.vetter@ffwll.ch> |
* |
* Author: Rickard E. (Rik) Faith <faith@valinux.com> |
* Author: Gareth Hughes <gareth@valinux.com> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
/* |
* Legacy support for paleontologic DRM drivers |
* |
* If you add a new driver and it uses any of these functions or structures, |
* you're doing it terribly wrong. |
*/ |
/** |
* DMA buffer. |
*/ |
struct drm_buf { |
int idx; /**< Index into master buflist */ |
int total; /**< Buffer size */ |
int order; /**< log-base-2(total) */ |
int used; /**< Amount of buffer in use (for DMA) */ |
unsigned long offset; /**< Byte offset (used internally) */ |
void *address; /**< Address of buffer */ |
unsigned long bus_address; /**< Bus address of buffer */ |
struct drm_buf *next; /**< Kernel-only: used for free list */ |
__volatile__ int waiting; /**< On kernel DMA queue */ |
__volatile__ int pending; /**< On hardware DMA queue */ |
struct drm_file *file_priv; /**< Private of holding file descr */ |
int context; /**< Kernel queue for this buffer */ |
int while_locked; /**< Dispatch this buffer while locked */ |
enum { |
DRM_LIST_NONE = 0, |
DRM_LIST_FREE = 1, |
DRM_LIST_WAIT = 2, |
DRM_LIST_PEND = 3, |
DRM_LIST_PRIO = 4, |
DRM_LIST_RECLAIM = 5 |
} list; /**< Which list we're on */ |
int dev_priv_size; /**< Size of buffer private storage */ |
void *dev_private; /**< Per-buffer private storage */ |
}; |
typedef struct drm_dma_handle { |
dma_addr_t busaddr; |
void *vaddr; |
size_t size; |
} drm_dma_handle_t; |
/** |
* Buffer entry. There is one of these for each buffer size order. |
*/ |
struct drm_buf_entry { |
int buf_size; /**< size */ |
int buf_count; /**< number of buffers */ |
struct drm_buf *buflist; /**< buffer list */ |
int seg_count; |
int page_order; |
struct drm_dma_handle **seglist; |
int low_mark; /**< Low water mark */ |
int high_mark; /**< High water mark */ |
}; |
/** |
* DMA data. |
*/ |
struct drm_device_dma { |
struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ |
int buf_count; /**< total number of buffers */ |
struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ |
int seg_count; |
int page_count; /**< number of pages */ |
unsigned long *pagelist; /**< page list */ |
unsigned long byte_count; |
enum { |
_DRM_DMA_USE_AGP = 0x01, |
_DRM_DMA_USE_SG = 0x02, |
_DRM_DMA_USE_FB = 0x04, |
_DRM_DMA_USE_PCI_RO = 0x08 |
} flags; |
}; |
/** |
* Scatter-gather memory. |
*/ |
struct drm_sg_mem { |
unsigned long handle; |
void *virtual; |
int pages; |
struct page **pagelist; |
dma_addr_t *busaddr; |
}; |
/** |
* Kernel side of a mapping |
*/ |
struct drm_local_map { |
resource_size_t offset; /**< Requested physical address (0 for SAREA)*/ |
unsigned long size; /**< Requested physical size (bytes) */ |
enum drm_map_type type; /**< Type of memory to map */ |
enum drm_map_flags flags; /**< Flags */ |
void *handle; /**< User-space: "Handle" to pass to mmap() */ |
/**< Kernel-space: kernel-virtual address */ |
int mtrr; /**< MTRR slot used */ |
}; |
typedef struct drm_local_map drm_local_map_t; |
/** |
* Mappings list |
*/ |
struct drm_map_list { |
struct list_head head; /**< list head */ |
struct drm_hash_item hash; |
struct drm_local_map *map; /**< mapping */ |
uint64_t user_token; |
struct drm_master *master; |
}; |
int drm_legacy_addmap(struct drm_device *d, resource_size_t offset, |
unsigned int size, enum drm_map_type type, |
enum drm_map_flags flags, struct drm_local_map **map_p); |
int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map); |
int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map); |
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev); |
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma); |
int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req); |
int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req); |
/** |
* Test that the hardware lock is held by the caller, returning -EINVAL otherwise. |
* |
* \param dev DRM device. |
* \param filp file pointer of the caller. |
*/ |
#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ |
do { \ |
if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ |
_file_priv->master->lock.file_priv != _file_priv) { \ |
DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ |
__func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ |
_file_priv->master->lock.file_priv, _file_priv); \ |
return -EINVAL; \ |
} \ |
} while (0) |
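As a usage sketch, a legacy ioctl handler would place the guard at its top; the handler name is hypothetical:

	static int my_legacy_dma_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
	{
		LOCK_TEST_WITH_RETURN(dev, file_priv); /* -EINVAL if lock not held */
		/* ... safe to touch the hardware here ... */
		return 0;
	}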
void drm_legacy_idlelock_take(struct drm_lock_data *lock); |
void drm_legacy_idlelock_release(struct drm_lock_data *lock); |
/* drm_pci.c dma alloc wrappers */ |
void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); |
/* drm_memory.c */ |
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev); |
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); |
void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev); |
static __inline__ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, |
unsigned int token) |
{ |
struct drm_map_list *_entry; |
list_for_each_entry(_entry, &dev->maplist, head) |
if (_entry->user_token == token) |
return _entry->map; |
return NULL; |
} |
#endif /* __DRM_DRM_LEGACY_H__ */ |
/drivers/include/drm/drm_modeset_lock.h |
---|
29,10 → 29,11 |
struct drm_modeset_lock; |
/** |
* drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx) |
* struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx) |
* @ww_ctx: base acquire ctx |
* @contended: used internally for -EDEADLK handling |
* @locked: list of held locks |
* @trylock_only: trylock mode used in atomic contexts/panic notifiers |
* |
* Each thread competing for a set of locks must use one acquire |
* ctx. And if any lock function returns -EDEADLK, it must back off and |
53,10 → 54,15 |
* list of held locks (drm_modeset_lock) |
*/ |
struct list_head locked; |
/** |
* Trylock mode, use only for panic handlers! |
*/ |
bool trylock_only; |
}; |
/** |
* drm_modeset_lock - used for locking modeset resources. |
* struct drm_modeset_lock - used for locking modeset resources. |
* @mutex: resource locking |
* @head: used to hold its place on the state->locked list when |
* part of an atomic update |
120,6 → 126,19 |
void drm_modeset_unlock(struct drm_modeset_lock *lock); |
struct drm_device; |
struct drm_crtc; |
struct drm_plane; |
void drm_modeset_lock_all(struct drm_device *dev); |
int __drm_modeset_lock_all(struct drm_device *dev, bool trylock); |
void drm_modeset_unlock_all(struct drm_device *dev); |
void drm_modeset_lock_crtc(struct drm_crtc *crtc, |
struct drm_plane *plane); |
void drm_modeset_unlock_crtc(struct drm_crtc *crtc); |
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); |
struct drm_modeset_acquire_ctx * |
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc); |
int drm_modeset_lock_all_crtcs(struct drm_device *dev, |
struct drm_modeset_acquire_ctx *ctx); |
/drivers/include/drm/drm_os_linux.h |
---|
0,0 → 1,65 |
/** |
* \file drm_os_linux.h |
* OS abstraction macros. |
*/ |
//#include <linux/interrupt.h> /* For task queue support */ |
#include <linux/delay.h> |
#ifndef readq |
static inline u64 readq(void __iomem *reg) |
{ |
return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32); |
} |
static inline void writeq(u64 val, void __iomem *reg) |
{ |
writel(val & 0xffffffff, reg); |
writel(val >> 32, reg + 0x4UL); |
} |
#endif |
/** Current process ID */ |
#define DRM_CURRENTPID task_pid_nr(current) |
#define DRM_UDELAY(d) udelay(d) |
/** Read a byte from a MMIO region */ |
#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset)) |
/** Read a word from a MMIO region */ |
#define DRM_READ16(map, offset) readw(((void __iomem *)(map)->handle) + (offset)) |
/** Read a dword from a MMIO region */ |
#define DRM_READ32(map, offset) readl(((void __iomem *)(map)->handle) + (offset)) |
/** Write a byte into a MMIO region */ |
#define DRM_WRITE8(map, offset, val) writeb(val, ((void __iomem *)(map)->handle) + (offset)) |
/** Write a word into a MMIO region */ |
#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset)) |
/** Write a dword into a MMIO region */ |
#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset)) |
/** Read a qword from a MMIO region - be careful using these unless you really understand them */ |
#define DRM_READ64(map, offset) readq(((void __iomem *)(map)->handle) + (offset)) |
/** Write a qword into a MMIO region */ |
#define DRM_WRITE64(map, offset, val) writeq(val, ((void __iomem *)(map)->handle) + (offset)) |
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \ |
do { \ |
DECLARE_WAITQUEUE(entry, current); \ |
unsigned long end = jiffies + (timeout); \ |
add_wait_queue(&(queue), &entry); \ |
\ |
for (;;) { \ |
__set_current_state(TASK_INTERRUPTIBLE); \ |
if (condition) \ |
break; \ |
if (time_after_eq(jiffies, end)) { \ |
ret = -EBUSY; \ |
break; \ |
} \ |
schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \ |
if (signal_pending(current)) { \ |
ret = -EINTR; \ |
break; \ |
} \ |
} \ |
__set_current_state(TASK_RUNNING); \ |
remove_wait_queue(&(queue), &entry); \ |
} while (0) |
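A hedged usage sketch: a driver waits up to three seconds for its IRQ handler to flip a flag (dev_priv, its wait queue, and the flag are hypothetical):

	int ret = 0;

	/* sleeps until the IRQ handler sets fence_completed, 3 s pass, or a
	 * signal arrives; ret becomes 0, -EBUSY or -EINTR accordingly */
	DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
		    dev_priv->fence_completed);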
/drivers/include/drm/drm_pciids.h |
---|
17,6 → 17,7 |
{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
73,7 → 74,6 |
{0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ |
{0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ |
{0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ |
{0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ |
{0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ |
{0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ |
{0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ |
164,8 → 164,11 |
{0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
175,6 → 178,8 |
{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
297,6 → 302,7 |
{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
/drivers/include/drm/drm_plane_helper.h |
---|
25,6 → 25,7 |
#define DRM_PLANE_HELPER_H |
#include <drm/drm_rect.h> |
#include <drm/drm_crtc.h> |
/* |
* Drivers that don't allow primary plane scaling may pass this macro in place |
42,6 → 43,37 |
* planes. |
*/ |
extern int drm_crtc_init(struct drm_device *dev, |
struct drm_crtc *crtc, |
const struct drm_crtc_funcs *funcs); |
/** |
* drm_plane_helper_funcs - helper operations for planes |
* @prepare_fb: prepare a framebuffer for use by the plane |
* @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane |
* @atomic_check: check that a given atomic state is valid and can be applied |
* @atomic_update: apply an atomic state to the plane |
* |
* The helper operations are called by the mid-layer CRTC helper. |
*/ |
struct drm_plane_helper_funcs { |
int (*prepare_fb)(struct drm_plane *plane, |
struct drm_framebuffer *fb); |
void (*cleanup_fb)(struct drm_plane *plane, |
struct drm_framebuffer *fb); |
int (*atomic_check)(struct drm_plane *plane, |
struct drm_plane_state *state); |
void (*atomic_update)(struct drm_plane *plane, |
struct drm_plane_state *old_state); |
}; |
static inline void drm_plane_helper_add(struct drm_plane *plane, |
const struct drm_plane_helper_funcs *funcs) |
{ |
plane->helper_private = (void *)funcs; |
} |
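For illustration, a driver using these helpers defines its ops table once and attaches it at plane-init time; all my_* names below are hypothetical:

	static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
		.prepare_fb    = my_plane_prepare_fb,    /* e.g. pin the new fb */
		.cleanup_fb    = my_plane_cleanup_fb,    /* e.g. unpin the old fb */
		.atomic_check  = my_plane_atomic_check,  /* validate clip/scale */
		.atomic_update = my_plane_atomic_update, /* write hw registers */
	};

	/* during plane initialization: */
	drm_plane_helper_add(plane, &my_plane_helper_funcs);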
extern int drm_plane_helper_check_update(struct drm_plane *plane, |
struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
68,4 → 100,16 |
int num_formats); |
int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
int crtc_x, int crtc_y, |
unsigned int crtc_w, unsigned int crtc_h, |
uint32_t src_x, uint32_t src_y, |
uint32_t src_w, uint32_t src_h); |
int drm_plane_helper_disable(struct drm_plane *plane); |
/* For use by drm_crtc_helper.c */ |
int drm_plane_helper_commit(struct drm_plane *plane, |
struct drm_plane_state *plane_state, |
struct drm_framebuffer *old_fb); |
#endif |
/drivers/include/drm/i915_pciids.h |
---|
259,4 → 259,21 |
INTEL_VGA_DEVICE(0x22b2, info), \ |
INTEL_VGA_DEVICE(0x22b3, info) |
#define INTEL_SKL_IDS(info) \ |
INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \ |
INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \ |
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ |
INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \ |
INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \ |
INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \ |
INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \ |
INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \ |
INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \ |
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ |
INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \ |
INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \ |
INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \ |
INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \ |
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */ |
#endif /* _I915_PCIIDS_H */ |
/drivers/include/drm/intel-gtt.h |
---|
3,8 → 3,6 |
#ifndef _DRM_INTEL_GTT_H |
#define _DRM_INTEL_GTT_H |
struct agp_bridge_data; |
void intel_gtt_get(size_t *gtt_total, size_t *stolen_size, |
phys_addr_t *mappable_base, unsigned long *mappable_end); |
/drivers/include/drm/ttm/ttm_bo_api.h |
---|
45,12 → 45,24 |
struct drm_mm_node; |
/** |
* struct ttm_place |
* |
* @fpfn: first valid page frame number to put the object |
* @lpfn: last valid page frame number to put the object |
* @flags: memory domain and caching flags for the object |
* |
* Structure indicating a possible place to put an object. |
*/ |
struct ttm_place { |
unsigned fpfn; |
unsigned lpfn; |
uint32_t flags; |
}; |
/** |
* struct ttm_placement |
* |
* @fpfn: first valid page frame number to put the object |
* @lpfn: last valid page frame number to put the object |
* @num_placement: number of preferred placements |
* @placement: preferred placements |
* @num_busy_placement: number of preferred placements when the buffer must be evicted |
59,12 → 71,10 |
* Structure indicating the placement you request for an object. |
*/ |
struct ttm_placement { |
unsigned fpfn; |
unsigned lpfn; |
unsigned num_placement; |
const uint32_t *placement; |
const struct ttm_place *placement; |
unsigned num_busy_placement; |
const uint32_t *busy_placement; |
const struct ttm_place *busy_placement; |
}; |
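This replaces the old flat uint32_t flag arrays (with a single fpfn/lpfn on the placement) by per-entry struct ttm_place, so every candidate placement carries its own page-frame window. A sketch of new-style initialization, assuming the TTM_PL_* flags from ttm/ttm_placement.h:

	static const struct ttm_place vram_place = {
		.fpfn  = 0,
		.lpfn  = 0,	/* 0 = no upper bound */
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING,
	};

	static const struct ttm_placement vram_placement = {
		.num_placement      = 1,
		.placement          = &vram_place,
		.num_busy_placement = 1,
		.busy_placement     = &vram_place,
	};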
/** |
163,7 → 173,6 |
* @lru: List head for the lru list. |
* @ddestroy: List head for the delayed destroy list. |
* @swap: List head for swap LRU list. |
* @sync_obj: Pointer to a synchronization object. |
* @priv_flags: Flags describing buffer object internal state. |
* @vma_node: Address space manager node. |
* @offset: The current GPU offset, which can have different meanings |
227,13 → 236,9 |
struct list_head io_reserve_lru; |
/** |
* Members protected by struct buffer_object_device::fence_lock |
* In addition, setting sync_obj to anything else |
* than NULL requires bo::reserved to be held. This allows for |
* checking NULL while reserved but not holding the mentioned lock. |
* Members protected by a bo reservation. |
*/ |
void *sync_obj; |
unsigned long priv_flags; |
struct drm_vma_offset_node vma_node; |
455,6 → 460,7 |
* point to the shmem object backing a GEM object if TTM is used to back a |
* GEM user interface. |
* @acc_size: Accounted size for this object. |
* @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. |
* @destroy: Destroy function. Use NULL for kfree(). |
* |
* This function initializes a pre-allocated struct ttm_buffer_object. |
482,6 → 488,7 |
struct file *persistent_swap_storage, |
size_t acc_size, |
struct sg_table *sg, |
struct reservation_object *resv, |
void (*destroy) (struct ttm_buffer_object *)); |
/** |
519,20 → 526,6 |
struct ttm_buffer_object **p_bo); |
/** |
* ttm_bo_check_placement |
* |
* @bo: the buffer object. |
* @placement: placements |
* |
* Performs minimal validity checking on an intended change of |
* placement flags. |
* Returns |
* -EINVAL: Intended change is invalid or not allowed. |
*/ |
extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, |
struct ttm_placement *placement); |
/** |
* ttm_bo_init_mm |
* |
* @bdev: Pointer to a ttm_bo_device struct. |
/drivers/include/drm/ttm/ttm_bo_driver.h |
---|
208,8 → 208,7 |
*/ |
int (*get_node)(struct ttm_mem_type_manager *man, |
struct ttm_buffer_object *bo, |
struct ttm_placement *placement, |
uint32_t flags, |
const struct ttm_place *place, |
struct ttm_mem_reg *mem); |
/** |
313,11 → 312,6 |
* @move: Callback for a driver to hook in accelerated functions to |
* move a buffer. |
* If set to NULL, a potentially slow memcpy() move is used. |
* @sync_obj_signaled: See ttm_fence_api.h |
* @sync_obj_wait: See ttm_fence_api.h |
* @sync_obj_flush: See ttm_fence_api.h |
* @sync_obj_unref: See ttm_fence_api.h |
* @sync_obj_ref: See ttm_fence_api.h |
*/ |
struct ttm_bo_driver { |
419,23 → 413,6 |
int (*verify_access) (struct ttm_buffer_object *bo, |
struct file *filp); |
/** |
* In case a driver writer dislikes the TTM fence objects, |
* the driver writer can replace those with sync objects of |
* his / her own. If it turns out that no driver writer is |
* using these, I suggest we remove these hooks and plug in |
* fences directly. The bo driver needs the following functionality: |
* See the corresponding functions in the fence object API |
* documentation. |
*/ |
bool (*sync_obj_signaled) (void *sync_obj); |
int (*sync_obj_wait) (void *sync_obj, |
bool lazy, bool interruptible); |
int (*sync_obj_flush) (void *sync_obj); |
void (*sync_obj_unref) (void **sync_obj); |
void *(*sync_obj_ref) (void *sync_obj); |
/* hook to notify driver about a driver move so it |
* can do tiling things */ |
void (*move_notify)(struct ttm_buffer_object *bo, |
522,8 → 499,6 |
* |
* @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. |
* @man: An array of mem_type_managers. |
* @fence_lock: Protects the synchronizing members on *all* bos belonging |
* to this device. |
* @vma_manager: Address space manager |
* lru_lock: Spinlock that protects the buffer+device lru lists and |
* ddestroy lists. |
543,7 → 518,6 |
struct ttm_bo_global *glob; |
struct ttm_bo_driver *driver; |
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; |
spinlock_t fence_lock; |
/* |
* Protected by internal locks. |
1022,7 → 996,7 |
* ttm_bo_move_accel_cleanup. |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* @sync_obj: A sync object that signals when moving is complete. |
* @fence: A fence object that signals when moving is complete. |
* @evict: This is an evict move. Don't return until the buffer is idle. |
* @no_wait_gpu: Return immediately if the GPU is busy. |
* @new_mem: struct ttm_mem_reg indicating where to move. |
1036,7 → 1010,7 |
*/ |
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, |
void *sync_obj, |
struct fence *fence, |
bool evict, bool no_wait_gpu, |
struct ttm_mem_reg *new_mem); |
/** |
/drivers/include/drm/ttm/ttm_execbuf_util.h |
---|
39,19 → 39,13 |
* |
* @head: list head for thread-private list. |
* @bo: refcounted buffer object pointer. |
* @reserved: Indicates whether @bo has been reserved for validation. |
* @removed: Indicates whether @bo has been removed from lru lists. |
* @put_count: Number of outstanding references on bo::list_kref. |
* @old_sync_obj: Pointer to a sync object about to be unreferenced |
* @shared: should the fence be added shared? |
*/ |
struct ttm_validate_buffer { |
struct list_head head; |
struct ttm_buffer_object *bo; |
bool reserved; |
bool removed; |
int put_count; |
void *old_sync_obj; |
bool shared; |
}; |
/** |
73,6 → 67,8 |
* @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only |
* non-blocking reserves should be tried. |
* @list: thread private list of ttm_validate_buffer structs. |
* @intr: should the wait be interruptible |
* @dups: [out] optional list of duplicates. |
* |
* Tries to reserve bos pointed to by the list entries for validation. |
* If the function returns 0, all buffers are marked as "unfenced", |
84,10 → 80,15 |
* CPU write reservations to be cleared, and for other threads to |
* unreserve their buffers. |
* |
* This function may return -ERESTART or -EAGAIN if the calling process |
* receives a signal while waiting. In that case, no buffers on the list |
* will be reserved upon return. |
* If intr is set to true, this function may return -ERESTARTSYS if the |
* calling process receives a signal while waiting. In that case, no |
* buffers on the list will be reserved upon return. |
* |
* If dups is non-NULL, all buffers already reserved by the current thread |
* (e.g. duplicates) are added to this list, otherwise -EALREADY is returned |
* on the first already reserved buffer and all buffers from the list are |
* unreserved again. |
* |
* Buffers reserved by this function should be unreserved by |
* a call to either ttm_eu_backoff_reservation() or |
* ttm_eu_fence_buffer_objects() when command submission is complete or |
95,7 → 96,8 |
*/ |
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, |
struct list_head *list); |
struct list_head *list, bool intr, |
struct list_head *dups); |
/** |
* function ttm_eu_fence_buffer_objects. |
102,7 → 104,7 |
* |
* @ticket: ww_acquire_ctx from reserve call |
* @list: thread private list of ttm_validate_buffer structs. |
* @sync_obj: The new sync object for the buffers. |
* @fence: The new exclusive fence for the buffers. |
* |
* This function should be called when command submission is complete, and |
* it will add a new sync object to bos pointed to by entries on @list. |
111,6 → 113,7 |
*/ |
extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, |
struct list_head *list, void *sync_obj); |
struct list_head *list, |
struct fence *fence); |
#endif |
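With the new signatures, the execbuf pattern looks roughly like the sketch below; the validation step in the middle and the fence origin are driver-specific:

	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	struct fence *fence;
	int ret;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	/* ... fill 'list' with ttm_validate_buffer entries ... */

	ret = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (ret)
		return ret;	/* may be -ERESTARTSYS since intr == true */

	/* ... validate buffers, submit commands, obtain 'fence' ... */

	ttm_eu_fence_buffer_objects(&ticket, &list, fence);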
/drivers/include/linux/uapi/drm/drm.h |
---|
File deleted |
/drivers/include/linux/uapi/drm/drm_fourcc.h |
---|
File deleted |
/drivers/include/linux/uapi/drm/drm_mode.h |
---|
File deleted |
/drivers/include/linux/uapi/drm/i915_drm.h |
---|
File deleted |
/drivers/include/linux/uapi/drm/radeon_drm.h |
---|
File deleted |
/drivers/include/linux/uapi/drm/vmwgfx_drm.h |
---|
File deleted |
/drivers/include/linux/asm/bitops.h |
---|
File deleted |
/drivers/include/linux/asm/scatterlist.h |
---|
File deleted |
/drivers/include/linux/asm/cpufeature.h |
---|
File deleted |
/drivers/include/linux/asm/types.h |
---|
File deleted |
/drivers/include/linux/asm/string.h |
---|
File deleted |
/drivers/include/linux/asm/spinlock_types.h |
---|
File deleted |
/drivers/include/linux/asm/unaligned.h |
---|
File deleted |
/drivers/include/linux/asm/string_32.h |
---|
File deleted |
/drivers/include/linux/asm/atomic.h |
---|
File deleted |
/drivers/include/linux/asm/alternative.h |
---|
File deleted |
/drivers/include/linux/asm/atomic_32.h |
---|
File deleted |
/drivers/include/linux/asm/asm.h |
---|
File deleted |
/drivers/include/linux/asm/posix_types.h |
---|
File deleted |
/drivers/include/linux/asm/bitsperlong.h |
---|
File deleted |
/drivers/include/linux/asm/cmpxchg.h |
---|
File deleted |
/drivers/include/linux/asm/posix_types_32.h |
---|
File deleted |
/drivers/include/linux/asm/required-features.h |
---|
File deleted |
/drivers/include/linux/asm/swab.h |
---|
File deleted |
/drivers/include/linux/asm/div64.h |
---|
File deleted |
/drivers/include/linux/asm/cmpxchg_32.h |
---|
File deleted |
/drivers/include/linux/asm/byteorder.h |
---|
File deleted |
/drivers/include/linux/asm |
---|
Property changes: |
Deleted: svn:ignore |
-*.o |
-*.obj |
/drivers/include/linux/asm-generic/types.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitsperlong.h |
---|
File deleted |
/drivers/include/linux/asm-generic/int-ll64.h |
---|
File deleted |
/drivers/include/linux/asm-generic/atomic-long.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitops/ext2-non-atomic.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitops/minix.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitops/sched.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitops/fls64.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitops/hweight.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitops/le.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitops |
---|
Property changes: |
Deleted: svn:ignore |
-*.o |
-*.obj |
/drivers/include/linux/asm-generic |
---|
Property changes: |
Deleted: svn:ignore |
-*.o |
-*.obj |
/drivers/include/linux/agp_backend.h |
---|
0,0 → 1,109 |
/* |
* AGPGART backend specific includes. Not for userspace consumption. |
* |
* Copyright (C) 2004 Silicon Graphics, Inc. |
* Copyright (C) 2002-2003 Dave Jones |
* Copyright (C) 1999 Jeff Hartmann |
* Copyright (C) 1999 Precision Insight, Inc. |
* Copyright (C) 1999 Xi Graphics, Inc. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included |
* in all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE |
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
*/ |
#ifndef _AGP_BACKEND_H |
#define _AGP_BACKEND_H 1 |
#include <linux/list.h> |
enum chipset_type { |
NOT_SUPPORTED, |
SUPPORTED, |
}; |
struct agp_version { |
u16 major; |
u16 minor; |
}; |
struct agp_kern_info { |
struct agp_version version; |
struct pci_dev *device; |
enum chipset_type chipset; |
unsigned long mode; |
unsigned long aper_base; |
size_t aper_size; |
int max_memory; /* In pages */ |
int current_memory; |
bool cant_use_aperture; |
unsigned long page_mask; |
const struct vm_operations_struct *vm_ops; |
}; |
/* |
* The agp_memory structure has information about the block of agp memory |
* allocated. A caller may manipulate the next and prev pointers to link |
* each allocated item into a list. These pointers are ignored by the backend. |
* Everything else should never be written to, but the caller may read any of |
* the items to determine the status of this block of agp memory. |
*/ |
struct agp_bridge_data; |
struct agp_memory { |
struct agp_memory *next; |
struct agp_memory *prev; |
struct agp_bridge_data *bridge; |
struct page **pages; |
size_t page_count; |
int key; |
int num_scratch_pages; |
off_t pg_start; |
u32 type; |
u32 physical; |
bool is_bound; |
bool is_flushed; |
/* list of agp_memory mapped to the aperture */ |
struct list_head mapped_list; |
/* DMA-mapped addresses */ |
struct scatterlist *sg_list; |
int num_sg; |
}; |
#define AGP_NORMAL_MEMORY 0 |
#define AGP_USER_TYPES (1 << 16) |
#define AGP_USER_MEMORY (AGP_USER_TYPES) |
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) |
extern struct agp_bridge_data *agp_bridge; |
extern struct list_head agp_bridges; |
extern struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *); |
extern void agp_free_memory(struct agp_memory *); |
extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, u32); |
extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); |
extern int agp_bind_memory(struct agp_memory *, off_t); |
extern int agp_unbind_memory(struct agp_memory *); |
extern void agp_enable(struct agp_bridge_data *, u32); |
extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); |
extern void agp_backend_release(struct agp_bridge_data *); |
#endif /* _AGP_BACKEND_H */ |
/drivers/include/linux/async.h |
---|
0,0 → 1,50 |
/* |
* async.h: Asynchronous function calls for boot performance |
* |
* (C) Copyright 2009 Intel Corporation |
* Author: Arjan van de Ven <arjan@linux.intel.com> |
* |
* This program is free software; you can redistribute it and/or |
* modify it under the terms of the GNU General Public License |
* as published by the Free Software Foundation; version 2 |
* of the License. |
*/ |
#ifndef __ASYNC_H__ |
#define __ASYNC_H__ |
#include <linux/types.h> |
#include <linux/list.h> |
typedef u64 async_cookie_t; |
typedef void (*async_func_t) (void *data, async_cookie_t cookie); |
struct async_domain { |
struct list_head pending; |
unsigned registered:1; |
}; |
/* |
* domain participates in global async_synchronize_full |
*/ |
#define ASYNC_DOMAIN(_name) \ |
struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \ |
.registered = 1 } |
/* |
* domain is free to go out of scope as soon as all pending work is |
* complete, this domain does not participate in async_synchronize_full |
*/ |
#define ASYNC_DOMAIN_EXCLUSIVE(_name) \ |
struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \ |
.registered = 0 } |
extern async_cookie_t async_schedule(async_func_t func, void *data); |
extern async_cookie_t async_schedule_domain(async_func_t func, void *data, |
struct async_domain *domain); |
void async_unregister_domain(struct async_domain *domain); |
extern void async_synchronize_full(void); |
extern void async_synchronize_full_domain(struct async_domain *domain); |
extern void async_synchronize_cookie(async_cookie_t cookie); |
extern void async_synchronize_cookie_domain(async_cookie_t cookie, |
struct async_domain *domain); |
extern bool current_is_async(void); |
#endif |
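A usage sketch with hypothetical device types: work is fired with async_schedule() and the caller rendezvouses before returning:

	static void probe_one(void *data, async_cookie_t cookie)
	{
		init_device(data);	/* hypothetical per-device init */
	}

	void probe_all(struct my_dev **devs, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			async_schedule(probe_one, devs[i]);
		/* block until every outstanding async function has run */
		async_synchronize_full();
	}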
/drivers/include/linux/atomic.h |
---|
0,0 → 1,131 |
/* Atomic operations usable in machine independent code */ |
#ifndef _LINUX_ATOMIC_H |
#define _LINUX_ATOMIC_H |
#include <asm/atomic.h> |
/** |
* atomic_add_unless - add unless the number is already a given value |
* @v: pointer of type atomic_t |
* @a: the amount to add to v... |
* @u: ...unless v is equal to u. |
* |
* Atomically adds @a to @v, so long as @v was not already @u. |
* Returns non-zero if @v was not @u, and zero otherwise. |
*/ |
static inline int atomic_add_unless(atomic_t *v, int a, int u) |
{ |
return __atomic_add_unless(v, a, u) != u; |
} |
/** |
* atomic_inc_not_zero - increment unless the number is zero |
* @v: pointer of type atomic_t |
* |
* Atomically increments @v by 1, so long as @v is non-zero. |
* Returns non-zero if @v was non-zero, and zero otherwise. |
*/ |
#ifndef atomic_inc_not_zero |
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
#endif |
/** |
* atomic_inc_not_zero_hint - increment if not null |
* @v: pointer of type atomic_t |
* @hint: probable value of the atomic before the increment |
* |
* This version of atomic_inc_not_zero() gives a hint of probable |
* value of the atomic. This helps the processor avoid reading the memory |
* before doing the atomic read/modify/write cycle, lowering the |
* number of bus transactions on some arches. |
* |
* Returns: 0 if increment was not done, 1 otherwise. |
*/ |
#ifndef atomic_inc_not_zero_hint |
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) |
{ |
int val, c = hint; |
/* sanity test, should be removed by compiler if hint is a constant */ |
if (!hint) |
return atomic_inc_not_zero(v); |
do { |
val = atomic_cmpxchg(v, c, c + 1); |
if (val == c) |
return 1; |
c = val; |
} while (c); |
return 0; |
} |
#endif |
#ifndef atomic_inc_unless_negative |
static inline int atomic_inc_unless_negative(atomic_t *p) |
{ |
int v, v1; |
for (v = 0; v >= 0; v = v1) { |
v1 = atomic_cmpxchg(p, v, v + 1); |
if (likely(v1 == v)) |
return 1; |
} |
return 0; |
} |
#endif |
#ifndef atomic_dec_unless_positive |
static inline int atomic_dec_unless_positive(atomic_t *p) |
{ |
int v, v1; |
for (v = 0; v <= 0; v = v1) { |
v1 = atomic_cmpxchg(p, v, v - 1); |
if (likely(v1 == v)) |
return 1; |
} |
return 0; |
} |
#endif |
/* |
* atomic_dec_if_positive - decrement by 1 if old value positive |
* @v: pointer of type atomic_t |
* |
* The function returns the old value of *v minus 1, even if |
* the atomic variable, v, was not decremented. |
*/ |
#ifndef atomic_dec_if_positive |
static inline int atomic_dec_if_positive(atomic_t *v) |
{ |
int c, old, dec; |
c = atomic_read(v); |
for (;;) { |
dec = c - 1; |
if (unlikely(dec < 0)) |
break; |
old = atomic_cmpxchg((v), c, dec); |
if (likely(old == c)) |
break; |
c = old; |
} |
return dec; |
} |
#endif |
#ifndef CONFIG_ARCH_HAS_ATOMIC_OR |
static inline void atomic_or(int i, atomic_t *v) |
{ |
int old; |
int new; |
do { |
old = atomic_read(v); |
new = old | i; |
} while (atomic_cmpxchg(v, old, new) != old); |
} |
#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */ |
#include <asm-generic/atomic-long.h> |
#ifdef CONFIG_GENERIC_ATOMIC64 |
#include <asm-generic/atomic64.h> |
#endif |
#endif /* _LINUX_ATOMIC_H */ |
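A common refcounting pattern these helpers enable, sketched with a hypothetical object type:

	struct obj {
		atomic_t refcount;
	};

	/* take a reference only while the object is still live */
	static bool obj_tryget(struct obj *o)
	{
		return atomic_inc_not_zero(&o->refcount);
	}

	static void obj_put(struct obj *o)
	{
		if (atomic_dec_and_test(&o->refcount))
			kfree(o);	/* last reference dropped */
	}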
/drivers/include/linux/bitmap.h |
---|
45,6 → 45,7 |
* bitmap_set(dst, pos, nbits) Set specified bit area |
* bitmap_clear(dst, pos, nbits) Clear specified bit area |
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area |
* bitmap_find_next_zero_area_off(buf, len, pos, n, mask, off) as above, with offset |
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n |
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n |
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) |
60,6 → 61,7 |
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region |
* bitmap_release_region(bitmap, pos, order) Free specified bit region |
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region |
* bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex |
*/ |
/* |
114,12 → 116,37 |
extern void bitmap_set(unsigned long *map, unsigned int start, int len); |
extern void bitmap_clear(unsigned long *map, unsigned int start, int len); |
extern unsigned long bitmap_find_next_zero_area(unsigned long *map, |
extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, |
unsigned long size, |
unsigned long start, |
unsigned int nr, |
unsigned long align_mask); |
unsigned long align_mask, |
unsigned long align_offset); |
/** |
* bitmap_find_next_zero_area - find a contiguous aligned zero area |
* @map: The address to base the search on |
* @size: The bitmap size in bits |
* @start: The bitnumber to start searching at |
* @nr: The number of zeroed bits we're looking for |
* @align_mask: Alignment mask for zero area |
* |
* The @align_mask should be one less than a power of 2; the effect is that |
 * the bit offset of all zero areas this function finds is a multiple of |
 * that power of 2. A @align_mask of 0 means no alignment is required. |
*/ |
static inline unsigned long |
bitmap_find_next_zero_area(unsigned long *map, |
unsigned long size, |
unsigned long start, |
unsigned int nr, |
unsigned long align_mask) |
{ |
return bitmap_find_next_zero_area_off(map, size, start, nr, |
align_mask, 0); |
} |
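/* |
 * Illustrative sketch, not part of the original header: carving an |
 * aligned region out of a driver-private allocation bitmap. |
 * my_alloc_region() is a hypothetical name; 'align' must be a power of 2. |
 */ |
static int my_alloc_region(unsigned long *map, unsigned long size, |
                           unsigned int nr, unsigned long align) |
{ |
    unsigned long pos; |
    pos = bitmap_find_next_zero_area(map, size, 0, nr, align - 1); |
    if (pos >= size) |
        return -ENOMEM;  /* no free run of 'nr' aligned bits */ |
    bitmap_set(map, pos, nr); |
    return pos; |
} |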
extern int bitmap_scnprintf(char *buf, unsigned int len, |
const unsigned long *src, int nbits); |
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, |
145,6 → 172,8 |
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); |
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); |
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); |
extern int bitmap_print_to_pagebuf(bool list, char *buf, |
const unsigned long *maskp, int nmaskbits); |
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) |
#define BITMAP_LAST_WORD_MASK(nbits) \ |
/drivers/include/linux/bitops.h |
---|
18,9 → 18,12 |
* position @h. For example |
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. |
*/ |
#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) |
#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) |
#define GENMASK(h, l) \ |
(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) |
#define GENMASK_ULL(h, l) \ |
(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) |
extern unsigned int __sw_hweight8(unsigned int w); |
extern unsigned int __sw_hweight16(unsigned int w); |
extern unsigned int __sw_hweight32(unsigned int w); |
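/* |
 * Illustrative sketch, not part of the original header: GENMASK() as a |
 * bit-field accessor for a hypothetical 32-bit register layout. |
 * GENMASK(11, 4) expands to 0x00000ff0. |
 */ |
#define MY_REG_FREQ_MASK GENMASK(11, 4) /* hypothetical field */ |
static inline unsigned int my_reg_freq(u32 reg) |
{ |
    return (reg & MY_REG_FREQ_MASK) >> 4; |
} |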
/drivers/include/linux/bug.h |
---|
1,15 → 1,8 |
#ifndef _ASM_GENERIC_BUG_H |
#define _ASM_GENERIC_BUG_H |
//extern __printf(3, 4) |
//void warn_slowpath_fmt(const char *file, const int line, |
// const char *fmt, ...); |
//extern __printf(4, 5) |
//void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint, |
// const char *fmt, ...); |
#include <linux/compiler.h> |
//extern void warn_slowpath_null(const char *file, const int line); |
#define __WARN() printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__) |
//#define __WARN_printf(arg...) printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__) |
#define __WARN_printf(arg...) do { printf(arg); __WARN(); } while (0) |
61,18 → 54,66 |
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ |
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) |
/* Force a compilation error if condition is true, but also produce a |
result (of value 0 and type size_t), so the expression can be used |
e.g. in a structure initializer (or wherever else comma expressions |
aren't permitted). */ |
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) |
#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); })) |
#define printk_once(fmt, ...) \ |
({ \ |
static bool __print_once; \ |
\ |
if (!__print_once) { \ |
__print_once = true; \ |
printk(fmt, ##__VA_ARGS__); \ |
} \ |
}) |
/* |
* BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the |
* expression but avoids the generation of any code, even if that expression |
* has side-effects. |
*/ |
#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e)))) |
/** |
* BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied |
* error message. |
* @condition: the condition which the compiler should know is false. |
* |
* See BUILD_BUG_ON for description. |
*/ |
#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) |
/** |
* BUILD_BUG_ON - break compile if a condition is true. |
* @condition: the condition which the compiler should know is false. |
* |
* If you have some code which relies on certain constants being equal, or |
* some other compile-time-evaluated condition, you should use BUILD_BUG_ON to |
* detect if someone changes it. |
* |
* The implementation uses gcc's reluctance to create a negative array, but gcc |
* (as of 4.4) only emits that error for obvious cases (e.g. not arguments to |
* inline functions). Luckily, in 4.3 they added the "error" function |
* attribute just for this type of case. Thus, we use a negative sized array |
* (should always create an error on gcc versions older than 4.4) and then call |
* an undefined function with the error attribute (should always create an |
* error on gcc 4.3 and later). If for some reason, neither creates a |
* compile-time error, we'll still have a link-time error, which is harder to |
* track down. |
*/ |
#ifndef __OPTIMIZE__ |
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) |
#else |
#define BUILD_BUG_ON(condition) \ |
BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) |
#endif |
/** |
* BUILD_BUG - break compile if used. |
* |
* If you have some code that you expect the compiler to eliminate at |
* build time, you should use BUILD_BUG to detect if it is |
* unexpectedly used. |
*/ |
#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") |
#define pr_warn_once(fmt, ...) \ |
printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) |
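/* |
 * Illustrative sketch, not part of the original header: the usual way |
 * these checks guard hardware assumptions. struct my_hw_desc is a |
 * hypothetical descriptor layout. |
 */ |
struct my_hw_desc { |
    u32 addr; |
    u32 len; |
}; |
static inline void my_check_layout(void) |
{ |
    /* the (hypothetical) hardware expects exactly 8 bytes per entry */ |
    BUILD_BUG_ON(sizeof(struct my_hw_desc) != 8); |
    /* ring masking logic would assume a power-of-two size */ |
    BUILD_BUG_ON_NOT_POWER_OF_2(64); |
} |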
/drivers/include/linux/cache.h |
---|
0,0 → 1,67 |
#ifndef __LINUX_CACHE_H |
#define __LINUX_CACHE_H |
#include <uapi/linux/kernel.h> |
#include <asm/cache.h> |
#ifndef L1_CACHE_ALIGN |
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES) |
#endif |
#ifndef SMP_CACHE_BYTES |
#define SMP_CACHE_BYTES L1_CACHE_BYTES |
#endif |
#ifndef __read_mostly |
#define __read_mostly |
#endif |
#ifndef ____cacheline_aligned |
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) |
#endif |
#ifndef ____cacheline_aligned_in_smp |
#ifdef CONFIG_SMP |
#define ____cacheline_aligned_in_smp ____cacheline_aligned |
#else |
#define ____cacheline_aligned_in_smp |
#endif /* CONFIG_SMP */ |
#endif |
#ifndef __cacheline_aligned |
#define __cacheline_aligned \ |
__attribute__((__aligned__(SMP_CACHE_BYTES), \ |
__section__(".data..cacheline_aligned"))) |
#endif /* __cacheline_aligned */ |
#ifndef __cacheline_aligned_in_smp |
#ifdef CONFIG_SMP |
#define __cacheline_aligned_in_smp __cacheline_aligned |
#else |
#define __cacheline_aligned_in_smp |
#endif /* CONFIG_SMP */ |
#endif |
/* |
* The maximum alignment needed for some critical structures |
* These could be inter-node cacheline sizes/L3 cacheline |
* size etc. Define this in asm/cache.h for your arch |
*/ |
#ifndef INTERNODE_CACHE_SHIFT |
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT |
#endif |
#if !defined(____cacheline_internodealigned_in_smp) |
#if defined(CONFIG_SMP) |
#define ____cacheline_internodealigned_in_smp \ |
__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) |
#else |
#define ____cacheline_internodealigned_in_smp |
#endif |
#endif |
#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE |
#define cache_line_size() L1_CACHE_BYTES |
#endif |
#endif /* __LINUX_CACHE_H */ |
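/* |
 * Illustrative sketch, not part of the original header: separating two |
 * write-hot counters onto their own cache lines so CPUs updating them |
 * concurrently do not false-share. struct my_stats is hypothetical. |
 */ |
struct my_stats { |
    unsigned long rx_packets ____cacheline_aligned_in_smp; |
    unsigned long tx_packets ____cacheline_aligned_in_smp; |
}; |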
/drivers/include/linux/compiler-gcc4.h |
---|
71,7 → 71,6 |
* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 |
* |
* Work it around via a compiler barrier quirk suggested by Jakub Jelinek. |
* Fixed in GCC 4.8.2 and later versions. |
* |
* (asm goto is automatically volatile - the naming reflects this.) |
*/ |
/drivers/include/linux/compiler.h |
---|
186,6 → 186,80 |
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) |
#endif |
#include <uapi/linux/types.h> |
static __always_inline void data_access_exceeds_word_size(void) |
#ifdef __compiletime_warning |
__compiletime_warning("data access exceeds word size and won't be atomic") |
#endif |
; |
static __always_inline void data_access_exceeds_word_size(void) |
{ |
} |
static __always_inline void __read_once_size(volatile void *p, void *res, int size) |
{ |
switch (size) { |
case 1: *(__u8 *)res = *(volatile __u8 *)p; break; |
case 2: *(__u16 *)res = *(volatile __u16 *)p; break; |
case 4: *(__u32 *)res = *(volatile __u32 *)p; break; |
#ifdef CONFIG_64BIT |
case 8: *(__u64 *)res = *(volatile __u64 *)p; break; |
#endif |
default: |
barrier(); |
__builtin_memcpy((void *)res, (const void *)p, size); |
data_access_exceeds_word_size(); |
barrier(); |
} |
} |
static __always_inline void __assign_once_size(volatile void *p, void *res, int size) |
{ |
switch (size) { |
case 1: *(volatile __u8 *)p = *(__u8 *)res; break; |
case 2: *(volatile __u16 *)p = *(__u16 *)res; break; |
case 4: *(volatile __u32 *)p = *(__u32 *)res; break; |
#ifdef CONFIG_64BIT |
case 8: *(volatile __u64 *)p = *(__u64 *)res; break; |
#endif |
default: |
barrier(); |
__builtin_memcpy((void *)p, (const void *)res, size); |
data_access_exceeds_word_size(); |
barrier(); |
} |
} |
/* |
* Prevent the compiler from merging or refetching reads or writes. The |
* compiler is also forbidden from reordering successive instances of |
* READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the |
* compiler is aware of some particular ordering. One way to make the |
* compiler aware of ordering is to put the two invocations of READ_ONCE, |
* ASSIGN_ONCE or ACCESS_ONCE() in different C statements. |
* |
* In contrast to ACCESS_ONCE these two macros will also work on aggregate |
* data types like structs or unions. If the size of the accessed data |
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits) |
* READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a |
* compile-time warning. |
* |
* Their two major use cases are: (1) Mediating communication between |
* process-level code and irq/NMI handlers, all running on the same CPU, |
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise |
* mutilate accesses that either do not require ordering or that interact |
* with an explicit memory barrier or atomic instruction that provides the |
* required ordering. |
*/ |
#define READ_ONCE(x) \ |
({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) |
#define ASSIGN_ONCE(val, x) \ |
({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; }) |
#endif /* __KERNEL__ */ |
#endif /* __ASSEMBLY__ */ |
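/* |
 * Illustrative sketch, not part of the original header: READ_ONCE() / |
 * ASSIGN_ONCE() mediating a flag shared with an interrupt handler. The |
 * variable and functions are hypothetical. |
 */ |
static int my_stop_requested; |
static void my_irq_handler(void) |
{ |
    ASSIGN_ONCE(1, my_stop_requested); /* single, non-torn store */ |
} |
static void my_poll_loop(void) |
{ |
    /* forces a fresh load each iteration; no caching in a register */ |
    while (!READ_ONCE(my_stop_requested)) |
        ; /* spin */ |
} |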
/drivers/include/linux/completion.h |
---|
0,0 → 1,109 |
#ifndef __LINUX_COMPLETION_H |
#define __LINUX_COMPLETION_H |
/* |
* (C) Copyright 2001 Linus Torvalds |
* |
* Atomic wait-for-completion handler data structures. |
* See kernel/sched/completion.c for details. |
*/ |
#include <linux/wait.h> |
/* |
* struct completion - structure used to maintain state for a "completion" |
* |
* This is the opaque structure used to maintain the state for a "completion". |
* Completions currently use a FIFO to queue threads that have to wait for |
* the "completion" event. |
* |
* See also: complete(), wait_for_completion() (and friends _timeout, |
* _interruptible, _interruptible_timeout, and _killable), init_completion(), |
* reinit_completion(), and macros DECLARE_COMPLETION(), |
* DECLARE_COMPLETION_ONSTACK(). |
*/ |
struct completion { |
unsigned int done; |
wait_queue_head_t wait; |
}; |
#define COMPLETION_INITIALIZER(work) \ |
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } |
#define COMPLETION_INITIALIZER_ONSTACK(work) \ |
({ init_completion(&work); work; }) |
/** |
* DECLARE_COMPLETION - declare and initialize a completion structure |
* @work: identifier for the completion structure |
* |
* This macro declares and initializes a completion structure. Generally used |
* for static declarations. You should use the _ONSTACK variant for automatic |
* variables. |
*/ |
#define DECLARE_COMPLETION(work) \ |
struct completion work = COMPLETION_INITIALIZER(work) |
/* |
* Lockdep needs to run a non-constant initializer for on-stack |
* completions - so we use the _ONSTACK() variant for those that |
* are on the kernel stack: |
*/ |
/** |
* DECLARE_COMPLETION_ONSTACK - declare and initialize a completion structure |
* @work: identifier for the completion structure |
* |
* This macro declares and initializes a completion structure on the kernel |
* stack. |
*/ |
#ifdef CONFIG_LOCKDEP |
# define DECLARE_COMPLETION_ONSTACK(work) \ |
struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) |
#else |
# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) |
#endif |
/** |
* init_completion - Initialize a dynamically allocated completion |
* @x: pointer to completion structure that is to be initialized |
* |
* This inline function will initialize a dynamically created completion |
* structure. |
*/ |
static inline void init_completion(struct completion *x) |
{ |
x->done = 0; |
init_waitqueue_head(&x->wait); |
} |
/** |
* reinit_completion - reinitialize a completion structure |
* @x: pointer to completion structure that is to be reinitialized |
* |
* This inline function should be used to reinitialize a completion structure so it can |
* be reused. This is especially important after complete_all() is used. |
*/ |
static inline void reinit_completion(struct completion *x) |
{ |
x->done = 0; |
} |
extern void wait_for_completion(struct completion *); |
extern void wait_for_completion_io(struct completion *); |
extern int wait_for_completion_interruptible(struct completion *x); |
extern int wait_for_completion_killable(struct completion *x); |
extern unsigned long wait_for_completion_timeout(struct completion *x, |
unsigned long timeout); |
extern unsigned long wait_for_completion_io_timeout(struct completion *x, |
unsigned long timeout); |
extern long wait_for_completion_interruptible_timeout( |
struct completion *x, unsigned long timeout); |
extern long wait_for_completion_killable_timeout( |
struct completion *x, unsigned long timeout); |
extern bool try_wait_for_completion(struct completion *x); |
extern bool completion_done(struct completion *x); |
extern void complete(struct completion *); |
extern void complete_all(struct completion *); |
#endif |
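/* |
 * Illustrative sketch, not part of the original header: one thread |
 * signalling another that a (hypothetical) firmware load finished, |
 * with a bounded wait on the consumer side. |
 */ |
static DECLARE_COMPLETION(my_fw_done); |
static void my_fw_loader(void) |
{ |
    /* ... fetch and install firmware ... */ |
    complete(&my_fw_done); /* wake exactly one waiter */ |
} |
static int my_fw_wait(unsigned long timeout_jiffies) |
{ |
    /* wait_for_completion_timeout() returns remaining jiffies, 0 on timeout */ |
    if (!wait_for_completion_timeout(&my_fw_done, timeout_jiffies)) |
        return -ETIMEDOUT; |
    return 0; |
} |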
/drivers/include/linux/cpumask.h |
---|
0,0 → 1,999 |
#ifndef __LINUX_CPUMASK_H |
#define __LINUX_CPUMASK_H |
/* |
* Cpumasks provide a bitmap suitable for representing the |
 * set of CPUs in a system, one bit position per CPU number. In general, |
* only nr_cpu_ids (<= NR_CPUS) bits are valid. |
*/ |
#include <linux/kernel.h> |
#include <linux/threads.h> |
#include <linux/bitmap.h> |
#include <linux/bug.h> |
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; |
/** |
* cpumask_bits - get the bits in a cpumask |
* @maskp: the struct cpumask * |
* |
* You should only assume nr_cpu_ids bits of this mask are valid. This is |
* a macro so it's const-correct. |
*/ |
#define cpumask_bits(maskp) ((maskp)->bits) |
#if NR_CPUS == 1 |
#define nr_cpu_ids 1 |
#else |
extern int nr_cpu_ids; |
#endif |
#ifdef CONFIG_CPUMASK_OFFSTACK |
/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also, |
* not all bits may be allocated. */ |
#define nr_cpumask_bits nr_cpu_ids |
#else |
#define nr_cpumask_bits NR_CPUS |
#endif |
/* |
* The following particular system cpumasks and operations manage |
* possible, present, active and online cpus. |
* |
* cpu_possible_mask- has bit 'cpu' set iff cpu is populatable |
* cpu_present_mask - has bit 'cpu' set iff cpu is populated |
* cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler |
* cpu_active_mask - has bit 'cpu' set iff cpu available to migration |
* |
* If !CONFIG_HOTPLUG_CPU, present == possible, and active == online. |
* |
* The cpu_possible_mask is fixed at boot time, as the set of CPU id's |
* that it is possible might ever be plugged in at anytime during the |
* life of that system boot. The cpu_present_mask is dynamic(*), |
* representing which CPUs are currently plugged in. And |
* cpu_online_mask is the dynamic subset of cpu_present_mask, |
* indicating those CPUs available for scheduling. |
* |
* If HOTPLUG is enabled, then cpu_possible_mask is forced to have |
* all NR_CPUS bits set, otherwise it is just the set of CPUs that |
* ACPI reports present at boot. |
* |
* If HOTPLUG is enabled, then cpu_present_mask varies dynamically, |
* depending on what ACPI reports as currently plugged in, otherwise |
* cpu_present_mask is just a copy of cpu_possible_mask. |
* |
* (*) Well, cpu_present_mask is dynamic in the hotplug case. If not |
* hotplug, it's a copy of cpu_possible_mask, hence fixed at boot. |
* |
* Subtleties: |
* 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode |
* assumption that their single CPU is online. The UP |
* cpu_{online,possible,present}_masks are placebos. Changing them |
 * will have no useful effect on the following num_*_cpus() |
* and cpu_*() macros in the UP case. This ugliness is a UP |
* optimization - don't waste any instructions or memory references |
* asking if you're online or how many CPUs there are if there is |
* only one CPU. |
*/ |
extern const struct cpumask *const cpu_possible_mask; |
extern const struct cpumask *const cpu_online_mask; |
extern const struct cpumask *const cpu_present_mask; |
extern const struct cpumask *const cpu_active_mask; |
#if NR_CPUS > 1 |
#define num_online_cpus() cpumask_weight(cpu_online_mask) |
#define num_possible_cpus() cpumask_weight(cpu_possible_mask) |
#define num_present_cpus() cpumask_weight(cpu_present_mask) |
#define num_active_cpus() cpumask_weight(cpu_active_mask) |
#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) |
#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) |
#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) |
#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask) |
#else |
#define num_online_cpus() 1U |
#define num_possible_cpus() 1U |
#define num_present_cpus() 1U |
#define num_active_cpus() 1U |
#define cpu_online(cpu) ((cpu) == 0) |
#define cpu_possible(cpu) ((cpu) == 0) |
#define cpu_present(cpu) ((cpu) == 0) |
#define cpu_active(cpu) ((cpu) == 0) |
#endif |
/* verify cpu argument to cpumask_* operators */ |
static inline unsigned int cpumask_check(unsigned int cpu) |
{ |
#ifdef CONFIG_DEBUG_PER_CPU_MAPS |
WARN_ON_ONCE(cpu >= nr_cpumask_bits); |
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ |
return cpu; |
} |
#if NR_CPUS == 1 |
/* Uniprocessor. Assume all masks are "1". */ |
static inline unsigned int cpumask_first(const struct cpumask *srcp) |
{ |
return 0; |
} |
/* Valid inputs for n are -1 and 0. */ |
static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) |
{ |
return n+1; |
} |
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) |
{ |
return n+1; |
} |
static inline unsigned int cpumask_next_and(int n, |
const struct cpumask *srcp, |
const struct cpumask *andp) |
{ |
return n+1; |
} |
/* cpu must be a valid cpu, ie 0, so there's no other choice. */ |
static inline unsigned int cpumask_any_but(const struct cpumask *mask, |
unsigned int cpu) |
{ |
return 1; |
} |
static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) |
{ |
set_bit(0, cpumask_bits(dstp)); |
return 0; |
} |
#define for_each_cpu(cpu, mask) \ |
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) |
#define for_each_cpu_not(cpu, mask) \ |
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) |
#define for_each_cpu_and(cpu, mask, and) \ |
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and) |
#else |
/** |
* cpumask_first - get the first cpu in a cpumask |
* @srcp: the cpumask pointer |
* |
* Returns >= nr_cpu_ids if no cpus set. |
*/ |
static inline unsigned int cpumask_first(const struct cpumask *srcp) |
{ |
return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits); |
} |
/** |
* cpumask_next - get the next cpu in a cpumask |
* @n: the cpu prior to the place to search (ie. return will be > @n) |
* @srcp: the cpumask pointer |
* |
* Returns >= nr_cpu_ids if no further cpus set. |
*/ |
static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) |
{ |
/* -1 is a legal arg here. */ |
if (n != -1) |
cpumask_check(n); |
return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); |
} |
/** |
* cpumask_next_zero - get the next unset cpu in a cpumask |
* @n: the cpu prior to the place to search (ie. return will be > @n) |
* @srcp: the cpumask pointer |
* |
* Returns >= nr_cpu_ids if no further cpus unset. |
*/ |
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) |
{ |
/* -1 is a legal arg here. */ |
if (n != -1) |
cpumask_check(n); |
return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); |
} |
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); |
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); |
int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp); |
/** |
* for_each_cpu - iterate over every cpu in a mask |
* @cpu: the (optionally unsigned) integer iterator |
* @mask: the cpumask pointer |
* |
* After the loop, cpu is >= nr_cpu_ids. |
*/ |
#define for_each_cpu(cpu, mask) \ |
for ((cpu) = -1; \ |
(cpu) = cpumask_next((cpu), (mask)), \ |
(cpu) < nr_cpu_ids;) |
/** |
* for_each_cpu_not - iterate over every cpu in a complemented mask |
* @cpu: the (optionally unsigned) integer iterator |
* @mask: the cpumask pointer |
* |
* After the loop, cpu is >= nr_cpu_ids. |
*/ |
#define for_each_cpu_not(cpu, mask) \ |
for ((cpu) = -1; \ |
(cpu) = cpumask_next_zero((cpu), (mask)), \ |
(cpu) < nr_cpu_ids;) |
/** |
* for_each_cpu_and - iterate over every cpu in both masks |
* @cpu: the (optionally unsigned) integer iterator |
* @mask: the first cpumask pointer |
* @and: the second cpumask pointer |
* |
* This saves a temporary CPU mask in many places. It is equivalent to: |
* struct cpumask tmp; |
* cpumask_and(&tmp, &mask, &and); |
* for_each_cpu(cpu, &tmp) |
* ... |
* |
* After the loop, cpu is >= nr_cpu_ids. |
*/ |
#define for_each_cpu_and(cpu, mask, and) \ |
for ((cpu) = -1; \ |
(cpu) = cpumask_next_and((cpu), (mask), (and)), \ |
(cpu) < nr_cpu_ids;) |
#endif /* SMP */ |
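/* |
 * Illustrative sketch, not part of the original header: counting the |
 * CPUs of a caller-supplied affinity mask that are currently online. |
 * my_count_usable() is a hypothetical name. |
 */ |
static inline unsigned int my_count_usable(const struct cpumask *affinity) |
{ |
    unsigned int cpu, n = 0; |
    /* iterates over (affinity & cpu_online_mask) without a temporary */ |
    for_each_cpu_and(cpu, affinity, cpu_online_mask) |
        n++; |
    return n; |
} |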
#define CPU_BITS_NONE \ |
{ \ |
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ |
} |
#define CPU_BITS_CPU0 \ |
{ \ |
[0] = 1UL \ |
} |
/** |
* cpumask_set_cpu - set a cpu in a cpumask |
* @cpu: cpu number (< nr_cpu_ids) |
* @dstp: the cpumask pointer |
*/ |
static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) |
{ |
set_bit(cpumask_check(cpu), cpumask_bits(dstp)); |
} |
/** |
* cpumask_clear_cpu - clear a cpu in a cpumask |
* @cpu: cpu number (< nr_cpu_ids) |
* @dstp: the cpumask pointer |
*/ |
static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) |
{ |
clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); |
} |
/** |
* cpumask_test_cpu - test for a cpu in a cpumask |
* @cpu: cpu number (< nr_cpu_ids) |
* @cpumask: the cpumask pointer |
* |
* Returns 1 if @cpu is set in @cpumask, else returns 0 |
* |
* No static inline type checking - see Subtlety (1) above. |
*/ |
#define cpumask_test_cpu(cpu, cpumask) \ |
test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) |
/** |
* cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask |
* @cpu: cpu number (< nr_cpu_ids) |
* @cpumask: the cpumask pointer |
* |
* Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0 |
* |
* test_and_set_bit wrapper for cpumasks. |
*/ |
static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) |
{ |
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask)); |
} |
/** |
* cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask |
* @cpu: cpu number (< nr_cpu_ids) |
* @cpumask: the cpumask pointer |
* |
* Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0 |
* |
* test_and_clear_bit wrapper for cpumasks. |
*/ |
static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) |
{ |
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask)); |
} |
/** |
* cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask |
* @dstp: the cpumask pointer |
*/ |
static inline void cpumask_setall(struct cpumask *dstp) |
{ |
bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits); |
} |
/** |
* cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask |
* @dstp: the cpumask pointer |
*/ |
static inline void cpumask_clear(struct cpumask *dstp) |
{ |
bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits); |
} |
/** |
* cpumask_and - *dstp = *src1p & *src2p |
* @dstp: the cpumask result |
* @src1p: the first input |
* @src2p: the second input |
* |
* If *@dstp is empty, returns 0, else returns 1 |
*/ |
static inline int cpumask_and(struct cpumask *dstp, |
const struct cpumask *src1p, |
const struct cpumask *src2p) |
{ |
return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p), |
cpumask_bits(src2p), nr_cpumask_bits); |
} |
/** |
* cpumask_or - *dstp = *src1p | *src2p |
* @dstp: the cpumask result |
* @src1p: the first input |
* @src2p: the second input |
*/ |
static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, |
const struct cpumask *src2p) |
{ |
bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p), |
cpumask_bits(src2p), nr_cpumask_bits); |
} |
/** |
* cpumask_xor - *dstp = *src1p ^ *src2p |
* @dstp: the cpumask result |
* @src1p: the first input |
* @src2p: the second input |
*/ |
static inline void cpumask_xor(struct cpumask *dstp, |
const struct cpumask *src1p, |
const struct cpumask *src2p) |
{ |
bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p), |
cpumask_bits(src2p), nr_cpumask_bits); |
} |
/** |
* cpumask_andnot - *dstp = *src1p & ~*src2p |
* @dstp: the cpumask result |
* @src1p: the first input |
* @src2p: the second input |
* |
* If *@dstp is empty, returns 0, else returns 1 |
*/ |
static inline int cpumask_andnot(struct cpumask *dstp, |
const struct cpumask *src1p, |
const struct cpumask *src2p) |
{ |
return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p), |
cpumask_bits(src2p), nr_cpumask_bits); |
} |
/** |
* cpumask_complement - *dstp = ~*srcp |
* @dstp: the cpumask result |
* @srcp: the input to invert |
*/ |
static inline void cpumask_complement(struct cpumask *dstp, |
const struct cpumask *srcp) |
{ |
bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp), |
nr_cpumask_bits); |
} |
/** |
* cpumask_equal - *src1p == *src2p |
* @src1p: the first input |
* @src2p: the second input |
*/ |
static inline bool cpumask_equal(const struct cpumask *src1p, |
const struct cpumask *src2p) |
{ |
return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p), |
nr_cpumask_bits); |
} |
/** |
* cpumask_intersects - (*src1p & *src2p) != 0 |
* @src1p: the first input |
* @src2p: the second input |
*/ |
static inline bool cpumask_intersects(const struct cpumask *src1p, |
const struct cpumask *src2p) |
{ |
return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p), |
nr_cpumask_bits); |
} |
/** |
* cpumask_subset - (*src1p & ~*src2p) == 0 |
* @src1p: the first input |
* @src2p: the second input |
* |
* Returns 1 if *@src1p is a subset of *@src2p, else returns 0 |
*/ |
static inline int cpumask_subset(const struct cpumask *src1p, |
const struct cpumask *src2p) |
{ |
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p), |
nr_cpumask_bits); |
} |
/** |
* cpumask_empty - *srcp == 0 |
 * @srcp: the cpumask to check for being empty (all cpus < nr_cpu_ids clear). |
*/ |
static inline bool cpumask_empty(const struct cpumask *srcp) |
{ |
return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits); |
} |
/** |
* cpumask_full - *srcp == 0xFFFFFFFF... |
 * @srcp: the cpumask to check for being full (all cpus < nr_cpu_ids set). |
*/ |
static inline bool cpumask_full(const struct cpumask *srcp) |
{ |
return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits); |
} |
/** |
* cpumask_weight - Count of bits in *srcp |
* @srcp: the cpumask to count bits (< nr_cpu_ids) in. |
*/ |
static inline unsigned int cpumask_weight(const struct cpumask *srcp) |
{ |
return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits); |
} |
/** |
* cpumask_shift_right - *dstp = *srcp >> n |
* @dstp: the cpumask result |
* @srcp: the input to shift |
* @n: the number of bits to shift by |
*/ |
static inline void cpumask_shift_right(struct cpumask *dstp, |
const struct cpumask *srcp, int n) |
{ |
bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n, |
nr_cpumask_bits); |
} |
/** |
* cpumask_shift_left - *dstp = *srcp << n |
* @dstp: the cpumask result |
* @srcp: the input to shift |
* @n: the number of bits to shift by |
*/ |
static inline void cpumask_shift_left(struct cpumask *dstp, |
const struct cpumask *srcp, int n) |
{ |
bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n, |
nr_cpumask_bits); |
} |
/** |
* cpumask_copy - *dstp = *srcp |
* @dstp: the result |
* @srcp: the input cpumask |
*/ |
static inline void cpumask_copy(struct cpumask *dstp, |
const struct cpumask *srcp) |
{ |
bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); |
} |
/** |
* cpumask_any - pick a "random" cpu from *srcp |
* @srcp: the input cpumask |
* |
* Returns >= nr_cpu_ids if no cpus set. |
*/ |
#define cpumask_any(srcp) cpumask_first(srcp) |
/** |
* cpumask_first_and - return the first cpu from *srcp1 & *srcp2 |
* @src1p: the first input |
* @src2p: the second input |
* |
* Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). |
*/ |
#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p)) |
/** |
* cpumask_any_and - pick a "random" cpu from *mask1 & *mask2 |
* @mask1: the first input cpumask |
* @mask2: the second input cpumask |
* |
* Returns >= nr_cpu_ids if no cpus set. |
*/ |
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2)) |
/** |
* cpumask_of - the cpumask containing just a given cpu |
* @cpu: the cpu (<= nr_cpu_ids) |
*/ |
#define cpumask_of(cpu) (get_cpu_mask(cpu)) |
/** |
* cpumask_scnprintf - print a cpumask into a string as comma-separated hex |
* @buf: the buffer to sprintf into |
* @len: the length of the buffer |
* @srcp: the cpumask to print |
* |
* If len is zero, returns zero. Otherwise returns the length of the |
* (nul-terminated) @buf string. |
*/ |
static inline int cpumask_scnprintf(char *buf, int len, |
const struct cpumask *srcp) |
{ |
return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits); |
} |
/** |
* cpumask_parse_user - extract a cpumask from a user string |
* @buf: the buffer to extract from |
* @len: the length of the buffer |
* @dstp: the cpumask to set. |
* |
* Returns -errno, or 0 for success. |
*/ |
static inline int cpumask_parse_user(const char __user *buf, int len, |
struct cpumask *dstp) |
{ |
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); |
} |
/** |
* cpumask_parselist_user - extract a cpumask from a user string |
* @buf: the buffer to extract from |
* @len: the length of the buffer |
* @dstp: the cpumask to set. |
* |
* Returns -errno, or 0 for success. |
*/ |
static inline int cpumask_parselist_user(const char __user *buf, int len, |
struct cpumask *dstp) |
{ |
return bitmap_parselist_user(buf, len, cpumask_bits(dstp), |
nr_cpumask_bits); |
} |
/** |
* cpulist_scnprintf - print a cpumask into a string as comma-separated list |
* @buf: the buffer to sprintf into |
* @len: the length of the buffer |
* @srcp: the cpumask to print |
* |
* If len is zero, returns zero. Otherwise returns the length of the |
* (nul-terminated) @buf string. |
*/ |
static inline int cpulist_scnprintf(char *buf, int len, |
const struct cpumask *srcp) |
{ |
return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp), |
nr_cpumask_bits); |
} |
/** |
 * cpumask_parse - extract a cpumask from a string |
* @buf: the buffer to extract from |
* @dstp: the cpumask to set. |
* |
* Returns -errno, or 0 for success. |
*/ |
static inline int cpumask_parse(const char *buf, struct cpumask *dstp) |
{ |
char *nl = strchr(buf, '\n'); |
unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); |
return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); |
} |
/** |
* cpulist_parse - extract a cpumask from a user string of ranges |
* @buf: the buffer to extract from |
* @dstp: the cpumask to set. |
* |
* Returns -errno, or 0 for success. |
*/ |
static inline int cpulist_parse(const char *buf, struct cpumask *dstp) |
{ |
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); |
} |
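/* |
 * Illustrative sketch, not part of the original header: validating a |
 * range string such as "0-3,5" into a caller-provided mask. |
 * my_parse_cpus() is a hypothetical name. |
 */ |
static inline int my_parse_cpus(const char *s, struct cpumask *out) |
{ |
    int err = cpulist_parse(s, out); |
    if (err) |
        return err;     /* -errno on malformed input */ |
    if (cpumask_empty(out)) |
        return -EINVAL; /* reject an empty set */ |
    return 0; |
} |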
/** |
* cpumask_size - size to allocate for a 'struct cpumask' in bytes |
* |
* This will eventually be a runtime variable, depending on nr_cpu_ids. |
*/ |
static inline size_t cpumask_size(void) |
{ |
/* FIXME: Once all cpumask assignments are eliminated, this |
* can be nr_cpumask_bits */ |
return BITS_TO_LONGS(NR_CPUS) * sizeof(long); |
} |
/* |
* cpumask_var_t: struct cpumask for stack usage. |
* |
* Oh, the wicked games we play! In order to make kernel coding a |
* little more difficult, we typedef cpumask_var_t to an array or a |
* pointer: doing &mask on an array is a noop, so it still works. |
* |
* ie. |
* cpumask_var_t tmpmask; |
* if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) |
* return -ENOMEM; |
* |
* ... use 'tmpmask' like a normal struct cpumask * ... |
* |
* free_cpumask_var(tmpmask); |
* |
* |
 * However, there is one notable exception: alloc_cpumask_var() allocates |
 * only nr_cpumask_bits bits (whereas a real cpumask_t always has |
 * NR_CPUS bits). Therefore you must not dereference cpumask_var_t. |
* |
* cpumask_var_t tmpmask; |
* if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) |
* return -ENOMEM; |
* |
* var = *tmpmask; |
* |
 * This code performs an NR_CPUS-length memcpy and can lead to memory |
 * corruption. cpumask_copy() provides safe copy functionality instead. |
* |
 * Note that there is another evil here: if you define a cpumask_var_t |
 * as a percpu variable, the way the address of the cpumask structure |
 * is obtained determines which this_cpu_* operation must be |
 * used. Please use this_cpu_cpumask_var_t in those cases. The direct use |
* of this_cpu_ptr() or this_cpu_read() will lead to failures when the |
* other type of cpumask_var_t implementation is configured. |
*/ |
#ifdef CONFIG_CPUMASK_OFFSTACK |
typedef struct cpumask *cpumask_var_t; |
#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x) |
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); |
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); |
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); |
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); |
void alloc_bootmem_cpumask_var(cpumask_var_t *mask); |
void free_cpumask_var(cpumask_var_t mask); |
void free_bootmem_cpumask_var(cpumask_var_t mask); |
#else |
typedef struct cpumask cpumask_var_t[1]; |
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x) |
static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) |
{ |
return true; |
} |
static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, |
int node) |
{ |
return true; |
} |
static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) |
{ |
cpumask_clear(*mask); |
return true; |
} |
static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, |
int node) |
{ |
cpumask_clear(*mask); |
return true; |
} |
static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) |
{ |
} |
static inline void free_cpumask_var(cpumask_var_t mask) |
{ |
} |
static inline void free_bootmem_cpumask_var(cpumask_var_t mask) |
{ |
} |
#endif /* CONFIG_CPUMASK_OFFSTACK */ |
/* It's common to want to use cpu_all_mask in struct member initializers, |
* so it has to refer to an address rather than a pointer. */ |
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); |
#define cpu_all_mask to_cpumask(cpu_all_bits) |
/* First bits of cpu_bit_bitmap are in fact unset. */ |
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) |
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask) |
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask) |
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) |
/* Wrappers for arch boot code to manipulate normally-constant masks */ |
void set_cpu_possible(unsigned int cpu, bool possible); |
void set_cpu_present(unsigned int cpu, bool present); |
void set_cpu_online(unsigned int cpu, bool online); |
void set_cpu_active(unsigned int cpu, bool active); |
void init_cpu_present(const struct cpumask *src); |
void init_cpu_possible(const struct cpumask *src); |
void init_cpu_online(const struct cpumask *src); |
/** |
* to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * |
* @bitmap: the bitmap |
* |
* There are a few places where cpumask_var_t isn't appropriate and |
* static cpumasks must be used (eg. very early boot), yet we don't |
* expose the definition of 'struct cpumask'. |
* |
* This does the conversion, and can be used as a constant initializer. |
*/ |
#define to_cpumask(bitmap) \ |
((struct cpumask *)(1 ? (bitmap) \ |
: (void *)sizeof(__check_is_bitmap(bitmap)))) |
static inline int __check_is_bitmap(const unsigned long *bitmap) |
{ |
return 1; |
} |
/* |
* Special-case data structure for "single bit set only" constant CPU masks. |
* |
* We pre-generate all the 64 (or 32) possible bit positions, with enough |
* padding to the left and the right, and return the constant pointer |
* appropriately offset. |
*/ |
extern const unsigned long |
cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; |
static inline const struct cpumask *get_cpu_mask(unsigned int cpu) |
{ |
const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; |
p -= cpu / BITS_PER_LONG; |
return to_cpumask(p); |
} |
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) |
#if NR_CPUS <= BITS_PER_LONG |
#define CPU_BITS_ALL \ |
{ \ |
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ |
} |
#else /* NR_CPUS > BITS_PER_LONG */ |
#define CPU_BITS_ALL \ |
{ \ |
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ |
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ |
} |
#endif /* NR_CPUS > BITS_PER_LONG */ |
/** |
* cpumap_print_to_pagebuf - copies the cpumask into the buffer either |
* as comma-separated list of cpus or hex values of cpumask |
* @list: indicates whether the cpumap must be list |
* @mask: the cpumask to copy |
* @buf: the buffer to copy into |
* |
* Returns the length of the (null-terminated) @buf string, zero if |
* nothing is copied. |
*/ |
static inline ssize_t |
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) |
{ |
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask), |
nr_cpumask_bits); |
} |
/* |
* |
* From here down, all obsolete. Use cpumask_ variants! |
* |
*/ |
#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS |
#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) |
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) |
#if NR_CPUS <= BITS_PER_LONG |
#define CPU_MASK_ALL \ |
(cpumask_t) { { \ |
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ |
} } |
#else |
#define CPU_MASK_ALL \ |
(cpumask_t) { { \ |
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ |
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ |
} } |
#endif |
#define CPU_MASK_NONE \ |
(cpumask_t) { { \ |
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ |
} } |
#define CPU_MASK_CPU0 \ |
(cpumask_t) { { \ |
[0] = 1UL \ |
} } |
#if NR_CPUS == 1 |
#define first_cpu(src) ({ (void)(src); 0; }) |
#define next_cpu(n, src) ({ (void)(src); 1; }) |
#define any_online_cpu(mask) 0 |
#define for_each_cpu_mask(cpu, mask) \ |
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) |
#else /* NR_CPUS > 1 */ |
int __first_cpu(const cpumask_t *srcp); |
int __next_cpu(int n, const cpumask_t *srcp); |
#define first_cpu(src) __first_cpu(&(src)) |
#define next_cpu(n, src) __next_cpu((n), &(src)) |
#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask) |
#define for_each_cpu_mask(cpu, mask) \ |
for ((cpu) = -1; \ |
(cpu) = next_cpu((cpu), (mask)), \ |
(cpu) < NR_CPUS; ) |
#endif /* SMP */ |
#if NR_CPUS <= 64 |
#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) |
#else /* NR_CPUS > 64 */ |
int __next_cpu_nr(int n, const cpumask_t *srcp); |
#define for_each_cpu_mask_nr(cpu, mask) \ |
for ((cpu) = -1; \ |
(cpu) = __next_cpu_nr((cpu), &(mask)), \ |
(cpu) < nr_cpu_ids; ) |
#endif /* NR_CPUS > 64 */ |
#define cpus_addr(src) ((src).bits) |
#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) |
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) |
{ |
set_bit(cpu, dstp->bits); |
} |
#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst)) |
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp) |
{ |
clear_bit(cpu, dstp->bits); |
} |
#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS) |
static inline void __cpus_setall(cpumask_t *dstp, int nbits) |
{ |
bitmap_fill(dstp->bits, nbits); |
} |
#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS) |
static inline void __cpus_clear(cpumask_t *dstp, int nbits) |
{ |
bitmap_zero(dstp->bits, nbits); |
} |
/* No static inline type checking - see Subtlety (1) above. */ |
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits) |
#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask)) |
static inline int __cpu_test_and_set(int cpu, cpumask_t *addr) |
{ |
return test_and_set_bit(cpu, addr->bits); |
} |
#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS) |
static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, |
const cpumask_t *src2p, int nbits) |
{ |
return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); |
} |
#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS) |
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p, |
const cpumask_t *src2p, int nbits) |
{ |
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); |
} |
#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS) |
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, |
const cpumask_t *src2p, int nbits) |
{ |
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); |
} |
#define cpus_andnot(dst, src1, src2) \ |
__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS) |
static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, |
const cpumask_t *src2p, int nbits) |
{ |
return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); |
} |
#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS) |
static inline int __cpus_equal(const cpumask_t *src1p, |
const cpumask_t *src2p, int nbits) |
{ |
return bitmap_equal(src1p->bits, src2p->bits, nbits); |
} |
#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS) |
static inline int __cpus_intersects(const cpumask_t *src1p, |
const cpumask_t *src2p, int nbits) |
{ |
return bitmap_intersects(src1p->bits, src2p->bits, nbits); |
} |
#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS) |
static inline int __cpus_subset(const cpumask_t *src1p, |
const cpumask_t *src2p, int nbits) |
{ |
return bitmap_subset(src1p->bits, src2p->bits, nbits); |
} |
#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS) |
static inline int __cpus_empty(const cpumask_t *srcp, int nbits) |
{ |
return bitmap_empty(srcp->bits, nbits); |
} |
#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS) |
static inline int __cpus_weight(const cpumask_t *srcp, int nbits) |
{ |
return bitmap_weight(srcp->bits, nbits); |
} |
#define cpus_shift_left(dst, src, n) \ |
__cpus_shift_left(&(dst), &(src), (n), NR_CPUS) |
static inline void __cpus_shift_left(cpumask_t *dstp, |
const cpumask_t *srcp, int n, int nbits) |
{ |
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); |
} |
#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ |
#endif /* __LINUX_CPUMASK_H */ |
/drivers/include/linux/delay.h |
---|
7,6 → 7,49 |
* Delay routines, using a pre-computed "loops_per_jiffy" value. |
*/ |
#define usleep_range(min, max) udelay(max) |
#include <linux/kernel.h> |
extern unsigned long loops_per_jiffy; |
#include <asm/delay.h> |
/* |
* Using udelay() for intervals greater than a few milliseconds can |
* risk overflow for high loops_per_jiffy (high bogomips) machines. The |
* mdelay() provides a wrapper to prevent this. For delays greater |
* than MAX_UDELAY_MS milliseconds, the wrapper is used. Architecture |
* specific values can be defined in asm-???/delay.h as an override. |
* The 2nd mdelay() definition ensures GCC will optimize away the |
* while loop for the common cases where n <= MAX_UDELAY_MS -- Paul G. |
*/ |
#ifndef MAX_UDELAY_MS |
#define MAX_UDELAY_MS 5 |
#endif |
#ifndef mdelay |
#define mdelay(n) (\ |
(__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? udelay((n)*1000) : \ |
({unsigned long __ms=(n); while (__ms--) udelay(1000);})) |
#endif |
#ifndef ndelay |
static inline void ndelay(unsigned long x) |
{ |
udelay(DIV_ROUND_UP(x, 1000)); |
} |
#define ndelay(x) ndelay(x) |
#endif |
extern unsigned long lpj_fine; |
void calibrate_delay(void); |
void msleep(unsigned int msecs); |
unsigned long msleep_interruptible(unsigned int msecs); |
void usleep_range(unsigned long min, unsigned long max); |
static inline void ssleep(unsigned int seconds) |
{ |
msleep(seconds * 1000); |
} |
#endif /* defined(_LINUX_DELAY_H) */ |
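/* |
 * Illustrative sketch, not part of the original header: picking a delay |
 * primitive by context. my_wait_ready() and MY_READY are hypothetical; |
 * the poll below assumes it may run in atomic context, hence udelay(). |
 */ |
#define MY_READY 0x1 |
static inline int my_wait_ready(volatile u32 *reg) |
{ |
    int tries = 100; |
    while (tries--) { |
        if (*reg & MY_READY) |
            return 0; |
        udelay(10); /* busy-wait: safe in atomic context */ |
    } |
    return -ETIMEDOUT; /* in sleepable context prefer usleep_range() */ |
} |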
/drivers/include/linux/dma-buf.h |
---|
30,6 → 30,8 |
#include <linux/list.h> |
#include <linux/dma-mapping.h> |
#include <linux/fs.h> |
#include <linux/fence.h> |
#include <linux/wait.h> |
struct device; |
struct dma_buf; |
/drivers/include/linux/err.h |
---|
4,7 → 4,7 |
#include <linux/compiler.h> |
#include <linux/types.h> |
#include <errno.h> |
#include <asm/errno.h> |
/* |
* Kernel pointers have redundant information, so we can use a |
/drivers/include/linux/errno.h |
---|
1,116 → 1,32 |
#ifndef _ASM_GENERIC_ERRNO_H |
#define _ASM_GENERIC_ERRNO_H |
#ifndef _LINUX_ERRNO_H |
#define _LINUX_ERRNO_H |
#include <errno-base.h> |
#include <uapi/linux/errno.h> |
/* |
* These should never be seen by user programs. To return one of ERESTART* |
* codes, signal_pending() MUST be set. Note that ptrace can observe these |
* at syscall exit tracing, but they will never be left for the debugged user |
* process to see. |
*/ |
#define ERESTARTSYS 512 |
#define ERESTARTNOINTR 513 |
#define ERESTARTNOHAND 514 /* restart if no handler.. */ |
#define ENOIOCTLCMD 515 /* No ioctl command */ |
#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */ |
#define EPROBE_DEFER 517 /* Driver requests probe retry */ |
#define EOPENSTALE 518 /* open found a stale dentry */ |
#define EDEADLK 35 /* Resource deadlock would occur */ |
#define ENAMETOOLONG 36 /* File name too long */ |
#define ENOLCK 37 /* No record locks available */ |
#define ENOSYS 38 /* Function not implemented */ |
#define ENOTEMPTY 39 /* Directory not empty */ |
#define ELOOP 40 /* Too many symbolic links encountered */ |
#define EWOULDBLOCK EAGAIN /* Operation would block */ |
#define ENOMSG 42 /* No message of desired type */ |
#define EIDRM 43 /* Identifier removed */ |
#define ECHRNG 44 /* Channel number out of range */ |
#define EL2NSYNC 45 /* Level 2 not synchronized */ |
#define EL3HLT 46 /* Level 3 halted */ |
#define EL3RST 47 /* Level 3 reset */ |
#define ELNRNG 48 /* Link number out of range */ |
#define EUNATCH 49 /* Protocol driver not attached */ |
#define ENOCSI 50 /* No CSI structure available */ |
#define EL2HLT 51 /* Level 2 halted */ |
#define EBADE 52 /* Invalid exchange */ |
#define EBADR 53 /* Invalid request descriptor */ |
#define EXFULL 54 /* Exchange full */ |
#define ENOANO 55 /* No anode */ |
#define EBADRQC 56 /* Invalid request code */ |
#define EBADSLT 57 /* Invalid slot */ |
#define EDEADLOCK EDEADLK |
#define EBFONT 59 /* Bad font file format */ |
#define ENOSTR 60 /* Device not a stream */ |
#define ENODATA 61 /* No data available */ |
#define ETIME 62 /* Timer expired */ |
#define ENOSR 63 /* Out of streams resources */ |
#define ENONET 64 /* Machine is not on the network */ |
#define ENOPKG 65 /* Package not installed */ |
#define EREMOTE 66 /* Object is remote */ |
#define ENOLINK 67 /* Link has been severed */ |
#define EADV 68 /* Advertise error */ |
#define ESRMNT 69 /* Srmount error */ |
#define ECOMM 70 /* Communication error on send */ |
#define EPROTO 71 /* Protocol error */ |
#define EMULTIHOP 72 /* Multihop attempted */ |
#define EDOTDOT 73 /* RFS specific error */ |
#define EBADMSG 74 /* Not a data message */ |
#define EOVERFLOW 75 /* Value too large for defined data type */ |
#define ENOTUNIQ 76 /* Name not unique on network */ |
#define EBADFD 77 /* File descriptor in bad state */ |
#define EREMCHG 78 /* Remote address changed */ |
#define ELIBACC 79 /* Can not access a needed shared library */ |
#define ELIBBAD 80 /* Accessing a corrupted shared library */ |
#define ELIBSCN 81 /* .lib section in a.out corrupted */ |
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ |
#define ELIBEXEC 83 /* Cannot exec a shared library directly */ |
#define EILSEQ 84 /* Illegal byte sequence */ |
#define ERESTART 85 /* Interrupted system call should be restarted */ |
#define ESTRPIPE 86 /* Streams pipe error */ |
#define EUSERS 87 /* Too many users */ |
#define ENOTSOCK 88 /* Socket operation on non-socket */ |
#define EDESTADDRREQ 89 /* Destination address required */ |
#define EMSGSIZE 90 /* Message too long */ |
#define EPROTOTYPE 91 /* Protocol wrong type for socket */ |
#define ENOPROTOOPT 92 /* Protocol not available */ |
#define EPROTONOSUPPORT 93 /* Protocol not supported */ |
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ |
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ |
#define EPFNOSUPPORT 96 /* Protocol family not supported */ |
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ |
#define EADDRINUSE 98 /* Address already in use */ |
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ |
#define ENETDOWN 100 /* Network is down */ |
#define ENETUNREACH 101 /* Network is unreachable */ |
#define ENETRESET 102 /* Network dropped connection because of reset */ |
#define ECONNABORTED 103 /* Software caused connection abort */ |
#define ECONNRESET 104 /* Connection reset by peer */ |
#define ENOBUFS 105 /* No buffer space available */ |
#define EISCONN 106 /* Transport endpoint is already connected */ |
#define ENOTCONN 107 /* Transport endpoint is not connected */ |
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ |
#define ETOOMANYREFS 109 /* Too many references: cannot splice */ |
#define ETIMEDOUT 110 /* Connection timed out */ |
#define ECONNREFUSED 111 /* Connection refused */ |
#define EHOSTDOWN 112 /* Host is down */ |
#define EHOSTUNREACH 113 /* No route to host */ |
#define EALREADY 114 /* Operation already in progress */ |
#define EINPROGRESS 115 /* Operation now in progress */ |
#define ESTALE 116 /* Stale NFS file handle */ |
#define EUCLEAN 117 /* Structure needs cleaning */ |
#define ENOTNAM 118 /* Not a XENIX named type file */ |
#define ENAVAIL 119 /* No XENIX semaphores available */ |
#define EISNAM 120 /* Is a named type file */ |
#define EREMOTEIO 121 /* Remote I/O error */ |
#define EDQUOT 122 /* Quota exceeded */ |
#define ENOMEDIUM 123 /* No medium found */ |
#define EMEDIUMTYPE 124 /* Wrong medium type */ |
#define ECANCELED 125 /* Operation Canceled */ |
#define ENOKEY 126 /* Required key not available */ |
#define EKEYEXPIRED 127 /* Key has expired */ |
#define EKEYREVOKED 128 /* Key has been revoked */ |
#define EKEYREJECTED 129 /* Key was rejected by service */ |
/* for robust mutexes */ |
#define EOWNERDEAD 130 /* Owner died */ |
#define ENOTRECOVERABLE 131 /* State not recoverable */ |
#define ERFKILL 132 /* Operation not possible due to RF-kill */ |
/* Defined for the NFSv3 protocol */ |
#define EBADHANDLE 521 /* Illegal NFS file handle */ |
#define ENOTSYNC 522 /* Update synchronization mismatch */ |
#define EBADCOOKIE 523 /* Cookie is stale */ |
#define ENOTSUPP 524 /* Operation is not supported */ |
#define ETOOSMALL 525 /* Buffer or request is too small */ |
#define ESERVERFAULT 526 /* An untranslatable error occurred */ |
#define EBADTYPE 527 /* Type not supported by server */ |
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ |
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */ |
#endif |
/drivers/include/linux/fence.h |
---|
0,0 → 1,357 |
/* |
* Fence mechanism for dma-buf to allow for asynchronous dma access |
* |
* Copyright (C) 2012 Canonical Ltd |
* Copyright (C) 2012 Texas Instruments |
* |
* Authors: |
* Rob Clark <robdclark@gmail.com> |
* Maarten Lankhorst <maarten.lankhorst@canonical.com> |
* |
* This program is free software; you can redistribute it and/or modify it |
* under the terms of the GNU General Public License version 2 as published by |
* the Free Software Foundation. |
* |
* This program is distributed in the hope that it will be useful, but WITHOUT |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
* more details. |
*/ |
#ifndef __LINUX_FENCE_H |
#define __LINUX_FENCE_H |
#include <linux/err.h> |
#include <linux/wait.h> |
#include <linux/list.h> |
#include <linux/bitops.h> |
#include <linux/kref.h> |
#include <linux/sched.h> |
#include <linux/printk.h> |
#include <linux/rcupdate.h> |
struct fence; |
struct fence_ops; |
struct fence_cb; |
/** |
* struct fence - software synchronization primitive |
* @refcount: refcount for this fence |
* @ops: fence_ops associated with this fence |
* @rcu: used for releasing fence with kfree_rcu |
* @cb_list: list of all callbacks to call |
* @lock: spin_lock_irqsave used for locking |
* @context: execution context this fence belongs to, returned by |
* fence_context_alloc() |
* @seqno: the sequence number of this fence inside the execution context, |
* can be compared to decide which fence would be signaled later. |
* @flags: A mask of FENCE_FLAG_* defined below |
* @timestamp: Timestamp when the fence was signaled. |
* @status: Optional, only valid if < 0, must be set before calling |
* fence_signal, indicates that the fence has completed with an error. |
* |
* the flags member must be manipulated and read using the appropriate |
* atomic ops (bit_*), so taking the spinlock will not be needed most |
* of the time. |
* |
* FENCE_FLAG_SIGNALED_BIT - fence is already signaled |
* FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called* |
* FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the |
* implementer of the fence for its own purposes. Can be used in different |
* ways by different fence implementers, so do not rely on this. |
* |
* *) Since atomic bitops are used, this is not guaranteed to be the case. |
* Particularly, if the bit was set, but fence_signal was called right |
* before this bit was set, it would have been able to set the |
* FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. |
* Adding a check for FENCE_FLAG_SIGNALED_BIT after setting |
* FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that |
* after fence_signal was called, any enable_signaling call will have either |
* been completed, or never called at all. |
*/ |
struct fence { |
struct kref refcount; |
const struct fence_ops *ops; |
struct rcu_head rcu; |
struct list_head cb_list; |
spinlock_t *lock; |
unsigned context, seqno; |
unsigned long flags; |
// ktime_t timestamp; |
int status; |
}; |
enum fence_flag_bits { |
FENCE_FLAG_SIGNALED_BIT, |
FENCE_FLAG_ENABLE_SIGNAL_BIT, |
FENCE_FLAG_USER_BITS, /* must always be last member */ |
}; |
typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb); |
/** |
* struct fence_cb - callback for fence_add_callback |
* @node: used by fence_add_callback to append this struct to fence::cb_list |
* @func: fence_func_t to call |
* |
* This struct will be initialized by fence_add_callback, additional |
* data can be passed along by embedding fence_cb in another struct. |
*/ |
struct fence_cb { |
struct list_head node; |
fence_func_t func; |
}; |
/** |
* struct fence_ops - operations implemented for fence |
* @get_driver_name: returns the driver name. |
* @get_timeline_name: return the name of the context this fence belongs to. |
* @enable_signaling: enable software signaling of fence. |
* @signaled: [optional] peek whether the fence is signaled, can be null. |
* @wait: custom wait implementation, or fence_default_wait. |
* @release: [optional] called on destruction of fence, can be null |
* @fill_driver_data: [optional] callback to fill in free-form debug info |
* Returns amount of bytes filled, or -errno. |
* @fence_value_str: [optional] fills in the value of the fence as a string |
* @timeline_value_str: [optional] fills in the current value of the timeline |
* as a string |
* |
* Notes on enable_signaling: |
* For fence implementations that have the capability for hw->hw |
* signaling, they can implement this op to enable the necessary |
* irqs, or insert commands into cmdstream, etc. This is called |
* in the first wait() or add_callback() path to let the fence |
* implementation know that there is another driver waiting on |
* the signal (ie. hw->sw case). |
* |
* This function can be called from atomic context, but not |
* from irq context, so normal spinlocks can be used. |
* |
* A return value of false indicates the fence already passed, |
* or some failure occurred that made it impossible to enable |
* signaling. True indicates successful enabling. |
* |
* fence->status may be set in enable_signaling, but only when false is |
* returned. |
* |
* Calling fence_signal before enable_signaling is called allows |
* for a tiny race window in which enable_signaling is called during, |
* before, or after fence_signal. To fight this, it is recommended |
* that before enable_signaling returns true an extra reference is |
* taken on the fence, to be released when the fence is signaled. |
* This will mean fence_signal will still be called twice, but |
* the second time will be a noop since it was already signaled. |
* |
* Notes on signaled: |
* May set fence->status if returning true. |
* |
* Notes on wait: |
* Must not be NULL; set to fence_default_wait for the default implementation. |
* The fence_default_wait implementation should work for any fence, as long |
* as enable_signaling works correctly. |
* |
* Must return -ERESTARTSYS if intr is true and the wait was interrupted, |
* the remaining jiffies if the fence has signaled, or 0 if the wait |
* timed out. Can also return other error values on custom implementations, |
* which should be treated as if the fence is signaled. For example a hardware |
* lockup could be reported like that. |
* |
* Notes on release: |
* Can be NULL, this function allows additional commands to run on |
* destruction of the fence. Can be called from irq context. |
* If pointer is set to NULL, kfree will get called instead. |
*/ |
struct fence_ops { |
const char * (*get_driver_name)(struct fence *fence); |
const char * (*get_timeline_name)(struct fence *fence); |
bool (*enable_signaling)(struct fence *fence); |
bool (*signaled)(struct fence *fence); |
signed long (*wait)(struct fence *fence, bool intr, signed long timeout); |
void (*release)(struct fence *fence); |
int (*fill_driver_data)(struct fence *fence, void *data, int size); |
void (*fence_value_str)(struct fence *fence, char *str, int size); |
void (*timeline_value_str)(struct fence *fence, char *str, int size); |
}; |
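/* |
* Editor's sketch (not part of this header): the race-closing pattern |
* described in the notes above. After FENCE_FLAG_ENABLE_SIGNAL_BIT is |
* set, the signaled bit is re-checked so a concurrent fence_signal() is |
* never missed. Locking around ops->enable_signaling (fence->lock) is |
* elided here; the real helpers declared below take care of it. |
*/ |
static inline bool example_enable_signaling(struct fence *fence) |
{ |
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
return false; /* nothing left to signal */ |
if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) && |
!fence->ops->enable_signaling(fence)) |
return false; /* fence already passed, or enabling failed */ |
/* re-check: fence_signal() may have run while we were enabling */ |
return !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags); |
} |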
void fence_init(struct fence *fence, const struct fence_ops *ops, |
spinlock_t *lock, unsigned context, unsigned seqno); |
void fence_release(struct kref *kref); |
void fence_free(struct fence *fence); |
/** |
* fence_get - increases refcount of the fence |
* @fence: [in] fence to increase refcount of |
* |
* Returns the same fence, with refcount increased by 1. |
*/ |
static inline struct fence *fence_get(struct fence *fence) |
{ |
if (fence) |
kref_get(&fence->refcount); |
return fence; |
} |
/** |
* fence_get_rcu - get a fence from a reservation_object_list with rcu read lock |
* @fence: [in] fence to increase refcount of |
* |
* Function returns NULL if no refcount could be obtained, or the fence. |
*/ |
static inline struct fence *fence_get_rcu(struct fence *fence) |
{ |
if (kref_get_unless_zero(&fence->refcount)) |
return fence; |
else |
return NULL; |
} |
/** |
* fence_put - decreases refcount of the fence |
* @fence: [in] fence to reduce refcount of |
*/ |
static inline void fence_put(struct fence *fence) |
{ |
if (fence) |
kref_put(&fence->refcount, fence_release); |
} |
int fence_signal(struct fence *fence); |
int fence_signal_locked(struct fence *fence); |
signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout); |
int fence_add_callback(struct fence *fence, struct fence_cb *cb, |
fence_func_t func); |
bool fence_remove_callback(struct fence *fence, struct fence_cb *cb); |
void fence_enable_sw_signaling(struct fence *fence); |
/** |
* fence_is_signaled_locked - Return an indication if the fence is signaled yet. |
* @fence: [in] the fence to check |
* |
* Returns true if the fence was already signaled, false if not. Since this |
* function doesn't enable signaling, it is not guaranteed to ever return |
* true if fence_add_callback, fence_wait or fence_enable_sw_signaling |
* haven't been called before. |
* |
* This function requires fence->lock to be held. |
*/ |
static inline bool |
fence_is_signaled_locked(struct fence *fence) |
{ |
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
return true; |
if (fence->ops->signaled && fence->ops->signaled(fence)) { |
fence_signal_locked(fence); |
return true; |
} |
return false; |
} |
/** |
* fence_is_signaled - Return an indication if the fence is signaled yet. |
* @fence: [in] the fence to check |
* |
* Returns true if the fence was already signaled, false if not. Since this |
* function doesn't enable signaling, it is not guaranteed to ever return |
* true if fence_add_callback, fence_wait or fence_enable_sw_signaling |
* haven't been called before. |
* |
* It's recommended for seqno fences to call fence_signal when the |
* operation is complete; this makes it possible to prevent issues from |
* wraparound between time of issue and time of use by checking the return |
* value of this function before calling hardware-specific wait instructions. |
*/ |
static inline bool |
fence_is_signaled(struct fence *fence) |
{ |
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
return true; |
if (fence->ops->signaled && fence->ops->signaled(fence)) { |
fence_signal(fence); |
return true; |
} |
return false; |
} |
/** |
* fence_later - return the chronologically later fence |
* @f1: [in] the first fence from the same context |
* @f2: [in] the second fence from the same context |
* |
* Returns NULL if both fences are signaled, otherwise the fence that would be |
* signaled last. Both fences must be from the same context, since a seqno is |
* not re-used across contexts. |
*/ |
static inline struct fence *fence_later(struct fence *f1, struct fence *f2) |
{ |
if (WARN_ON(f1->context != f2->context)) |
return NULL; |
/* |
* can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been |
* set if enable_signaling wasn't called, and enabling that here is |
* overkill. |
*/ |
if (f2->seqno - f1->seqno <= INT_MAX) |
return fence_is_signaled(f2) ? NULL : f2; |
else |
return fence_is_signaled(f1) ? NULL : f1; |
} |
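/* |
* Editor's note (illustrative only): the unsigned subtraction above is |
* wraparound-safe; the same ordering test in isolation: |
*/ |
static inline bool example_seqno_is_later(unsigned a, unsigned b) |
{ |
return a - b <= INT_MAX; /* a is not earlier than b, even across wrap */ |
} |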
signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); |
/** |
* fence_wait - sleep until the fence gets signaled |
* @fence: [in] the fence to wait on |
* @intr: [in] if true, do an interruptible wait |
* |
* This function will return -ERESTARTSYS if interrupted by a signal, |
* or 0 if the fence was signaled. Other error values may be |
* returned on custom implementations. |
* |
* Performs a synchronous wait on this fence. It is assumed the caller |
* directly or indirectly holds a reference to the fence, otherwise the |
* fence might be freed before return, resulting in undefined behavior. |
*/ |
static inline signed long fence_wait(struct fence *fence, bool intr) |
{ |
signed long ret; |
/* Since fence_wait_timeout cannot timeout with |
* MAX_SCHEDULE_TIMEOUT, only valid return values are |
* -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. |
*/ |
ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); |
return ret < 0 ? ret : 0; |
} |
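/* |
* Editor's sketch: typical fence_wait() use honouring the reference rule |
* above (illustrative helper, not part of this header). |
*/ |
static inline int example_wait_on(struct fence *fence) |
{ |
signed long ret; |
fence_get(fence); /* keep the fence alive across the wait */ |
ret = fence_wait(fence, true); /* interruptible */ |
fence_put(fence); |
return ret < 0 ? (int)ret : 0; |
} |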
unsigned fence_context_alloc(unsigned num); |
#define FENCE_TRACE(f, fmt, args...) \ |
do { \ |
struct fence *__ff = (f); \ |
(void)__ff; /* tracing is stubbed out in this port */ \ |
} while (0) |
#define FENCE_WARN(f, fmt, args...) \ |
do { \ |
struct fence *__ff = (f); \ |
pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \ |
##args); \ |
} while (0) |
#define FENCE_ERR(f, fmt, args...) \ |
do { \ |
struct fence *__ff = (f); \ |
pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \ |
##args); \ |
} while (0) |
#endif /* __LINUX_FENCE_H */ |
/drivers/include/linux/gfp.h |
---|
0,0 → 1,239 |
#ifndef __LINUX_GFP_H |
#define __LINUX_GFP_H |
#include <linux/mmdebug.h> |
#include <linux/types.h> |
#include <linux/stddef.h> |
#include <linux/linkage.h> |
struct vm_area_struct; |
/* Plain integer GFP bitmasks. Do not use this directly. */ |
#define ___GFP_DMA 0x01u |
#define ___GFP_HIGHMEM 0x02u |
#define ___GFP_DMA32 0x04u |
#define ___GFP_MOVABLE 0x08u |
#define ___GFP_WAIT 0x10u |
#define ___GFP_HIGH 0x20u |
#define ___GFP_IO 0x40u |
#define ___GFP_FS 0x80u |
#define ___GFP_COLD 0x100u |
#define ___GFP_NOWARN 0x200u |
#define ___GFP_REPEAT 0x400u |
#define ___GFP_NOFAIL 0x800u |
#define ___GFP_NORETRY 0x1000u |
#define ___GFP_MEMALLOC 0x2000u |
#define ___GFP_COMP 0x4000u |
#define ___GFP_ZERO 0x8000u |
#define ___GFP_NOMEMALLOC 0x10000u |
#define ___GFP_HARDWALL 0x20000u |
#define ___GFP_THISNODE 0x40000u |
#define ___GFP_RECLAIMABLE 0x80000u |
#define ___GFP_NOTRACK 0x200000u |
#define ___GFP_NO_KSWAPD 0x400000u |
#define ___GFP_OTHER_NODE 0x800000u |
#define ___GFP_WRITE 0x1000000u |
/* If the above are modified, __GFP_BITS_SHIFT may need updating */ |
/* |
* GFP bitmasks.. |
* |
* Zone modifiers (see linux/mmzone.h - low three bits) |
* |
* Do not put any conditional on these. If necessary modify the definitions |
* without the underscores and use them consistently. The definitions here may |
* be used in bit comparisons. |
*/ |
#define __GFP_DMA ((__force gfp_t)___GFP_DMA) |
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) |
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) |
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */ |
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) |
/* |
* Action modifiers - doesn't change the zoning |
* |
* __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt |
* _might_ fail. This depends upon the particular VM implementation. |
* |
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller |
* cannot handle allocation failures. This modifier is deprecated and no new |
* users should be added. |
* |
* __GFP_NORETRY: The VM implementation must not retry indefinitely. |
* |
* __GFP_MOVABLE: Flag that this page will be movable by the page migration |
* mechanism or reclaimed |
*/ |
#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */ |
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */ |
#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */ |
#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */ |
#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */ |
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */ |
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */ |
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */ |
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */ |
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */ |
#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */ |
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */ |
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves. |
* This takes precedence over the |
* __GFP_MEMALLOC flag if both are |
* set |
*/ |
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */ |
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */ |
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */ |
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */ |
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD) |
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */ |
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ |
/* |
* This may seem redundant, but it's a way of annotating false positives vs. |
* allocations that simply cannot be supported (e.g. page tables). |
*/ |
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) |
#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */ |
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) |
/* This equals 0, but use constants in case they ever change */ |
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH) |
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */ |
#define GFP_ATOMIC (__GFP_HIGH) |
#define GFP_NOIO (__GFP_WAIT) |
#define GFP_NOFS (__GFP_WAIT | __GFP_IO) |
#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS) |
#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ |
__GFP_RECLAIMABLE) |
#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) |
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) |
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) |
#define GFP_IOFS (__GFP_IO | __GFP_FS) |
#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ |
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ |
__GFP_NO_KSWAPD) |
/* |
* GFP_THISNODE does not perform any reclaim, you most likely want to |
* use __GFP_THISNODE to allocate from a given node without fallback! |
*/ |
#ifdef CONFIG_NUMA |
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) |
#else |
#define GFP_THISNODE ((__force gfp_t)0) |
#endif |
/* This mask makes up all the page movable related flags */ |
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) |
/* Control page allocator reclaim behavior */ |
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ |
__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ |
__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) |
/* Control slab gfp mask during early boot */ |
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) |
/* Control allocation constraints */ |
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) |
/* Do not use these with a slab allocator */ |
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) |
/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some |
platforms, used as appropriate on others */ |
#define GFP_DMA __GFP_DMA |
/* 4GB DMA on some platforms */ |
#define GFP_DMA32 __GFP_DMA32 |
#ifdef CONFIG_HIGHMEM |
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM |
#else |
#define OPT_ZONE_HIGHMEM ZONE_NORMAL |
#endif |
#ifdef CONFIG_ZONE_DMA |
#define OPT_ZONE_DMA ZONE_DMA |
#else |
#define OPT_ZONE_DMA ZONE_NORMAL |
#endif |
#ifdef CONFIG_ZONE_DMA32 |
#define OPT_ZONE_DMA32 ZONE_DMA32 |
#else |
#define OPT_ZONE_DMA32 ZONE_NORMAL |
#endif |
/* |
* GFP_ZONE_TABLE is a word size bitstring that is used for looking up the |
* zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long |
* and there are 16 of them to cover all possible combinations of |
* __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM. |
* |
* The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA. |
* But GFP_MOVABLE is not only a zone specifier but also an allocation |
* policy. Therefore __GFP_MOVABLE plus another zone selector is valid. |
* Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1". |
* |
* bit result |
* ================= |
* 0x0 => NORMAL |
* 0x1 => DMA or NORMAL |
* 0x2 => HIGHMEM or NORMAL |
* 0x3 => BAD (DMA+HIGHMEM) |
* 0x4 => DMA32 or DMA or NORMAL |
* 0x5 => BAD (DMA+DMA32) |
* 0x6 => BAD (HIGHMEM+DMA32) |
* 0x7 => BAD (HIGHMEM+DMA32+DMA) |
* 0x8 => NORMAL (MOVABLE+0) |
* 0x9 => DMA or NORMAL (MOVABLE+DMA) |
* 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too) |
* 0xb => BAD (MOVABLE+HIGHMEM+DMA) |
* 0xc => DMA32 (MOVABLE+DMA32) |
* 0xd => BAD (MOVABLE+DMA32+DMA) |
* 0xe => BAD (MOVABLE+DMA32+HIGHMEM) |
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) |
* |
* ZONES_SHIFT must be <= 2 on 32 bit platforms. |
*/ |
#if 16 * ZONES_SHIFT > BITS_PER_LONG |
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer |
#endif |
#define GFP_ZONE_TABLE ( \ |
(ZONE_NORMAL << 0 * ZONES_SHIFT) \ |
| (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \ |
| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \ |
| (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \ |
| (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \ |
| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \ |
| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \ |
| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \ |
) |
/* |
* GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32 |
* __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per |
* entry starting with bit 0. Bit is set if the combination is not |
* allowed. |
*/ |
#define GFP_ZONE_BAD ( \ |
1 << (___GFP_DMA | ___GFP_HIGHMEM) \ |
| 1 << (___GFP_DMA | ___GFP_DMA32) \ |
| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \ |
| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \ |
| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \ |
| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \ |
| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \ |
| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \ |
) |
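/* |
* Editor's sketch of how the two tables above are consumed, mirroring the |
* kernel's gfp_zone(). ZONES_SHIFT and the zone constants come from |
* mmzone.h, which this port does not ship, so this is illustrative only. |
*/ |
static inline int example_gfp_zone(unsigned int flags) |
{ |
unsigned int bit = flags & GFP_ZONEMASK; |
int z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) & |
((1 << ZONES_SHIFT) - 1); |
VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1); /* reject the BAD combinations */ |
return z; |
} |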
#endif /* __LINUX_GFP_H */ |
/drivers/include/linux/hash.h |
---|
36,6 → 36,9 |
{ |
u64 hash = val; |
#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 |
hash = hash * GOLDEN_RATIO_PRIME_64; |
#else |
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */ |
u64 n = hash; |
n <<= 18; |
50,6 → 53,7 |
hash += n; |
n <<= 2; |
hash += n; |
#endif |
/* High bits are more random, so use them. */ |
return hash >> (64 - bits); |
78,4 → 82,5 |
#endif |
return (u32)val; |
} |
#endif /* _LINUX_HASH_H */ |
/drivers/include/linux/hdmi.h |
---|
1,9 → 1,24 |
/* |
* Copyright (C) 2012 Avionic Design GmbH |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License version 2 as |
* published by the Free Software Foundation. |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sub license, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
* DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef __LINUX_HDMI_H_ |
/drivers/include/linux/i2c.h |
---|
31,6 → 31,9 |
#include <linux/module.h> |
#include <linux/i2c-id.h> |
#include <linux/mod_devicetable.h> |
#include <linux/sched.h> /* for completion */ |
#include <linux/mutex.h> |
#include <linux/jiffies.h> |
extern struct bus_type i2c_bus_type; |
extern struct device_type i2c_adapter_type; |
139,6 → 142,8 |
* @irq: indicates the IRQ generated by this device (if any) |
* @detected: member of an i2c_driver.clients list or i2c-core's |
* userspace_devices list |
* @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter |
* calls it to pass on slave events to the slave driver. |
* |
* An i2c_client identifies a single device (i.e. chip) connected to an |
* i2c bus. The behaviour exposed to Linux is defined by the driver |
160,6 → 165,13 |
extern struct i2c_client *i2c_verify_client(struct device *dev); |
extern struct i2c_adapter *i2c_verify_adapter(struct device *dev); |
enum i2c_slave_event { |
I2C_SLAVE_REQ_READ_START, |
I2C_SLAVE_REQ_READ_END, |
I2C_SLAVE_REQ_WRITE_START, |
I2C_SLAVE_REQ_WRITE_END, |
I2C_SLAVE_STOP, |
}; |
/** |
* struct i2c_board_info - template for device creation |
* @type: chip type, to initialize i2c_client.name |
210,7 → 222,7 |
* to name two of the most common. |
* |
* The return codes from the @master_xfer field should indicate the type of |
* error code that occured during the transfer, as documented in the kernel |
* error code that occurred during the transfer, as documented in the kernel |
* Documentation file Documentation/i2c/fault-codes. |
*/ |
struct i2c_algorithm { |
230,6 → 242,12 |
u32 (*functionality) (struct i2c_adapter *); |
}; |
int i2c_recover_bus(struct i2c_adapter *adap); |
/* Generic recovery routines */ |
int i2c_generic_gpio_recovery(struct i2c_adapter *adap); |
int i2c_generic_scl_recovery(struct i2c_adapter *adap); |
/* |
* i2c_adapter is the structure used to identify a physical i2c bus along |
* with the access algorithms necessary to access it. |
/drivers/include/linux/idr.h |
---|
14,15 → 14,10 |
#include <syscall.h> |
#include <linux/types.h> |
#include <errno-base.h> |
#include <linux/bitops.h> |
//#include <linux/init.h> |
//#include <linux/rcupdate.h> |
#include <linux/spinlock.h> |
#include <linux/bitmap.h> |
#include <linux/bug.h> |
#include <linux/rcupdate.h> |
/* |
* We want shallower trees and thus more bits covered at each layer. 8 |
* bits gives us large enough first layer for most use cases and maximum |
/drivers/include/linux/irqflags.h |
---|
0,0 → 1,150 |
/* |
* include/linux/irqflags.h |
* |
* IRQ flags tracing: follow the state of the hardirq and softirq flags and |
* provide callbacks for transitions between ON and OFF states. |
* |
* This file gets included from lowlevel asm headers too, to provide |
* wrapped versions of the local_irq_*() APIs, based on the |
* raw_local_irq_*() macros from the lowlevel headers. |
*/ |
#ifndef _LINUX_TRACE_IRQFLAGS_H |
#define _LINUX_TRACE_IRQFLAGS_H |
#include <linux/typecheck.h> |
#include <asm/irqflags.h> |
#ifdef CONFIG_TRACE_IRQFLAGS |
extern void trace_softirqs_on(unsigned long ip); |
extern void trace_softirqs_off(unsigned long ip); |
extern void trace_hardirqs_on(void); |
extern void trace_hardirqs_off(void); |
# define trace_hardirq_context(p) ((p)->hardirq_context) |
# define trace_softirq_context(p) ((p)->softirq_context) |
# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled) |
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled) |
# define trace_hardirq_enter() do { current->hardirq_context++; } while (0) |
# define trace_hardirq_exit() do { current->hardirq_context--; } while (0) |
# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) |
# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) |
# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, |
#else |
# define trace_hardirqs_on() do { } while (0) |
# define trace_hardirqs_off() do { } while (0) |
# define trace_softirqs_on(ip) do { } while (0) |
# define trace_softirqs_off(ip) do { } while (0) |
# define trace_hardirq_context(p) 0 |
# define trace_softirq_context(p) 0 |
# define trace_hardirqs_enabled(p) 0 |
# define trace_softirqs_enabled(p) 0 |
# define trace_hardirq_enter() do { } while (0) |
# define trace_hardirq_exit() do { } while (0) |
# define lockdep_softirq_enter() do { } while (0) |
# define lockdep_softirq_exit() do { } while (0) |
# define INIT_TRACE_IRQFLAGS |
#endif |
#if defined(CONFIG_IRQSOFF_TRACER) || \ |
defined(CONFIG_PREEMPT_TRACER) |
extern void stop_critical_timings(void); |
extern void start_critical_timings(void); |
#else |
# define stop_critical_timings() do { } while (0) |
# define start_critical_timings() do { } while (0) |
#endif |
/* |
* Wrap the arch provided IRQ routines to provide appropriate checks. |
*/ |
#define raw_local_irq_disable() arch_local_irq_disable() |
#define raw_local_irq_enable() arch_local_irq_enable() |
#define raw_local_irq_save(flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
flags = arch_local_irq_save(); \ |
} while (0) |
#define raw_local_irq_restore(flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
arch_local_irq_restore(flags); \ |
} while (0) |
#define raw_local_save_flags(flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
flags = arch_local_save_flags(); \ |
} while (0) |
#define raw_irqs_disabled_flags(flags) \ |
({ \ |
typecheck(unsigned long, flags); \ |
arch_irqs_disabled_flags(flags); \ |
}) |
#define raw_irqs_disabled() (arch_irqs_disabled()) |
#define raw_safe_halt() arch_safe_halt() |
/* |
* The local_irq_*() APIs are equal to the raw_local_irq*() |
* if !TRACE_IRQFLAGS. |
*/ |
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT |
#define local_irq_enable() \ |
do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) |
#define local_irq_disable() \ |
do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) |
#define local_irq_save(flags) \ |
do { \ |
raw_local_irq_save(flags); \ |
trace_hardirqs_off(); \ |
} while (0) |
#define local_irq_restore(flags) \ |
do { \ |
if (raw_irqs_disabled_flags(flags)) { \ |
raw_local_irq_restore(flags); \ |
trace_hardirqs_off(); \ |
} else { \ |
trace_hardirqs_on(); \ |
raw_local_irq_restore(flags); \ |
} \ |
} while (0) |
#define local_save_flags(flags) \ |
do { \ |
raw_local_save_flags(flags); \ |
} while (0) |
#define irqs_disabled_flags(flags) \ |
({ \ |
raw_irqs_disabled_flags(flags); \ |
}) |
#define irqs_disabled() \ |
({ \ |
unsigned long _flags; \ |
raw_local_save_flags(_flags); \ |
raw_irqs_disabled_flags(_flags); \ |
}) |
#define safe_halt() \ |
do { \ |
trace_hardirqs_on(); \ |
raw_safe_halt(); \ |
} while (0) |
#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ |
#define local_irq_enable() do { raw_local_irq_enable(); } while (0) |
#define local_irq_disable() do { raw_local_irq_disable(); } while (0) |
#define local_irq_save(flags) \ |
do { \ |
raw_local_irq_save(flags); \ |
} while (0) |
#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) |
#define local_save_flags(flags) do { raw_local_save_flags(flags); } while (0) |
#define irqs_disabled() (raw_irqs_disabled()) |
#define irqs_disabled_flags(flags) (raw_irqs_disabled_flags(flags)) |
#define safe_halt() do { raw_safe_halt(); } while (0) |
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ |
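/* |
* Editor's sketch: the canonical save/restore pattern built from the |
* wrappers above (illustrative only). |
*/ |
static inline void example_irq_critical(void) |
{ |
unsigned long flags; |
local_irq_save(flags); /* disable IRQs and remember the prior state */ |
/* ... touch data shared with interrupt context ... */ |
local_irq_restore(flags); /* restore, possibly re-enabling IRQs */ |
} |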
#endif |
/drivers/include/linux/jiffies.h |
---|
77,8 → 77,8 |
* without sampling the sequence number in jiffies_lock. |
* get_jiffies_64() will do this for you as appropriate. |
*/ |
extern u64 jiffies_64; |
extern unsigned long volatile jiffies; |
extern u64 __jiffy_data jiffies_64; |
extern unsigned long volatile __jiffy_data jiffies; |
#if (BITS_PER_LONG < 64) |
u64 get_jiffies_64(void); |
262,24 → 262,12 |
#define SEC_JIFFIE_SC (32 - SHIFT_HZ) |
#endif |
#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29) |
#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19) |
#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\ |
TICK_NSEC -1) / (u64)TICK_NSEC)) |
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\ |
TICK_NSEC -1) / (u64)TICK_NSEC)) |
#define USEC_CONVERSION \ |
((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\ |
TICK_NSEC -1) / (u64)TICK_NSEC)) |
/* |
* USEC_ROUND is used in the timeval to jiffie conversion. See there |
* for more details. It is the scaled resolution rounding value. Note |
* that it is a 64-bit value. Since, when it is applied, we are already |
* in jiffies (albeit scaled), it is nothing but the bits we will shift |
* off. |
*/ |
#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1) |
/* |
* The maximum jiffie value is (MAX_INT >> 1). Here we translate that |
* into seconds. The 64-bit case will overflow if we are not careful, |
* so use the messy SH_DIV macro to do it. Still all constants. |
325,35 → 313,6 |
extern u64 nsecs_to_jiffies64(u64 n); |
extern unsigned long nsecs_to_jiffies(u64 n); |
static inline unsigned long round_jiffies_common(unsigned long j, bool force_up) |
{ |
int rem; |
unsigned long original = j; |
rem = j % HZ; |
/* |
* If the target jiffie is just after a whole second (which can happen |
* due to delays of the timer irq, long irq off times etc etc) then |
* we should round down to the whole second, not up. Use 1/4th second |
* as cutoff for this rounding as an extreme upper bound for this. |
* But never round down if @force_up is set. |
*/ |
if (rem < HZ/4 && !force_up) /* round down */ |
j = j - rem; |
else /* round up */ |
j = j - rem + HZ; |
if (j <= GetTimerTicks()) /* rounding ate our timeout entirely; */ |
return original; |
return j; |
} |
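/* |
* Editor's sketch: rounding a relative timeout to a whole second, the way |
* round_jiffies()/round_jiffies_up() wrap this helper upstream. |
* GetTimerTicks() is this port's jiffies source (illustrative only). |
*/ |
static inline unsigned long example_round_timeout(unsigned long delay) |
{ |
return round_jiffies_common(GetTimerTicks() + delay, false); |
} |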
unsigned long round_jiffies_up_relative(unsigned long j); |
#define TIMESTAMP_SIZE 30 |
#endif |
/drivers/include/linux/kernel.h |
---|
1,22 → 1,19 |
#ifndef _LINUX_KERNEL_H |
#define _LINUX_KERNEL_H |
/* |
* 'kernel.h' contains some often-used function prototypes etc |
*/ |
#ifdef __KERNEL__ |
#include <stdarg.h> |
#include <linux/linkage.h> |
#include <linux/stddef.h> |
#include <linux/types.h> |
#include <linux/compiler.h> |
#include <linux/bitops.h> |
#include <linux/errno.h> |
#include <linux/log2.h> |
#include <linux/typecheck.h> |
#include <linux/printk.h> |
#include <asm/byteorder.h> |
#include <uapi/linux/kernel.h> |
#define __init |
#define USHRT_MAX ((u16)(~0U)) |
#define SHRT_MAX ((s16)(USHRT_MAX>>1)) |
#define SHRT_MIN ((s16)(-SHRT_MAX - 1)) |
44,8 → 41,12 |
#define S64_MAX ((s64)(U64_MAX>>1)) |
#define S64_MIN ((s64)(-S64_MAX - 1)) |
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) |
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) |
#define STACK_MAGIC 0xdeadbeef |
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) |
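/* e.g. REPEAT_BYTE(0xab) is 0xabababab on 32 bit: ~0ul/0xff == 0x01010101 */ |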
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) |
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) |
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) |
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) |
114,14 → 115,23 |
} \ |
) |
#define clamp_t(type, val, min, max) ({ \ |
type __val = (val); \ |
type __min = (min); \ |
type __max = (max); \ |
__val = __val < __min ? __min: __val; \ |
__val > __max ? __max: __val; }) |
#define _RET_IP_ (unsigned long)__builtin_return_address(0) |
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) |
#ifdef CONFIG_LBDAF |
# include <asm/div64.h> |
# define sector_div(a, b) do_div(a, b) |
#else |
# define sector_div(n, b)( \ |
{ \ |
int _res; \ |
_res = (n) % (b); \ |
(n) /= (b); \ |
_res; \ |
} \ |
) |
#endif |
/** |
* upper_32_bits - return bits 32-63 of a number |
140,6 → 150,23 |
#define lower_32_bits(n) ((u32)(n)) |
/* |
* abs() handles unsigned and signed longs, ints, shorts and chars. For all |
* input types abs() returns a signed long. |
* abs() should not be used for 64-bit types (s64, u64, long long) - use abs64() |
* for those. |
*/ |
#define abs(x) ({ \ |
long ret; \ |
if (sizeof(x) == sizeof(long)) { \ |
long __x = (x); \ |
ret = (__x < 0) ? -__x : __x; \ |
} else { \ |
int __x = (x); \ |
ret = (__x < 0) ? -__x : __x; \ |
} \ |
ret; \ |
}) |
#define abs64(x) ({ \ |
s64 __x = (x); \ |
154,11 → 181,60 |
#define KERN_NOTICE "<5>" /* normal but significant condition */ |
#define KERN_INFO "<6>" /* informational */ |
#define KERN_DEBUG "<7>" /* debug-level messages */ |
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...); |
extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list); |
extern __printf(3, 4) |
int snprintf(char *buf, size_t size, const char *fmt, ...); |
extern __printf(3, 0) |
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); |
extern __printf(3, 4) |
int scnprintf(char *buf, size_t size, const char *fmt, ...); |
extern __printf(3, 0) |
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); |
extern __printf(2, 3) |
char *kasprintf(gfp_t gfp, const char *fmt, ...); |
extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); |
enum lockdep_ok { |
LOCKDEP_STILL_OK, |
LOCKDEP_NOW_UNRELIABLE |
}; |
extern void add_taint(unsigned flag, enum lockdep_ok); |
extern int test_taint(unsigned flag); |
extern unsigned long get_taint(void); |
extern int root_mountflags; |
extern bool early_boot_irqs_disabled; |
/* Values used for system_state */ |
extern enum system_states { |
SYSTEM_BOOTING, |
SYSTEM_RUNNING, |
SYSTEM_HALT, |
SYSTEM_POWER_OFF, |
SYSTEM_RESTART, |
} system_state; |
#define TAINT_PROPRIETARY_MODULE 0 |
#define TAINT_FORCED_MODULE 1 |
#define TAINT_CPU_OUT_OF_SPEC 2 |
#define TAINT_FORCED_RMMOD 3 |
#define TAINT_MACHINE_CHECK 4 |
#define TAINT_BAD_PAGE 5 |
#define TAINT_USER 6 |
#define TAINT_DIE 7 |
#define TAINT_OVERRIDDEN_ACPI_TABLE 8 |
#define TAINT_WARN 9 |
#define TAINT_CRAP 10 |
#define TAINT_FIRMWARE_WORKAROUND 11 |
#define TAINT_OOT_MODULE 12 |
#define TAINT_UNSIGNED_MODULE 13 |
#define TAINT_SOFTLOCKUP 14 |
extern const char hex_asc[]; |
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)] |
#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] |
static inline char *pack_hex_byte(char *buf, u8 byte) |
static inline char *hex_byte_pack(char *buf, u8 byte) |
{ |
*buf++ = hex_asc_hi(byte); |
*buf++ = hex_asc_lo(byte); |
165,25 → 241,223 |
return buf; |
} |
enum { |
DUMP_PREFIX_NONE, |
DUMP_PREFIX_ADDRESS, |
DUMP_PREFIX_OFFSET |
extern const char hex_asc_upper[]; |
#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)] |
#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4] |
static inline char *hex_byte_pack_upper(char *buf, u8 byte) |
{ |
*buf++ = hex_asc_upper_hi(byte); |
*buf++ = hex_asc_upper_lo(byte); |
return buf; |
} |
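/* |
* Editor's sketch: emitting a buffer as lower-case hex with the helper |
* above; bin2hex() below does the same without the NUL (illustrative). |
*/ |
static inline void example_bytes_to_hex(char *out, const u8 *src, size_t len) |
{ |
size_t i; |
for (i = 0; i < len; i++) |
out = hex_byte_pack(out, src[i]); |
*out = '\0'; |
} |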
extern int hex_to_bin(char ch); |
extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); |
extern char *bin2hex(char *dst, const void *src, size_t count); |
bool mac_pton(const char *s, u8 *mac); |
/* |
* General tracing related utility functions - trace_printk(), |
* tracing_on/tracing_off and tracing_start()/tracing_stop |
* |
* Use tracing_on/tracing_off when you want to quickly turn on or off |
* tracing. It simply enables or disables the recording of the trace events. |
* This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on |
* file, which gives a means for the kernel and userspace to interact. |
* Place a tracing_off() in the kernel where you want tracing to end. |
* From user space, examine the trace, and then echo 1 > tracing_on |
* to continue tracing. |
* |
* tracing_stop/tracing_start has slightly more overhead. It is used |
* by things like suspend to ram where disabling the recording of the |
* trace is not enough, but tracing must actually stop because things |
* like calling smp_processor_id() may crash the system. |
* |
* Most likely, you want to use tracing_on/tracing_off. |
*/ |
#ifdef CONFIG_RING_BUFFER |
/* tracing_off_permanent stops recording with no way to bring it back */ |
void tracing_off_permanent(void); |
#else |
static inline void tracing_off_permanent(void) { } |
#endif |
enum ftrace_dump_mode { |
DUMP_NONE, |
DUMP_ALL, |
DUMP_ORIG, |
}; |
int hex_to_bin(char ch); |
int hex2bin(u8 *dst, const char *src, size_t count); |
#ifdef CONFIG_TRACING |
void tracing_on(void); |
void tracing_off(void); |
int tracing_is_on(void); |
void tracing_snapshot(void); |
void tracing_snapshot_alloc(void); |
extern void tracing_start(void); |
extern void tracing_stop(void); |
//int printk(const char *fmt, ...); |
static inline __printf(1, 2) |
void ____trace_printk_check_format(const char *fmt, ...) |
{ |
} |
#define __trace_printk_check_format(fmt, args...) \ |
do { \ |
if (0) \ |
____trace_printk_check_format(fmt, ##args); \ |
} while (0) |
#define printk(fmt, arg...) dbgprintf(fmt , ##arg) |
/** |
* trace_printk - printf formatting in the ftrace buffer |
* @fmt: the printf format for printing |
* |
* Note: __trace_printk is an internal function for trace_printk and |
* the @ip is passed in via the trace_printk macro. |
* |
* This function allows a kernel developer to debug fast path sections |
* that printk is not appropriate for. By scattering in various |
* printk like tracing in the code, a developer can quickly see |
* where problems are occurring. |
* |
* This is intended as a debugging tool for the developer only. |
* Please refrain from leaving trace_printks scattered around in |
* your code. (Extra memory is used for special buffers that are |
* allocated when trace_printk() is used) |
* |
* A little optimization trick is done here. If there's only one |
* argument, there's no need to scan the string for printf formats. |
* The trace_puts() will suffice. But how can we take advantage of |
* using trace_puts() when trace_printk() has only one argument? |
* By stringifying the args and checking the size we can tell |
* whether or not there are args. __stringify((__VA_ARGS__)) will |
* turn into "()\0" with a size of 3 when there are no args, anything |
* else will be bigger. All we need to do is define a string to this, |
* and then take its size and compare to 3. If it's bigger, use |
* do_trace_printk() otherwise, optimize it to trace_puts(). Then just |
* let gcc optimize the rest. |
*/ |
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...); |
#define trace_printk(fmt, ...) \ |
do { \ |
char _______STR[] = __stringify((__VA_ARGS__)); \ |
if (sizeof(_______STR) > 3) \ |
do_trace_printk(fmt, ##__VA_ARGS__); \ |
else \ |
trace_puts(fmt); \ |
} while (0) |
#define do_trace_printk(fmt, args...) \ |
do { \ |
static const char *trace_printk_fmt \ |
__attribute__((section("__trace_printk_fmt"))) = \ |
__builtin_constant_p(fmt) ? fmt : NULL; \ |
\ |
__trace_printk_check_format(fmt, ##args); \ |
\ |
if (__builtin_constant_p(fmt)) \ |
__trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \ |
else \ |
__trace_printk(_THIS_IP_, fmt, ##args); \ |
} while (0) |
extern __printf(2, 3) |
char *kasprintf(gfp_t gfp, const char *fmt, ...); |
int __trace_bprintk(unsigned long ip, const char *fmt, ...); |
extern __printf(2, 3) |
int __trace_printk(unsigned long ip, const char *fmt, ...); |
/** |
* trace_puts - write a string into the ftrace buffer |
* @str: the string to record |
* |
* Note: __trace_bputs is an internal function for trace_puts and |
* the @ip is passed in via the trace_puts macro. |
* |
* This is similar to trace_printk() but is made for those really fast |
* paths where a developer wants the least amount of "Heisenbug" effects, |
* where the processing of the print format is still too much. |
* |
* This function allows a kernel developer to debug fast path sections |
* that printk is not appropriate for. By scattering in various |
* printk like tracing in the code, a developer can quickly see |
* where problems are occurring. |
* |
* This is intended as a debugging tool for the developer only. |
* Please refrain from leaving trace_puts scattered around in |
* your code. (Extra memory is used for special buffers that are |
* allocated when trace_puts() is used) |
* |
* Returns: 0 if nothing was written, positive # if string was. |
* (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) |
*/ |
#define trace_puts(str) ({ \ |
static const char *trace_printk_fmt \ |
__attribute__((section("__trace_printk_fmt"))) = \ |
__builtin_constant_p(str) ? str : NULL; \ |
\ |
if (__builtin_constant_p(str)) \ |
__trace_bputs(_THIS_IP_, trace_printk_fmt); \ |
else \ |
__trace_puts(_THIS_IP_, str, strlen(str)); \ |
}) |
extern int __trace_bputs(unsigned long ip, const char *str); |
extern int __trace_puts(unsigned long ip, const char *str, int size); |
extern void trace_dump_stack(int skip); |
/* |
* The double __builtin_constant_p is because gcc will give us an error |
* if we try to allocate the static variable to fmt if it is not a |
* constant. Even with the outer if statement. |
*/ |
#define ftrace_vprintk(fmt, vargs) \ |
do { \ |
if (__builtin_constant_p(fmt)) { \ |
static const char *trace_printk_fmt \ |
__attribute__((section("__trace_printk_fmt"))) = \ |
__builtin_constant_p(fmt) ? fmt : NULL; \ |
\ |
__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \ |
} else \ |
__ftrace_vprintk(_THIS_IP_, fmt, vargs); \ |
} while (0) |
extern int |
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); |
extern int |
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); |
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); |
#else |
static inline void tracing_start(void) { } |
static inline void tracing_stop(void) { } |
static inline void trace_dump_stack(int skip) { } |
static inline void tracing_on(void) { } |
static inline void tracing_off(void) { } |
static inline int tracing_is_on(void) { return 0; } |
static inline void tracing_snapshot(void) { } |
static inline void tracing_snapshot_alloc(void) { } |
static inline __printf(1, 2) |
int trace_printk(const char *fmt, ...) |
{ |
return 0; |
} |
static inline int |
ftrace_vprintk(const char *fmt, va_list ap) |
{ |
return 0; |
} |
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } |
#endif /* CONFIG_TRACING */ |
/* |
* min()/max()/clamp() macros that also do |
* strict type-checking.. See the |
* "unnecessary" pointer comparison. |
200,24 → 474,9 |
(void) (&_max1 == &_max2); \ |
_max1 > _max2 ? _max1 : _max2; }) |
#define min3(x, y, z) ({ \ |
typeof(x) _min1 = (x); \ |
typeof(y) _min2 = (y); \ |
typeof(z) _min3 = (z); \ |
(void) (&_min1 == &_min2); \ |
(void) (&_min1 == &_min3); \ |
_min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \ |
(_min2 < _min3 ? _min2 : _min3); }) |
#define min3(x, y, z) min((typeof(x))min(x, y), z) |
#define max3(x, y, z) max((typeof(x))max(x, y), z) |
#define max3(x, y, z) ({ \ |
typeof(x) _max1 = (x); \ |
typeof(y) _max2 = (y); \ |
typeof(z) _max3 = (z); \ |
(void) (&_max1 == &_max2); \ |
(void) (&_max1 == &_max3); \ |
_max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \ |
(_max2 > _max3 ? _max2 : _max3); }) |
/** |
* min_not_zero - return the minimum that is _not_ zero, unless both are zero |
* @x: value1 |
231,20 → 490,13 |
/** |
* clamp - return a value clamped to a given range with strict typechecking |
* @val: current value |
* @min: minimum allowable value |
* @max: maximum allowable value |
* @lo: lowest allowable value |
* @hi: highest allowable value |
* |
* This macro does strict typechecking of min/max to make sure they are of the |
* This macro does strict typechecking of lo/hi to make sure they are of the |
* same type as val. See the unnecessary pointer comparisons. |
*/ |
#define clamp(val, min, max) ({ \ |
typeof(val) __val = (val); \ |
typeof(min) __min = (min); \ |
typeof(max) __max = (max); \ |
(void) (&__val == &__min); \ |
(void) (&__val == &__max); \ |
__val = __val < __min ? __min: __val; \ |
__val > __max ? __max: __val; }) |
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) |
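/* |
* Editor's example: all three clamp() arguments must share one type; |
* mixing types trips the min()/max() pointer-comparison check. |
*/ |
static inline int example_clamp_level(int raw) |
{ |
return clamp(raw, 0, 255); /* ok: int, int, int */ |
} |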
/* |
* ..and if you can't take the strict |
263,6 → 515,38 |
__max1 > __max2 ? __max1: __max2; }) |
/** |
* clamp_t - return a value clamped to a given range using a given type |
* @type: the type of variable to use |
* @val: current value |
* @lo: minimum allowable value |
* @hi: maximum allowable value |
* |
* This macro does no typechecking and uses temporary variables of type |
* 'type' to make all the comparisons. |
*/ |
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) |
/** |
* clamp_val - return a value clamped to a given range using val's type |
* @val: current value |
* @lo: minimum allowable value |
* @hi: maximum allowable value |
* |
* This macro does no typechecking and uses temporary variables of whatever |
* type the input argument 'val' is. This is useful when val is an unsigned |
* type and lo and hi are literals that would otherwise be assigned a signed |
* integer type. |
*/ |
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) |
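/* |
* Editor's example: with a u8 value the integer literals below would fail |
* clamp()'s strict type check; clamp_val() compares in u8 instead. |
*/ |
static inline u8 example_clamp_u8(u8 raw) |
{ |
return clamp_val(raw, 32, 224); |
} |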
/* |
* swap - swap value of @a and @b |
*/ |
#define swap(a, b) \ |
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) |
/** |
* container_of - cast a member of a structure out to the containing structure |
* @ptr: the pointer to the member. |
* @type: the type of the container struct this is embedded in. |
273,22 → 557,28 |
const typeof( ((type *)0)->member ) *__mptr = (ptr); \ |
(type *)( (char *)__mptr - offsetof(type,member) );}) |
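/* |
* Editor's sketch (hypothetical struct): recovering the enclosing object |
* from a pointer to one of its members. |
*/ |
struct example_box { |
int id; |
int payload; |
}; |
static inline struct example_box *example_box_of(int *member) |
{ |
return container_of(member, struct example_box, payload); |
} |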
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ |
#ifdef CONFIG_FTRACE_MCOUNT_RECORD |
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD |
#endif |
static inline void *kcalloc(size_t n, size_t size, uint32_t flags) |
{ |
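/* reject n * size overflow before multiplying; note this port's |
* kzalloc() wrapper ignores the flags argument */ |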
if (n != 0 && size > ULONG_MAX / n) |
return NULL; |
return kzalloc(n * size, 0); |
} |
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */ |
#define VERIFY_OCTAL_PERMISSIONS(perms) \ |
(BUILD_BUG_ON_ZERO((perms) < 0) + \ |
BUILD_BUG_ON_ZERO((perms) > 0777) + \ |
/* User perms >= group perms >= other perms */ \ |
BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \ |
BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \ |
/* Other writable? Generally considered a bad idea. */ \ |
BUILD_BUG_ON_ZERO((perms) & 2) + \ |
(perms)) |
void free (void *ptr); |
#endif /* __KERNEL__ */ |
typedef unsigned long pgprotval_t; |
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; |
struct file |
{ |
352,17 → 642,7 |
# define del_timer_sync(t) del_timer(t) |
struct timespec { |
long tv_sec; /* seconds */ |
long tv_nsec; /* nanoseconds */ |
}; |
#define mb() asm volatile("mfence" : : : "memory") |
#define rmb() asm volatile("lfence" : : : "memory") |
#define wmb() asm volatile("sfence" : : : "memory") |
#define build_mmio_read(name, size, type, reg, barrier) \ |
static inline type name(const volatile void __iomem *addr) \ |
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \ |
400,23 → 680,6 |
#define __raw_writew __writew |
#define __raw_writel __writel |
static inline __u64 readq(const volatile void __iomem *addr) |
{ |
const volatile u32 __iomem *p = addr; |
u32 low, high; |
low = readl(p); |
high = readl(p + 1); |
return low + ((u64)high << 32); |
} |
static inline void writeq(__u64 val, volatile void __iomem *addr) |
{ |
writel(val, addr); |
writel(val >> 32, addr+4); |
} |
#define swap(a, b) \ |
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) |
432,9 → 695,6 |
#define dev_info(dev, format, arg...) \ |
printk("Info %s " format , __func__, ## arg) |
//#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) |
#define BUILD_BUG_ON(condition) |
struct page |
{ |
unsigned int addr; |
467,8 → 727,6 |
#define get_page(a) |
#define put_page(a) |
#define set_pages_uc(a,b) |
#define set_pages_wb(a,b) |
#define pci_map_page(dev, page, offset, size, direction) \ |
(dma_addr_t)( (offset)+page_to_phys(page)) |
475,36 → 733,31 |
#define pci_unmap_page(dev, dma_address, size, direction) |
#define GFP_TEMPORARY 0 |
#define __GFP_NOWARN 0 |
#define __GFP_NORETRY 0 |
#define GFP_NOWAIT 0 |
#define IS_ENABLED(a) 0 |
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) |
#define RCU_INIT_POINTER(p, v) \ |
do { \ |
p = (typeof(*v) __force __rcu *)(v); \ |
} while (0) |
//#define RCU_INIT_POINTER(p, v) \ |
// do { \ |
// p = (typeof(*v) __force __rcu *)(v); \ |
// } while (0) |
#define rcu_dereference_raw(p) ({ \ |
typeof(p) _________p1 = ACCESS_ONCE(p); \ |
(_________p1); \ |
}) |
#define rcu_assign_pointer(p, v) \ |
({ \ |
if (!__builtin_constant_p(v) || \ |
((v) != NULL)) \ |
(p) = (v); \ |
}) |
//#define rcu_dereference_raw(p) ({ \ |
// typeof(p) _________p1 = ACCESS_ONCE(p); \ |
// (_________p1); \ |
// }) |
//#define rcu_assign_pointer(p, v) \ |
// ({ \ |
// if (!__builtin_constant_p(v) || \ |
// ((v) != NULL)) \ |
// (p) = (v); \ |
// }) |
unsigned int hweight16(unsigned int w); |
#define cpufreq_quick_get_max(x) GetCpuFreq() |
extern unsigned int tsc_khz; |
540,7 → 793,7 |
} |
} |
memcpy((void __force *)to, from, n); |
__builtin_memcpy((void __force *)to, from, n); |
return 0; |
} |
551,6 → 804,14 |
void kunmap(struct page *page); |
void kunmap_atomic(void *vaddr); |
typedef u64 async_cookie_t; |
#define iowrite32(v, addr) writel((v), (addr)) |
#define __init |
#define CONFIG_PAGE_OFFSET 0 |
#endif |
/drivers/include/linux/kobject.h |
---|
25,7 → 25,8 |
//#include <linux/kobject_ns.h> |
#include <linux/kernel.h> |
#include <linux/wait.h> |
//#include <linux/atomic.h> |
#include <linux/atomic.h> |
#include <linux/workqueue.h> |
#define UEVENT_HELPER_PATH_LEN 256 |
#define UEVENT_NUM_ENVP 32 /* number of env pointers */ |
/drivers/include/linux/kref.h |
---|
15,7 → 15,11 |
#ifndef _KREF_H_ |
#define _KREF_H_ |
#include <linux/types.h> |
#include <linux/bug.h> |
#include <linux/atomic.h> |
#include <linux/kernel.h> |
#include <linux/mutex.h> |
#include <linux/spinlock.h> |
struct kref { |
atomic_t refcount; |
/drivers/include/linux/linkage.h |
---|
0,0 → 1,112 |
#ifndef _LINUX_LINKAGE_H |
#define _LINUX_LINKAGE_H |
#include <linux/compiler.h> |
#include <linux/stringify.h> |
#include <linux/export.h> |
#include <asm/linkage.h> |
/* Some toolchains use other characters (e.g. '`') to mark new line in macro */ |
#ifndef ASM_NL |
#define ASM_NL ; |
#endif |
#ifdef __cplusplus |
#define CPP_ASMLINKAGE extern "C" |
#else |
#define CPP_ASMLINKAGE |
#endif |
#ifndef asmlinkage |
#define asmlinkage CPP_ASMLINKAGE |
#endif |
#ifndef cond_syscall |
#define cond_syscall(x) asm( \ |
".weak " VMLINUX_SYMBOL_STR(x) "\n\t" \ |
".set " VMLINUX_SYMBOL_STR(x) "," \ |
VMLINUX_SYMBOL_STR(sys_ni_syscall)) |
#endif |
#ifndef SYSCALL_ALIAS |
#define SYSCALL_ALIAS(alias, name) asm( \ |
".globl " VMLINUX_SYMBOL_STR(alias) "\n\t" \ |
".set " VMLINUX_SYMBOL_STR(alias) "," \ |
VMLINUX_SYMBOL_STR(name)) |
#endif |
#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE) |
#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE) |
/* |
* For assembly routines. |
* |
* Note when using these that you must specify the appropriate |
* alignment directives yourself |
*/ |
#define __PAGE_ALIGNED_DATA .section ".data..page_aligned", "aw" |
#define __PAGE_ALIGNED_BSS .section ".bss..page_aligned", "aw" |
/* |
* This is used by architectures to keep arguments on the stack |
* untouched by the compiler by keeping them live until the end. |
* The argument stack may be owned by the assembly-language |
* caller, not the callee, and gcc doesn't always understand |
* that. |
* |
* We have the return value, and a maximum of six arguments. |
* |
* This should always be followed by a "return ret" for the |
* protection to work (ie no more work that the compiler might |
* end up needing stack temporaries for). |
*/ |
/* Assembly files may be compiled with -traditional .. */ |
#ifndef __ASSEMBLY__ |
#ifndef asmlinkage_protect |
# define asmlinkage_protect(n, ret, args...) do { } while (0) |
#endif |
#endif |
#ifndef __ALIGN |
#define __ALIGN .align 4,0x90 |
#define __ALIGN_STR ".align 4,0x90" |
#endif |
#ifdef __ASSEMBLY__ |
#ifndef LINKER_SCRIPT |
#define ALIGN __ALIGN |
#define ALIGN_STR __ALIGN_STR |
#ifndef ENTRY |
#define ENTRY(name) \ |
.globl name ASM_NL \ |
ALIGN ASM_NL \ |
name: |
#endif |
#endif /* LINKER_SCRIPT */ |
#ifndef WEAK |
#define WEAK(name) \ |
.weak name ASM_NL \ |
name: |
#endif |
#ifndef END |
#define END(name) \ |
.size name, .-name |
#endif |
/* If symbol 'name' is treated as a subroutine (gets called, and returns) |
* then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of |
* static analysis tools such as stack depth analyzer. |
*/ |
#ifndef ENDPROC |
#define ENDPROC(name) \ |
.type name, @function ASM_NL \ |
END(name) |
#endif |
#endif |
#endif |
/drivers/include/linux/list.h |
---|
4,6 → 4,8 |
#include <linux/types.h> |
#include <linux/stddef.h> |
#include <linux/poison.h> |
#include <linux/const.h> |
#include <linux/kernel.h> |
/* |
* Simple doubly linked list implementation. |
344,7 → 346,7 |
* list_entry - get the struct for this entry |
* @ptr: the &struct list_head pointer. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
*/ |
#define list_entry(ptr, type, member) \ |
container_of(ptr, type, member) |
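/* |
* Editor's sketch (hypothetical type): list_entry() in use; the open-coded |
* loop is what list_for_each_entry(), defined further below, expands to |
* (modulo the typeof magic). |
*/ |
struct example_node { |
int val; |
struct list_head link; |
}; |
static inline int example_sum(struct list_head *head) |
{ |
struct list_head *p; |
int sum = 0; |
for (p = head->next; p != head; p = p->next) |
sum += list_entry(p, struct example_node, link)->val; |
return sum; |
} |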
353,7 → 355,7 |
* list_first_entry - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Note, that list is expected to be not empty. |
*/ |
364,7 → 366,7 |
* list_last_entry - get the last element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Note, that list is expected to be not empty. |
*/ |
375,7 → 377,7 |
* list_first_entry_or_null - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Note that if the list is empty, it returns NULL. |
*/ |
385,7 → 387,7 |
/** |
* list_next_entry - get the next element in list |
* @pos: the type * to cursor |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
*/ |
#define list_next_entry(pos, member) \ |
list_entry((pos)->member.next, typeof(*(pos)), member) |
393,7 → 395,7 |
/** |
* list_prev_entry - get the prev element in list |
* @pos: the type * to cursor |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
*/ |
#define list_prev_entry(pos, member) \ |
list_entry((pos)->member.prev, typeof(*(pos)), member) |
439,7 → 441,7 |
* list_for_each_entry - iterate over list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
*/ |
#define list_for_each_entry(pos, head, member) \ |
for (pos = list_first_entry(head, typeof(*pos), member); \ |
450,7 → 452,7 |
* list_for_each_entry_reverse - iterate backwards over list of given type. |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
*/ |
#define list_for_each_entry_reverse(pos, head, member) \ |
for (pos = list_last_entry(head, typeof(*pos), member); \ |
461,7 → 463,7 |
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() |
* @pos: the type * to use as a start point |
* @head: the head of the list |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Prepares a pos entry for use as a start point in list_for_each_entry_continue(). |
*/ |
472,7 → 474,7 |
* list_for_each_entry_continue - continue iteration over list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Continue to iterate over list of given type, continuing after |
* the current position. |
486,7 → 488,7 |
* list_for_each_entry_continue_reverse - iterate backwards from the given point |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Start to iterate over list of given type backwards, continuing after |
* the current position. |
500,7 → 502,7 |
* list_for_each_entry_from - iterate over list of given type from the current point |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Iterate over list of given type, continuing from current position. |
*/ |
513,7 → 515,7 |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
*/ |
#define list_for_each_entry_safe(pos, n, head, member) \ |
for (pos = list_first_entry(head, typeof(*pos), member), \ |
526,7 → 528,7 |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Iterate over list of given type, continuing after current point, |
* safe against removal of list entry. |
542,7 → 544,7 |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Iterate over list of given type from current point, safe against |
* removal of list entry. |
557,7 → 559,7 |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Iterate backwards over list of given type, safe against removal |
* of list entry. |
572,7 → 574,7 |
* list_safe_reset_next - reset a stale list_for_each_entry_safe loop |
* @pos: the loop cursor used in the list_for_each_entry_safe loop |
* @n: temporary storage used in list_for_each_entry_safe |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* list_safe_reset_next is not safe to use in general if the list may be |
* modified concurrently (e.g. the lock is dropped in the loop body). An
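To ground the _safe iterator family documented in this hunk, a minimal deletion loop (list head, struct, and predicate are hypothetical). It is safe because @n caches the successor before @pos may be freed:

struct item *it, *tmp;

list_for_each_entry_safe(it, tmp, &pending, node) {
	if (it->done) {
		list_del(&it->node);
		kfree(it);
	}
}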
/drivers/include/linux/lockdep.h |
---|
4,7 → 4,7 |
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> |
* |
* see Documentation/lockdep-design.txt for more details. |
* see Documentation/locking/lockdep-design.txt for more details. |
*/ |
#ifndef __LINUX_LOCKDEP_H |
#define __LINUX_LOCKDEP_H |
12,6 → 12,10 |
struct task_struct; |
struct lockdep_map; |
/* for sysctl */ |
extern int prove_locking; |
extern int lock_stat; |
#ifdef CONFIG_LOCKDEP |
#include <linux/linkage.h> |
51,6 → 55,8 |
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; |
}; |
extern struct lock_class_key __lockdep_no_validate__; |
#define LOCKSTAT_POINTS 4 |
/* |
151,7 → 157,25 |
#endif |
}; |
static inline void lockdep_copy_map(struct lockdep_map *to, |
struct lockdep_map *from) |
{ |
int i; |
*to = *from; |
/* |
* Since the class cache can be modified concurrently we could observe |
* half pointers (64bit arch using 32bit copy insns). Therefore clear |
* the caches and take the performance hit. |
* |
* XXX it doesn't work well with lockdep_set_class_and_subclass(), since |
* that relies on cache abuse. |
*/ |
for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) |
to->class_cache[i] = NULL; |
} |
/* |
* Every lock has a list of other locks that were taken after it. |
* We only grow the list, never remove from it: |
*/ |
338,6 → 362,10 |
WARN_ON(debug_locks && !lockdep_is_held(l)); \ |
} while (0) |
#define lockdep_assert_held_once(l) do { \ |
WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ |
} while (0) |
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) |
#else /* !CONFIG_LOCKDEP */ |
388,6 → 416,7 |
#define lockdep_depth(tsk) (0) |
#define lockdep_assert_held(l) do { (void)(l); } while (0) |
#define lockdep_assert_held_once(l) do { (void)(l); } while (0) |
#define lockdep_recursing(tsk) (0) |
454,82 → 483,35 |
* on the per lock-class debug mode: |
*/ |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) |
# else |
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# endif |
#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) |
#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i) |
#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i) |
#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) |
# define spin_release(l, n, i) lock_release(l, n, i) |
#else |
# define spin_acquire(l, s, t, i) do { } while (0) |
# define spin_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) |
# else |
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) |
# endif |
#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) |
# define rwlock_release(l, n, i) lock_release(l, n, i) |
#else |
# define rwlock_acquire(l, s, t, i) do { } while (0) |
# define rwlock_acquire_read(l, s, t, i) do { } while (0) |
# define rwlock_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) |
# else |
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) |
# endif |
#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) |
#define seqcount_release(l, n, i) lock_release(l, n, i) |
#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) |
# define mutex_release(l, n, i) lock_release(l, n, i) |
#else |
# define mutex_acquire(l, s, t, i) do { } while (0) |
# define mutex_acquire_nest(l, s, t, n, i) do { } while (0) |
# define mutex_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) |
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) |
# else |
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) |
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) |
# endif |
#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) |
#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i) |
# define rwsem_release(l, n, i) lock_release(l, n, i) |
#else |
# define rwsem_acquire(l, s, t, i) do { } while (0) |
# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0) |
# define rwsem_acquire_read(l, s, t, i) do { } while (0) |
# define rwsem_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) |
# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_) |
# else |
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) |
# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_) |
# endif |
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) |
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) |
#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_) |
# define lock_map_release(l) lock_release(l, 1, _THIS_IP_) |
#else |
# define lock_map_acquire(l) do { } while (0) |
# define lock_map_acquire_read(l) do { } while (0) |
# define lock_map_release(l) do { } while (0) |
#endif |
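For orientation, a hedged sketch of how these dep-map annotations are typically used outside of real locks (the map name and the surrounding code are hypothetical; the initializer follows the mainline STATIC_LOCKDEP_MAP_INIT() convention):

static struct lockdep_map work_dep_map =
	STATIC_LOCKDEP_MAP_INIT("my_work", &work_dep_map);

lock_map_acquire(&work_dep_map);	/* enter the annotated context */
do_work();				/* deadlocks against this context become detectable */
lock_map_release(&work_dep_map);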
#ifdef CONFIG_PROVE_LOCKING |
# define might_lock(lock) \ |
/drivers/include/linux/mm.h |
---|
1,13 → 1,13 |
#ifndef _LINUX_MM_H |
#define _LINUX_MM_H |
#include <kernel.h> |
#include <linux/errno.h> |
#define VM_NORESERVE 0x00200000 |
#define nth_page(page,n) ((void*)(((page_to_phys(page)>>12)+(n))<<12)) |
#define page_to_pfn(page) (page_to_phys(page)>>12) |
#define __page_to_pfn(page) (page_to_phys(page)>>12) |
/* to align the pointer to the (next) page boundary */ |
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) |
/drivers/include/linux/mmdebug.h |
---|
0,0 → 1,58 |
#ifndef LINUX_MM_DEBUG_H |
#define LINUX_MM_DEBUG_H 1 |
#include <linux/stringify.h> |
struct page; |
struct vm_area_struct; |
struct mm_struct; |
extern void dump_page(struct page *page, const char *reason); |
extern void dump_page_badflags(struct page *page, const char *reason, |
unsigned long badflags); |
void dump_vma(const struct vm_area_struct *vma); |
void dump_mm(const struct mm_struct *mm); |
#ifdef CONFIG_DEBUG_VM |
#define VM_BUG_ON(cond) BUG_ON(cond) |
#define VM_BUG_ON_PAGE(cond, page) \ |
do { \ |
if (unlikely(cond)) { \ |
dump_page(page, "VM_BUG_ON_PAGE(" __stringify(cond)")");\ |
BUG(); \ |
} \ |
} while (0) |
#define VM_BUG_ON_VMA(cond, vma) \ |
do { \ |
if (unlikely(cond)) { \ |
dump_vma(vma); \ |
BUG(); \ |
} \ |
} while (0) |
#define VM_BUG_ON_MM(cond, mm) \ |
do { \ |
if (unlikely(cond)) { \ |
dump_mm(mm); \ |
BUG(); \ |
} \ |
} while (0) |
#define VM_WARN_ON(cond) WARN_ON(cond) |
#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) |
#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) |
#else |
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) |
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) |
#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond) |
#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond) |
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) |
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) |
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) |
#endif |
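A hedged usage sketch of the page-flavoured assert (caller context hypothetical): with CONFIG_DEBUG_VM the offending page is dumped before BUG(), and without it the expression degrades to a compile-time type check only:

VM_BUG_ON_PAGE(!PageLocked(page), page);	/* caller must hold the page lock */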
#ifdef CONFIG_DEBUG_VIRTUAL |
#define VIRTUAL_BUG_ON(cond) BUG_ON(cond) |
#else |
#define VIRTUAL_BUG_ON(cond) do { } while (0) |
#endif |
#endif |
/drivers/include/linux/mod_devicetable.h |
---|
9,7 → 9,7 |
#ifdef __KERNEL__ |
#include <linux/types.h> |
#include <mutex.h> |
#include <linux/uuid.h> |
typedef unsigned long kernel_ulong_t; |
#endif |
69,7 → 69,7 |
* @bDeviceClass: Class of device; numbers are assigned |
* by the USB forum. Products may choose to implement classes, |
* or be vendor-specific. Device classes specify behavior of all |
* the interfaces on a devices. |
* the interfaces on a device. |
* @bDeviceSubClass: Subclass of device; associated with bDeviceClass. |
* @bDeviceProtocol: Protocol of device; associated with bDeviceClass. |
* @bInterfaceClass: Class of interface; numbers are assigned |
/drivers/include/linux/module.h |
---|
8,9 → 8,13 |
*/ |
#include <linux/list.h> |
#include <linux/compiler.h> |
#include <linux/cache.h> |
#include <linux/compiler.h> |
#include <linux/kernel.h> |
#include <linux/moduleparam.h> |
#include <linux/export.h> |
#include <linux/printk.h> |
#define MODULE_FIRMWARE(x) |
/drivers/include/linux/moduleparam.h |
---|
1,3 → 1,10 |
#ifndef _LINUX_MODULE_PARAMS_H |
#define _LINUX_MODULE_PARAMS_H |
/* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */ |
#include <linux/kernel.h> |
#define MODULE_PARM_DESC(_parm, desc) |
#define module_param_named(name, value, type, perm) |
#define module_param_named_unsafe(name, value, type, perm) |
#endif |
/drivers/include/linux/mutex.h |
---|
10,8 → 10,12 |
#ifndef __LINUX_MUTEX_H |
#define __LINUX_MUTEX_H |
#include <asm/current.h> |
#include <linux/list.h> |
#include <asm/atomic.h> |
#include <linux/linkage.h> |
#include <linux/lockdep.h> |
#include <asm/processor.h> |
/* |
* Simple, straightforward mutexes with strict semantics: |
/drivers/include/linux/pci.h |
---|
17,9 → 17,13 |
#define LINUX_PCI_H |
#include <linux/types.h> |
#include <list.h> |
#include <linux/list.h> |
#include <linux/compiler.h> |
#include <linux/errno.h> |
#include <linux/atomic.h> |
#include <linux/pci_regs.h> /* The pci register defines */ |
#include <ioport.h> |
#include <linux/ioport.h> |
#define PCI_CFG_SPACE_SIZE 256 |
311,6 → 315,19 |
PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2, |
}; |
/* These values come from the PCI Express Spec */ |
enum pcie_link_width { |
PCIE_LNK_WIDTH_RESRV = 0x00, |
PCIE_LNK_X1 = 0x01, |
PCIE_LNK_X2 = 0x02, |
PCIE_LNK_X4 = 0x04, |
PCIE_LNK_X8 = 0x08, |
PCIE_LNK_X12 = 0x0C, |
PCIE_LNK_X16 = 0x10, |
PCIE_LNK_X32 = 0x20, |
PCIE_LNK_WIDTH_UNKNOWN = 0xFF, |
}; |
/* Based on the PCI Hotplug Spec, but some values are made up by us */ |
enum pci_bus_speed { |
PCI_SPEED_33MHz = 0x00, |
338,6 → 355,23 |
PCI_SPEED_UNKNOWN = 0xff, |
}; |
struct pci_cap_saved_data { |
u16 cap_nr; |
bool cap_extended; |
unsigned int size; |
u32 data[0]; |
}; |
struct pci_cap_saved_state { |
struct hlist_node next; |
struct pci_cap_saved_data cap; |
}; |
struct pcie_link_state; |
struct pci_vpd; |
struct pci_sriov; |
struct pci_ats; |
/* |
* The pci_dev structure is used to describe PCI devices. |
*/ |
349,7 → 383,7 |
void *sysdata; /* hook for sys-specific extension */ |
// struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ |
struct pci_slot *slot; /* Physical slot this device is in */ |
u32_t busnr; |
u32 busnr; |
unsigned int devfn; /* encoded device & function index */ |
unsigned short vendor; |
unsigned short device; |
365,7 → 399,7 |
u16 pcie_flags_reg; /* cached PCI-E Capabilities Register */ |
// struct pci_driver *driver; /* which driver has allocated this device */ |
uint64_t dma_mask; /* Mask of the bits of bus address this |
u64 dma_mask; /* Mask of the bits of bus address this |
device implements. Normally this is |
0xffffffff. You only need to change |
this if your device has broken DMA |
548,7 → 582,7 |
case PCIBIOS_FUNC_NOT_SUPPORTED: |
return -ENOENT; |
case PCIBIOS_BAD_VENDOR_ID: |
return -EINVAL; |
return -ENOTTY; |
case PCIBIOS_DEVICE_NOT_FOUND: |
return -ENODEV; |
case PCIBIOS_BAD_REGISTER_NUMBER: |
559,7 → 593,7 |
return -ENOSPC; |
} |
return -ENOTTY; |
return -ERANGE; |
} |
/* Low-level architecture-dependent routines */ |
569,7 → 603,20 |
int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); |
}; |
/* |
* ACPI needs to be able to access PCI config space before we've done a |
* PCI bus scan and created pci_bus structures. |
*/ |
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, |
int reg, int len, u32 *val); |
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, |
int reg, int len, u32 val); |
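A hedged sketch of the raw accessors declared above (domain/bus/devfn values hypothetical); they bypass the pci_bus structures entirely, which is why ACPI can use them before the bus scan:

u32 vendor;

raw_pci_read(0 /* domain */, 0 /* bus */, PCI_DEVFN(0, 0),
	     PCI_VENDOR_ID, 2, &vendor);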
struct pci_bus_region { |
dma_addr_t start; |
dma_addr_t end; |
}; |
enum pci_bar_type { |
pci_bar_unknown, /* Standard PCI BAR probe */ |
pci_bar_io, /* An io port BAR */ |
/drivers/include/linux/percpu-defs.h |
---|
0,0 → 1,516 |
/* |
* linux/percpu-defs.h - basic definitions for percpu areas |
* |
* DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER. |
* |
* This file is separate from linux/percpu.h to avoid cyclic inclusion |
* dependency from arch header files. Only to be included from |
* asm/percpu.h. |
* |
* This file includes macros necessary to declare percpu sections and |
* variables, and definitions of percpu accessors and operations. It |
* should provide enough percpu features to arch header files even when |
* they can only include asm/percpu.h to avoid cyclic inclusion dependency. |
*/ |
#ifndef _LINUX_PERCPU_DEFS_H |
#define _LINUX_PERCPU_DEFS_H |
#ifdef CONFIG_SMP |
#ifdef MODULE |
#define PER_CPU_SHARED_ALIGNED_SECTION "" |
#define PER_CPU_ALIGNED_SECTION "" |
#else |
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned" |
#define PER_CPU_ALIGNED_SECTION "..shared_aligned" |
#endif |
#define PER_CPU_FIRST_SECTION "..first" |
#else |
#define PER_CPU_SHARED_ALIGNED_SECTION "" |
#define PER_CPU_ALIGNED_SECTION "..shared_aligned" |
#define PER_CPU_FIRST_SECTION "" |
#endif |
/* |
* Base implementations of per-CPU variable declarations and definitions, where |
* the section in which the variable is to be placed is provided by the |
* 'sec' argument. This may be used to affect the parameters governing the |
* variable's storage. |
* |
* NOTE! The sections for the DECLARE and for the DEFINE must match, lest |
* linkage errors occur due to the compiler generating the wrong code to access
* that section. |
*/ |
#define __PCPU_ATTRS(sec) \ |
__percpu __attribute__((section(PER_CPU_BASE_SECTION sec))) \ |
PER_CPU_ATTRIBUTES |
#define __PCPU_DUMMY_ATTRS \ |
__attribute__((section(".discard"), unused)) |
/* |
* s390 and alpha modules require percpu variables to be defined as |
* weak to force the compiler to generate GOT based external |
* references for them. This is necessary because percpu sections |
* will be located outside of the usually addressable area. |
* |
* This definition puts the following two extra restrictions when |
* defining percpu variables. |
* |
* 1. The symbol must be globally unique, even the static ones. |
* 2. Static percpu variables cannot be defined inside a function. |
* |
* Archs which need weak percpu definitions should define |
* ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary. |
* |
* To ensure that the generic code observes the above two |
* restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak |
* definition is used for all cases. |
*/ |
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU) |
/* |
* __pcpu_scope_* dummy variable is used to enforce scope. It |
* receives the static modifier when it's used in front of |
* DEFINE_PER_CPU() and will trigger build failure if |
* DECLARE_PER_CPU() is used for the same variable. |
* |
* __pcpu_unique_* dummy variable is used to enforce symbol uniqueness |
* such that hidden weak symbol collision, which will cause unrelated |
* variables to share the same address, can be detected during build. |
*/ |
#define DECLARE_PER_CPU_SECTION(type, name, sec) \ |
extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ |
extern __PCPU_ATTRS(sec) __typeof__(type) name |
#define DEFINE_PER_CPU_SECTION(type, name, sec) \ |
__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ |
extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ |
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ |
extern __PCPU_ATTRS(sec) __typeof__(type) name; \ |
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ |
__typeof__(type) name |
#else |
/* |
* Normal declaration and definition macros. |
*/ |
#define DECLARE_PER_CPU_SECTION(type, name, sec) \ |
extern __PCPU_ATTRS(sec) __typeof__(type) name |
#define DEFINE_PER_CPU_SECTION(type, name, sec) \ |
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \ |
__typeof__(type) name |
#endif |
/* |
* Variant on the per-CPU variable declaration/definition theme used for |
* ordinary per-CPU variables. |
*/ |
#define DECLARE_PER_CPU(type, name) \ |
DECLARE_PER_CPU_SECTION(type, name, "") |
#define DEFINE_PER_CPU(type, name) \ |
DEFINE_PER_CPU_SECTION(type, name, "") |
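A minimal sketch of the ordinary pairing (variable name hypothetical); the DEFINE goes in exactly one translation unit and the DECLARE in a shared header, with matching sections as the NOTE above requires:

/* foo.c */
DEFINE_PER_CPU(int, my_stat);

/* foo.h */
DECLARE_PER_CPU(int, my_stat);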
/* |
* Declaration/definition used for per-CPU variables that must come first in |
* the set of variables. |
*/ |
#define DECLARE_PER_CPU_FIRST(type, name) \ |
DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) |
#define DEFINE_PER_CPU_FIRST(type, name) \ |
DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) |
/* |
* Declaration/definition used for per-CPU variables that must be cacheline |
* aligned under SMP conditions so that, whilst a particular instance of the |
* data corresponds to a particular CPU, inefficiencies due to direct access by |
* other CPUs are reduced by preventing the data from unnecessarily spanning |
* cachelines. |
* |
* An example of this would be statistical data, where each CPU's set of data |
* is updated by that CPU alone, but the data from across all CPUs is collated |
* by a CPU processing a read from a proc file. |
*/ |
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name) \ |
DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ |
____cacheline_aligned_in_smp |
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ |
DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ |
____cacheline_aligned_in_smp |
#define DECLARE_PER_CPU_ALIGNED(type, name) \ |
DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \ |
____cacheline_aligned |
#define DEFINE_PER_CPU_ALIGNED(type, name) \ |
DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \ |
____cacheline_aligned |
/* |
* Declaration/definition used for per-CPU variables that must be page aligned. |
*/ |
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \ |
DECLARE_PER_CPU_SECTION(type, name, "..page_aligned") \ |
__aligned(PAGE_SIZE) |
#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ |
DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \ |
__aligned(PAGE_SIZE) |
/* |
* Declaration/definition used for per-CPU variables that must be read mostly. |
*/ |
#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \ |
DECLARE_PER_CPU_SECTION(type, name, "..read_mostly") |
#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \ |
DEFINE_PER_CPU_SECTION(type, name, "..read_mostly") |
/* |
* Intermodule exports for per-CPU variables. sparse forgets about |
* address space across EXPORT_SYMBOL(), so turn EXPORT_SYMBOL() into a
* no-op under __CHECKER__.
*/ |
#ifndef __CHECKER__ |
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var) |
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var) |
#else |
#define EXPORT_PER_CPU_SYMBOL(var) |
#define EXPORT_PER_CPU_SYMBOL_GPL(var) |
#endif |
/* |
* Accessors and operations. |
*/ |
#ifndef __ASSEMBLY__ |
/* |
* __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating |
* @ptr and is invoked once before a percpu area is accessed by all |
* accessors and operations. This is performed in the generic part of |
* percpu and arch overrides don't need to worry about it; however, if an |
* arch wants to implement an arch-specific percpu accessor or operation, |
* it may use __verify_pcpu_ptr() to verify the parameters. |
* |
* + 0 is required in order to convert the pointer type from a |
* potential array type to a pointer to a single item of the array. |
*/ |
#define __verify_pcpu_ptr(ptr) \ |
do { \ |
const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ |
(void)__vpp_verify; \ |
} while (0) |
#ifdef CONFIG_SMP |
/* |
* Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE() |
* to prevent the compiler from making incorrect assumptions about the |
* pointer value. The weird cast keeps both GCC and sparse happy. |
*/ |
#define SHIFT_PERCPU_PTR(__p, __offset) \ |
RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)) |
#define per_cpu_ptr(ptr, cpu) \ |
({ \ |
__verify_pcpu_ptr(ptr); \ |
SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))); \ |
}) |
#define raw_cpu_ptr(ptr) \ |
({ \ |
__verify_pcpu_ptr(ptr); \ |
arch_raw_cpu_ptr(ptr); \ |
}) |
#ifdef CONFIG_DEBUG_PREEMPT |
#define this_cpu_ptr(ptr) \ |
({ \ |
__verify_pcpu_ptr(ptr); \ |
SHIFT_PERCPU_PTR(ptr, my_cpu_offset); \ |
}) |
#else |
#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) |
#endif |
#else /* CONFIG_SMP */ |
#define VERIFY_PERCPU_PTR(__p) \ |
({ \ |
__verify_pcpu_ptr(__p); \ |
(typeof(*(__p)) __kernel __force *)(__p); \ |
}) |
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); }) |
#define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) |
#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) |
#endif /* CONFIG_SMP */ |
#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) |
/* |
* Must be an lvalue. Since @var must be a simple identifier, |
* we force a syntax error here if it isn't. |
*/ |
#define get_cpu_var(var) \ |
(*({ \ |
preempt_disable(); \ |
this_cpu_ptr(&var); \ |
})) |
/* |
* The weird & is necessary because sparse considers (void)(var) to be |
* a direct dereference of percpu variable (var). |
*/ |
#define put_cpu_var(var) \ |
do { \ |
(void)&(var); \ |
preempt_enable(); \ |
} while (0) |
#define get_cpu_ptr(var) \ |
({ \ |
preempt_disable(); \ |
this_cpu_ptr(var); \ |
}) |
#define put_cpu_ptr(var) \ |
do { \ |
(void)(var); \ |
preempt_enable(); \ |
} while (0) |
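A hedged sketch of the paired accessors above (per-CPU variable hypothetical); get_cpu_var() disables preemption so the task cannot migrate mid-update, and put_cpu_var() re-enables it:

get_cpu_var(my_stat)++;		/* runs with preemption disabled */
put_cpu_var(my_stat);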
/* |
* Branching function to split up a function into a set of functions that |
* are called for different scalar sizes of the objects handled. |
*/ |
extern void __bad_size_call_parameter(void); |
#ifdef CONFIG_DEBUG_PREEMPT |
extern void __this_cpu_preempt_check(const char *op); |
#else |
static inline void __this_cpu_preempt_check(const char *op) { } |
#endif |
#define __pcpu_size_call_return(stem, variable) \ |
({ \ |
typeof(variable) pscr_ret__; \ |
__verify_pcpu_ptr(&(variable)); \ |
switch(sizeof(variable)) { \ |
case 1: pscr_ret__ = stem##1(variable); break; \ |
case 2: pscr_ret__ = stem##2(variable); break; \ |
case 4: pscr_ret__ = stem##4(variable); break; \ |
case 8: pscr_ret__ = stem##8(variable); break; \ |
default: \ |
__bad_size_call_parameter(); break; \ |
} \ |
pscr_ret__; \ |
}) |
#define __pcpu_size_call_return2(stem, variable, ...) \ |
({ \ |
typeof(variable) pscr2_ret__; \ |
__verify_pcpu_ptr(&(variable)); \ |
switch(sizeof(variable)) { \ |
case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ |
case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \ |
case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \ |
case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \ |
default: \ |
__bad_size_call_parameter(); break; \ |
} \ |
pscr2_ret__; \ |
}) |
/* |
* Special handling for cmpxchg_double. cmpxchg_double is passed two |
* percpu variables. The first has to be aligned to a double word |
* boundary and the second has to follow directly thereafter. |
* We enforce this on all architectures even if they don't support |
* a double cmpxchg instruction, since it's a cheap requirement, and it |
* avoids breaking the requirement for architectures with the instruction. |
*/ |
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ |
({ \ |
bool pdcrb_ret__; \ |
__verify_pcpu_ptr(&(pcp1)); \ |
BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ |
VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1))); \ |
VM_BUG_ON((unsigned long)(&(pcp2)) != \ |
(unsigned long)(&(pcp1)) + sizeof(pcp1)); \ |
switch(sizeof(pcp1)) { \ |
case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ |
case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ |
case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \ |
case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \ |
default: \ |
__bad_size_call_parameter(); break; \ |
} \ |
pdcrb_ret__; \ |
}) |
#define __pcpu_size_call(stem, variable, ...) \ |
do { \ |
__verify_pcpu_ptr(&(variable)); \ |
switch(sizeof(variable)) { \ |
case 1: stem##1(variable, __VA_ARGS__);break; \ |
case 2: stem##2(variable, __VA_ARGS__);break; \ |
case 4: stem##4(variable, __VA_ARGS__);break; \ |
case 8: stem##8(variable, __VA_ARGS__);break; \ |
default: \ |
__bad_size_call_parameter();break; \ |
} \ |
} while (0) |
/* |
* this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com> |
* |
* Optimized manipulation for memory allocated through the per cpu |
* allocator or for addresses of per cpu variables. |
* |
* These operations guarantee exclusivity of access with respect to other operations
* on the *same* processor. The assumption is that per cpu data is only |
* accessed by a single processor instance (the current one). |
* |
* The arch code can provide an optimized implementation by defining macros
* for certain scalar sizes. E.g. defining this_cpu_add_2() supplies per
* cpu atomic operations for 2 byte sized RMW actions. If arch code does |
* not provide operations for a scalar size then the fallback in the |
* generic code will be used. |
* |
* cmpxchg_double replaces two adjacent scalars at once. The first two |
* parameters are per cpu variables which have to be of the same size. A |
* truth value is returned to indicate success or failure (since a double |
* register result is difficult to handle). There is very limited hardware |
* support for these operations, so only certain sizes may work. |
*/ |
/* |
* Operations for contexts where we do not want to do any checks for |
* preemptions. Unless strictly necessary, always use [__]this_cpu_*() |
* instead. |
* |
* If there is no other protection through preempt disable and/or disabling |
* interrupts, then one of these RMW operations can show unexpected behavior
* because the execution thread was rescheduled on another processor or an |
* interrupt occurred and the same percpu variable was modified from the |
* interrupt context. |
*/ |
#define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, pcp) |
#define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, pcp, val) |
#define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, pcp, val) |
#define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, pcp, val) |
#define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, pcp, val) |
#define raw_cpu_add_return(pcp, val) __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) |
#define raw_cpu_xchg(pcp, nval) __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval) |
#define raw_cpu_cmpxchg(pcp, oval, nval) \ |
__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) |
#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) |
#define raw_cpu_sub(pcp, val) raw_cpu_add(pcp, -(val)) |
#define raw_cpu_inc(pcp) raw_cpu_add(pcp, 1) |
#define raw_cpu_dec(pcp) raw_cpu_sub(pcp, 1) |
#define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val)) |
#define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1) |
#define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1) |
/* |
* Operations for contexts that are safe from preemption/interrupts. These |
* operations verify that preemption is disabled. |
*/ |
#define __this_cpu_read(pcp) \ |
({ \ |
__this_cpu_preempt_check("read"); \ |
raw_cpu_read(pcp); \ |
}) |
#define __this_cpu_write(pcp, val) \ |
({ \ |
__this_cpu_preempt_check("write"); \ |
raw_cpu_write(pcp, val); \ |
}) |
#define __this_cpu_add(pcp, val) \ |
({ \ |
__this_cpu_preempt_check("add"); \ |
raw_cpu_add(pcp, val); \ |
}) |
#define __this_cpu_and(pcp, val) \ |
({ \ |
__this_cpu_preempt_check("and"); \ |
raw_cpu_and(pcp, val); \ |
}) |
#define __this_cpu_or(pcp, val) \ |
({ \ |
__this_cpu_preempt_check("or"); \ |
raw_cpu_or(pcp, val); \ |
}) |
#define __this_cpu_add_return(pcp, val) \ |
({ \ |
__this_cpu_preempt_check("add_return"); \ |
raw_cpu_add_return(pcp, val); \ |
}) |
#define __this_cpu_xchg(pcp, nval) \ |
({ \ |
__this_cpu_preempt_check("xchg"); \ |
raw_cpu_xchg(pcp, nval); \ |
}) |
#define __this_cpu_cmpxchg(pcp, oval, nval) \ |
({ \ |
__this_cpu_preempt_check("cmpxchg"); \ |
raw_cpu_cmpxchg(pcp, oval, nval); \ |
}) |
#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
({ __this_cpu_preempt_check("cmpxchg_double"); \ |
raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \ |
}) |
#define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val)) |
#define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1) |
#define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1) |
#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) |
#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) |
#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) |
/* |
* Operations with implied preemption protection. These operations can be |
* used without worrying about preemption. Note that interrupts may still |
* occur while an operation is in progress and if the interrupt modifies |
* the variable too then RMW actions may not be reliable. |
*/ |
#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp) |
#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val) |
#define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, pcp, val) |
#define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, pcp, val) |
#define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, pcp, val) |
#define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) |
#define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval) |
#define this_cpu_cmpxchg(pcp, oval, nval) \ |
__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) |
#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) |
#define this_cpu_sub(pcp, val) this_cpu_add(pcp, -(typeof(pcp))(val)) |
#define this_cpu_inc(pcp) this_cpu_add(pcp, 1) |
#define this_cpu_dec(pcp) this_cpu_sub(pcp, 1) |
#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) |
#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) |
#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) |
#endif /* __ASSEMBLY__ */ |
#endif /* _LINUX_PERCPU_DEFS_H */ |
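Summing up the three families in this header with a hedged sketch (counter name hypothetical): raw_cpu_*() performs no checks, __this_cpu_*() asserts that preemption is already disabled, and this_cpu_*() is safe from preemptible context:

DEFINE_PER_CPU(unsigned long, nr_events);

this_cpu_inc(nr_events);			/* preemption-safe RMW */
if (this_cpu_read(nr_events) > 1000)
	this_cpu_write(nr_events, 0);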
/drivers/include/linux/personality.h |
---|
0,0 → 1,54 |
#ifndef _LINUX_PERSONALITY_H |
#define _LINUX_PERSONALITY_H |
#include <uapi/linux/personality.h> |
/* |
* Handling of different ABIs (personalities). |
*/ |
struct exec_domain; |
struct pt_regs; |
extern int register_exec_domain(struct exec_domain *); |
extern int unregister_exec_domain(struct exec_domain *); |
extern int __set_personality(unsigned int); |
/* |
* Description of an execution domain. |
* |
* The first two members are referenced from assembly source
* and should stay where they are unless explicitly needed. |
*/ |
typedef void (*handler_t)(int, struct pt_regs *); |
struct exec_domain { |
const char *name; /* name of the execdomain */ |
handler_t handler; /* handler for syscalls */ |
unsigned char pers_low; /* lowest personality */ |
unsigned char pers_high; /* highest personality */ |
unsigned long *signal_map; /* signal mapping */ |
unsigned long *signal_invmap; /* reverse signal mapping */ |
struct map_segment *err_map; /* error mapping */ |
struct map_segment *socktype_map; /* socket type mapping */ |
struct map_segment *sockopt_map; /* socket option mapping */ |
struct map_segment *af_map; /* address family mapping */ |
struct module *module; /* module context of the ed. */ |
struct exec_domain *next; /* linked list (internal) */ |
}; |
/* |
* Return the base personality without flags. |
*/ |
#define personality(pers) (pers & PER_MASK) |
/* |
* Change personality of the currently running process. |
*/ |
#define set_personality(pers) \ |
((current->personality == (pers)) ? 0 : __set_personality(pers)) |
#endif /* _LINUX_PERSONALITY_H */ |
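Because personality values carry ABI flag bits in their upper bits, comparisons should go through the masking helper above; a hedged sketch (handle_compat() is hypothetical):

if (personality(current->personality) == PER_LINUX32)
	handle_compat();	/* flag bits (e.g. READ_IMPLIES_EXEC) are masked off */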
/drivers/include/linux/printk.h |
---|
0,0 → 1,264 |
#ifndef __KERNEL_PRINTK__ |
#define __KERNEL_PRINTK__ |
#include <stdarg.h> |
#include <linux/linkage.h> |
#include <linux/cache.h> |
extern const char linux_banner[]; |
extern const char linux_proc_banner[]; |
extern char *log_buf_addr_get(void); |
extern u32 log_buf_len_get(void); |
/* printk's without a loglevel use this.. */ |
#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT |
/* We show everything that is MORE important than this.. */ |
#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */ |
#define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */ |
#define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */ |
#define CONSOLE_LOGLEVEL_DEFAULT 7 /* anything MORE serious than KERN_DEBUG */ |
#define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */ |
#define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */ |
struct va_format { |
const char *fmt; |
va_list *va; |
}; |
/* |
* FW_BUG |
* Add this to a message where you are sure the firmware is buggy or behaves |
* really stupidly or out of spec. Be aware that the responsible BIOS developer
* should be able to fix this issue or at least get a concrete idea of the |
* problem by reading your message without the need of looking at the kernel |
* code. |
* |
* Use it for definite and high priority BIOS bugs. |
* |
* FW_WARN |
* Use it for less clear-cut cases (e.g. could the kernel have messed things up already?)
* and medium priority BIOS bugs. |
* |
* FW_INFO |
* Use this one if you want to tell the user or vendor about something |
* suspicious, but generally harmless, related to the firmware.
* |
* Use it for information or very low priority BIOS bugs. |
*/ |
#define FW_BUG "[Firmware Bug]: " |
#define FW_WARN "[Firmware Warn]: " |
#define FW_INFO "[Firmware Info]: " |
/* |
* HW_ERR |
* Add this to a message for hardware errors, so that user can report |
* it to hardware vendor instead of LKML or software vendor. |
*/ |
#define HW_ERR "[Hardware Error]: " |
/* |
* DEPRECATED |
* Add this to a message whenever you want to warn user space about the use |
* of a deprecated aspect of an API so they can stop using it |
*/ |
#define DEPRECATED "[Deprecated]: " |
static inline __printf(1, 2) |
int no_printk(const char *fmt, ...) |
{ |
return 0; |
} |
__printf(1, 2) int dbgprintf(const char *fmt, ...); |
#define printk(fmt, arg...) dbgprintf(fmt , ##arg) |
#ifndef pr_fmt |
#define pr_fmt(fmt) fmt |
#endif |
#define pr_debug(fmt, ...) \ |
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
/* |
* These can be used to print at the various log levels. |
* All of these will print unconditionally, although note that pr_debug() |
* and other debug macros are compiled out unless either DEBUG is defined |
* or CONFIG_DYNAMIC_DEBUG is set. |
*/ |
#define pr_emerg(fmt, ...) \ |
printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_alert(fmt, ...) \ |
printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_crit(fmt, ...) \ |
printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_err(fmt, ...) \ |
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_warning(fmt, ...) \ |
printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_warn pr_warning |
#define pr_notice(fmt, ...) \ |
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_info(fmt, ...) \ |
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_cont(fmt, ...) \ |
printk(KERN_CONT fmt, ##__VA_ARGS__) |
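A hedged example of the pr_fmt() hook these macros consult (prefix, message, and err variable are hypothetical); the override must be defined before the kernel headers are included, since this header only supplies a default under #ifndef:

#define pr_fmt(fmt) "mydrv: " fmt

pr_info("probe ok\n");			/* logs "mydrv: probe ok" */
pr_err("probe failed: %d\n", err);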
/* pr_devel() should produce zero code unless DEBUG is defined */ |
#ifdef DEBUG |
#define pr_devel(fmt, ...) \ |
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#else |
#define pr_devel(fmt, ...) \ |
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#endif |
/* |
* Print a one-time message (analogous to WARN_ONCE() et al): |
*/ |
#ifdef CONFIG_PRINTK |
#define printk_once(fmt, ...) \ |
({ \ |
static bool __print_once __read_mostly; \ |
\ |
if (!__print_once) { \ |
__print_once = true; \ |
printk(fmt, ##__VA_ARGS__); \ |
} \ |
}) |
#define printk_deferred_once(fmt, ...) \ |
({ \ |
static bool __print_once __read_mostly; \ |
\ |
if (!__print_once) { \ |
__print_once = true; \ |
printk_deferred(fmt, ##__VA_ARGS__); \ |
} \ |
}) |
#else |
#define printk_once(fmt, ...) \ |
no_printk(fmt, ##__VA_ARGS__) |
#define printk_deferred_once(fmt, ...) \ |
no_printk(fmt, ##__VA_ARGS__) |
#endif |
#define pr_emerg_once(fmt, ...) \ |
printk_once(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_alert_once(fmt, ...) \ |
printk_once(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_crit_once(fmt, ...) \ |
printk_once(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_err_once(fmt, ...) \ |
printk_once(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_warn_once(fmt, ...) \ |
printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_notice_once(fmt, ...) \ |
printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_info_once(fmt, ...) \ |
printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_cont_once(fmt, ...) \ |
printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__) |
#if defined(DEBUG) |
#define pr_devel_once(fmt, ...) \ |
printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#else |
#define pr_devel_once(fmt, ...) \ |
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#endif |
/* If you are writing a driver, please use dev_dbg instead */ |
#if defined(DEBUG) |
#define pr_debug_once(fmt, ...) \ |
printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#else |
#define pr_debug_once(fmt, ...) \ |
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#endif |
/* |
* ratelimited messages with a local ratelimit_state;
* no local ratelimit_state is used in the !PRINTK case.
*/ |
#ifdef CONFIG_PRINTK |
#define printk_ratelimited(fmt, ...) \ |
({ \ |
static DEFINE_RATELIMIT_STATE(_rs, \ |
DEFAULT_RATELIMIT_INTERVAL, \ |
DEFAULT_RATELIMIT_BURST); \ |
\ |
if (__ratelimit(&_rs)) \ |
printk(fmt, ##__VA_ARGS__); \ |
}) |
#else |
#define printk_ratelimited(fmt, ...) \ |
no_printk(fmt, ##__VA_ARGS__) |
#endif |
#define pr_emerg_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_alert_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_crit_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_err_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_warn_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_notice_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_info_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) |
/* no pr_cont_ratelimited, don't do that... */ |
#if defined(DEBUG) |
#define pr_devel_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#else |
#define pr_devel_ratelimited(fmt, ...) \ |
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#endif |
/* If you are writing a driver, please use dev_dbg instead */ |
#if defined(CONFIG_DYNAMIC_DEBUG) |
/* descriptor check is first to prevent flooding with "callbacks suppressed" */ |
#define pr_debug_ratelimited(fmt, ...) \ |
do { \ |
static DEFINE_RATELIMIT_STATE(_rs, \ |
DEFAULT_RATELIMIT_INTERVAL, \ |
DEFAULT_RATELIMIT_BURST); \ |
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ |
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ |
__ratelimit(&_rs)) \ |
__dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \ |
} while (0) |
#elif defined(DEBUG) |
#define pr_debug_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#else |
#define pr_debug_ratelimited(fmt, ...) \ |
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#endif |
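A hedged sketch of the ratelimited variants (device context hypothetical); they suit paths that can fire in bursts, such as interrupt handlers, where a plain pr_warn() would flood the log:

if (status & STATUS_DMA_ERR)
	pr_warn_ratelimited("mydrv: DMA error, status %#x\n", status);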
extern const struct file_operations kmsg_fops; |
enum { |
DUMP_PREFIX_NONE, |
DUMP_PREFIX_ADDRESS, |
DUMP_PREFIX_OFFSET |
}; |
extern void hex_dump_to_buffer(const void *buf, size_t len, |
int rowsize, int groupsize, |
char *linebuf, size_t linebuflen, bool ascii); |
extern void print_hex_dump(const char *level, const char *prefix_str, |
int prefix_type, int rowsize, int groupsize, |
const void *buf, size_t len, bool ascii); |
extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, |
const void *buf, size_t len); |
#endif |
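A hedged example of the hex-dump helpers declared at the end of this header (buffer hypothetical); print_hex_dump_bytes() emits at KERN_DEBUG, 16 bytes per line, with the chosen prefix style:

print_hex_dump_bytes("mydrv rx: ", DUMP_PREFIX_OFFSET, buf, len);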
/drivers/include/linux/range.h |
---|
0,0 → 1,30 |
#ifndef _LINUX_RANGE_H |
#define _LINUX_RANGE_H |
struct range { |
u64 start; |
u64 end; |
}; |
int add_range(struct range *range, int az, int nr_range, |
u64 start, u64 end); |
int add_range_with_merge(struct range *range, int az, int nr_range, |
u64 start, u64 end); |
void subtract_range(struct range *range, int az, u64 start, u64 end); |
int clean_sort_range(struct range *range, int az); |
void sort_range(struct range *range, int nr_range); |
#define MAX_RESOURCE ((resource_size_t)~0) |
static inline resource_size_t cap_resource(u64 val) |
{ |
if (val > MAX_RESOURCE) |
return MAX_RESOURCE; |
return val; |
} |
#endif |
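A hedged sketch of the range helpers (array size and addresses hypothetical); the add helpers return the updated count, subtract_range() punches a hole, and clean_sort_range() compacts and sorts the array:

static struct range ranges[16];
int nr = 0;

nr = add_range_with_merge(ranges, ARRAY_SIZE(ranges), nr, 0x100000, 0x200000);
subtract_range(ranges, ARRAY_SIZE(ranges), 0x180000, 0x1c0000);
nr = clean_sort_range(ranges, ARRAY_SIZE(ranges));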
/drivers/include/linux/rbtree_augmented.h |
---|
43,6 → 43,16 |
extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, |
void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); |
/* |
* Fixup the rbtree and update the augmented information when rebalancing. |
* |
* On insertion, the user must update the augmented information on the path |
* leading to the inserted node, then call rb_link_node() as usual and |
* rb_insert_augmented() instead of the usual rb_insert_color() call.
* If rb_insert_augmented() rebalances the rbtree, it will call back into
* a user provided function to update the augmented information on the |
* affected subtrees. |
*/ |
static inline void |
rb_insert_augmented(struct rb_node *node, struct rb_root *root, |
const struct rb_augment_callbacks *augment) |
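Following the protocol in the comment above, a hedged insertion sketch for a max-augmented tree (the node type, key field, and callback table are hypothetical); the augmented data on the search path is refreshed before linking:

struct rb_node **link = &root->rb_node, *parent = NULL;

while (*link) {
	struct thing *t = rb_entry(*link, struct thing, rb);

	parent = *link;
	t->subtree_max = max(t->subtree_max, new->val);	/* refresh path data */
	link = new->val < t->val ? &parent->rb_left : &parent->rb_right;
}
rb_link_node(&new->rb, parent, link);
rb_insert_augmented(&new->rb, root, &my_callbacks);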
/drivers/include/linux/rculist.h |
---|
7,7 → 7,7 |
* RCU-protected list version |
*/ |
#include <linux/list.h> |
//#include <linux/rcupdate.h> |
#include <linux/rcupdate.h> |
/* |
* Why is there no list_empty_rcu()? Because list_empty() serves this |
19,6 → 19,21 |
*/ |
/* |
* INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers |
* @list: list to be initialized |
* |
* You should instead use INIT_LIST_HEAD() for normal initialization and |
* cleanup tasks, when readers have no access to the list being initialized. |
* However, if the list being initialized is visible to readers, you |
* need to keep the compiler from being too mischievous. |
*/ |
static inline void INIT_LIST_HEAD_RCU(struct list_head *list) |
{ |
ACCESS_ONCE(list->next) = list; |
ACCESS_ONCE(list->prev) = list; |
} |
/* |
* return the ->next pointer of a list_head in an rcu safe |
* way, we must not access it directly |
*/ |
197,7 → 212,7 |
* instead of INIT_LIST_HEAD(). |
*/ |
INIT_LIST_HEAD(list); |
INIT_LIST_HEAD_RCU(list); |
/* |
* At this point, the list body still points to the source list. |
226,7 → 241,7 |
* list_entry_rcu - get the struct for this entry |
* @ptr: the &struct list_head pointer. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* This primitive may safely run concurrently with the _rcu list-mutation |
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
263,7 → 278,7 |
* list_first_or_null_rcu - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Note that if the list is empty, it returns NULL. |
* |
281,7 → 296,7 |
* list_for_each_entry_rcu - iterate over rcu list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* This list-traversal primitive may safely run concurrently with |
* the _rcu list-mutation primitives such as list_add_rcu() |
296,7 → 311,7 |
* list_for_each_entry_continue_rcu - continue iteration over list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Continue to iterate over list of given type, continuing after |
* the current position. |
527,6 → 542,15 |
pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ |
typeof(*(pos)), member)) |
/** |
* hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point |
* @pos: the type * to use as a loop cursor. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry_from_rcu(pos, member) \ |
for (; pos; \ |
pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ |
typeof(*(pos)), member)) |
#endif /* __KERNEL__ */ |
#endif |
/drivers/include/linux/rcupdate.h |
---|
0,0 → 1,1158 |
/* |
* Read-Copy Update mechanism for mutual exclusion |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, you can access it online at |
* http://www.gnu.org/licenses/gpl-2.0.html. |
* |
* Copyright IBM Corporation, 2001 |
* |
* Author: Dipankar Sarma <dipankar@in.ibm.com> |
* |
* Based on the original work by Paul McKenney <paulmck@us.ibm.com> |
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. |
* Papers: |
* http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf |
* http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) |
* |
* For detailed explanation of Read-Copy Update mechanism see - |
* http://lse.sourceforge.net/locking/rcupdate.html |
* |
*/ |
#ifndef __LINUX_RCUPDATE_H |
#define __LINUX_RCUPDATE_H |
#include <linux/types.h> |
#include <linux/cache.h> |
#include <linux/spinlock.h> |
#include <linux/threads.h> |
//#include <linux/cpumask.h> |
#include <linux/seqlock.h> |
#include <linux/lockdep.h> |
#include <linux/completion.h> |
//#include <linux/debugobjects.h> |
#include <linux/bug.h> |
#include <linux/compiler.h> |
#include <asm/barrier.h> |
extern int rcu_expedited; /* for sysctl */ |
enum rcutorture_type { |
RCU_FLAVOR, |
RCU_BH_FLAVOR, |
RCU_SCHED_FLAVOR, |
RCU_TASKS_FLAVOR, |
SRCU_FLAVOR, |
INVALID_RCU_FLAVOR |
}; |
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) |
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, |
unsigned long *gpnum, unsigned long *completed); |
void rcutorture_record_test_transition(void); |
void rcutorture_record_progress(unsigned long vernum); |
void do_trace_rcu_torture_read(const char *rcutorturename, |
struct rcu_head *rhp, |
unsigned long secs, |
unsigned long c_old, |
unsigned long c); |
#else |
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type, |
int *flags, |
unsigned long *gpnum, |
unsigned long *completed) |
{ |
*flags = 0; |
*gpnum = 0; |
*completed = 0; |
} |
static inline void rcutorture_record_test_transition(void) |
{ |
} |
static inline void rcutorture_record_progress(unsigned long vernum) |
{ |
} |
#ifdef CONFIG_RCU_TRACE |
void do_trace_rcu_torture_read(const char *rcutorturename, |
struct rcu_head *rhp, |
unsigned long secs, |
unsigned long c_old, |
unsigned long c); |
#else |
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ |
do { } while (0) |
#endif |
#endif |
#define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) |
#define UINT_CMP_LT(a, b) (UINT_MAX / 2 < (a) - (b)) |
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) |
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) |
#define ulong2long(a) (*(long *)(&(a))) |
/* Exported common interfaces */ |
#ifdef CONFIG_PREEMPT_RCU |
/** |
* call_rcu() - Queue an RCU callback for invocation after a grace period. |
* @head: structure to be used for queueing the RCU updates. |
* @func: actual callback function to be invoked after the grace period |
* |
* The callback function will be invoked some time after a full grace |
* period elapses, in other words after all pre-existing RCU read-side |
* critical sections have completed. However, the callback function |
* might well execute concurrently with RCU read-side critical sections |
* that started after call_rcu() was invoked. RCU read-side critical |
* sections are delimited by rcu_read_lock() and rcu_read_unlock(), |
* and may be nested. |
* |
* Note that all CPUs must agree that the grace period extended beyond |
* all pre-existing RCU read-side critical sections. On systems with more
* than one CPU, this means that when "func()" is invoked, each CPU is |
* guaranteed to have executed a full memory barrier since the end of its |
* last RCU read-side critical section whose beginning preceded the call |
* to call_rcu(). It also means that each CPU executing an RCU read-side |
* critical section that continues beyond the start of "func()" must have |
* executed a memory barrier after the call_rcu() but before the beginning |
* of that RCU read-side critical section. Note that these guarantees |
* include CPUs that are offline, idle, or executing in user mode, as |
* well as CPUs that are executing in the kernel. |
* |
* Furthermore, if CPU A invoked call_rcu() and CPU B invoked the |
* resulting RCU callback function "func()", then both CPU A and CPU B are |
* guaranteed to execute a full memory barrier during the time interval |
* between the call to call_rcu() and the invocation of "func()" -- even |
* if CPU A and CPU B are the same CPU (but again only if the system has |
* more than one CPU). |
*/ |
void call_rcu(struct rcu_head *head, |
void (*func)(struct rcu_head *head)); |
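A canonical (hedged) usage sketch of call_rcu(): the element is first unlinked from every reader-visible structure, then freed from the callback once all pre-existing readers are done (the struct layout and pointer 'p' are hypothetical):

struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* updater side, after list_del_rcu(&p->list) or similar: */
call_rcu(&p->rcu, foo_free_rcu);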
#else /* #ifdef CONFIG_PREEMPT_RCU */ |
/* In classic RCU, call_rcu() is just call_rcu_sched(). */ |
#define call_rcu call_rcu_sched |
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ |
/** |
* call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates. |
* @func: actual callback function to be invoked after the grace period |
* |
* The callback function will be invoked some time after a full grace |
* period elapses, in other words after all currently executing RCU |
* read-side critical sections have completed. call_rcu_bh() assumes |
* that the read-side critical sections end on completion of a softirq |
* handler. This means that read-side critical sections in process |
* context must not be interrupted by softirqs. This interface is to be |
* used when most of the read-side critical sections are in softirq context. |
* RCU read-side critical sections are delimited by : |
* - rcu_read_lock() and rcu_read_unlock(), if in interrupt context. |
* OR |
* - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. |
* These may be nested. |
* |
* See the description of call_rcu() for more detailed information on |
* memory ordering guarantees. |
*/ |
void call_rcu_bh(struct rcu_head *head, |
void (*func)(struct rcu_head *head)); |
/** |
* call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
* @head: structure to be used for queueing the RCU updates. |
* @func: actual callback function to be invoked after the grace period |
* |
* The callback function will be invoked some time after a full grace |
* period elapses, in other words after all currently executing RCU |
* read-side critical sections have completed. call_rcu_sched() assumes |
* that the read-side critical sections end on enabling of preemption |
* or on voluntary preemption. |
* RCU read-side critical sections are delimited by : |
* - rcu_read_lock_sched() and rcu_read_unlock_sched(), |
* OR |
* anything that disables preemption. |
* These may be nested. |
* |
* See the description of call_rcu() for more detailed information on |
* memory ordering guarantees. |
*/ |
void call_rcu_sched(struct rcu_head *head, |
void (*func)(struct rcu_head *rcu)); |
void synchronize_sched(void); |
/** |
* call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
* @head: structure to be used for queueing the RCU updates. |
* @func: actual callback function to be invoked after the grace period |
* |
* The callback function will be invoked some time after a full grace |
* period elapses, in other words after all currently executing RCU |
* read-side critical sections have completed. call_rcu_tasks() assumes |
* that the read-side critical sections end at a voluntary context |
* switch (not a preemption!), entry into idle, or transition to usermode |
* execution. As such, there are no read-side primitives analogous to |
* rcu_read_lock() and rcu_read_unlock() because this primitive is intended |
* to determine that all tasks have passed through a safe state, not so |
* much for data-structure synchronization.
* |
* See the description of call_rcu() for more detailed information on |
* memory ordering guarantees. |
*/ |
void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head)); |
void synchronize_rcu_tasks(void); |
void rcu_barrier_tasks(void); |
#ifdef CONFIG_PREEMPT_RCU |
void __rcu_read_lock(void); |
void __rcu_read_unlock(void); |
void rcu_read_unlock_special(struct task_struct *t); |
void synchronize_rcu(void); |
/* |
* Defined as a macro as it is a very low level header included from |
* areas that don't even know about current. This gives the rcu_read_lock() |
* nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other |
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable. |
*/ |
#define rcu_preempt_depth() (current->rcu_read_lock_nesting) |
#else /* #ifdef CONFIG_PREEMPT_RCU */ |
static inline void __rcu_read_lock(void) |
{ |
preempt_disable(); |
} |
static inline void __rcu_read_unlock(void) |
{ |
preempt_enable(); |
} |
static inline void synchronize_rcu(void) |
{ |
synchronize_sched(); |
} |
static inline int rcu_preempt_depth(void) |
{ |
return 0; |
} |
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ |
/* Internal to kernel */ |
void rcu_init(void); |
void rcu_sched_qs(void); |
void rcu_bh_qs(void); |
void rcu_check_callbacks(int user); |
struct notifier_block; |
void rcu_idle_enter(void); |
void rcu_idle_exit(void); |
void rcu_irq_enter(void); |
void rcu_irq_exit(void); |
#ifdef CONFIG_RCU_STALL_COMMON |
void rcu_sysrq_start(void); |
void rcu_sysrq_end(void); |
#else /* #ifdef CONFIG_RCU_STALL_COMMON */ |
static inline void rcu_sysrq_start(void) |
{ |
} |
static inline void rcu_sysrq_end(void) |
{ |
} |
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */ |
#ifdef CONFIG_RCU_USER_QS |
void rcu_user_enter(void); |
void rcu_user_exit(void); |
#else |
static inline void rcu_user_enter(void) { } |
static inline void rcu_user_exit(void) { } |
static inline void rcu_user_hooks_switch(struct task_struct *prev, |
struct task_struct *next) { } |
#endif /* CONFIG_RCU_USER_QS */ |
#ifdef CONFIG_RCU_NOCB_CPU |
void rcu_init_nohz(void); |
#else /* #ifdef CONFIG_RCU_NOCB_CPU */ |
static inline void rcu_init_nohz(void) |
{ |
} |
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ |
/** |
* RCU_NONIDLE - Indicate idle-loop code that needs RCU readers |
* @a: Code that RCU needs to pay attention to. |
* |
* RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden |
* in the inner idle loop, that is, between the rcu_idle_enter() and |
* the rcu_idle_exit() -- RCU will happily ignore any such read-side |
* critical sections. However, things like powertop need tracepoints |
* in the inner idle loop. |
* |
* This macro provides the way out: RCU_NONIDLE(do_something_with_RCU()) |
 * will tell RCU that it needs to pay attention, invoke its argument |
* (in this example, a call to the do_something_with_RCU() function), |
* and then tell RCU to go back to ignoring this CPU. It is permissible |
* to nest RCU_NONIDLE() wrappers, but the nesting level is currently |
* quite limited. If deeper nesting is required, it will be necessary |
* to adjust DYNTICK_TASK_NESTING_VALUE accordingly. |
*/ |
#define RCU_NONIDLE(a) \ |
do { \ |
rcu_irq_enter(); \ |
do { a; } while (0); \ |
rcu_irq_exit(); \ |
} while (0) |
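/* |
 * Illustrative sketch (not part of the original header): wrapping a |
 * hypothetical tracepoint that must fire from the inner idle loop, |
 * where RCU would otherwise ignore the reader. |
 * |
 *	RCU_NONIDLE(trace_cpu_idle_event(cpu, state)); |
 */ |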
/* |
* Note a voluntary context switch for RCU-tasks benefit. This is a |
* macro rather than an inline function to avoid #include hell. |
*/ |
#ifdef CONFIG_TASKS_RCU |
#define TASKS_RCU(x) x |
extern struct srcu_struct tasks_rcu_exit_srcu; |
#define rcu_note_voluntary_context_switch(t) \ |
do { \ |
if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \ |
ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \ |
} while (0) |
#else /* #ifdef CONFIG_TASKS_RCU */ |
#define TASKS_RCU(x) do { } while (0) |
#define rcu_note_voluntary_context_switch(t) do { } while (0) |
#endif /* #else #ifdef CONFIG_TASKS_RCU */ |
/** |
* cond_resched_rcu_qs - Report potential quiescent states to RCU |
* |
* This macro resembles cond_resched(), except that it is defined to |
* report potential quiescent states to RCU-tasks even if the cond_resched() |
* machinery were to be shut off, as some advocate for PREEMPT kernels. |
*/ |
#define cond_resched_rcu_qs() \ |
do { \ |
if (!cond_resched()) \ |
rcu_note_voluntary_context_switch(current); \ |
} while (0) |
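/* |
 * Illustrative sketch (not part of the original header): a long-running |
 * loop can use cond_resched_rcu_qs() so that neither normal RCU nor |
 * RCU-tasks grace periods are stalled. The list and per-item helper |
 * below are hypothetical. |
 * |
 *	struct my_item *item; |
 * |
 *	list_for_each_entry(item, &my_long_list, node) { |
 *		process_item(item);	/* hypothetical per-item work */ |
 *		cond_resched_rcu_qs();	/* report a quiescent state */ |
 *	} |
 */ |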
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) |
bool __rcu_is_watching(void); |
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ |
/* |
* Infrastructure to implement the synchronize_() primitives in |
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU. |
*/ |
typedef void call_rcu_func_t(struct rcu_head *head, |
void (*func)(struct rcu_head *head)); |
void wait_rcu_gp(call_rcu_func_t crf); |
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) |
#include <linux/rcutree.h> |
#elif defined(CONFIG_TINY_RCU) |
#include <linux/rcutiny.h> |
#else |
#error "Unknown RCU implementation specified to kernel configuration" |
#endif |
/* |
* init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic |
* initialization and destruction of rcu_head on the stack. rcu_head structures |
* allocated dynamically in the heap or defined statically don't need any |
* initialization. |
*/ |
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD |
void init_rcu_head(struct rcu_head *head); |
void destroy_rcu_head(struct rcu_head *head); |
void init_rcu_head_on_stack(struct rcu_head *head); |
void destroy_rcu_head_on_stack(struct rcu_head *head); |
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
static inline void init_rcu_head(struct rcu_head *head) |
{ |
} |
static inline void destroy_rcu_head(struct rcu_head *head) |
{ |
} |
static inline void init_rcu_head_on_stack(struct rcu_head *head) |
{ |
} |
static inline void destroy_rcu_head_on_stack(struct rcu_head *head) |
{ |
} |
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) |
bool rcu_lockdep_current_cpu_online(void); |
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ |
static inline bool rcu_lockdep_current_cpu_online(void) |
{ |
return true; |
} |
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
static inline void rcu_lock_acquire(struct lockdep_map *map) |
{ |
lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); |
} |
static inline void rcu_lock_release(struct lockdep_map *map) |
{ |
lock_release(map, 1, _THIS_IP_); |
} |
extern struct lockdep_map rcu_lock_map; |
extern struct lockdep_map rcu_bh_lock_map; |
extern struct lockdep_map rcu_sched_lock_map; |
extern struct lockdep_map rcu_callback_map; |
int debug_lockdep_rcu_enabled(void); |
int rcu_read_lock_held(void); |
int rcu_read_lock_bh_held(void); |
/** |
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? |
* |
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an |
* RCU-sched read-side critical section. In absence of |
* CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side |
* critical section unless it can prove otherwise. Note that disabling |
* of preemption (including disabling irqs) counts as an RCU-sched |
* read-side critical section. This is useful for debug checks in functions |
 * that require that they be called within an RCU-sched read-side |
* critical section. |
* |
* Check debug_lockdep_rcu_enabled() to prevent false positives during boot |
* and while lockdep is disabled. |
* |
* Note that if the CPU is in the idle loop from an RCU point of |
* view (ie: that we are in the section between rcu_idle_enter() and |
* rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU |
* did an rcu_read_lock(). The reason for this is that RCU ignores CPUs |
* that are in such a section, considering these as in extended quiescent |
* state, so such a CPU is effectively never in an RCU read-side critical |
* section regardless of what RCU primitives it invokes. This state of |
* affairs is required --- we need to keep an RCU-free window in idle |
* where the CPU may possibly enter into low power mode. This way we can |
* notice an extended quiescent state to other CPUs that started a grace |
* period. Otherwise we would delay any grace period as long as we run in |
* the idle task. |
* |
* Similarly, we avoid claiming an SRCU read lock held if the current |
* CPU is offline. |
*/ |
#ifdef CONFIG_PREEMPT_COUNT |
static inline int rcu_read_lock_sched_held(void) |
{ |
int lockdep_opinion = 0; |
if (!debug_lockdep_rcu_enabled()) |
return 1; |
if (!rcu_is_watching()) |
return 0; |
if (!rcu_lockdep_current_cpu_online()) |
return 0; |
if (debug_locks) |
lockdep_opinion = lock_is_held(&rcu_sched_lock_map); |
return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); |
} |
#else /* #ifdef CONFIG_PREEMPT_COUNT */ |
static inline int rcu_read_lock_sched_held(void) |
{ |
return 1; |
} |
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */ |
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
# define rcu_lock_acquire(a) do { } while (0) |
# define rcu_lock_release(a) do { } while (0) |
static inline int rcu_read_lock_held(void) |
{ |
return 1; |
} |
static inline int rcu_read_lock_bh_held(void) |
{ |
return 1; |
} |
#ifdef CONFIG_PREEMPT_COUNT |
static inline int rcu_read_lock_sched_held(void) |
{ |
return preempt_count() != 0 || irqs_disabled(); |
} |
#else /* #ifdef CONFIG_PREEMPT_COUNT */ |
static inline int rcu_read_lock_sched_held(void) |
{ |
return 1; |
} |
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */ |
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
#ifdef CONFIG_PROVE_RCU |
/** |
* rcu_lockdep_assert - emit lockdep splat if specified condition not met |
* @c: condition to check |
* @s: informative message |
*/ |
#define rcu_lockdep_assert(c, s) \ |
do { \ |
static bool __section(.data.unlikely) __warned; \ |
if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ |
__warned = true; \ |
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ |
} \ |
} while (0) |
#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) |
static inline void rcu_preempt_sleep_check(void) |
{ |
rcu_lockdep_assert(!lock_is_held(&rcu_lock_map), |
"Illegal context switch in RCU read-side critical section"); |
} |
#else /* #ifdef CONFIG_PROVE_RCU */ |
static inline void rcu_preempt_sleep_check(void) |
{ |
} |
#endif /* #else #ifdef CONFIG_PROVE_RCU */ |
#define rcu_sleep_check() \ |
do { \ |
rcu_preempt_sleep_check(); \ |
rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \ |
"Illegal context switch in RCU-bh read-side critical section"); \ |
rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \ |
"Illegal context switch in RCU-sched read-side critical section"); \ |
} while (0) |
#else /* #ifdef CONFIG_PROVE_RCU */ |
#define rcu_lockdep_assert(c, s) do { } while (0) |
#define rcu_sleep_check() do { } while (0) |
#endif /* #else #ifdef CONFIG_PROVE_RCU */ |
/* |
* Helper functions for rcu_dereference_check(), rcu_dereference_protected() |
* and rcu_assign_pointer(). Some of these could be folded into their |
* callers, but they are left separate in order to ease introduction of |
* multiple flavors of pointers to match the multiple flavors of RCU |
 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in |
* the future. |
*/ |
#ifdef __CHECKER__ |
#define rcu_dereference_sparse(p, space) \ |
((void)(((typeof(*p) space *)p) == p)) |
#else /* #ifdef __CHECKER__ */ |
#define rcu_dereference_sparse(p, space) |
#endif /* #else #ifdef __CHECKER__ */ |
#define __rcu_access_pointer(p, space) \ |
({ \ |
typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \ |
rcu_dereference_sparse(p, space); \ |
((typeof(*p) __force __kernel *)(_________p1)); \ |
}) |
#define __rcu_dereference_check(p, c, space) \ |
({ \ |
typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \ |
rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \ |
rcu_dereference_sparse(p, space); \ |
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ |
((typeof(*p) __force __kernel *)(_________p1)); \ |
}) |
#define __rcu_dereference_protected(p, c, space) \ |
({ \ |
rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \ |
rcu_dereference_sparse(p, space); \ |
((typeof(*p) __force __kernel *)(p)); \ |
}) |
#define __rcu_access_index(p, space) \ |
({ \ |
typeof(p) _________p1 = ACCESS_ONCE(p); \ |
rcu_dereference_sparse(p, space); \ |
(_________p1); \ |
}) |
#define __rcu_dereference_index_check(p, c) \ |
({ \ |
typeof(p) _________p1 = ACCESS_ONCE(p); \ |
rcu_lockdep_assert(c, \ |
"suspicious rcu_dereference_index_check() usage"); \ |
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ |
(_________p1); \ |
}) |
/** |
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable |
* @v: The value to statically initialize with. |
*/ |
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) |
/** |
* lockless_dereference() - safely load a pointer for later dereference |
* @p: The pointer to load |
* |
* Similar to rcu_dereference(), but for situations where the pointed-to |
* object's lifetime is managed by something other than RCU. That |
* "something other" might be reference counting or simple immortality. |
*/ |
#define lockless_dereference(p) \ |
({ \ |
typeof(p) _________p1 = ACCESS_ONCE(p); \ |
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ |
(_________p1); \ |
}) |
/** |
* rcu_assign_pointer() - assign to RCU-protected pointer |
* @p: pointer to assign to |
* @v: value to assign (publish) |
* |
* Assigns the specified value to the specified RCU-protected |
* pointer, ensuring that any concurrent RCU readers will see |
* any prior initialization. |
* |
* Inserts memory barriers on architectures that require them |
* (which is most of them), and also prevents the compiler from |
* reordering the code that initializes the structure after the pointer |
* assignment. More importantly, this call documents which pointers |
* will be dereferenced by RCU read-side code. |
* |
* In some special cases, you may use RCU_INIT_POINTER() instead |
* of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due |
* to the fact that it does not constrain either the CPU or the compiler. |
* That said, using RCU_INIT_POINTER() when you should have used |
* rcu_assign_pointer() is a very bad thing that results in |
* impossible-to-diagnose memory corruption. So please be careful. |
* See the RCU_INIT_POINTER() comment header for details. |
* |
* Note that rcu_assign_pointer() evaluates each of its arguments only |
* once, appearances notwithstanding. One of the "extra" evaluations |
* is in typeof() and the other visible only to sparse (__CHECKER__), |
* neither of which actually execute the argument. As with most cpp |
* macros, this execute-arguments-only-once property is important, so |
* please be careful when making changes to rcu_assign_pointer() and the |
* other macros that it invokes. |
*/ |
#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v)) |
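/* |
 * Illustrative sketch (not part of the original header): the classic |
 * initialize-then-publish pattern. "gp" and "struct foo" are |
 * hypothetical. |
 * |
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL); |
 * |
 *	if (p) { |
 *		p->a = 1;			/* fully initialize first ... */ |
 *		rcu_assign_pointer(gp, p);	/* ... then publish to readers */ |
 *	} |
 */ |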
/** |
* rcu_access_pointer() - fetch RCU pointer with no dereferencing |
* @p: The pointer to read |
* |
* Return the value of the specified RCU-protected pointer, but omit the |
* smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful |
* when the value of this pointer is accessed, but the pointer is not |
* dereferenced, for example, when testing an RCU-protected pointer against |
* NULL. Although rcu_access_pointer() may also be used in cases where |
* update-side locks prevent the value of the pointer from changing, you |
* should instead use rcu_dereference_protected() for this use case. |
* |
* It is also permissible to use rcu_access_pointer() when read-side |
* access to the pointer was removed at least one grace period ago, as |
* is the case in the context of the RCU callback that is freeing up |
* the data, or after a synchronize_rcu() returns. This can be useful |
* when tearing down multi-linked structures after a grace period |
* has elapsed. |
*/ |
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu) |
/** |
* rcu_dereference_check() - rcu_dereference with debug checking |
* @p: The pointer to read, prior to dereferencing |
* @c: The conditions under which the dereference will take place |
* |
* Do an rcu_dereference(), but check that the conditions under which the |
* dereference will take place are correct. Typically the conditions |
* indicate the various locking conditions that should be held at that |
* point. The check should return true if the conditions are satisfied. |
* An implicit check for being in an RCU read-side critical section |
* (rcu_read_lock()) is included. |
* |
* For example: |
* |
* bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock)); |
* |
* could be used to indicate to lockdep that foo->bar may only be dereferenced |
* if either rcu_read_lock() is held, or that the lock required to replace |
* the bar struct at foo->bar is held. |
* |
* Note that the list of conditions may also include indications of when a lock |
* need not be held, for example during initialisation or destruction of the |
* target struct: |
* |
* bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) || |
* atomic_read(&foo->usage) == 0); |
* |
* Inserts memory barriers on architectures that require them |
* (currently only the Alpha), prevents the compiler from refetching |
* (and from merging fetches), and, more importantly, documents exactly |
* which pointers are protected by RCU and checks that the pointer is |
* annotated as __rcu. |
*/ |
#define rcu_dereference_check(p, c) \ |
__rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu) |
/** |
* rcu_dereference_bh_check() - rcu_dereference_bh with debug checking |
* @p: The pointer to read, prior to dereferencing |
* @c: The conditions under which the dereference will take place |
* |
* This is the RCU-bh counterpart to rcu_dereference_check(). |
*/ |
#define rcu_dereference_bh_check(p, c) \ |
__rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu) |
/** |
* rcu_dereference_sched_check() - rcu_dereference_sched with debug checking |
* @p: The pointer to read, prior to dereferencing |
* @c: The conditions under which the dereference will take place |
* |
* This is the RCU-sched counterpart to rcu_dereference_check(). |
*/ |
#define rcu_dereference_sched_check(p, c) \ |
__rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \ |
__rcu) |
#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/ |
/* |
* The tracing infrastructure traces RCU (we want that), but unfortunately |
* some of the RCU checks causes tracing to lock up the system. |
* |
* The tracing version of rcu_dereference_raw() must not call |
* rcu_read_lock_held(). |
*/ |
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) |
/** |
* rcu_access_index() - fetch RCU index with no dereferencing |
* @p: The index to read |
* |
* Return the value of the specified RCU-protected index, but omit the |
* smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful |
* when the value of this index is accessed, but the index is not |
* dereferenced, for example, when testing an RCU-protected index against |
* -1. Although rcu_access_index() may also be used in cases where |
* update-side locks prevent the value of the index from changing, you |
* should instead use rcu_dereference_index_protected() for this use case. |
*/ |
#define rcu_access_index(p) __rcu_access_index((p), __rcu) |
/** |
* rcu_dereference_index_check() - rcu_dereference for indices with debug checking |
* @p: The pointer to read, prior to dereferencing |
* @c: The conditions under which the dereference will take place |
* |
* Similar to rcu_dereference_check(), but omits the sparse checking. |
* This allows rcu_dereference_index_check() to be used on integers, |
* which can then be used as array indices. Attempting to use |
* rcu_dereference_check() on an integer will give compiler warnings |
* because the sparse address-space mechanism relies on dereferencing |
* the RCU-protected pointer. Dereferencing integers is not something |
* that even gcc will put up with. |
* |
* Note that this function does not implicitly check for RCU read-side |
* critical sections. If this function gains lots of uses, it might |
* make sense to provide versions for each flavor of RCU, but it does |
* not make sense as of early 2010. |
*/ |
#define rcu_dereference_index_check(p, c) \ |
__rcu_dereference_index_check((p), (c)) |
/** |
* rcu_dereference_protected() - fetch RCU pointer when updates prevented |
* @p: The pointer to read, prior to dereferencing |
* @c: The conditions under which the dereference will take place |
* |
* Return the value of the specified RCU-protected pointer, but omit |
* both the smp_read_barrier_depends() and the ACCESS_ONCE(). This |
* is useful in cases where update-side locks prevent the value of the |
* pointer from changing. Please note that this primitive does -not- |
* prevent the compiler from repeating this reference or combining it |
* with other references, so it should not be used without protection |
* of appropriate locks. |
* |
* This function is only for update-side use. Using this function |
* when protected only by rcu_read_lock() will result in infrequent |
* but very ugly failures. |
*/ |
#define rcu_dereference_protected(p, c) \ |
__rcu_dereference_protected((p), (c), __rcu) |
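/* |
 * Illustrative sketch (not part of the original header): an updater |
 * holding the lock that guards the pointer may use |
 * rcu_dereference_protected() in place of a full rcu_dereference(). |
 * "foo", "bar" and "lock" are hypothetical. |
 * |
 *	spin_lock(&foo->lock); |
 *	old = rcu_dereference_protected(foo->bar, |
 *					lockdep_is_held(&foo->lock)); |
 *	rcu_assign_pointer(foo->bar, new); |
 *	spin_unlock(&foo->lock); |
 *	synchronize_rcu();	/* wait out pre-existing readers */ |
 *	kfree(old); |
 */ |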
/** |
* rcu_dereference() - fetch RCU-protected pointer for dereferencing |
* @p: The pointer to read, prior to dereferencing |
* |
* This is a simple wrapper around rcu_dereference_check(). |
*/ |
#define rcu_dereference(p) rcu_dereference_check(p, 0) |
/** |
* rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing |
* @p: The pointer to read, prior to dereferencing |
* |
* Makes rcu_dereference_check() do the dirty work. |
*/ |
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0) |
/** |
* rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing |
* @p: The pointer to read, prior to dereferencing |
* |
* Makes rcu_dereference_check() do the dirty work. |
*/ |
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) |
/** |
* rcu_read_lock() - mark the beginning of an RCU read-side critical section |
* |
* When synchronize_rcu() is invoked on one CPU while other CPUs |
* are within RCU read-side critical sections, then the |
* synchronize_rcu() is guaranteed to block until after all the other |
* CPUs exit their critical sections. Similarly, if call_rcu() is invoked |
* on one CPU while other CPUs are within RCU read-side critical |
* sections, invocation of the corresponding RCU callback is deferred |
 * until after all the other CPUs exit their critical sections. |
* |
* Note, however, that RCU callbacks are permitted to run concurrently |
* with new RCU read-side critical sections. One way that this can happen |
* is via the following sequence of events: (1) CPU 0 enters an RCU |
* read-side critical section, (2) CPU 1 invokes call_rcu() to register |
* an RCU callback, (3) CPU 0 exits the RCU read-side critical section, |
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU |
* callback is invoked. This is legal, because the RCU read-side critical |
* section that was running concurrently with the call_rcu() (and which |
* therefore might be referencing something that the corresponding RCU |
* callback would free up) has completed before the corresponding |
* RCU callback is invoked. |
* |
* RCU read-side critical sections may be nested. Any deferred actions |
* will be deferred until the outermost RCU read-side critical section |
* completes. |
* |
* You can avoid reading and understanding the next paragraph by |
* following this rule: don't put anything in an rcu_read_lock() RCU |
* read-side critical section that would block in a !PREEMPT kernel. |
* But if you want the full story, read on! |
* |
* In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), |
* it is illegal to block while in an RCU read-side critical section. |
* In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT |
* kernel builds, RCU read-side critical sections may be preempted, |
* but explicit blocking is illegal. Finally, in preemptible RCU |
* implementations in real-time (with -rt patchset) kernel builds, RCU |
* read-side critical sections may be preempted and they may also block, but |
* only when acquiring spinlocks that are subject to priority inheritance. |
*/ |
static inline void rcu_read_lock(void) |
{ |
__rcu_read_lock(); |
__acquire(RCU); |
rcu_lock_acquire(&rcu_lock_map); |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_lock() used illegally while idle"); |
} |
/* |
* So where is rcu_write_lock()? It does not exist, as there is no |
* way for writers to lock out RCU readers. This is a feature, not |
* a bug -- this property is what provides RCU's performance benefits. |
* Of course, writers must coordinate with each other. The normal |
* spinlock primitives work well for this, but any other technique may be |
* used as well. RCU does not care how the writers keep out of each |
* others' way, as long as they do so. |
*/ |
/** |
* rcu_read_unlock() - marks the end of an RCU read-side critical section. |
* |
* In most situations, rcu_read_unlock() is immune from deadlock. |
* However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock() |
* is responsible for deboosting, which it does via rt_mutex_unlock(). |
* Unfortunately, this function acquires the scheduler's runqueue and |
* priority-inheritance spinlocks. This means that deadlock could result |
* if the caller of rcu_read_unlock() already holds one of these locks or |
* any lock that is ever acquired while holding them; or any lock which |
* can be taken from interrupt context because rcu_boost()->rt_mutex_lock() |
* does not disable irqs while taking ->wait_lock. |
* |
* That said, RCU readers are never priority boosted unless they were |
* preempted. Therefore, one way to avoid deadlock is to make sure |
* that preemption never happens within any RCU read-side critical |
* section whose outermost rcu_read_unlock() is called with one of |
* rt_mutex_unlock()'s locks held. Such preemption can be avoided in |
* a number of ways, for example, by invoking preempt_disable() before |
* critical section's outermost rcu_read_lock(). |
* |
* Given that the set of locks acquired by rt_mutex_unlock() might change |
* at any time, a somewhat more future-proofed approach is to make sure |
 * that preemption never happens within any RCU read-side critical |
* section whose outermost rcu_read_unlock() is called with irqs disabled. |
* This approach relies on the fact that rt_mutex_unlock() currently only |
* acquires irq-disabled locks. |
* |
* The second of these two approaches is best in most situations, |
* however, the first approach can also be useful, at least to those |
* developers willing to keep abreast of the set of locks acquired by |
* rt_mutex_unlock(). |
* |
* See rcu_read_lock() for more information. |
*/ |
static inline void rcu_read_unlock(void) |
{ |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_unlock() used illegally while idle"); |
rcu_lock_release(&rcu_lock_map); |
__release(RCU); |
__rcu_read_unlock(); |
} |
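/* |
 * Illustrative sketch (not part of the original header): a minimal |
 * read-side critical section. "gp" and do_something_with() are |
 * hypothetical. |
 * |
 *	rcu_read_lock(); |
 *	p = rcu_dereference(gp); |
 *	if (p) |
 *		do_something_with(p);	/* p remains valid until unlock */ |
 *	rcu_read_unlock(); |
 */ |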
/** |
* rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section |
* |
 * This is the equivalent of rcu_read_lock(), but to be used when updates |
* are being done using call_rcu_bh() or synchronize_rcu_bh(). Since |
* both call_rcu_bh() and synchronize_rcu_bh() consider completion of a |
* softirq handler to be a quiescent state, a process in RCU read-side |
* critical section must be protected by disabling softirqs. Read-side |
* critical sections in interrupt context can use just rcu_read_lock(), |
* though this should at least be commented to avoid confusing people |
* reading the code. |
* |
* Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() |
* must occur in the same context, for example, it is illegal to invoke |
* rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh() |
* was invoked from some other task. |
*/ |
static inline void rcu_read_lock_bh(void) |
{ |
local_bh_disable(); |
__acquire(RCU_BH); |
rcu_lock_acquire(&rcu_bh_lock_map); |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_lock_bh() used illegally while idle"); |
} |
/* |
* rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section |
* |
* See rcu_read_lock_bh() for more information. |
*/ |
static inline void rcu_read_unlock_bh(void) |
{ |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_unlock_bh() used illegally while idle"); |
rcu_lock_release(&rcu_bh_lock_map); |
__release(RCU_BH); |
local_bh_enable(); |
} |
/** |
 * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section |
 * |
 * This is the equivalent of rcu_read_lock(), but to be used when updates |
* are being done using call_rcu_sched() or synchronize_rcu_sched(). |
* Read-side critical sections can also be introduced by anything that |
* disables preemption, including local_irq_disable() and friends. |
* |
* Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() |
* must occur in the same context, for example, it is illegal to invoke |
* rcu_read_unlock_sched() from process context if the matching |
* rcu_read_lock_sched() was invoked from an NMI handler. |
*/ |
static inline void rcu_read_lock_sched(void) |
{ |
preempt_disable(); |
__acquire(RCU_SCHED); |
rcu_lock_acquire(&rcu_sched_lock_map); |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_lock_sched() used illegally while idle"); |
} |
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ |
static inline notrace void rcu_read_lock_sched_notrace(void) |
{ |
preempt_disable_notrace(); |
__acquire(RCU_SCHED); |
} |
/* |
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section |
* |
* See rcu_read_lock_sched for more information. |
*/ |
static inline void rcu_read_unlock_sched(void) |
{ |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_unlock_sched() used illegally while idle"); |
rcu_lock_release(&rcu_sched_lock_map); |
__release(RCU_SCHED); |
preempt_enable(); |
} |
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ |
static inline notrace void rcu_read_unlock_sched_notrace(void) |
{ |
__release(RCU_SCHED); |
preempt_enable_notrace(); |
} |
/** |
* RCU_INIT_POINTER() - initialize an RCU protected pointer |
* |
* Initialize an RCU-protected pointer in special cases where readers |
* do not need ordering constraints on the CPU or the compiler. These |
* special cases are: |
* |
* 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or- |
* 2. The caller has taken whatever steps are required to prevent |
* RCU readers from concurrently accessing this pointer -or- |
* 3. The referenced data structure has already been exposed to |
* readers either at compile time or via rcu_assign_pointer() -and- |
* a. You have not made -any- reader-visible changes to |
* this structure since then -or- |
* b. It is OK for readers accessing this structure from its |
* new location to see the old state of the structure. (For |
* example, the changes were to statistical counters or to |
* other state where exact synchronization is not required.) |
* |
* Failure to follow these rules governing use of RCU_INIT_POINTER() will |
* result in impossible-to-diagnose memory corruption. As in the structures |
* will look OK in crash dumps, but any concurrent RCU readers might |
* see pre-initialized values of the referenced data structure. So |
* please be very careful how you use RCU_INIT_POINTER()!!! |
* |
* If you are creating an RCU-protected linked structure that is accessed |
* by a single external-to-structure RCU-protected pointer, then you may |
* use RCU_INIT_POINTER() to initialize the internal RCU-protected |
* pointers, but you must use rcu_assign_pointer() to initialize the |
* external-to-structure pointer -after- you have completely initialized |
* the reader-accessible portions of the linked structure. |
* |
* Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no |
* ordering guarantees for either the CPU or the compiler. |
*/ |
#define RCU_INIT_POINTER(p, v) \ |
do { \ |
rcu_dereference_sparse(p, __rcu); \ |
p = RCU_INITIALIZER(v); \ |
} while (0) |
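/* |
 * Illustrative sketch (not part of the original header): case 1 above, |
 * NULLing out a pointer, needs no ordering, so RCU_INIT_POINTER() |
 * suffices. "gp" and "gp_lock" are hypothetical. |
 * |
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock)); |
 *	RCU_INIT_POINTER(gp, NULL); |
 *	synchronize_rcu(); |
 *	kfree(old); |
 */ |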
/** |
* RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer |
* |
* GCC-style initialization for an RCU-protected pointer in a structure field. |
*/ |
#define RCU_POINTER_INITIALIZER(p, v) \ |
.p = RCU_INITIALIZER(v) |
/* |
* Does the specified offset indicate that the corresponding rcu_head |
* structure can be handled by kfree_rcu()? |
*/ |
#define __is_kfree_rcu_offset(offset) ((offset) < 4096) |
/* |
* Helper macro for kfree_rcu() to prevent argument-expansion eyestrain. |
*/ |
#define __kfree_rcu(head, offset) \ |
do { \ |
BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ |
kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \ |
} while (0) |
/** |
* kfree_rcu() - kfree an object after a grace period. |
* @ptr: pointer to kfree |
* @rcu_head: the name of the struct rcu_head within the type of @ptr. |
* |
 * Many rcu callback functions just call kfree() on the base structure. |
* These functions are trivial, but their size adds up, and furthermore |
* when they are used in a kernel module, that module must invoke the |
* high-latency rcu_barrier() function at module-unload time. |
* |
* The kfree_rcu() function handles this issue. Rather than encoding a |
* function address in the embedded rcu_head structure, kfree_rcu() instead |
* encodes the offset of the rcu_head structure within the base structure. |
* Because the functions are not allowed in the low-order 4096 bytes of |
* kernel virtual memory, offsets up to 4095 bytes can be accommodated. |
* If the offset is larger than 4095 bytes, a compile-time error will |
* be generated in __kfree_rcu(). If this error is triggered, you can |
* either fall back to use of call_rcu() or rearrange the structure to |
* position the rcu_head structure into the first 4096 bytes. |
* |
* Note that the allowable offset might decrease in the future, for example, |
* to allow something like kmem_cache_free_rcu(). |
* |
* The BUILD_BUG_ON check must not involve any function calls, hence the |
* checks are done in macros here. |
*/ |
#define kfree_rcu(ptr, rcu_head) \ |
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) |
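/* |
 * Illustrative sketch (not part of the original header): embedding the |
 * rcu_head near the start of the structure keeps its offset well below |
 * 4096 bytes. The structure, "gp" and "gp_lock" are hypothetical. |
 * |
 *	struct foo { |
 *		int a; |
 *		struct rcu_head rcu;	/* offsetof(struct foo, rcu) < 4096 */ |
 *	}; |
 * |
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock)); |
 *	rcu_assign_pointer(gp, new); |
 *	kfree_rcu(old, rcu);		/* kfree(old) after a grace period */ |
 */ |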
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) |
static inline int rcu_needs_cpu(unsigned long *delta_jiffies) |
{ |
*delta_jiffies = ULONG_MAX; |
return 0; |
} |
#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */ |
#if defined(CONFIG_RCU_NOCB_CPU_ALL) |
static inline bool rcu_is_nocb_cpu(int cpu) { return true; } |
#elif defined(CONFIG_RCU_NOCB_CPU) |
bool rcu_is_nocb_cpu(int cpu); |
#else |
static inline bool rcu_is_nocb_cpu(int cpu) { return false; } |
#endif |
/* Only for use by adaptive-ticks code. */ |
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE |
bool rcu_sys_is_idle(void); |
void rcu_sysidle_force_exit(void); |
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ |
static inline bool rcu_sys_is_idle(void) |
{ |
return false; |
} |
static inline void rcu_sysidle_force_exit(void) |
{ |
} |
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ |
#endif /* __LINUX_RCUPDATE_H */ |
/drivers/include/linux/rcutiny.h |
---|
0,0 → 1,160 |
/* |
* Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, you can access it online at |
* http://www.gnu.org/licenses/gpl-2.0.html. |
* |
* Copyright IBM Corporation, 2008 |
* |
* Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> |
* |
* For detailed explanation of Read-Copy Update mechanism see - |
* Documentation/RCU |
*/ |
#ifndef __LINUX_TINY_H |
#define __LINUX_TINY_H |
#include <linux/cache.h> |
static inline unsigned long get_state_synchronize_rcu(void) |
{ |
return 0; |
} |
static inline void cond_synchronize_rcu(unsigned long oldstate) |
{ |
might_sleep(); |
} |
static inline void rcu_barrier_bh(void) |
{ |
wait_rcu_gp(call_rcu_bh); |
} |
static inline void rcu_barrier_sched(void) |
{ |
wait_rcu_gp(call_rcu_sched); |
} |
static inline void synchronize_rcu_expedited(void) |
{ |
synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ |
} |
static inline void rcu_barrier(void) |
{ |
rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */ |
} |
static inline void synchronize_rcu_bh(void) |
{ |
synchronize_sched(); |
} |
static inline void synchronize_rcu_bh_expedited(void) |
{ |
synchronize_sched(); |
} |
static inline void synchronize_sched_expedited(void) |
{ |
synchronize_sched(); |
} |
static inline void kfree_call_rcu(struct rcu_head *head, |
void (*func)(struct rcu_head *rcu)) |
{ |
call_rcu(head, func); |
} |
static inline void rcu_note_context_switch(void) |
{ |
rcu_sched_qs(); |
} |
/* |
* Take advantage of the fact that there is only one CPU, which |
* allows us to ignore virtualization-based context switches. |
*/ |
static inline void rcu_virt_note_context_switch(int cpu) |
{ |
} |
/* |
* Return the number of grace periods. |
*/ |
static inline long rcu_batches_completed(void) |
{ |
return 0; |
} |
/* |
* Return the number of bottom-half grace periods. |
*/ |
static inline long rcu_batches_completed_bh(void) |
{ |
return 0; |
} |
static inline void rcu_force_quiescent_state(void) |
{ |
} |
static inline void rcu_bh_force_quiescent_state(void) |
{ |
} |
static inline void rcu_sched_force_quiescent_state(void) |
{ |
} |
static inline void show_rcu_gp_kthreads(void) |
{ |
} |
static inline void rcu_cpu_stall_reset(void) |
{ |
} |
static inline void exit_rcu(void) |
{ |
} |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
extern int rcu_scheduler_active __read_mostly; |
void rcu_scheduler_starting(void); |
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
static inline void rcu_scheduler_starting(void) |
{ |
} |
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) |
static inline bool rcu_is_watching(void) |
{ |
return __rcu_is_watching(); |
} |
#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ |
static inline bool rcu_is_watching(void) |
{ |
return true; |
} |
#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ |
#endif /* __LINUX_RCUTINY_H */ |
/drivers/include/linux/reservation.h |
---|
40,23 → 40,103 |
#define _LINUX_RESERVATION_H |
#include <linux/ww_mutex.h> |
#include <linux/fence.h> |
#include <linux/slab.h> |
#include <linux/seqlock.h> |
#include <linux/rcupdate.h> |
extern struct ww_class reservation_ww_class; |
extern struct lock_class_key reservation_seqcount_class; |
extern const char reservation_seqcount_string[]; |
struct reservation_object_list { |
struct rcu_head rcu; |
u32 shared_count, shared_max; |
struct fence __rcu *shared[]; |
}; |
struct reservation_object { |
struct ww_mutex lock; |
seqcount_t seq; |
struct fence __rcu *fence_excl; |
struct reservation_object_list __rcu *fence; |
struct reservation_object_list *staged; |
}; |
#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) |
#define reservation_object_assert_held(obj) \ |
lockdep_assert_held(&(obj)->lock.base) |
static inline void |
reservation_object_init(struct reservation_object *obj) |
{ |
ww_mutex_init(&obj->lock, &reservation_ww_class); |
__seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); |
RCU_INIT_POINTER(obj->fence, NULL); |
RCU_INIT_POINTER(obj->fence_excl, NULL); |
obj->staged = NULL; |
} |
static inline void |
reservation_object_fini(struct reservation_object *obj) |
{ |
int i; |
struct reservation_object_list *fobj; |
struct fence *excl; |
/* |
* This object should be dead and all references must have |
* been released to it, so no need to be protected with rcu. |
*/ |
excl = rcu_dereference_protected(obj->fence_excl, 1); |
if (excl) |
fence_put(excl); |
fobj = rcu_dereference_protected(obj->fence, 1); |
if (fobj) { |
for (i = 0; i < fobj->shared_count; ++i) |
fence_put(rcu_dereference_protected(fobj->shared[i], 1)); |
kfree(fobj); |
} |
kfree(obj->staged); |
ww_mutex_destroy(&obj->lock); |
} |
static inline struct reservation_object_list * |
reservation_object_get_list(struct reservation_object *obj) |
{ |
return rcu_dereference_protected(obj->fence, |
reservation_object_held(obj)); |
} |
static inline struct fence * |
reservation_object_get_excl(struct reservation_object *obj) |
{ |
return rcu_dereference_protected(obj->fence_excl, |
reservation_object_held(obj)); |
} |
int reservation_object_reserve_shared(struct reservation_object *obj); |
void reservation_object_add_shared_fence(struct reservation_object *obj, |
struct fence *fence); |
void reservation_object_add_excl_fence(struct reservation_object *obj, |
struct fence *fence); |
int reservation_object_get_fences_rcu(struct reservation_object *obj, |
struct fence **pfence_excl, |
unsigned *pshared_count, |
struct fence ***pshared); |
long reservation_object_wait_timeout_rcu(struct reservation_object *obj, |
bool wait_all, bool intr, |
unsigned long timeout); |
bool reservation_object_test_signaled_rcu(struct reservation_object *obj, |
bool test_all); |
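/* |
 * Illustrative sketch (not part of the original header): typical |
 * lifetime of a reservation object. The fence pointer and the NULL |
 * ww_acquire context are hypothetical; error handling is omitted. |
 * |
 *	struct reservation_object resv; |
 * |
 *	reservation_object_init(&resv); |
 *	ww_mutex_lock(&resv.lock, NULL); |
 *	reservation_object_add_excl_fence(&resv, fence); |
 *	ww_mutex_unlock(&resv.lock); |
 *	... |
 *	reservation_object_fini(&resv);	/* drops any remaining fences */ |
 */ |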
#endif /* _LINUX_RESERVATION_H */ |
/drivers/include/linux/scatterlist.h |
---|
101,6 → 101,22 |
return (struct page *)((sg)->page_link & ~0x3); |
} |
/** |
* sg_set_buf - Set sg entry to point at given data |
* @sg: SG entry |
* @buf: Data |
* @buflen: Data length |
* |
**/ |
//static inline void sg_set_buf(struct scatterlist *sg, const void *buf, |
// unsigned int buflen) |
//{ |
//#ifdef CONFIG_DEBUG_SG |
// BUG_ON(!virt_addr_valid(buf)); |
//#endif |
// sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); |
//} |
/* |
* Loop over each sg element, following the pointer to a new list if necessary |
*/ |
120,7 → 136,7 |
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, |
struct scatterlist *sgl) |
{ |
#ifndef ARCH_HAS_SG_CHAIN |
#ifndef CONFIG_ARCH_HAS_SG_CHAIN |
BUG(); |
#endif |
/drivers/include/linux/sched.h |
---|
7,5 → 7,6 |
#define TASK_COMM_LEN 16 |
#define schedule_timeout(x) delay(x) |
#define MAX_SCHEDULE_TIMEOUT LONG_MAX |
#endif |
/drivers/include/linux/seq_file.h |
---|
4,5 → 4,6 |
#include <errno.h> |
#endif |
/drivers/include/linux/seqlock.h |
---|
0,0 → 1,478 |
#ifndef __LINUX_SEQLOCK_H |
#define __LINUX_SEQLOCK_H |
/* |
* Reader/writer consistent mechanism without starving writers. This type of |
 * lock is for data where the reader wants a consistent set of information |
* and is willing to retry if the information changes. There are two types |
* of readers: |
* 1. Sequence readers which never block a writer but they may have to retry |
* if a writer is in progress by detecting change in sequence number. |
* Writers do not wait for a sequence reader. |
* 2. Locking readers which will wait if a writer or another locking reader |
* is in progress. A locking reader in progress will also block a writer |
* from going forward. Unlike the regular rwlock, the read lock here is |
* exclusive so that only one locking reader can get it. |
* |
* This is not as cache friendly as brlock. Also, this may not work well |
* for data that contains pointers, because any writer could |
* invalidate a pointer that a reader was following. |
* |
* Expected non-blocking reader usage: |
* do { |
* seq = read_seqbegin(&foo); |
* ... |
* } while (read_seqretry(&foo, seq)); |
* |
* |
* On non-SMP the spin locks disappear but the writer still needs |
* to increment the sequence variables because an interrupt routine could |
* change the state of the data. |
* |
* Based on x86_64 vsyscall gettimeofday |
* by Keith Owens and Andrea Arcangeli |
*/ |
#include <linux/spinlock.h> |
//#include <linux/preempt.h> |
#include <linux/lockdep.h> |
#include <asm/processor.h> |
/* |
* Version using sequence counter only. |
* This can be used when code has its own mutex protecting the |
 * updating starting before the write_seqcount_begin() and ending |
* after the write_seqcount_end(). |
*/ |
typedef struct seqcount { |
unsigned sequence; |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
struct lockdep_map dep_map; |
#endif |
} seqcount_t; |
static inline void __seqcount_init(seqcount_t *s, const char *name, |
struct lock_class_key *key) |
{ |
/* |
* Make sure we are not reinitializing a held lock: |
*/ |
lockdep_init_map(&s->dep_map, name, key, 0); |
s->sequence = 0; |
} |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# define SEQCOUNT_DEP_MAP_INIT(lockname) \ |
.dep_map = { .name = #lockname } |
# define seqcount_init(s) \ |
do { \ |
static struct lock_class_key __key; \ |
__seqcount_init((s), #s, &__key); \ |
} while (0) |
static inline void seqcount_lockdep_reader_access(const seqcount_t *s) |
{ |
seqcount_t *l = (seqcount_t *)s; |
unsigned long flags; |
local_irq_save(flags); |
seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_); |
seqcount_release(&l->dep_map, 1, _RET_IP_); |
local_irq_restore(flags); |
} |
#else |
# define SEQCOUNT_DEP_MAP_INIT(lockname) |
# define seqcount_init(s) __seqcount_init(s, NULL, NULL) |
# define seqcount_lockdep_reader_access(x) |
#endif |
#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)} |
/** |
* __read_seqcount_begin - begin a seq-read critical section (without barrier) |
* @s: pointer to seqcount_t |
* Returns: count to be passed to read_seqcount_retry |
* |
* __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() |
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is |
* provided before actually loading any of the variables that are to be |
* protected in this critical section. |
* |
* Use carefully, only in critical code, and comment how the barrier is |
* provided. |
*/ |
static inline unsigned __read_seqcount_begin(const seqcount_t *s) |
{ |
unsigned ret; |
repeat: |
ret = ACCESS_ONCE(s->sequence); |
if (unlikely(ret & 1)) { |
cpu_relax(); |
goto repeat; |
} |
return ret; |
} |
/** |
* raw_read_seqcount - Read the raw seqcount |
* @s: pointer to seqcount_t |
* Returns: count to be passed to read_seqcount_retry |
* |
* raw_read_seqcount opens a read critical section of the given |
* seqcount without any lockdep checking and without checking or |
* masking the LSB. Calling code is responsible for handling that. |
*/ |
static inline unsigned raw_read_seqcount(const seqcount_t *s) |
{ |
unsigned ret = ACCESS_ONCE(s->sequence); |
smp_rmb(); |
return ret; |
} |
/** |
* raw_read_seqcount_begin - start seq-read critical section w/o lockdep |
* @s: pointer to seqcount_t |
* Returns: count to be passed to read_seqcount_retry |
* |
* raw_read_seqcount_begin opens a read critical section of the given |
* seqcount, but without any lockdep checking. Validity of the critical |
* section is tested by checking read_seqcount_retry function. |
*/ |
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s) |
{ |
unsigned ret = __read_seqcount_begin(s); |
smp_rmb(); |
return ret; |
} |
/** |
* read_seqcount_begin - begin a seq-read critical section |
* @s: pointer to seqcount_t |
* Returns: count to be passed to read_seqcount_retry |
* |
* read_seqcount_begin opens a read critical section of the given seqcount. |
* Validity of the critical section is tested by checking read_seqcount_retry |
* function. |
*/ |
static inline unsigned read_seqcount_begin(const seqcount_t *s) |
{ |
seqcount_lockdep_reader_access(s); |
return raw_read_seqcount_begin(s); |
} |
/** |
* raw_seqcount_begin - begin a seq-read critical section |
* @s: pointer to seqcount_t |
* Returns: count to be passed to read_seqcount_retry |
* |
* raw_seqcount_begin opens a read critical section of the given seqcount. |
* Validity of the critical section is tested by checking read_seqcount_retry |
* function. |
* |
* Unlike read_seqcount_begin(), this function will not wait for the count |
* to stabilize. If a writer is active when we begin, we will fail the |
* read_seqcount_retry() instead of stabilizing at the beginning of the |
* critical section. |
*/ |
static inline unsigned raw_seqcount_begin(const seqcount_t *s) |
{ |
unsigned ret = ACCESS_ONCE(s->sequence); |
smp_rmb(); |
return ret & ~1; |
} |
/** |
* __read_seqcount_retry - end a seq-read critical section (without barrier) |
* @s: pointer to seqcount_t |
* @start: count, from read_seqcount_begin |
* Returns: 1 if retry is required, else 0 |
* |
* __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb() |
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is |
* provided before actually loading any of the variables that are to be |
* protected in this critical section. |
* |
* Use carefully, only in critical code, and comment how the barrier is |
* provided. |
*/ |
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) |
{ |
return unlikely(s->sequence != start); |
} |
/** |
* read_seqcount_retry - end a seq-read critical section |
* @s: pointer to seqcount_t |
* @start: count, from read_seqcount_begin |
* Returns: 1 if retry is required, else 0 |
* |
* read_seqcount_retry closes a read critical section of the given seqcount. |
* If the critical section was invalid, it must be ignored (and typically |
* retried). |
*/ |
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) |
{ |
smp_rmb(); |
return __read_seqcount_retry(s, start); |
} |
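/* |
 * Illustrative sketch (not part of the original header): the canonical |
 * seqcount read loop. "my_seq" and the copied fields are hypothetical; |
 * the writer must be serialized by the caller's own mutex. |
 * |
 *	unsigned seq; |
 * |
 *	do { |
 *		seq = read_seqcount_begin(&my_seq); |
 *		x = shared_x;		/* snapshot the protected data */ |
 *		y = shared_y; |
 *	} while (read_seqcount_retry(&my_seq, seq)); |
 */ |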
static inline void raw_write_seqcount_begin(seqcount_t *s) |
{ |
s->sequence++; |
smp_wmb(); |
} |
static inline void raw_write_seqcount_end(seqcount_t *s) |
{ |
smp_wmb(); |
s->sequence++; |
} |
/* |
* raw_write_seqcount_latch - redirect readers to even/odd copy |
* @s: pointer to seqcount_t |
*/ |
static inline void raw_write_seqcount_latch(seqcount_t *s) |
{ |
smp_wmb(); /* prior stores before incrementing "sequence" */ |
s->sequence++; |
smp_wmb(); /* increment "sequence" before following stores */ |
} |
/* |
* Sequence counter only version assumes that callers are using their |
* own mutexing. |
*/ |
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) |
{ |
raw_write_seqcount_begin(s); |
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); |
} |
static inline void write_seqcount_begin(seqcount_t *s) |
{ |
write_seqcount_begin_nested(s, 0); |
} |
static inline void write_seqcount_end(seqcount_t *s) |
{ |
seqcount_release(&s->dep_map, 1, _RET_IP_); |
raw_write_seqcount_end(s); |
} |
/** |
* write_seqcount_barrier - invalidate in-progress read-side seq operations |
* @s: pointer to seqcount_t |
* |
* After write_seqcount_barrier, no read-side seq operations will complete |
* successfully and see data older than this. |
*/ |
static inline void write_seqcount_barrier(seqcount_t *s) |
{ |
smp_wmb(); |
s->sequence += 2; |
} |
typedef struct { |
struct seqcount seqcount; |
spinlock_t lock; |
} seqlock_t; |
/* |
* These macros triggered gcc-3.x compile-time problems. We think these are |
* OK now. Be cautious. |
*/ |
#define __SEQLOCK_UNLOCKED(lockname) \ |
{ \ |
.seqcount = SEQCNT_ZERO(lockname), \ |
.lock = __SPIN_LOCK_UNLOCKED(lockname) \ |
} |
#define seqlock_init(x) \ |
do { \ |
seqcount_init(&(x)->seqcount); \ |
spin_lock_init(&(x)->lock); \ |
} while (0) |
#define DEFINE_SEQLOCK(x) \ |
seqlock_t x = __SEQLOCK_UNLOCKED(x) |
/* |
* Read side functions for starting and finalizing a read side section. |
*/ |
static inline unsigned read_seqbegin(const seqlock_t *sl) |
{ |
return read_seqcount_begin(&sl->seqcount); |
} |
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) |
{ |
return read_seqcount_retry(&sl->seqcount, start); |
} |
/* |
* Lock out other writers and update the count. |
* Acts like a normal spin_lock/unlock. |
* Don't need preempt_disable() because that is in the spin_lock already. |
*/ |
static inline void write_seqlock(seqlock_t *sl) |
{ |
spin_lock(&sl->lock); |
write_seqcount_begin(&sl->seqcount); |
} |
static inline void write_sequnlock(seqlock_t *sl) |
{ |
write_seqcount_end(&sl->seqcount); |
spin_unlock(&sl->lock); |
} |
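/* |
 * Illustrative sketch (not part of the original header): a writer |
 * updating two fields under the seqlock, with the matching |
 * non-blocking reader. "my_lock" and the fields are hypothetical. |
 * |
 *	/* writer */ |
 *	write_seqlock(&my_lock); |
 *	shared_x = new_x; |
 *	shared_y = new_y; |
 *	write_sequnlock(&my_lock); |
 * |
 *	/* reader */ |
 *	do { |
 *		seq = read_seqbegin(&my_lock); |
 *		x = shared_x; |
 *		y = shared_y; |
 *	} while (read_seqretry(&my_lock, seq)); |
 */ |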
static inline void write_seqlock_bh(seqlock_t *sl) |
{ |
spin_lock_bh(&sl->lock); |
write_seqcount_begin(&sl->seqcount); |
} |
static inline void write_sequnlock_bh(seqlock_t *sl) |
{ |
write_seqcount_end(&sl->seqcount); |
spin_unlock_bh(&sl->lock); |
} |
static inline void write_seqlock_irq(seqlock_t *sl) |
{ |
spin_lock_irq(&sl->lock); |
write_seqcount_begin(&sl->seqcount); |
} |
static inline void write_sequnlock_irq(seqlock_t *sl) |
{ |
write_seqcount_end(&sl->seqcount); |
spin_unlock_irq(&sl->lock); |
} |
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) |
{ |
unsigned long flags; |
spin_lock_irqsave(&sl->lock, flags); |
write_seqcount_begin(&sl->seqcount); |
return flags; |
} |
#define write_seqlock_irqsave(lock, flags) \ |
do { flags = __write_seqlock_irqsave(lock); } while (0) |
static inline void |
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) |
{ |
write_seqcount_end(&sl->seqcount); |
spin_unlock_irqrestore(&sl->lock, flags); |
} |
/* |
* A locking reader exclusively locks out other writers and locking readers, |
* but doesn't update the sequence number. Acts like a normal spin_lock/unlock. |
* Don't need preempt_disable() because that is in the spin_lock already. |
*/ |
static inline void read_seqlock_excl(seqlock_t *sl) |
{ |
spin_lock(&sl->lock); |
} |
static inline void read_sequnlock_excl(seqlock_t *sl) |
{ |
spin_unlock(&sl->lock); |
} |
/** |
* read_seqbegin_or_lock - begin a sequence number check or locking block |
* @lock: sequence lock |
 * @seq: sequence number to be checked |
* |
* First try it once optimistically without taking the lock. If that fails, |
* take the lock. The sequence number is also used as a marker for deciding |
* whether to be a reader (even) or writer (odd). |
* N.B. seq must be initialized to an even number to begin with. |
*/ |
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) |
{ |
if (!(*seq & 1)) /* Even */ |
*seq = read_seqbegin(lock); |
else /* Odd */ |
read_seqlock_excl(lock); |
} |
static inline int need_seqretry(seqlock_t *lock, int seq) |
{ |
return !(seq & 1) && read_seqretry(lock, seq); |
} |
static inline void done_seqretry(seqlock_t *lock, int seq) |
{ |
if (seq & 1) |
read_sequnlock_excl(lock); |
} |
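/* |
 * Illustrative sketch (not part of the original header): the |
 * optimistic-then-locking reader built from the three helpers above. |
 * "my_lock" and walk_the_data() are hypothetical; seq starts even. |
 * |
 *	int seq = 0; |
 *again: |
 *	read_seqbegin_or_lock(&my_lock, &seq); |
 *	walk_the_data(); |
 *	if (need_seqretry(&my_lock, seq)) { |
 *		seq = 1;	/* odd: retry as a locking reader */ |
 *		goto again; |
 *	} |
 *	done_seqretry(&my_lock, seq); |
 */ |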
static inline void read_seqlock_excl_bh(seqlock_t *sl) |
{ |
spin_lock_bh(&sl->lock); |
} |
static inline void read_sequnlock_excl_bh(seqlock_t *sl) |
{ |
spin_unlock_bh(&sl->lock); |
} |
static inline void read_seqlock_excl_irq(seqlock_t *sl) |
{ |
spin_lock_irq(&sl->lock); |
} |
static inline void read_sequnlock_excl_irq(seqlock_t *sl) |
{ |
spin_unlock_irq(&sl->lock); |
} |
static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl) |
{ |
unsigned long flags; |
spin_lock_irqsave(&sl->lock, flags); |
return flags; |
} |
#define read_seqlock_excl_irqsave(lock, flags) \ |
do { flags = __read_seqlock_excl_irqsave(lock); } while (0) |
static inline void |
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags) |
{ |
spin_unlock_irqrestore(&sl->lock, flags); |
} |
static inline unsigned long |
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) |
{ |
unsigned long flags = 0; |
if (!(*seq & 1)) /* Even */ |
*seq = read_seqbegin(lock); |
else /* Odd */ |
read_seqlock_excl_irqsave(lock, flags); |
return flags; |
} |
static inline void |
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags) |
{ |
if (seq & 1) |
read_sequnlock_excl_irqrestore(lock, flags); |
} |
#endif /* __LINUX_SEQLOCK_H */ |
/drivers/include/linux/shmem_fs.h |
---|
1,8 → 1,9 |
#ifndef __SHMEM_FS_H |
#define __SHMEM_FS_H |
#include <kernel.h> |
#include <linux/file.h> |
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); |
struct page *shmem_read_mapping_page_gfp(struct file *filep, |
pgoff_t index, gfp_t gfp); |
/drivers/include/linux/slab.h |
---|
11,6 → 11,140 |
#ifndef _LINUX_SLAB_H |
#define _LINUX_SLAB_H |
#include <errno.h> |
// stub |
#include <linux/gfp.h> |
#include <linux/types.h> |
#include <linux/workqueue.h> |
/* |
* Flags to pass to kmem_cache_create(). |
* The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set. |
*/ |
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ |
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ |
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ |
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ |
#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */ |
#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ |
#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */ |
/* |
* SLAB_DESTROY_BY_RCU - **WARNING** READ THIS! |
* |
* This delays freeing the SLAB page by a grace period; it does _NOT_ |
* delay object freeing. This means that if you do kmem_cache_free() |
* that memory location is free to be reused at any time. Thus it may |
* be possible to see another object there in the same RCU grace period. |
* |
* This feature only ensures the memory location backing the object |
* stays valid, the trick to using this is relying on an independent |
* object validation pass. Something like: |
* |
* rcu_read_lock() |
* again: |
* obj = lockless_lookup(key); |
* if (obj) { |
* if (!try_get_ref(obj)) // might fail for free objects |
* goto again; |
* |
* if (obj->key != key) { // not the object we expected |
* put_ref(obj); |
* goto again; |
* } |
* } |
* rcu_read_unlock(); |
* |
* This is useful if we need to approach a kernel structure obliquely, |
* from its address obtained without the usual locking. We can lock |
* the structure to stabilize it and check it's still at the given address, |
* only if we can be sure that the memory has not been meanwhile reused |
* for some other kind of object (which our subsystem's lock might corrupt). |
* |
* rcu_read_lock before reading the address, then rcu_read_unlock after |
* taking the spinlock within the structure expected at that address. |
*/ |
#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ |
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ |
#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ |
/* Flag to prevent checks on free */ |
#ifdef CONFIG_DEBUG_OBJECTS |
# define SLAB_DEBUG_OBJECTS 0x00400000UL |
#else |
# define SLAB_DEBUG_OBJECTS 0x00000000UL |
#endif |
#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ |
/* Don't track use of uninitialized memory */ |
#ifdef CONFIG_KMEMCHECK |
# define SLAB_NOTRACK 0x01000000UL |
#else |
# define SLAB_NOTRACK 0x00000000UL |
#endif |
#ifdef CONFIG_FAILSLAB |
# define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */ |
#else |
# define SLAB_FAILSLAB 0x00000000UL |
#endif |
/* The following flags affect the page allocator grouping pages by mobility */ |
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ |
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ |
/* |
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. |
* |
* Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault. |
* |
* ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. |
* Both make kfree a no-op. |
*/ |
#define ZERO_SIZE_PTR ((void *)16) |
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ |
(unsigned long)ZERO_SIZE_PTR) |
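/* Editor's sketch: a zero-length allocation may yield ZERO_SIZE_PTR; it |
* is safe to pass to kfree() but must never be dereferenced: |
* void *p = kmalloc(0, GFP_KERNEL); |
* ... |
* kfree(p); // no-op for ZERO_SIZE_PTR and NULL alike |
*/ |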
void __init kmem_cache_init(void); |
int slab_is_available(void); |
void kmem_cache_destroy(struct kmem_cache *); |
int kmem_cache_shrink(struct kmem_cache *); |
void kmem_cache_free(struct kmem_cache *, void *); |
static inline void *krealloc(void *p, size_t new_size, gfp_t flags) |
{ |
return __builtin_realloc(p, new_size); |
} |
static inline void kfree(void *p) |
{ |
__builtin_free(p); |
} |
static __always_inline void *kmalloc(size_t size, gfp_t flags) |
{ |
return __builtin_malloc(size); |
} |
/** |
* kzalloc - allocate memory. The memory is set to zero. |
* @size: how many bytes of memory are required. |
* @flags: the type of memory to allocate (see kmalloc). |
*/ |
static inline void *kzalloc(size_t size, gfp_t flags) |
{ |
void *ret = __builtin_malloc(size); |
if (ret) /* don't memset a failed allocation */ |
memset(ret, 0, size); |
return ret; |
} |
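/* Editor's sketch of the usual call pattern (struct foo is illustrative): |
* struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL); |
* if (!f) |
* return -ENOMEM; |
*/ |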
static inline void *kcalloc(size_t n, size_t size, uint32_t flags) |
{ |
if (size != 0 && n > (size_t)-1 / size) |
return NULL; /* n * size would overflow */ |
return kzalloc(n * size, flags); |
} |
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) |
{ |
if (size != 0 && n > (size_t)-1 / size) |
return NULL; /* n * size would overflow */ |
return kmalloc(n * size, flags); |
} |
#endif /* _LINUX_SLAB_H */ |
/drivers/include/linux/spinlock.h |
---|
48,14 → 48,14 |
#include <linux/typecheck.h> |
//#include <linux/preempt.h> |
//#include <linux/linkage.h> |
#include <linux/linkage.h> |
#include <linux/compiler.h> |
//#include <linux/thread_info.h> |
#include <linux/kernel.h> |
#include <linux/stringify.h> |
//#include <linux/bottom_half.h> |
#include <asm/barrier.h> |
//#include <asm/system.h> |
/* |
* Must define these before including other files, inline functions need them |
/drivers/include/linux/stddef.h |
---|
1,8 → 1,9 |
#ifndef _LINUX_STDDEF_H |
#define _LINUX_STDDEF_H |
#include <linux/compiler.h> |
#include <uapi/linux/stddef.h> |
#undef NULL |
#define NULL ((void *)0) |
/drivers/include/linux/string.h |
---|
6,6 → 6,7 |
#include <linux/types.h> /* for size_t */ |
#include <linux/stddef.h> /* for NULL */ |
#include <stdarg.h> |
#include <uapi/linux/string.h> |
extern char *strndup_user(const char __user *, long); |
extern void *memdup_user(const void __user *, size_t); |
40,7 → 41,7 |
extern int strncmp(const char *,const char *,__kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_STRNICMP |
extern int strnicmp(const char *, const char *, __kernel_size_t); |
#define strnicmp strncasecmp |
#endif |
#ifndef __HAVE_ARCH_STRCASECMP |
extern int strcasecmp(const char *s1, const char *s2); |
143,7 → 144,8 |
return strncmp(str, prefix, strlen(prefix)) == 0; |
} |
extern size_t memweight(const void *ptr, size_t bytes); |
size_t memweight(const void *ptr, size_t bytes); |
void memzero_explicit(void *s, size_t count); |
/** |
* kbasename - return the last part of a pathname. |
/drivers/include/linux/threads.h |
---|
0,0 → 1,45 |
#ifndef _LINUX_THREADS_H |
#define _LINUX_THREADS_H |
/* |
* The default limit for the nr of threads is now in |
* /proc/sys/kernel/threads-max. |
*/ |
/* |
* Maximum supported processors. Setting this smaller saves quite a |
* bit of memory. Use nr_cpu_ids instead of this except for static bitmaps. |
*/ |
#ifndef CONFIG_NR_CPUS |
/* FIXME: This should be fixed in the arch's Kconfig */ |
#define CONFIG_NR_CPUS 1 |
#endif |
/* Places which use this should consider cpumask_var_t. */ |
#define NR_CPUS CONFIG_NR_CPUS |
#define MIN_THREADS_LEFT_FOR_ROOT 4 |
/* |
* This controls the default maximum pid allocated to a process |
*/ |
#define PID_MAX_DEFAULT (CONFIG_BASE_SMALL ? 0x1000 : 0x8000) |
/* |
* A maximum of 4 million PIDs should be enough for a while. |
* [NOTE: PID/TIDs are limited to 2^29 ~= 500+ million, see futex.h.] |
*/ |
#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \ |
(sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT)) |
/* |
* Define a minimum number of pids per cpu. Heuristically based |
* on original pid max of 32k for 32 cpus. Also, increase the |
* minimum settable value for pid_max on the running system based |
* on similar defaults. See kernel/pid.c:pidmap_init() for details. |
*/ |
#define PIDS_PER_CPU_DEFAULT 1024 |
#define PIDS_PER_CPU_MIN 8 |
#endif |
/drivers/include/linux/time.h |
---|
1,22 → 1,13 |
#ifndef _LINUX_TIME_H |
#define _LINUX_TIME_H |
//# include <linux/cache.h> |
//# include <linux/seqlock.h> |
# include <linux/cache.h> |
# include <linux/seqlock.h> |
# include <linux/math64.h> |
//#include <uapi/linux/time.h> |
# include <linux/time64.h> |
extern struct timezone sys_tz; |
/* Parameters used to convert the timespec values: */ |
#define MSEC_PER_SEC 1000L |
#define USEC_PER_MSEC 1000L |
#define NSEC_PER_USEC 1000L |
#define NSEC_PER_MSEC 1000000L |
#define USEC_PER_SEC 1000000L |
#define NSEC_PER_SEC 1000000000L |
#define FSEC_PER_SEC 1000000000000000LL |
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) |
static inline int timespec_equal(const struct timespec *a, |
48,10 → 39,21 |
return lhs->tv_usec - rhs->tv_usec; |
} |
extern unsigned long mktime(const unsigned int year, const unsigned int mon, |
extern time64_t mktime64(const unsigned int year, const unsigned int mon, |
const unsigned int day, const unsigned int hour, |
const unsigned int min, const unsigned int sec); |
/** |
* Deprecated. Use mktime64(). |
*/ |
static inline unsigned long mktime(const unsigned int year, |
const unsigned int mon, const unsigned int day, |
const unsigned int hour, const unsigned int min, |
const unsigned int sec) |
{ |
return mktime64(year, mon, day, hour, min, sec); |
} |
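/* Editor's sketch: mktime64() takes a 1-based month and day and counts |
* seconds from the Epoch, e.g. mktime64(2000, 1, 1, 0, 0, 0) == 946684800. */ |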
extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); |
/* |
84,13 → 86,6 |
return ts_delta; |
} |
#define KTIME_MAX ((s64)~((u64)1 << 63)) |
#if (BITS_PER_LONG == 64) |
# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) |
#else |
# define KTIME_SEC_MAX LONG_MAX |
#endif |
/* |
* Returns true if the timespec is normalized, false if denormalized: |
*/ |
115,28 → 110,8 |
return true; |
} |
extern bool persistent_clock_exist; |
extern struct timespec timespec_trunc(struct timespec t, unsigned gran); |
static inline bool has_persistent_clock(void) |
{ |
return persistent_clock_exist; |
} |
extern void read_persistent_clock(struct timespec *ts); |
extern void read_boot_clock(struct timespec *ts); |
extern int persistent_clock_is_local; |
extern int update_persistent_clock(struct timespec now); |
void timekeeping_init(void); |
extern int timekeeping_suspended; |
unsigned long get_seconds(void); |
struct timespec current_kernel_time(void); |
struct timespec __current_kernel_time(void); /* does not take xtime_lock */ |
struct timespec get_monotonic_coarse(void); |
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, |
struct timespec *wtom, struct timespec *sleep); |
void timekeeping_inject_sleeptime(struct timespec *delta); |
#define CURRENT_TIME (current_kernel_time()) |
#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) |
153,34 → 128,15 |
extern u32 (*arch_gettimeoffset)(void); |
#endif |
extern void do_gettimeofday(struct timeval *tv); |
extern int do_settimeofday(const struct timespec *tv); |
extern int do_sys_settimeofday(const struct timespec *tv, |
const struct timezone *tz); |
#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) |
extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); |
struct itimerval; |
extern int do_setitimer(int which, struct itimerval *value, |
struct itimerval *ovalue); |
extern unsigned int alarm_setitimer(unsigned int seconds); |
extern int do_getitimer(int which, struct itimerval *value); |
extern int __getnstimeofday(struct timespec *tv); |
extern void getnstimeofday(struct timespec *tv); |
extern void getrawmonotonic(struct timespec *ts); |
extern void getnstime_raw_and_real(struct timespec *ts_raw, |
struct timespec *ts_real); |
extern void getboottime(struct timespec *ts); |
extern void monotonic_to_bootbased(struct timespec *ts); |
extern void get_monotonic_boottime(struct timespec *ts); |
extern struct timespec timespec_trunc(struct timespec t, unsigned gran); |
extern int timekeeping_valid_for_hres(void); |
extern u64 timekeeping_max_deferment(void); |
extern int timekeeping_inject_offset(struct timespec *ts); |
extern s32 timekeeping_get_tai_offset(void); |
extern void timekeeping_set_tai_offset(s32 tai_offset); |
extern void timekeeping_clocktai(struct timespec *ts); |
extern unsigned int alarm_setitimer(unsigned int seconds); |
extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); |
struct tms; |
extern void do_sys_times(struct tms *); |
/drivers/include/linux/time64.h |
---|
0,0 → 1,190 |
#ifndef _LINUX_TIME64_H |
#define _LINUX_TIME64_H |
#include <uapi/linux/time.h> |
typedef __s64 time64_t; |
/* |
* This wants to go into uapi/linux/time.h once we have agreed on the |
* userspace interfaces. |
*/ |
#if __BITS_PER_LONG == 64 |
# define timespec64 timespec |
#else |
struct timespec64 { |
time64_t tv_sec; /* seconds */ |
long tv_nsec; /* nanoseconds */ |
}; |
#endif |
/* Parameters used to convert the timespec values: */ |
#define MSEC_PER_SEC 1000L |
#define USEC_PER_MSEC 1000L |
#define NSEC_PER_USEC 1000L |
#define NSEC_PER_MSEC 1000000L |
#define USEC_PER_SEC 1000000L |
#define NSEC_PER_SEC 1000000000L |
#define FSEC_PER_SEC 1000000000000000LL |
/* Located here for timespec[64]_valid_strict */ |
#define KTIME_MAX ((s64)~((u64)1 << 63)) |
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) |
#if __BITS_PER_LONG == 64 |
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) |
{ |
return ts64; |
} |
static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) |
{ |
return ts; |
} |
# define timespec64_equal timespec_equal |
# define timespec64_compare timespec_compare |
# define set_normalized_timespec64 set_normalized_timespec |
# define timespec64_add_safe timespec_add_safe |
# define timespec64_add timespec_add |
# define timespec64_sub timespec_sub |
# define timespec64_valid timespec_valid |
# define timespec64_valid_strict timespec_valid_strict |
# define timespec64_to_ns timespec_to_ns |
# define ns_to_timespec64 ns_to_timespec |
# define timespec64_add_ns timespec_add_ns |
#else |
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) |
{ |
struct timespec ret; |
ret.tv_sec = (time_t)ts64.tv_sec; |
ret.tv_nsec = ts64.tv_nsec; |
return ret; |
} |
static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) |
{ |
struct timespec64 ret; |
ret.tv_sec = ts.tv_sec; |
ret.tv_nsec = ts.tv_nsec; |
return ret; |
} |
static inline int timespec64_equal(const struct timespec64 *a, |
const struct timespec64 *b) |
{ |
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); |
} |
/* |
* lhs < rhs: return <0 |
* lhs == rhs: return 0 |
* lhs > rhs: return >0 |
*/ |
static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs) |
{ |
if (lhs->tv_sec < rhs->tv_sec) |
return -1; |
if (lhs->tv_sec > rhs->tv_sec) |
return 1; |
return lhs->tv_nsec - rhs->tv_nsec; |
} |
extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec); |
/* |
* timespec64_add_safe assumes both values are positive and checks for |
* overflow. It will return TIME_T_MAX if the returned value would be |
* smaller than either of the arguments. |
*/ |
extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs, |
const struct timespec64 rhs); |
static inline struct timespec64 timespec64_add(struct timespec64 lhs, |
struct timespec64 rhs) |
{ |
struct timespec64 ts_delta; |
set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec, |
lhs.tv_nsec + rhs.tv_nsec); |
return ts_delta; |
} |
/* |
* sub = lhs - rhs, in normalized form |
*/ |
static inline struct timespec64 timespec64_sub(struct timespec64 lhs, |
struct timespec64 rhs) |
{ |
struct timespec64 ts_delta; |
set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec, |
lhs.tv_nsec - rhs.tv_nsec); |
return ts_delta; |
} |
/* |
* Returns true if the timespec64 is normalized, false if denormalized: |
*/ |
static inline bool timespec64_valid(const struct timespec64 *ts) |
{ |
/* Dates before 1970 are bogus */ |
if (ts->tv_sec < 0) |
return false; |
/* Can't have more nanoseconds than a second */ |
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) |
return false; |
return true; |
} |
static inline bool timespec64_valid_strict(const struct timespec64 *ts) |
{ |
if (!timespec64_valid(ts)) |
return false; |
/* Disallow values that could overflow ktime_t */ |
if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) |
return false; |
return true; |
} |
/** |
* timespec64_to_ns - Convert timespec64 to nanoseconds |
* @ts: pointer to the timespec64 variable to be converted |
* |
* Returns the scalar nanosecond representation of the timespec64 |
* parameter. |
*/ |
static inline s64 timespec64_to_ns(const struct timespec64 *ts) |
{ |
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; |
} |
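/* Editor's sketch (hypothetical helper): elapsed nanoseconds between two |
* timestamps, combining timespec64_sub() and timespec64_to_ns() above. */ |
static inline s64 timespec64_elapsed_ns(struct timespec64 start, |
struct timespec64 end) |
{ |
struct timespec64 d = timespec64_sub(end, start); |
return timespec64_to_ns(&d); |
} |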
/** |
* ns_to_timespec64 - Convert nanoseconds to timespec64 |
* @nsec: the nanoseconds value to be converted |
* |
* Returns the timespec64 representation of the nsec parameter. |
*/ |
extern struct timespec64 ns_to_timespec64(const s64 nsec); |
/** |
* timespec64_add_ns - Adds nanoseconds to a timespec64 |
* @a: pointer to timespec64 to be incremented |
* @ns: unsigned nanoseconds value to be added |
* |
* This must always be inlined because it's used from the x86-64 vdso, |
* which cannot call other kernel functions. |
*/ |
static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns) |
{ |
a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); |
a->tv_nsec = ns; |
} |
#endif |
#endif /* _LINUX_TIME64_H */ |
/drivers/include/linux/types.h |
---|
1,23 → 1,14 |
#ifndef _LINUX_TYPES_H |
#define _LINUX_TYPES_H |
#include <asm/types.h> |
#define __EXPORTED_HEADERS__ |
#include <uapi/linux/types.h> |
#ifndef __ASSEMBLY__ |
#ifdef __KERNEL__ |
#define DECLARE_BITMAP(name,bits) \ |
unsigned long name[BITS_TO_LONGS(bits)] |
#else |
#ifndef __EXPORTED_HEADERS__ |
#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders" |
#endif /* __EXPORTED_HEADERS__ */ |
#endif |
#include <linux/posix_types.h> |
#ifdef __KERNEL__ |
typedef __u32 __kernel_dev_t; |
typedef __kernel_fd_set fd_set; |
158,48 → 149,12 |
typedef u32 dma_addr_t; |
#endif /* dma_addr_t */ |
#endif /* __KERNEL__ */ |
/* |
* Below are truly Linux-specific types that should never collide with |
* any application/library that wants linux/types.h. |
*/ |
#ifdef __CHECKER__ |
#define __bitwise__ __attribute__((bitwise)) |
#else |
#define __bitwise__ |
#endif |
#ifdef __CHECK_ENDIAN__ |
#define __bitwise __bitwise__ |
#else |
#define __bitwise |
#endif |
typedef __u16 __bitwise __le16; |
typedef __u16 __bitwise __be16; |
typedef __u32 __bitwise __le32; |
typedef __u32 __bitwise __be32; |
typedef __u64 __bitwise __le64; |
typedef __u64 __bitwise __be64; |
typedef __u16 __bitwise __sum16; |
typedef __u32 __bitwise __wsum; |
/* |
* aligned_u64 should be used in defining kernel<->userspace ABIs to avoid |
* common 32/64-bit compat problems. |
* 64-bit values align to 4-byte boundaries on x86_32 (and possibly other |
* architectures) and to 8-byte boundaries on 64-bit architectures. The new |
* aligned_64 type enforces 8-byte alignment so that structs containing |
* aligned_64 values have the same alignment on 32-bit and 64-bit architectures. |
* No conversions are necessary between 32-bit user-space and a 64-bit kernel. |
*/ |
#define __aligned_u64 __u64 __attribute__((aligned(8))) |
#define __aligned_be64 __be64 __attribute__((aligned(8))) |
#define __aligned_le64 __le64 __attribute__((aligned(8))) |
#ifdef __KERNEL__ |
typedef unsigned __bitwise__ gfp_t; |
typedef unsigned __bitwise__ fmode_t; |
typedef unsigned __bitwise__ oom_flags_t; |
247,111 → 202,6 |
char f_fpack[6]; |
}; |
#endif /* __KERNEL__ */ |
#endif /* __ASSEMBLY__ */ |
typedef unsigned char u8_t; |
typedef unsigned short u16_t; |
typedef unsigned long u32_t; |
typedef unsigned long long u64_t; |
typedef unsigned int addr_t; |
typedef unsigned int count_t; |
#define false 0 |
#define true 1 |
#define likely(x) __builtin_expect(!!(x), 1) |
#define unlikely(x) __builtin_expect(!!(x), 0) |
#define BITS_PER_LONG 32 |
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) |
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) |
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) |
#define MTRR_TYPE_UNCACHABLE 0 |
#define MTRR_TYPE_WRCOMB 1 |
#define MTRR_TYPE_WRTHROUGH 4 |
#define MTRR_TYPE_WRPROT 5 |
#define MTRR_TYPE_WRBACK 6 |
#define MTRR_NUM_TYPES 7 |
int dbgprintf(const char* format, ...); |
#define GFP_KERNEL 0 |
#define GFP_ATOMIC 0 |
//#include <stdio.h> |
int snprintf(char *str, size_t size, const char *format, ...); |
//#include <string.h> |
void* memcpy(void *s1, const void *s2, size_t n); |
void* memset(void *s, int c, size_t n); |
size_t strlen(const char *s); |
char *strcpy(char *s1, const char *s2); |
char *strncpy (char *dst, const char *src, size_t len); |
void *malloc(size_t size); |
void* realloc(void* oldmem, size_t bytes); |
#define kfree free |
static inline void *krealloc(void *p, size_t new_size, gfp_t flags) |
{ |
return realloc(p, new_size); |
} |
static inline void *kzalloc(size_t size, uint32_t flags) |
{ |
void *ret = malloc(size); |
if (ret) /* don't memset a failed allocation */ |
memset(ret, 0, size); |
return ret; |
} |
#define kmalloc(s,f) kzalloc((s), (f)) |
struct drm_file; |
#define PAGE_SHIFT 12 |
#define PAGE_SIZE (1UL << PAGE_SHIFT) |
#define PAGE_MASK (~(PAGE_SIZE-1)) |
#define ENTER() dbgprintf("enter %s\n",__FUNCTION__) |
#define LEAVE() dbgprintf("leave %s\n",__FUNCTION__) |
struct timeval |
{ |
__kernel_time_t tv_sec; /* seconds */ |
__kernel_suseconds_t tv_usec; /* microseconds */ |
}; |
#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159 |
#ifndef __read_mostly |
#define __read_mostly |
#endif |
/** |
* struct callback_head - callback structure for use with RCU and task_work |
* @next: next update requests in a list |
363,4 → 213,5 |
}; |
#define rcu_head callback_head |
#endif /* __ASSEMBLY__ */ |
#endif /* _LINUX_TYPES_H */ |
/drivers/include/linux/uuid.h |
---|
0,0 → 1,58 |
/* |
* UUID/GUID definition |
* |
* Copyright (C) 2010, Intel Corp. |
* Huang Ying <ying.huang@intel.com> |
* |
* This program is free software; you can redistribute it and/or |
* modify it under the terms of the GNU General Public License version |
* 2 as published by the Free Software Foundation; |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
*/ |
#ifndef _UAPI_LINUX_UUID_H_ |
#define _UAPI_LINUX_UUID_H_ |
#include <linux/types.h> |
#include <linux/string.h> |
typedef struct { |
__u8 b[16]; |
} uuid_le; |
typedef struct { |
__u8 b[16]; |
} uuid_be; |
#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ |
((uuid_le) \ |
{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ |
(b) & 0xff, ((b) >> 8) & 0xff, \ |
(c) & 0xff, ((c) >> 8) & 0xff, \ |
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) |
#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ |
((uuid_be) \ |
{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ |
((b) >> 8) & 0xff, (b) & 0xff, \ |
((c) >> 8) & 0xff, (c) & 0xff, \ |
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) |
#define NULL_UUID_LE \ |
UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \ |
0x00, 0x00, 0x00, 0x00) |
#define NULL_UUID_BE \ |
UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \ |
0x00, 0x00, 0x00, 0x00) |
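/* Editor's sketch: UUID_LE stores its first three fields little-endian, |
* so this (illustrative) value yields b[] = 78 56 34 12 bc 9a f0 de, |
* followed by d0..d7 verbatim: |
* uuid_le g = UUID_LE(0x12345678, 0x9abc, 0xdef0, |
* 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08); |
*/ |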
#endif /* _UAPI_LINUX_UUID_H_ */ |
/drivers/include/linux/vgaarb.h |
---|
0,0 → 1,249 |
/* |
* The VGA arbiter manages VGA space routing and VGA resource decode to |
* allow multiple VGA devices to be used in a system in a safe way. |
* |
* (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> |
* (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> |
* (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
* DEALINGS |
* IN THE SOFTWARE. |
* |
*/ |
#ifndef LINUX_VGA_H |
#define LINUX_VGA_H |
//#include <video/vga.h> |
/* Legacy VGA regions */ |
#define VGA_RSRC_NONE 0x00 |
#define VGA_RSRC_LEGACY_IO 0x01 |
#define VGA_RSRC_LEGACY_MEM 0x02 |
#define VGA_RSRC_LEGACY_MASK (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM) |
/* Non-legacy access */ |
#define VGA_RSRC_NORMAL_IO 0x04 |
#define VGA_RSRC_NORMAL_MEM 0x08 |
/* Pass this instead of a pci_dev to use the system "default" |
* device, i.e. the one used by vgacon. Archs will probably |
* have to provide their own vga_default_device(); |
*/ |
#define VGA_DEFAULT_DEVICE (NULL) |
struct pci_dev; |
/* For use by clients */ |
/** |
* vga_set_legacy_decoding |
* |
* @pdev: pci device of the VGA card |
* @decodes: bit mask of what legacy regions the card decodes |
* |
* Indicates to the arbiter if the card decodes legacy VGA IOs, |
* legacy VGA Memory, both, or none. All cards default to both; |
* the card driver (fbdev for example) should tell the arbiter |
* if it has disabled legacy decoding, so the card can be left |
* out of the arbitration process (and can safely take |
* interrupts at any time). |
*/ |
extern void vga_set_legacy_decoding(struct pci_dev *pdev, |
unsigned int decodes); |
/** |
* vga_get - acquire & lock VGA resources |
* |
* @pdev: pci device of the VGA card or NULL for the system default |
* @rsrc: bit mask of resources to acquire and lock |
* @interruptible: blocking should be interruptible by signals? |
* |
* This function acquires VGA resources for the given |
* card and marks those resources locked. If the resources requested |
* are "normal" (and not legacy) resources, the arbiter will first check |
* whether the card is doing legacy decoding for that type of resource. If |
* yes, the lock is "converted" into a legacy resource lock. |
* The arbiter will first look for all VGA cards that might conflict |
* and disable their IOs and/or Memory access, including VGA forwarding |
* on P2P bridges if necessary, so that the requested resources can |
* be used. Then, the card is marked as locking these resources and |
* the IO and/or Memory accesses are enabled on the card (including |
* VGA forwarding on parent P2P bridges if any). |
* This function will block if some conflicting card is already locking |
* one of the required resources (or any resource on a different bus |
* segment, since P2P bridges don't differentiate VGA memory and IO |
* afaik). You can indicate whether this blocking should be interruptible |
* by a signal (for userland interface) or not. |
* Must not be called at interrupt time or in atomic context. |
* If the card already owns the resources, the function succeeds. |
* Nested calls are supported (a per-resource counter is maintained) |
*/ |
#if defined(CONFIG_VGA_ARB) |
extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible); |
#else |
static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; } |
#endif |
/** |
* vga_get_interruptible |
* |
* Shortcut to vga_get |
*/ |
static inline int vga_get_interruptible(struct pci_dev *pdev, |
unsigned int rsrc) |
{ |
return vga_get(pdev, rsrc, 1); |
} |
/** |
* vga_get_uninterruptible |
* |
* Shortcut to vga_get |
*/ |
static inline int vga_get_uninterruptible(struct pci_dev *pdev, |
unsigned int rsrc) |
{ |
return vga_get(pdev, rsrc, 0); |
} |
/** |
* vga_tryget - try to acquire & lock legacy VGA resources |
* |
* @pdev: pci device of VGA card or NULL for system default |
* @rsrc: bit mask of resources to acquire and lock |
* |
* This function performs the same operation as vga_get(), but |
* will return an error (-EBUSY) instead of blocking if the resources |
* are already locked by another card. It can be called in any context |
*/ |
#if defined(CONFIG_VGA_ARB) |
extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc); |
#else |
static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; } |
#endif |
/** |
* vga_put - release lock on legacy VGA resources |
* |
* @pdev: pci device of VGA card or NULL for system default |
* @rsrc: bit mask of resources to release |
* |
* This function releases resources previously locked by vga_get() |
* or vga_tryget(). The resources aren't disabled right away, so |
* that a subsequent vga_get() on the same card will succeed |
* immediately. Resources have a counter, so locks are only |
* released if the counter reaches 0. |
*/ |
#if defined(CONFIG_VGA_ARB) |
extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); |
#else |
#define vga_put(pdev, rsrc) |
#endif |
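/* Editor's sketch (hypothetical driver code): the usual acquire/release |
* pairing around legacy VGA register access. */ |
static inline int touch_legacy_vga(struct pci_dev *pdev) |
{ |
int ret = vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); |
if (ret) |
return ret; |
/* ... poke the 0x3c0-0x3df legacy I/O range here ... */ |
vga_put(pdev, VGA_RSRC_LEGACY_IO); |
return 0; |
} |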
/** |
* vga_default_device |
* |
* This can be defined by the platform. The default implementation |
* is rather dumb and will probably only work properly on single |
* vga card setups and/or x86 platforms. |
* |
* If your VGA default device is not PCI, you'll have to return |
* NULL here. In this case, I assume it will not conflict with |
* any PCI card. If this is not true, I'll have to define two arch |
* hooks for enabling/disabling the VGA default device if that is |
* possible. This may be a problem with real _ISA_ VGA cards, in |
* addition to a PCI one. I don't know at this point how to deal |
* with that card. Can their IOs be disabled at all? If not, then |
* I suppose it's a matter of having the proper arch hook telling |
* us about it, so we basically never allow anybody to succeed a |
* vga_get()... |
*/ |
#ifdef CONFIG_VGA_ARB |
extern struct pci_dev *vga_default_device(void); |
extern void vga_set_default_device(struct pci_dev *pdev); |
#else |
static inline struct pci_dev *vga_default_device(void) { return NULL; }; |
static inline void vga_set_default_device(struct pci_dev *pdev) { }; |
#endif |
/** |
* vga_conflicts |
* |
* Architectures should define this if they have several |
* independent PCI domains that can afford concurrent VGA |
* decoding |
*/ |
#ifndef __ARCH_HAS_VGA_CONFLICT |
static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2) |
{ |
return 1; |
} |
#endif |
/** |
* vga_client_register |
* |
* @pdev: pci device of the VGA client |
* @cookie: client cookie to be used in callbacks |
* @irq_set_state: irq state change callback |
* @set_vga_decode: vga decode change callback |
* |
* return value: 0 on success, -1 on failure |
* Register a client with the VGA arbitration logic |
* |
* Clients have two callback mechanisms they can use. |
* irq enable/disable callback - |
* If a client can't disable its GPU's VGA resources, then we |
* need to be able to ask it to turn off its irqs when we |
* turn off its mem and io decoding. |
* set_vga_decode |
* If a client can disable its GPU VGA resource, it will |
* get a callback from this to set the encode/decode state |
* |
* Rationale: we cannot disable VGA decode resources unconditionally; |
* some single GPU laptops seem to require ACPI or BIOS access to the |
* VGA registers to control things like backlights etc. |
* Hopefully newer multi-GPU laptops do something saner, and desktops |
* won't have any special ACPI for this. |
* The driver will get a callback when VGA arbitration is first used |
* by userspace, since some older X servers have issues. |
*/ |
#if defined(CONFIG_VGA_ARB) |
int vga_client_register(struct pci_dev *pdev, void *cookie, |
void (*irq_set_state)(void *cookie, bool state), |
unsigned int (*set_vga_decode)(void *cookie, bool state)); |
#else |
static inline int vga_client_register(struct pci_dev *pdev, void *cookie, |
void (*irq_set_state)(void *cookie, bool state), |
unsigned int (*set_vga_decode)(void *cookie, bool state)) |
{ |
return 0; |
} |
#endif |
#endif /* LINUX_VGA_H */ |
/drivers/include/linux/wait.h |
---|
1,8 → 1,15 |
#ifndef _LINUX_WAIT_H |
#define _LINUX_WAIT_H |
/* |
* Linux wait queue related types and methods |
*/ |
#include <linux/list.h> |
#include <linux/stddef.h> |
#include <linux/spinlock.h> |
#include <asm/current.h> |
#include <linux/list.h> |
#include <syscall.h> |
typedef struct __wait_queue wait_queue_t; |
28,6 → 35,10 |
return !list_empty(&q->task_list); |
} |
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); |
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); |
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); |
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) |
{ |
list_add(&new->task_list, &head->task_list); |
145,10 → 156,10 |
}; |
struct completion { |
unsigned int done; |
wait_queue_head_t wait; |
}; |
//struct completion { |
// unsigned int done; |
// wait_queue_head_t wait; |
//}; |
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); |
/drivers/include/linux/workqueue.h |
---|
1,11 → 1,21 |
/* |
* workqueue.h --- work queue handling for Linux. |
*/ |
#ifndef _LINUX_WORKQUEUE_H |
#define _LINUX_WORKQUEUE_H |
#include <linux/list.h> |
#include <linux/linkage.h> |
#include <linux/lockdep.h> |
#include <linux/threads.h> |
#include <syscall.h> |
struct workqueue_struct; |
struct work_struct; |
typedef void (*work_func_t)(struct work_struct *work); |
void __stdcall delayed_work_timer_fn(unsigned long __data); |
/* |
* Workqueue flags and constants. For details, please refer to |
38,6 → 48,9 |
struct list_head entry; |
struct workqueue_struct *data; |
work_func_t func; |
#ifdef CONFIG_LOCKDEP |
struct lockdep_map lockdep_map; |
#endif |
}; |
struct delayed_work { |
/drivers/include/linux/ww_mutex.h |
---|
17,8 → 17,6 |
#include <linux/mutex.h> |
#include <syscall.h> |
#define current (void*)GetPid() |
struct ww_class { |
atomic_long_t stamp; |
struct lock_class_key acquire_key; |
/drivers/include/syscall.h |
---|
4,6 → 4,9 |
#ifndef __SYSCALL_H__ |
#define __SYSCALL_H__ |
typedef u32 addr_t; |
typedef u32 count_t; |
/////////////////////////////////////////////////////////////////////////////// |
#define STDCALL __attribute__ ((stdcall)) __attribute__ ((dllimport)) |
25,7 → 28,7 |
void* STDCALL AllocKernelSpace(size_t size)__asm__("AllocKernelSpace"); |
void STDCALL FreeKernelSpace(void *mem)__asm__("FreeKernelSpace"); |
addr_t STDCALL MapIoMem(addr_t base, size_t size, u32_t flags)__asm__("MapIoMem"); |
addr_t STDCALL MapIoMem(addr_t base, size_t size, u32 flags)__asm__("MapIoMem"); |
void* STDCALL KernelAlloc(size_t size)__asm__("KernelAlloc"); |
void* STDCALL KernelFree(void *mem)__asm__("KernelFree"); |
void* STDCALL UserAlloc(size_t size)__asm__("UserAlloc"); |
33,20 → 36,20 |
void* STDCALL GetDisplay(void)__asm__("GetDisplay"); |
u32_t IMPORT GetTimerTicks(void)__asm__("GetTimerTicks"); |
u32 IMPORT GetTimerTicks(void)__asm__("GetTimerTicks"); |
addr_t STDCALL AllocPage(void)__asm__("AllocPage"); |
addr_t STDCALL AllocPages(count_t count)__asm__("AllocPages"); |
void IMPORT __attribute__((regparm(1))) |
FreePage(addr_t page)__asm__("FreePage"); |
void STDCALL MapPage(void *vaddr, addr_t paddr, u32_t flags)__asm__("MapPage"); |
void STDCALL MapPage(void *vaddr, addr_t paddr, u32 flags)__asm__("MapPage"); |
void* STDCALL CreateRingBuffer(size_t size, u32_t map)__asm__("CreateRingBuffer"); |
void* STDCALL CreateRingBuffer(size_t size, u32 map)__asm__("CreateRingBuffer"); |
u32_t STDCALL RegService(char *name, srv_proc_t proc)__asm__("RegService"); |
u32 STDCALL RegService(char *name, srv_proc_t proc)__asm__("RegService"); |
int STDCALL AttachIntHandler(int irq, void *handler, u32_t access) __asm__("AttachIntHandler"); |
int STDCALL AttachIntHandler(int irq, void *handler, u32 access) __asm__("AttachIntHandler"); |
void FASTCALL MutexInit(struct mutex*)__asm__("MutexInit"); |
void FASTCALL MutexLock(struct mutex*)__asm__("MutexLock"); |
53,7 → 56,7 |
void FASTCALL MutexUnlock(struct mutex*)__asm__("MutexUnlock"); |
addr_t IMPORT GetStackBase(void)__asm__("GetStackBase"); |
u32_t IMPORT GetPid(void)__asm__("GetPid"); |
u32 IMPORT GetPid(void)__asm__("GetPid"); |
u32 STDCALL TimerHS(u32 delay, u32 interval, |
void *fn, void *data)asm("TimerHS"); |
67,16 → 70,16 |
void STDCALL SetMouseData(int btn, int x, int y, |
int z, int h)__asm__("SetMouseData"); |
void FASTCALL SetKeyboardData(u32_t data)__asm__("SetKeyboardData"); |
void FASTCALL SetKeyboardData(u32 data)__asm__("SetKeyboardData"); |
u8_t STDCALL PciRead8 (u32_t bus, u32_t devfn, u32_t reg)__asm__("PciRead8"); |
u16_t STDCALL PciRead16(u32_t bus, u32_t devfn, u32_t reg)__asm__("PciRead16"); |
u32_t STDCALL PciRead32(u32_t bus, u32_t devfn, u32_t reg)__asm__("PciRead32"); |
u8 STDCALL PciRead8 (u32 bus, u32 devfn, u32 reg)__asm__("PciRead8"); |
u16 STDCALL PciRead16(u32 bus, u32 devfn, u32 reg)__asm__("PciRead16"); |
u32 STDCALL PciRead32(u32 bus, u32 devfn, u32 reg)__asm__("PciRead32"); |
u32_t STDCALL PciWrite8 (u32_t bus, u32_t devfn, u32_t reg,u8_t val) __asm__("PciWrite8"); |
u32_t STDCALL PciWrite16(u32_t bus, u32_t devfn, u32_t reg,u16_t val)__asm__("PciWrite16"); |
u32_t STDCALL PciWrite32(u32_t bus, u32_t devfn, u32_t reg,u32_t val)__asm__("PciWrite32"); |
u32 STDCALL PciWrite8 (u32 bus, u32 devfn, u32 reg,u8 val) __asm__("PciWrite8"); |
u32 STDCALL PciWrite16(u32 bus, u32 devfn, u32 reg,u16 val)__asm__("PciWrite16"); |
u32 STDCALL PciWrite32(u32 bus, u32 devfn, u32 reg,u32 val)__asm__("PciWrite32"); |
#define pciReadByte(tag, reg) \ |
PciRead8(PCI_BUS_FROM_TAG(tag),PCI_DFN_FROM_TAG(tag),(reg)) |
158,7 → 161,7 |
}; |
static inline evhandle_t CreateEvent(kevent_t *ev, u32_t flags) |
static inline evhandle_t CreateEvent(kevent_t *ev, u32 flags) |
{ |
evhandle_t evh; |
172,7 → 175,7 |
return evh; |
}; |
static inline void RaiseEvent(evhandle_t evh, u32_t flags, kevent_t *ev) |
static inline void RaiseEvent(evhandle_t evh, u32 flags, kevent_t *ev) |
{ |
__asm__ __volatile__ ( |
"call *__imp__RaiseEvent" |
209,9 → 212,9 |
__asm__ __volatile__ ("":::"ebx","ecx","edx","esi","edi"); |
}; |
static inline u32_t GetEvent(kevent_t *ev) |
static inline u32 GetEvent(kevent_t *ev) |
{ |
u32_t handle; |
u32 handle; |
__asm__ __volatile__ ( |
"call *__imp__GetEvent" |
253,9 → 256,9 |
return retval; |
} |
static inline u32_t GetPgAddr(void *mem) |
static inline u32 GetPgAddr(void *mem) |
{ |
u32_t retval; |
u32 retval; |
__asm__ __volatile__ ( |
"call *__imp__GetPgAddr \n\t" |
264,7 → 267,7 |
return retval; |
}; |
static inline void CommitPages(void *mem, u32_t page, u32_t size) |
static inline void CommitPages(void *mem, u32 page, u32 size) |
{ |
size = (size+4095) & ~4095; |
__asm__ __volatile__ ( |
284,7 → 287,7 |
__asm__ __volatile__ ("":::"eax","ecx"); |
}; |
static inline void usleep(u32_t delay) |
static inline void usleep(u32 delay) |
{ |
if( !delay ) |
delay++; |
297,7 → 300,7 |
:::"eax","ebx","ecx","edx"); |
}; |
static inline void udelay(u32_t delay) |
static inline void udelay1(u32 delay) |
{ |
if(!delay) delay++; |
delay*= 100; |
311,7 → 314,7 |
} |
} |
static inline void msleep(unsigned int msecs) |
static inline void msleep1(unsigned int msecs) |
{ |
msecs /= 10; |
if(!msecs) msecs = 1; |
324,7 → 327,7 |
}; |
static inline void mdelay(u32_t time) |
static inline void mdelay1(u32 time) |
{ |
time /= 10; |
if(!time) time = 1; |
337,9 → 340,9 |
}; |
static inline u32_t __PciApi(int cmd) |
static inline u32 __PciApi(int cmd) |
{ |
u32_t retval; |
u32 retval; |
__asm__ __volatile__ ( |
"call *__imp__PciApi \n\t" |
351,7 → 354,7 |
return retval; |
}; |
static inline void* __CreateObject(u32_t pid, size_t size) |
static inline void* __CreateObject(u32 pid, size_t size) |
{ |
void *retval; |
374,9 → 377,9 |
:::"eax","ebx","ecx","edx","esi","edi","cc","memory"); |
} |
static inline u32_t GetService(const char *name) |
static inline u32 GetService(const char *name) |
{ |
u32_t handle; |
u32 handle; |
__asm__ __volatile__ |
( |
389,9 → 392,9 |
return handle; |
}; |
static inline u32_t safe_cli(void) |
static inline u32 safe_cli(void) |
{ |
u32_t ifl; |
u32 ifl; |
__asm__ __volatile__ ( |
"pushf\n\t" |
"popl %0\n\t" |
400,15 → 403,15 |
return ifl; |
} |
static inline void safe_sti(u32_t efl) |
static inline void safe_sti(u32 efl) |
{ |
if (efl & (1<<9)) |
__asm__ __volatile__ ("sti"); |
} |
static inline u32_t get_eflags(void) |
static inline u32 get_eflags(void) |
{ |
u32_t val; |
u32 val; |
asm volatile ( |
"pushfl\n\t" |
"popl %0\n" |
418,7 → 421,7 |
static inline void __clear (void * dst, unsigned len) |
{ |
u32_t tmp; |
u32 tmp; |
__asm__ __volatile__ ( |
"cld \n\t" |
"rep stosb \n" |
427,43 → 430,43 |
__asm__ __volatile__ ("":::"ecx","edi"); |
}; |
static inline void out8(const u16_t port, const u8_t val) |
static inline void out8(const u16 port, const u8 val) |
{ |
__asm__ __volatile__ |
("outb %1, %0\n" : : "dN"(port), "a"(val)); |
} |
static inline void out16(const u16_t port, const u16_t val) |
static inline void out16(const u16 port, const u16 val) |
{ |
__asm__ __volatile__ |
("outw %1, %0\n" : : "dN"(port), "a"(val)); |
} |
static inline void out32(const u16_t port, const u32_t val) |
static inline void out32(const u16 port, const u32 val) |
{ |
__asm__ __volatile__ |
("outl %1, %0\n" : : "dN"(port), "a"(val)); |
} |
static inline u8_t in8(const u16_t port) |
static inline u8 in8(const u16 port) |
{ |
u8_t tmp; |
u8 tmp; |
__asm__ __volatile__ |
("inb %1, %0\n" : "=a"(tmp) : "dN"(port)); |
return tmp; |
}; |
static inline u16_t in16(const u16_t port) |
static inline u16 in16(const u16 port) |
{ |
u16_t tmp; |
u16 tmp; |
__asm__ __volatile__ |
("inw %1, %0\n" : "=a"(tmp) : "dN"(port)); |
return tmp; |
}; |
static inline u32_t in32(const u16_t port) |
static inline u32 in32(const u16 port) |
{ |
u32_t tmp; |
u32 tmp; |
__asm__ __volatile__ |
("inl %1, %0\n" : "=a"(tmp) : "dN"(port)); |
return tmp; |
499,12 → 502,12 |
int drm_order(unsigned long size); |
static inline void __iomem *ioremap(uint32_t offset, size_t size) |
static inline void __iomem *ioremap(u32 offset, size_t size) |
{ |
return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100); |
} |
static inline void __iomem *ioremap_wc(uint32_t offset, size_t size) |
static inline void __iomem *ioremap_wc(u32 offset, size_t size) |
{ |
return (void __iomem*) MapIoMem(offset, size, PG_SW|0x100); |
} |
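/* Editor's sketch (illustrative only): map one page of MMIO uncached and |
* read the 32-bit register at offset 0. A real driver would keep the |
* mapping around rather than leak it as this throwaway example does. */ |
static inline u32 mmio_read_reg0(u32 bar_phys) |
{ |
volatile u32 __iomem *regs = ioremap(bar_phys, PAGE_SIZE); |
return regs ? regs[0] : 0; |
} |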
/drivers/include/uapi/asm/e820.h |
---|
0,0 → 1,70 |
#ifndef _UAPI_ASM_X86_E820_H |
#define _UAPI_ASM_X86_E820_H |
#define E820MAP 0x2d0 /* our map */ |
#define E820MAX 128 /* number of entries in E820MAP */ |
/* |
* Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the |
* constrained space in the zeropage. If we have more nodes than |
* that, and if we've booted off EFI firmware, then the EFI tables |
* passed us from the EFI firmware can list more nodes. Size our |
* internal memory map tables to have room for these additional |
* nodes, based on up to three entries per node for which the |
* kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT), |
* plus E820MAX, allowing space for the possible duplicate E820 |
* entries that might need room in the same arrays, prior to the |
* call to sanitize_e820_map() to remove duplicates. The allowance |
* of three memory map entries per node is "enough" entries for |
* the initial hardware platform motivating this mechanism to make |
* use of additional EFI map entries. Future platforms may want |
* to allow more than three entries per node or otherwise refine |
* this size. |
*/ |
#ifndef __KERNEL__ |
#define E820_X_MAX E820MAX |
#endif |
#define E820NR 0x1e8 /* # entries in E820MAP */ |
#define E820_RAM 1 |
#define E820_RESERVED 2 |
#define E820_ACPI 3 |
#define E820_NVS 4 |
#define E820_UNUSABLE 5 |
/* |
* reserved RAM used by kernel itself |
* if CONFIG_INTEL_TXT is enabled, memory of this type will be |
* included in the S3 integrity calculation and so should not include |
* any memory that BIOS might alter over the S3 transition |
*/ |
#define E820_RESERVED_KERN 128 |
#ifndef __ASSEMBLY__ |
#include <linux/types.h> |
struct e820entry { |
__u64 addr; /* start of memory segment */ |
__u64 size; /* size of memory segment */ |
__u32 type; /* type of memory segment */ |
} __attribute__((packed)); |
struct e820map { |
__u32 nr_map; |
struct e820entry map[E820_X_MAX]; |
}; |
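/* Editor's sketch (hypothetical helper): total usable RAM described by |
* an e820 map. */ |
static inline __u64 e820_usable_bytes(const struct e820map *m) |
{ |
__u64 total = 0; |
__u32 i; |
for (i = 0; i < m->nr_map; i++) |
if (m->map[i].type == E820_RAM) |
total += m->map[i].size; |
return total; |
} |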
#define ISA_START_ADDRESS 0xa0000 |
#define ISA_END_ADDRESS 0x100000 |
#define BIOS_BEGIN 0x000a0000 |
#define BIOS_END 0x00100000 |
#define BIOS_ROM_BASE 0xffe00000 |
#define BIOS_ROM_END 0xffffffff |
#endif /* __ASSEMBLY__ */ |
#endif /* _UAPI_ASM_X86_E820_H */ |
/drivers/include/uapi/asm/errno.h |
---|
0,0 → 1,0 |
#include <asm-generic/errno.h> |
/drivers/include/uapi/asm/ioctl.h |
---|
0,0 → 1,0 |
#include <asm-generic/ioctl.h> |
/drivers/include/uapi/asm/msr-index.h |
---|
0,0 → 1,624 |
#ifndef _ASM_X86_MSR_INDEX_H |
#define _ASM_X86_MSR_INDEX_H |
/* CPU model specific register (MSR) numbers */ |
/* x86-64 specific MSRs */ |
#define MSR_EFER 0xc0000080 /* extended feature register */ |
#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */ |
#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ |
#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */ |
#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ |
#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */ |
#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */ |
#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */ |
#define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */ |
/* EFER bits: */ |
#define _EFER_SCE 0 /* SYSCALL/SYSRET */ |
#define _EFER_LME 8 /* Long mode enable */ |
#define _EFER_LMA 10 /* Long mode active (read-only) */ |
#define _EFER_NX 11 /* No execute enable */ |
#define _EFER_SVME 12 /* Enable virtualization */ |
#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */ |
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */ |
#define EFER_SCE (1<<_EFER_SCE) |
#define EFER_LME (1<<_EFER_LME) |
#define EFER_LMA (1<<_EFER_LMA) |
#define EFER_NX (1<<_EFER_NX) |
#define EFER_SVME (1<<_EFER_SVME) |
#define EFER_LMSLE (1<<_EFER_LMSLE) |
#define EFER_FFXSR (1<<_EFER_FFXSR) |
/* Intel MSRs. Some also available on other CPUs */ |
#define MSR_IA32_PERFCTR0 0x000000c1 |
#define MSR_IA32_PERFCTR1 0x000000c2 |
#define MSR_FSB_FREQ 0x000000cd |
#define MSR_NHM_PLATFORM_INFO 0x000000ce |
#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 |
#define NHM_C3_AUTO_DEMOTE (1UL << 25) |
#define NHM_C1_AUTO_DEMOTE (1UL << 26) |
#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25) |
#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) |
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) |
#define MSR_PLATFORM_INFO 0x000000ce |
#define MSR_MTRRcap 0x000000fe |
#define MSR_IA32_BBL_CR_CTL 0x00000119 |
#define MSR_IA32_BBL_CR_CTL3 0x0000011e |
#define MSR_IA32_SYSENTER_CS 0x00000174 |
#define MSR_IA32_SYSENTER_ESP 0x00000175 |
#define MSR_IA32_SYSENTER_EIP 0x00000176 |
#define MSR_IA32_MCG_CAP 0x00000179 |
#define MSR_IA32_MCG_STATUS 0x0000017a |
#define MSR_IA32_MCG_CTL 0x0000017b |
#define MSR_OFFCORE_RSP_0 0x000001a6 |
#define MSR_OFFCORE_RSP_1 0x000001a7 |
#define MSR_NHM_TURBO_RATIO_LIMIT 0x000001ad |
#define MSR_IVT_TURBO_RATIO_LIMIT 0x000001ae |
#define MSR_LBR_SELECT 0x000001c8 |
#define MSR_LBR_TOS 0x000001c9 |
#define MSR_LBR_NHM_FROM 0x00000680 |
#define MSR_LBR_NHM_TO 0x000006c0 |
#define MSR_LBR_CORE_FROM 0x00000040 |
#define MSR_LBR_CORE_TO 0x00000060 |
#define MSR_IA32_PEBS_ENABLE 0x000003f1 |
#define MSR_IA32_DS_AREA 0x00000600 |
#define MSR_IA32_PERF_CAPABILITIES 0x00000345 |
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 |
#define MSR_MTRRfix64K_00000 0x00000250 |
#define MSR_MTRRfix16K_80000 0x00000258 |
#define MSR_MTRRfix16K_A0000 0x00000259 |
#define MSR_MTRRfix4K_C0000 0x00000268 |
#define MSR_MTRRfix4K_C8000 0x00000269 |
#define MSR_MTRRfix4K_D0000 0x0000026a |
#define MSR_MTRRfix4K_D8000 0x0000026b |
#define MSR_MTRRfix4K_E0000 0x0000026c |
#define MSR_MTRRfix4K_E8000 0x0000026d |
#define MSR_MTRRfix4K_F0000 0x0000026e |
#define MSR_MTRRfix4K_F8000 0x0000026f |
#define MSR_MTRRdefType 0x000002ff |
#define MSR_IA32_CR_PAT 0x00000277 |
#define MSR_IA32_DEBUGCTLMSR 0x000001d9 |
#define MSR_IA32_LASTBRANCHFROMIP 0x000001db |
#define MSR_IA32_LASTBRANCHTOIP 0x000001dc |
#define MSR_IA32_LASTINTFROMIP 0x000001dd |
#define MSR_IA32_LASTINTTOIP 0x000001de |
/* DEBUGCTLMSR bits (others vary by model): */ |
#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */ |
#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */ |
#define DEBUGCTLMSR_TR (1UL << 6) |
#define DEBUGCTLMSR_BTS (1UL << 7) |
#define DEBUGCTLMSR_BTINT (1UL << 8) |
#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9) |
#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) |
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) |
#define MSR_IA32_POWER_CTL 0x000001fc |
#define MSR_IA32_MC0_CTL 0x00000400 |
#define MSR_IA32_MC0_STATUS 0x00000401 |
#define MSR_IA32_MC0_ADDR 0x00000402 |
#define MSR_IA32_MC0_MISC 0x00000403 |
/* C-state Residency Counters */ |
#define MSR_PKG_C3_RESIDENCY 0x000003f8 |
#define MSR_PKG_C6_RESIDENCY 0x000003f9 |
#define MSR_PKG_C7_RESIDENCY 0x000003fa |
#define MSR_CORE_C3_RESIDENCY 0x000003fc |
#define MSR_CORE_C6_RESIDENCY 0x000003fd |
#define MSR_CORE_C7_RESIDENCY 0x000003fe |
#define MSR_PKG_C2_RESIDENCY 0x0000060d |
#define MSR_PKG_C8_RESIDENCY 0x00000630 |
#define MSR_PKG_C9_RESIDENCY 0x00000631 |
#define MSR_PKG_C10_RESIDENCY 0x00000632 |
/* Run Time Average Power Limiting (RAPL) Interface */ |
#define MSR_RAPL_POWER_UNIT 0x00000606 |
#define MSR_PKG_POWER_LIMIT 0x00000610 |
#define MSR_PKG_ENERGY_STATUS 0x00000611 |
#define MSR_PKG_PERF_STATUS 0x00000613 |
#define MSR_PKG_POWER_INFO 0x00000614 |
#define MSR_DRAM_POWER_LIMIT 0x00000618 |
#define MSR_DRAM_ENERGY_STATUS 0x00000619 |
#define MSR_DRAM_PERF_STATUS 0x0000061b |
#define MSR_DRAM_POWER_INFO 0x0000061c |
#define MSR_PP0_POWER_LIMIT 0x00000638 |
#define MSR_PP0_ENERGY_STATUS 0x00000639 |
#define MSR_PP0_POLICY 0x0000063a |
#define MSR_PP0_PERF_STATUS 0x0000063b |
#define MSR_PP1_POWER_LIMIT 0x00000640 |
#define MSR_PP1_ENERGY_STATUS 0x00000641 |
#define MSR_PP1_POLICY 0x00000642 |
#define MSR_CORE_C1_RES 0x00000660 |
#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 |
#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 |
/* Hardware P state interface */ |
#define MSR_PPERF 0x0000064e |
#define MSR_PERF_LIMIT_REASONS 0x0000064f |
#define MSR_PM_ENABLE 0x00000770 |
#define MSR_HWP_CAPABILITIES 0x00000771 |
#define MSR_HWP_REQUEST_PKG 0x00000772 |
#define MSR_HWP_INTERRUPT 0x00000773 |
#define MSR_HWP_REQUEST 0x00000774 |
#define MSR_HWP_STATUS 0x00000777 |
/* CPUID.6.EAX */ |
#define HWP_BASE_BIT (1<<7) |
#define HWP_NOTIFICATIONS_BIT (1<<8) |
#define HWP_ACTIVITY_WINDOW_BIT (1<<9) |
#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10) |
#define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11) |
/* IA32_HWP_CAPABILITIES */ |
#define HWP_HIGHEST_PERF(x) (x & 0xff) |
#define HWP_GUARANTEED_PERF(x) ((x & (0xff << 8)) >>8) |
#define HWP_MOSTEFFICIENT_PERF(x) ((x & (0xff << 16)) >>16) |
#define HWP_LOWEST_PERF(x) ((x & (0xff << 24)) >>24) |
/* IA32_HWP_REQUEST */ |
#define HWP_MIN_PERF(x) (x & 0xff) |
#define HWP_MAX_PERF(x) ((x & 0xff) << 8) |
#define HWP_DESIRED_PERF(x) ((x & 0xff) << 16) |
#define HWP_ENERGY_PERF_PREFERENCE(x) ((x & 0xff) << 24) |
#define HWP_ACTIVITY_WINDOW(x) ((unsigned long long)((x) & 0xff3) << 32) |
#define HWP_PACKAGE_CONTROL(x) ((unsigned long long)((x) & 0x1) << 42) |
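/* Editor's sketch: composing an IA32_HWP_REQUEST value from the field |
* macros above (the performance levels 8/32/0 are illustrative): |
* u64 req = HWP_MIN_PERF(8) | HWP_MAX_PERF(32) | HWP_DESIRED_PERF(0); |
*/ |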
/* IA32_HWP_STATUS */ |
#define HWP_GUARANTEED_CHANGE(x) (x & 0x1) |
#define HWP_EXCURSION_TO_MINIMUM(x) (x & 0x4) |
/* IA32_HWP_INTERRUPT */ |
#define HWP_CHANGE_TO_GUARANTEED_INT(x) (x & 0x1) |
#define HWP_EXCURSION_TO_MINIMUM_INT(x) (x & 0x2) |
#define MSR_AMD64_MC0_MASK 0xc0010044 |
#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) |
#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) |
#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) |
#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) |
#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x)) |
/* These are consecutive and not in the normal 4-register MCE bank block */ |
#define MSR_IA32_MC0_CTL2 0x00000280 |
#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) |
#define MSR_P6_PERFCTR0 0x000000c1 |
#define MSR_P6_PERFCTR1 0x000000c2 |
#define MSR_P6_EVNTSEL0 0x00000186 |
#define MSR_P6_EVNTSEL1 0x00000187 |
#define MSR_KNC_PERFCTR0 0x00000020 |
#define MSR_KNC_PERFCTR1 0x00000021 |
#define MSR_KNC_EVNTSEL0 0x00000028 |
#define MSR_KNC_EVNTSEL1 0x00000029 |
/* Alternative perfctr range with full access. */ |
#define MSR_IA32_PMC0 0x000004c1 |
/* AMD64 MSRs. Not complete. See the architecture manual for a more |
complete list. */ |
#define MSR_AMD64_PATCH_LEVEL 0x0000008b |
#define MSR_AMD64_TSC_RATIO 0xc0000104 |
#define MSR_AMD64_NB_CFG 0xc001001f |
#define MSR_AMD64_PATCH_LOADER 0xc0010020 |
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 |
#define MSR_AMD64_OSVW_STATUS 0xc0010141 |
#define MSR_AMD64_LS_CFG 0xc0011020 |
#define MSR_AMD64_DC_CFG 0xc0011022 |
#define MSR_AMD64_BU_CFG2 0xc001102a |
#define MSR_AMD64_IBSFETCHCTL 0xc0011030 |
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031 |
#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 |
#define MSR_AMD64_IBSFETCH_REG_COUNT 3 |
#define MSR_AMD64_IBSFETCH_REG_MASK ((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1) |
#define MSR_AMD64_IBSOPCTL 0xc0011033 |
#define MSR_AMD64_IBSOPRIP 0xc0011034 |
#define MSR_AMD64_IBSOPDATA 0xc0011035 |
#define MSR_AMD64_IBSOPDATA2 0xc0011036 |
#define MSR_AMD64_IBSOPDATA3 0xc0011037 |
#define MSR_AMD64_IBSDCLINAD 0xc0011038 |
#define MSR_AMD64_IBSDCPHYSAD 0xc0011039 |
#define MSR_AMD64_IBSOP_REG_COUNT 7 |
#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1) |
#define MSR_AMD64_IBSCTL 0xc001103a |
#define MSR_AMD64_IBSBRTARGET 0xc001103b |
#define MSR_AMD64_IBSOPDATA4 0xc001103d |
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ |
/* Fam 16h MSRs */ |
#define MSR_F16H_L2I_PERF_CTL 0xc0010230 |
#define MSR_F16H_L2I_PERF_CTR 0xc0010231 |
/* Fam 15h MSRs */ |
#define MSR_F15H_PERF_CTL 0xc0010200 |
#define MSR_F15H_PERF_CTR 0xc0010201 |
#define MSR_F15H_NB_PERF_CTL 0xc0010240 |
#define MSR_F15H_NB_PERF_CTR 0xc0010241 |
/* Fam 10h MSRs */ |
#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 |
#define FAM10H_MMIO_CONF_ENABLE (1<<0) |
#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf |
#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2 |
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL |
#define FAM10H_MMIO_CONF_BASE_SHIFT 20 |
#define MSR_FAM10H_NODE_ID 0xc001100c |
/* K8 MSRs */ |
#define MSR_K8_TOP_MEM1 0xc001001a |
#define MSR_K8_TOP_MEM2 0xc001001d |
#define MSR_K8_SYSCFG 0xc0010010 |
#define MSR_K8_INT_PENDING_MSG 0xc0010055 |
/* C1E active bits in int pending message */ |
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000 |
#define MSR_K8_TSEG_ADDR 0xc0010112 |
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ |
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ |
#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ |
/* K7 MSRs */ |
#define MSR_K7_EVNTSEL0 0xc0010000 |
#define MSR_K7_PERFCTR0 0xc0010004 |
#define MSR_K7_EVNTSEL1 0xc0010001 |
#define MSR_K7_PERFCTR1 0xc0010005 |
#define MSR_K7_EVNTSEL2 0xc0010002 |
#define MSR_K7_PERFCTR2 0xc0010006 |
#define MSR_K7_EVNTSEL3 0xc0010003 |
#define MSR_K7_PERFCTR3 0xc0010007 |
#define MSR_K7_CLK_CTL 0xc001001b |
#define MSR_K7_HWCR 0xc0010015 |
#define MSR_K7_FID_VID_CTL 0xc0010041 |
#define MSR_K7_FID_VID_STATUS 0xc0010042 |
/* K6 MSRs */ |
#define MSR_K6_WHCR 0xc0000082 |
#define MSR_K6_UWCCR 0xc0000085 |
#define MSR_K6_EPMR 0xc0000086 |
#define MSR_K6_PSOR 0xc0000087 |
#define MSR_K6_PFIR 0xc0000088 |
/* Centaur-Hauls/IDT defined MSRs. */ |
#define MSR_IDT_FCR1 0x00000107 |
#define MSR_IDT_FCR2 0x00000108 |
#define MSR_IDT_FCR3 0x00000109 |
#define MSR_IDT_FCR4 0x0000010a |
#define MSR_IDT_MCR0 0x00000110 |
#define MSR_IDT_MCR1 0x00000111 |
#define MSR_IDT_MCR2 0x00000112 |
#define MSR_IDT_MCR3 0x00000113 |
#define MSR_IDT_MCR4 0x00000114 |
#define MSR_IDT_MCR5 0x00000115 |
#define MSR_IDT_MCR6 0x00000116 |
#define MSR_IDT_MCR7 0x00000117 |
#define MSR_IDT_MCR_CTRL 0x00000120 |
/* VIA Cyrix defined MSRs*/ |
#define MSR_VIA_FCR 0x00001107 |
#define MSR_VIA_LONGHAUL 0x0000110a |
#define MSR_VIA_RNG 0x0000110b |
#define MSR_VIA_BCR2 0x00001147 |
/* Transmeta defined MSRs */ |
#define MSR_TMTA_LONGRUN_CTRL 0x80868010 |
#define MSR_TMTA_LONGRUN_FLAGS 0x80868011 |
#define MSR_TMTA_LRTI_READOUT 0x80868018 |
#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a |
/* Intel defined MSRs. */ |
#define MSR_IA32_P5_MC_ADDR 0x00000000 |
#define MSR_IA32_P5_MC_TYPE 0x00000001 |
#define MSR_IA32_TSC 0x00000010 |
#define MSR_IA32_PLATFORM_ID 0x00000017 |
#define MSR_IA32_EBL_CR_POWERON 0x0000002a |
#define MSR_EBC_FREQUENCY_ID 0x0000002c |
#define MSR_SMI_COUNT 0x00000034 |
#define MSR_IA32_FEATURE_CONTROL 0x0000003a |
#define MSR_IA32_TSC_ADJUST 0x0000003b |
#define MSR_IA32_BNDCFGS 0x00000d90 |
#define MSR_IA32_XSS 0x00000da0 |
#define FEATURE_CONTROL_LOCKED (1<<0) |
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1) |
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) |
#define MSR_IA32_APICBASE 0x0000001b |
#define MSR_IA32_APICBASE_BSP (1<<8) |
#define MSR_IA32_APICBASE_ENABLE (1<<11) |
#define MSR_IA32_APICBASE_BASE (0xfffff<<12) |
#define MSR_IA32_TSCDEADLINE 0x000006e0 |
#define MSR_IA32_UCODE_WRITE 0x00000079 |
#define MSR_IA32_UCODE_REV 0x0000008b |
#define MSR_IA32_PERF_STATUS 0x00000198 |
#define MSR_IA32_PERF_CTL 0x00000199 |
#define MSR_AMD_PSTATE_DEF_BASE 0xc0010064 |
#define MSR_AMD_PERF_STATUS 0xc0010063 |
#define MSR_AMD_PERF_CTL 0xc0010062 |
#define MSR_IA32_MPERF 0x000000e7 |
#define MSR_IA32_APERF 0x000000e8 |
#define MSR_IA32_THERM_CONTROL 0x0000019a |
#define MSR_IA32_THERM_INTERRUPT 0x0000019b |
#define THERM_INT_HIGH_ENABLE (1 << 0) |
#define THERM_INT_LOW_ENABLE (1 << 1) |
#define THERM_INT_PLN_ENABLE (1 << 24) |
#define MSR_IA32_THERM_STATUS 0x0000019c |
#define THERM_STATUS_PROCHOT (1 << 0) |
#define THERM_STATUS_POWER_LIMIT (1 << 10) |
#define MSR_THERM2_CTL 0x0000019d |
#define MSR_THERM2_CTL_TM_SELECT (1ULL << 16) |
#define MSR_IA32_MISC_ENABLE 0x000001a0 |
#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2 |
#define MSR_MISC_PWR_MGMT 0x000001aa |
#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 |
#define ENERGY_PERF_BIAS_PERFORMANCE 0 |
#define ENERGY_PERF_BIAS_NORMAL 6 |
#define ENERGY_PERF_BIAS_POWERSAVE 15 |
#define MSR_IA32_PACKAGE_THERM_STATUS 0x000001b1 |
#define PACKAGE_THERM_STATUS_PROCHOT (1 << 0) |
#define PACKAGE_THERM_STATUS_POWER_LIMIT (1 << 10) |
#define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x000001b2 |
#define PACKAGE_THERM_INT_HIGH_ENABLE (1 << 0) |
#define PACKAGE_THERM_INT_LOW_ENABLE (1 << 1) |
#define PACKAGE_THERM_INT_PLN_ENABLE (1 << 24) |
/* Thermal Thresholds Support */ |
#define THERM_INT_THRESHOLD0_ENABLE (1 << 15) |
#define THERM_SHIFT_THRESHOLD0 8 |
#define THERM_MASK_THRESHOLD0 (0x7f << THERM_SHIFT_THRESHOLD0) |
#define THERM_INT_THRESHOLD1_ENABLE (1 << 23) |
#define THERM_SHIFT_THRESHOLD1 16 |
#define THERM_MASK_THRESHOLD1 (0x7f << THERM_SHIFT_THRESHOLD1) |
#define THERM_STATUS_THRESHOLD0 (1 << 6) |
#define THERM_LOG_THRESHOLD0 (1 << 7) |
#define THERM_STATUS_THRESHOLD1 (1 << 8) |
#define THERM_LOG_THRESHOLD1 (1 << 9) |
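/* |
 * A hedged sketch (helper name is ours): pull programmable thermal |
 * threshold #0 out of an IA32_THERM_INTERRUPT value with the |
 * shift/mask pairs above; threshold #1 works the same way. |
 */ |
static inline unsigned int therm_int_threshold0(unsigned long long val) |
{ |
return (val & THERM_MASK_THRESHOLD0) >> THERM_SHIFT_THRESHOLD0; |
} |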
/* MISC_ENABLE bits: architectural */ |
#define MSR_IA32_MISC_ENABLE_FAST_STRING_BIT 0 |
#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) |
#define MSR_IA32_MISC_ENABLE_TCC_BIT 1 |
#define MSR_IA32_MISC_ENABLE_TCC (1ULL << MSR_IA32_MISC_ENABLE_TCC_BIT) |
#define MSR_IA32_MISC_ENABLE_EMON_BIT 7 |
#define MSR_IA32_MISC_ENABLE_EMON (1ULL << MSR_IA32_MISC_ENABLE_EMON_BIT) |
#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT 11 |
#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT) |
#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT 12 |
#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT) |
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT 16 |
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT) |
#define MSR_IA32_MISC_ENABLE_MWAIT_BIT 18 |
#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT) |
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT 22 |
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) |
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT 23 |
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT 34 |
#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT) |
/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */ |
#define MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT 2 |
#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT) |
#define MSR_IA32_MISC_ENABLE_TM1_BIT 3 |
#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << MSR_IA32_MISC_ENABLE_TM1_BIT) |
#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT 4 |
#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT 6 |
#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT 8 |
#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT) |
#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT 9 |
#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_FERR_BIT 10 |
#define MSR_IA32_MISC_ENABLE_FERR (1ULL << MSR_IA32_MISC_ENABLE_FERR_BIT) |
#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT 10 |
#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT) |
#define MSR_IA32_MISC_ENABLE_TM2_BIT 13 |
#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << MSR_IA32_MISC_ENABLE_TM2_BIT) |
#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT 19 |
#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT 20 |
#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT) |
#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT 24 |
#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT) |
#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT 37 |
#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT 38 |
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT) |
#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT 39 |
#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT) |
#define MSR_IA32_TSC_DEADLINE 0x000006e0 |
/* P4/Xeon+ specific */ |
#define MSR_IA32_MCG_EAX 0x00000180 |
#define MSR_IA32_MCG_EBX 0x00000181 |
#define MSR_IA32_MCG_ECX 0x00000182 |
#define MSR_IA32_MCG_EDX 0x00000183 |
#define MSR_IA32_MCG_ESI 0x00000184 |
#define MSR_IA32_MCG_EDI 0x00000185 |
#define MSR_IA32_MCG_EBP 0x00000186 |
#define MSR_IA32_MCG_ESP 0x00000187 |
#define MSR_IA32_MCG_EFLAGS 0x00000188 |
#define MSR_IA32_MCG_EIP 0x00000189 |
#define MSR_IA32_MCG_RESERVED 0x0000018a |
/* Pentium IV performance counter MSRs */ |
#define MSR_P4_BPU_PERFCTR0 0x00000300 |
#define MSR_P4_BPU_PERFCTR1 0x00000301 |
#define MSR_P4_BPU_PERFCTR2 0x00000302 |
#define MSR_P4_BPU_PERFCTR3 0x00000303 |
#define MSR_P4_MS_PERFCTR0 0x00000304 |
#define MSR_P4_MS_PERFCTR1 0x00000305 |
#define MSR_P4_MS_PERFCTR2 0x00000306 |
#define MSR_P4_MS_PERFCTR3 0x00000307 |
#define MSR_P4_FLAME_PERFCTR0 0x00000308 |
#define MSR_P4_FLAME_PERFCTR1 0x00000309 |
#define MSR_P4_FLAME_PERFCTR2 0x0000030a |
#define MSR_P4_FLAME_PERFCTR3 0x0000030b |
#define MSR_P4_IQ_PERFCTR0 0x0000030c |
#define MSR_P4_IQ_PERFCTR1 0x0000030d |
#define MSR_P4_IQ_PERFCTR2 0x0000030e |
#define MSR_P4_IQ_PERFCTR3 0x0000030f |
#define MSR_P4_IQ_PERFCTR4 0x00000310 |
#define MSR_P4_IQ_PERFCTR5 0x00000311 |
#define MSR_P4_BPU_CCCR0 0x00000360 |
#define MSR_P4_BPU_CCCR1 0x00000361 |
#define MSR_P4_BPU_CCCR2 0x00000362 |
#define MSR_P4_BPU_CCCR3 0x00000363 |
#define MSR_P4_MS_CCCR0 0x00000364 |
#define MSR_P4_MS_CCCR1 0x00000365 |
#define MSR_P4_MS_CCCR2 0x00000366 |
#define MSR_P4_MS_CCCR3 0x00000367 |
#define MSR_P4_FLAME_CCCR0 0x00000368 |
#define MSR_P4_FLAME_CCCR1 0x00000369 |
#define MSR_P4_FLAME_CCCR2 0x0000036a |
#define MSR_P4_FLAME_CCCR3 0x0000036b |
#define MSR_P4_IQ_CCCR0 0x0000036c |
#define MSR_P4_IQ_CCCR1 0x0000036d |
#define MSR_P4_IQ_CCCR2 0x0000036e |
#define MSR_P4_IQ_CCCR3 0x0000036f |
#define MSR_P4_IQ_CCCR4 0x00000370 |
#define MSR_P4_IQ_CCCR5 0x00000371 |
#define MSR_P4_ALF_ESCR0 0x000003ca |
#define MSR_P4_ALF_ESCR1 0x000003cb |
#define MSR_P4_BPU_ESCR0 0x000003b2 |
#define MSR_P4_BPU_ESCR1 0x000003b3 |
#define MSR_P4_BSU_ESCR0 0x000003a0 |
#define MSR_P4_BSU_ESCR1 0x000003a1 |
#define MSR_P4_CRU_ESCR0 0x000003b8 |
#define MSR_P4_CRU_ESCR1 0x000003b9 |
#define MSR_P4_CRU_ESCR2 0x000003cc |
#define MSR_P4_CRU_ESCR3 0x000003cd |
#define MSR_P4_CRU_ESCR4 0x000003e0 |
#define MSR_P4_CRU_ESCR5 0x000003e1 |
#define MSR_P4_DAC_ESCR0 0x000003a8 |
#define MSR_P4_DAC_ESCR1 0x000003a9 |
#define MSR_P4_FIRM_ESCR0 0x000003a4 |
#define MSR_P4_FIRM_ESCR1 0x000003a5 |
#define MSR_P4_FLAME_ESCR0 0x000003a6 |
#define MSR_P4_FLAME_ESCR1 0x000003a7 |
#define MSR_P4_FSB_ESCR0 0x000003a2 |
#define MSR_P4_FSB_ESCR1 0x000003a3 |
#define MSR_P4_IQ_ESCR0 0x000003ba |
#define MSR_P4_IQ_ESCR1 0x000003bb |
#define MSR_P4_IS_ESCR0 0x000003b4 |
#define MSR_P4_IS_ESCR1 0x000003b5 |
#define MSR_P4_ITLB_ESCR0 0x000003b6 |
#define MSR_P4_ITLB_ESCR1 0x000003b7 |
#define MSR_P4_IX_ESCR0 0x000003c8 |
#define MSR_P4_IX_ESCR1 0x000003c9 |
#define MSR_P4_MOB_ESCR0 0x000003aa |
#define MSR_P4_MOB_ESCR1 0x000003ab |
#define MSR_P4_MS_ESCR0 0x000003c0 |
#define MSR_P4_MS_ESCR1 0x000003c1 |
#define MSR_P4_PMH_ESCR0 0x000003ac |
#define MSR_P4_PMH_ESCR1 0x000003ad |
#define MSR_P4_RAT_ESCR0 0x000003bc |
#define MSR_P4_RAT_ESCR1 0x000003bd |
#define MSR_P4_SAAT_ESCR0 0x000003ae |
#define MSR_P4_SAAT_ESCR1 0x000003af |
#define MSR_P4_SSU_ESCR0 0x000003be |
#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */ |
#define MSR_P4_TBPU_ESCR0 0x000003c2 |
#define MSR_P4_TBPU_ESCR1 0x000003c3 |
#define MSR_P4_TC_ESCR0 0x000003c4 |
#define MSR_P4_TC_ESCR1 0x000003c5 |
#define MSR_P4_U2L_ESCR0 0x000003b0 |
#define MSR_P4_U2L_ESCR1 0x000003b1 |
#define MSR_P4_PEBS_MATRIX_VERT 0x000003f2 |
/* Intel Core-based CPU performance counters */ |
#define MSR_CORE_PERF_FIXED_CTR0 0x00000309 |
#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a |
#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b |
#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d |
#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e |
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f |
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390 |
/* Geode defined MSRs */ |
#define MSR_GEODE_BUSCONT_CONF0 0x00001900 |
/* Intel VT MSRs */ |
#define MSR_IA32_VMX_BASIC 0x00000480 |
#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481 |
#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482 |
#define MSR_IA32_VMX_EXIT_CTLS 0x00000483 |
#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484 |
#define MSR_IA32_VMX_MISC 0x00000485 |
#define MSR_IA32_VMX_CR0_FIXED0 0x00000486 |
#define MSR_IA32_VMX_CR0_FIXED1 0x00000487 |
#define MSR_IA32_VMX_CR4_FIXED0 0x00000488 |
#define MSR_IA32_VMX_CR4_FIXED1 0x00000489 |
#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a |
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b |
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c |
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d |
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e |
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f |
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490 |
#define MSR_IA32_VMX_VMFUNC 0x00000491 |
/* VMX_BASIC bits and bitmasks */ |
#define VMX_BASIC_VMCS_SIZE_SHIFT 32 |
#define VMX_BASIC_TRUE_CTLS (1ULL << 55) |
#define VMX_BASIC_64 0x0001000000000000LLU |
#define VMX_BASIC_MEM_TYPE_SHIFT 50 |
#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU |
#define VMX_BASIC_MEM_TYPE_WB 6LLU |
#define VMX_BASIC_INOUT 0x0040000000000000LLU |
/* MSR_IA32_VMX_MISC bits */ |
#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29) |
#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F |
/* AMD-V MSRs */ |
#define MSR_VM_CR 0xc0010114 |
#define MSR_VM_IGNNE 0xc0010115 |
#define MSR_VM_HSAVE_PA 0xc0010117 |
#endif /* _ASM_X86_MSR_INDEX_H */ |
/drivers/include/uapi/asm/msr.h |
---|
0,0 → 1,15 |
#ifndef _UAPI_ASM_X86_MSR_H |
#define _UAPI_ASM_X86_MSR_H |
#include <asm/msr-index.h> |
#ifndef __ASSEMBLY__ |
#include <linux/types.h> |
#include <linux/ioctl.h> |
#define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8]) |
#define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8]) |
#endif /* __ASSEMBLY__ */ |
#endif /* _UAPI_ASM_X86_MSR_H */ |
/drivers/include/uapi/asm/page_32_types.h |
---|
0,0 → 1,58 |
#ifndef _ASM_X86_PAGE_32_DEFS_H |
#define _ASM_X86_PAGE_32_DEFS_H |
#include <linux/const.h> |
/* |
* This handles the memory map. |
* |
* A __PAGE_OFFSET of 0xC0000000 means that the kernel has |
* a virtual address space of one gigabyte, which limits the |
* amount of physical memory you can use to about 950MB. |
* |
* If you want more physical memory than this then see the CONFIG_HIGHMEM4G |
* and CONFIG_HIGHMEM64G options in the kernel configuration. |
*/ |
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) |
#define __START_KERNEL_map __PAGE_OFFSET |
#define THREAD_SIZE_ORDER 1 |
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) |
#define DOUBLEFAULT_STACK 1 |
#define NMI_STACK 0 |
#define DEBUG_STACK 0 |
#define MCE_STACK 0 |
#define N_EXCEPTION_STACKS 1 |
#ifdef CONFIG_X86_PAE |
/* 44=32+12, the limit we can fit into an unsigned long pfn */ |
#define __PHYSICAL_MASK_SHIFT 44 |
#define __VIRTUAL_MASK_SHIFT 32 |
#else /* !CONFIG_X86_PAE */ |
#define __PHYSICAL_MASK_SHIFT 32 |
#define __VIRTUAL_MASK_SHIFT 32 |
#endif /* CONFIG_X86_PAE */ |
/* |
* Kernel image size is limited to 512 MB (see in arch/x86/kernel/head_32.S) |
*/ |
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) |
#ifndef __ASSEMBLY__ |
/* |
* This much address space is reserved for vmalloc() and iomap() |
* as well as fixmap mappings. |
*/ |
extern unsigned int __VMALLOC_RESERVE; |
extern int sysctl_legacy_va_layout; |
extern void find_low_pfn_range(void); |
extern void setup_bootmem_allocator(void); |
#endif /* !__ASSEMBLY__ */ |
#endif /* _ASM_X86_PAGE_32_DEFS_H */ |
/drivers/include/uapi/asm/page_types.h |
---|
0,0 → 1,68 |
#ifndef _ASM_X86_PAGE_DEFS_H |
#define _ASM_X86_PAGE_DEFS_H |
#include <linux/const.h> |
#include <linux/types.h> |
/* PAGE_SHIFT determines the page size */ |
#define PAGE_SHIFT 12 |
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) |
#define PAGE_MASK (~(PAGE_SIZE-1)) |
#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1)) |
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) |
/* Cast PAGE_MASK to a signed type so that it is sign-extended if |
virtual addresses are 32-bits but physical addresses are larger |
(ie, 32-bit PAE). */ |
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) |
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) |
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) |
#define HPAGE_SHIFT PMD_SHIFT |
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) |
#define HPAGE_MASK (~(HPAGE_SIZE - 1)) |
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) |
#define HUGE_MAX_HSTATE 2 |
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) |
#define VM_DATA_DEFAULT_FLAGS \ |
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ |
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
#define __PHYSICAL_START ALIGN(CONFIG_PHYSICAL_START, \ |
CONFIG_PHYSICAL_ALIGN) |
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) |
#ifdef CONFIG_X86_64 |
#include <asm/page_64_types.h> |
#else |
#include <asm/page_32_types.h> |
#endif /* CONFIG_X86_64 */ |
#ifndef __ASSEMBLY__ |
extern int devmem_is_allowed(unsigned long pagenr); |
extern unsigned long max_low_pfn_mapped; |
extern unsigned long max_pfn_mapped; |
static inline phys_addr_t get_max_mapped(void) |
{ |
return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; |
} |
bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn); |
extern unsigned long init_memory_mapping(unsigned long start, |
unsigned long end); |
extern void initmem_init(void); |
#endif /* !__ASSEMBLY__ */ |
#endif /* _ASM_X86_PAGE_DEFS_H */ |
/drivers/include/uapi/asm/posix_types.h |
---|
0,0 → 1,5 |
# ifdef CONFIG_X86_32 |
# include <asm/posix_types_32.h> |
# else |
# include <asm/posix_types_64.h> |
# endif |
/drivers/include/uapi/asm/processor-flags.h |
---|
0,0 → 1,153 |
#ifndef _UAPI_ASM_X86_PROCESSOR_FLAGS_H |
#define _UAPI_ASM_X86_PROCESSOR_FLAGS_H |
/* Various flags defined: can be included from assembler. */ |
#include <linux/const.h> |
/* |
* EFLAGS bits |
*/ |
#define X86_EFLAGS_CF_BIT 0 /* Carry Flag */ |
#define X86_EFLAGS_CF _BITUL(X86_EFLAGS_CF_BIT) |
#define X86_EFLAGS_FIXED_BIT 1 /* Bit 1 - always on */ |
#define X86_EFLAGS_FIXED _BITUL(X86_EFLAGS_FIXED_BIT) |
#define X86_EFLAGS_PF_BIT 2 /* Parity Flag */ |
#define X86_EFLAGS_PF _BITUL(X86_EFLAGS_PF_BIT) |
#define X86_EFLAGS_AF_BIT 4 /* Auxiliary carry Flag */ |
#define X86_EFLAGS_AF _BITUL(X86_EFLAGS_AF_BIT) |
#define X86_EFLAGS_ZF_BIT 6 /* Zero Flag */ |
#define X86_EFLAGS_ZF _BITUL(X86_EFLAGS_ZF_BIT) |
#define X86_EFLAGS_SF_BIT 7 /* Sign Flag */ |
#define X86_EFLAGS_SF _BITUL(X86_EFLAGS_SF_BIT) |
#define X86_EFLAGS_TF_BIT 8 /* Trap Flag */ |
#define X86_EFLAGS_TF _BITUL(X86_EFLAGS_TF_BIT) |
#define X86_EFLAGS_IF_BIT 9 /* Interrupt Flag */ |
#define X86_EFLAGS_IF _BITUL(X86_EFLAGS_IF_BIT) |
#define X86_EFLAGS_DF_BIT 10 /* Direction Flag */ |
#define X86_EFLAGS_DF _BITUL(X86_EFLAGS_DF_BIT) |
#define X86_EFLAGS_OF_BIT 11 /* Overflow Flag */ |
#define X86_EFLAGS_OF _BITUL(X86_EFLAGS_OF_BIT) |
#define X86_EFLAGS_IOPL_BIT 12 /* I/O Privilege Level (2 bits) */ |
#define X86_EFLAGS_IOPL (_AC(3,UL) << X86_EFLAGS_IOPL_BIT) |
#define X86_EFLAGS_NT_BIT 14 /* Nested Task */ |
#define X86_EFLAGS_NT _BITUL(X86_EFLAGS_NT_BIT) |
#define X86_EFLAGS_RF_BIT 16 /* Resume Flag */ |
#define X86_EFLAGS_RF _BITUL(X86_EFLAGS_RF_BIT) |
#define X86_EFLAGS_VM_BIT 17 /* Virtual Mode */ |
#define X86_EFLAGS_VM _BITUL(X86_EFLAGS_VM_BIT) |
#define X86_EFLAGS_AC_BIT 18 /* Alignment Check/Access Control */ |
#define X86_EFLAGS_AC _BITUL(X86_EFLAGS_AC_BIT) |
#define X86_EFLAGS_VIF_BIT 19 /* Virtual Interrupt Flag */ |
#define X86_EFLAGS_VIF _BITUL(X86_EFLAGS_VIF_BIT) |
#define X86_EFLAGS_VIP_BIT 20 /* Virtual Interrupt Pending */ |
#define X86_EFLAGS_VIP _BITUL(X86_EFLAGS_VIP_BIT) |
#define X86_EFLAGS_ID_BIT 21 /* CPUID detection */ |
#define X86_EFLAGS_ID _BITUL(X86_EFLAGS_ID_BIT) |
/* |
* Basic CPU control in CR0 |
*/ |
#define X86_CR0_PE_BIT 0 /* Protection Enable */ |
#define X86_CR0_PE _BITUL(X86_CR0_PE_BIT) |
#define X86_CR0_MP_BIT 1 /* Monitor Coprocessor */ |
#define X86_CR0_MP _BITUL(X86_CR0_MP_BIT) |
#define X86_CR0_EM_BIT 2 /* Emulation */ |
#define X86_CR0_EM _BITUL(X86_CR0_EM_BIT) |
#define X86_CR0_TS_BIT 3 /* Task Switched */ |
#define X86_CR0_TS _BITUL(X86_CR0_TS_BIT) |
#define X86_CR0_ET_BIT 4 /* Extension Type */ |
#define X86_CR0_ET _BITUL(X86_CR0_ET_BIT) |
#define X86_CR0_NE_BIT 5 /* Numeric Error */ |
#define X86_CR0_NE _BITUL(X86_CR0_NE_BIT) |
#define X86_CR0_WP_BIT 16 /* Write Protect */ |
#define X86_CR0_WP _BITUL(X86_CR0_WP_BIT) |
#define X86_CR0_AM_BIT 18 /* Alignment Mask */ |
#define X86_CR0_AM _BITUL(X86_CR0_AM_BIT) |
#define X86_CR0_NW_BIT 29 /* Not Write-through */ |
#define X86_CR0_NW _BITUL(X86_CR0_NW_BIT) |
#define X86_CR0_CD_BIT 30 /* Cache Disable */ |
#define X86_CR0_CD _BITUL(X86_CR0_CD_BIT) |
#define X86_CR0_PG_BIT 31 /* Paging */ |
#define X86_CR0_PG _BITUL(X86_CR0_PG_BIT) |
/* |
* Paging options in CR3 |
*/ |
#define X86_CR3_PWT_BIT 3 /* Page Write Through */ |
#define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT) |
#define X86_CR3_PCD_BIT 4 /* Page Cache Disable */ |
#define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT) |
#define X86_CR3_PCID_MASK _AC(0x00000fff,UL) /* PCID Mask */ |
/* |
* Intel CPU features in CR4 |
*/ |
#define X86_CR4_VME_BIT 0 /* enable vm86 extensions */ |
#define X86_CR4_VME _BITUL(X86_CR4_VME_BIT) |
#define X86_CR4_PVI_BIT 1 /* virtual interrupts flag enable */ |
#define X86_CR4_PVI _BITUL(X86_CR4_PVI_BIT) |
#define X86_CR4_TSD_BIT 2 /* disable time stamp at ipl 3 */ |
#define X86_CR4_TSD _BITUL(X86_CR4_TSD_BIT) |
#define X86_CR4_DE_BIT 3 /* enable debugging extensions */ |
#define X86_CR4_DE _BITUL(X86_CR4_DE_BIT) |
#define X86_CR4_PSE_BIT 4 /* enable page size extensions */ |
#define X86_CR4_PSE _BITUL(X86_CR4_PSE_BIT) |
#define X86_CR4_PAE_BIT 5 /* enable physical address extensions */ |
#define X86_CR4_PAE _BITUL(X86_CR4_PAE_BIT) |
#define X86_CR4_MCE_BIT 6 /* Machine check enable */ |
#define X86_CR4_MCE _BITUL(X86_CR4_MCE_BIT) |
#define X86_CR4_PGE_BIT 7 /* enable global pages */ |
#define X86_CR4_PGE _BITUL(X86_CR4_PGE_BIT) |
#define X86_CR4_PCE_BIT 8 /* enable performance counters at ipl 3 */ |
#define X86_CR4_PCE _BITUL(X86_CR4_PCE_BIT) |
#define X86_CR4_OSFXSR_BIT 9 /* enable fast FPU save and restore */ |
#define X86_CR4_OSFXSR _BITUL(X86_CR4_OSFXSR_BIT) |
#define X86_CR4_OSXMMEXCPT_BIT 10 /* enable unmasked SSE exceptions */ |
#define X86_CR4_OSXMMEXCPT _BITUL(X86_CR4_OSXMMEXCPT_BIT) |
#define X86_CR4_VMXE_BIT 13 /* enable VMX virtualization */ |
#define X86_CR4_VMXE _BITUL(X86_CR4_VMXE_BIT) |
#define X86_CR4_SMXE_BIT 14 /* enable safer mode (TXT) */ |
#define X86_CR4_SMXE _BITUL(X86_CR4_SMXE_BIT) |
#define X86_CR4_FSGSBASE_BIT 16 /* enable RDWRFSGS support */ |
#define X86_CR4_FSGSBASE _BITUL(X86_CR4_FSGSBASE_BIT) |
#define X86_CR4_PCIDE_BIT 17 /* enable PCID support */ |
#define X86_CR4_PCIDE _BITUL(X86_CR4_PCIDE_BIT) |
#define X86_CR4_OSXSAVE_BIT 18 /* enable xsave and xrestore */ |
#define X86_CR4_OSXSAVE _BITUL(X86_CR4_OSXSAVE_BIT) |
#define X86_CR4_SMEP_BIT 20 /* enable SMEP support */ |
#define X86_CR4_SMEP _BITUL(X86_CR4_SMEP_BIT) |
#define X86_CR4_SMAP_BIT 21 /* enable SMAP support */ |
#define X86_CR4_SMAP _BITUL(X86_CR4_SMAP_BIT) |
/* |
* x86-64 Task Priority Register, CR8 |
*/ |
#define X86_CR8_TPR _AC(0x0000000f,UL) /* task priority register */ |
/* |
* AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h> |
*/ |
/* |
* NSC/Cyrix CPU configuration register indexes |
*/ |
#define CX86_PCR0 0x20 |
#define CX86_GCR 0xb8 |
#define CX86_CCR0 0xc0 |
#define CX86_CCR1 0xc1 |
#define CX86_CCR2 0xc2 |
#define CX86_CCR3 0xc3 |
#define CX86_CCR4 0xe8 |
#define CX86_CCR5 0xe9 |
#define CX86_CCR6 0xea |
#define CX86_CCR7 0xeb |
#define CX86_PCR1 0xf0 |
#define CX86_DIR0 0xfe |
#define CX86_DIR1 0xff |
#define CX86_ARR_BASE 0xc4 |
#define CX86_RCR_BASE 0xdc |
#endif /* _UAPI_ASM_X86_PROCESSOR_FLAGS_H */ |
/drivers/include/uapi/asm/ptrace.h |
---|
0,0 → 1,262 |
#ifndef _ASM_X86_PTRACE_H |
#define _ASM_X86_PTRACE_H |
#include <asm/segment.h> |
#include <asm/page_types.h> |
#include <uapi/asm/ptrace.h> |
#ifndef __ASSEMBLY__ |
#ifdef __i386__ |
struct pt_regs { |
unsigned long bx; |
unsigned long cx; |
unsigned long dx; |
unsigned long si; |
unsigned long di; |
unsigned long bp; |
unsigned long ax; |
unsigned long ds; |
unsigned long es; |
unsigned long fs; |
unsigned long gs; |
unsigned long orig_ax; |
unsigned long ip; |
unsigned long cs; |
unsigned long flags; |
unsigned long sp; |
unsigned long ss; |
}; |
#else /* __i386__ */ |
struct pt_regs { |
unsigned long r15; |
unsigned long r14; |
unsigned long r13; |
unsigned long r12; |
unsigned long bp; |
unsigned long bx; |
/* arguments: non-interrupt/non-tracing syscalls only save up to here */ |
unsigned long r11; |
unsigned long r10; |
unsigned long r9; |
unsigned long r8; |
unsigned long ax; |
unsigned long cx; |
unsigned long dx; |
unsigned long si; |
unsigned long di; |
unsigned long orig_ax; |
/* end of arguments */ |
/* cpu exception frame or undefined */ |
unsigned long ip; |
unsigned long cs; |
unsigned long flags; |
unsigned long sp; |
unsigned long ss; |
/* top of stack page */ |
}; |
#endif /* !__i386__ */ |
#ifdef CONFIG_PARAVIRT |
#include <asm/paravirt_types.h> |
#endif |
struct cpuinfo_x86; |
struct task_struct; |
extern unsigned long profile_pc(struct pt_regs *regs); |
#define profile_pc profile_pc |
extern unsigned long |
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); |
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, |
int error_code, int si_code); |
extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch); |
extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch, |
unsigned long phase1_result); |
extern long syscall_trace_enter(struct pt_regs *); |
extern void syscall_trace_leave(struct pt_regs *); |
static inline unsigned long regs_return_value(struct pt_regs *regs) |
{ |
return regs->ax; |
} |
/* |
* user_mode_vm(regs) determines whether a register set came from user mode. |
* This is true if V8086 mode was enabled OR if the register set was from |
* protected mode with RPL-3 CS value. This tricky test checks that with |
* one comparison. Many places in the kernel can bypass this full check |
* if they have already ruled out V8086 mode, so user_mode(regs) can be used. |
*/ |
static inline int user_mode(struct pt_regs *regs) |
{ |
#ifdef CONFIG_X86_32 |
return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; |
#else |
return !!(regs->cs & 3); |
#endif |
} |
static inline int user_mode_vm(struct pt_regs *regs) |
{ |
#ifdef CONFIG_X86_32 |
return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= |
USER_RPL; |
#else |
return user_mode(regs); |
#endif |
} |
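/* |
 * Worked illustration of the "tricky test" above: X86_VM_MASK is the |
 * EFLAGS VM bit (1 << 17), so with vm86 active the OR result is at |
 * least 0x20000, which is >= USER_RPL whatever CS holds; with VM |
 * clear the expression reduces to (cs & 3) >= 3, i.e. RPL == 3. |
 */ |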
static inline int v8086_mode(struct pt_regs *regs) |
{ |
#ifdef CONFIG_X86_32 |
return (regs->flags & X86_VM_MASK); |
#else |
return 0; /* No V86 mode support in long mode */ |
#endif |
} |
#ifdef CONFIG_X86_64 |
static inline bool user_64bit_mode(struct pt_regs *regs) |
{ |
#ifndef CONFIG_PARAVIRT |
/* |
* On non-paravirt systems, this is the only long mode CPL 3 |
* selector. We do not allow long mode selectors in the LDT. |
*/ |
return regs->cs == __USER_CS; |
#else |
/* Headers are too twisted for this to go in paravirt.h. */ |
return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; |
#endif |
} |
#define current_user_stack_pointer() this_cpu_read(old_rsp) |
/* ia32 vs. x32 difference */ |
#define compat_user_stack_pointer() \ |
(test_thread_flag(TIF_IA32) \ |
? current_pt_regs()->sp \ |
: this_cpu_read(old_rsp)) |
#endif |
#ifdef CONFIG_X86_32 |
extern unsigned long kernel_stack_pointer(struct pt_regs *regs); |
#else |
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) |
{ |
return regs->sp; |
} |
#endif |
#define GET_IP(regs) ((regs)->ip) |
#define GET_FP(regs) ((regs)->bp) |
#define GET_USP(regs) ((regs)->sp) |
#include <asm-generic/ptrace.h> |
/* Query offset/name of register from its name/offset */ |
extern int regs_query_register_offset(const char *name); |
extern const char *regs_query_register_name(unsigned int offset); |
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss)) |
/** |
* regs_get_register() - get register value from its offset |
* @regs: pt_regs from which the register value is read. |
* @offset: offset number of the register. |
* |
* regs_get_register() returns the value of a register. @offset is the |
* offset of the register within the struct pt_regs pointed to by @regs. |
* If @offset is bigger than MAX_REG_OFFSET, this returns 0. |
*/ |
static inline unsigned long regs_get_register(struct pt_regs *regs, |
unsigned int offset) |
{ |
if (unlikely(offset > MAX_REG_OFFSET)) |
return 0; |
#ifdef CONFIG_X86_32 |
/* |
* Traps from the kernel do not save sp and ss. |
* Use the helper function to retrieve sp. |
*/ |
if (offset == offsetof(struct pt_regs, sp) && |
regs->cs == __KERNEL_CS) |
return kernel_stack_pointer(regs); |
#endif |
return *(unsigned long *)((unsigned long)regs + offset); |
} |
/** |
* regs_within_kernel_stack() - check the address in the stack |
* @regs: pt_regs which contains kernel stack pointer. |
* @addr: address which is checked. |
* |
* regs_within_kernel_stack() checks whether @addr is within the kernel |
* stack page(s): it returns true if so and false otherwise. |
*/ |
static inline int regs_within_kernel_stack(struct pt_regs *regs, |
unsigned long addr) |
{ |
return ((addr & ~(THREAD_SIZE - 1)) == |
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); |
} |
/** |
* regs_get_kernel_stack_nth() - get Nth entry of the stack |
* @regs: pt_regs which contains kernel stack pointer. |
* @n: stack entry number. |
* |
* regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack |
* pointed to by @regs. If the @n-th entry is not within the kernel stack, |
* this returns 0. |
*/ |
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, |
unsigned int n) |
{ |
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); |
addr += n; |
if (regs_within_kernel_stack(regs, (unsigned long)addr)) |
return *addr; |
else |
return 0; |
} |
#define arch_has_single_step() (1) |
#ifdef CONFIG_X86_DEBUGCTLMSR |
#define arch_has_block_step() (1) |
#else |
#define arch_has_block_step() (boot_cpu_data.x86 >= 6) |
#endif |
#define ARCH_HAS_USER_SINGLE_STEP_INFO |
/* |
* When hitting ptrace_stop(), we cannot return using SYSRET because |
* that does not restore the full CPU state, only a minimal set. The |
* ptracer can change arbitrary register values, which is usually okay |
* because the usual ptrace stops run off the signal delivery path which |
* forces IRET; however, ptrace_event() stops happen in arbitrary places |
* in the kernel and do not force the IRET path. |
* |
* So force IRET path after a ptrace stop. |
*/ |
#define arch_ptrace_stop_needed(code, info) \ |
({ \ |
set_thread_flag(TIF_NOTIFY_RESUME); \ |
false; \ |
}) |
struct user_desc; |
extern int do_get_thread_area(struct task_struct *p, int idx, |
struct user_desc __user *info); |
extern int do_set_thread_area(struct task_struct *p, int idx, |
struct user_desc __user *info, int can_allocate); |
#endif /* !__ASSEMBLY__ */ |
#endif /* _ASM_X86_PTRACE_H */ |
/drivers/include/uapi/asm/segment.h |
---|
0,0 → 1,265 |
#ifndef _ASM_X86_SEGMENT_H |
#define _ASM_X86_SEGMENT_H |
#include <linux/const.h> |
/* Constructor for a conventional segment GDT (or LDT) entry */ |
/* This is a macro so it can be used in initializers */ |
#define GDT_ENTRY(flags, base, limit) \ |
((((base) & _AC(0xff000000,ULL)) << (56-24)) | \ |
(((flags) & _AC(0x0000f0ff,ULL)) << 40) | \ |
(((limit) & _AC(0x000f0000,ULL)) << (48-16)) | \ |
(((base) & _AC(0x00ffffff,ULL)) << 16) | \ |
(((limit) & _AC(0x0000ffff,ULL)))) |
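/* |
 * Worked example (computed by hand, not taken from this tree): a flat |
 * 4 GiB ring-0 32-bit code segment packs access byte 0x9a and |
 * granularity flags 0xc (G=1, D/B=1) with base 0 and limit 0xfffff, |
 * so GDT_ENTRY(0xc09a, 0, 0xfffff) == 0x00cf9a000000ffffULL, the |
 * classic flat kernel code descriptor. |
 */ |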
/* Simple and small GDT entries for booting only */ |
#define GDT_ENTRY_BOOT_CS 2 |
#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) |
#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) |
#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) |
#define GDT_ENTRY_BOOT_TSS (GDT_ENTRY_BOOT_CS + 2) |
#define __BOOT_TSS (GDT_ENTRY_BOOT_TSS * 8) |
#define SEGMENT_RPL_MASK 0x3 /* |
* Bottom two bits of selector give the ring |
* privilege level |
*/ |
#define SEGMENT_TI_MASK 0x4 /* Bit 2 is table indicator (LDT/GDT) */ |
#define USER_RPL 0x3 /* User mode is privilege level 3 */ |
#define SEGMENT_LDT 0x4 /* LDT segment has TI set... */ |
#define SEGMENT_GDT 0x0 /* ... GDT has it cleared */ |
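/* |
 * A hedged sketch (helper names are ours): split a selector into its |
 * three parts. E.g. on 32-bit, __USER_CS is 14*8 + 3 = 0x73: |
 * descriptor index 14, table indicator 0 (GDT), RPL 3. |
 */ |
static inline unsigned int selector_rpl(unsigned short sel) |
{ |
return sel & SEGMENT_RPL_MASK; /* requested privilege level */ |
} |
static inline unsigned int selector_uses_ldt(unsigned short sel) |
{ |
return (sel & SEGMENT_TI_MASK) == SEGMENT_LDT; |
} |
static inline unsigned int selector_index(unsigned short sel) |
{ |
return sel >> 3; /* descriptor slot in the GDT/LDT */ |
} |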
#ifdef CONFIG_X86_32 |
/* |
* The layout of the per-CPU GDT under Linux: |
* |
* 0 - null |
* 1 - reserved |
* 2 - reserved |
* 3 - reserved |
* |
* 4 - unused <==== new cacheline |
* 5 - unused |
* |
* ------- start of TLS (Thread-Local Storage) segments: |
* |
* 6 - TLS segment #1 [ glibc's TLS segment ] |
* 7 - TLS segment #2 [ Wine's %fs Win32 segment ] |
* 8 - TLS segment #3 |
* 9 - reserved |
* 10 - reserved |
* 11 - reserved |
* |
* ------- start of kernel segments: |
* |
* 12 - kernel code segment <==== new cacheline |
* 13 - kernel data segment |
* 14 - default user CS |
* 15 - default user DS |
* 16 - TSS |
* 17 - LDT |
* 18 - PNPBIOS support (16->32 gate) |
* 19 - PNPBIOS support |
* 20 - PNPBIOS support |
* 21 - PNPBIOS support |
* 22 - PNPBIOS support |
* 23 - APM BIOS support |
* 24 - APM BIOS support |
* 25 - APM BIOS support |
* |
* 26 - ESPFIX small SS |
* 27 - per-cpu [ offset to per-cpu data area ] |
* 28 - stack_canary-20 [ for stack protector ] |
* 29 - unused |
* 30 - unused |
* 31 - TSS for double fault handler |
*/ |
#define GDT_ENTRY_TLS_MIN 6 |
#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) |
#define GDT_ENTRY_DEFAULT_USER_CS 14 |
#define GDT_ENTRY_DEFAULT_USER_DS 15 |
#define GDT_ENTRY_KERNEL_BASE (12) |
#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0) |
#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1) |
#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4) |
#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE+5) |
#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE+6) |
#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE+11) |
#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE+14) |
#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS*8) |
#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE+15) |
#ifdef CONFIG_SMP |
#define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8) |
#else |
#define __KERNEL_PERCPU 0 |
#endif |
#define GDT_ENTRY_STACK_CANARY (GDT_ENTRY_KERNEL_BASE+16) |
#ifdef CONFIG_CC_STACKPROTECTOR |
#define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY*8) |
#else |
#define __KERNEL_STACK_CANARY 0 |
#endif |
#define GDT_ENTRY_DOUBLEFAULT_TSS 31 |
/* |
* The GDT has 32 entries |
*/ |
#define GDT_ENTRIES 32 |
/* The PnP BIOS entries in the GDT */ |
#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0) |
#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1) |
#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2) |
#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3) |
#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4) |
/* The PnP BIOS selectors */ |
#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */ |
#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */ |
#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */ |
#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */ |
#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */ |
/* |
* Matching rules for certain types of segments. |
*/ |
/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ |
#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) |
#else |
#include <asm/cache.h> |
#define GDT_ENTRY_KERNEL32_CS 1 |
#define GDT_ENTRY_KERNEL_CS 2 |
#define GDT_ENTRY_KERNEL_DS 3 |
#define __KERNEL32_CS (GDT_ENTRY_KERNEL32_CS * 8) |
/* |
* we cannot use the same code segment descriptor for user and kernel |
* -- not even in the long flat mode, because of different DPL /kkeil |
* The segment offset needs to contain a RPL. Grr. -AK |
* GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets) |
*/ |
#define GDT_ENTRY_DEFAULT_USER32_CS 4 |
#define GDT_ENTRY_DEFAULT_USER_DS 5 |
#define GDT_ENTRY_DEFAULT_USER_CS 6 |
#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3) |
#define __USER32_DS __USER_DS |
#define GDT_ENTRY_TSS 8 /* needs two entries */ |
#define GDT_ENTRY_LDT 10 /* needs two entries */ |
#define GDT_ENTRY_TLS_MIN 12 |
#define GDT_ENTRY_TLS_MAX 14 |
#define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */ |
#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3) |
/* TLS indexes for 64bit - hardcoded in arch_prctl */ |
#define FS_TLS 0 |
#define GS_TLS 1 |
#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3) |
#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3) |
#define GDT_ENTRIES 16 |
#endif |
#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8) |
#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8) |
#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3) |
#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3) |
#ifndef CONFIG_PARAVIRT |
#define get_kernel_rpl() 0 |
#endif |
#define IDT_ENTRIES 256 |
#define NUM_EXCEPTION_VECTORS 32 |
/* Bitmask of exception vectors which push an error code on the stack */ |
#define EXCEPTION_ERRCODE_MASK 0x00027d00 |
#define GDT_SIZE (GDT_ENTRIES * 8) |
#define GDT_ENTRY_TLS_ENTRIES 3 |
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) |
#ifdef __KERNEL__ |
#ifndef __ASSEMBLY__ |
extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5]; |
#ifdef CONFIG_TRACING |
#define trace_early_idt_handlers early_idt_handlers |
#endif |
/* |
* Load a segment. Fall back on loading the zero |
* segment if something goes wrong.. |
*/ |
#define loadsegment(seg, value) \ |
do { \ |
unsigned short __val = (value); \ |
\ |
asm volatile(" \n" \ |
"1: movl %k0,%%" #seg " \n" \ |
\ |
".section .fixup,\"ax\" \n" \ |
"2: xorl %k0,%k0 \n" \ |
" jmp 1b \n" \ |
".previous \n" \ |
\ |
_ASM_EXTABLE(1b, 2b) \ |
\ |
: "+r" (__val) : : "memory"); \ |
} while (0) |
/* |
* Save a segment register away |
*/ |
#define savesegment(seg, value) \ |
asm("mov %%" #seg ",%0":"=r" (value) : : "memory") |
/* |
* x86_32 user gs accessors. |
*/ |
#ifdef CONFIG_X86_32 |
#ifdef CONFIG_X86_32_LAZY_GS |
#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;}) |
#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v)) |
#define task_user_gs(tsk) ((tsk)->thread.gs) |
#define lazy_save_gs(v) savesegment(gs, (v)) |
#define lazy_load_gs(v) loadsegment(gs, (v)) |
#else /* X86_32_LAZY_GS */ |
#define get_user_gs(regs) (u16)((regs)->gs) |
#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0) |
#define task_user_gs(tsk) (task_pt_regs(tsk)->gs) |
#define lazy_save_gs(v) do { } while (0) |
#define lazy_load_gs(v) do { } while (0) |
#endif /* X86_32_LAZY_GS */ |
#endif /* X86_32 */ |
static inline unsigned long get_limit(unsigned long segment) |
{ |
unsigned long __limit; |
asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); |
return __limit + 1; |
} |
#endif /* !__ASSEMBLY__ */ |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_SEGMENT_H */ |
/drivers/include/uapi/asm/sigcontext.h |
---|
0,0 → 1,221 |
#ifndef _UAPI_ASM_X86_SIGCONTEXT_H |
#define _UAPI_ASM_X86_SIGCONTEXT_H |
#include <linux/compiler.h> |
#include <linux/types.h> |
#define FP_XSTATE_MAGIC1 0x46505853U |
#define FP_XSTATE_MAGIC2 0x46505845U |
#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2) |
/* |
* Bytes 464..511 in the current 512-byte layout of the fxsave/fxrstor |
* frame are reserved for SW usage. On CPUs supporting xsave/xrstor, |
* these bytes are used to extend the fpstate pointer in the sigcontext, |
* which now includes the extended state information along with the |
* fpstate information. |
* |
* Presence of FP_XSTATE_MAGIC1 at the beginning of this SW-reserved |
* area and of FP_XSTATE_MAGIC2 at the end of the memory layout |
* (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of |
* extended state information in the memory layout pointed to by the |
* fpstate pointer in the sigcontext. |
*/ |
struct _fpx_sw_bytes { |
__u32 magic1; /* FP_XSTATE_MAGIC1 */ |
__u32 extended_size; /* total size of the layout referred to by |
* the fpstate pointer in the sigcontext. |
*/ |
__u64 xstate_bv; |
/* feature bit mask (including fp/sse/extended |
* state) that is present in the memory |
* layout. |
*/ |
__u32 xstate_size; /* actual xsave state size, based on the |
* features saved in the layout. |
* 'extended_size' will be greater than |
* 'xstate_size'. |
*/ |
__u32 padding[7]; /* for future use. */ |
}; |
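/* |
 * A hedged sketch (helper name is ours): detect the extended-state |
 * layout described above. FP_XSTATE_MAGIC2 sits at |
 * fpstate + extended_size - FP_XSTATE_MAGIC2_SIZE. |
 */ |
static inline int fp_frame_has_xstate(const void *fpstate, |
const struct _fpx_sw_bytes *sw) |
{ |
const __u32 *magic2; |
if (sw->magic1 != FP_XSTATE_MAGIC1) |
return 0; /* legacy fxsave-only frame */ |
magic2 = (const __u32 *)((const char *)fpstate + |
sw->extended_size - FP_XSTATE_MAGIC2_SIZE); |
return *magic2 == FP_XSTATE_MAGIC2; |
} |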
#ifdef __i386__ |
/* |
* As documented in the iBCS2 standard.. |
* |
* The first part of "struct _fpstate" is just the normal i387 |
* hardware setup, the extra "status" word is used to save the |
* coprocessor status word before entering the handler. |
* |
* Pentium III FXSR, SSE support |
* Gareth Hughes <gareth@valinux.com>, May 2000 |
* |
* The FPU state data structure has had to grow to accommodate the |
* extended FPU state required by the Streaming SIMD Extensions. |
* There is no documented standard to accomplish this at the moment. |
*/ |
struct _fpreg { |
unsigned short significand[4]; |
unsigned short exponent; |
}; |
struct _fpxreg { |
unsigned short significand[4]; |
unsigned short exponent; |
unsigned short padding[3]; |
}; |
struct _xmmreg { |
unsigned long element[4]; |
}; |
struct _fpstate { |
/* Regular FPU environment */ |
unsigned long cw; |
unsigned long sw; |
unsigned long tag; |
unsigned long ipoff; |
unsigned long cssel; |
unsigned long dataoff; |
unsigned long datasel; |
struct _fpreg _st[8]; |
unsigned short status; |
unsigned short magic; /* 0xffff = regular FPU data only */ |
/* FXSR FPU environment */ |
unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */ |
unsigned long mxcsr; |
unsigned long reserved; |
struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ |
struct _xmmreg _xmm[8]; |
unsigned long padding1[44]; |
union { |
unsigned long padding2[12]; |
struct _fpx_sw_bytes sw_reserved; /* represents the extended |
* state info */ |
}; |
}; |
#define X86_FXSR_MAGIC 0x0000 |
#ifndef __KERNEL__ |
/* |
* User-space might still rely on the old definition: |
*/ |
struct sigcontext { |
unsigned short gs, __gsh; |
unsigned short fs, __fsh; |
unsigned short es, __esh; |
unsigned short ds, __dsh; |
unsigned long edi; |
unsigned long esi; |
unsigned long ebp; |
unsigned long esp; |
unsigned long ebx; |
unsigned long edx; |
unsigned long ecx; |
unsigned long eax; |
unsigned long trapno; |
unsigned long err; |
unsigned long eip; |
unsigned short cs, __csh; |
unsigned long eflags; |
unsigned long esp_at_signal; |
unsigned short ss, __ssh; |
struct _fpstate __user *fpstate; |
unsigned long oldmask; |
unsigned long cr2; |
}; |
#endif /* !__KERNEL__ */ |
#else /* __i386__ */ |
/* FXSAVE frame */ |
/* Note: reserved1/2 may someday contain valuable data. Always save/restore |
them when you change signal frames. */ |
struct _fpstate { |
__u16 cwd; |
__u16 swd; |
__u16 twd; /* Note this is not the same as the |
32bit/x87/FSAVE twd */ |
__u16 fop; |
__u64 rip; |
__u64 rdp; |
__u32 mxcsr; |
__u32 mxcsr_mask; |
__u32 st_space[32]; /* 8*16 bytes for each FP-reg */ |
__u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */ |
__u32 reserved2[12]; |
union { |
__u32 reserved3[12]; |
struct _fpx_sw_bytes sw_reserved; /* represents the extended |
* state information */ |
}; |
}; |
#ifndef __KERNEL__ |
/* |
* User-space might still rely on the old definition: |
*/ |
struct sigcontext { |
__u64 r8; |
__u64 r9; |
__u64 r10; |
__u64 r11; |
__u64 r12; |
__u64 r13; |
__u64 r14; |
__u64 r15; |
__u64 rdi; |
__u64 rsi; |
__u64 rbp; |
__u64 rbx; |
__u64 rdx; |
__u64 rax; |
__u64 rcx; |
__u64 rsp; |
__u64 rip; |
__u64 eflags; /* RFLAGS */ |
__u16 cs; |
__u16 gs; |
__u16 fs; |
__u16 __pad0; |
__u64 err; |
__u64 trapno; |
__u64 oldmask; |
__u64 cr2; |
struct _fpstate __user *fpstate; /* zero when no FPU context */ |
#ifdef __ILP32__ |
__u32 __fpstate_pad; |
#endif |
__u64 reserved1[8]; |
}; |
#endif /* !__KERNEL__ */ |
#endif /* !__i386__ */ |
struct _xsave_hdr { |
__u64 xstate_bv; |
__u64 reserved1[2]; |
__u64 reserved2[5]; |
}; |
struct _ymmh_state { |
/* 16 * 16 bytes for each YMMH-reg */ |
__u32 ymmh_space[64]; |
}; |
/* |
* Extended state pointed to by the fpstate pointer in the sigcontext. |
* In addition to the fpstate, information encoded in the xstate_hdr |
* indicates the presence of other extended state information |
* supported by the processor and OS. |
*/ |
struct _xstate { |
struct _fpstate fpstate; |
struct _xsave_hdr xstate_hdr; |
struct _ymmh_state ymmh; |
/* new processor state extensions go here */ |
}; |
#endif /* _UAPI_ASM_X86_SIGCONTEXT_H */ |
/drivers/include/uapi/asm/vm86.h |
---|
0,0 → 1,129 |
#ifndef _UAPI_ASM_X86_VM86_H |
#define _UAPI_ASM_X86_VM86_H |
/* |
* I'm guessing at the VIF/VIP flag usage, but hope that this is how |
* the Pentium uses them. Linux will return from vm86 mode when both |
* VIF and VIP are set. |
* |
* On a Pentium, we could probably optimize the virtual flags directly |
* in the eflags register instead of doing it "by hand" in vflags... |
* |
* Linus |
*/ |
#include <asm/processor-flags.h> |
#define BIOSSEG 0x0f000 |
#define CPU_086 0 |
#define CPU_186 1 |
#define CPU_286 2 |
#define CPU_386 3 |
#define CPU_486 4 |
#define CPU_586 5 |
/* |
* Return values for the 'vm86()' system call |
*/ |
#define VM86_TYPE(retval) ((retval) & 0xff) |
#define VM86_ARG(retval) ((retval) >> 8) |
#define VM86_SIGNAL 0 /* return due to signal */ |
#define VM86_UNKNOWN 1 /* unhandled GP fault |
- IO-instruction or similar */ |
#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */ |
#define VM86_STI 3 /* sti/popf/iret instruction enabled |
virtual interrupts */ |
/* |
* Additional return values when invoking new vm86() |
*/ |
#define VM86_PICRETURN 4 /* return due to pending PIC request */ |
#define VM86_TRAP 6 /* return due to DOS-debugger request */ |
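/* |
 * A hedged sketch (helper name is ours): decode a vm86() return |
 * value with the macros above; e.g. a DOS "int 21h" comes back as |
 * type VM86_INTx with VM86_ARG(retval) == 0x21. |
 */ |
static inline int vm86_intx_vector(int retval) |
{ |
if (VM86_TYPE(retval) != VM86_INTx) |
return -1; /* not an INTx exit */ |
return VM86_ARG(retval); /* the interrupt vector */ |
} |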
/* |
* function codes when invoking new vm86() |
*/ |
#define VM86_PLUS_INSTALL_CHECK 0 |
#define VM86_ENTER 1 |
#define VM86_ENTER_NO_BYPASS 2 |
#define VM86_REQUEST_IRQ 3 |
#define VM86_FREE_IRQ 4 |
#define VM86_GET_IRQ_BITS 5 |
#define VM86_GET_AND_RESET_IRQ 6 |
/* |
* This is the stack-layout seen by the user space program when we have |
* done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout |
* is 'kernel_vm86_regs' (see below). |
*/ |
struct vm86_regs { |
/* |
* normal regs, with special meaning for the segment descriptors.. |
*/ |
long ebx; |
long ecx; |
long edx; |
long esi; |
long edi; |
long ebp; |
long eax; |
long __null_ds; |
long __null_es; |
long __null_fs; |
long __null_gs; |
long orig_eax; |
long eip; |
unsigned short cs, __csh; |
long eflags; |
long esp; |
unsigned short ss, __ssh; |
/* |
* these are specific to v86 mode: |
*/ |
unsigned short es, __esh; |
unsigned short ds, __dsh; |
unsigned short fs, __fsh; |
unsigned short gs, __gsh; |
}; |
struct revectored_struct { |
unsigned long __map[8]; /* 256 bits */ |
}; |
struct vm86_struct { |
struct vm86_regs regs; |
unsigned long flags; |
unsigned long screen_bitmap; |
unsigned long cpu_type; |
struct revectored_struct int_revectored; |
struct revectored_struct int21_revectored; |
}; |
/* |
* flags masks |
*/ |
#define VM86_SCREEN_BITMAP 0x0001 |
struct vm86plus_info_struct { |
unsigned long force_return_for_pic:1; |
unsigned long vm86dbg_active:1; /* for debugger */ |
unsigned long vm86dbg_TFpendig:1; /* for debugger */ |
unsigned long unused:28; |
unsigned long is_vm86pus:1; /* for vm86 internal use */ |
unsigned char vm86dbg_intxxtab[32]; /* for debugger */ |
}; |
struct vm86plus_struct { |
struct vm86_regs regs; |
unsigned long flags; |
unsigned long screen_bitmap; |
unsigned long cpu_type; |
struct revectored_struct int_revectored; |
struct revectored_struct int21_revectored; |
struct vm86plus_info_struct vm86plus; |
}; |
#endif /* _UAPI_ASM_X86_VM86_H */ |
/drivers/include/uapi/asm-generic/bitsperlong.h |
---|
0,0 → 1,15 |
#ifndef _UAPI__ASM_GENERIC_BITS_PER_LONG |
#define _UAPI__ASM_GENERIC_BITS_PER_LONG |
/* |
* There seems to be no way of detecting this automatically from user |
* space, so 64 bit architectures should override this in their |
* bitsperlong.h. In particular, an architecture that supports |
* both 32 and 64 bit user space must not rely on CONFIG_64BIT |
* to decide it, but rather check a compiler provided macro. |
*/ |
#ifndef __BITS_PER_LONG |
#define __BITS_PER_LONG 32 |
#endif |
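/* |
 * A hedged sketch of such an override (the x86 form; this tree's |
 * asm/bitsperlong.h may differ): |
 * |
 * #if defined(__x86_64__) && !defined(__ILP32__) |
 * #define __BITS_PER_LONG 64 |
 * #else |
 * #define __BITS_PER_LONG 32 |
 * #endif |
 */ |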
#endif /* _UAPI__ASM_GENERIC_BITS_PER_LONG */ |
/drivers/include/uapi/asm-generic/errno-base.h |
---|
0,0 → 1,39 |
#ifndef _ASM_GENERIC_ERRNO_BASE_H |
#define _ASM_GENERIC_ERRNO_BASE_H |
#define EPERM 1 /* Operation not permitted */ |
#define ENOENT 2 /* No such file or directory */ |
#define ESRCH 3 /* No such process */ |
#define EINTR 4 /* Interrupted system call */ |
#define EIO 5 /* I/O error */ |
#define ENXIO 6 /* No such device or address */ |
#define E2BIG 7 /* Argument list too long */ |
#define ENOEXEC 8 /* Exec format error */ |
#define EBADF 9 /* Bad file number */ |
#define ECHILD 10 /* No child processes */ |
#define EAGAIN 11 /* Try again */ |
#define ENOMEM 12 /* Out of memory */ |
#define EACCES 13 /* Permission denied */ |
#define EFAULT 14 /* Bad address */ |
#define ENOTBLK 15 /* Block device required */ |
#define EBUSY 16 /* Device or resource busy */ |
#define EEXIST 17 /* File exists */ |
#define EXDEV 18 /* Cross-device link */ |
#define ENODEV 19 /* No such device */ |
#define ENOTDIR 20 /* Not a directory */ |
#define EISDIR 21 /* Is a directory */ |
#define EINVAL 22 /* Invalid argument */ |
#define ENFILE 23 /* File table overflow */ |
#define EMFILE 24 /* Too many open files */ |
#define ENOTTY 25 /* Not a typewriter */ |
#define ETXTBSY 26 /* Text file busy */ |
#define EFBIG 27 /* File too large */ |
#define ENOSPC 28 /* No space left on device */ |
#define ESPIPE 29 /* Illegal seek */ |
#define EROFS 30 /* Read-only file system */ |
#define EMLINK 31 /* Too many links */ |
#define EPIPE 32 /* Broken pipe */ |
#define EDOM 33 /* Math argument out of domain of func */ |
#define ERANGE 34 /* Math result not representable */ |
#endif |
/drivers/include/uapi/asm-generic/errno.h |
---|
0,0 → 1,113 |
#ifndef _ASM_GENERIC_ERRNO_H |
#define _ASM_GENERIC_ERRNO_H |
#include <asm-generic/errno-base.h> |
#define EDEADLK 35 /* Resource deadlock would occur */ |
#define ENAMETOOLONG 36 /* File name too long */ |
#define ENOLCK 37 /* No record locks available */ |
#define ENOSYS 38 /* Function not implemented */ |
#define ENOTEMPTY 39 /* Directory not empty */ |
#define ELOOP 40 /* Too many symbolic links encountered */ |
#define EWOULDBLOCK EAGAIN /* Operation would block */ |
#define ENOMSG 42 /* No message of desired type */ |
#define EIDRM 43 /* Identifier removed */ |
#define ECHRNG 44 /* Channel number out of range */ |
#define EL2NSYNC 45 /* Level 2 not synchronized */ |
#define EL3HLT 46 /* Level 3 halted */ |
#define EL3RST 47 /* Level 3 reset */ |
#define ELNRNG 48 /* Link number out of range */ |
#define EUNATCH 49 /* Protocol driver not attached */ |
#define ENOCSI 50 /* No CSI structure available */ |
#define EL2HLT 51 /* Level 2 halted */ |
#define EBADE 52 /* Invalid exchange */ |
#define EBADR 53 /* Invalid request descriptor */ |
#define EXFULL 54 /* Exchange full */ |
#define ENOANO 55 /* No anode */ |
#define EBADRQC 56 /* Invalid request code */ |
#define EBADSLT 57 /* Invalid slot */ |
#define EDEADLOCK EDEADLK |
#define EBFONT 59 /* Bad font file format */ |
#define ENOSTR 60 /* Device not a stream */ |
#define ENODATA 61 /* No data available */ |
#define ETIME 62 /* Timer expired */ |
#define ENOSR 63 /* Out of streams resources */ |
#define ENONET 64 /* Machine is not on the network */ |
#define ENOPKG 65 /* Package not installed */ |
#define EREMOTE 66 /* Object is remote */ |
#define ENOLINK 67 /* Link has been severed */ |
#define EADV 68 /* Advertise error */ |
#define ESRMNT 69 /* Srmount error */ |
#define ECOMM 70 /* Communication error on send */ |
#define EPROTO 71 /* Protocol error */ |
#define EMULTIHOP 72 /* Multihop attempted */ |
#define EDOTDOT 73 /* RFS specific error */ |
#define EBADMSG 74 /* Not a data message */ |
#define EOVERFLOW 75 /* Value too large for defined data type */ |
#define ENOTUNIQ 76 /* Name not unique on network */ |
#define EBADFD 77 /* File descriptor in bad state */ |
#define EREMCHG 78 /* Remote address changed */ |
#define ELIBACC 79 /* Can not access a needed shared library */ |
#define ELIBBAD 80 /* Accessing a corrupted shared library */ |
#define ELIBSCN 81 /* .lib section in a.out corrupted */ |
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ |
#define ELIBEXEC 83 /* Cannot exec a shared library directly */ |
#define EILSEQ 84 /* Illegal byte sequence */ |
#define ERESTART 85 /* Interrupted system call should be restarted */ |
#define ESTRPIPE 86 /* Streams pipe error */ |
#define EUSERS 87 /* Too many users */ |
#define ENOTSOCK 88 /* Socket operation on non-socket */ |
#define EDESTADDRREQ 89 /* Destination address required */ |
#define EMSGSIZE 90 /* Message too long */ |
#define EPROTOTYPE 91 /* Protocol wrong type for socket */ |
#define ENOPROTOOPT 92 /* Protocol not available */ |
#define EPROTONOSUPPORT 93 /* Protocol not supported */ |
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ |
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ |
#define EPFNOSUPPORT 96 /* Protocol family not supported */ |
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ |
#define EADDRINUSE 98 /* Address already in use */ |
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ |
#define ENETDOWN 100 /* Network is down */ |
#define ENETUNREACH 101 /* Network is unreachable */ |
#define ENETRESET 102 /* Network dropped connection because of reset */ |
#define ECONNABORTED 103 /* Software caused connection abort */ |
#define ECONNRESET 104 /* Connection reset by peer */ |
#define ENOBUFS 105 /* No buffer space available */ |
#define EISCONN 106 /* Transport endpoint is already connected */ |
#define ENOTCONN 107 /* Transport endpoint is not connected */ |
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ |
#define ETOOMANYREFS 109 /* Too many references: cannot splice */ |
#define ETIMEDOUT 110 /* Connection timed out */ |
#define ECONNREFUSED 111 /* Connection refused */ |
#define EHOSTDOWN 112 /* Host is down */ |
#define EHOSTUNREACH 113 /* No route to host */ |
#define EALREADY 114 /* Operation already in progress */ |
#define EINPROGRESS 115 /* Operation now in progress */ |
#define ESTALE 116 /* Stale file handle */ |
#define EUCLEAN 117 /* Structure needs cleaning */ |
#define ENOTNAM 118 /* Not a XENIX named type file */ |
#define ENAVAIL 119 /* No XENIX semaphores available */ |
#define EISNAM 120 /* Is a named type file */ |
#define EREMOTEIO 121 /* Remote I/O error */ |
#define EDQUOT 122 /* Quota exceeded */ |
#define ENOMEDIUM 123 /* No medium found */ |
#define EMEDIUMTYPE 124 /* Wrong medium type */ |
#define ECANCELED 125 /* Operation Canceled */ |
#define ENOKEY 126 /* Required key not available */ |
#define EKEYEXPIRED 127 /* Key has expired */ |
#define EKEYREVOKED 128 /* Key has been revoked */ |
#define EKEYREJECTED 129 /* Key was rejected by service */ |
/* for robust mutexes */ |
#define EOWNERDEAD 130 /* Owner died */ |
#define ENOTRECOVERABLE 131 /* State not recoverable */ |
#define ERFKILL 132 /* Operation not possible due to RF-kill */ |
#define EHWPOISON 133 /* Memory page has hardware error */ |
#endif |
/drivers/include/uapi/asm-generic/int-l64.h |
---|
0,0 → 1,34 |
/* |
* asm-generic/int-l64.h |
* |
* Integer declarations for architectures which use "long" |
* for 64-bit types. |
*/ |
#ifndef _UAPI_ASM_GENERIC_INT_L64_H |
#define _UAPI_ASM_GENERIC_INT_L64_H |
#include <asm/bitsperlong.h> |
#ifndef __ASSEMBLY__ |
/* |
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the |
* header files exported to user space |
*/ |
typedef __signed__ char __s8; |
typedef unsigned char __u8; |
typedef __signed__ short __s16; |
typedef unsigned short __u16; |
typedef __signed__ int __s32; |
typedef unsigned int __u32; |
typedef __signed__ long __s64; |
typedef unsigned long __u64; |
#endif /* __ASSEMBLY__ */ |
#endif /* _UAPI_ASM_GENERIC_INT_L64_H */ |
/drivers/include/uapi/asm-generic/int-ll64.h |
---|
0,0 → 1,39 |
/* |
* asm-generic/int-ll64.h |
* |
* Integer declarations for architectures which use "long long" |
* for 64-bit types. |
*/ |
#ifndef _UAPI_ASM_GENERIC_INT_LL64_H |
#define _UAPI_ASM_GENERIC_INT_LL64_H |
#include <asm/bitsperlong.h> |
#ifndef __ASSEMBLY__ |
/* |
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the |
* header files exported to user space |
*/ |
typedef __signed__ char __s8; |
typedef unsigned char __u8; |
typedef __signed__ short __s16; |
typedef unsigned short __u16; |
typedef __signed__ int __s32; |
typedef unsigned int __u32; |
#ifdef __GNUC__ |
__extension__ typedef __signed__ long long __s64; |
__extension__ typedef unsigned long long __u64; |
#else |
typedef __signed__ long long __s64; |
typedef unsigned long long __u64; |
#endif |
#endif /* __ASSEMBLY__ */ |
#endif /* _UAPI_ASM_GENERIC_INT_LL64_H */ |
/drivers/include/uapi/asm-generic/ioctl.h |
---|
0,0 → 1,98 |
#ifndef _UAPI_ASM_GENERIC_IOCTL_H |
#define _UAPI_ASM_GENERIC_IOCTL_H |
/* ioctl command encoding: 32 bits total, command in lower 16 bits, |
* size of the parameter structure in the lower 14 bits of the |
* upper 16 bits. |
* Encoding the size of the parameter structure in the ioctl request |
* is useful for catching programs compiled with old versions |
* and to avoid overwriting user space outside the user buffer area. |
* The highest 2 bits are reserved for indicating the ``access mode''. |
* NOTE: This limits the max parameter size to 16KB - 1! |
*/ |
/* |
* The following is for compatibility across the various Linux |
* platforms. The generic ioctl numbering scheme doesn't really enforce |
* a type field. De facto, however, the top 8 bits of the lower 16 |
* bits are indeed used as a type field, so we might just as well make |
* this explicit here. Please be sure to use the decoding macros |
* below from now on. |
*/ |
#define _IOC_NRBITS 8 |
#define _IOC_TYPEBITS 8 |
/* |
* Let any architecture override either of the following before |
* including this file. |
*/ |
#ifndef _IOC_SIZEBITS |
# define _IOC_SIZEBITS 14 |
#endif |
#ifndef _IOC_DIRBITS |
# define _IOC_DIRBITS 2 |
#endif |
#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) |
#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) |
#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) |
#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) |
#define _IOC_NRSHIFT 0 |
#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) |
#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) |
#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) |
/* |
* Direction bits, which any architecture can choose to override |
* before including this file. |
*/ |
#ifndef _IOC_NONE |
# define _IOC_NONE 0U |
#endif |
#ifndef _IOC_WRITE |
# define _IOC_WRITE 1U |
#endif |
#ifndef _IOC_READ |
# define _IOC_READ 2U |
#endif |
#define _IOC(dir,type,nr,size) \ |
(((dir) << _IOC_DIRSHIFT) | \ |
((type) << _IOC_TYPESHIFT) | \ |
((nr) << _IOC_NRSHIFT) | \ |
((size) << _IOC_SIZESHIFT)) |
#ifndef __KERNEL__ |
#define _IOC_TYPECHECK(t) (sizeof(t)) |
#endif |
/* used to create numbers */ |
#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) |
#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size))) |
#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) |
#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) |
#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) |
#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) |
#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) |
/* used to decode ioctl numbers.. */ |
#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) |
#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) |
#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) |
#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) |
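/* |
 * Illustrative example (not part of the original header): a hypothetical |
 * ioctl encoded with _IOWR and decoded with the macros above. The 'F' |
 * type letter, command number 0x01 and argument struct are made up. |
 */ |
struct foo_args { int in; int out; }; |
#define FOO_IOC_XCHG _IOWR('F', 0x01, struct foo_args) |
/* |
 * _IOC_DIR(FOO_IOC_XCHG)  == (_IOC_READ | _IOC_WRITE) |
 * _IOC_TYPE(FOO_IOC_XCHG) == 'F' |
 * _IOC_NR(FOO_IOC_XCHG)   == 0x01 |
 * _IOC_SIZE(FOO_IOC_XCHG) == sizeof(struct foo_args) |
 */ |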
/* ...and for the drivers/sound files... */ |
#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) |
#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) |
#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) |
#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) |
#define IOCSIZE_SHIFT (_IOC_SIZESHIFT) |
#endif /* _UAPI_ASM_GENERIC_IOCTL_H */ |
/drivers/include/uapi/asm-generic/posix_types.h |
---|
0,0 → 1,96 |
#ifndef __ASM_GENERIC_POSIX_TYPES_H |
#define __ASM_GENERIC_POSIX_TYPES_H |
#include <asm/bitsperlong.h> |
/* |
* This file is generally used by user-level software, so you need to |
* be a little careful about namespace pollution etc. |
* |
* First the types that are often defined in different ways across |
* architectures, so that you can override them. |
*/ |
#ifndef __kernel_long_t |
typedef long __kernel_long_t; |
typedef unsigned long __kernel_ulong_t; |
#endif |
#ifndef __kernel_ino_t |
typedef __kernel_ulong_t __kernel_ino_t; |
#endif |
#ifndef __kernel_mode_t |
typedef unsigned int __kernel_mode_t; |
#endif |
#ifndef __kernel_pid_t |
typedef int __kernel_pid_t; |
#endif |
#ifndef __kernel_ipc_pid_t |
typedef int __kernel_ipc_pid_t; |
#endif |
#ifndef __kernel_uid_t |
typedef unsigned int __kernel_uid_t; |
typedef unsigned int __kernel_gid_t; |
#endif |
#ifndef __kernel_suseconds_t |
typedef __kernel_long_t __kernel_suseconds_t; |
#endif |
#ifndef __kernel_daddr_t |
typedef int __kernel_daddr_t; |
#endif |
#ifndef __kernel_uid32_t |
typedef unsigned int __kernel_uid32_t; |
typedef unsigned int __kernel_gid32_t; |
#endif |
#ifndef __kernel_old_uid_t |
typedef __kernel_uid_t __kernel_old_uid_t; |
typedef __kernel_gid_t __kernel_old_gid_t; |
#endif |
#ifndef __kernel_old_dev_t |
typedef unsigned int __kernel_old_dev_t; |
#endif |
/* |
* Most 32 bit architectures use "unsigned int" size_t, |
* and all 64 bit architectures use "unsigned long" size_t. |
*/ |
#ifndef __kernel_size_t |
#if __BITS_PER_LONG != 64 |
typedef unsigned int __kernel_size_t; |
typedef int __kernel_ssize_t; |
typedef int __kernel_ptrdiff_t; |
#else |
typedef __kernel_ulong_t __kernel_size_t; |
typedef __kernel_long_t __kernel_ssize_t; |
typedef __kernel_long_t __kernel_ptrdiff_t; |
#endif |
#endif |
#ifndef __kernel_fsid_t |
typedef struct { |
int val[2]; |
} __kernel_fsid_t; |
#endif |
/* |
* anything below here should be completely generic |
*/ |
typedef __kernel_long_t __kernel_off_t; |
typedef long long __kernel_loff_t; |
typedef __kernel_long_t __kernel_time_t; |
typedef __kernel_long_t __kernel_clock_t; |
typedef int __kernel_timer_t; |
typedef int __kernel_clockid_t; |
typedef char * __kernel_caddr_t; |
typedef unsigned short __kernel_uid16_t; |
typedef unsigned short __kernel_gid16_t; |
#endif /* __ASM_GENERIC_POSIX_TYPES_H */ |
/drivers/include/uapi/asm-generic/types.h |
---|
0,0 → 1,8 |
#ifndef _ASM_GENERIC_TYPES_H |
#define _ASM_GENERIC_TYPES_H |
/* |
* int-ll64 is used everywhere now. |
*/ |
#include <asm-generic/int-ll64.h> |
#endif /* _ASM_GENERIC_TYPES_H */ |
/drivers/include/uapi/drm/drm.h |
---|
0,0 → 1,866 |
/** |
* \file drm.h |
* Header for the Direct Rendering Manager |
* |
* \author Rickard E. (Rik) Faith <faith@valinux.com> |
* |
* \par Acknowledgments: |
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg. |
*/ |
/* |
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
* All rights reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef _DRM_H_ |
#define _DRM_H_ |
#if defined(__KERNEL__) || defined(__linux__) |
#include <linux/types.h> |
//#include <asm/ioctl.h> |
typedef unsigned int drm_handle_t; |
#else /* One of the BSDs */ |
#include <sys/ioccom.h> |
#include <sys/types.h> |
typedef int8_t __s8; |
typedef uint8_t __u8; |
typedef int16_t __s16; |
typedef uint16_t __u16; |
typedef int32_t __s32; |
typedef uint32_t __u32; |
typedef int64_t __s64; |
typedef uint64_t __u64; |
typedef unsigned long drm_handle_t; |
#endif |
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ |
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ |
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ |
#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */ |
#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */ |
#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */ |
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD) |
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) |
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) |
typedef unsigned int drm_context_t; |
typedef unsigned int drm_drawable_t; |
typedef unsigned int drm_magic_t; |
/** |
* Cliprect. |
* |
* \warning: If you change this structure, make sure you change |
* XF86DRIClipRectRec in the server as well |
* |
* \note KW: Actually it's illegal to change either for |
* backwards-compatibility reasons. |
*/ |
struct drm_clip_rect { |
unsigned short x1; |
unsigned short y1; |
unsigned short x2; |
unsigned short y2; |
}; |
/** |
* Drawable information. |
*/ |
struct drm_drawable_info { |
unsigned int num_rects; |
struct drm_clip_rect *rects; |
}; |
/** |
 * Texture region. |
*/ |
struct drm_tex_region { |
unsigned char next; |
unsigned char prev; |
unsigned char in_use; |
unsigned char padding; |
unsigned int age; |
}; |
/** |
* Hardware lock. |
* |
* The lock structure is a simple cache-line aligned integer. To avoid |
* processor bus contention on a multiprocessor system, there should not be any |
* other data stored in the same cache line. |
*/ |
struct drm_hw_lock { |
__volatile__ unsigned int lock; /**< lock variable */ |
char padding[60]; /**< Pad to cache line */ |
}; |
/** |
* DRM_IOCTL_VERSION ioctl argument type. |
* |
* \sa drmGetVersion(). |
*/ |
struct drm_version { |
int version_major; /**< Major version */ |
int version_minor; /**< Minor version */ |
int version_patchlevel; /**< Patch level */ |
size_t name_len; /**< Length of name buffer */ |
char __user *name; /**< Name of driver */ |
size_t date_len; /**< Length of date buffer */ |
char __user *date; /**< User-space buffer to hold date */ |
size_t desc_len; /**< Length of desc buffer */ |
char __user *desc; /**< User-space buffer to hold desc */ |
}; |
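/* |
 * Illustrative sketch (assumes an open DRM fd, ioctl(2), <stdlib.h>, |
 * <string.h>, and the DRM_IOCTL_VERSION number defined further below): |
 * drm_version is usually used in two passes -- first with NULL buffers |
 * to learn the string lengths, then again with buffers sized to fit. |
 */ |
static int get_driver_name(int fd, char **name_out) |
{ |
	struct drm_version v; |
	memset(&v, 0, sizeof(v)); |
	if (ioctl(fd, DRM_IOCTL_VERSION, &v))	/* pass 1: lengths only */ |
		return -1; |
	v.name = malloc(v.name_len + 1); |
	if (!v.name) |
		return -1; |
	if (ioctl(fd, DRM_IOCTL_VERSION, &v)) {	/* pass 2: fill buffer */ |
		free(v.name); |
		return -1; |
	} |
	v.name[v.name_len] = '\0'; |
	*name_out = v.name; |
	return 0; |
} |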
/** |
* DRM_IOCTL_GET_UNIQUE ioctl argument type. |
* |
* \sa drmGetBusid() and drmSetBusId(). |
*/ |
struct drm_unique { |
size_t unique_len; /**< Length of unique */ |
char __user *unique; /**< Unique name for driver instantiation */ |
}; |
struct drm_list { |
int count; /**< Length of user-space structures */ |
struct drm_version __user *version; |
}; |
struct drm_block { |
int unused; |
}; |
/** |
* DRM_IOCTL_CONTROL ioctl argument type. |
* |
* \sa drmCtlInstHandler() and drmCtlUninstHandler(). |
*/ |
struct drm_control { |
enum { |
DRM_ADD_COMMAND, |
DRM_RM_COMMAND, |
DRM_INST_HANDLER, |
DRM_UNINST_HANDLER |
} func; |
int irq; |
}; |
/** |
* Type of memory to map. |
*/ |
enum drm_map_type { |
_DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */ |
_DRM_REGISTERS = 1, /**< no caching, no core dump */ |
_DRM_SHM = 2, /**< shared, cached */ |
_DRM_AGP = 3, /**< AGP/GART */ |
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ |
_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ |
}; |
/** |
* Memory mapping flags. |
*/ |
enum drm_map_flags { |
_DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */ |
_DRM_READ_ONLY = 0x02, |
_DRM_LOCKED = 0x04, /**< shared, cached, locked */ |
_DRM_KERNEL = 0x08, /**< kernel requires access */ |
_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */ |
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */ |
_DRM_REMOVABLE = 0x40, /**< Removable mapping */ |
_DRM_DRIVER = 0x80 /**< Managed by driver */ |
}; |
struct drm_ctx_priv_map { |
unsigned int ctx_id; /**< Context requesting private mapping */ |
void *handle; /**< Handle of map */ |
}; |
/** |
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls |
* argument type. |
* |
* \sa drmAddMap(). |
*/ |
struct drm_map { |
unsigned long offset; /**< Requested physical address (0 for SAREA)*/ |
unsigned long size; /**< Requested physical size (bytes) */ |
enum drm_map_type type; /**< Type of memory to map */ |
enum drm_map_flags flags; /**< Flags */ |
void *handle; /**< User-space: "Handle" to pass to mmap() */ |
/**< Kernel-space: kernel-virtual address */ |
int mtrr; /**< MTRR slot used */ |
/* Private data */ |
}; |
/** |
* DRM_IOCTL_GET_CLIENT ioctl argument type. |
*/ |
struct drm_client { |
int idx; /**< Which client desired? */ |
int auth; /**< Is client authenticated? */ |
unsigned long pid; /**< Process ID */ |
unsigned long uid; /**< User ID */ |
unsigned long magic; /**< Magic */ |
unsigned long iocs; /**< Ioctl count */ |
}; |
enum drm_stat_type { |
_DRM_STAT_LOCK, |
_DRM_STAT_OPENS, |
_DRM_STAT_CLOSES, |
_DRM_STAT_IOCTLS, |
_DRM_STAT_LOCKS, |
_DRM_STAT_UNLOCKS, |
_DRM_STAT_VALUE, /**< Generic value */ |
_DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */ |
_DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */ |
_DRM_STAT_IRQ, /**< IRQ */ |
_DRM_STAT_PRIMARY, /**< Primary DMA bytes */ |
_DRM_STAT_SECONDARY, /**< Secondary DMA bytes */ |
_DRM_STAT_DMA, /**< DMA */ |
_DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */ |
_DRM_STAT_MISSED /**< Missed DMA opportunity */ |
/* Add to the *END* of the list */ |
}; |
/** |
* DRM_IOCTL_GET_STATS ioctl argument type. |
*/ |
struct drm_stats { |
unsigned long count; |
struct { |
unsigned long value; |
enum drm_stat_type type; |
} data[15]; |
}; |
/** |
* Hardware locking flags. |
*/ |
enum drm_lock_flags { |
_DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */ |
_DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */ |
_DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */ |
_DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */ |
/* These *HALT* flags aren't supported yet |
-- they will be used to support the |
full-screen DGA-like mode. */ |
_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */ |
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */ |
}; |
/** |
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type. |
* |
* \sa drmGetLock() and drmUnlock(). |
*/ |
struct drm_lock { |
int context; |
enum drm_lock_flags flags; |
}; |
/** |
* DMA flags |
* |
* \warning |
* These values \e must match xf86drm.h. |
* |
* \sa drm_dma. |
*/ |
enum drm_dma_flags { |
/* Flags for DMA buffer dispatch */ |
_DRM_DMA_BLOCK = 0x01, /**< |
* Block until buffer dispatched. |
* |
* \note The buffer may not yet have |
* been processed by the hardware -- |
* getting a hardware lock with the |
* hardware quiescent will ensure |
* that the buffer has been |
* processed. |
*/ |
_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */ |
_DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */ |
/* Flags for DMA buffer request */ |
_DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */ |
_DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */ |
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */ |
}; |
/** |
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type. |
* |
* \sa drmAddBufs(). |
*/ |
struct drm_buf_desc { |
int count; /**< Number of buffers of this size */ |
int size; /**< Size in bytes */ |
int low_mark; /**< Low water mark */ |
int high_mark; /**< High water mark */ |
enum { |
_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */ |
_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */ |
_DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */ |
_DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */ |
_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */ |
} flags; |
unsigned long agp_start; /**< |
* Start address of where the AGP buffers are |
* in the AGP aperture |
*/ |
}; |
/** |
* DRM_IOCTL_INFO_BUFS ioctl argument type. |
*/ |
struct drm_buf_info { |
int count; /**< Entries in list */ |
struct drm_buf_desc __user *list; |
}; |
/** |
* DRM_IOCTL_FREE_BUFS ioctl argument type. |
*/ |
struct drm_buf_free { |
int count; |
int __user *list; |
}; |
/** |
* Buffer information |
* |
* \sa drm_buf_map. |
*/ |
struct drm_buf_pub { |
int idx; /**< Index into the master buffer list */ |
int total; /**< Buffer size */ |
int used; /**< Amount of buffer in use (for DMA) */ |
void __user *address; /**< Address of buffer */ |
}; |
/** |
* DRM_IOCTL_MAP_BUFS ioctl argument type. |
*/ |
struct drm_buf_map { |
int count; /**< Length of the buffer list */ |
void __user *virtual; /**< Mmap'd area in user-virtual */ |
struct drm_buf_pub __user *list; /**< Buffer information */ |
}; |
/** |
* DRM_IOCTL_DMA ioctl argument type. |
* |
* Indices here refer to the offset into the buffer list in drm_buf_get. |
* |
* \sa drmDMA(). |
*/ |
struct drm_dma { |
int context; /**< Context handle */ |
int send_count; /**< Number of buffers to send */ |
int __user *send_indices; /**< List of handles to buffers */ |
int __user *send_sizes; /**< Lengths of data to send */ |
enum drm_dma_flags flags; /**< Flags */ |
int request_count; /**< Number of buffers requested */ |
int request_size; /**< Desired size for buffers */ |
int __user *request_indices; /**< Buffer information */ |
int __user *request_sizes; |
int granted_count; /**< Number of buffers granted */ |
}; |
enum drm_ctx_flags { |
_DRM_CONTEXT_PRESERVED = 0x01, |
_DRM_CONTEXT_2DONLY = 0x02 |
}; |
/** |
* DRM_IOCTL_ADD_CTX ioctl argument type. |
* |
* \sa drmCreateContext() and drmDestroyContext(). |
*/ |
struct drm_ctx { |
drm_context_t handle; |
enum drm_ctx_flags flags; |
}; |
/** |
* DRM_IOCTL_RES_CTX ioctl argument type. |
*/ |
struct drm_ctx_res { |
int count; |
struct drm_ctx __user *contexts; |
}; |
/** |
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type. |
*/ |
struct drm_draw { |
drm_drawable_t handle; |
}; |
/** |
* DRM_IOCTL_UPDATE_DRAW ioctl argument type. |
*/ |
typedef enum { |
DRM_DRAWABLE_CLIPRECTS, |
} drm_drawable_info_type_t; |
struct drm_update_draw { |
drm_drawable_t handle; |
unsigned int type; |
unsigned int num; |
unsigned long long data; |
}; |
/** |
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type. |
*/ |
struct drm_auth { |
drm_magic_t magic; |
}; |
/** |
* DRM_IOCTL_IRQ_BUSID ioctl argument type. |
* |
* \sa drmGetInterruptFromBusID(). |
*/ |
struct drm_irq_busid { |
int irq; /**< IRQ number */ |
int busnum; /**< bus number */ |
int devnum; /**< device number */ |
int funcnum; /**< function number */ |
}; |
enum drm_vblank_seq_type { |
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ |
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ |
/* bits 1-6 are reserved for high crtcs */ |
_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e, |
_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ |
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ |
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ |
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ |
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */ |
}; |
#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1 |
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) |
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ |
_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS) |
struct drm_wait_vblank_request { |
enum drm_vblank_seq_type type; |
unsigned int sequence; |
unsigned long signal; |
}; |
struct drm_wait_vblank_reply { |
enum drm_vblank_seq_type type; |
unsigned int sequence; |
long tval_sec; |
long tval_usec; |
}; |
/** |
* DRM_IOCTL_WAIT_VBLANK ioctl argument type. |
* |
* \sa drmWaitVBlank(). |
*/ |
union drm_wait_vblank { |
struct drm_wait_vblank_request request; |
struct drm_wait_vblank_reply reply; |
}; |
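/* |
 * Illustrative sketch (assumes an open DRM fd, <string.h>, <sys/ioctl.h>, |
 * and the DRM_IOCTL_WAIT_VBLANK number defined further below): block |
 * until the next vblank using the request/reply union above. |
 */ |
static int wait_one_vblank(int fd) |
{ |
	union drm_wait_vblank vbl; |
	memset(&vbl, 0, sizeof(vbl)); |
	vbl.request.type = _DRM_VBLANK_RELATIVE;	/* relative to now */ |
	vbl.request.sequence = 1;			/* one vblank ahead */ |
	if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl)) |
		return -1; |
	/* vbl.reply.sequence and tval_sec/tval_usec are now valid */ |
	return 0; |
} |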
#define _DRM_PRE_MODESET 1 |
#define _DRM_POST_MODESET 2 |
/** |
* DRM_IOCTL_MODESET_CTL ioctl argument type |
* |
* \sa drmModesetCtl(). |
*/ |
struct drm_modeset_ctl { |
__u32 crtc; |
__u32 cmd; |
}; |
/** |
* DRM_IOCTL_AGP_ENABLE ioctl argument type. |
* |
* \sa drmAgpEnable(). |
*/ |
struct drm_agp_mode { |
unsigned long mode; /**< AGP mode */ |
}; |
/** |
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type. |
* |
* \sa drmAgpAlloc() and drmAgpFree(). |
*/ |
struct drm_agp_buffer { |
unsigned long size; /**< In bytes -- will round to page boundary */ |
unsigned long handle; /**< Used for binding / unbinding */ |
unsigned long type; /**< Type of memory to allocate */ |
unsigned long physical; /**< Physical used by i810 */ |
}; |
/** |
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type. |
* |
* \sa drmAgpBind() and drmAgpUnbind(). |
*/ |
struct drm_agp_binding { |
unsigned long handle; /**< From drm_agp_buffer */ |
unsigned long offset; /**< In bytes -- will round to page boundary */ |
}; |
/** |
* DRM_IOCTL_AGP_INFO ioctl argument type. |
* |
* \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(), |
* drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(), |
* drmAgpVendorId() and drmAgpDeviceId(). |
*/ |
struct drm_agp_info { |
int agp_version_major; |
int agp_version_minor; |
unsigned long mode; |
unsigned long aperture_base; /* physical address */ |
unsigned long aperture_size; /* bytes */ |
unsigned long memory_allowed; /* bytes */ |
unsigned long memory_used; |
/* PCI information */ |
unsigned short id_vendor; |
unsigned short id_device; |
}; |
/** |
* DRM_IOCTL_SG_ALLOC ioctl argument type. |
*/ |
struct drm_scatter_gather { |
unsigned long size; /**< In bytes -- will round to page boundary */ |
unsigned long handle; /**< Used for mapping / unmapping */ |
}; |
/** |
* DRM_IOCTL_SET_VERSION ioctl argument type. |
*/ |
struct drm_set_version { |
int drm_di_major; |
int drm_di_minor; |
int drm_dd_major; |
int drm_dd_minor; |
}; |
/** DRM_IOCTL_GEM_CLOSE ioctl argument type */ |
struct drm_gem_close { |
/** Handle of the object to be closed. */ |
__u32 handle; |
__u32 pad; |
}; |
/** DRM_IOCTL_GEM_FLINK ioctl argument type */ |
struct drm_gem_flink { |
/** Handle for the object being named */ |
__u32 handle; |
/** Returned global name */ |
__u32 name; |
}; |
/** DRM_IOCTL_GEM_OPEN ioctl argument type */ |
struct drm_gem_open { |
/** Name of object being opened */ |
__u32 name; |
/** Returned handle for the object */ |
__u32 handle; |
/** Returned size of the object */ |
__u64 size; |
}; |
#define DRM_CAP_DUMB_BUFFER 0x1 |
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2 |
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3 |
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4 |
#define DRM_CAP_PRIME 0x5 |
#define DRM_PRIME_CAP_IMPORT 0x1 |
#define DRM_PRIME_CAP_EXPORT 0x2 |
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 |
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7 |
/* |
 * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid width x height |
 * combination for the hardware cursor. The intention is that a hardware- |
 * agnostic userspace can query a cursor plane size to use. |
* |
* Note that the cross-driver contract is to merely return a valid size; |
 * drivers are free to attach another meaning on top, e.g. i915 returns the |
* maximum plane size. |
*/ |
#define DRM_CAP_CURSOR_WIDTH 0x8 |
#define DRM_CAP_CURSOR_HEIGHT 0x9 |
/** DRM_IOCTL_GET_CAP ioctl argument type */ |
struct drm_get_cap { |
__u64 capability; |
__u64 value; |
}; |
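/* |
 * Illustrative sketch (assumes <sys/ioctl.h> and the DRM_IOCTL_GET_CAP |
 * number defined further below): probe one capability, here dumb-buffer |
 * support. |
 */ |
static int supports_dumb_buffers(int fd) |
{ |
	struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER }; |
	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap)) |
		return 0;	/* treat failure as "not supported" */ |
	return cap.value != 0; |
} |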
/** |
* DRM_CLIENT_CAP_STEREO_3D |
* |
 * If set to 1, the DRM core will expose the stereo 3D capabilities of the |
* monitor by advertising the supported 3D layouts in the flags of struct |
* drm_mode_modeinfo. |
*/ |
#define DRM_CLIENT_CAP_STEREO_3D 1 |
/** |
* DRM_CLIENT_CAP_UNIVERSAL_PLANES |
* |
* If set to 1, the DRM core will expose all planes (overlay, primary, and |
* cursor) to userspace. |
*/ |
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2 |
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ |
struct drm_set_client_cap { |
__u64 capability; |
__u64 value; |
}; |
#define DRM_CLOEXEC O_CLOEXEC |
struct drm_prime_handle { |
__u32 handle; |
/** Flags.. only applicable for handle->fd */ |
__u32 flags; |
/** Returned dmabuf file descriptor */ |
__s32 fd; |
}; |
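/* |
 * Illustrative sketch (assumes <fcntl.h> for O_CLOEXEC, <sys/ioctl.h>, |
 * and the DRM_IOCTL_PRIME_HANDLE_TO_FD number defined further below): |
 * export a GEM handle as a dmabuf file descriptor. |
 */ |
static int gem_handle_to_dmabuf_fd(int fd, __u32 handle) |
{ |
	struct drm_prime_handle args = { |
		.handle = handle, |
		.flags  = DRM_CLOEXEC,	/* don't leak the fd across exec */ |
	}; |
	if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args)) |
		return -1; |
	return args.fd; |
} |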
#include <drm/drm_mode.h> |
#define DRM_IOCTL_BASE 'd' |
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) |
#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) |
#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type) |
#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type) |
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version) |
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique) |
#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth) |
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid) |
#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map) |
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) |
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) |
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) |
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) |
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close) |
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) |
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) |
#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap) |
#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap) |
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) |
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) |
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block) |
#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block) |
#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control) |
#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map) |
#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc) |
#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc) |
#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info) |
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map) |
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free) |
#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map) |
#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map) |
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map) |
#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e) |
#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f) |
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx) |
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx) |
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx) |
#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx) |
#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx) |
#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx) |
#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res) |
#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw) |
#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw) |
#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma) |
#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock) |
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock) |
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock) |
#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle) |
#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle) |
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30) |
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31) |
#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode) |
#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info) |
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer) |
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer) |
#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding) |
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding) |
#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather) |
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather) |
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) |
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) |
#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res) |
#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc) |
#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc) |
#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor) |
#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut) |
#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut) |
#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder) |
#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector) |
#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */ |
#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */ |
#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property) |
#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property) |
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob) |
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd) |
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd) |
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int) |
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip) |
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd) |
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb) |
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb) |
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb) |
#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res) |
#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane) |
#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane) |
#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2) |
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties) |
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property) |
#define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2) |
/** |
* Device specific ioctls should only be in their respective headers |
* The device specific ioctl range is from 0x40 to 0x9f. |
* Generic IOCTLS restart at 0xA0. |
* |
* \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and |
* drmCommandReadWrite(). |
*/ |
#define DRM_COMMAND_BASE 0x40 |
#define DRM_COMMAND_END 0xA0 |
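/* |
 * Illustrative example: a driver-private ioctl is numbered relative to |
 * DRM_COMMAND_BASE. The command index and argument struct below are |
 * hypothetical; real drivers define theirs in their own uapi headers |
 * (see i915_drm.h later in this tree). |
 */ |
struct my_drv_getparam { __u32 param; __u32 value; }; |
#define DRM_MY_DRV_GETPARAM	0x00 |
#define DRM_IOCTL_MY_DRV_GETPARAM \ |
	DRM_IOWR(DRM_COMMAND_BASE + DRM_MY_DRV_GETPARAM, struct my_drv_getparam) |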
/** |
* Header for events written back to userspace on the drm fd. The |
* type defines the type of event, the length specifies the total |
* length of the event (including the header), and user_data is |
* typically a 64 bit value passed with the ioctl that triggered the |
 * event. A read on the drm fd will only ever return complete |
 * events; for example, if the read buffer is 100 bytes and there |
 * are two 64-byte events pending, only one will be returned. |
* |
* Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and |
* up are chipset specific. |
*/ |
struct drm_event { |
__u32 type; |
__u32 length; |
}; |
#define DRM_EVENT_VBLANK 0x01 |
#define DRM_EVENT_FLIP_COMPLETE 0x02 |
struct drm_event_vblank { |
struct drm_event base; |
__u64 user_data; |
__u32 tv_sec; |
__u32 tv_usec; |
__u32 sequence; |
__u32 reserved; |
}; |
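/* |
 * Illustrative sketch (assumes <unistd.h> and <string.h>): because |
 * read(2) on the drm fd returns only whole events, a buffer can be |
 * walked header by header using the length field. |
 */ |
static void drain_drm_events(int fd) |
{ |
	char buf[1024]; |
	ssize_t len = read(fd, buf, sizeof(buf)); |
	ssize_t off = 0; |
	while (off + (ssize_t)sizeof(struct drm_event) <= len) { |
		struct drm_event ev; |
		memcpy(&ev, buf + off, sizeof(ev));	/* header first */ |
		if (ev.type == DRM_EVENT_VBLANK) { |
			struct drm_event_vblank vbl; |
			memcpy(&vbl, buf + off, sizeof(vbl)); |
			/* vbl.sequence, vbl.tv_sec/tv_usec are usable here */ |
		} |
		off += ev.length;	/* length includes the header */ |
	} |
} |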
/* typedef area */ |
#ifndef __KERNEL__ |
typedef struct drm_clip_rect drm_clip_rect_t; |
typedef struct drm_drawable_info drm_drawable_info_t; |
typedef struct drm_tex_region drm_tex_region_t; |
typedef struct drm_hw_lock drm_hw_lock_t; |
typedef struct drm_version drm_version_t; |
typedef struct drm_unique drm_unique_t; |
typedef struct drm_list drm_list_t; |
typedef struct drm_block drm_block_t; |
typedef struct drm_control drm_control_t; |
typedef enum drm_map_type drm_map_type_t; |
typedef enum drm_map_flags drm_map_flags_t; |
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t; |
typedef struct drm_map drm_map_t; |
typedef struct drm_client drm_client_t; |
typedef enum drm_stat_type drm_stat_type_t; |
typedef struct drm_stats drm_stats_t; |
typedef enum drm_lock_flags drm_lock_flags_t; |
typedef struct drm_lock drm_lock_t; |
typedef enum drm_dma_flags drm_dma_flags_t; |
typedef struct drm_buf_desc drm_buf_desc_t; |
typedef struct drm_buf_info drm_buf_info_t; |
typedef struct drm_buf_free drm_buf_free_t; |
typedef struct drm_buf_pub drm_buf_pub_t; |
typedef struct drm_buf_map drm_buf_map_t; |
typedef struct drm_dma drm_dma_t; |
typedef union drm_wait_vblank drm_wait_vblank_t; |
typedef struct drm_agp_mode drm_agp_mode_t; |
typedef enum drm_ctx_flags drm_ctx_flags_t; |
typedef struct drm_ctx drm_ctx_t; |
typedef struct drm_ctx_res drm_ctx_res_t; |
typedef struct drm_draw drm_draw_t; |
typedef struct drm_update_draw drm_update_draw_t; |
typedef struct drm_auth drm_auth_t; |
typedef struct drm_irq_busid drm_irq_busid_t; |
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t; |
typedef struct drm_agp_buffer drm_agp_buffer_t; |
typedef struct drm_agp_binding drm_agp_binding_t; |
typedef struct drm_agp_info drm_agp_info_t; |
typedef struct drm_scatter_gather drm_scatter_gather_t; |
typedef struct drm_set_version drm_set_version_t; |
#endif |
#endif |
/drivers/include/uapi/drm/drm_fourcc.h |
---|
0,0 → 1,135 |
/* |
* Copyright 2011 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef DRM_FOURCC_H |
#define DRM_FOURCC_H |
#include <linux/types.h> |
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \ |
((__u32)(c) << 16) | ((__u32)(d) << 24)) |
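/* |
 * Worked example: fourcc_code() packs four ASCII bytes little-endian, |
 * so DRM_FORMAT_XRGB8888 below, fourcc_code('X', 'R', '2', '4'), is |
 * 0x58 | 0x52<<8 | 0x32<<16 | 0x34<<24 == 0x34325258. |
 */ |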
#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */ |
/* color index */ |
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */ |
/* 8 bpp RGB */ |
#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */ |
#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */ |
/* 16 bpp RGB */ |
#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */ |
#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */ |
#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */ |
#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */ |
#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */ |
#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */ |
#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */ |
#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */ |
#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */ |
#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */ |
#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */ |
#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */ |
#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */ |
#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */ |
#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */ |
#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */ |
#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */ |
#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */ |
/* 24 bpp RGB */ |
#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */ |
#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */ |
/* 32 bpp RGB */ |
#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */ |
#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */ |
#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */ |
#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */ |
#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */ |
#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */ |
#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */ |
#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */ |
#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */ |
#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */ |
#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */ |
#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */ |
#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */ |
#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */ |
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */ |
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */ |
/* packed YCbCr */ |
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */ |
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */ |
#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */ |
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */ |
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */ |
/* |
* 2 plane YCbCr |
* index 0 = Y plane, [7:0] Y |
* index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian |
* or |
* index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian |
*/ |
#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */ |
#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */ |
#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */ |
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */ |
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */ |
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */ |
/* special NV12 tiled format */ |
#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */ |
/* |
* 3 plane YCbCr |
* index 0: Y plane, [7:0] Y |
* index 1: Cb plane, [7:0] Cb |
* index 2: Cr plane, [7:0] Cr |
* or |
* index 1: Cr plane, [7:0] Cr |
* index 2: Cb plane, [7:0] Cb |
*/ |
#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */ |
#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */ |
#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */ |
#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */ |
#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */ |
#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */ |
#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */ |
#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */ |
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */ |
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */ |
#endif /* DRM_FOURCC_H */ |
/drivers/include/uapi/drm/drm_mode.h |
---|
0,0 → 1,522 |
/* |
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie> |
* Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com> |
* Copyright (c) 2008 Red Hat Inc. |
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA |
* Copyright (c) 2007-2008 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
* IN THE SOFTWARE. |
*/ |
#ifndef _DRM_MODE_H |
#define _DRM_MODE_H |
#include <linux/types.h> |
#define DRM_DISPLAY_INFO_LEN 32 |
#define DRM_CONNECTOR_NAME_LEN 32 |
#define DRM_DISPLAY_MODE_LEN 32 |
#define DRM_PROP_NAME_LEN 32 |
#define DRM_MODE_TYPE_BUILTIN (1<<0) |
#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) |
#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) |
#define DRM_MODE_TYPE_PREFERRED (1<<3) |
#define DRM_MODE_TYPE_DEFAULT (1<<4) |
#define DRM_MODE_TYPE_USERDEF (1<<5) |
#define DRM_MODE_TYPE_DRIVER (1<<6) |
/* Video mode flags */ |
/* bit compatible with the xorg definitions. */ |
#define DRM_MODE_FLAG_PHSYNC (1<<0) |
#define DRM_MODE_FLAG_NHSYNC (1<<1) |
#define DRM_MODE_FLAG_PVSYNC (1<<2) |
#define DRM_MODE_FLAG_NVSYNC (1<<3) |
#define DRM_MODE_FLAG_INTERLACE (1<<4) |
#define DRM_MODE_FLAG_DBLSCAN (1<<5) |
#define DRM_MODE_FLAG_CSYNC (1<<6) |
#define DRM_MODE_FLAG_PCSYNC (1<<7) |
#define DRM_MODE_FLAG_NCSYNC (1<<8) |
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */ |
#define DRM_MODE_FLAG_BCAST (1<<10) |
#define DRM_MODE_FLAG_PIXMUX (1<<11) |
#define DRM_MODE_FLAG_DBLCLK (1<<12) |
#define DRM_MODE_FLAG_CLKDIV2 (1<<13) |
/* |
* When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX |
* (define not exposed to user space). |
*/ |
#define DRM_MODE_FLAG_3D_MASK (0x1f<<14) |
#define DRM_MODE_FLAG_3D_NONE (0<<14) |
#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14) |
#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14) |
#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14) |
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14) |
#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14) |
#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14) |
#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14) |
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14) |
/* DPMS flags */ |
/* bit compatible with the xorg definitions. */ |
#define DRM_MODE_DPMS_ON 0 |
#define DRM_MODE_DPMS_STANDBY 1 |
#define DRM_MODE_DPMS_SUSPEND 2 |
#define DRM_MODE_DPMS_OFF 3 |
/* Scaling mode options */ |
#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or |
software can still scale) */ |
#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */ |
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */ |
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */ |
/* Picture aspect ratio options */ |
#define DRM_MODE_PICTURE_ASPECT_NONE 0 |
#define DRM_MODE_PICTURE_ASPECT_4_3 1 |
#define DRM_MODE_PICTURE_ASPECT_16_9 2 |
/* Dithering mode options */ |
#define DRM_MODE_DITHERING_OFF 0 |
#define DRM_MODE_DITHERING_ON 1 |
#define DRM_MODE_DITHERING_AUTO 2 |
/* Dirty info options */ |
#define DRM_MODE_DIRTY_OFF 0 |
#define DRM_MODE_DIRTY_ON 1 |
#define DRM_MODE_DIRTY_ANNOTATE 2 |
struct drm_mode_modeinfo { |
__u32 clock; |
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew; |
__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan; |
__u32 vrefresh; |
__u32 flags; |
__u32 type; |
char name[DRM_DISPLAY_MODE_LEN]; |
}; |
struct drm_mode_card_res { |
__u64 fb_id_ptr; |
__u64 crtc_id_ptr; |
__u64 connector_id_ptr; |
__u64 encoder_id_ptr; |
__u32 count_fbs; |
__u32 count_crtcs; |
__u32 count_connectors; |
__u32 count_encoders; |
__u32 min_width, max_width; |
__u32 min_height, max_height; |
}; |
struct drm_mode_crtc { |
__u64 set_connectors_ptr; |
__u32 count_connectors; |
__u32 crtc_id; /**< Id */ |
__u32 fb_id; /**< Id of framebuffer */ |
__u32 x, y; /**< Position on the framebuffer */ |
__u32 gamma_size; |
__u32 mode_valid; |
struct drm_mode_modeinfo mode; |
}; |
#define DRM_MODE_PRESENT_TOP_FIELD (1<<0) |
#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1) |
/* Planes blend with or override other bits on the CRTC */ |
struct drm_mode_set_plane { |
__u32 plane_id; |
__u32 crtc_id; |
__u32 fb_id; /* fb object contains surface format type */ |
__u32 flags; /* see above flags */ |
/* Signed dest location allows it to be partially off screen */ |
__s32 crtc_x, crtc_y; |
__u32 crtc_w, crtc_h; |
/* Source values are 16.16 fixed point */ |
__u32 src_x, src_y; |
__u32 src_h, src_w; |
}; |
struct drm_mode_get_plane { |
__u32 plane_id; |
__u32 crtc_id; |
__u32 fb_id; |
__u32 possible_crtcs; |
__u32 gamma_size; |
__u32 count_format_types; |
__u64 format_type_ptr; |
}; |
struct drm_mode_get_plane_res { |
__u64 plane_id_ptr; |
__u32 count_planes; |
}; |
#define DRM_MODE_ENCODER_NONE 0 |
#define DRM_MODE_ENCODER_DAC 1 |
#define DRM_MODE_ENCODER_TMDS 2 |
#define DRM_MODE_ENCODER_LVDS 3 |
#define DRM_MODE_ENCODER_TVDAC 4 |
#define DRM_MODE_ENCODER_VIRTUAL 5 |
#define DRM_MODE_ENCODER_DSI 6 |
#define DRM_MODE_ENCODER_DPMST 7 |
struct drm_mode_get_encoder { |
__u32 encoder_id; |
__u32 encoder_type; |
__u32 crtc_id; /**< Id of crtc */ |
__u32 possible_crtcs; |
__u32 possible_clones; |
}; |
/* This is for connectors with multiple signal types. */ |
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */ |
#define DRM_MODE_SUBCONNECTOR_Automatic 0 |
#define DRM_MODE_SUBCONNECTOR_Unknown 0 |
#define DRM_MODE_SUBCONNECTOR_DVID 3 |
#define DRM_MODE_SUBCONNECTOR_DVIA 4 |
#define DRM_MODE_SUBCONNECTOR_Composite 5 |
#define DRM_MODE_SUBCONNECTOR_SVIDEO 6 |
#define DRM_MODE_SUBCONNECTOR_Component 8 |
#define DRM_MODE_SUBCONNECTOR_SCART 9 |
#define DRM_MODE_CONNECTOR_Unknown 0 |
#define DRM_MODE_CONNECTOR_VGA 1 |
#define DRM_MODE_CONNECTOR_DVII 2 |
#define DRM_MODE_CONNECTOR_DVID 3 |
#define DRM_MODE_CONNECTOR_DVIA 4 |
#define DRM_MODE_CONNECTOR_Composite 5 |
#define DRM_MODE_CONNECTOR_SVIDEO 6 |
#define DRM_MODE_CONNECTOR_LVDS 7 |
#define DRM_MODE_CONNECTOR_Component 8 |
#define DRM_MODE_CONNECTOR_9PinDIN 9 |
#define DRM_MODE_CONNECTOR_DisplayPort 10 |
#define DRM_MODE_CONNECTOR_HDMIA 11 |
#define DRM_MODE_CONNECTOR_HDMIB 12 |
#define DRM_MODE_CONNECTOR_TV 13 |
#define DRM_MODE_CONNECTOR_eDP 14 |
#define DRM_MODE_CONNECTOR_VIRTUAL 15 |
#define DRM_MODE_CONNECTOR_DSI 16 |
struct drm_mode_get_connector { |
__u64 encoders_ptr; |
__u64 modes_ptr; |
__u64 props_ptr; |
__u64 prop_values_ptr; |
__u32 count_modes; |
__u32 count_props; |
__u32 count_encoders; |
__u32 encoder_id; /**< Current Encoder */ |
__u32 connector_id; /**< Id */ |
__u32 connector_type; |
__u32 connector_type_id; |
__u32 connection; |
__u32 mm_width, mm_height; /**< width x height in millimeters */ |
__u32 subpixel; |
__u32 pad; |
}; |
#define DRM_MODE_PROP_PENDING (1<<0) |
#define DRM_MODE_PROP_RANGE (1<<1) |
#define DRM_MODE_PROP_IMMUTABLE (1<<2) |
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */ |
#define DRM_MODE_PROP_BLOB (1<<4) |
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */ |
/* non-extended types: legacy bitmask, one bit per type: */ |
#define DRM_MODE_PROP_LEGACY_TYPE ( \ |
DRM_MODE_PROP_RANGE | \ |
DRM_MODE_PROP_ENUM | \ |
DRM_MODE_PROP_BLOB | \ |
DRM_MODE_PROP_BITMASK) |
/* extended-types: rather than continue to consume a bit per type, |
* grab a chunk of the bits to use as integer type id. |
*/ |
#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0 |
#define DRM_MODE_PROP_TYPE(n) ((n) << 6) |
#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1) |
#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2) |
struct drm_mode_property_enum { |
__u64 value; |
char name[DRM_PROP_NAME_LEN]; |
}; |
struct drm_mode_get_property { |
__u64 values_ptr; /* values and blob lengths */ |
__u64 enum_blob_ptr; /* enum and blob id ptrs */ |
__u32 prop_id; |
__u32 flags; |
char name[DRM_PROP_NAME_LEN]; |
__u32 count_values; |
/* This is only used to count enum values, not blobs. The _blobs |
* suffix is kept only for historical reasons, i.e. backwards compat. */ |
__u32 count_enum_blobs; |
}; |
struct drm_mode_connector_set_property { |
__u64 value; |
__u32 prop_id; |
__u32 connector_id; |
}; |
struct drm_mode_obj_get_properties { |
__u64 props_ptr; |
__u64 prop_values_ptr; |
__u32 count_props; |
__u32 obj_id; |
__u32 obj_type; |
}; |
struct drm_mode_obj_set_property { |
__u64 value; |
__u32 prop_id; |
__u32 obj_id; |
__u32 obj_type; |
}; |
struct drm_mode_get_blob { |
__u32 blob_id; |
__u32 length; |
__u64 data; |
}; |
struct drm_mode_fb_cmd { |
__u32 fb_id; |
__u32 width, height; |
__u32 pitch; |
__u32 bpp; |
__u32 depth; |
/* driver specific handle */ |
__u32 handle; |
}; |
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */ |
struct drm_mode_fb_cmd2 { |
__u32 fb_id; |
__u32 width, height; |
__u32 pixel_format; /* fourcc code from drm_fourcc.h */ |
__u32 flags; /* see above flags */ |
/* |
* In case of planar formats, this ioctl allows up to 4 |
 * buffer objects with offsets and pitches per plane. |
* The pitch and offset order is dictated by the fourcc, |
* e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as: |
* |
* YUV 4:2:0 image with a plane of 8 bit Y samples |
* followed by an interleaved U/V plane containing |
* 8 bit 2x2 subsampled colour difference samples. |
* |
* So it would consist of Y as offset[0] and UV as |
 * offset[1]. Note that offset[0] will generally |
* be 0. |
*/ |
__u32 handles[4]; |
__u32 pitches[4]; /* pitch for each plane */ |
__u32 offsets[4]; /* offset of each plane */ |
}; |
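/* |
 * Illustrative sketch of the NV12 case described above (assumes |
 * <string.h> and DRM_FORMAT_NV12 from drm_fourcc.h): both planes live |
 * in one buffer object, with the interleaved Cb:Cr plane placed after |
 * the full-resolution Y plane. The handle value comes from the caller. |
 */ |
static void fill_nv12_fb(struct drm_mode_fb_cmd2 *f, __u32 bo_handle, |
			 __u32 width, __u32 height) |
{ |
	memset(f, 0, sizeof(*f)); |
	f->width	= width; |
	f->height	= height; |
	f->pixel_format	= DRM_FORMAT_NV12; |
	f->handles[0]	= bo_handle;		/* Y plane */ |
	f->pitches[0]	= width; |
	f->offsets[0]	= 0; |
	f->handles[1]	= bo_handle;		/* interleaved Cb:Cr plane */ |
	f->pitches[1]	= width;		/* 2x2 subsampled pairs */ |
	f->offsets[1]	= width * height;	/* starts right after Y */ |
} |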
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01 |
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 |
#define DRM_MODE_FB_DIRTY_FLAGS 0x03 |
#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256 |
/* |
* Mark a region of a framebuffer as dirty. |
* |
* Some hardware does not automatically update display contents |
* as a hardware or software draw to a framebuffer. This ioctl |
* allows userspace to tell the kernel and the hardware what |
* regions of the framebuffer have changed. |
* |
 * The kernel or hardware is free to update more than just the |
* region specified by the clip rects. The kernel or hardware |
* may also delay and/or coalesce several calls to dirty into a |
* single update. |
* |
 * Userspace may annotate the updates; each annotation is a |
* promise made by the caller that the change is either a copy |
* of pixels or a fill of a single color in the region specified. |
* |
* If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then |
 * the number of updated regions is half of num_clips given, |
* where the clip rects are paired in src and dst. The width and |
* height of each one of the pairs must match. |
* |
* If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller |
* promises that the region specified of the clip rects is filled |
* completely with a single color as given in the color argument. |
*/ |
struct drm_mode_fb_dirty_cmd { |
__u32 fb_id; |
__u32 flags; |
__u32 color; |
__u32 num_clips; |
__u64 clips_ptr; |
}; |
struct drm_mode_mode_cmd { |
__u32 connector_id; |
struct drm_mode_modeinfo mode; |
}; |
#define DRM_MODE_CURSOR_BO 0x01 |
#define DRM_MODE_CURSOR_MOVE 0x02 |
#define DRM_MODE_CURSOR_FLAGS 0x03 |
/* |
* depending on the value in flags different members are used. |
* |
* CURSOR_BO uses |
* crtc_id |
* width |
* height |
* handle - if 0 turns the cursor off |
* |
* CURSOR_MOVE uses |
* crtc_id |
* x |
* y |
*/ |
struct drm_mode_cursor { |
__u32 flags; |
__u32 crtc_id; |
__s32 x; |
__s32 y; |
__u32 width; |
__u32 height; |
/* driver specific handle */ |
__u32 handle; |
}; |
struct drm_mode_cursor2 { |
__u32 flags; |
__u32 crtc_id; |
__s32 x; |
__s32 y; |
__u32 width; |
__u32 height; |
/* driver specific handle */ |
__u32 handle; |
__s32 hot_x; |
__s32 hot_y; |
}; |
struct drm_mode_crtc_lut { |
__u32 crtc_id; |
__u32 gamma_size; |
/* pointers to arrays */ |
__u64 red; |
__u64 green; |
__u64 blue; |
}; |
#define DRM_MODE_PAGE_FLIP_EVENT 0x01 |
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02 |
#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC) |
/* |
* Request a page flip on the specified crtc. |
* |
* This ioctl will ask KMS to schedule a page flip for the specified |
* crtc. Once any pending rendering targeting the specified fb (as of |
* ioctl time) has completed, the crtc will be reprogrammed to display |
* that fb after the next vertical refresh. The ioctl returns |
* immediately, but subsequent rendering to the current fb will block |
* in the execbuffer ioctl until the page flip happens. If a page |
* flip is already pending as the ioctl is called, EBUSY will be |
* returned. |
* |
* Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank |
* event (see drm.h: struct drm_event_vblank) when the page flip is |
* done. The user_data field passed in with this ioctl will be |
* returned as the user_data field in the vblank event struct. |
* |
* Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen |
 * 'as soon as possible', meaning that it does not wait for vblank. |
* This may cause tearing on the screen. |
* |
* The reserved field must be zero until we figure out something |
* clever to use it for. |
*/ |
struct drm_mode_crtc_page_flip { |
__u32 crtc_id; |
__u32 fb_id; |
__u32 flags; |
__u32 reserved; |
__u64 user_data; |
}; |
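/* |
 * Illustrative sketch (assumes <sys/ioctl.h> and the |
 * DRM_IOCTL_MODE_PAGE_FLIP number from drm.h): queue a flip to fb_id |
 * and request a vblank event on completion; the ioctl fails with EBUSY |
 * if a flip is already pending. |
 */ |
static int queue_page_flip(int fd, __u32 crtc_id, __u32 fb_id, |
			   __u64 user_data) |
{ |
	struct drm_mode_crtc_page_flip flip = { |
		.crtc_id   = crtc_id, |
		.fb_id     = fb_id, |
		.flags     = DRM_MODE_PAGE_FLIP_EVENT, |
		.user_data = user_data,	/* echoed back in the vblank event */ |
	}; |
	return ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip); |
} |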
/* create a dumb scanout buffer */ |
struct drm_mode_create_dumb { |
uint32_t height; |
uint32_t width; |
uint32_t bpp; |
uint32_t flags; |
/* handle, pitch, size will be returned */ |
uint32_t handle; |
uint32_t pitch; |
uint64_t size; |
}; |
/* set up for mmap of a dumb scanout buffer */ |
struct drm_mode_map_dumb { |
/** Handle for the object being mapped. */ |
__u32 handle; |
__u32 pad; |
/** |
* Fake offset to use for subsequent mmap call |
* |
* This is a fixed-size type for 32/64 compatibility. |
*/ |
__u64 offset; |
}; |
struct drm_mode_destroy_dumb { |
uint32_t handle; |
}; |
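/* |
 * Illustrative sketch tying the three dumb-buffer structs together |
 * (assumes <sys/ioctl.h>, <sys/mman.h>, <string.h> and the |
 * DRM_IOCTL_MODE_*_DUMB numbers from drm.h): create a buffer, fetch |
 * its fake mmap offset, then map it. The caller checks for MAP_FAILED. |
 */ |
static void *map_dumb_buffer(int fd, __u32 w, __u32 h, |
			     struct drm_mode_create_dumb *out) |
{ |
	struct drm_mode_map_dumb map; |
	memset(out, 0, sizeof(*out)); |
	out->width  = w; |
	out->height = h; |
	out->bpp    = 32;			/* e.g. XRGB8888 */ |
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, out)) |
		return MAP_FAILED; |
	memset(&map, 0, sizeof(map)); |
	map.handle = out->handle;		/* returned by CREATE_DUMB */ |
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map)) |
		return MAP_FAILED; |
	return mmap(NULL, out->size, PROT_READ | PROT_WRITE, |
		    MAP_SHARED, fd, map.offset); |
} |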
#endif |
/drivers/include/uapi/drm/drm_sarea.h |
---|
0,0 → 1,86 |
/** |
* \file drm_sarea.h |
* \brief SAREA definitions |
* |
* \author Michel Dänzer <michel@daenzer.net> |
*/ |
/* |
* Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef _DRM_SAREA_H_ |
#define _DRM_SAREA_H_ |
#include <drm/drm.h> |
/* The SAREA needs to be at least a page */ |
#if defined(__alpha__) |
#define SAREA_MAX 0x2000U |
#elif defined(__mips__) |
#define SAREA_MAX 0x4000U |
#elif defined(__ia64__) |
#define SAREA_MAX 0x10000U /* 64kB */ |
#else |
/* Intel 830M driver needs at least 8k SAREA */ |
#define SAREA_MAX 0x2000U |
#endif |
/** Maximum number of drawables in the SAREA */ |
#define SAREA_MAX_DRAWABLES 256 |
#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000 |
/** SAREA drawable */ |
struct drm_sarea_drawable { |
unsigned int stamp; |
unsigned int flags; |
}; |
/** SAREA frame */ |
struct drm_sarea_frame { |
unsigned int x; |
unsigned int y; |
unsigned int width; |
unsigned int height; |
unsigned int fullscreen; |
}; |
/** SAREA */ |
struct drm_sarea { |
/** first thing is always the DRM locking structure */ |
struct drm_hw_lock lock; |
/** \todo Use readers/writer lock for drm_sarea::drawable_lock */ |
struct drm_hw_lock drawable_lock; |
struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */ |
struct drm_sarea_frame frame; /**< frame */ |
drm_context_t dummy_context; |
}; |
#ifndef __KERNEL__ |
typedef struct drm_sarea_drawable drm_sarea_drawable_t; |
typedef struct drm_sarea_frame drm_sarea_frame_t; |
typedef struct drm_sarea drm_sarea_t; |
#endif |
#endif /* _DRM_SAREA_H_ */ |
/drivers/include/uapi/drm/i915_drm.h |
---|
0,0 → 1,1106 |
/* |
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR |
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
*/ |
#ifndef _UAPI_I915_DRM_H_ |
#define _UAPI_I915_DRM_H_ |
#include <drm/drm.h> |
/* Please note that modifications to all structs defined here are |
* subject to backwards-compatibility constraints. |
*/ |
/** |
 * DOC: uevents generated by i915 on its device node |
* |
* I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch |
* event from the gpu l3 cache. Additional information supplied is ROW, |
* BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep |
* track of these events and if a specific cache-line seems to have a |
* persistent error remap it with the l3 remapping tool supplied in |
* intel-gpu-tools. The value supplied with the event is always 1. |
* |
* I915_ERROR_UEVENT - Generated upon error detection, currently only via |
* hangcheck. The error detection event is a good indicator of when things |
* began to go badly. The value supplied with the event is a 1 upon error |
* detection, and a 0 upon reset completion, signifying no more error |
* exists. NOTE: Disabling hangcheck or reset via module parameter will |
 * cause the related events not to be seen. |
* |
 * I915_RESET_UEVENT - Event is generated just before an attempt to reset |
 * the GPU. The value supplied with the event is always 1. NOTE: Disabling |
 * reset via module parameter will cause this event not to be seen. |
*/ |
#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR" |
#define I915_ERROR_UEVENT "ERROR" |
#define I915_RESET_UEVENT "RESET" |
/* Each region is a minimum of 16k, and there are at most 255 of them. |
*/ |
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use |
* of chars for next/prev indices */ |
#define I915_LOG_MIN_TEX_REGION_SIZE 14 |
typedef struct _drm_i915_init { |
enum { |
I915_INIT_DMA = 0x01, |
I915_CLEANUP_DMA = 0x02, |
I915_RESUME_DMA = 0x03 |
} func; |
unsigned int mmio_offset; |
int sarea_priv_offset; |
unsigned int ring_start; |
unsigned int ring_end; |
unsigned int ring_size; |
unsigned int front_offset; |
unsigned int back_offset; |
unsigned int depth_offset; |
unsigned int w; |
unsigned int h; |
unsigned int pitch; |
unsigned int pitch_bits; |
unsigned int back_pitch; |
unsigned int depth_pitch; |
unsigned int cpp; |
unsigned int chipset; |
} drm_i915_init_t; |
typedef struct _drm_i915_sarea { |
struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; |
int last_upload; /* last time texture was uploaded */ |
int last_enqueue; /* last time a buffer was enqueued */ |
int last_dispatch; /* age of the most recently dispatched buffer */ |
int ctxOwner; /* last context to upload state */ |
int texAge; |
int pf_enabled; /* is pageflipping allowed? */ |
int pf_active; |
int pf_current_page; /* which buffer is being displayed? */ |
int perf_boxes; /* performance boxes to be displayed */ |
int width, height; /* screen size in pixels */ |
drm_handle_t front_handle; |
int front_offset; |
int front_size; |
drm_handle_t back_handle; |
int back_offset; |
int back_size; |
drm_handle_t depth_handle; |
int depth_offset; |
int depth_size; |
drm_handle_t tex_handle; |
int tex_offset; |
int tex_size; |
int log_tex_granularity; |
int pitch; |
int rotation; /* 0, 90, 180 or 270 */ |
int rotated_offset; |
int rotated_size; |
int rotated_pitch; |
int virtualX, virtualY; |
unsigned int front_tiled; |
unsigned int back_tiled; |
unsigned int depth_tiled; |
unsigned int rotated_tiled; |
unsigned int rotated2_tiled; |
int pipeA_x; |
int pipeA_y; |
int pipeA_w; |
int pipeA_h; |
int pipeB_x; |
int pipeB_y; |
int pipeB_w; |
int pipeB_h; |
/* fill out some space for old userspace triple buffer */ |
drm_handle_t unused_handle; |
__u32 unused1, unused2, unused3; |
/* buffer object handles for static buffers. May change |
* over the lifetime of the client. |
*/ |
__u32 front_bo_handle; |
__u32 back_bo_handle; |
__u32 unused_bo_handle; |
__u32 depth_bo_handle; |
} drm_i915_sarea_t; |
/* due to userspace building against these headers we need some compat here */ |
#define planeA_x pipeA_x |
#define planeA_y pipeA_y |
#define planeA_w pipeA_w |
#define planeA_h pipeA_h |
#define planeB_x pipeB_x |
#define planeB_y pipeB_y |
#define planeB_w pipeB_w |
#define planeB_h pipeB_h |
/* Flags for perf_boxes |
*/ |
#define I915_BOX_RING_EMPTY 0x1 |
#define I915_BOX_FLIP 0x2 |
#define I915_BOX_WAIT 0x4 |
#define I915_BOX_TEXTURE_LOAD 0x8 |
#define I915_BOX_LOST_CONTEXT 0x10 |
/* I915 specific ioctls |
* The device specific ioctl range is 0x40 to 0x79. |
*/ |
#define DRM_I915_INIT 0x00 |
#define DRM_I915_FLUSH 0x01 |
#define DRM_I915_FLIP 0x02 |
#define DRM_I915_BATCHBUFFER 0x03 |
#define DRM_I915_IRQ_EMIT 0x04 |
#define DRM_I915_IRQ_WAIT 0x05 |
#define DRM_I915_GETPARAM 0x06 |
#define DRM_I915_SETPARAM 0x07 |
#define DRM_I915_ALLOC 0x08 |
#define DRM_I915_FREE 0x09 |
#define DRM_I915_INIT_HEAP 0x0a |
#define DRM_I915_CMDBUFFER 0x0b |
#define DRM_I915_DESTROY_HEAP 0x0c |
#define DRM_I915_SET_VBLANK_PIPE 0x0d |
#define DRM_I915_GET_VBLANK_PIPE 0x0e |
#define DRM_I915_VBLANK_SWAP 0x0f |
#define DRM_I915_HWS_ADDR 0x11 |
#define DRM_I915_GEM_INIT 0x13 |
#define DRM_I915_GEM_EXECBUFFER 0x14 |
#define DRM_I915_GEM_PIN 0x15 |
#define DRM_I915_GEM_UNPIN 0x16 |
#define DRM_I915_GEM_BUSY 0x17 |
#define DRM_I915_GEM_THROTTLE 0x18 |
#define DRM_I915_GEM_ENTERVT 0x19 |
#define DRM_I915_GEM_LEAVEVT 0x1a |
#define DRM_I915_GEM_CREATE 0x1b |
#define DRM_I915_GEM_PREAD 0x1c |
#define DRM_I915_GEM_PWRITE 0x1d |
#define DRM_I915_GEM_MMAP 0x1e |
#define DRM_I915_GEM_SET_DOMAIN 0x1f |
#define DRM_I915_GEM_SW_FINISH 0x20 |
#define DRM_I915_GEM_SET_TILING 0x21 |
#define DRM_I915_GEM_GET_TILING 0x22 |
#define DRM_I915_GEM_GET_APERTURE 0x23 |
#define DRM_I915_GEM_MMAP_GTT 0x24 |
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 |
#define DRM_I915_GEM_MADVISE 0x26 |
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27 |
#define DRM_I915_OVERLAY_ATTRS 0x28 |
#define DRM_I915_GEM_EXECBUFFER2 0x29 |
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a |
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b |
#define DRM_I915_GEM_WAIT 0x2c |
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d |
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e |
#define DRM_I915_GEM_SET_CACHING 0x2f |
#define DRM_I915_GEM_GET_CACHING 0x30 |
#define DRM_I915_REG_READ 0x31 |
#define DRM_I915_GET_RESET_STATS 0x32 |
#define DRM_I915_GEM_USERPTR 0x33 |
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) |
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) |
#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) |
#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) |
#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) |
#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) |
#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) |
#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) |
#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) |
#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) |
#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) |
#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) |
#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) |
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) |
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) |
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) |
#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init) |
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) |
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) |
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) |
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) |
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) |
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) |
#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching) |
#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching) |
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) |
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) |
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) |
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) |
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) |
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) |
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) |
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) |
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) |
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) |
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) |
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) |
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) |
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) |
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) |
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) |
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) |
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) |
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) |
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) |
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) |
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) |
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) |
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats) |
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) |
/* Allow drivers to submit batchbuffers directly to hardware, relying |
* on the security mechanisms provided by hardware. |
*/ |
typedef struct drm_i915_batchbuffer { |
int start; /* agp offset */ |
int used; /* nr bytes in use */ |
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ |
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ |
int num_cliprects; /* multipass with multiple cliprects? */ |
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ |
} drm_i915_batchbuffer_t; |
/* As above, but pass a pointer to userspace buffer which can be |
* validated by the kernel prior to sending to hardware. |
*/ |
typedef struct _drm_i915_cmdbuffer { |
char __user *buf; /* pointer to userspace command buffer */ |
int sz; /* nr bytes in buf */ |
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ |
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ |
int num_cliprects; /* multipass with multiple cliprects? */ |
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ |
} drm_i915_cmdbuffer_t; |
/* Userspace can request & wait on irq's: |
*/ |
typedef struct drm_i915_irq_emit { |
int __user *irq_seq; |
} drm_i915_irq_emit_t; |
typedef struct drm_i915_irq_wait { |
int irq_seq; |
} drm_i915_irq_wait_t; |
/* Ioctl to query kernel params: |
*/ |
#define I915_PARAM_IRQ_ACTIVE 1 |
#define I915_PARAM_ALLOW_BATCHBUFFER 2 |
#define I915_PARAM_LAST_DISPATCH 3 |
#define I915_PARAM_CHIPSET_ID 4 |
#define I915_PARAM_HAS_GEM 5 |
#define I915_PARAM_NUM_FENCES_AVAIL 6 |
#define I915_PARAM_HAS_OVERLAY 7 |
#define I915_PARAM_HAS_PAGEFLIPPING 8 |
#define I915_PARAM_HAS_EXECBUF2 9 |
#define I915_PARAM_HAS_BSD 10 |
#define I915_PARAM_HAS_BLT 11 |
#define I915_PARAM_HAS_RELAXED_FENCING 12 |
#define I915_PARAM_HAS_COHERENT_RINGS 13 |
#define I915_PARAM_HAS_EXEC_CONSTANTS 14 |
#define I915_PARAM_HAS_RELAXED_DELTA 15 |
#define I915_PARAM_HAS_GEN7_SOL_RESET 16 |
#define I915_PARAM_HAS_LLC 17 |
#define I915_PARAM_HAS_ALIASING_PPGTT 18 |
#define I915_PARAM_HAS_WAIT_TIMEOUT 19 |
#define I915_PARAM_HAS_SEMAPHORES 20 |
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 |
#define I915_PARAM_HAS_VEBOX 22 |
#define I915_PARAM_HAS_SECURE_BATCHES 23 |
#define I915_PARAM_HAS_PINNED_BATCHES 24 |
#define I915_PARAM_HAS_EXEC_NO_RELOC 25 |
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 |
#define I915_PARAM_HAS_WT 27 |
#define I915_PARAM_CMD_PARSER_VERSION 28 |
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29 |
typedef struct drm_i915_getparam { |
int param; |
int __user *value; |
} drm_i915_getparam_t; |
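/* |
 * Illustrative usage sketch (not part of the original header): GETPARAM |
 * writes its result through the user pointer in "value". Here we fetch the |
 * PCI device id of the GPU; the include path assumes a typical userspace |
 * install. |
 */ |
#include <sys/ioctl.h> |
#include <drm/i915_drm.h> |
static int i915_get_chipset_id(int fd, int *devid) |
{ |
	drm_i915_getparam_t gp = { |
		.param = I915_PARAM_CHIPSET_ID, |
		.value = devid,		/* kernel stores the result here */ |
	}; |
	return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp); |
} |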
/* Ioctl to set kernel params: |
*/ |
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 |
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 |
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3 |
#define I915_SETPARAM_NUM_USED_FENCES 4 |
typedef struct drm_i915_setparam { |
int param; |
int value; |
} drm_i915_setparam_t; |
/* A memory manager for regions of shared memory: |
*/ |
#define I915_MEM_REGION_AGP 1 |
typedef struct drm_i915_mem_alloc { |
int region; |
int alignment; |
int size; |
int __user *region_offset; /* offset from start of fb or agp */ |
} drm_i915_mem_alloc_t; |
typedef struct drm_i915_mem_free { |
int region; |
int region_offset; |
} drm_i915_mem_free_t; |
typedef struct drm_i915_mem_init_heap { |
int region; |
int size; |
int start; |
} drm_i915_mem_init_heap_t; |
/* Allow memory manager to be torn down and re-initialized (eg on |
* rotate): |
*/ |
typedef struct drm_i915_mem_destroy_heap { |
int region; |
} drm_i915_mem_destroy_heap_t; |
/* Allow X server to configure which pipes to monitor for vblank signals |
*/ |
#define DRM_I915_VBLANK_PIPE_A 1 |
#define DRM_I915_VBLANK_PIPE_B 2 |
typedef struct drm_i915_vblank_pipe { |
int pipe; |
} drm_i915_vblank_pipe_t; |
/* Schedule buffer swap at given vertical blank: |
*/ |
typedef struct drm_i915_vblank_swap { |
drm_drawable_t drawable; |
enum drm_vblank_seq_type seqtype; |
unsigned int sequence; |
} drm_i915_vblank_swap_t; |
typedef struct drm_i915_hws_addr { |
__u64 addr; |
} drm_i915_hws_addr_t; |
struct drm_i915_gem_init { |
/** |
* Beginning offset in the GTT to be managed by the DRM memory |
* manager. |
*/ |
__u64 gtt_start; |
/** |
* Ending offset in the GTT to be managed by the DRM memory |
* manager. |
*/ |
__u64 gtt_end; |
}; |
struct drm_i915_gem_create { |
/** |
* Requested size for the object. |
* |
* The (page-aligned) allocated size for the object will be returned. |
*/ |
__u64 size; |
/** |
* Returned handle for the object. |
* |
* Object handles are nonzero. |
*/ |
__u32 handle; |
__u32 pad; |
}; |
struct drm_i915_gem_pread { |
/** Handle for the object being read. */ |
__u32 handle; |
__u32 pad; |
/** Offset into the object to read from */ |
__u64 offset; |
/** Length of data to read */ |
__u64 size; |
/** |
* Pointer to write the data into. |
* |
* This is a fixed-size type for 32/64 compatibility. |
*/ |
__u64 data_ptr; |
}; |
struct drm_i915_gem_pwrite { |
/** Handle for the object being written to. */ |
__u32 handle; |
__u32 pad; |
/** Offset into the object to write to */ |
__u64 offset; |
/** Length of data to write */ |
__u64 size; |
/** |
* Pointer to read the data from. |
* |
* This is a fixed-size type for 32/64 compatibility. |
*/ |
__u64 data_ptr; |
}; |
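/* |
 * Illustrative usage sketch (not part of the original header): create a GEM |
 * object and upload data into it with PWRITE. Pointers travel as __u64 for |
 * 32/64-bit compatibility, hence the uintptr_t cast. |
 */ |
#include <stdint.h> |
#include <string.h> |
#include <sys/ioctl.h> |
#include <drm/i915_drm.h> |
static uint32_t bo_create_and_fill(int fd, const void *data, uint64_t len) |
{ |
	struct drm_i915_gem_create create; |
	struct drm_i915_gem_pwrite pwrite; |
	memset(&create, 0, sizeof(create)); |
	create.size = len;		/* written back page-aligned */ |
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create)) |
		return 0;		/* valid handles are nonzero */ |
	memset(&pwrite, 0, sizeof(pwrite)); |
	pwrite.handle = create.handle; |
	pwrite.offset = 0; |
	pwrite.size = len; |
	pwrite.data_ptr = (uintptr_t)data; |
	if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite)) |
		return 0; |
	return create.handle; |
} |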
struct drm_i915_gem_mmap { |
/** Handle for the object being mapped. */ |
__u32 handle; |
__u32 pad; |
/** Offset in the object to map. */ |
__u64 offset; |
/** |
* Length of data to map. |
* |
* The value will be page-aligned. |
*/ |
__u64 size; |
/** |
* Returned pointer the data was mapped at. |
* |
* This is a fixed-size type for 32/64 compatibility. |
*/ |
__u64 addr_ptr; |
}; |
struct drm_i915_gem_mmap_gtt { |
/** Handle for the object being mapped. */ |
__u32 handle; |
__u32 pad; |
/** |
* Fake offset to use for subsequent mmap call |
* |
* This is a fixed-size type for 32/64 compatibility. |
*/ |
__u64 offset; |
}; |
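/* |
 * Illustrative usage sketch (not part of the original header): the two-step |
 * GTT mapping dance. The ioctl only returns a fake offset; the actual |
 * mapping is an ordinary mmap() of the DRM fd at that offset. |
 */ |
#include <stdint.h> |
#include <string.h> |
#include <sys/ioctl.h> |
#include <sys/mman.h> |
#include <drm/i915_drm.h> |
static void *bo_map_gtt(int fd, uint32_t handle, size_t size) |
{ |
	struct drm_i915_gem_mmap_gtt arg; |
	void *ptr; |
	memset(&arg, 0, sizeof(arg)); |
	arg.handle = handle; |
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg)) |
		return NULL; |
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, |
		   MAP_SHARED, fd, arg.offset); |
	return ptr == MAP_FAILED ? NULL : ptr; |
} |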
struct drm_i915_gem_set_domain { |
/** Handle for the object */ |
__u32 handle; |
/** New read domains */ |
__u32 read_domains; |
/** New write domain */ |
__u32 write_domain; |
}; |
struct drm_i915_gem_sw_finish { |
/** Handle for the object */ |
__u32 handle; |
}; |
struct drm_i915_gem_relocation_entry { |
/** |
* Handle of the buffer being pointed to by this relocation entry. |
* |
 * It's appealing to make this an index into the mm_validate_entry |
* list to refer to the buffer, but this allows the driver to create |
* a relocation list for state buffers and not re-write it per |
* exec using the buffer. |
*/ |
__u32 target_handle; |
/** |
* Value to be added to the offset of the target buffer to make up |
* the relocation entry. |
*/ |
__u32 delta; |
/** Offset in the buffer the relocation entry will be written into */ |
__u64 offset; |
/** |
* Offset value of the target buffer that the relocation entry was last |
* written as. |
* |
* If the buffer has the same offset as last time, we can skip syncing |
* and writing the relocation. This value is written back out by |
* the execbuffer ioctl when the relocation is written. |
*/ |
__u64 presumed_offset; |
/** |
* Target memory domains read by this operation. |
*/ |
__u32 read_domains; |
/** |
* Target memory domains written by this operation. |
* |
* Note that only one domain may be written by the whole |
* execbuffer operation, so that where there are conflicts, |
* the application will get -EINVAL back. |
*/ |
__u32 write_domain; |
}; |
/** @{ |
* Intel memory domains |
* |
* Most of these just align with the various caches in |
* the system and are used to flush and invalidate as |
* objects end up cached in different domains. |
*/ |
/** CPU cache */ |
#define I915_GEM_DOMAIN_CPU 0x00000001 |
/** Render cache, used by 2D and 3D drawing */ |
#define I915_GEM_DOMAIN_RENDER 0x00000002 |
/** Sampler cache, used by texture engine */ |
#define I915_GEM_DOMAIN_SAMPLER 0x00000004 |
/** Command queue, used to load batch buffers */ |
#define I915_GEM_DOMAIN_COMMAND 0x00000008 |
/** Instruction cache, used by shader programs */ |
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 |
/** Vertex address cache */ |
#define I915_GEM_DOMAIN_VERTEX 0x00000020 |
/** GTT domain - aperture and scanout */ |
#define I915_GEM_DOMAIN_GTT 0x00000040 |
/** @} */ |
struct drm_i915_gem_exec_object { |
/** |
* User's handle for a buffer to be bound into the GTT for this |
* operation. |
*/ |
__u32 handle; |
/** Number of relocations to be performed on this buffer */ |
__u32 relocation_count; |
/** |
* Pointer to array of struct drm_i915_gem_relocation_entry containing |
* the relocations to be performed in this buffer. |
*/ |
__u64 relocs_ptr; |
/** Required alignment in graphics aperture */ |
__u64 alignment; |
/** |
* Returned value of the updated offset of the object, for future |
* presumed_offset writes. |
*/ |
__u64 offset; |
}; |
struct drm_i915_gem_execbuffer { |
/** |
* List of buffers to be validated with their relocations to be |
 * performed on them. |
* |
* This is a pointer to an array of struct drm_i915_gem_validate_entry. |
* |
* These buffers must be listed in an order such that all relocations |
* a buffer is performing refer to buffers that have already appeared |
* in the validate list. |
*/ |
__u64 buffers_ptr; |
__u32 buffer_count; |
/** Offset in the batchbuffer to start execution from. */ |
__u32 batch_start_offset; |
/** Bytes used in batchbuffer from batch_start_offset */ |
__u32 batch_len; |
__u32 DR1; |
__u32 DR4; |
__u32 num_cliprects; |
/** This is a struct drm_clip_rect *cliprects */ |
__u64 cliprects_ptr; |
}; |
struct drm_i915_gem_exec_object2 { |
/** |
* User's handle for a buffer to be bound into the GTT for this |
* operation. |
*/ |
__u32 handle; |
/** Number of relocations to be performed on this buffer */ |
__u32 relocation_count; |
/** |
* Pointer to array of struct drm_i915_gem_relocation_entry containing |
* the relocations to be performed in this buffer. |
*/ |
__u64 relocs_ptr; |
/** Required alignment in graphics aperture */ |
__u64 alignment; |
/** |
* Returned value of the updated offset of the object, for future |
* presumed_offset writes. |
*/ |
__u64 offset; |
#define EXEC_OBJECT_NEEDS_FENCE (1<<0) |
#define EXEC_OBJECT_NEEDS_GTT (1<<1) |
#define EXEC_OBJECT_WRITE (1<<2) |
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1) |
__u64 flags; |
__u64 rsvd1; |
__u64 rsvd2; |
}; |
struct drm_i915_gem_execbuffer2 { |
/** |
* List of gem_exec_object2 structs |
*/ |
__u64 buffers_ptr; |
__u32 buffer_count; |
/** Offset in the batchbuffer to start execution from. */ |
__u32 batch_start_offset; |
/** Bytes used in batchbuffer from batch_start_offset */ |
__u32 batch_len; |
__u32 DR1; |
__u32 DR4; |
__u32 num_cliprects; |
/** This is a struct drm_clip_rect *cliprects */ |
__u64 cliprects_ptr; |
#define I915_EXEC_RING_MASK (7<<0) |
#define I915_EXEC_DEFAULT (0<<0) |
#define I915_EXEC_RENDER (1<<0) |
#define I915_EXEC_BSD (2<<0) |
#define I915_EXEC_BLT (3<<0) |
#define I915_EXEC_VEBOX (4<<0) |
/* Used for switching the constants addressing mode on gen4+ RENDER ring. |
* Gen6+ only supports relative addressing to dynamic state (default) and |
* absolute addressing. |
* |
* These flags are ignored for the BSD and BLT rings. |
*/ |
#define I915_EXEC_CONSTANTS_MASK (3<<6) |
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ |
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) |
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ |
__u64 flags; |
__u64 rsvd1; /* now used for context info */ |
__u64 rsvd2; |
}; |
/** Resets the SO write offset registers for transform feedback on gen7. */ |
#define I915_EXEC_GEN7_SOL_RESET (1<<8) |
/** Request a privileged ("secure") batch buffer. Note: only available for |
* DRM_ROOT_ONLY | DRM_MASTER processes. |
*/ |
#define I915_EXEC_SECURE (1<<9) |
/** Inform the kernel that the batch is and will always be pinned. This |
* negates the requirement for a workaround to be performed to avoid |
* an incoherent CS (such as can be found on 830/845). If this flag is |
* not passed, the kernel will endeavour to make sure the batch is |
* coherent with the CS before execution. If this flag is passed, |
* userspace assumes the responsibility for ensuring the same. |
*/ |
#define I915_EXEC_IS_PINNED (1<<10) |
/** Provide a hint to the kernel that the command stream and auxiliary |
 * state buffers already hold the correct presumed addresses and so the |
* relocation process may be skipped if no buffers need to be moved in |
* preparation for the execbuffer. |
*/ |
#define I915_EXEC_NO_RELOC (1<<11) |
/** Use the reloc.handle as an index into the exec object array rather |
* than as the per-file handle. |
*/ |
#define I915_EXEC_HANDLE_LUT (1<<12) |
#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1) |
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) |
#define i915_execbuffer2_set_context_id(eb2, context) \ |
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK |
#define i915_execbuffer2_get_context_id(eb2) \ |
((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK) |
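/* |
 * Illustrative usage sketch (not part of the original header): submit a |
 * batch with EXECBUFFER2 on a given context. By long-standing convention |
 * the batch buffer is the last entry in the object array; relocations are |
 * omitted here for brevity. |
 */ |
#include <stdint.h> |
#include <string.h> |
#include <sys/ioctl.h> |
#include <drm/i915_drm.h> |
static int submit_batch(int fd, uint32_t batch_handle, uint32_t batch_len, |
			uint32_t ctx_id) |
{ |
	struct drm_i915_gem_exec_object2 obj; |
	struct drm_i915_gem_execbuffer2 eb; |
	memset(&obj, 0, sizeof(obj)); |
	obj.handle = batch_handle;	/* no relocations in this sketch */ |
	memset(&eb, 0, sizeof(eb)); |
	eb.buffers_ptr = (uintptr_t)&obj; |
	eb.buffer_count = 1; |
	eb.batch_start_offset = 0; |
	eb.batch_len = batch_len; |
	eb.flags = I915_EXEC_RENDER;	/* target the render ring */ |
	i915_execbuffer2_set_context_id(eb, ctx_id); |
	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb); |
} |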
struct drm_i915_gem_pin { |
/** Handle of the buffer to be pinned. */ |
__u32 handle; |
__u32 pad; |
/** alignment required within the aperture */ |
__u64 alignment; |
/** Returned GTT offset of the buffer. */ |
__u64 offset; |
}; |
struct drm_i915_gem_unpin { |
/** Handle of the buffer to be unpinned. */ |
__u32 handle; |
__u32 pad; |
}; |
struct drm_i915_gem_busy { |
/** Handle of the buffer to check for busy */ |
__u32 handle; |
/** Return busy status (1 if busy, 0 if idle). |
* The high word is used to indicate on which rings the object |
* currently resides: |
* 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc) |
*/ |
__u32 busy; |
}; |
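/* |
 * Illustrative usage sketch (not part of the original header): decode the |
 * packed busy word. Bit 0 reports busy/idle; the high half is the per-ring |
 * bitmask documented above (bit 0 of the mask = render, 1 = bsd, 2 = blt). |
 */ |
#include <stdint.h> |
#include <string.h> |
#include <sys/ioctl.h> |
#include <drm/i915_drm.h> |
static int bo_busy_rings(int fd, uint32_t handle, uint32_t *rings) |
{ |
	struct drm_i915_gem_busy busy; |
	memset(&busy, 0, sizeof(busy)); |
	busy.handle = handle; |
	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy)) |
		return -1; |
	*rings = busy.busy >> 16;	/* rings the object resides on */ |
	return busy.busy & 1;		/* 1 if busy, 0 if idle */ |
} |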
/** |
* I915_CACHING_NONE |
* |
* GPU access is not coherent with cpu caches. Default for machines without an |
* LLC. |
*/ |
#define I915_CACHING_NONE 0 |
/** |
* I915_CACHING_CACHED |
* |
* GPU access is coherent with cpu caches and furthermore the data is cached in |
* last-level caches shared between cpu cores and the gpu GT. Default on |
* machines with HAS_LLC. |
*/ |
#define I915_CACHING_CACHED 1 |
/** |
* I915_CACHING_DISPLAY |
* |
* Special GPU caching mode which is coherent with the scanout engines. |
* Transparently falls back to I915_CACHING_NONE on platforms where no special |
* cache mode (like write-through or gfdt flushing) is available. The kernel |
* automatically sets this mode when using a buffer as a scanout target. |
* Userspace can manually set this mode to avoid a costly stall and clflush in |
* the hotpath of drawing the first frame. |
*/ |
#define I915_CACHING_DISPLAY 2 |
struct drm_i915_gem_caching { |
/** Handle of the buffer to set/get the caching level of. */ |
__u32 handle; |
/** |
 * Caching level to apply or returned value |
 * |
 * bits 0-15 are for generic caching control (i.e. the above defined |
 * values). bits 16-31 are reserved for platform-specific variations |
 * (e.g. l3$ caching on gen7). */ |
__u32 caching; |
}; |
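/* |
 * Illustrative usage sketch (not part of the original header): mark an |
 * object for display use so that its first use as a scanout target avoids |
 * the stall-and-clflush path described above. |
 */ |
#include <stdint.h> |
#include <string.h> |
#include <sys/ioctl.h> |
#include <drm/i915_drm.h> |
static int bo_set_display_caching(int fd, uint32_t handle) |
{ |
	struct drm_i915_gem_caching arg; |
	memset(&arg, 0, sizeof(arg)); |
	arg.handle = handle; |
	arg.caching = I915_CACHING_DISPLAY; |
	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg); |
} |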
#define I915_TILING_NONE 0 |
#define I915_TILING_X 1 |
#define I915_TILING_Y 2 |
#define I915_BIT_6_SWIZZLE_NONE 0 |
#define I915_BIT_6_SWIZZLE_9 1 |
#define I915_BIT_6_SWIZZLE_9_10 2 |
#define I915_BIT_6_SWIZZLE_9_11 3 |
#define I915_BIT_6_SWIZZLE_9_10_11 4 |
/* Not seen by userland */ |
#define I915_BIT_6_SWIZZLE_UNKNOWN 5 |
/* Seen by userland. */ |
#define I915_BIT_6_SWIZZLE_9_17 6 |
#define I915_BIT_6_SWIZZLE_9_10_17 7 |
struct drm_i915_gem_set_tiling { |
/** Handle of the buffer to have its tiling state updated */ |
__u32 handle; |
/** |
* Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, |
* I915_TILING_Y). |
* |
* This value is to be set on request, and will be updated by the |
* kernel on successful return with the actual chosen tiling layout. |
* |
* The tiling mode may be demoted to I915_TILING_NONE when the system |
* has bit 6 swizzling that can't be managed correctly by GEM. |
* |
* Buffer contents become undefined when changing tiling_mode. |
*/ |
__u32 tiling_mode; |
/** |
* Stride in bytes for the object when in I915_TILING_X or |
* I915_TILING_Y. |
*/ |
__u32 stride; |
/** |
* Returned address bit 6 swizzling required for CPU access through |
* mmap mapping. |
*/ |
__u32 swizzle_mode; |
}; |
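/* |
 * Illustrative usage sketch (not part of the original header): request X |
 * tiling and check what the kernel actually granted, since the mode may be |
 * demoted to I915_TILING_NONE on systems with unmanageable bit-6 swizzling. |
 * Valid strides are hardware-dependent. |
 */ |
#include <stdint.h> |
#include <string.h> |
#include <sys/ioctl.h> |
#include <drm/i915_drm.h> |
static int bo_set_x_tiling(int fd, uint32_t handle, uint32_t stride) |
{ |
	struct drm_i915_gem_set_tiling arg; |
	memset(&arg, 0, sizeof(arg)); |
	arg.handle = handle; |
	arg.tiling_mode = I915_TILING_X; |
	arg.stride = stride;		/* in bytes */ |
	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg)) |
		return -1; |
	return arg.tiling_mode == I915_TILING_X ? 0 : -1; |
} |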
struct drm_i915_gem_get_tiling { |
/** Handle of the buffer to get tiling state for. */ |
__u32 handle; |
/** |
* Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, |
* I915_TILING_Y). |
*/ |
__u32 tiling_mode; |
/** |
* Returned address bit 6 swizzling required for CPU access through |
* mmap mapping. |
*/ |
__u32 swizzle_mode; |
/** |
* Returned address bit 6 swizzling required for CPU access through |
* mmap mapping whilst bound. |
*/ |
__u32 phys_swizzle_mode; |
}; |
struct drm_i915_gem_get_aperture { |
/** Total size of the aperture used by i915_gem_execbuffer, in bytes */ |
__u64 aper_size; |
/** |
* Available space in the aperture used by i915_gem_execbuffer, in |
* bytes |
*/ |
__u64 aper_available_size; |
}; |
struct drm_i915_get_pipe_from_crtc_id { |
/** ID of CRTC being requested */ |
__u32 crtc_id; |
/** pipe of requested CRTC */ |
__u32 pipe; |
}; |
#define I915_MADV_WILLNEED 0 |
#define I915_MADV_DONTNEED 1 |
#define __I915_MADV_PURGED 2 /* internal state */ |
struct drm_i915_gem_madvise { |
/** Handle of the buffer to change the backing store advice */ |
__u32 handle; |
/* Advice: either the buffer will be needed again in the near future, |
 * or won't be and could be discarded under memory pressure. |
*/ |
__u32 madv; |
/** Whether the backing store still exists. */ |
__u32 retained; |
}; |
/* flags */ |
#define I915_OVERLAY_TYPE_MASK 0xff |
#define I915_OVERLAY_YUV_PLANAR 0x01 |
#define I915_OVERLAY_YUV_PACKED 0x02 |
#define I915_OVERLAY_RGB 0x03 |
#define I915_OVERLAY_DEPTH_MASK 0xff00 |
#define I915_OVERLAY_RGB24 0x1000 |
#define I915_OVERLAY_RGB16 0x2000 |
#define I915_OVERLAY_RGB15 0x3000 |
#define I915_OVERLAY_YUV422 0x0100 |
#define I915_OVERLAY_YUV411 0x0200 |
#define I915_OVERLAY_YUV420 0x0300 |
#define I915_OVERLAY_YUV410 0x0400 |
#define I915_OVERLAY_SWAP_MASK 0xff0000 |
#define I915_OVERLAY_NO_SWAP 0x000000 |
#define I915_OVERLAY_UV_SWAP 0x010000 |
#define I915_OVERLAY_Y_SWAP 0x020000 |
#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 |
#define I915_OVERLAY_FLAGS_MASK 0xff000000 |
#define I915_OVERLAY_ENABLE 0x01000000 |
struct drm_intel_overlay_put_image { |
/* various flags and src format description */ |
__u32 flags; |
/* source picture description */ |
__u32 bo_handle; |
/* stride values and offsets are in bytes, buffer relative */ |
__u16 stride_Y; /* stride for packed formats */ |
__u16 stride_UV; |
__u32 offset_Y; /* offset for packed formats */ |
__u32 offset_U; |
__u32 offset_V; |
/* in pixels */ |
__u16 src_width; |
__u16 src_height; |
/* to compensate for the scaling factors on partially covered surfaces */ |
__u16 src_scan_width; |
__u16 src_scan_height; |
/* output crtc description */ |
__u32 crtc_id; |
__u16 dst_x; |
__u16 dst_y; |
__u16 dst_width; |
__u16 dst_height; |
}; |
/* flags */ |
#define I915_OVERLAY_UPDATE_ATTRS (1<<0) |
#define I915_OVERLAY_UPDATE_GAMMA (1<<1) |
struct drm_intel_overlay_attrs { |
__u32 flags; |
__u32 color_key; |
__s32 brightness; |
__u32 contrast; |
__u32 saturation; |
__u32 gamma0; |
__u32 gamma1; |
__u32 gamma2; |
__u32 gamma3; |
__u32 gamma4; |
__u32 gamma5; |
}; |
/* |
* Intel sprite handling |
* |
* Color keying works with a min/mask/max tuple. Both source and destination |
* color keying is allowed. |
* |
* Source keying: |
* Sprite pixels within the min & max values, masked against the color channels |
* specified in the mask field, will be transparent. All other pixels will |
* be displayed on top of the primary plane. For RGB surfaces, only the min |
* and mask fields will be used; ranged compares are not allowed. |
* |
* Destination keying: |
* Primary plane pixels that match the min value, masked against the color |
* channels specified in the mask field, will be replaced by corresponding |
* pixels from the sprite plane. |
* |
* Note that source & destination keying are exclusive; only one can be |
* active on a given plane. |
*/ |
#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */ |
#define I915_SET_COLORKEY_DESTINATION (1<<1) |
#define I915_SET_COLORKEY_SOURCE (1<<2) |
struct drm_intel_sprite_colorkey { |
__u32 plane_id; |
__u32 min_value; |
__u32 channel_mask; |
__u32 max_value; |
__u32 flags; |
}; |
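/* |
 * Illustrative usage sketch (not part of the original header): enable |
 * source keying on a sprite plane so that pure-black sprite pixels, |
 * compared across all RGB channels, become transparent per the rules |
 * described above. |
 */ |
#include <stdint.h> |
#include <string.h> |
#include <sys/ioctl.h> |
#include <drm/i915_drm.h> |
static int sprite_key_out_black(int fd, uint32_t plane_id) |
{ |
	struct drm_intel_sprite_colorkey key; |
	memset(&key, 0, sizeof(key)); |
	key.plane_id = plane_id; |
	key.min_value = 0x000000;	/* RGB: only min and mask are used */ |
	key.channel_mask = 0xffffff;	/* compare all color channels */ |
	key.flags = I915_SET_COLORKEY_SOURCE; |
	return ioctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &key); |
} |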
struct drm_i915_gem_wait { |
/** Handle of BO we shall wait on */ |
__u32 bo_handle; |
__u32 flags; |
/** Number of nanoseconds to wait. Returns the time remaining. */ |
__s64 timeout_ns; |
}; |
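/* |
 * Illustrative usage sketch (not part of the original header): bounded wait |
 * for a buffer to go idle. timeout_ns is updated in place with the time |
 * remaining; a negative timeout requests an indefinite wait. |
 */ |
#include <stdint.h> |
#include <string.h> |
#include <sys/ioctl.h> |
#include <drm/i915_drm.h> |
static int bo_wait(int fd, uint32_t handle, int64_t timeout_ns) |
{ |
	struct drm_i915_gem_wait wait; |
	memset(&wait, 0, sizeof(wait)); |
	wait.bo_handle = handle; |
	wait.timeout_ns = timeout_ns; |
	return ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait); |
} |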
struct drm_i915_gem_context_create { |
/* output: id of new context */ |
__u32 ctx_id; |
__u32 pad; |
}; |
struct drm_i915_gem_context_destroy { |
__u32 ctx_id; |
__u32 pad; |
}; |
struct drm_i915_reg_read { |
__u64 offset; |
__u64 val; /* Return value */ |
}; |
struct drm_i915_reset_stats { |
__u32 ctx_id; |
__u32 flags; |
/* All resets since boot/module reload, for all contexts */ |
__u32 reset_count; |
/* Number of batches lost when active in GPU, for this context */ |
__u32 batch_active; |
/* Number of batches lost pending for execution, for this context */ |
__u32 batch_pending; |
__u32 pad; |
}; |
struct drm_i915_gem_userptr { |
__u64 user_ptr; |
__u64 user_size; |
__u32 flags; |
#define I915_USERPTR_READ_ONLY 0x1 |
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000 |
/** |
* Returned handle for the object. |
* |
* Object handles are nonzero. |
*/ |
__u32 handle; |
}; |
struct drm_i915_mask { |
__u32 handle; |
__u32 width; |
__u32 height; |
__u32 bo_size; |
__u32 bo_pitch; |
__u32 bo_map; |
}; |
struct drm_i915_fb_info { |
__u32 name; |
__u32 width; |
__u32 height; |
__u32 pitch; |
__u32 tiling; |
__u32 crtc; |
__u32 pipe; |
}; |
struct drm_i915_mask_update { |
__u32 handle; |
__u32 dx; |
__u32 dy; |
__u32 width; |
__u32 height; |
__u32 bo_pitch; |
__u32 bo_map; |
}; |
#endif /* _UAPI_I915_DRM_H_ */ |
/drivers/include/uapi/drm/radeon_drm.h |
---|
0,0 → 1,1064 |
/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*- |
* |
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Fremont, California. |
* Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. |
* All rights reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
* DEALINGS IN THE SOFTWARE. |
* |
* Authors: |
* Kevin E. Martin <martin@valinux.com> |
* Gareth Hughes <gareth@valinux.com> |
* Keith Whitwell <keith@tungstengraphics.com> |
*/ |
#ifndef __RADEON_DRM_H__ |
#define __RADEON_DRM_H__ |
#include <drm/drm.h> |
/* WARNING: If you change any of these defines, make sure to change the |
* defines in the X server file (radeon_sarea.h) |
*/ |
#ifndef __RADEON_SAREA_DEFINES__ |
#define __RADEON_SAREA_DEFINES__ |
/* Old style state flags, required for sarea interface (1.1 and 1.2 |
* clears) and 1.2 drm_vertex2 ioctl. |
*/ |
#define RADEON_UPLOAD_CONTEXT 0x00000001 |
#define RADEON_UPLOAD_VERTFMT 0x00000002 |
#define RADEON_UPLOAD_LINE 0x00000004 |
#define RADEON_UPLOAD_BUMPMAP 0x00000008 |
#define RADEON_UPLOAD_MASKS 0x00000010 |
#define RADEON_UPLOAD_VIEWPORT 0x00000020 |
#define RADEON_UPLOAD_SETUP 0x00000040 |
#define RADEON_UPLOAD_TCL 0x00000080 |
#define RADEON_UPLOAD_MISC 0x00000100 |
#define RADEON_UPLOAD_TEX0 0x00000200 |
#define RADEON_UPLOAD_TEX1 0x00000400 |
#define RADEON_UPLOAD_TEX2 0x00000800 |
#define RADEON_UPLOAD_TEX0IMAGES 0x00001000 |
#define RADEON_UPLOAD_TEX1IMAGES 0x00002000 |
#define RADEON_UPLOAD_TEX2IMAGES 0x00004000 |
#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */ |
#define RADEON_REQUIRE_QUIESCENCE 0x00010000 |
#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */ |
#define RADEON_UPLOAD_ALL 0x003effff |
#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff |
/* New style per-packet identifiers for use in cmd_buffer ioctl with |
* the RADEON_EMIT_PACKET command. Comments relate new packets to old |
* state bits and the packet size: |
*/ |
#define RADEON_EMIT_PP_MISC 0 /* context/7 */ |
#define RADEON_EMIT_PP_CNTL 1 /* context/3 */ |
#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */ |
#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */ |
#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */ |
#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */ |
#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */ |
#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */ |
#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */ |
#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */ |
#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */ |
#define RADEON_EMIT_RE_MISC 11 /* misc/1 */ |
#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */ |
#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */ |
#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */ |
#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */ |
#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */ |
#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */ |
#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */ |
#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */ |
#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */ |
#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */ |
#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */ |
#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */ |
#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */ |
#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */ |
#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */ |
#define R200_EMIT_TFACTOR_0 30 /* tf/7 */ |
#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */ |
#define R200_EMIT_VAP_CTL 32 /* vap/1 */ |
#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */ |
#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */ |
#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */ |
#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */ |
#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */ |
#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */ |
#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */ |
#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */ |
#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */ |
#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */ |
#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */ |
#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */ |
#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */ |
#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */ |
#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */ |
#define R200_EMIT_VTE_CNTL 48 /* vte/1 */ |
#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */ |
#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */ |
#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */ |
#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */ |
#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */ |
#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */ |
#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */ |
#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */ |
#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */ |
#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */ |
#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */ |
#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */ |
#define R200_EMIT_PP_CUBIC_FACES_0 61 |
#define R200_EMIT_PP_CUBIC_OFFSETS_0 62 |
#define R200_EMIT_PP_CUBIC_FACES_1 63 |
#define R200_EMIT_PP_CUBIC_OFFSETS_1 64 |
#define R200_EMIT_PP_CUBIC_FACES_2 65 |
#define R200_EMIT_PP_CUBIC_OFFSETS_2 66 |
#define R200_EMIT_PP_CUBIC_FACES_3 67 |
#define R200_EMIT_PP_CUBIC_OFFSETS_3 68 |
#define R200_EMIT_PP_CUBIC_FACES_4 69 |
#define R200_EMIT_PP_CUBIC_OFFSETS_4 70 |
#define R200_EMIT_PP_CUBIC_FACES_5 71 |
#define R200_EMIT_PP_CUBIC_OFFSETS_5 72 |
#define RADEON_EMIT_PP_TEX_SIZE_0 73 |
#define RADEON_EMIT_PP_TEX_SIZE_1 74 |
#define RADEON_EMIT_PP_TEX_SIZE_2 75 |
#define R200_EMIT_RB3D_BLENDCOLOR 76 |
#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77 |
#define RADEON_EMIT_PP_CUBIC_FACES_0 78 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79 |
#define RADEON_EMIT_PP_CUBIC_FACES_1 80 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81 |
#define RADEON_EMIT_PP_CUBIC_FACES_2 82 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83 |
#define R200_EMIT_PP_TRI_PERF_CNTL 84 |
#define R200_EMIT_PP_AFS_0 85 |
#define R200_EMIT_PP_AFS_1 86 |
#define R200_EMIT_ATF_TFACTOR 87 |
#define R200_EMIT_PP_TXCTLALL_0 88 |
#define R200_EMIT_PP_TXCTLALL_1 89 |
#define R200_EMIT_PP_TXCTLALL_2 90 |
#define R200_EMIT_PP_TXCTLALL_3 91 |
#define R200_EMIT_PP_TXCTLALL_4 92 |
#define R200_EMIT_PP_TXCTLALL_5 93 |
#define R200_EMIT_VAP_PVS_CNTL 94 |
#define RADEON_MAX_STATE_PACKETS 95 |
/* Commands understood by cmd_buffer ioctl. More can be added but |
* obviously these can't be removed or changed: |
*/ |
#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */ |
#define RADEON_CMD_SCALARS 2 /* emit scalar data */ |
#define RADEON_CMD_VECTORS 3 /* emit vector data */ |
#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */ |
#define RADEON_CMD_PACKET3 5 /* emit hw packet */ |
#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */ |
#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */ |
#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note: |
* doesn't make the cpu wait, just |
* the graphics hardware */ |
#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */ |
typedef union { |
int i; |
struct { |
unsigned char cmd_type, pad0, pad1, pad2; |
} header; |
struct { |
unsigned char cmd_type, packet_id, pad0, pad1; |
} packet; |
struct { |
unsigned char cmd_type, offset, stride, count; |
} scalars; |
struct { |
unsigned char cmd_type, offset, stride, count; |
} vectors; |
struct { |
unsigned char cmd_type, addr_lo, addr_hi, count; |
} veclinear; |
struct { |
unsigned char cmd_type, buf_idx, pad0, pad1; |
} dma; |
struct { |
unsigned char cmd_type, flags, pad0, pad1; |
} wait; |
} drm_radeon_cmd_header_t; |
#define RADEON_WAIT_2D 0x1 |
#define RADEON_WAIT_3D 0x2 |
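/* |
 * Illustrative sketch (not part of the original header): build a header for |
 * the legacy cmd_buffer stream. Each 32-bit header selects a command type; |
 * the union views above give byte-level access to the per-command fields. |
 */ |
#include <string.h> |
static drm_radeon_cmd_header_t radeon_wait_header(void) |
{ |
	drm_radeon_cmd_header_t h; |
	memset(&h, 0, sizeof(h)); |
	h.wait.cmd_type = RADEON_CMD_WAIT; |
	h.wait.flags = RADEON_WAIT_2D | RADEON_WAIT_3D;	/* idle both engines */ |
	return h; |
} |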
/* Allowed parameters for R300_CMD_PACKET3 |
*/ |
#define R300_CMD_PACKET3_CLEAR 0 |
#define R300_CMD_PACKET3_RAW 1 |
/* Commands understood by cmd_buffer ioctl for R300. |
* The interface has not been stabilized, so some of these may be removed |
* and eventually reordered before stabilization. |
*/ |
#define R300_CMD_PACKET0 1 |
#define R300_CMD_VPU 2 /* emit vertex program upload */ |
#define R300_CMD_PACKET3 3 /* emit a packet3 */ |
#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */ |
#define R300_CMD_CP_DELAY 5 |
#define R300_CMD_DMA_DISCARD 6 |
#define R300_CMD_WAIT 7 |
# define R300_WAIT_2D 0x1 |
# define R300_WAIT_3D 0x2 |
/* These two defines are DOING IT WRONG - however, |
 * we have userspace which relies on using them. |
 * The wait interface is kept for backwards compatibility; |
 * new code should use the NEW_WAIT defines below. |
 * THESE ARE NOT BIT FIELDS. |
 */ |
# define R300_WAIT_2D_CLEAN 0x3 |
# define R300_WAIT_3D_CLEAN 0x4 |
# define R300_NEW_WAIT_2D_3D 0x3 |
# define R300_NEW_WAIT_2D_2D_CLEAN 0x4 |
# define R300_NEW_WAIT_3D_3D_CLEAN 0x6 |
# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8 |
#define R300_CMD_SCRATCH 8 |
#define R300_CMD_R500FP 9 |
typedef union { |
unsigned int u; |
struct { |
unsigned char cmd_type, pad0, pad1, pad2; |
} header; |
struct { |
unsigned char cmd_type, count, reglo, reghi; |
} packet0; |
struct { |
unsigned char cmd_type, count, adrlo, adrhi; |
} vpu; |
struct { |
unsigned char cmd_type, packet, pad0, pad1; |
} packet3; |
struct { |
unsigned char cmd_type, packet; |
unsigned short count; /* amount of packet2 to emit */ |
} delay; |
struct { |
unsigned char cmd_type, buf_idx, pad0, pad1; |
} dma; |
struct { |
unsigned char cmd_type, flags, pad0, pad1; |
} wait; |
struct { |
unsigned char cmd_type, reg, n_bufs, flags; |
} scratch; |
struct { |
unsigned char cmd_type, count, adrlo, adrhi_flags; |
} r500fp; |
} drm_r300_cmd_header_t; |
#define RADEON_FRONT 0x1 |
#define RADEON_BACK 0x2 |
#define RADEON_DEPTH 0x4 |
#define RADEON_STENCIL 0x8 |
#define RADEON_CLEAR_FASTZ 0x80000000 |
#define RADEON_USE_HIERZ 0x40000000 |
#define RADEON_USE_COMP_ZBUF 0x20000000 |
#define R500FP_CONSTANT_TYPE (1 << 1) |
#define R500FP_CONSTANT_CLAMP (1 << 2) |
/* Primitive types |
*/ |
#define RADEON_POINTS 0x1 |
#define RADEON_LINES 0x2 |
#define RADEON_LINE_STRIP 0x3 |
#define RADEON_TRIANGLES 0x4 |
#define RADEON_TRIANGLE_FAN 0x5 |
#define RADEON_TRIANGLE_STRIP 0x6 |
/* Vertex/indirect buffer size |
*/ |
#define RADEON_BUFFER_SIZE 65536 |
/* Byte offsets for indirect buffer data |
*/ |
#define RADEON_INDEX_PRIM_OFFSET 20 |
#define RADEON_SCRATCH_REG_OFFSET 32 |
#define R600_SCRATCH_REG_OFFSET 256 |
#define RADEON_NR_SAREA_CLIPRECTS 12 |
/* There are 2 heaps (local/GART). Each region within a heap is a |
* minimum of 64k, and there are at most 64 of them per heap. |
*/ |
#define RADEON_LOCAL_TEX_HEAP 0 |
#define RADEON_GART_TEX_HEAP 1 |
#define RADEON_NR_TEX_HEAPS 2 |
#define RADEON_NR_TEX_REGIONS 64 |
#define RADEON_LOG_TEX_GRANULARITY 16 |
#define RADEON_MAX_TEXTURE_LEVELS 12 |
#define RADEON_MAX_TEXTURE_UNITS 3 |
#define RADEON_MAX_SURFACES 8 |
/* Blits have strict offset rules. All blit offsets must be aligned on |
* a 1K-byte boundary. |
*/ |
#define RADEON_OFFSET_SHIFT 10 |
#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT) |
#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1) |
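/* |
 * Illustrative helper (not part of the original header): round an arbitrary |
 * offset up to the next valid blit offset using the masks above. |
 */ |
static inline unsigned int radeon_blit_align(unsigned int offset) |
{ |
	return (offset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK; |
} |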
#endif /* __RADEON_SAREA_DEFINES__ */ |
typedef struct { |
unsigned int red; |
unsigned int green; |
unsigned int blue; |
unsigned int alpha; |
} radeon_color_regs_t; |
typedef struct { |
/* Context state */ |
unsigned int pp_misc; /* 0x1c14 */ |
unsigned int pp_fog_color; |
unsigned int re_solid_color; |
unsigned int rb3d_blendcntl; |
unsigned int rb3d_depthoffset; |
unsigned int rb3d_depthpitch; |
unsigned int rb3d_zstencilcntl; |
unsigned int pp_cntl; /* 0x1c38 */ |
unsigned int rb3d_cntl; |
unsigned int rb3d_coloroffset; |
unsigned int re_width_height; |
unsigned int rb3d_colorpitch; |
unsigned int se_cntl; |
/* Vertex format state */ |
unsigned int se_coord_fmt; /* 0x1c50 */ |
/* Line state */ |
unsigned int re_line_pattern; /* 0x1cd0 */ |
unsigned int re_line_state; |
unsigned int se_line_width; /* 0x1db8 */ |
/* Bumpmap state */ |
unsigned int pp_lum_matrix; /* 0x1d00 */ |
unsigned int pp_rot_matrix_0; /* 0x1d58 */ |
unsigned int pp_rot_matrix_1; |
/* Mask state */ |
unsigned int rb3d_stencilrefmask; /* 0x1d7c */ |
unsigned int rb3d_ropcntl; |
unsigned int rb3d_planemask; |
/* Viewport state */ |
unsigned int se_vport_xscale; /* 0x1d98 */ |
unsigned int se_vport_xoffset; |
unsigned int se_vport_yscale; |
unsigned int se_vport_yoffset; |
unsigned int se_vport_zscale; |
unsigned int se_vport_zoffset; |
/* Setup state */ |
unsigned int se_cntl_status; /* 0x2140 */ |
/* Misc state */ |
unsigned int re_top_left; /* 0x26c0 */ |
unsigned int re_misc; |
} drm_radeon_context_regs_t; |
typedef struct { |
/* Zbias state */ |
unsigned int se_zbias_factor; /* 0x1dac */ |
unsigned int se_zbias_constant; |
} drm_radeon_context2_regs_t; |
/* Setup registers for each texture unit |
*/ |
typedef struct { |
unsigned int pp_txfilter; |
unsigned int pp_txformat; |
unsigned int pp_txoffset; |
unsigned int pp_txcblend; |
unsigned int pp_txablend; |
unsigned int pp_tfactor; |
unsigned int pp_border_color; |
} drm_radeon_texture_regs_t; |
typedef struct { |
unsigned int start; |
unsigned int finish; |
unsigned int prim:8; |
unsigned int stateidx:8; |
unsigned int numverts:16; /* overloaded as offset/64 for elt prims */ |
unsigned int vc_format; /* vertex format */ |
} drm_radeon_prim_t; |
typedef struct { |
drm_radeon_context_regs_t context; |
drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS]; |
drm_radeon_context2_regs_t context2; |
unsigned int dirty; |
} drm_radeon_state_t; |
typedef struct { |
/* The channel for communication of state information to the |
* kernel on firing a vertex buffer with either of the |
* obsoleted vertex/index ioctls. |
*/ |
drm_radeon_context_regs_t context_state; |
drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS]; |
unsigned int dirty; |
unsigned int vertsize; |
unsigned int vc_format; |
/* The current cliprects, or a subset thereof. |
*/ |
struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS]; |
unsigned int nbox; |
/* Counters for client-side throttling of rendering clients. |
*/ |
unsigned int last_frame; |
unsigned int last_dispatch; |
unsigned int last_clear; |
struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS + |
1]; |
unsigned int tex_age[RADEON_NR_TEX_HEAPS]; |
int ctx_owner; |
int pfState; /* number of 3d windows (0, 1, 2 or more) */ |
int pfCurrentPage; /* which buffer is being displayed? */ |
int crtc2_base; /* CRTC2 frame offset */ |
int tiling_enabled; /* set by drm, read by 2d + 3d clients */ |
} drm_radeon_sarea_t; |
/* WARNING: If you change any of these defines, make sure to change the |
* defines in the Xserver file (xf86drmRadeon.h) |
* |
* KW: actually it's illegal to change any of this (backwards compatibility). |
*/ |
/* Radeon specific ioctls |
* The device specific ioctl range is 0x40 to 0x79. |
*/ |
#define DRM_RADEON_CP_INIT 0x00 |
#define DRM_RADEON_CP_START 0x01 |
#define DRM_RADEON_CP_STOP 0x02 |
#define DRM_RADEON_CP_RESET 0x03 |
#define DRM_RADEON_CP_IDLE 0x04 |
#define DRM_RADEON_RESET 0x05 |
#define DRM_RADEON_FULLSCREEN 0x06 |
#define DRM_RADEON_SWAP 0x07 |
#define DRM_RADEON_CLEAR 0x08 |
#define DRM_RADEON_VERTEX 0x09 |
#define DRM_RADEON_INDICES 0x0A |
#define DRM_RADEON_NOT_USED |
#define DRM_RADEON_STIPPLE 0x0C |
#define DRM_RADEON_INDIRECT 0x0D |
#define DRM_RADEON_TEXTURE 0x0E |
#define DRM_RADEON_VERTEX2 0x0F |
#define DRM_RADEON_CMDBUF 0x10 |
#define DRM_RADEON_GETPARAM 0x11 |
#define DRM_RADEON_FLIP 0x12 |
#define DRM_RADEON_ALLOC 0x13 |
#define DRM_RADEON_FREE 0x14 |
#define DRM_RADEON_INIT_HEAP 0x15 |
#define DRM_RADEON_IRQ_EMIT 0x16 |
#define DRM_RADEON_IRQ_WAIT 0x17 |
#define DRM_RADEON_CP_RESUME 0x18 |
#define DRM_RADEON_SETPARAM 0x19 |
#define DRM_RADEON_SURF_ALLOC 0x1a |
#define DRM_RADEON_SURF_FREE 0x1b |
/* KMS ioctl */ |
#define DRM_RADEON_GEM_INFO 0x1c |
#define DRM_RADEON_GEM_CREATE 0x1d |
#define DRM_RADEON_GEM_MMAP 0x1e |
#define DRM_RADEON_GEM_PREAD 0x21 |
#define DRM_RADEON_GEM_PWRITE 0x22 |
#define DRM_RADEON_GEM_SET_DOMAIN 0x23 |
#define DRM_RADEON_GEM_WAIT_IDLE 0x24 |
#define DRM_RADEON_CS 0x26 |
#define DRM_RADEON_INFO 0x27 |
#define DRM_RADEON_GEM_SET_TILING 0x28 |
#define DRM_RADEON_GEM_GET_TILING 0x29 |
#define DRM_RADEON_GEM_BUSY 0x2a |
#define DRM_RADEON_GEM_VA 0x2b |
#define DRM_RADEON_GEM_OP 0x2c |
#define DRM_RADEON_GEM_USERPTR 0x2d |
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) |
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) |
#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t) |
#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESET) |
#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE) |
#define DRM_IOCTL_RADEON_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_RESET) |
#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t) |
#define DRM_IOCTL_RADEON_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_SWAP) |
#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t) |
#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t) |
#define DRM_IOCTL_RADEON_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t) |
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t) |
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t) |
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t) |
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t) |
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t) |
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t) |
#define DRM_IOCTL_RADEON_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_FLIP) |
#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t) |
#define DRM_IOCTL_RADEON_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t) |
#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t) |
#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t) |
#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t) |
#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME) |
#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t) |
#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t) |
#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t) |
/* KMS */ |
#define DRM_IOCTL_RADEON_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info) |
#define DRM_IOCTL_RADEON_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create) |
#define DRM_IOCTL_RADEON_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap) |
#define DRM_IOCTL_RADEON_GEM_PREAD DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread) |
#define DRM_IOCTL_RADEON_GEM_PWRITE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite) |
#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain) |
#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle) |
#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs) |
#define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info) |
#define DRM_IOCTL_RADEON_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling) |
#define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling) |
#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy) |
#define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va) |
#define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op) |
#define DRM_IOCTL_RADEON_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_USERPTR, struct drm_radeon_gem_userptr) |
typedef struct drm_radeon_init { |
enum { |
RADEON_INIT_CP = 0x01, |
RADEON_CLEANUP_CP = 0x02, |
RADEON_INIT_R200_CP = 0x03, |
RADEON_INIT_R300_CP = 0x04, |
RADEON_INIT_R600_CP = 0x05 |
} func; |
unsigned long sarea_priv_offset; |
int is_pci; |
int cp_mode; |
int gart_size; |
int ring_size; |
int usec_timeout; |
unsigned int fb_bpp; |
unsigned int front_offset, front_pitch; |
unsigned int back_offset, back_pitch; |
unsigned int depth_bpp; |
unsigned int depth_offset, depth_pitch; |
unsigned long fb_offset; |
unsigned long mmio_offset; |
unsigned long ring_offset; |
unsigned long ring_rptr_offset; |
unsigned long buffers_offset; |
unsigned long gart_textures_offset; |
} drm_radeon_init_t; |
typedef struct drm_radeon_cp_stop { |
int flush; |
int idle; |
} drm_radeon_cp_stop_t; |
typedef struct drm_radeon_fullscreen { |
enum { |
RADEON_INIT_FULLSCREEN = 0x01, |
RADEON_CLEANUP_FULLSCREEN = 0x02 |
} func; |
} drm_radeon_fullscreen_t; |
#define CLEAR_X1 0 |
#define CLEAR_Y1 1 |
#define CLEAR_X2 2 |
#define CLEAR_Y2 3 |
#define CLEAR_DEPTH 4 |
typedef union drm_radeon_clear_rect { |
float f[5]; |
unsigned int ui[5]; |
} drm_radeon_clear_rect_t; |
typedef struct drm_radeon_clear { |
unsigned int flags; |
unsigned int clear_color; |
unsigned int clear_depth; |
unsigned int color_mask; |
unsigned int depth_mask; /* misnamed field: should be stencil */ |
drm_radeon_clear_rect_t __user *depth_boxes; |
} drm_radeon_clear_t; |
typedef struct drm_radeon_vertex { |
int prim; |
int idx; /* Index of vertex buffer */ |
int count; /* Number of vertices in buffer */ |
int discard; /* Client finished with buffer? */ |
} drm_radeon_vertex_t; |
typedef struct drm_radeon_indices { |
int prim; |
int idx; |
int start; |
int end; |
int discard; /* Client finished with buffer? */ |
} drm_radeon_indices_t; |
/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices |
* - allows multiple primitives and state changes in a single ioctl |
* - supports driver change to emit native primitives |
*/ |
typedef struct drm_radeon_vertex2 { |
int idx; /* Index of vertex buffer */ |
int discard; /* Client finished with buffer? */ |
int nr_states; |
drm_radeon_state_t __user *state; |
int nr_prims; |
drm_radeon_prim_t __user *prim; |
} drm_radeon_vertex2_t; |
/* v1.3 - obsoletes drm_radeon_vertex2 |
* - allows arbitrarily large cliprect list |
* - allows updating of tcl packet, vector and scalar state |
* - allows memory-efficient description of state updates |
* - allows state to be emitted without a primitive |
* (for clears, ctx switches) |
* - allows more than one dma buffer to be referenced per ioctl |
* - supports tcl driver |
* - may be extended in future versions with new cmd types, packets |
*/ |
typedef struct drm_radeon_cmd_buffer { |
int bufsz; |
char __user *buf; |
int nbox; |
struct drm_clip_rect __user *boxes; |
} drm_radeon_cmd_buffer_t; |
typedef struct drm_radeon_tex_image { |
unsigned int x, y; /* Blit coordinates */ |
unsigned int width, height; |
const void __user *data; |
} drm_radeon_tex_image_t; |
typedef struct drm_radeon_texture { |
unsigned int offset; |
int pitch; |
int format; |
int width; /* Texture image coordinates */ |
int height; |
drm_radeon_tex_image_t __user *image; |
} drm_radeon_texture_t; |
typedef struct drm_radeon_stipple { |
unsigned int __user *mask; |
} drm_radeon_stipple_t; |
typedef struct drm_radeon_indirect { |
int idx; |
int start; |
int end; |
int discard; |
} drm_radeon_indirect_t; |
/* enum for card type parameters */ |
#define RADEON_CARD_PCI 0 |
#define RADEON_CARD_AGP 1 |
#define RADEON_CARD_PCIE 2 |
/* 1.3: An ioctl to get parameters that aren't available to the 3d |
* client any other way. |
*/ |
#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */ |
#define RADEON_PARAM_LAST_FRAME 2 |
#define RADEON_PARAM_LAST_DISPATCH 3 |
#define RADEON_PARAM_LAST_CLEAR 4 |
/* Added with DRM version 1.6. */ |
#define RADEON_PARAM_IRQ_NR 5 |
#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */ |
/* Added with DRM version 1.8. */ |
#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */ |
#define RADEON_PARAM_STATUS_HANDLE 8 |
#define RADEON_PARAM_SAREA_HANDLE 9 |
#define RADEON_PARAM_GART_TEX_HANDLE 10 |
#define RADEON_PARAM_SCRATCH_OFFSET 11 |
#define RADEON_PARAM_CARD_TYPE 12 |
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */ |
#define RADEON_PARAM_FB_LOCATION 14 /* FB location */ |
#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */ |
#define RADEON_PARAM_DEVICE_ID 16 |
#define RADEON_PARAM_NUM_Z_PIPES 17 /* num Z pipes */ |
typedef struct drm_radeon_getparam { |
int param; |
void __user *value; |
} drm_radeon_getparam_t; |
/* 1.6: Set up a memory manager for regions of shared memory: |
*/ |
#define RADEON_MEM_REGION_GART 1 |
#define RADEON_MEM_REGION_FB 2 |
typedef struct drm_radeon_mem_alloc { |
int region; |
int alignment; |
int size; |
int __user *region_offset; /* offset from start of fb or GART */ |
} drm_radeon_mem_alloc_t; |
typedef struct drm_radeon_mem_free { |
int region; |
int region_offset; |
} drm_radeon_mem_free_t; |
typedef struct drm_radeon_mem_init_heap { |
int region; |
int size; |
int start; |
} drm_radeon_mem_init_heap_t; |
/* 1.6: Userspace can request & wait on irq's: |
*/ |
typedef struct drm_radeon_irq_emit { |
int __user *irq_seq; |
} drm_radeon_irq_emit_t; |
typedef struct drm_radeon_irq_wait { |
int irq_seq; |
} drm_radeon_irq_wait_t; |
/* 1.10: Clients tell the DRM where they think the framebuffer is located in |
* the card's address space, via a new generic ioctl to set parameters |
*/ |
typedef struct drm_radeon_setparam { |
unsigned int param; |
__s64 value; |
} drm_radeon_setparam_t; |
#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */ |
#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */ |
#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */ |
#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */ |
#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */ |
#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */ |
/* 1.14: Clients can allocate/free a surface |
*/ |
typedef struct drm_radeon_surface_alloc { |
unsigned int address; |
unsigned int size; |
unsigned int flags; |
} drm_radeon_surface_alloc_t; |
typedef struct drm_radeon_surface_free { |
unsigned int address; |
} drm_radeon_surface_free_t; |
#define DRM_RADEON_VBLANK_CRTC1 1 |
#define DRM_RADEON_VBLANK_CRTC2 2 |
/* |
* Kernel modesetting world below. |
*/ |
#define RADEON_GEM_DOMAIN_CPU 0x1 |
#define RADEON_GEM_DOMAIN_GTT 0x2 |
#define RADEON_GEM_DOMAIN_VRAM 0x4 |
struct drm_radeon_gem_info { |
uint64_t gart_size; |
uint64_t vram_size; |
uint64_t vram_visible; |
}; |
#define RADEON_GEM_NO_BACKING_STORE (1 << 0) |
#define RADEON_GEM_GTT_UC (1 << 1) |
#define RADEON_GEM_GTT_WC (1 << 2) |
/* BO is expected to be accessed by the CPU */ |
#define RADEON_GEM_CPU_ACCESS (1 << 3) |
/* CPU access is not expected to work for this BO */ |
#define RADEON_GEM_NO_CPU_ACCESS (1 << 4) |
struct drm_radeon_gem_create { |
uint64_t size; |
uint64_t alignment; |
uint32_t handle; |
uint32_t initial_domain; |
uint32_t flags; |
}; |
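/* |
 * Illustrative sketch (not part of the original header): allocating a |
 * buffer object with DRM_IOCTL_RADEON_GEM_CREATE. `fd' (an open render |
 * node) and the size/alignment values are assumptions of this sketch; |
 * real code would typically go through drmIoctl() to retry on EINTR. |
 */ |
#include <stdint.h> |
#include <sys/ioctl.h> |
static inline int radeon_gem_create_sketch(int fd, uint32_t *handle_out) |
{ |
	struct drm_radeon_gem_create req = { |
		.size           = 1024 * 1024, /* 1 MiB, illustrative */ |
		.alignment      = 4096, |
		.initial_domain = RADEON_GEM_DOMAIN_VRAM, |
		.flags          = 0, |
	}; |
 |
	if (ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &req)) |
		return -1;        /* errno holds the failure reason */ |
	*handle_out = req.handle; /* filled in by the kernel on success */ |
	return 0; |
} |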
/* |
* This is not a reliable API, and you should expect it to fail for any |
* number of reasons; keep a fallback path that does not use userptr to |
* perform the operation. |
*/ |
#define RADEON_GEM_USERPTR_READONLY (1 << 0) |
#define RADEON_GEM_USERPTR_ANONONLY (1 << 1) |
#define RADEON_GEM_USERPTR_VALIDATE (1 << 2) |
#define RADEON_GEM_USERPTR_REGISTER (1 << 3) |
struct drm_radeon_gem_userptr { |
uint64_t addr; |
uint64_t size; |
uint32_t flags; |
uint32_t handle; |
}; |
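/* |
 * Hedged sketch honoring the warning above: try the userptr path and |
 * treat failure as the expected case. `fd', `addr' and `size' are |
 * caller-supplied assumptions; on error the caller must fall back to a |
 * normal BO plus an explicit copy. |
 */ |
#include <stdint.h> |
#include <sys/ioctl.h> |
static inline int radeon_try_userptr_sketch(int fd, uint64_t addr, uint64_t size, uint32_t *handle) |
{ |
	struct drm_radeon_gem_userptr up = { |
		.addr  = addr, |
		.size  = size, |
		.flags = RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_REGISTER | RADEON_GEM_USERPTR_VALIDATE, |
	}; |
 |
	if (ioctl(fd, DRM_IOCTL_RADEON_GEM_USERPTR, &up)) |
		return -1; /* expected: take the non-userptr fallback path */ |
	*handle = up.handle; |
	return 0; |
} |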
#define RADEON_TILING_MACRO 0x1 |
#define RADEON_TILING_MICRO 0x2 |
#define RADEON_TILING_SWAP_16BIT 0x4 |
#define RADEON_TILING_SWAP_32BIT 0x8 |
/* this object requires a surface when mapped - i.e. front buffer */ |
#define RADEON_TILING_SURFACE 0x10 |
#define RADEON_TILING_MICRO_SQUARE 0x20 |
#define RADEON_TILING_EG_BANKW_SHIFT 8 |
#define RADEON_TILING_EG_BANKW_MASK 0xf |
#define RADEON_TILING_EG_BANKH_SHIFT 12 |
#define RADEON_TILING_EG_BANKH_MASK 0xf |
#define RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT 16 |
#define RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK 0xf |
#define RADEON_TILING_EG_TILE_SPLIT_SHIFT 24 |
#define RADEON_TILING_EG_TILE_SPLIT_MASK 0xf |
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT 28 |
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf |
struct drm_radeon_gem_set_tiling { |
uint32_t handle; |
uint32_t tiling_flags; |
uint32_t pitch; |
}; |
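/* |
 * Sketch (an illustration, not from the original header): packing the |
 * evergreen bank width/height fields into tiling_flags with the |
 * shift/mask pairs above, for use with DRM_IOCTL_RADEON_GEM_SET_TILING. |
 */ |
#include <stdint.h> |
static inline uint32_t radeon_eg_tiling_flags_sketch(uint32_t bankw, uint32_t bankh) |
{ |
	uint32_t flags = RADEON_TILING_MACRO; |
 |
	flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) << RADEON_TILING_EG_BANKW_SHIFT; |
	flags |= (bankh & RADEON_TILING_EG_BANKH_MASK) << RADEON_TILING_EG_BANKH_SHIFT; |
	return flags; |
} |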
struct drm_radeon_gem_get_tiling { |
uint32_t handle; |
uint32_t tiling_flags; |
uint32_t pitch; |
}; |
struct drm_radeon_gem_mmap { |
uint32_t handle; |
uint32_t pad; |
uint64_t offset; |
uint64_t size; |
uint64_t addr_ptr; |
}; |
struct drm_radeon_gem_set_domain { |
uint32_t handle; |
uint32_t read_domains; |
uint32_t write_domain; |
}; |
struct drm_radeon_gem_wait_idle { |
uint32_t handle; |
uint32_t pad; |
}; |
struct drm_radeon_gem_busy { |
uint32_t handle; |
uint32_t domain; |
}; |
struct drm_radeon_gem_pread { |
/** Handle for the object being read. */ |
uint32_t handle; |
uint32_t pad; |
/** Offset into the object to read from */ |
uint64_t offset; |
/** Length of data to read */ |
uint64_t size; |
/** Pointer to write the data into. */ |
/* void *, but pointers are not 32/64 compatible */ |
uint64_t data_ptr; |
}; |
struct drm_radeon_gem_pwrite { |
/** Handle for the object being written to. */ |
uint32_t handle; |
uint32_t pad; |
/** Offset into the object to write to */ |
uint64_t offset; |
/** Length of data to write */ |
uint64_t size; |
/** Pointer to read the data from. */ |
/* void *, but pointers are not 32/64 compatible */ |
uint64_t data_ptr; |
}; |
/* Sets or returns a value associated with a buffer. */ |
struct drm_radeon_gem_op { |
uint32_t handle; /* buffer */ |
uint32_t op; /* RADEON_GEM_OP_* */ |
uint64_t value; /* input or return value */ |
}; |
#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0 |
#define RADEON_GEM_OP_SET_INITIAL_DOMAIN 1 |
#define RADEON_VA_MAP 1 |
#define RADEON_VA_UNMAP 2 |
#define RADEON_VA_RESULT_OK 0 |
#define RADEON_VA_RESULT_ERROR 1 |
#define RADEON_VA_RESULT_VA_EXIST 2 |
#define RADEON_VM_PAGE_VALID (1 << 0) |
#define RADEON_VM_PAGE_READABLE (1 << 1) |
#define RADEON_VM_PAGE_WRITEABLE (1 << 2) |
#define RADEON_VM_PAGE_SYSTEM (1 << 3) |
#define RADEON_VM_PAGE_SNOOPED (1 << 4) |
struct drm_radeon_gem_va { |
uint32_t handle; |
uint32_t operation; |
uint32_t vm_id; |
uint32_t flags; |
uint64_t offset; |
}; |
#define RADEON_CHUNK_ID_RELOCS 0x01 |
#define RADEON_CHUNK_ID_IB 0x02 |
#define RADEON_CHUNK_ID_FLAGS 0x03 |
#define RADEON_CHUNK_ID_CONST_IB 0x04 |
/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */ |
#define RADEON_CS_KEEP_TILING_FLAGS 0x01 |
#define RADEON_CS_USE_VM 0x02 |
#define RADEON_CS_END_OF_FRAME 0x04 /* a hint from userspace which CS is the last one */ |
/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */ |
#define RADEON_CS_RING_GFX 0 |
#define RADEON_CS_RING_COMPUTE 1 |
#define RADEON_CS_RING_DMA 2 |
#define RADEON_CS_RING_UVD 3 |
#define RADEON_CS_RING_VCE 4 |
/* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */ |
/* 0 = normal, + = higher priority, - = lower priority */ |
struct drm_radeon_cs_chunk { |
uint32_t chunk_id; |
uint32_t length_dw; |
uint64_t chunk_data; |
}; |
/* drm_radeon_cs_reloc.flags */ |
#define RADEON_RELOC_PRIO_MASK (0xf << 0) |
struct drm_radeon_cs_reloc { |
uint32_t handle; |
uint32_t read_domains; |
uint32_t write_domain; |
uint32_t flags; |
}; |
struct drm_radeon_cs { |
uint32_t num_chunks; |
uint32_t cs_id; |
/* user-space address of an array of uint64_t, each entry pointing to a cs chunk */ |
uint64_t chunks; |
/* updates to the limits after this CS ioctl */ |
uint64_t gart_limit; |
uint64_t vram_limit; |
}; |
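/* |
 * Sketch of the double indirection documented above: `chunks' holds the |
 * user-space address of an array of uint64_t, each of which is in turn |
 * the address of one struct drm_radeon_cs_chunk. `ib' and `ib_dw' (an |
 * indirect buffer and its length in dwords) are assumptions here. |
 */ |
#include <stdint.h> |
#include <sys/ioctl.h> |
static inline int radeon_submit_ib_sketch(int fd, uint32_t *ib, uint32_t ib_dw) |
{ |
	struct drm_radeon_cs_chunk chunk = { |
		.chunk_id   = RADEON_CHUNK_ID_IB, |
		.length_dw  = ib_dw, |
		.chunk_data = (uint64_t)(uintptr_t)ib, |
	}; |
	uint64_t chunk_array[1] = { (uint64_t)(uintptr_t)&chunk }; |
	struct drm_radeon_cs cs = { |
		.num_chunks = 1, |
		.chunks     = (uint64_t)(uintptr_t)chunk_array, |
	}; |
 |
	return ioctl(fd, DRM_IOCTL_RADEON_CS, &cs); |
} |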
#define RADEON_INFO_DEVICE_ID 0x00 |
#define RADEON_INFO_NUM_GB_PIPES 0x01 |
#define RADEON_INFO_NUM_Z_PIPES 0x02 |
#define RADEON_INFO_ACCEL_WORKING 0x03 |
#define RADEON_INFO_CRTC_FROM_ID 0x04 |
#define RADEON_INFO_ACCEL_WORKING2 0x05 |
#define RADEON_INFO_TILING_CONFIG 0x06 |
#define RADEON_INFO_WANT_HYPERZ 0x07 |
#define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */ |
#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */ |
#define RADEON_INFO_NUM_BACKENDS 0x0a /* DB/backends for r600+ - need for OQ */ |
#define RADEON_INFO_NUM_TILE_PIPES 0x0b /* tile pipes for r600+ */ |
#define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */ |
#define RADEON_INFO_BACKEND_MAP 0x0d /* pipe to backend map, needed by mesa */ |
/* virtual address start, va < start are reserved by the kernel */ |
#define RADEON_INFO_VA_START 0x0e |
/* maximum size of ib using the virtual memory cs */ |
#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f |
/* max pipes - needed for compute shaders */ |
#define RADEON_INFO_MAX_PIPES 0x10 |
/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */ |
#define RADEON_INFO_TIMESTAMP 0x11 |
/* max shader engines (SE) - needed for geometry shaders, etc. */ |
#define RADEON_INFO_MAX_SE 0x12 |
/* max SH per SE */ |
#define RADEON_INFO_MAX_SH_PER_SE 0x13 |
/* fast fb access is enabled */ |
#define RADEON_INFO_FASTFB_WORKING 0x14 |
/* query if a RADEON_CS_RING_* submission is supported */ |
#define RADEON_INFO_RING_WORKING 0x15 |
/* SI tile mode array */ |
#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16 |
/* query if CP DMA is supported on the compute ring */ |
#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17 |
/* CIK macrotile mode array */ |
#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18 |
/* query the number of render backends */ |
#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19 |
/* max engine clock - needed for OpenCL */ |
#define RADEON_INFO_MAX_SCLK 0x1a |
/* version of VCE firmware */ |
#define RADEON_INFO_VCE_FW_VERSION 0x1b |
/* version of VCE feedback */ |
#define RADEON_INFO_VCE_FB_VERSION 0x1c |
#define RADEON_INFO_NUM_BYTES_MOVED 0x1d |
#define RADEON_INFO_VRAM_USAGE 0x1e |
#define RADEON_INFO_GTT_USAGE 0x1f |
#define RADEON_INFO_ACTIVE_CU_COUNT 0x20 |
struct drm_radeon_info { |
uint32_t request; |
uint32_t pad; |
uint64_t value; |
}; |
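/* |
 * Sketch: querying the PCI device id via DRM_IOCTL_RADEON_INFO. Note |
 * that for this request @value carries a user-space *pointer* to the |
 * result, cast to uint64_t; the helper name is an assumption. |
 */ |
#include <stdint.h> |
#include <sys/ioctl.h> |
static inline int radeon_get_device_id_sketch(int fd, uint32_t *dev_id) |
{ |
	uint32_t value = 0; |
	struct drm_radeon_info info = { |
		.request = RADEON_INFO_DEVICE_ID, |
		.value   = (uint64_t)(uintptr_t)&value, |
	}; |
 |
	if (ioctl(fd, DRM_IOCTL_RADEON_INFO, &info)) |
		return -1; |
	*dev_id = value; |
	return 0; |
} |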
/* These correspond to the tile index to use; this explicitly states |
 * the API that is otherwise implicitly defined by the tile mode array. |
 */ |
#define SI_TILE_MODE_COLOR_LINEAR_ALIGNED 8 |
#define SI_TILE_MODE_COLOR_1D 13 |
#define SI_TILE_MODE_COLOR_1D_SCANOUT 9 |
#define SI_TILE_MODE_COLOR_2D_8BPP 14 |
#define SI_TILE_MODE_COLOR_2D_16BPP 15 |
#define SI_TILE_MODE_COLOR_2D_32BPP 16 |
#define SI_TILE_MODE_COLOR_2D_64BPP 17 |
#define SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP 11 |
#define SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP 12 |
#define SI_TILE_MODE_DEPTH_STENCIL_1D 4 |
#define SI_TILE_MODE_DEPTH_STENCIL_2D 0 |
#define SI_TILE_MODE_DEPTH_STENCIL_2D_2AA 3 |
#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3 |
#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2 |
#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5 |
#endif |
/drivers/include/uapi/drm/vmwgfx_drm.h |
---|
0,0 → 1,1062 |
/************************************************************************** |
* |
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
**************************************************************************/ |
#ifndef __VMWGFX_DRM_H__ |
#define __VMWGFX_DRM_H__ |
#ifndef __KERNEL__ |
#include <drm/drm.h> |
#endif |
#define DRM_VMW_MAX_SURFACE_FACES 6 |
#define DRM_VMW_MAX_MIP_LEVELS 24 |
#define DRM_VMW_GET_PARAM 0 |
#define DRM_VMW_ALLOC_DMABUF 1 |
#define DRM_VMW_UNREF_DMABUF 2 |
#define DRM_VMW_CURSOR_BYPASS 3 |
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/ |
#define DRM_VMW_CONTROL_STREAM 4 |
#define DRM_VMW_CLAIM_STREAM 5 |
#define DRM_VMW_UNREF_STREAM 6 |
/* guarded by DRM_VMW_PARAM_3D == 1 */ |
#define DRM_VMW_CREATE_CONTEXT 7 |
#define DRM_VMW_UNREF_CONTEXT 8 |
#define DRM_VMW_CREATE_SURFACE 9 |
#define DRM_VMW_UNREF_SURFACE 10 |
#define DRM_VMW_REF_SURFACE 11 |
#define DRM_VMW_EXECBUF 12 |
#define DRM_VMW_GET_3D_CAP 13 |
#define DRM_VMW_FENCE_WAIT 14 |
#define DRM_VMW_FENCE_SIGNALED 15 |
#define DRM_VMW_FENCE_UNREF 16 |
#define DRM_VMW_FENCE_EVENT 17 |
#define DRM_VMW_PRESENT 18 |
#define DRM_VMW_PRESENT_READBACK 19 |
#define DRM_VMW_UPDATE_LAYOUT 20 |
#define DRM_VMW_CREATE_SHADER 21 |
#define DRM_VMW_UNREF_SHADER 22 |
#define DRM_VMW_GB_SURFACE_CREATE 23 |
#define DRM_VMW_GB_SURFACE_REF 24 |
#define DRM_VMW_SYNCCPU 25 |
/*************************************************************************/ |
/** |
* DRM_VMW_GET_PARAM - get device information. |
* |
* DRM_VMW_PARAM_FIFO_OFFSET: |
* Offset to use to map the first page of the FIFO read-only. |
* The fifo is mapped using the mmap() system call on the drm device. |
* |
* DRM_VMW_PARAM_OVERLAY_IOCTL: |
* Whether the driver supports the overlay ioctl. |
*/ |
#define DRM_VMW_PARAM_NUM_STREAMS 0 |
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1 |
#define DRM_VMW_PARAM_3D 2 |
#define DRM_VMW_PARAM_HW_CAPS 3 |
#define DRM_VMW_PARAM_FIFO_CAPS 4 |
#define DRM_VMW_PARAM_MAX_FB_SIZE 5 |
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6 |
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 |
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8 |
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 |
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10 |
/** |
* enum drm_vmw_handle_type - handle type for ref ioctls |
* |
*/ |
enum drm_vmw_handle_type { |
DRM_VMW_HANDLE_LEGACY = 0, |
DRM_VMW_HANDLE_PRIME = 1 |
}; |
/** |
* struct drm_vmw_getparam_arg |
* |
* @value: Returned value. //Out |
* @param: Parameter to query. //In. |
* |
* Argument to the DRM_VMW_GET_PARAM Ioctl. |
*/ |
struct drm_vmw_getparam_arg { |
uint64_t value; |
uint32_t param; |
uint32_t pad64; |
}; |
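/* |
 * Sketch: probing for 3D support. Unlike the radeon header above, this |
 * file only defines ioctl *offsets*, so the full request code below is |
 * composed the usual DRM way; the SKETCH_ macro name is an assumption, |
 * mirroring how the vmwgfx driver registers the ioctl. |
 */ |
#include <sys/ioctl.h> |
#define SKETCH_IOCTL_VMW_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, struct drm_vmw_getparam_arg) |
static inline int vmw_has_3d_sketch(int fd) |
{ |
	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D }; |
 |
	if (ioctl(fd, SKETCH_IOCTL_VMW_GETPARAM, &arg)) |
		return 0; |
	return arg.value != 0; |
} |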
/*************************************************************************/ |
/** |
* DRM_VMW_CREATE_CONTEXT - Create a host context. |
* |
* Allocates a device unique context id, and queues a create context command |
* for the host. Does not wait for host completion. |
*/ |
/** |
* struct drm_vmw_context_arg |
* |
* @cid: Device unique context ID. |
* |
* Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl. |
* Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl. |
*/ |
struct drm_vmw_context_arg { |
int32_t cid; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_CONTEXT - Free a host context. |
* |
* Frees a global context id, and queues a destroy host command for the host. |
* Does not wait for host completion. The context ID can be used directly |
* in the command stream and shows up as the same context ID on the host. |
*/ |
/*************************************************************************/ |
/** |
* DRM_VMW_CREATE_SURFACE - Create a host surface. |
* |
* Allocates a device unique surface id, and queues a create surface command |
* for the host. Does not wait for host completion. The surface ID can be |
* used directly in the command stream and shows up as the same surface |
* ID on the host. |
*/ |
/** |
* struct drm_vmw_surface_create_req |
* |
* @flags: Surface flags as understood by the host. |
* @format: Surface format as understood by the host. |
* @mip_levels: Number of mip levels for each face. |
* An unused face should have 0 encoded. |
* @size_addr: Address of a user-space array of struct drm_vmw_size |
* cast to an uint64_t for 32-64 bit compatibility. |
* The size of the array should equal the total number of mipmap levels. |
* @shareable: Boolean whether other clients (as identified by file descriptors) |
* may reference this surface. |
* @scanout: Boolean whether the surface is intended to be used as a |
* scanout. |
* |
* Input data to the DRM_VMW_CREATE_SURFACE Ioctl. |
* Output data from the DRM_VMW_REF_SURFACE Ioctl. |
*/ |
struct drm_vmw_surface_create_req { |
uint32_t flags; |
uint32_t format; |
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; |
uint64_t size_addr; |
int32_t shareable; |
int32_t scanout; |
}; |
/** |
* struct drm_vmw_surface_arg |
* |
* @sid: Surface id of created surface or surface to destroy or reference. |
* @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl. |
* |
* Output data from the DRM_VMW_CREATE_SURFACE Ioctl. |
* Input argument to the DRM_VMW_UNREF_SURFACE Ioctl. |
* Input argument to the DRM_VMW_REF_SURFACE Ioctl. |
*/ |
struct drm_vmw_surface_arg { |
int32_t sid; |
enum drm_vmw_handle_type handle_type; |
}; |
/** |
* struct drm_vmw_size |
* |
* @width - mip level width |
* @height - mip level height |
* @depth - mip level depth |
* |
* Description of a mip level. |
* Input data to the DRM_VMW_CREATE_SURFACE Ioctl. |
*/ |
struct drm_vmw_size { |
uint32_t width; |
uint32_t height; |
uint32_t depth; |
uint32_t pad64; |
}; |
/** |
* union drm_vmw_surface_create_arg |
* |
* @rep: Output data as described above. |
* @req: Input data as described above. |
* |
* Argument to the DRM_VMW_CREATE_SURFACE Ioctl. |
*/ |
union drm_vmw_surface_create_arg { |
struct drm_vmw_surface_arg rep; |
struct drm_vmw_surface_create_req req; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_REF_SURFACE - Reference a host surface. |
* |
* Puts a reference on a host surface with a given sid, as previously |
* returned by the DRM_VMW_CREATE_SURFACE ioctl. |
* A reference will make sure the surface isn't destroyed while we hold |
* it and will allow the calling client to use the surface ID in the command |
* stream. |
* |
* On successful return, the Ioctl returns the surface information given |
* in the DRM_VMW_CREATE_SURFACE ioctl. |
*/ |
/** |
* union drm_vmw_surface_reference_arg |
* |
* @rep: Output data as described above. |
* @req: Input data as described above. |
* |
* Argument to the DRM_VMW_REF_SURFACE Ioctl. |
*/ |
union drm_vmw_surface_reference_arg { |
struct drm_vmw_surface_create_req rep; |
struct drm_vmw_surface_arg req; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_SURFACE - Unreference a host surface. |
* |
* Clear a reference previously put on a host surface. |
* When all references are gone, including the one implicitly placed on |
* creation, a destroy surface command will be queued for the host. |
* Does not wait for completion. |
*/ |
/*************************************************************************/ |
/** |
* DRM_VMW_EXECBUF |
* |
* Submit a command buffer for execution on the host, and return a |
* fence seqno that when signaled, indicates that the command buffer has |
* executed. |
*/ |
/** |
* struct drm_vmw_execbuf_arg |
* |
* @commands: User-space address of a command buffer cast to an uint64_t. |
* @command_size: Size in bytes of the command buffer. |
* @throttle_us: Sleep until software is less than @throttle_us |
* microseconds ahead of hardware. The driver may round this value |
* to the nearest kernel tick. |
* @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an |
* uint64_t. |
* @version: Allows expanding the execbuf ioctl parameters without breaking |
* backwards compatibility, since user-space will always tell the kernel |
* which version it uses. |
* @flags: Execbuf flags. None currently. |
* |
* Argument to the DRM_VMW_EXECBUF Ioctl. |
*/ |
#define DRM_VMW_EXECBUF_VERSION 1 |
struct drm_vmw_execbuf_arg { |
uint64_t commands; |
uint32_t command_size; |
uint32_t throttle_us; |
uint64_t fence_rep; |
uint32_t version; |
uint32_t flags; |
}; |
/** |
* struct drm_vmw_fence_rep |
* |
* @handle: Fence object handle for fence associated with a command submission. |
* @mask: Fence flags relevant for this fence object. |
* @seqno: Fence sequence number in fifo. A fence object with a lower |
* seqno will signal the EXEC flag before a fence object with a higher |
* seqno. This can be used by user-space to avoid kernel calls to determine |
* whether a fence has signaled the EXEC flag. Note that @seqno will |
* wrap at 32-bit. |
* @passed_seqno: The highest seqno number processed by the hardware |
* so far. This can be used to mark user-space fence objects as signaled, and |
* to determine whether a fence seqno might be stale. |
* @error: This member should be preset to -EFAULT by user-space on |
* submission. The following actions should be taken on completion: |
* error == -EFAULT: Fence communication failed. The host is synchronized. |
* Use the last fence id read from the FIFO fence register. |
* error != 0 && error != -EFAULT: |
* Fence submission failed. The host is synchronized. Use the fence_seq member. |
* error == 0: All is OK, The host may not be synchronized. |
* Use the fence_seq member. |
* |
* Input / Output data to the DRM_VMW_EXECBUF Ioctl. |
*/ |
struct drm_vmw_fence_rep { |
uint32_t handle; |
uint32_t mask; |
uint32_t seqno; |
uint32_t passed_seqno; |
uint32_t pad64; |
int32_t error; |
}; |
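/* |
 * Sketch of the @error protocol documented above: user-space presets |
 * fence_rep.error to -EFAULT, submits, then decides whether the seqno |
 * can be trusted. The SKETCH_ ioctl code is composed here as an |
 * assumption; `commands'/`size' describe a prepared command buffer. |
 */ |
#include <stdint.h> |
#include <sys/ioctl.h> |
#define SKETCH_IOCTL_VMW_EXECBUF DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, struct drm_vmw_execbuf_arg) |
static inline int vmw_execbuf_sketch(int fd, uint64_t commands, uint32_t size, uint32_t *seqno) |
{ |
	struct drm_vmw_fence_rep rep = { .error = -EFAULT }; |
	struct drm_vmw_execbuf_arg arg = { |
		.commands     = commands, |
		.command_size = size, |
		.fence_rep    = (uint64_t)(uintptr_t)&rep, |
		.version      = DRM_VMW_EXECBUF_VERSION, |
	}; |
 |
	if (ioctl(fd, SKETCH_IOCTL_VMW_EXECBUF, &arg)) |
		return -1; |
	if (rep.error == -EFAULT) |
		return -1; /* fence communication failed; host is synchronized */ |
	if (rep.error != 0) |
		return -1; /* fence submission failed; host is synchronized */ |
	*seqno = rep.seqno; /* all OK; host may not be synchronized */ |
	return 0; |
} |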
/*************************************************************************/ |
/** |
* DRM_VMW_ALLOC_DMABUF |
* |
* Allocate a DMA buffer that is visible also to the host. |
* NOTE: The buffer is |
* identified by a handle and an offset, which are private to the guest, but |
* usable in the command stream. The guest kernel may translate these |
* and patch up the command stream accordingly. In the future, the offset may |
* be zero at all times, or it may disappear from the interface before it is |
* fixed. |
* |
* The DMA buffer may stay user-space mapped in the guest at all times, |
* and is thus suitable for sub-allocation. |
* |
* DMA buffers are mapped using the mmap() syscall on the drm device. |
*/ |
/** |
* struct drm_vmw_alloc_dmabuf_req |
* |
* @size: Required minimum size of the buffer. |
* |
* Input data to the DRM_VMW_ALLOC_DMABUF Ioctl. |
*/ |
struct drm_vmw_alloc_dmabuf_req { |
uint32_t size; |
uint32_t pad64; |
}; |
/** |
* struct drm_vmw_dmabuf_rep |
* |
* @map_handle: Offset to use in the mmap() call used to map the buffer. |
* @handle: Handle unique to this buffer. Used for unreferencing. |
* @cur_gmr_id: GMR id to use in the command stream when this buffer is |
* referenced. See note above. |
* @cur_gmr_offset: Offset to use in the command stream when this buffer is |
* referenced. See note above. |
* |
* Output data from the DRM_VMW_ALLOC_DMABUF Ioctl. |
*/ |
struct drm_vmw_dmabuf_rep { |
uint64_t map_handle; |
uint32_t handle; |
uint32_t cur_gmr_id; |
uint32_t cur_gmr_offset; |
uint32_t pad64; |
}; |
/** |
* union drm_vmw_dmabuf_arg |
* |
* @req: Input data as described above. |
* @rep: Output data as described above. |
* |
* Argument to the DRM_VMW_ALLOC_DMABUF Ioctl. |
*/ |
union drm_vmw_alloc_dmabuf_arg { |
struct drm_vmw_alloc_dmabuf_req req; |
struct drm_vmw_dmabuf_rep rep; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_DMABUF - Free a DMA buffer. |
* |
*/ |
/** |
* struct drm_vmw_unref_dmabuf_arg |
* |
* @handle: Handle indicating what buffer to free. Obtained from the |
* DRM_VMW_ALLOC_DMABUF Ioctl. |
* |
* Argument to the DRM_VMW_UNREF_DMABUF Ioctl. |
*/ |
struct drm_vmw_unref_dmabuf_arg { |
uint32_t handle; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_CONTROL_STREAM - Control overlays, aka streams. |
* |
* This IOCTL controls the overlay units of the svga device. |
* The SVGA overlay units do not work like regular hardware units in |
* that they do not automatically read back the contents of the given dma |
* buffer. Instead they read back only on each call to this ioctl, and |
* at any point between this call being made and a following call that |
* either changes the buffer or disables the stream. |
*/ |
/** |
* struct drm_vmw_rect |
* |
* Defines a rectangle. Used in the overlay ioctl to define |
* source and destination rectangle. |
*/ |
struct drm_vmw_rect { |
int32_t x; |
int32_t y; |
uint32_t w; |
uint32_t h; |
}; |
/** |
* struct drm_vmw_control_stream_arg |
* |
* @stream_id: Stream to control |
* @enabled: If false all following arguments are ignored. |
* @handle: Handle to buffer for getting data from. |
* @format: Format of the overlay as understood by the host. |
* @width: Width of the overlay. |
* @height: Height of the overlay. |
* @size: Size of the overlay in bytes. |
* @pitch: Array of pitches, the two last are only used for YUV12 formats. |
* @offset: Offset from start of dma buffer to overlay. |
* @src: Source rect, must be within the defined area above. |
* @dst: Destination rect, x and y may be negative. |
* |
* Argument to the DRM_VMW_CONTROL_STREAM Ioctl. |
*/ |
struct drm_vmw_control_stream_arg { |
uint32_t stream_id; |
uint32_t enabled; |
uint32_t flags; |
uint32_t color_key; |
uint32_t handle; |
uint32_t offset; |
int32_t format; |
uint32_t size; |
uint32_t width; |
uint32_t height; |
uint32_t pitch[3]; |
uint32_t pad64; |
struct drm_vmw_rect src; |
struct drm_vmw_rect dst; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass. |
* |
*/ |
#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0) |
#define DRM_VMW_CURSOR_BYPASS_FLAGS (1) |
/** |
* struct drm_vmw_cursor_bypass_arg |
* |
* @flags: Flags. |
* @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed. |
* @xpos: X position of cursor. |
* @ypos: Y position of cursor. |
* @xhot: X hotspot. |
* @yhot: Y hotspot. |
* |
* Argument to the DRM_VMW_CURSOR_BYPASS Ioctl. |
*/ |
struct drm_vmw_cursor_bypass_arg { |
uint32_t flags; |
uint32_t crtc_id; |
int32_t xpos; |
int32_t ypos; |
int32_t xhot; |
int32_t yhot; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_CLAIM_STREAM - Claim a single stream. |
*/ |
/** |
* struct drm_vmw_stream_arg |
* |
* @stream_id: Device unique stream ID. |
* |
* Output argument to the DRM_VMW_CLAIM_STREAM Ioctl. |
* Input argument to the DRM_VMW_UNREF_STREAM Ioctl. |
*/ |
struct drm_vmw_stream_arg { |
uint32_t stream_id; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_STREAM - Unclaim a stream. |
* |
* Return a single stream that was claimed by this process. Also makes |
* sure that the stream has been stopped. |
*/ |
/*************************************************************************/ |
/** |
* DRM_VMW_GET_3D_CAP |
* |
* Read 3D capabilities from the FIFO |
* |
*/ |
/** |
* struct drm_vmw_get_3d_cap_arg |
* |
* @buffer: Pointer to a buffer for capability data, cast to an uint64_t |
* @max_size: Max size to copy |
* |
* Input argument to the DRM_VMW_GET_3D_CAP ioctl. |
*/ |
struct drm_vmw_get_3d_cap_arg { |
uint64_t buffer; |
uint32_t max_size; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_FENCE_WAIT |
* |
* Waits for a fence object to signal. The wait is interruptible, so that |
* signals may be delivered during the wait. The wait may time out, |
* in which case the call returns -EBUSY. If the wait is restarted, |
* that is restarting without resetting @cookie_valid to zero, |
* the timeout is computed from the first call. |
* |
* The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait |
* on: |
* DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the |
* command stream have executed. |
* DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query |
* finish commands in the buffer given to the EXECBUF ioctl returning |
* the fence object handle are available to user-space. |
* |
* DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the |
* fence wait ioctl returns 0, the fence object has been unreferenced after |
* the wait. |
*/ |
#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0) |
#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1) |
#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0) |
/** |
* struct drm_vmw_fence_wait_arg |
* |
* @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl. |
* @cookie_valid: Must be reset to 0 on first call. Left alone on restart. |
* @kernel_cookie: Set to 0 on first call. Left alone on restart. |
* @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout. |
* @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick |
* before returning. |
* @flags: Fence flags to wait on. |
* @wait_options: Options that control the behaviour of the wait ioctl. |
* |
* Input argument to the DRM_VMW_FENCE_WAIT ioctl. |
*/ |
struct drm_vmw_fence_wait_arg { |
uint32_t handle; |
int32_t cookie_valid; |
uint64_t kernel_cookie; |
uint64_t timeout_us; |
int32_t lazy; |
int32_t flags; |
int32_t wait_options; |
int32_t pad64; |
}; |
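/* |
 * Sketch of the restart rule above: zero @cookie_valid once, then leave |
 * the argument untouched when retrying after EINTR so the timeout keeps |
 * counting from the first call. The SKETCH_ ioctl code is an assumption. |
 */ |
#include <errno.h> |
#include <stdint.h> |
#include <sys/ioctl.h> |
#define SKETCH_IOCTL_VMW_FENCE_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, struct drm_vmw_fence_wait_arg) |
static inline int vmw_fence_wait_sketch(int fd, uint32_t handle, uint64_t timeout_us) |
{ |
	struct drm_vmw_fence_wait_arg arg = { |
		.handle       = handle, |
		.cookie_valid = 0, /* reset only on the first call */ |
		.timeout_us   = timeout_us, |
		.lazy         = 1, |
		.flags        = DRM_VMW_FENCE_FLAG_EXEC, |
	}; |
	int ret; |
 |
	do { |
		ret = ioctl(fd, SKETCH_IOCTL_VMW_FENCE_WAIT, &arg); |
	} while (ret == -1 && errno == EINTR); |
	return ret; |
} |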
/*************************************************************************/ |
/** |
* DRM_VMW_FENCE_SIGNALED |
* |
* Checks if a fence object is signaled. |
*/ |
/** |
* struct drm_vmw_fence_signaled_arg |
* |
* @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl. |
* @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl |
* @signaled: Out: Flags signaled. |
* @passed_seqno: Out: Highest seqno passed so far. Can be used to signal |
* the EXEC flag of user-space fence objects. |
* |
* Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF |
* ioctls. |
*/ |
struct drm_vmw_fence_signaled_arg { |
uint32_t handle; |
uint32_t flags; |
int32_t signaled; |
uint32_t passed_seqno; |
uint32_t signaled_flags; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_FENCE_UNREF |
* |
* Unreferences a fence object, and causes it to be destroyed if there are no |
* other references to it. |
* |
*/ |
/** |
* struct drm_vmw_fence_arg |
* |
* @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl. |
* |
* Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl. |
*/ |
struct drm_vmw_fence_arg { |
uint32_t handle; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_FENCE_EVENT |
* |
* Queues an event on a fence to be delivered on the drm character device |
* when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag. |
* Optionally the approximate time when the fence signaled is |
* given by the event. |
*/ |
/* |
* The event type |
*/ |
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000 |
struct drm_vmw_event_fence { |
struct drm_event base; |
uint64_t user_data; |
uint32_t tv_sec; |
uint32_t tv_usec; |
}; |
/* |
* Flags that may be given to the command. |
*/ |
/* Request fence signaled time on the event. */ |
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0) |
/** |
* struct drm_vmw_fence_event_arg |
* |
* @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if |
* the fence is not supposed to be referenced by user-space. |
* @user_data: Info to be delivered with the event. |
* @handle: Attach the event to this fence only. |
* @flags: A set of flags as defined above. |
*/ |
struct drm_vmw_fence_event_arg { |
uint64_t fence_rep; |
uint64_t user_data; |
uint32_t handle; |
uint32_t flags; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_PRESENT |
* |
* Executes an SVGA present on a given fb for a given surface. The surface |
* is placed on the framebuffer. Cliprects are given relative to the given |
* point (the point designated by dest_{x|y}). |
* |
*/ |
/** |
* struct drm_vmw_present_arg |
* @fb_id: framebuffer id to present / read back from. |
* @sid: Surface id to present from. |
* @dest_x: X placement coordinate for surface. |
* @dest_y: Y placement coordinate for surface. |
* @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. |
* @num_clips: Number of cliprects given relative to the framebuffer origin, |
* in the same coordinate space as the frame buffer. |
* @pad64: Unused 64-bit padding. |
* |
* Input argument to the DRM_VMW_PRESENT ioctl. |
*/ |
struct drm_vmw_present_arg { |
uint32_t fb_id; |
uint32_t sid; |
int32_t dest_x; |
int32_t dest_y; |
uint64_t clips_ptr; |
uint32_t num_clips; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_PRESENT_READBACK |
* |
* Executes an SVGA present readback from a given fb to the dma buffer |
* currently bound as the fb. If there is no dma buffer bound to the fb, |
* an error will be returned. |
* |
*/ |
/** |
* struct drm_vmw_present_arg |
* @fb_id: fb_id to present / read back from. |
* @num_clips: Number of cliprects. |
* @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. |
* @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t. |
* If this member is NULL, then the ioctl should not return a fence. |
*/ |
struct drm_vmw_present_readback_arg { |
uint32_t fb_id; |
uint32_t num_clips; |
uint64_t clips_ptr; |
uint64_t fence_rep; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_UPDATE_LAYOUT - Update layout |
* |
* Updates the preferred modes and connection status for connectors. The |
* command consists of one drm_vmw_update_layout_arg pointing to an array |
* of num_outputs drm_vmw_rect's. |
*/ |
/** |
* struct drm_vmw_update_layout_arg |
* |
* @num_outputs: number of active connectors |
* @rects: pointer to array of drm_vmw_rect cast to an uint64_t |
* |
* Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. |
*/ |
struct drm_vmw_update_layout_arg { |
uint32_t num_outputs; |
uint32_t pad64; |
uint64_t rects; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_CREATE_SHADER - Create shader |
* |
* Creates a shader and optionally binds it to a dma buffer containing |
* the shader byte-code. |
*/ |
/** |
* enum drm_vmw_shader_type - Shader types |
*/ |
enum drm_vmw_shader_type { |
drm_vmw_shader_type_vs = 0, |
drm_vmw_shader_type_ps, |
drm_vmw_shader_type_gs |
}; |
/** |
* struct drm_vmw_shader_create_arg |
* |
* @shader_type: Shader type of the shader to create. |
* @size: Size of the byte-code in bytes. |
* @buffer_handle: Buffer handle identifying the buffer containing the |
* shader byte-code. |
* @offset: Offset in bytes into the buffer given by @buffer_handle, |
* where the shader byte-code starts. |
* @shader_handle: On successful completion contains a handle that |
* can be used to subsequently identify the shader. |
* |
* Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl. |
*/ |
struct drm_vmw_shader_create_arg { |
enum drm_vmw_shader_type shader_type; |
uint32_t size; |
uint32_t buffer_handle; |
uint32_t shader_handle; |
uint64_t offset; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_UNREF_SHADER - Unreferences a shader |
* |
* Destroys a user-space reference to a shader, destroying the shader |
* itself if that was the last reference. |
*/ |
/** |
* struct drm_vmw_shader_arg |
* |
* @handle: Handle identifying the shader to destroy. |
* |
* Input argument to the DRM_VMW_UNREF_SHADER ioctl. |
*/ |
struct drm_vmw_shader_arg { |
uint32_t handle; |
uint32_t pad64; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface. |
* |
* Allocates a surface handle and queues a create surface command |
* for the host on the first use of the surface. The surface ID can |
* be used as the surface ID in commands referencing the surface. |
*/ |
/** |
* enum drm_vmw_surface_flags |
* |
* @drm_vmw_surface_flag_shareable: Whether the surface is shareable |
* @drm_vmw_surface_flag_scanout: Whether the surface is a scanout |
* surface. |
* @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is |
* given. |
*/ |
enum drm_vmw_surface_flags { |
drm_vmw_surface_flag_shareable = (1 << 0), |
drm_vmw_surface_flag_scanout = (1 << 1), |
drm_vmw_surface_flag_create_buffer = (1 << 2) |
}; |
/** |
* struct drm_vmw_gb_surface_create_req |
* |
* @svga3d_flags: SVGA3d surface flags for the device. |
* @format: SVGA3d format. |
* @mip_levels: Number of mip levels for all faces. |
* @drm_surface_flags: Flags as described above. |
* @multisample_count: Future use. Set to 0. |
* @autogen_filter: Future use. Set to 0. |
* @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID |
* if none. |
* @base_size: Size of the base mip level for all faces. |
* |
* Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl. |
* Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. |
*/ |
struct drm_vmw_gb_surface_create_req { |
uint32_t svga3d_flags; |
uint32_t format; |
uint32_t mip_levels; |
enum drm_vmw_surface_flags drm_surface_flags; |
uint32_t multisample_count; |
uint32_t autogen_filter; |
uint32_t buffer_handle; |
uint32_t pad64; |
struct drm_vmw_size base_size; |
}; |
/** |
* struct drm_vmw_gb_surface_create_rep |
* |
* @handle: Surface handle. |
* @backup_size: Size of backup buffers for this surface. |
* @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none. |
* @buffer_size: Actual size of the buffer identified by |
* @buffer_handle |
* @buffer_map_handle: Offset into device address space for the buffer |
* identified by @buffer_handle. |
* |
* Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl. |
* Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl. |
*/ |
struct drm_vmw_gb_surface_create_rep { |
uint32_t handle; |
uint32_t backup_size; |
uint32_t buffer_handle; |
uint32_t buffer_size; |
uint64_t buffer_map_handle; |
}; |
/** |
* union drm_vmw_gb_surface_create_arg |
* |
* @req: Input argument as described above. |
* @rep: Output argument as described above. |
* |
* Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl. |
*/ |
union drm_vmw_gb_surface_create_arg { |
struct drm_vmw_gb_surface_create_rep rep; |
struct drm_vmw_gb_surface_create_req req; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_GB_SURFACE_REF - Reference a host surface. |
* |
* Puts a reference on a host surface with a given handle, as previously |
* returned by the DRM_VMW_GB_SURFACE_CREATE ioctl. |
* A reference will make sure the surface isn't destroyed while we hold |
* it and will allow the calling client to use the surface handle in |
* the command stream. |
* |
* On successful return, the Ioctl returns the surface information given |
* to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl. |
*/ |
/** |
* struct drm_vmw_gb_surface_reference_arg |
* |
* @creq: The data used as input when the surface was created, as described |
* above at "struct drm_vmw_gb_surface_create_req" |
* @crep: Additional data output when the surface was created, as described |
* above at "struct drm_vmw_gb_surface_create_rep" |
* |
* Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl. |
*/ |
struct drm_vmw_gb_surface_ref_rep { |
struct drm_vmw_gb_surface_create_req creq; |
struct drm_vmw_gb_surface_create_rep crep; |
}; |
/** |
* union drm_vmw_gb_surface_reference_arg |
* |
* @req: Input data as described above at "struct drm_vmw_surface_arg" |
* @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep" |
* |
* Argument to the DRM_VMW_GB_SURFACE_REF Ioctl. |
*/ |
union drm_vmw_gb_surface_reference_arg { |
struct drm_vmw_gb_surface_ref_rep rep; |
struct drm_vmw_surface_arg req; |
}; |
/*************************************************************************/ |
/** |
* DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access. |
* |
* Idles any previously submitted GPU operations on the buffer and |
* by default blocks command submissions that reference the buffer. |
* If the file descriptor used to grab a blocking CPU sync is closed, the |
* cpu sync is released. |
* The flags argument indicates how the grab / release operation should be |
* performed: |
*/ |
/** |
* enum drm_vmw_synccpu_flags - Synccpu flags: |
* |
* @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a |
* hint to the kernel to allow command submissions that reference the buffer |
* read-only. |
* @drm_vmw_synccpu_write: Sync for write. Block all command submissions |
* referencing this buffer. |
* @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return |
* -EBUSY should the buffer be busy. |
* @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer |
* while the buffer is synced for CPU. This is similar to the GEM bo idle |
* behavior. |
*/ |
enum drm_vmw_synccpu_flags { |
drm_vmw_synccpu_read = (1 << 0), |
drm_vmw_synccpu_write = (1 << 1), |
drm_vmw_synccpu_dontblock = (1 << 2), |
drm_vmw_synccpu_allow_cs = (1 << 3) |
}; |
/** |
* enum drm_vmw_synccpu_op - Synccpu operations: |
* |
* @drm_vmw_synccpu_grab: Grab the buffer for CPU operations |
* @drm_vmw_synccpu_release: Release a previous grab. |
*/ |
enum drm_vmw_synccpu_op { |
drm_vmw_synccpu_grab, |
drm_vmw_synccpu_release |
}; |
/** |
* struct drm_vmw_synccpu_arg |
* |
* @op: The synccpu operation as described above. |
* @handle: Handle identifying the buffer object. |
* @flags: Flags as described above. |
*/ |
struct drm_vmw_synccpu_arg { |
enum drm_vmw_synccpu_op op; |
enum drm_vmw_synccpu_flags flags; |
uint32_t handle; |
uint32_t pad64; |
}; |
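/* |
 * Sketch of the grab/release pairing: sync a buffer for CPU writes, |
 * touch it, then release with identical flags. The SKETCH_ ioctl code |
 * is composed here as an assumption. |
 */ |
#include <stdint.h> |
#include <sys/ioctl.h> |
#define SKETCH_IOCTL_VMW_SYNCCPU DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, struct drm_vmw_synccpu_arg) |
static inline int vmw_synccpu_sketch(int fd, uint32_t handle, enum drm_vmw_synccpu_op op) |
{ |
	struct drm_vmw_synccpu_arg arg = { |
		.op     = op,                    /* grab, then later release */ |
		.flags  = drm_vmw_synccpu_write, /* must match on release */ |
		.handle = handle, |
	}; |
 |
	return ioctl(fd, SKETCH_IOCTL_VMW_SYNCCPU, &arg); |
} |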
#endif |
/drivers/include/uapi/linux/const.h |
---|
0,0 → 1,27 |
/* const.h: Macros for dealing with constants. */ |
#ifndef _LINUX_CONST_H |
#define _LINUX_CONST_H |
/* Some constant macros are used in both assembler and |
* C code. Therefore we cannot annotate them always with |
* 'UL' and other type specifiers unilaterally. We |
* use the following macros to deal with this. |
* |
* Similarly, _AT() will cast an expression with a type in C, but |
* leave it unchanged in asm. |
*/ |
#ifdef __ASSEMBLY__ |
#define _AC(X,Y) X |
#define _AT(T,X) X |
#else |
#define __AC(X,Y) (X##Y) |
#define _AC(X,Y) __AC(X,Y) |
#define _AT(T,X) ((T)(X)) |
#endif |
#define _BITUL(x) (_AC(1,UL) << (x)) |
#define _BITULL(x) (_AC(1,ULL) << (x)) |
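/* |
 * Sketch: one shared constant usable from both C and assembler. In C, |
 * _AC(1,UL) expands to (1UL); in asm it stays a bare 1. The SKETCH_ |
 * names are illustrative only. |
 */ |
#define SKETCH_PAGE_SHIFT 12 |
#define SKETCH_PAGE_SIZE  (_AC(1, UL) << SKETCH_PAGE_SHIFT) /* 4096UL in C, 4096 in asm */ |
#define SKETCH_DMA31_BIT  _BITUL(31)                        /* (1UL << 31) in C */ |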
#endif /* !(_LINUX_CONST_H) */ |
/drivers/include/uapi/linux/errno.h |
---|
0,0 → 1,0 |
#include <asm/errno.h> |
/drivers/include/uapi/linux/ioctl.h |
---|
0,0 → 1,7 |
#ifndef _LINUX_IOCTL_H |
#define _LINUX_IOCTL_H |
#include <asm/ioctl.h> |
#endif /* _LINUX_IOCTL_H */ |
/drivers/include/uapi/linux/kernel.h |
---|
0,0 → 1,13 |
#ifndef _UAPI_LINUX_KERNEL_H |
#define _UAPI_LINUX_KERNEL_H |
//#include <linux/sysinfo.h> |
/* |
* 'kernel.h' contains some often-used function prototypes etc. |
*/ |
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) |
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) |
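/* |
 * Sketch: rounding up to a power-of-two boundary with the macros above |
 * (__ALIGN_KERNEL relies on the GNU typeof extension). For example, |
 * __ALIGN_KERNEL(5000UL, 4096UL) is (5000 + 4095) & ~4095 == 8192. |
 */ |
#define SKETCH_ALIGNED_LEN __ALIGN_KERNEL(5000UL, 4096UL) /* -> 8192 */ |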
#endif /* _UAPI_LINUX_KERNEL_H */ |
/drivers/include/uapi/linux/personality.h |
---|
0,0 → 1,69 |
#ifndef _UAPI_LINUX_PERSONALITY_H |
#define _UAPI_LINUX_PERSONALITY_H |
/* |
* Flags for bug emulation. |
* |
* These occupy the top three bytes. |
*/ |
enum { |
UNAME26 = 0x0020000, |
ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */ |
FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors |
* (signal handling) |
*/ |
MMAP_PAGE_ZERO = 0x0100000, |
ADDR_COMPAT_LAYOUT = 0x0200000, |
READ_IMPLIES_EXEC = 0x0400000, |
ADDR_LIMIT_32BIT = 0x0800000, |
SHORT_INODE = 0x1000000, |
WHOLE_SECONDS = 0x2000000, |
STICKY_TIMEOUTS = 0x4000000, |
ADDR_LIMIT_3GB = 0x8000000, |
}; |
/* |
* Security-relevant compatibility flags that must be |
* cleared upon setuid or setgid exec: |
*/ |
#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \ |
ADDR_NO_RANDOMIZE | \ |
ADDR_COMPAT_LAYOUT | \ |
MMAP_PAGE_ZERO) |
/* |
* Personality types. |
* |
* These go in the low byte. Avoid using the top bit, it will |
* conflict with error returns. |
*/ |
enum { |
PER_LINUX = 0x0000, |
PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT, |
PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS, |
PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, |
PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE, |
PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | |
WHOLE_SECONDS | SHORT_INODE, |
PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS, |
PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE, |
PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS, |
PER_BSD = 0x0006, |
PER_SUNOS = 0x0006 | STICKY_TIMEOUTS, |
PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE, |
PER_LINUX32 = 0x0008, |
PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB, |
PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */ |
PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */ |
PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */ |
PER_RISCOS = 0x000c, |
PER_SOLARIS = 0x000d | STICKY_TIMEOUTS, |
PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, |
PER_OSF4 = 0x000f, /* OSF/1 v4 */ |
PER_HPUX = 0x0010, |
PER_MASK = 0x00ff, |
}; |
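/* |
 * Sketch: a personality value packs a type in the low byte with bug- |
 * emulation flags above it; PER_MASK recovers the type portion. The |
 * helper name is illustrative. |
 */ |
static inline unsigned long sketch_personality_type(unsigned long persona) |
{ |
	return persona & PER_MASK; /* e.g. PER_SVR4 -> 0x0001 */ |
} |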
#endif /* _UAPI_LINUX_PERSONALITY_H */ |
/drivers/include/uapi/linux/stddef.h |
---|
0,0 → 1,0 |
#include <linux/compiler.h> |
/drivers/include/uapi/linux/string.h |
---|
0,0 → 1,9 |
#ifndef _UAPI_LINUX_STRING_H_ |
#define _UAPI_LINUX_STRING_H_ |
/* We don't want strings.h stuff being used by user stuff by accident */ |
#ifndef __KERNEL__ |
#include <string.h> |
#endif /* __KERNEL__ */ |
#endif /* _UAPI_LINUX_STRING_H_ */ |
/drivers/include/uapi/linux/sysinfo.h |
---|
0,0 → 1,24 |
#ifndef _LINUX_SYSINFO_H |
#define _LINUX_SYSINFO_H |
#include <linux/types.h> |
#define SI_LOAD_SHIFT 16 |
struct sysinfo { |
__kernel_long_t uptime; /* Seconds since boot */ |
__kernel_ulong_t loads[3]; /* 1, 5, and 15 minute load averages */ |
__kernel_ulong_t totalram; /* Total usable main memory size */ |
__kernel_ulong_t freeram; /* Available memory size */ |
__kernel_ulong_t sharedram; /* Amount of shared memory */ |
__kernel_ulong_t bufferram; /* Memory used by buffers */ |
__kernel_ulong_t totalswap; /* Total swap space size */ |
__kernel_ulong_t freeswap; /* swap space still available */ |
__u16 procs; /* Number of current processes */ |
__u16 pad; /* Explicit padding for m68k */ |
__kernel_ulong_t totalhigh; /* Total high memory size */ |
__kernel_ulong_t freehigh; /* Available high memory size */ |
__u32 mem_unit; /* Memory unit size in bytes */ |
char _f[20-2*sizeof(__kernel_ulong_t)-sizeof(__u32)]; /* Padding: libc5 uses this.. */ |
}; |
#endif /* _LINUX_SYSINFO_H */ |
/drivers/include/uapi/linux/time.h |
---|
0,0 → 1,69 |
#ifndef _UAPI_LINUX_TIME_H |
#define _UAPI_LINUX_TIME_H |
#include <linux/types.h> |
#ifndef _STRUCT_TIMESPEC |
#define _STRUCT_TIMESPEC |
struct timespec { |
__kernel_time_t tv_sec; /* seconds */ |
long tv_nsec; /* nanoseconds */ |
}; |
#endif |
struct timeval { |
__kernel_time_t tv_sec; /* seconds */ |
__kernel_suseconds_t tv_usec; /* microseconds */ |
}; |
struct timezone { |
int tz_minuteswest; /* minutes west of Greenwich */ |
int tz_dsttime; /* type of dst correction */ |
}; |
/* |
* Names of the interval timers, and structure |
* defining a timer setting: |
*/ |
#define ITIMER_REAL 0 |
#define ITIMER_VIRTUAL 1 |
#define ITIMER_PROF 2 |
struct itimerspec { |
struct timespec it_interval; /* timer period */ |
struct timespec it_value; /* timer expiration */ |
}; |
struct itimerval { |
struct timeval it_interval; /* timer interval */ |
struct timeval it_value; /* current value */ |
}; |
/* |
* The IDs of the various system clocks (for POSIX.1b interval timers): |
*/ |
#define CLOCK_REALTIME 0 |
#define CLOCK_MONOTONIC 1 |
#define CLOCK_PROCESS_CPUTIME_ID 2 |
#define CLOCK_THREAD_CPUTIME_ID 3 |
#define CLOCK_MONOTONIC_RAW 4 |
#define CLOCK_REALTIME_COARSE 5 |
#define CLOCK_MONOTONIC_COARSE 6 |
#define CLOCK_BOOTTIME 7 |
#define CLOCK_REALTIME_ALARM 8 |
#define CLOCK_BOOTTIME_ALARM 9 |
#define CLOCK_SGI_CYCLE 10 /* Hardware specific */ |
#define CLOCK_TAI 11 |
#define MAX_CLOCKS 16 |
#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC) |
#define CLOCKS_MONO CLOCK_MONOTONIC |
/* |
* The various flags for setting POSIX.1b interval timers: |
*/ |
#define TIMER_ABSTIME 0x01 |
#endif /* _UAPI_LINUX_TIME_H */ |
/drivers/include/uapi/linux/types.h |
---|
0,0 → 1,56 |
#ifndef _UAPI_LINUX_TYPES_H |
#define _UAPI_LINUX_TYPES_H |
#include <asm/types.h> |
#ifndef __ASSEMBLY__ |
#ifndef __KERNEL__ |
#ifndef __EXPORTED_HEADERS__ |
#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders" |
#endif /* __EXPORTED_HEADERS__ */ |
#endif |
#include <linux/posix_types.h> |
/* |
* Below are truly Linux-specific types that should never collide with |
* any application/library that wants linux/types.h. |
*/ |
#ifdef __CHECKER__ |
#define __bitwise__ __attribute__((bitwise)) |
#else |
#define __bitwise__ |
#endif |
#ifdef __CHECK_ENDIAN__ |
#define __bitwise __bitwise__ |
#else |
#define __bitwise |
#endif |
typedef __u16 __bitwise __le16; |
typedef __u16 __bitwise __be16; |
typedef __u32 __bitwise __le32; |
typedef __u32 __bitwise __be32; |
typedef __u64 __bitwise __le64; |
typedef __u64 __bitwise __be64; |
typedef __u16 __bitwise __sum16; |
typedef __u32 __bitwise __wsum; |
/* |
* aligned_u64 should be used in defining kernel<->userspace ABIs to avoid |
* common 32/64-bit compat problems. |
* 64-bit values align to 4-byte boundaries on x86_32 (and possibly other |
* architectures) and to 8-byte boundaries on 64-bit architectures. The new |
* aligned_u64 type enforces 8-byte alignment so that structs containing |
* aligned_u64 values have the same alignment on 32-bit and 64-bit architectures. |
* No conversions are necessary between 32-bit user-space and a 64-bit kernel. |
*/ |
#define __aligned_u64 __u64 __attribute__((aligned(8))) |
#define __aligned_be64 __be64 __attribute__((aligned(8))) |
#define __aligned_le64 __le64 __attribute__((aligned(8))) |
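/* |
 * Sketch: why __aligned_u64 matters in an ioctl ABI. A plain __u64 |
 * after a single __u32 would sit at offset 4 on i386 but offset 8 on |
 * x86_64, breaking 32-on-64 compatibility; the aligned form pins the |
 * layout. The struct name is illustrative. |
 */ |
struct sketch_ioctl_arg { |
	__u32         handle; |
	/* no explicit pad needed: the attribute forces offset 8 everywhere */ |
	__aligned_u64 size; |
}; |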
#endif /* __ASSEMBLY__ */ |
#endif /* _UAPI_LINUX_TYPES_H */ |